Dataset columns (one record per source file):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 5 to 283
- content_id: string, length 40
- detected_licenses: sequence, length 0 to 41
- license_type: string, 2 classes
- repo_name: string, length 7 to 96
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 58 classes
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 12.7k to 662M, nullable (⌀)
- star_events_count: int64, 0 to 35.5k
- fork_events_count: int64, 0 to 20.6k
- gha_license_id: string, 11 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 43 classes
- src_encoding: string, 9 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 7 to 5.88M
- extension: string, 30 classes
- content: string, length 7 to 5.88M
- authors: sequence, length 1
- author: string, length 0 to 73
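A minimal sketch of how rows with this schema might be read (the dataset path and the Hugging Face `datasets` calls below are assumptions for illustration, not taken from this card):

from datasets import load_dataset  # assumed dependency

# Stream the split so the multi-megabyte `content` strings are not all held in memory.
ds = load_dataset("bigcode/the-stack", split="train", streaming=True)  # hypothetical dataset path
for row in ds.take(3):
    # Each record describes one source file plus its full text.
    print(row["repo_name"], row["path"], row["length_bytes"], row["license_type"])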
53bca9adc8334f6c4fced883e5abc47aa24987f5 | db82ec070fd356ea5a5aebd5ada39c7fe58a2b38 | /TestCase/Test/BianMa/python_deal_bianma.py | 0a54a6c3052569372811b5e01abf897713d8c516 | [] | no_license | peng211217610/Python | 8d7217af06262fda2133e50c634eac0148e2ace5 | e65889e87aa51e1cb94601d3470e3906ad9e2d0a | refs/heads/master | 2023-08-22T16:55:51.592534 | 2023-08-10T09:43:22 | 2023-08-10T09:43:22 | 379,676,153 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
remark = '''Download the attachment cfiles.zip (see the attached cfiles.zip).
Unzip it; the archive contains two files. One is named 'gbk编码.txt'
and is GBK-encoded.
The other is named 'utf8编码.txt' and is UTF-8-encoded.
Both files contain Chinese text.
Write a Python program that does the following two things:
1. Read the contents of both files and merge them into one string,
then display the merged contents correctly with a print statement.
2. Next, prompt the user in Chinese with "请输入 新文件的名称"
("please enter the new file's name"); the name the user types may contain Chinese.
Store the merged contents in a new file, encoded as UTF-8.
The new file's name is the name the user entered above.'''
import os, sys
gbk_file_dir = os.getcwd() + r'\cfiles\gbk编码.txt'
utf_file_dir = os.getcwd() + r'\cfiles\utf8编码.txt'
new_file_dir = r'D:\TDdownload\Document\Python\test\BianMa'
#open each file with the encoding it was written in
with open(gbk_file_dir,'r',encoding='gbk') as gf:
g_content = gf.read()
with open(utf_file_dir,'r',encoding='utf8') as uf:
u_content = uf.read()
content = g_content + u_content
print(content)
new_name = input('请输入新文件的名称:')
with open('%s/%s.txt' % (new_file_dir,new_name),'w',encoding='utf8') as nf:
nf.write(content)
| [
"[email protected]"
] | |
2154222905bdc45937011c9e9931325a8e3bd675 | 1c9c0918637209b31fae10fec8329a864f4ddf2a | /lib/fabio/test/testedfimage.py | f873dc27b2b4ede5836bfd9f7688c9aa30702c7d | [] | no_license | albusdemens/astrarecon_tests | 224f2695ba14e4e6c8a2173132c1d30edba24e1b | 6b0ee69a2357eb568e2fde1deccfa8b6dd998496 | refs/heads/master | 2021-01-18T19:43:17.485309 | 2017-07-21T12:17:54 | 2017-07-21T12:17:54 | 100,536,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,590 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fable Input Output
# https://github.com/silx-kit/fabio
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
# Unit tests
# builds on stuff from ImageD11.test.testpeaksearch
28/11/2014
"""
from __future__ import print_function, with_statement, division, absolute_import
import unittest
import sys
import os
import numpy
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test")
from .utilstest import UtilsTest
logger = UtilsTest.get_logger(__file__)
fabio = sys.modules["fabio"]
from ..edfimage import edfimage
from ..third_party import six
from ..fabioutils import GzipFile, BZ2File
class TestFlatEdfs(unittest.TestCase):
""" test some flat images """
def common_setup(self):
self.BYTE_ORDER = "LowByteFirst" if numpy.little_endian else "HighByteFirst"
self.MYHEADER = six.b("{\n%-1020s}\n" % (
"""Omega = 0.0 ;
Dim_1 = 256 ;
Dim_2 = 256 ;
DataType = FloatValue ;
ByteOrder = %s ;
Image = 1;
History-1 = something=something else;
\n\n""" % self.BYTE_ORDER))
self.MYIMAGE = numpy.ones((256, 256), numpy.float32) * 10
self.MYIMAGE[0, 0] = 0
self.MYIMAGE[1, 1] = 20
assert len(self.MYIMAGE[0:1, 0:1].tostring()) == 4, self.MYIMAGE[0:1, 0:1].tostring()
def setUp(self):
""" initialize"""
self.common_setup()
self.filename = os.path.join(UtilsTest.tempdir, "im0000.edf")
if not os.path.isfile(self.filename):
outf = open(self.filename, "wb")
assert len(self.MYHEADER) % 1024 == 0
outf.write(self.MYHEADER)
outf.write(self.MYIMAGE.tostring())
outf.close()
def tearDown(self):
unittest.TestCase.tearDown(self)
self.BYTE_ORDER = self.MYHEADER = self.MYIMAGE = None
def test_read(self):
""" check readable"""
obj = edfimage()
obj.read(self.filename)
self.assertEqual(obj.dim1, 256, msg="dim1!=256 for file: %s" % self.filename)
self.assertEqual(obj.dim2, 256, msg="dim2!=256 for file: %s" % self.filename)
self.assertEqual(obj.bpp, 4, msg="bpp!=4 for file: %s" % self.filename)
        self.assertEqual(obj.bytecode, numpy.float32, msg="bytecode!=float32 for file: %s" % self.filename)
self.assertEqual(obj.data.shape, (256, 256), msg="shape!=(256,256) for file: %s" % self.filename)
self.assertEqual(obj.header['History-1'],
"something=something else")
def test_getstats(self):
""" test statistics"""
obj = edfimage()
obj.read(self.filename)
self.assertEqual(obj.getmean(), 10)
self.assertEqual(obj.getmin(), 0)
self.assertEqual(obj.getmax(), 20)
class TestBzipEdf(TestFlatEdfs):
""" same for bzipped versions """
def setUp(self):
"""set it up"""
TestFlatEdfs.setUp(self)
if not os.path.isfile(self.filename + ".bz2"):
with BZ2File(self.filename + ".bz2", "wb") as f:
with open(self.filename, "rb") as d:
f.write(d.read())
self.filename += ".bz2"
class TestGzipEdf(TestFlatEdfs):
""" same for gzipped versions """
def setUp(self):
""" set it up """
TestFlatEdfs.setUp(self)
if not os.path.isfile(self.filename + ".gz"):
with GzipFile(self.filename + ".gz", "wb") as f:
with open(self.filename, "rb") as d:
f.write(d.read())
self.filename += ".gz"
# statistics come from fit2d I think
# filename dim1 dim2 min max mean stddev
TESTIMAGES = """F2K_Seb_Lyso0675.edf 2048 2048 982 17467 1504.29 217.61
F2K_Seb_Lyso0675.edf.bz2 2048 2048 982 17467 1504.29 217.61
F2K_Seb_Lyso0675.edf.gz 2048 2048 982 17467 1504.29 217.61
id13_badPadding.edf 512 512 85 61947 275.62 583.44 """
class TestEdfs(unittest.TestCase):
"""
Read some test images
"""
def setUp(self):
self.im_dir = os.path.dirname(UtilsTest.getimage("F2K_Seb_Lyso0675.edf.bz2"))
UtilsTest.getimage("id13_badPadding.edf.bz2")
def test_read(self):
""" check we can read these images"""
for line in TESTIMAGES.split("\n"):
vals = line.split()
name = vals[0]
dim1, dim2 = [int(x) for x in vals[1:3]]
mini, maxi, mean, stddev = [float(x) for x in vals[3:]]
obj = edfimage()
try:
obj.read(os.path.join(self.im_dir, name))
            except Exception:
                print("Cannot read image", name)
raise
self.assertAlmostEqual(mini, obj.getmin(), 2, "testedfs: %s getmin()" % name)
self.assertAlmostEqual(maxi, obj.getmax(), 2, "testedfs: %s getmax" % name)
logger.info("%s Mean: exp=%s, obt=%s" % (name, mean, obj.getmean()))
self.assertAlmostEqual(mean, obj.getmean(), 2, "testedfs: %s getmean" % name)
logger.info("%s StdDev: exp=%s, obt=%s" % (name, stddev, obj.getstddev()))
self.assertAlmostEqual(stddev, obj.getstddev(), 2, "testedfs: %s getstddev" % name)
self.assertEqual(dim1, obj.dim1, "testedfs: %s dim1" % name)
self.assertEqual(dim2, obj.dim2, "testedfs: %s dim2" % name)
obj = None
def test_rebin(self):
"""test the rebin of edfdata"""
f = edfimage()
f.read(os.path.join(self.im_dir, "F2K_Seb_Lyso0675.edf"))
f.rebin(1024, 1024)
self.assertEqual(abs(numpy.array([[1547, 1439], [1536, 1494]]) - f.data).max(), 0, "data are the same after rebin")
def tearDown(self):
unittest.TestCase.tearDown(self)
self.im_dir = None
class testedfcompresseddata(unittest.TestCase):
"""
Read some test images with their data-block compressed.
    Z-compression and gzip compression are implemented; bzip2 and byte offset are experimental.
"""
def setUp(self):
self.im_dir = os.path.dirname(UtilsTest.getimage("edfGzip_U16.edf.bz2"))
UtilsTest.getimage("edfCompressed_U16.edf.bz2")
UtilsTest.getimage("edfUncompressed_U16.edf.bz2")
def test_read(self):
""" check we can read these images"""
ref = edfimage()
gzipped = edfimage()
compressed = edfimage()
refFile = "edfUncompressed_U16.edf"
gzippedFile = "edfGzip_U16.edf"
compressedFile = "edfCompressed_U16.edf"
try:
ref.read(os.path.join(self.im_dir, refFile))
        except Exception:
            raise RuntimeError("Cannot read uncompressed image %s" % refFile)
try:
gzipped.read(os.path.join(self.im_dir, gzippedFile))
        except Exception:
            raise RuntimeError("Cannot read gzipped image %s" % gzippedFile)
try:
compressed.read(os.path.join(self.im_dir, compressedFile))
        except Exception:
            raise RuntimeError("Cannot read compressed image %s" % compressedFile)
self.assertEqual((ref.data - gzipped.data).max(), 0, "Gzipped data block is correct")
self.assertEqual((ref.data - compressed.data).max(), 0, "Zlib compressed data block is correct")
class TestEdfMultiFrame(unittest.TestCase):
"""
    Read a multi-frame EDF image and check frame access and navigation.
"""
def setUp(self):
self.multiFrameFilename = UtilsTest.getimage("MultiFrame.edf.bz2")[:-4]
self.Frame0Filename = UtilsTest.getimage("MultiFrame-Frame0.edf.bz2")[:-4]
self.Frame1Filename = UtilsTest.getimage("MultiFrame-Frame1.edf.bz2")[:-4]
self.ref = edfimage()
self.frame0 = edfimage()
self.frame1 = edfimage()
try:
self.ref.read(self.multiFrameFilename)
        except Exception:
            raise RuntimeError("Cannot read multi-frame image %s" % self.multiFrameFilename)
try:
self.frame0.read(self.Frame0Filename)
        except Exception:
            raise RuntimeError("Cannot read frame-0 image %s" % self.Frame0Filename)
try:
self.frame1.read(self.Frame1Filename)
        except Exception:
            raise RuntimeError("Cannot read frame-1 image %s" % self.Frame1Filename)
def tearDown(self):
unittest.TestCase.tearDown(self)
self.multiFrameFilename = self.Frame0Filename = self.Frame1Filename = self.ref = self.frame0 = self.frame1 = None
def test_getFrame_multi(self):
"""testedfmultiframe.test_getFrame_multi"""
self.assertEqual((self.ref.data - self.frame0.data).max(), 0, "getFrame_multi: Same data for frame 0")
f1_multi = self.ref.getframe(1)
# logger.warning("f1_multi.header=%s\nf1_multi.data= %s" % (f1_multi.header, f1_multi.data))
self.assertEqual((f1_multi.data - self.frame1.data).max(), 0, "getFrame_multi: Same data for frame 1")
def test_getFrame_mono(self):
"testedfmultiframe.test_getFrame_mono"
self.assertEqual((self.ref.data - self.frame0.data).max(), 0, "getFrame_mono: Same data for frame 0")
f1_mono = self.frame0.getframe(1)
self.assertEqual((f1_mono.data - self.frame1.data).max(), 0, "getFrame_mono: Same data for frame 1")
def test_next_multi(self):
"""testedfmultiframe.test_getFrame_mono"""
self.assertEqual((self.ref.data - self.frame0.data).max(), 0, "next_multi: Same data for frame 0")
next_ = self.ref.next()
self.assertEqual((next_.data - self.frame1.data).max(), 0, "next_multi: Same data for frame 1")
    def test_next_mono(self):
        "testedfmultiframe.test_next_mono"
self.assertEqual((self.ref.data - self.frame0.data).max(), 0, "next_mono: Same data for frame 0")
next_ = self.frame0.next()
self.assertEqual((next_.data - self.frame1.data).max(), 0, "next_mono: Same data for frame 1")
def test_previous_multi(self):
"""testedfmultiframe.test_previous_multi"""
f1 = self.ref.getframe(1)
self.assertEqual((f1.data - self.frame1.data).max(), 0, "previous_multi: Same data for frame 1")
f0 = f1.previous()
        self.assertEqual((f0.data - self.frame0.data).max(), 0, "previous_multi: Same data for frame 0")
def test_previous_mono(self):
"testedfmultiframe.test_previous_mono"
f1 = self.ref.getframe(1)
self.assertEqual((f1.data - self.frame1.data).max(), 0, "previous_mono: Same data for frame 1")
prev = self.frame1.previous()
self.assertEqual((prev.data - self.frame0.data).max(), 0, "previous_mono: Same data for frame 0")
def test_openimage_multiframes(self):
"test if openimage can directly read first or second frame of a multi-frame"
self.assertEqual((fabio.open(self.multiFrameFilename).data - self.frame0.data).max(), 0, "openimage_multiframes: Same data for default ")
# print(fabio.open(self.multiFrameFilename, 0).data)
self.assertEqual((fabio.open(self.multiFrameFilename, 0).data - self.frame0.data).max(), 0, "openimage_multiframes: Same data for frame 0")
self.assertEqual((fabio.open(self.multiFrameFilename, 1).data - self.frame1.data).max(), 0, "openimage_multiframes: Same data for frame 1")
class TestEdfFastRead(unittest.TestCase):
"""
    Check that fastReadData returns the same data as a regular read.
"""
def setUp(self):
self.refFilename = UtilsTest.getimage("MultiFrame-Frame0.edf.bz2")
self.fastFilename = self.refFilename[:-4]
def test_fastread(self):
ref = fabio.open(self.refFilename)
refdata = ref.data
obt = ref.fastReadData(self.fastFilename)
self.assertEqual(abs(obt - refdata).max(), 0, "testedffastread: Same data")
class TestEdfWrite(unittest.TestCase):
"""
Write dummy edf files with various compression schemes
"""
tmpdir = UtilsTest.tempdir
def setUp(self):
self.data = numpy.arange(100).reshape((10, 10))
self.header = {"toto": "tutu"}
def testFlat(self):
self.filename = os.path.join(self.tmpdir, "merged.azim")
e = edfimage(data=self.data, header=self.header)
e.write(self.filename)
r = fabio.open(self.filename)
self.assertTrue(r.header["toto"] == self.header["toto"], "header are OK")
self.assertTrue(abs(r.data - self.data).max() == 0, "data are OK")
self.assertEqual(int(r.header["EDF_HeaderSize"]), 512, "header size is one 512 block")
def testGzip(self):
self.filename = os.path.join(self.tmpdir, "merged.azim.gz")
e = edfimage(data=self.data, header=self.header)
e.write(self.filename)
r = fabio.open(self.filename)
self.assertTrue(r.header["toto"] == self.header["toto"], "header are OK")
self.assertTrue(abs(r.data - self.data).max() == 0, "data are OK")
self.assertEqual(int(r.header["EDF_HeaderSize"]), 512, "header size is one 512 block")
def testBzip2(self):
self.filename = os.path.join(self.tmpdir, "merged.azim.gz")
e = edfimage(data=self.data, header=self.header)
e.write(self.filename)
r = fabio.open(self.filename)
self.assertTrue(r.header["toto"] == self.header["toto"], "header are OK")
self.assertTrue(abs(r.data - self.data).max() == 0, "data are OK")
self.assertEqual(int(r.header["EDF_HeaderSize"]), 512, "header size is one 512 block")
def tearDown(self):
os.unlink(self.filename)
class TestEdfRegression(unittest.TestCase):
"""
Test suite to prevent regression
"""
def bug_27(self):
"""
import fabio
obj = fabio.open("any.edf")
obj.header["missing"]="blah"
obj.write("any.edf")
"""
# create dummy image:
shape = (32, 32)
data = numpy.random.randint(0, 6500, size=shape[0] * shape[1]).astype("uint16").reshape(shape)
fname = os.path.join(UtilsTest.tempdir, "bug27.edf")
e = edfimage(data=data, header={"key1": "value1"})
e.write(fname)
del e
obj = fabio.open(fname)
obj.header["missing"] = "blah"
obj.write(fname)
del obj
# os.unlink(fname)
def suite():
testsuite = unittest.TestSuite()
testsuite.addTest(TestFlatEdfs("test_read"))
testsuite.addTest(TestFlatEdfs("test_getstats"))
testsuite.addTest(TestBzipEdf("test_read"))
testsuite.addTest(TestBzipEdf("test_getstats"))
testsuite.addTest(TestGzipEdf("test_read"))
testsuite.addTest(TestGzipEdf("test_getstats"))
testsuite.addTest(TestEdfs("test_read"))
testsuite.addTest(TestEdfs("test_rebin"))
testsuite.addTest(testedfcompresseddata("test_read"))
testsuite.addTest(TestEdfMultiFrame("test_getFrame_multi"))
testsuite.addTest(TestEdfMultiFrame("test_getFrame_mono"))
testsuite.addTest(TestEdfMultiFrame("test_next_multi"))
testsuite.addTest(TestEdfMultiFrame("text_next_mono"))
testsuite.addTest(TestEdfMultiFrame("test_previous_multi"))
testsuite.addTest(TestEdfMultiFrame("test_openimage_multiframes"))
testsuite.addTest(TestEdfFastRead("test_fastread"))
testsuite.addTest(TestEdfWrite("testFlat"))
testsuite.addTest(TestEdfWrite("testGzip"))
testsuite.addTest(TestEdfWrite("testBzip2"))
testsuite.addTest(TestEdfRegression("bug_27"))
return testsuite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| [
"[email protected]"
] | |
ced63f89e52d591f602dcaf068e82acde3be2326 | 3a7840ae82c09719e945e2b0d36382567b817dd1 | /svn2git_functions.py | 73224da8dc4b90c41d5a040b4e20c4fb90a21ef5 | [] | no_license | shivpsingh/svn-to-git | 49c9d2d26bdd345025620efb830f23e88652f0df | 1e2702158c35fd9cfba39f2111ed2a8c9ab622f2 | refs/heads/master | 2023-04-08T02:37:54.980552 | 2021-04-23T06:14:40 | 2021-04-23T06:14:40 | 170,160,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,976 | py | """
##############################################################
##
## Name : Shiv Pratap Singh
## Description : SVN TO GIT Migration Script
## Requirements : git-bash.exe for windows and git for linux
## svn for Windows and svn for linux
## config JSON
##
##############################################################
"""
import os
import re
from git_functions import git_dir, mk_ch_dir
def svn2dict(data_str=None):
"""
    Parses the SVN logs and generates a JSON object based on them
    :param data_str: logs to be parsed
    :return: JSON object with the details required for the Git repo
"""
lines = list()
lines_w_nl = str(data_str).split("\n")
for line in lines_w_nl:
lines.append(str(line).strip("\n"))
del lines_w_nl
dashes = re.compile(r'^-+$')
key = 0
dict_of_log = dict()
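    # SVN delimits log entries with lines of dashes; group each entry's lines under an incrementing key.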
for line in lines:
if not dashes.match(line):
dict_of_log[str(key)].append(line)
else:
key += 1
dict_of_log[str(key)] = []
del dict_of_log[str(list(dict_of_log.keys())[-1])]
parsed_log = {}
for key in reversed(list(dict_of_log.keys())):
data = dict_of_log[str(key)]
spaces_in_file_commit = re.compile(r'^\s+')
revision_number = re.compile(r'^r.*$')
blank_line = re.compile(r'^$')
blank_line_flag = False
per_rev_dict = dict()
per_rev_dict['Files_Commit'] = []
path = ""
branch_name = ""
for details in data:
if revision_number.match(details):
per_rev_dict['Revision'] = details
revision = str(details).split("|")[0].strip(" ") if str(details).split("|")[0].strip(" ") else "r"
if blank_line.match(details):
blank_line_flag = True
elif blank_line_flag:
per_rev_dict['Comment'] = details
if spaces_in_file_commit.match(details) and not blank_line_flag:
added_modified = str(details).strip(" ").split(" ")[0] if str(details).strip(" ").split(" ")[0] else ""
files = str(details).strip(" ").split(" ")[1] if str(details).strip(" ").split(" ")[1] else ""
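                # classify the changed path as trunk, branch or tag, and remember the branch/tag name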
if files.find("/trunk") != -1:
path = "trunk"
if files.find("/branches") != -1:
path = "branches"
try:
branch_name = files.replace("/branches/", "").split("/")[0]
except Exception as err:
print(str(err))
branch_name = ""
if files.find("/tags") != -1:
path = "tags"
try:
branch_name = files.replace("/tags/", "").split("/")[0]
except Exception as err:
print(str(err))
branch_name = ""
if os.name == "nt":
files = files.replace('/', os.sep)
per_rev_dict['Files_Commit'].append(tuple([added_modified, files]))
per_rev_dict['Path'] = path
per_rev_dict['Branch_Name'] = branch_name
try:
parsed_log['r1']['Path'] = ""
except KeyError:
pass
parsed_log[revision] = per_rev_dict
# Debug Line
# print(parsed_log)
return parsed_log
def svn_log2json(config):
"""
    Takes the config JSON file, reads the SVN logs and creates a JSON dictionary sorted on revision number
    :param config: config JSON
    :return: JSON dictionary containing details of revisions and the files to be committed
"""
# Get SVN URL
try:
svn_url = config['SVN_URL']
except KeyError:
print("ERROR: SVN URL not Found")
exit(-1)
# Creating SVN Commands to read the logs
svn_log_command = "svn log -v"
svn_command = f"{svn_log_command} {svn_url}"
data_str = os.popen(f"{svn_command}").read()
# Checking Error for Logs
if not data_str:
print(f"Error: While getting logs for the SVN Url")
exit(-1)
try:
# Parsing the logs
log_dict = svn2dict(data_str)
except FileNotFoundError as err1:
print(f"Error: While getting logs for the SVN Url : {str(err1)}")
exit(-1)
except Exception as err2:
print(f"Error: While getting logs for the SVN Url : {str(err2)}")
exit(-1)
return log_dict
def svn_checkout(svn_url, config):
"""
    Takes an SVN checkout, used for copying the files to a Git repo
    :param svn_url: SVN URL
    :param config: config JSON object
    :return: directory in which the SVN checkout was taken
"""
# Context Manager defined in git_functions
with mk_ch_dir("SVN_TEMP_DIR"):
# Checking for Username or Password if Provided
if config['SVN_USERNAME'] and config['SVN_PASSWORD']:
svn_checkout_cmd = f"svn checkout --username " \
f"{str(config['SVN_USERNAME'])} --password {str(config['SVN_PASSWORD'])}"
else:
svn_checkout_cmd = "svn checkout"
svn_command = f"{svn_checkout_cmd} {svn_url}"
os.popen(svn_command)
current_dir = os.getcwd()
return current_dir
def json_to_git(config, log_dict):
"""
    Creates a Git repo with the config and details provided by the JSON object
    :param config: config details for initializing the Git repo
    :param log_dict: JSON object containing details for the Git repo
:return: None
"""
svn_url = config['SVN_URL']
with mk_ch_dir("TEMP_DIR"):
dir_name = svn_checkout(svn_url, config)
svn_dir = os.path.basename(svn_url)
git_dir(from_svn=dir_name, to_git=svn_dir, svn_log_dict=log_dict, config=config)
| [
"[email protected]"
] | |
9d158a0112e6f467f80794d1b0980e61160c69ac | 1ef7948e70b316ea1881d5a057672d66a0a425fc | /0x02-python-import_modules/2-args.py | 934127f39a5c1e5b2f1f6756fd622b8b4d40fc8c | [] | no_license | juandsuarezz/holbertonschool-higher_level_programming | a76a9b838c79840f62389364a93e37751ebd960e | f24ed35a340d6cf7b7acf151e816e20bdbf89ffb | refs/heads/master | 2022-12-15T19:27:54.843113 | 2020-09-25T03:10:01 | 2020-09-25T03:10:01 | 259,238,831 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | #!/usr/bin/python3
if __name__ == "__main__":
from sys import argv
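    # argv[0] is the script name, so the real argument count is len(argv) - 1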
print("{} argument".format(len(argv) - 1), end='')
if len(argv) - 1 == 0:
print("s.")
else:
if len(argv) - 1 == 1:
print(":")
else:
print("s:")
for i in range(1, len(argv)):
print("{}: {}".format(i, argv[i]))
| [
"[email protected]"
] | |
31fb6a43385ed8cb87205f2e7848d1d6e1662762 | 49a167d942f19fc084da2da68fc3881d44cacdd7 | /kubernetes_asyncio/test/test_v1_aws_elastic_block_store_volume_source.py | 440aa232820b413843892bb80c0f8ff0041fcc5b | [
"Apache-2.0"
] | permissive | olitheolix/kubernetes_asyncio | fdb61323dc7fc1bade5e26e907de0fe6e0e42396 | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | refs/heads/master | 2020-03-19T12:52:27.025399 | 2018-06-24T23:34:03 | 2018-06-24T23:34:03 | 136,546,270 | 1 | 0 | Apache-2.0 | 2018-06-24T23:52:47 | 2018-06-08T00:39:52 | Python | UTF-8 | Python | false | false | 1,148 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_aws_elastic_block_store_volume_source import V1AWSElasticBlockStoreVolumeSource # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1AWSElasticBlockStoreVolumeSource(unittest.TestCase):
"""V1AWSElasticBlockStoreVolumeSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1AWSElasticBlockStoreVolumeSource(self):
"""Test V1AWSElasticBlockStoreVolumeSource"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.v1_aws_elastic_block_store_volume_source.V1AWSElasticBlockStoreVolumeSource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
863a96705f9a7d86693ed40c618239f06ac48652 | 9416a099eca5028474ac3db6b5b4d833889be2c6 | /xpense2/flock/admin.py | 04af8914a007c94216757ff9d2485545ad234550 | [
"MIT"
] | permissive | akhilraj95/xpense | 35d1577fd0afc346211fe5f211f8cca44454c28e | d107662eca77cbee302ce7c8fea8a1bee0d4bb8b | refs/heads/master | 2021-01-14T02:28:28.874948 | 2017-04-07T15:41:59 | 2017-04-07T15:41:59 | 81,942,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from django.contrib import admin
from .models import User,Currency,Chat,Track,Expense,Bill
admin.site.register(User)
admin.site.register(Currency)
admin.site.register(Chat)
admin.site.register(Track)
admin.site.register(Expense)
admin.site.register(Bill)
| [
"[email protected]"
] | |
d42a56d08ebf329be52f9dea7d554d3e78fd01c2 | d2e3c9e09e00931b8666f2919a3abe2d2c60290c | /RestApi_d/rest_inatel_env/comentarios/urls.py | 6880afccf688dbf65393ba4f1e04a87dd8d1239c | [] | no_license | alanleonardo10/InatelP | 24af4a5f68206ae7a5496c4e3ed1abb797e0e3c7 | 3a73701ac75064b16d7114876ad12f7989f33d0f | refs/heads/master | 2016-08-12T07:00:46.069759 | 2016-04-11T12:21:38 | 2016-04-11T12:21:38 | 55,797,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from comentarios import views
urlpatterns = [
url(r'^comentarios/$', views.ComentarioList.as_view(), name="comentarios"),
]
"""
urlpatterns = patterns('',
url(r'^comentarios/$', views.ComentarioList.as_view(), name="comentarios"),
url(r'^comentarios/(?P<pk>[0-9]+)/$', views.ComentarioDetail.as_view(), name="comentarios-detail"),
url(r'^$', views.ComentarioView.as_view()),
)
urlpatterns = format_suffix_patterns(urlpatterns)
""" | [
"[email protected]"
] | |
038ee378ad09a412396cdf7e951169e020d57372 | 574ba062709bd4dfae4b86a1e8b77e8dd64153da | /hooks.py | ec717d855246bffc792d1a27add93d8d4f5385ea | [
"MIT"
] | permissive | dagnelies/temser | 8c4e598e5e09fa1a665085225709ef3f4c8bd55a | 0a4c6d8d34d1f8c14b5edc0b441e6c869a76402b | refs/heads/master | 2020-05-21T23:48:45.827472 | 2016-10-16T19:53:02 | 2016-10-16T19:53:02 | 63,253,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | import os
import os.path
def dir(param, parsed, current_path):
    return os.listdir(current_path + '/' + param)  # note: listdir lives in os, not os.path
"[email protected]"
] | |
656de7abdaf27b4332361740593c9b3305fbeebd | a7c16c93dd270eb152a9ef6497ba66e04f90ee90 | /reto/urls.py | 31c104a71789cec3555b70dc04d5c514dbf376bb | [] | no_license | sergiocanalesm1/delivery | 73762073c63e780c1914a46fa984e23ba9bc7f12 | 04516a6c082687d41878548a9ff607ae377f6c41 | refs/heads/master | 2022-12-05T00:28:28.500808 | 2020-08-26T20:06:37 | 2020-08-26T20:06:37 | 286,611,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | """reto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
    #path('',TemplateView.as_view(template_name='index.html'))  # render whatever is in front/build/static/index.html when the site root is requested
path('domicilios/',include('domicilios.urls'))
]
| [
"[email protected]"
] | |
4717b522dff1f23aaf00bd152123436c327d04f5 | f68afe06e4bbf3d523584852063e767e53441b2b | /Toontown/otp/ai/MagicWordManager.py | 64eddeecbde1b8285b078adf665f2f27a9a2792d | [] | no_license | DankMickey/Toontown-Offline-Squirting-Flower-Modded- | eb18908e7a35a5f7fc95871814207858b94e2600 | 384754c6d97950468bb62ddd8961c564097673a9 | refs/heads/master | 2021-01-19T17:53:36.591832 | 2017-01-15T02:00:04 | 2017-01-15T02:00:04 | 34,639,744 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from otp.ai.MagicWordGlobal import *
from otp.nametag.NametagConstants import *
lastClickedNametag = None
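# Expected to be set elsewhere when a nametag is clicked; None means no clicked-nametag target.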
class MagicWordManager(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('MagicWordManager')
neverDisable = 1
def generate(self):
DistributedObject.DistributedObject.generate(self)
self.accept('magicWord', self.handleMagicWord)
def disable(self):
self.ignore('magicWord')
DistributedObject.DistributedObject.disable(self)
def handleMagicWord(self, magicWord):
if not self.cr.wantMagicWords:
return
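        # '~~word' targets the last-clicked nametag (falling back to the local avatar); '~word' targets the local avatar.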
if magicWord.startswith('~~'):
        if lastClickedNametag is None:
target = base.localAvatar
else:
target = lastClickedNametag
magicWord = magicWord[2:]
if magicWord.startswith('~'):
target = base.localAvatar
magicWord = magicWord[1:]
targetId = target.doId
self.sendUpdate('sendMagicWord', [magicWord, targetId])
if target == base.localAvatar:
response = spellbook.process(base.localAvatar, target, magicWord)
if response:
self.sendMagicWordResponse(response)
def sendMagicWordResponse(self, response):
self.notify.info(response)
base.localAvatar.setSystemMessage(0, 'Spellbook: ' + str(response))
| [
"[email protected]"
] | |
5f1aa11dabee5a326f0aaab6b2a4adb6715046a3 | 1836303cba6cf9e959009fa452a17f74e8ecff69 | /Chapter02-Decision/SimpleDecision.py | 252de343c0bde186c374e4f270d8c263aecb0b13 | [] | no_license | werisonfernandes/python-samples | a7344ea0a98940e7a2723d4cdced8053dc839b44 | 1dec7abf9d66083af313db5083224ed529c2cb54 | refs/heads/main | 2023-04-26T19:13:40.457650 | 2021-05-06T22:14:11 | 2021-05-06T22:14:11 | 362,855,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | nome=input("Digite o nome: ")
idade=int(input("Digite a idade: "))
prioridade="NÃO"
if idade>=65:
prioridade="SIM"
print("O paciente " + nome + " possui atendimento prioritário? " + prioridade) | [
"[email protected]"
] | |
615eee3652d8297716bb2c0dbe9d52873f47eac8 | c4fd462b57f75167e79d9f1bddee60cbebe0b61d | /tiler/tiler-v02/geometry/testing/dimensions_test.py | ef38259663ae951c122f801916c8a1ba713ded3f | [] | no_license | conchis/webact2 | 3395ca1be37cf809703cc85494982a37aa86fdbb | aa2e5ed2334db758aac4e5d13257ea9776ae89aa | refs/heads/master | 2016-09-05T18:09:48.166354 | 2011-04-11T12:22:12 | 2011-04-11T12:22:12 | 759,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | import unittest
from ispace.geometry.dimensions import *
class DimensionsTest(unittest.TestCase):
    def testDimensions(self):
p1 = Dimensions(width = 1, height = 2)
self.assertEqual(1, p1.width)
self.assertEqual(2, p1.height)
def testEquals(self):
d1 = Dimensions(2, 3)
d2 = Dimensions(3, 4)
d3 = Dimensions(2, 3)
self.assertEqual(d1, d3)
self.assertNotEqual(d1, d2)
def suite():
return unittest.makeSuite(DimensionsTest)
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
| [
"[email protected]"
] | |
5d9b65d4ae86a1b066f20a844471a895805f46e2 | cdcaf65d69a8ee71a1384804d253ebda8650a194 | /app/app/settings.py | dcc733fca071ccfbafcc890db74abc1103f300fb | [
"MIT"
] | permissive | madlad33/recipe-api | 9a29a3db1ed8bbb233d97f25f17a762f861b9a13 | 669dcfaf800fab763e7cad5cdfb3d71402dffb17 | refs/heads/main | 2023-02-13T05:38:34.603115 | 2021-01-11T19:22:40 | 2021-01-11T19:22:40 | 326,607,572 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,309 | py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from .secret_settings import SECRET_KEY
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER':os.environ.get('DB_USER'),
'PASSWORD':os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User'
| [
"[email protected]"
] | |
21dddd6600f20df53abe0a55ec5e3415f1fab8fe | 2cb79704b461064657b7414862f450f7dbf2ea93 | /epd_ckean_kraken2.py | 211b7ae00940bba642298ffbb62202a3716afd05 | [] | no_license | XingchaoWu/scripts | 56a230ad679804139692e337fb9613750e2d7424 | 3ac2181f3cca5573012960eec0d8845a8b8cadfb | refs/heads/master | 2020-09-26T10:32:21.069845 | 2020-02-16T09:04:50 | 2020-02-16T09:04:50 | 226,236,702 | 2 | 1 | null | 2019-12-18T05:46:50 | 2019-12-06T03:17:30 | Python | UTF-8 | Python | false | false | 2,146 | py | #_*_coding:UTF-8_*_
import os
import subprocess
import argparse
def kraken_verificate_epd(kraken_path,kraken_edp_index,bracken_path,cur_path):
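    """Run Kraken2 on the non-human reads against the EuPathDB index, then estimate species- and genus-level abundances with Bracken."""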
# kraken
cmd = kraken_path + " --db " + kraken_edp_index \
+ " --gzip-compressed --threads 8 --report-zero-counts --confidence 0.65 --report " \
+ cur_path + "/" + args.sample_num + ".epd.kraken.report.txt" \
+ " --classified-out " + cur_path + "/" + args.sample_num + ".epd.fastq.gz" \
+ " --use-names " + "/home/pmd/analysis/" + args.batch + "/se_v1/" \
+ args.sample_num + "/map/" + args.sample_num + ".non_human.exclude.fastq.gz" \
+ " > " + cur_path + "/" + args.sample_num + ".epd.kraken.out.txt"
subprocess.check_output(cmd, shell=True)
# bracken
# species
cmd1 = "python " + bracken_path + " -k /pmd/Genome/fungi/kraken_eupathDB/database75mers.kmer_distrib -t 1 -l S -i " \
+ cur_path + "/" + args.sample_num + ".epd.kraken.report.txt" \
+ " -o " + cur_path + "/" + args.sample_num + ".epd.bracken.species.txt"
# genus
cmd2 = "python " + bracken_path + " -k /pmd/Genome/fungi/kraken_eupathDB/database75mers.kmer_distrib -t 1 -l G -i " \
+ cur_path + "/" + args.sample_num + ".epd.kraken.report.txt" \
+ " -o " + cur_path + "/" + args.sample_num + ".epd.bracken.genus.txt"
subprocess.check_output(cmd1, shell=True)
subprocess.check_output(cmd2, shell=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser("eupathdb verification")
parser.add_argument("-b", "--batch", type=str, help="batch, as 'PM19408' ")
parser.add_argument("-s", "--sample_num", type=str, help="sample number")
# parser.add_argument("-t", "--taxid", type=str, help="taxid as '5059' ")
args = parser.parse_args()
cur_path = os.getcwd()
kraken_path = "/data1/soft/kraken2/kraken2/kraken2"
kraken_edp_index = "/pmd/Genome/fungi/kraken_eupathDB"
bracken_path = "/data1/share/bin/Bracken-2.2/src//est_abundance.py"
kraken_verificate_epd(kraken_path,kraken_edp_index,bracken_path,cur_path) | [
"[email protected]"
] | |
e1a2dcf9e23bbf066d64d6990ef9ea167318ed72 | 6521b069e778f6e7a5be1aabb282cfddde72f190 | /reproduction/text_classification/train_awdlstm.py | 007b2910a19fca150eb98825a1a6d293dae29dbd | [
"Apache-2.0"
] | permissive | choosewhatulike/fastNLP | 12068fc618245d9cbb137729063ee390de26d696 | 14778ee071ace8825acc0f0834a26eccfda70667 | refs/heads/master | 2021-04-06T10:51:49.961158 | 2019-07-09T06:00:40 | 2019-07-09T06:00:40 | 124,500,643 | 0 | 0 | null | 2018-03-09T06:54:25 | 2018-03-09T06:54:25 | null | UTF-8 | Python | false | false | 2,104 | py | # This model needs to run under pytorch 0.4; weight_drop does not support 1.0
# First, add the following path to the environment variables; since this is currently open for internal testing only, the path has to be declared manually
import os
os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/'
os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches'
import torch.nn as nn
from data.IMDBLoader import IMDBLoader
from fastNLP.modules.encoder.embedding import StaticEmbedding
from model.awd_lstm import AWDLSTMSentiment
from fastNLP.core.const import Const as C
from fastNLP import CrossEntropyLoss, AccuracyMetric
from fastNLP import Trainer, Tester
from torch.optim import Adam
from fastNLP.io.model_io import ModelLoader, ModelSaver
import argparse
class Config():
train_epoch= 10
lr=0.001
num_classes=2
hidden_dim=256
num_layers=1
nfc=128
wdrop=0.5
task_name = "IMDB"
datapath={"train":"IMDB_data/train.csv", "test":"IMDB_data/test.csv"}
save_model_path="./result_IMDB_test/"
opt=Config()
# load data
dataloader=IMDBLoader()
datainfo=dataloader.process(opt.datapath)
# print(datainfo.datasets["train"])
# print(datainfo)
# define model
vocab=datainfo.vocabs['words']
embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-840b-300', requires_grad=True)
model=AWDLSTMSentiment(init_embed=embed, num_classes=opt.num_classes, hidden_dim=opt.hidden_dim, num_layers=opt.num_layers, nfc=opt.nfc, wdrop=opt.wdrop)
# define loss_function and metrics
loss=CrossEntropyLoss()
metrics=AccuracyMetric()
optimizer= Adam([param for param in model.parameters() if param.requires_grad==True], lr=opt.lr)
def train(datainfo, model, optimizer, loss, metrics, opt):
trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss,
metrics=metrics, dev_data=datainfo.datasets['test'], device=0, check_code_level=-1,
n_epochs=opt.train_epoch, save_path=opt.save_model_path)
trainer.train()
if __name__ == "__main__":
train(datainfo, model, optimizer, loss, metrics, opt)
| [
"[email protected]"
] | |
5a89be06f85e553651befb8730989a1b40fa919d | 176fe2cf55211febb23ab2545d0d378cdd049197 | /codefig8.py | 1d949eee5b5cc5a7e62dcba903fdb8bb61e06494 | [
"CC-BY-4.0"
] | permissive | asl-epfl/interplay_topology_OJSP | 9960fbace536db367848ae9138da932111d3f69f | a1668752766a1c699123c440c4bc8c7b74b5ea7e | refs/heads/main | 2023-04-18T20:42:09.423302 | 2021-07-27T15:06:31 | 2021-07-27T15:06:31 | 308,682,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,527 | py | """
This code can be used to generate simulations similar to Fig. 8 in the following paper:
Vincenzo Matta, Virginia Bordignon, Augusto Santos, Ali H. Sayed, "Interplay Between Topology and Social Learning Over Weak Graphs", IEEE Open Journal of Signal Processing, 2020.
Please note that the code is not generally perfected for performance, but is rather meant to illustrate certain results from the paper. The code is provided as-is without guarantees.
July 2020 (Author: Virginia Bordignon)
"""
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy as sp
import random
import os
from decimal import *
from functions import *
#%%
mpl.style.use('seaborn-deep')
plt.rcParams.update({'text.usetex': True})
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'Computer Modern Roman'
#%%
FIG_PATH = 'figs/'
if not os.path.isdir(FIG_PATH):
os.makedirs(FIG_PATH)
#%%
NR = 4
NS = [4,4,4]
Nid = np.cumsum([0]+NS+[NR])
M = 3
N_T = sum(NS)+NR
N_ITER = 10000
N_PLOT = 300
np.random.seed(10)
random.seed(0)
#%%
################################ Build Network Topology ################################
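# Draw a connected Erdos-Renyi graph for each sending sub-network and for the receiving one,
# build Metropolis combination matrices, and stack them block-diagonally with random
# send-to-receive links Tsr in the upper-right block.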
Gr = [nx.erdos_renyi_graph(n, 0.7) for n in NS + [NR]]
while not all([nx.is_connected(g) for g in Gr]):
Gr = [nx.erdos_renyi_graph(n, 0.7) for n in NS + [NR]]
G= [nx.adjacency_matrix(gr) for gr in Gr]
A = [build_adjacency_metropolis(g.shape[0], g) for g in G]
#%%
Tsr = np.zeros((sum(NS), NR))
for i in range(NR):
Tsr[:,i]=np.random.choice(a=[0, 1.0], size=(sum(NS)), p=[.5, .5])
#%%
A_full = sp.linalg.block_diag(*A)
A_full[:sum(NS),sum(NS):]=Tsr
A_full = A_full/np.sum(A_full, axis = 0)
A_lim = np.linalg.matrix_power(A_full,100)
G_full = nx.from_numpy_array(A_full, create_using=nx.MultiDiGraph())
A_full_dec = np.array([[Decimal(x) for x in y] for y in A_full])
#%%
fixedpos = {i: np.array([0,3])+(5*(np.random.rand(2)-0.5)) for i in np.arange(NS[0])}
fixedpos.update({i: np.array([4,3])+(3*(np.random.rand(2)-0.5)) for i in np.arange(NS[0], NS[0]+NS[1])})
fixedpos.update({i: np.array([2,-3])+(NR*(np.random.rand(2)-0.5)) for i in np.arange(sum(NS), N_T)})
fixedpos = {0: [0,3], 1: [0.6, 6], 2: [-2.3, 2], 3: [2, 2.5]}
fixedpos.update({NS[0]: [6, 2], NS[0]+1: [7, 6], NS[0]+2: [5, 3.5], NS[0]+3: [8, 3]})
fixedpos.update({NS[1]+NS[0]: [13, 2], NS[1]+NS[0]+1: [12, 5],NS[1]+ NS[0]+2: [14, 4], NS[1]+NS[0]+3: [11, 2]})
fixedpos.update({sum(NS): [4, -3], sum(NS)+1: [6, -4], sum(NS)+2: [2, -5], sum(NS)+3: [6, -6]})
pos = nx.spring_layout(G_full, fixed = fixedpos.keys(), pos = fixedpos)
f,ax=plt.subplots(1,1, figsize=(5.85,4.5))
ax.set_xlim(-5,15.8)
ax.set_ylim(-8,8)
plt.axis('off')
nx.draw_networkx_nodes(G_full, pos=pos, node_color= 'C0', vmin=0, vmax= 2, nodelist = range(NS[0]),node_size=350, edgecolors='k', linewidths=.5)
nx.draw_networkx_nodes(G_full, pos=pos, node_color = 'C1', vmin=0, vmax= 2, nodelist = range(NS[0], NS[0]+NS[1]), node_size=350, edgecolors='k', linewidths=.5)
nx.draw_networkx_nodes(G_full, pos=pos, node_color = 'C2', vmin=0, vmax= 2, nodelist = range(NS[0]+NS[1],sum(NS)), node_size=350, edgecolors='k', linewidths=.5)
nx.draw_networkx_nodes(G_full, pos=pos, node_color = '#E2C458', vmin=0, vmax= 2, nodelist = range(sum(NS),N_T), node_size=350, edgecolors='k', linewidths=.5)
circle1 = mpl.patches.Circle((-.2, 3.5), 3.3, fc='None', ec='k', linewidth=0.8)
circle2 = mpl.patches.Circle((6.5, 3.9), 2.8, fc='None', ec='k', linewidth=0.8)
circle3 = mpl.patches.Circle((4.3, -4.5), 3., fc='None', ec='k', linewidth=0.8)
circle4 = mpl.patches.Circle((12.4, 3.2), 2.6, fc='None', ec='k', linewidth=0.8)
ax.add_artist(circle1)
ax.add_artist(circle2)
ax.add_artist(circle3)
ax.add_artist(circle4)
nx.draw_networkx_labels(G_full,pos,{i: i+1 for i in range(N_T)},font_size=14, font_color='black', alpha = 1)
nx.draw_networkx_edges(G_full, pos = pos, node_size=350, alpha=1, arrowsize=6, width=0.5);
plt.savefig(FIG_PATH + 'fig8_panel1.pdf', bbox_inches='tight')
#%%
################################ Run Social Learning ################################
theta = np.arange(1, M+1)
x = np.linspace(1e-10, 1-1e-10, 1000)
dt = (max(x)-min(x))/len(x)
var = 1
#%%
muk = np.random.rand(len(NS)+1,M)/5 - 1/10
L0 = [beta_dist(x, 2,theta[0]+1+m[0]) for m in muk]
L1 = [beta_dist(x, 2,theta[1]+1+m[1]) for m in muk]
L2 = [beta_dist(x, 2,theta[2]+1+m[2]) for m in muk]
#%%
TS = np.array([0,1,2,0])+1
#%%
csi = np.zeros((N_T, N_ITER))
for l in range(N_T):
csi[l] = np.random.beta(a= 2, b = TS[l//4]+1, size = N_ITER)
#%%
FS = [beta_dist(x, 2,ts+1) for ts in TS[:-1]]
#%%
mu_0 = np.random.rand(N_T, M)
mu_0 = mu_0/np.sum(mu_0, axis = 1)[:, None]
#%%
mu = mu_0.copy()
mu = np.array([[Decimal(x) for x in y] for y in mu])
MU = [mu]
L_i=np.zeros((N_T, M))
PSI_DEC=[]
for i in range(N_ITER):
for k in range(len(NS)+1):
L_i[Nid[k]:Nid[k+1]] = np.array([beta_dist(csi[Nid[k]:Nid[k+1],i], 2,t+1+muk[k,t-1]) for t in theta]).T
L_i_dec = np.array([[Decimal(x) for x in y] for y in L_i])
psi = bayesian_update(L_i_dec, mu)
decpsi = np.array([[Decimal(x).ln() for x in y] for y in psi])
mu = np.exp((A_full_dec.T).dot(decpsi))/np.sum(np.exp((A_full_dec.T).dot(decpsi)),axis =1)[:,None]
MU.append(mu)
PSI_DEC.append(decpsi)
#%%
################################ Estimate Weights ################################
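# Recover the limiting combination weights assigned to each sending sub-network by least squares:
# the receivers' time-averaged log-belief ratios are linear in KL-divergence differences, with a
# final all-ones row enforcing that the estimated weights sum to one.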
psi_dec = [np.array([PSI_DEC[i][x,[k for k in range(M) if k !=np.argmax(PSI_DEC[i][sum(NS):],axis=1)[(x-sum(NS))]]]/(i+1) for i in range(len(PSI_DEC))]) for x in range(sum(NS), N_T)]
ones = np.array([Decimal(1) for i in range(N_ITER)])[:,None]
psi_dec_const= [np.hstack([psi_dec[i], ones]) for i in range(len(psi_dec))]
#%%
DD=[]
for i in range(N_ITER):
TR_=np.argmax(PSI_DEC[i][sum(NS):], axis=1)
D_=[]
for k in range(NR):
LR =[L0, L1, L2][TR_[k]]
D_.append(np.array([[np.sum(Y*np.log(Y/LR[iY])*dt)-np.sum(Y*np.log(Y/X)*dt) for X in [L0[iY], L1[iY], L2[iY]]] for iY, Y in enumerate(FS)] ).T)
DD.append(D_)
#%%
for i in range(N_ITER):
for j in range(NR):
DD[i][j]=DD[i][j][np.where(DD[i][j].any(axis=1))[0]]
DD[i][j]=np.vstack([DD[i][j], np.ones(3)])
#%%
DD_invdec=[]
for i in range(N_ITER):
D_inv = [np.linalg.pinv(d) for d in DD[i]]
D_invdec = [np.array([[Decimal(x) for x in y] for y in dinv]) for dinv in D_inv]
DD_invdec.append(D_invdec)
#%%
sol = [np.array([DD_invdec[j][x].dot(psi_dec_const[x][j]) for j in range(N_ITER)]) for x in range(NR)]
#%%
plt.figure(figsize=(5,4.5))
for i in range(NR):
plt.subplot(2,2,i+1)
plt.plot(np.ones(len(sol[i]))*A_lim[:NS[0],i+sum(NS)].sum(), ':' ,color='C0')
plt.plot(np.ones(len(sol[i]))*A_lim[NS[0]:NS[1]+NS[0],i+sum(NS)].sum(), ':' ,color='C1')
plt.plot(np.ones(len(sol[i]))*A_lim[NS[1]+NS[0]:sum(NS),i+sum(NS)].sum(), ':' ,color='C2')
if i==NR-1:
plt.plot([sol[i][x][0] for x in range(len(sol[0]))], color = 'C0', label='$s=1$')
plt.plot([sol[i][x][1] for x in range(len(sol[0]))], color = 'C1', label='$s=2$')
plt.plot([sol[i][x][2] for x in range(len(sol[0]))], color = 'C2', label='$s=3$')
else:
plt.plot([sol[i][x][0] for x in range(len(sol[0]))], color = 'C0')
plt.plot([sol[i][x][1] for x in range(len(sol[0]))], color = 'C1')
plt.plot([sol[i][x][2] for x in range(len(sol[0]))], color = 'C2')
plt.ylim(0,1)
plt.xlim(0, 2000)
plt.title('Agent {}'.format(i+1+sum(NS)), fontsize=16)
plt.xticks(fontsize=13)
plt.yticks(fontsize=13)
plt.xlabel('$i$', fontsize=16)
plt.ylabel(r'$\widehat x_{{s{i}}}$'.format(i=i+sum(NS)+1), fontsize=16)
if i==0:
plt.annotate('%0.2f' % A_lim[0*NS[0]:0*NS[0]+NS[0],i+sum(NS)].sum(), xy=(1.01,A_lim[0*NS[0]:0*NS[0]+NS[0],i+sum(NS)].sum()), xycoords=('axes fraction', 'data'), color = 'C0', fontsize=13)
plt.annotate('%0.2f' % (A_lim[1*NS[0]:1*NS[0]+NS[0],i+sum(NS)].sum()-.01), xy=(1.01,A_lim[1*NS[0]:1*NS[0]+NS[0],i+sum(NS)].sum()-.04), xycoords=('axes fraction', 'data'), color = 'C1', fontsize=13)
plt.annotate('%0.2f' % A_lim[2*NS[0]:2*NS[0]+NS[0],i+sum(NS)].sum(), xy=(1.01,A_lim[2*NS[0]:2*NS[0]+NS[0],i+sum(NS)].sum()), xycoords=('axes fraction', 'data'), color = 'C2', fontsize=13)
elif i==1:
plt.annotate('%0.2f' % A_lim[0*NS[0]:0*NS[0]+NS[0],i+sum(NS)].sum(), xy=(1.01,A_lim[0*NS[0]:0*NS[0]+NS[0],i+sum(NS)].sum()-0.03), xycoords=('axes fraction', 'data'), color = 'C0', fontsize=13)
plt.annotate('%0.2f' % A_lim[1*NS[0]:1*NS[0]+NS[0],i+sum(NS)].sum(), xy=(1.01,A_lim[1*NS[0]:1*NS[0]+NS[0],i+sum(NS)].sum()+0.08), xycoords=('axes fraction', 'data'), color = 'C1', fontsize=13)
plt.annotate('%0.2f' % A_lim[2*NS[0]:2*NS[0]+NS[0],i+sum(NS)].sum(), xy=(1.01,A_lim[2*NS[0]:2*NS[0]+NS[0],i+sum(NS)].sum()-0.06), xycoords=('axes fraction', 'data'), color = 'C2', fontsize=13)
elif i==3:
plt.annotate('%0.2f' % A_lim[0*NS[0]:0*NS[0]+NS[0],i+sum(NS)].sum(), xy=(1.01,A_lim[0*NS[0]:0*NS[0]+NS[0],i+sum(NS)].sum()), xycoords=('axes fraction', 'data'), color = 'C0', fontsize=13)
plt.annotate('%0.2f' % A_lim[1*NS[0]:1*NS[0]+NS[0],i+sum(NS)].sum(), xy=(1.01,A_lim[1*NS[0]:1*NS[0]+NS[0],i+sum(NS)].sum()), xycoords=('axes fraction', 'data'), color = 'C1', fontsize=13)
plt.annotate('%0.2f' % A_lim[2*NS[0]:2*NS[0]+NS[0],i+sum(NS)].sum(), xy=(1.01,A_lim[2*NS[0]:2*NS[0]+NS[0],i+sum(NS)].sum()-.1), xycoords=('axes fraction', 'data'), color = 'C2', fontsize=13)
else:
for iv, s in enumerate(sol[i].T):
plt.annotate('%0.2f' % A_lim[iv*NS[0]:iv*NS[0]+NS[0],i+sum(NS)].sum(), xy=(1.01,A_lim[iv*NS[0]:iv*NS[0]+NS[0],i+sum(NS)].sum()), xycoords=('axes fraction', 'data'), color = 'C'+str(iv), fontsize=13)
plt.figlegend(ncol=3, bbox_to_anchor=(0.45, -.38, 0.5, 0.5), fontsize=16, handlelength=1)
plt.tight_layout()
plt.subplots_adjust(bottom=.2,wspace=.7, right=.98)
plt.savefig(FIG_PATH+'fig8_panel3.pdf', bbox_inches='tight')
#%%
plt.figure(figsize=(5,4.5))
for i in range(NR):
plt.subplot(2,2, i+1)
h=plt.plot([MU[k][i+sum(NS),:] for k in range(N_PLOT)] )
plt.title('Agent {}'.format(i+1+sum(NS)), fontsize=16)
plt.xlabel('$i$', fontsize=16)
plt.ylabel(r'$\mu_{{{k},i}}(\theta)$'.format(k=i+1+sum(NS)), fontsize=16)
plt.xticks([0,100,200,300],fontsize=13)
plt.yticks(fontsize=13)
plt.figlegend(h,[r'$\theta=1$',r'$\theta=2$',r'$\theta=3$'],ncol=3, bbox_to_anchor=(0.45, -.38, 0.5, 0.5), fontsize=16, handlelength=1)
plt.tight_layout()
plt.subplots_adjust(bottom=.2,wspace=.45, right=.98)
plt.savefig(FIG_PATH+'fig8_panel2.pdf', bbox_inches='tight')
| [
"[email protected]"
] | |
db373a3628626d3ac5f75cbe6e7231c9d7c5690b | 5a997a2486fe4a677b7f902466c0accff784be8c | /test/test_dbserver_postgres.py | 1f312c14cb0848553a6112926f20cef865b0cc6c | [
"MIT"
] | permissive | jjkoletar/Astron | c86d21f371418e6f6a8fe4fa57074c414f7a45c8 | 956ebe21aabf415bc6fb041fb7932942df30e1bf | refs/heads/master | 2020-12-28T19:47:01.775152 | 2013-12-07T17:06:45 | 2013-12-07T17:06:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | #!/usr/bin/env python2
import unittest
import os, time
from socket import *
from testdc import test_dc
from common import Daemon, MDConnection
from test_dbserver import DatabaseBaseTests
CONFIG = """\
messagedirector:
bind: 127.0.0.1:57123
general:
dc_files:
- %r
roles:
- type: database
control: 777
generate:
min: 1000000
max: 1000010
backend:
type: postgresql
database: astron_test
""" % test_dc
class TestDatabaseServerPostgres(unittest.TestCase, DatabaseBaseTests):
@classmethod
def setUpClass(cls):
cls.daemon = Daemon(CONFIG)
cls.daemon.start()
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', 57123))
cls.conn = MDConnection(sock)
@classmethod
def tearDownClass(cls):
time.sleep(0.25) # Wait for database to finish any operations
cls.conn.close()
cls.daemon.stop()
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
90dcf05cd9ba9ceb6b04ea508af0d8574010c883 | 950577d2afbf349a6d28c16f1332e2e73d369431 | /qmeq/tests/data_builder_elph.py | a7f9c8b66f26760621e2768bacc0577507d728f2 | [
"BSD-2-Clause"
] | permissive | M-Josefsson/qmeq | 617b68c20fa21fd724643ea797b5e6d9d4034c93 | f4f08864fc778de7c14b198c0ffbaafe33ce18f6 | refs/heads/master | 2023-04-29T12:22:41.706416 | 2021-05-24T13:05:50 | 2021-05-24T13:05:50 | 263,661,418 | 0 | 0 | BSD-2-Clause | 2021-05-24T13:05:50 | 2020-05-13T14:57:24 | null | UTF-8 | Python | false | false | 7,031 | py | data = {
'Pauli22current': [-3.602032381671809e-08, 3.6020323816713775e-08, -3.602032381671809e-08, 3.6020323816713775e-08],
'Pauli22energy_current': [-3.6020323816717015e-09, -3.6020323816714856e-09, -3.6020323816717015e-09, -3.6020323816714856e-09],
'Redfield00current': [-2.396815483544316e-08, 2.396815483540777e-08, -2.396815483544316e-08, 2.396815483540777e-08],
'Redfield00energy_current': [-2.689229619262236e-09, -4.072076712399872e-09, -2.689229619262236e-09, -4.072076712399872e-09],
'Redfield02current': [-3.600941233519408e-08, 3.600941233518016e-08, -3.600941233519408e-08, 3.600941233518016e-08],
'Redfield02energy_current': [-3.606873616307786e-09, -3.602008515832424e-09, -3.606873616307786e-09, -3.602008515832424e-09],
'Redfield10current': [-2.3965670226602342e-08, 2.39656702266174e-08, -2.3965670226602342e-08, 2.39656702266174e-08],
'Redfield10energy_current': [-2.68947820124473e-09, -4.072370573058238e-09, -2.68947820124473e-09, -4.072370573058238e-09],
'Redfield12current': [-3.600695619491466e-08, 3.60069561949047e-08, -3.600695619491466e-08, 3.60069561949047e-08],
'Redfield12energy_current': [-3.6071142033407265e-09, -3.602301518523242e-09, -3.607114203340726e-09, -3.6023015185232415e-09],
'Redfield20current': [-2.3979370744862137e-08, 2.3979370744834503e-08, -2.3979370744862137e-08, 2.3979370744834503e-08],
'Redfield20energy_current': [-2.6850534312908126e-09, -4.071367007741718e-09, -2.6850534312908126e-09, -4.071367007741718e-09],
'Redfield22current': [-3.602031349920948e-08, 3.602031349918888e-08, -3.602031349920948e-08, 3.602031349918888e-08],
'Redfield22energy_current': [-3.602031595941338e-09, -3.6020327838284667e-09, -3.602031595941338e-09, -3.6020327838284667e-09],
'1vN00current': [-1.985304901425201e-08, 1.985304901420306e-08, -1.985304901425201e-08, 1.985304901420306e-08],
'1vN00energy_current': [-2.2734997137905297e-09, -3.658335646791615e-09, -2.2734997137905297e-09, -3.6583356467916152e-09],
'1vN02current': [-3.601239045924915e-08, 3.601239045924151e-08, -3.601239045924915e-08, 3.601239045924151e-08],
'1vN02energy_current': [-3.601992870105317e-09, -3.605360209892802e-09, -3.6019928701053175e-09, -3.605360209892802e-09],
'1vN10current': [-1.9850625426784767e-08, 1.9850625426805132e-08, -1.9850625426784767e-08, 1.9850625426805132e-08],
'1vN10energy_current': [-2.273640315079029e-09, -3.6584768389031423e-09, -2.273640315079029e-09, -3.658476838903142e-09],
'1vN12current': [-3.600993541674126e-08, 3.600993541674398e-08, -3.600993541674126e-08, 3.600993541674398e-08],
'1vN12energy_current': [-3.6021343397972284e-09, -3.605492920352015e-09, -3.6021343397972284e-09, -3.605492920352015e-09],
'1vN20current': [-1.9860646245046236e-08, 1.9860646245034424e-08, -1.9860646245046236e-08, 1.9860646245034424e-08],
'1vN20energy_current': [-2.272566745499942e-09, -3.6559145437733978e-09, -2.272566745499942e-09, -3.6559145437733978e-09],
'1vN22current': [-3.602030995450536e-08, 3.602030995449963e-08, -3.602030995450536e-08, 3.602030995449963e-08],
'1vN22energy_current': [-3.602031241471274e-09, -3.602032429359029e-09, -3.602031241471274e-09, -3.602032429359029e-09],
'Lindblad22current': [-3.602190690666546e-08, 3.6017106162292576e-08, -3.602190690666546e-08, 3.6017106162292576e-08],
'Lindblad22energy_current': [-3.602070531210417e-09, -3.6018304939917723e-09, -3.602070531210417e-09, -3.6018304939917723e-09],
'pyPauli22current': [-3.602032381671809e-08, 3.6020323816713775e-08, -3.602032381671809e-08, 3.6020323816713775e-08],
'pyPauli22energy_current': [-3.6020323816717015e-09, -3.6020323816714856e-09, -3.6020323816717015e-09, -3.6020323816714856e-09],
'pyRedfield00current': [-2.396815483544316e-08, 2.396815483540777e-08, -2.396815483544316e-08, 2.396815483540777e-08],
'pyRedfield00energy_current': [-2.689229619262236e-09, -4.072076712399872e-09, -2.689229619262236e-09, -4.072076712399872e-09],
'pyRedfield02current': [-3.600941233519408e-08, 3.600941233518016e-08, -3.600941233519408e-08, 3.600941233518016e-08],
'pyRedfield02energy_current': [-3.606873616307786e-09, -3.602008515832424e-09, -3.606873616307786e-09, -3.602008515832424e-09],
'pyRedfield10current': [-2.3965670226602342e-08, 2.39656702266174e-08, -2.3965670226602342e-08, 2.39656702266174e-08],
'pyRedfield10energy_current': [-2.68947820124473e-09, -4.072370573058238e-09, -2.68947820124473e-09, -4.072370573058238e-09],
'pyRedfield12current': [-3.600695619491466e-08, 3.60069561949047e-08, -3.600695619491466e-08, 3.60069561949047e-08],
'pyRedfield12energy_current': [-3.6071142033407265e-09, -3.602301518523242e-09, -3.607114203340726e-09, -3.6023015185232415e-09],
'pyRedfield20current': [-2.3979370744862137e-08, 2.3979370744834503e-08, -2.3979370744862137e-08, 2.3979370744834503e-08],
'pyRedfield20energy_current': [-2.6850534312908126e-09, -4.071367007741718e-09, -2.6850534312908126e-09, -4.071367007741718e-09],
'pyRedfield22current': [-3.602031349920948e-08, 3.602031349918888e-08, -3.602031349920948e-08, 3.602031349918888e-08],
'pyRedfield22energy_current': [-3.602031595941338e-09, -3.6020327838284667e-09, -3.602031595941338e-09, -3.6020327838284667e-09],
'py1vN00current': [-1.985304901425201e-08, 1.985304901420306e-08, -1.985304901425201e-08, 1.985304901420306e-08],
'py1vN00energy_current': [-2.2734997137905297e-09, -3.658335646791615e-09, -2.2734997137905297e-09, -3.6583356467916152e-09],
'py1vN02current': [-3.601239045924915e-08, 3.601239045924151e-08, -3.601239045924915e-08, 3.601239045924151e-08],
'py1vN02energy_current': [-3.601992870105317e-09, -3.605360209892802e-09, -3.6019928701053175e-09, -3.605360209892802e-09],
'py1vN10current': [-1.9850625426784767e-08, 1.9850625426805132e-08, -1.9850625426784767e-08, 1.9850625426805132e-08],
'py1vN10energy_current': [-2.273640315079029e-09, -3.6584768389031423e-09, -2.273640315079029e-09, -3.658476838903142e-09],
'py1vN12current': [-3.600993541674126e-08, 3.600993541674398e-08, -3.600993541674126e-08, 3.600993541674398e-08],
'py1vN12energy_current': [-3.6021343397972284e-09, -3.605492920352015e-09, -3.6021343397972284e-09, -3.605492920352015e-09],
'py1vN20current': [-1.9860646245046236e-08, 1.9860646245034424e-08, -1.9860646245046236e-08, 1.9860646245034424e-08],
'py1vN20energy_current': [-2.272566745499942e-09, -3.6559145437733978e-09, -2.272566745499942e-09, -3.6559145437733978e-09],
'py1vN22current': [-3.602030995450536e-08, 3.602030995449963e-08, -3.602030995450536e-08, 3.602030995449963e-08],
'py1vN22energy_current': [-3.602031241471274e-09, -3.602032429359029e-09, -3.602031241471274e-09, -3.602032429359029e-09],
'pyLindblad22current': [-3.602046960217681e-08, 3.6020469602176456e-08, -3.602046960217681e-08, 3.6020469602176456e-08],
'pyLindblad22energy_current': [-3.6020468747597642e-09, -3.602046874759747e-09, -3.6020468747597642e-09, -3.602046874759747e-09]
} | [
"[email protected]"
] | |
1e3a726e2e1da9488648424acf45e510f528f691 | 156cdcdd99b599e82a00931d32c68b7c2edbbe6c | /fmriprep/__init__.py | 2cb4d3103c0bba044a947432c8636c597a68109d | [
"BSD-3-Clause"
] | permissive | rwblair/motcorr_comp | 9e2d65cb06a972938d62e6ef4eb7183e77b0f54e | d06abe502bb21829ed3725966e9cfcfc37238506 | refs/heads/master | 2021-01-13T17:29:51.411175 | 2017-02-14T07:27:43 | 2017-02-14T07:27:43 | 81,917,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This pipeline is developed by the Poldrack lab at Stanford University
(https://poldracklab.stanford.edu/) for use at
the Center for Reproducible Neuroscience (http://reproducibility.stanford.edu/),
as well as for open-source software distribution.
"""
from .info import (
__version__,
__author__,
__copyright__,
__license__,
__maintainer__,
__email__,
__status__,
__url__,
__packagename__,
__description__,
__longdesc__
)
| [
"[email protected]"
] | |
208820efd8e4b735c6591cb207f5de74e42ff08b | eb574c5597c3bff0631c7fcef2fd029abd2f3c30 | /DotPy/ImpossibleRiddle.py | d0d7de12f658fb516ad11f4cd7f260ec37534b30 | [] | no_license | aaditya420/Python_Codes | 6d824c1f0c84c8133346685cf5e7d59a6cf5b070 | 2e9041566368ff3fec34b642969293468ca83bdc | refs/heads/master | 2021-11-29T23:15:57.151336 | 2019-03-14T05:22:06 | 2019-03-14T05:22:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py |
def Main():
nums = [1, 3, 5, 7, 9, 11, 13, 15]
len_nums = len(nums)
magic_num = 30
for i in range(len_nums):
for j in range(len_nums):
for k in range(len_nums):
if ((nums[i] + nums[j] + nums[k]) == magic_num):
print("{0} + {1} + {2} = {3}".format(nums[i], nums[j], nums[k], magic_num))
if __name__ == "__main__":
Main() | [
"[email protected]"
] | |
eab47a6b950350fe98a49ab448691de40292168d | f9a2c12b28bd934bb59fc7a7d4a7a77ca2695a53 | /seletion sort.py | 89badc4b27e40bd95b3edab9bc8fb52bf2a4856f | [] | no_license | tusharta07/tushar | 798b62bffb9cc0a574717128f7ddd1b4ecd32916 | eb5ed3de42e7d9400f6418fb2079344e74336b93 | refs/heads/master | 2020-08-27T23:04:00.413869 | 2019-11-10T14:37:07 | 2019-11-10T14:37:07 | 217,514,238 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | def selection_sort(arr):
n=len(arr)
for i in range(n):
min_idx = i
for j in range(i + 1, n):
if arr[min_idx] > arr[j]:
min_idx = j
arr[i], arr[min_idx] = arr[min_idx], arr[i]
arr = [int(k) for k in input().split()]
selection_sort(arr)
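# Example run (hypothetical input): entering "5 2 9 1" sorts arr in place to [1, 2, 5, 9].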
print(arr) | [
"[email protected]"
] | |
8e10e7621a42f1988776e1e9494795084c6851ca | 9e3620265aee10c0772484403509fbace7259f40 | /mhdata/io/datarow.py | ad7cd7c4ecbae0f48d0ffd1e851cf8cd312f8afc | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | nikibobi/MHWorldData | 5d104fa886087fe121b262497686ad81e6720751 | 78b5a4dc10ef532d5bad7359ef0b098f99104782 | refs/heads/master | 2020-12-14T23:41:45.224370 | 2020-01-20T08:40:16 | 2020-01-20T08:40:16 | 234,912,823 | 0 | 0 | MIT | 2020-01-19T14:25:05 | 2020-01-19T14:25:04 | null | UTF-8 | Python | false | false | 2,337 | py | from collections.abc import MutableMapping
from .functions import to_basic
class DataRow(MutableMapping):
"""Defines a single row of a datamap object.
These objects are regular dictionaries that can also get translated names.
"""
def __init__(self, row_id: int, datarowdict: dict):
self._data = { 'id': row_id }
for key, value in datarowdict.items():
if key != 'id':
self._data[key] = value
@property
def id(self):
"Returns the id associated with this DataRow"
return self['id']
def name(self, lang_id):
"Returns the name of this data map row in a specific language"
return self['name'][lang_id]
def names(self):
"Returns a collection of (language, name) tuples for this row"
for (lang, name) in self['name'].items():
yield (lang, name)
def set_value(self, key, value, *, after=""):
""""Sets a value in this dictionary.
Same as using [key]=value, but allows an item to be placed after another"""
if not after:
self[key] = value
return
keys_to_move = []
found_item = False
for item_key in self._data.keys():
if found_item:
keys_to_move.append(item_key)
elif item_key == after:
found_item = True
self[key] = value
        # Re-append each key that followed 'after', so the new entry ends up right after it
for item_key in keys_to_move:
value = self._data[item_key]
del self._data[item_key]
self._data[item_key] = value
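    # A hypothetical usage sketch: for a row holding keys 'id', 'name', 'weight',
    # row.set_value('rarity', 3, after='name') leaves the key order as
    # id, name, rarity, weight rather than appending 'rarity' at the very end.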
def to_dict(self):
return to_basic(self)
def __getitem__(self, key: str):
if key in self._data:
return self._data[key]
elif '_' in key:
parts = key.rsplit('_', 1)
return self._data[parts[0]][parts[1]]
else:
raise KeyError(f'No entry with {key} found in data row')
def __setitem__(self, key, value):
self._data[key] = value
def __delitem__(self, key):
del self._data[key]
def __iter__(self):
return self._data.__iter__()
def __len__(self):
return self._data.__len__()
def __repr__(self):
# Show the repr of the shallow copy
return repr({ k:v for (k, v) in self.items()})
| [
"[email protected]"
] | |
0485565df76e01214fa2eb5cec6f49fedb9dc012 | 30e2407d44083cf5fa3737f56d40c32c9df31bf8 | /exp_7/yolov3/yolov3-bcl/demo/evaluate.py | 998b0c7ce378c77924ce1e82ddb4df976f2acec3 | [
"MIT"
] | permissive | gzq942560379/ICSE | 1179c8189df574f14bd2389c93d1510bb0489743 | ca433ec5fa022c5b31cdf47730ffaee070dea9ca | refs/heads/master | 2023-05-08T11:09:41.648880 | 2021-06-03T03:19:11 | 2021-06-03T03:19:11 | 351,345,412 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 12,988 | py | #! /usr/bin/env python
# coding=utf-8
import cv2
import os
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
import time
import argparse
from core.config import cfg
from core.yolov3 import YOLOV3
class YoloTest(object):
def __init__(self):
self.input_size = cfg.TEST.INPUT_SIZE
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
self.score_threshold = cfg.TEST.SCORE_THRESHOLD
self.iou_threshold = cfg.TEST.IOU_THRESHOLD
self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
self.annotation_path = cfg.TEST.ANNOT_PATH
self.number = cfg.TEST.NUMBER
self.weight_file = cfg.TEST.WEIGHT_FILE
self.model_file = cfg.TEST.MODEL_FILE
self.write_image = cfg.TEST.WRITE_IMAGE
self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
self.show_label = cfg.TEST.SHOW_LABEL
self.batch_size = cfg.TEST.BATCH_SIZE
self.core_version = cfg.RUNTIME.CORE_VERSION
self.precision = cfg.RUNTIME.PRECISION
self.data_parallelism = cfg.RUNTIME.DATA_PARALLELISM
self.model_parallelism = cfg.RUNTIME.MODEL_PARALLELISM
self.core_num = cfg.RUNTIME.CORE_NUM
if os.path.exists(self.model_file):
print ("model is exit")
else :
print ("please check out model_file")
graph = load_graph(self.model_file)
self.input_data = graph.get_tensor_by_name("import/input/input_data:0" )
self.pred_sbbox = graph.get_tensor_by_name("import/pred_sbbox/concat_2:0" )
self.pred_mbbox = graph.get_tensor_by_name("import/pred_mbbox/concat_2:0" )
self.pred_lbbox = graph.get_tensor_by_name("import/pred_lbbox/concat_2:0" )
self.bbox_raw = graph.get_tensor_by_name("import/Yolov3DetectionOutput:0" )
config = tf.ConfigProto(allow_soft_placement=True,
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
config.mlu_options.data_parallelism = self.data_parallelism
config.mlu_options.model_parallelism = self.model_parallelism
config.mlu_options.core_num = self.core_num
config.mlu_options.core_version = self.core_version
config.mlu_options.precision = self.precision
config.mlu_options.save_offline_model = True
config.mlu_options.offline_model_name = "yolov3_int8.cambricon"
self.sess = tf.Session(config = config, graph = graph)
def predict_bak(self, images):
org_h = [0 for i in range(self.batch_size)]
org_w = [0 for i in range(self.batch_size)]
for i in range(self.batch_size):
org_h[i], org_w[i], _ = images[i].shape
image_data = utils.images_preporcess(images, [self.input_size, self.input_size])
start = time.time()
pred_sbbox, pred_mbbox, pred_lbbox = self.sess.run(
[self.pred_sbbox, self.pred_mbbox, self.pred_lbbox],
feed_dict={
self.input_data: image_data,
}
)
np.savetxt("pred_sbbox.txt",pred_sbbox.flatten())
np.savetxt("pred_mbbox.txt",pred_mbbox.flatten())
np.savetxt("pred_lbbox.txt",pred_lbbox.flatten())
end = time.time()
batch_bboxes = []
for idx in range(self.batch_size):
pred_bbox = np.concatenate([np.reshape(pred_sbbox[idx], (-1, 5 + self.num_classes)),
np.reshape(pred_mbbox[idx], (-1, 5 + self.num_classes)),
np.reshape(pred_lbbox[idx], (-1, 5 + self.num_classes))], axis=0)
bboxes = utils.postprocess_boxes(pred_bbox, (org_h[idx], org_w[idx]), self.input_size, self.score_threshold)
batch_bboxes.append(utils.nms(bboxes, self.iou_threshold))
print("bbox num : ",len(batch_bboxes))
exit(0)
return batch_bboxes, (end - start)
def predict(self, images):
org_h = [0 for i in range(self.batch_size)]
org_w = [0 for i in range(self.batch_size)]
for i in range(self.batch_size):
org_h[i], org_w[i], _ = images[i].shape
image_data, dh, dw, scale = utils.images_preporcess(images, [self.input_size, self.input_size])
start = time.time()
bbox_raw = self.sess.run(
self.bbox_raw,
feed_dict={
self.input_data: image_data,
}
)
end = time.time()
print("inference time include postprocess is: ", (end-start) * 1000)
batch_bboxes = []
num_batches = 1
num_boxes = 1024 * 2
predicts_mlu = bbox_raw.flatten()
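        # Assumed layout of the Yolov3DetectionOutput buffer, inferred from the
        # indexing below: each batch contributes a 64-float header whose first
        # element is the number of valid boxes, followed by num_boxes records of
        # 7 floats [batchId, classId, score, x1, y1, x2, y2], with coordinates
        # normalized to the network input size (the letterbox padding dh/dw and
        # the resize scale are undone when mapping back to the original image).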
for batchIdx in range(num_batches):
result_boxes = int(predicts_mlu[batchIdx * (64 + num_boxes * 7)])
current_bboxes = []
for i in range(result_boxes):
batchId = predicts_mlu[i * 7 + 0 + 64 + batchIdx * (64 + num_boxes * 7)]
classId = predicts_mlu[i * 7 + 1 + 64 + batchIdx * (64 + num_boxes * 7)]
score = predicts_mlu[i * 7 + 2 + 64 + batchIdx * (64 + num_boxes * 7)]
x1 = 1.0*(predicts_mlu[i * 7 + 3 + 64 + batchIdx * (64 + num_boxes * 7)] * self.input_size - dw)/scale
y1 = 1.0*(predicts_mlu[i * 7 + 4 + 64 + batchIdx * (64 + num_boxes * 7)] * self.input_size - dh)/scale
x2 = 1.0*(predicts_mlu[i * 7 + 5 + 64 + batchIdx * (64 + num_boxes * 7)] * self.input_size - dw)/scale
y2 = 1.0*(predicts_mlu[i * 7 + 6 + 64 + batchIdx * (64 + num_boxes * 7)] * self.input_size - dh)/scale
bbox = [x1, y1, x2, y2, score, classId]
current_bboxes.append(np.array(bbox))
batch_bboxes.append(current_bboxes)
return batch_bboxes, (end - start)
def evaluate(self):
predicted_dir_path = self.write_image_path + '/mAP/predicted'
ground_truth_dir_path = self.write_image_path + '/mAP/ground-truth'
if os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)
if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
if os.path.exists(self.write_image_path): shutil.rmtree(self.write_image_path)
os.makedirs(predicted_dir_path)
os.makedirs(ground_truth_dir_path)
batch_idx = 0
alltime_sess = 0
start = []
end = []
start_end2end = 0.0
start_post = 0.0
end_post = 0.0
alltime_end2end = 0.0
alltime_prepare = 0.0
alltime_post = 0.0
alltime_sess_run = 0.0
batch_count = 0
batch_image = []
batch_image_name = []
with open(self.annotation_path, 'r') as annotation_file:
for num, line in enumerate(annotation_file):
if batch_idx == 0:
start_end2end = time.time()
annotation = line.strip().split()
image_path = annotation[0]
image_name = image_path.split('/')[-1]
batch_image_name.append(image_name)
image = cv2.imread(image_path)
batch_image.append(image)
bbox_data_gt = np.array([list(map(int, box.split(','))) for box in annotation[1:]])
if len(bbox_data_gt) == 0:
bboxes_gt=[]
classes_gt=[]
else:
bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]
ground_truth_path = os.path.join(ground_truth_dir_path, str(num) + '.txt')
num_bbox_gt = len(bboxes_gt)
with open(ground_truth_path, 'w') as f:
for i in range(num_bbox_gt):
class_name = self.classes[classes_gt[i]]
xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n'
f.write(bbox_mess)
if batch_idx < self.batch_size - 1:
batch_idx += 1
continue
print("=> Predicting %d th batch images." % (batch_count + 1))
start.append(time.time())
bboxes_pr, sess_run_time = self.predict(batch_image)
end.append(time.time())
if batch_count > 0:
alltime_sess_run += sess_run_time
duration_time = (end[batch_count] - start[batch_count])
alltime_sess += duration_time
alltime_prepare = alltime_prepare + (start[batch_count] - start_end2end)
if self.write_image:
for idx in range(self.batch_size):
image = utils.draw_bbox(batch_image[idx], bboxes_pr[idx], show_label=self.show_label)
print("######### SAVE IMAGE ,",self.write_image_path+"/"+batch_image_name[idx])
cv2.imwrite(self.write_image_path+"/"+batch_image_name[idx], image)
for idx in range(self.batch_size):
predict_result_path = os.path.join(predicted_dir_path,
str(batch_count * self.batch_size + idx) + '.txt')
with open(predict_result_path, 'w') as f:
for bbox in bboxes_pr[idx]:
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
class_name = self.classes[class_ind + 1]
score = '%.4f' % score
xmin, ymin, xmax, ymax = list(map(str, coor))
bbox_mess = ' '.join([class_name, score, xmin, ymin, xmax, ymax]) + '\n'
f.write(bbox_mess)
if batch_count > 0:
temp = time.time()
alltime_end2end = alltime_end2end + (temp - start_end2end)
alltime_post = alltime_post + temp - end[batch_count]
batch_count += 1
if self.number < (batch_count + 1) * self.batch_size:
print("we have evaluated %d batch images"%(batch_count))
break
batch_idx = 0
batch_image = []
batch_image_name = []
if(self.number > 1):
print('latency: %f (ms)' % (alltime_sess_run * 1000 / (batch_count - 1)))
print('throughput: %f' % (((batch_count - 1) * self.batch_size) / alltime_sess_run))
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
print("model_file",model_file)
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--graph", help="graph/model to be executed")
parser.add_argument("--result_path", help="result path to write")
parser.add_argument("--records", help="records to be processed")
parser.add_argument("--number", type=int, help="number of records to be processed")
parser.add_argument("--core_version", type=str, help="MLU100/MLU270", default="MLU100")
parser.add_argument("--precision", type=str, help="float/int8", default="float")
parser.add_argument("--data_parallelism", type=int, help="data_parallelism")
parser.add_argument("--model_parallelism", type=int, help="model_parallelism")
parser.add_argument("--core_num", type=int, help="core_num")
parser.add_argument("--input_size", type=int, help="choose 416 or 544", default=416)
parser.add_argument("--batch_size", type=int, help="batch size")
args = parser.parse_args()
if args.graph:
cfg.TEST.MODEL_FILE = args.graph
if args.result_path:
cfg.TEST.WRITE_IMAGE_PATH = args.result_path
if args.records:
cfg.TEST.ANNOT_PATH = args.records
if args.number:
cfg.TEST.NUMBER = args.number
if args.core_version:
cfg.RUNTIME.CORE_VERSION = args.core_version
if args.precision:
cfg.RUNTIME.PRECISION = args.precision
if args.data_parallelism:
cfg.RUNTIME.DATA_PARALLELISM = args.data_parallelism
if args.model_parallelism:
cfg.RUNTIME.MODEL_PARALLELISM = args.model_parallelism
if args.core_num:
cfg.RUNTIME.CORE_NUM = args.core_num
if args.input_size:
cfg.TEST.INPUT_SIZE = args.input_size
if args.batch_size:
cfg.TEST.BATCH_SIZE = args.batch_size
YoloTest().evaluate()
| [
"[email protected]"
] | |
5fd09957f77c36c18570a88be4dde87a4b98b932 | 585765284ab1a1e9b03d870943401a1e71a73a54 | /character/Movement.py | 4f161f89c06eb2dccaf1e901bec0f7fd38b7d517 | [] | no_license | FreddieWho/WChatAVG | 1132359a80dcd2106ee6a2e008f8c14273e57b3f | 43b5d28735fd6b204ddbdf618199ed24cc219724 | refs/heads/master | 2023-01-22T23:38:01.124109 | 2020-11-18T17:09:01 | 2020-11-18T17:09:01 | 287,499,181 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | class PlayersMovement:
def Attach(Player_A,Player_B):
rawDamage = Player_A.MagicDamage * Player_A
| [
"[email protected]"
] | |
f7074980cd5970c5bc5c986a45251506e2b0dd58 | 726be1bb36eea7046eec331e02140181eba3ed91 | /similarity.py | 061c17cb49c8342a3ab006d295ef80ec01922fda | [] | no_license | Jazafras/Goddess-Database | 205fc0d465b75940cc70f6793854bc22e9c38c55 | 2da204ed24637f6b205e5d4803bbdd164d1ea46c | refs/heads/master | 2020-03-17T12:11:12.821235 | 2017-12-11T01:09:27 | 2017-12-11T01:09:27 | 133,577,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,288 | py | from indexer import iter_goddess, get_text_from_html, load_goddess
from collections import Counter
from itertools import tee
import re
import json
import numpy as np
from math import log
from scipy.spatial.distance import cosine
from scipy.linalg import svd
import os
__alpha_re__ = re.compile(r"[^a-zA-Z ]")
def replace_non_alpha(string):
return re.sub(__alpha_re__, ' ', string)
__space_re__ = re.compile(r"\s+")
def contract_spaces(string):
return re.sub(__space_re__, ' ', string)
def prepare_string(string):
if not string:
return ""
return contract_spaces(replace_non_alpha(string.casefold()))
def grams(iterable, skip=None):
"""From the 'pairwise' recipe:
https://docs.python.org/3.6/library/itertools.html"""
first, second = tee(iterable)
next(second, None)
if skip:
for i in range(skip):
next(second, None)
return zip(first, second)
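# For example, list(grams('abcd')) yields ('a','b'), ('b','c'), ('c','d'),
# while grams('abcd', skip=1) widens the window to ('a','c'), ('b','d').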
def get_wordcounts():
wordcounts = Counter()
for _, words in iter_words():
wordcounts.update(words)
return wordcounts
def iter_words():
for goddess in iter_goddess():
yield goddess['pageid'], prepare_string(get_text_from_html(goddess['extract'])).split()
def get_vocab(wordcounts):
return set(wordcounts.keys())
def get_pairs(skip=None):
counts = {}
for goddess in iter_goddess():
words = prepare_string(get_text_from_html(goddess['extract'])).split()
counts[goddess['pageid']] = Counter(grams(words, skip=skip))
return counts
def get_occurrences():
counts = {}
for goddess in iter_goddess():
words = prepare_string(get_text_from_html(goddess['extract'])).split()
counts[goddess['pageid']] = Counter(words)
return counts
def get_lengths(occurrences=None):
if occurrences is not None:
return {k: sum(v.values()) for k, v in occurrences.items()}
lengths = {}
for goddess in iter_goddess():
words = prepare_string(get_text_from_html(goddess['extract'])).split()
lengths[goddess['pageid']] = len(words)
return lengths
def get_single_word_probabilities(wordcounts):
total_number_of_words = sum(v for v in wordcounts.values())
return {k: n/total_number_of_words for k, n in wordcounts.items()}
def get_ids_and_maps(counts, vocab):
outer_ids = sorted(counts.keys())
outer_map = {pageid : i for i, pageid in enumerate(outer_ids)}
inner_ids = sorted(vocab)
inner_map = {word : i for i, word in enumerate(inner_ids)}
return ((outer_ids, inner_ids), (outer_map, inner_map))
def threshold_your_vocab(wordcounts, threshold):
return {k for k, v in wordcounts.items() if v > threshold}
def threshold_a_pair_counter(original_counter, thresholded_vocab):
return Counter({(a, b): v for (a, b), v in original_counter.items() if a in thresholded_vocab and b in thresholded_vocab})
def threshold_the_pair_counts(counts, thresholded_vocab):
    return {k: threshold_a_pair_counter(v, thresholded_vocab) for k, v in counts.items()}
def threshold_a_counter(original_counter, thresholded_vocab):
return Counter({k: v for k, v in original_counter.items() if k in thresholded_vocab})
def threshold_the_single_word_counts(counts, thresholded_vocab):
    return {k: threshold_a_counter(v, thresholded_vocab) for k, v in counts.items()}
def get_term_frequency(occurrences):
"""Adjusted for document length because that varies a lot"""
# Note: if the occurrences are thresholded, the uncommon
# words will not be counted in the document lengths.
lengths = get_lengths(occurrences)
return {goddess_id: {term: occurrence/lengths[goddess_id]
for term, occurrence
in occurrences[goddess_id].items()}
for goddess_id in lengths}
def get_inverse_document_frequency(occurrences):
vocab_bags = {k: v.keys() for k, v in occurrences.items()}
words_counted_once_per_document = Counter(
word for goddess_id, vocab in vocab_bags.items() for word in vocab
)
all_documents = len(occurrences.keys())
return {term: log(all_documents/documents) for term, documents in words_counted_once_per_document.items()}
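# For instance, a term appearing in 2 of 200 documents gets idf = log(200/2),
# about 4.6 with the natural log, while a term in every document gets log(1) = 0.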
def get_tf_idf_dict():
occurrences = get_occurrences()
tf = get_term_frequency(occurrences)
idf = get_inverse_document_frequency(occurrences)
return {goddess_id:
{term: term_freq*idf[term]
for term, term_freq in tf_dict.items()}
for goddess_id, tf_dict in tf.items()}
def tf_idf_matrix(tf_idf_dict, vocab=None):
if vocab is None:
vocab = {term for counter in tf_idf_dict.values() for term in counter.keys()}
(outer_ids, inner_ids), (outer_map, inner_map) = get_ids_and_maps(tf_idf_dict, vocab)
# first index is term, second index is column
# this is the opposite of the 3d version
# which is why outer/inner is backwards
matrix = np.zeros(shape=(len(inner_ids), len(outer_ids)))
for goddess_id, goddess_tf_idf in tf_idf_dict.items():
goddess_index = outer_map[goddess_id]
for term, tf_idf in goddess_tf_idf.items():
term_index = inner_map[term]
matrix[term_index, goddess_index] = tf_idf
return matrix, outer_map, inner_map
def cooccurrence_matrix(thresholded_counts, single_word_probabilities, thresholded_vocab=None, inner_ids_maps=None):
if inner_ids_maps is not None:
inner_ids, inner_map = inner_ids_maps
else:
(_, inner_ids), (_, inner_map) = get_ids_and_maps(thresholded_counts, thresholded_vocab)
matrix = np.zeros(shape=(len(inner_ids), len(inner_ids)))
for counter_dict in thresholded_counts.values():
for (first, second), num in counter_dict.items():
first_index = inner_map[first]
second_index = inner_map[second]
matrix[first_index, second_index] += num
# p(a, b) / (p(a)p(b))
# p(a, b) is number of that pair over all pairs
# p(a) is count of that word over all words
# p(b) is count of that word over all words
total_number_of_pairs = np.sum(np.sum(matrix, axis=0), axis=0)
matrix /= total_number_of_pairs
for word, word_prob in single_word_probabilities.items():
if word not in inner_map:
continue
ind = inner_map[word]
matrix[:, ind] /= word_prob
matrix[ind, :] /= word_prob
return matrix
def get_word_vectors_from_cooccurrence(matrix):
print("Beginning the SVD")
print("The matrix is {} so it'll take a while".format(matrix.shape))
u, s, vh = svd(matrix)
return u * s
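# Scaling the left singular vectors by the singular values gives LSA-style
# dense word embeddings, one row per vocabulary item.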
def get_doc_vectors_from_word_vectors(vectors, inner_map):
doc_vectors = {}
for g_id, words in iter_words():
shape = vectors[0].shape
doc_vector = np.zeros(shape)
factor = 1/len(words)
for word in words:
if word in inner_map:
word_index = inner_map[word]
word_vec = vectors[word_index]
doc_vector += factor * word_vec
doc_vectors[g_id] = doc_vector
return doc_vectors
def get_doc_vectors_from_tf_idf(matrix):
print("Beginning the SVD")
print("The matrix is {} so it'll take a while".format(matrix.shape))
u, s, vh = svd(matrix)
return s * vh
def get_them_doc_vectors(threshold=20):
tf_idf_dict = get_tf_idf_dict()
vocab = threshold_your_vocab(get_wordcounts(), threshold)
tf_idf_dict = {g_id:
{k: tf_idf
for k, tf_idf in v.items()
if k in vocab}
for g_id, v in tf_idf_dict.items()}
matrix, outer_map, inner_map = tf_idf_matrix(tf_idf_dict)
doc_vectors = get_doc_vectors_from_tf_idf(matrix)
return doc_vectors, outer_map
def get_distances_from_doc_vectors(doc_vectors, outer_map, limit=10):
distances = {}
for g_id in outer_map:
distances[g_id] = []
for second_id in outer_map:
if g_id == second_id:
continue
distances[g_id].append(
(cosine(
doc_vectors[outer_map[g_id]],
doc_vectors[outer_map[second_id]]), second_id))
distances[g_id].sort()
distances[g_id] = distances[g_id][:limit]
return distances
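# Note: scipy's cosine() returns the cosine *distance* (1 - cosine similarity),
# so sorting ascending puts the most similar documents first.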
def path_of_g_id(g_id):
return os.path.join("data", str(g_id) + ".json")
def replace_all_the_json_distances_with_tf_idf_ones(threshold=15):
doc_vectors, outer_map = get_them_doc_vectors(threshold=threshold)
distances = get_distances_from_doc_vectors(doc_vectors, outer_map, limit=3)
clipped_distances = {k: [g_id for _, g_id in l] for k, l in distances.items()}
for g_id, similar in clipped_distances.items():
goddess = load_goddess(g_id)
goddess['similar'] = json.dumps(similar)
with open(path_of_g_id(g_id), 'w') as fp:
json.dump(goddess, fp)
def main():
try:
replace_all_the_json_distances_with_tf_idf_ones(threshold=10)
except MemoryError:
replace_all_the_json_distances_with_tf_idf_ones(threshold=20)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
37986059ff2963ec4fce44a1fa022ce3c4a2f7a5 | fc6c11f139e051cb0f21df7fe6d376e2d12f3d63 | /python/lib/sldr/ldml_exemplars.py | d73996d4bcdf778aeb212982391dfcaab4768444 | [
"MIT"
] | permissive | enabling-languages/sldr | aaccd9e9a4eff707da2a62f1a0d2910f06d87cf7 | 73f977743df9f0a2d521e4c026eb97df3d7e457d | refs/heads/master | 2020-03-19T01:52:38.137993 | 2018-05-31T09:03:39 | 2018-05-31T09:03:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,071 | py | #!/usr/bin/python
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from icu import Char, Script, UCharCategory, UProperty, UScriptCode
from icu import Normalizer2, UNormalizationMode2, UnicodeString
from collections import Counter
import codecs
import os
import sys
try:
    import sldr.UnicodeSets
except ImportError:
    # Make the bundled copy importable, then retry the import.
    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'sldr', 'python', 'lib')))
    import sldr.UnicodeSets
import sldr.UnicodeSets
def main():
pass
class UCD(object):
def __init__(self):
# Maybe in new versions of PyICU the following
# (now commented out) shorthand function is defined.
# self.normalizer_nfc = Normalizer2.getNFCInstance()
# Since it is not, use the non-shorthand function with the needed parameters
self.normalizer_nfc = Normalizer2.getInstance(None, 'nfc', UNormalizationMode2.COMPOSE)
self.normalizer_nfd = Normalizer2.getInstance(None, 'nfc', UNormalizationMode2.DECOMPOSE)
self.normalizer_nfkc = Normalizer2.getInstance(None, 'nfkc', UNormalizationMode2.COMPOSE)
self.normalizer_nfkd = Normalizer2.getInstance(None, 'nfkc', UNormalizationMode2.DECOMPOSE)
def normalize(self, form, text):
"""Return the normal form form for the Unicode string text.
Valid values for form are 'NFC', 'NFKC', 'NFD', and 'NFKD'.
"""
if form == 'NFC':
return self.normalizer_nfc.normalize(text)
elif form == 'NFD':
return self.normalizer_nfd.normalize(text)
elif form == 'NFKC':
return self.normalizer_nfkc.normalize(text)
elif form == 'NFKD':
return self.normalizer_nfkd.normalize(text)
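    # For instance, normalize('NFD', u'\u00e9') yields u'e\u0301' (e followed by
    # a combining acute), and normalize('NFC', u'e\u0301') composes it back to u'\u00e9'.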
def normalize_nfc(self, text):
"""Return the NFC form for the Unicode string text."""
return self.normalize('NFC', text)
@staticmethod
def ismark(char):
"""True if the character is a mark (general category M)."""
numeric_char_type = Char.charType(char)
if (numeric_char_type == UCharCategory.NON_SPACING_MARK or
numeric_char_type == UCharCategory.COMBINING_SPACING_MARK or
numeric_char_type == UCharCategory.ENCLOSING_MARK):
return True
return False
@staticmethod
def isnukta(char):
"""True if the character is a nukta."""
if Char.getCombiningClass(char) == 7:
return True
return False
def is_always_combine(self, char):
"""True if Mark always combines (logically) with the base character."""
if self.isnukta(char):
return True
return False
@staticmethod
def is_sometimes_combine(char):
"""True if Mark sometimes combines (logically) with the base character."""
if 0x0300 <= ord(char) <= 0x036F:
return True
return False
def is_never_combine(self, char):
"""True if Mark never combines (logically) with the base character."""
if self.is_always_combine(char):
return False
if self.is_sometimes_combine(char):
return False
return True
@staticmethod
def isnumber(char):
"""True if the character is a number (general category Nd or No)."""
numeric_char_type = Char.charType(char)
if (numeric_char_type == UCharCategory.DECIMAL_DIGIT_NUMBER or
numeric_char_type == UCharCategory.OTHER_NUMBER):
return True
return False
@staticmethod
def isformat(char):
"""True if the character is a format character (general category Cf)."""
numeric_char_type = Char.charType(char)
if numeric_char_type == UCharCategory.FORMAT_CHAR:
return True
return False
@staticmethod
def is_space_separator(char):
"""True if the character is space separator (general category Zs)."""
numeric_char_type = Char.charType(char)
if numeric_char_type == UCharCategory.SPACE_SEPARATOR:
return True
return False
@staticmethod
def is_specific_script(char):
"""True if the character has a specific Script property,
that is, not the values Common or Inherited.
"""
script = Script.getScript(char)
script_code = Script.getScriptCode(script)
if script_code == UScriptCode.COMMON or script_code == UScriptCode.INHERITED:
return False
return True
@staticmethod
def is_exemplar_wordbreak(char):
"""True if the character has the Word_Break properties Katakana, ALetter, or MidLetter."""
# The following should be exposed by PyICU, but does not seem to be implemented.
# There are other values, but these are the ones need for this function.
WB_ALETTER = 1
WB_KATAKANA = 3
WB_MIDLETTER = 4
numeric_wordbreak_type = Char.getIntPropertyValue(char, UProperty.WORD_BREAK)
if (numeric_wordbreak_type == WB_KATAKANA or
numeric_wordbreak_type == WB_ALETTER or
numeric_wordbreak_type == WB_MIDLETTER):
return True
return False
def ispunct(self, char):
"""True if the character is punctuation for purposes of finding exemplars."""
# Some punctuation characters have other properties
# that means they are not punctuation exemplars.
if self.is_exemplar_wordbreak(char):
return False
return Char.ispunct(char)
@staticmethod
def toupper(text):
"""Map string to uppercase."""
lowercase = UnicodeString(text)
uppercase = lowercase.toUpper()
return unicode(uppercase)
def need_hex_escape(self, char, is_isolated):
"""Determine if a characters needs to be escaped with hex digits."""
if self.ismark(char) and is_isolated:
return True
if Char.hasBinaryProperty(char, UProperty.DEFAULT_IGNORABLE_CODE_POINT):
return True
if self.isformat(char):
return True
if self.is_space_separator(char):
return True
return False
class Exemplar(object):
def __init__(self, base, trailers=''):
self.base = base
self.trailers = trailers
def _get_text(self):
"""Return the whole exemplar (base + mark)."""
return self.base + self.trailers
text = property(_get_text)
def __str__(self):
if self.trailers == '':
return self.base
else:
return '{} {}'.format(self.base, self.trailers)
def __repr__(self):
base = codecs.encode(self.base, 'unicode_escape')
if self.trailers == '':
return "'Exemplar('{}')'".format(base)
else:
trailers = codecs.encode(self.trailers, 'unicode_escape')
return "'Exemplar('{}', '{}')'".format(base, trailers)
def __hash__(self):
return hash((self.base, self.trailers))
def __eq__(self, other):
if self.base == other.base and self.trailers == other.trailers:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
class Exemplars(object):
def __init__(self):
self.ucd = UCD()
# User settable configuration.
self.many_bases = 5
self.frequent = 0.1
# User data that should be accessed through getters and setters.
self._main = set()
self._auxiliary = set()
self._index = set()
self._punctuation = set()
self._digits = set()
self._graphemes = list()
self._frequency = list()
# Internal parameters.
self.clusters = Counter()
self.scripts = Counter()
self.codes_for_scripts = dict()
self.bases_for_marks = dict()
self.max_multigraph_length = 1
self.always_separate_marks = set()
self.need_splitting = True
self.unittest = False
def _set_main(self, ldml_exemplars):
"""Set LDML exemplars data for the main set."""
self._main = self.ldml_read(ldml_exemplars)
def _set_auxiliary(self, ldml_exemplars):
"""Set LDML exemplars data for the auxiliary set."""
self._auxiliary = self.ldml_read(ldml_exemplars)
def _set_index(self, ldml_exemplars):
"""Set LDML exemplars data for the index set."""
self._index = self.ldml_read(ldml_exemplars)
def _set_punctuation(self, ldml_exemplars):
"""Set LDML exemplars data for the punctuation set."""
self._punctuation = self.ldml_read(ldml_exemplars)
def _set_digits(self, ldml_exemplars):
"""Set LDML exemplars data for the digits set."""
self._digits = self.ldml_read(ldml_exemplars)
def _get_main(self):
"""Return LDML exemplars data for the main set."""
return self.ldml_write(self._main)
def _get_auxiliary(self):
"""Return LDML exemplars data for the auxiliary set."""
return self.ldml_write(self._auxiliary)
def _get_index(self):
"""Return LDML exemplars data for the index set."""
return self.ldml_write(self._index)
def _get_punctuation(self):
"""Return LDML exemplars data for the punctuation set."""
return self.ldml_write(self._punctuation)
def _get_digits(self):
"""Return LDML exemplars data for the digits set."""
return self.ldml_write(self._digits)
def _get_graphemes(self):
"""Return the list of found graphemes."""
return self.ldml_write(self._graphemes, sort=False)
def _get_frequency(self):
"""Return the list of found graphemes with frequency of occurrence."""
return self.ldml_write(self._frequency, sort=False)
def _get_script(self):
"""Return most frequently occurring script."""
script_code_and_count_list = self.scripts.most_common(1)
if len(script_code_and_count_list) == 0:
return ''
else:
script_code_and_count = script_code_and_count_list[0]
script_code = script_code_and_count[0]
script = self.codes_for_scripts[script_code]
script_name = Script.getShortName(script)
return script_name
main = property(_get_main, _set_main)
auxiliary = property(_get_auxiliary, _set_auxiliary)
index = property(_get_index, _set_index)
punctuation = property(_get_punctuation, _set_punctuation)
digits = property(_get_digits, _set_digits)
graphemes = property(_get_graphemes)
frequency = property(_get_frequency)
script = property(_get_script)
def ldml_read(self, ldml_exemplars):
"""Read exemplars from a string from a LDML formatted file."""
if self.unittest:
list_exemplars = ldml_exemplars.split()
else:
list_exemplars = sldr.UnicodeSets.us2list(ldml_exemplars)
exemplars = set()
for exemplar in list_exemplars:
exemplar = self.ucd.normalize('NFD', exemplar)
self.max_multigraph_length = max(self.max_multigraph_length, len(exemplar))
exemplars.add(exemplar)
return exemplars
def ldml_write(self, exemplars, sort=True):
"""Write exemplars to a string that can be written to a LDML formatted file."""
if sort:
# Exemplars mentioned in UTS #35 need to be sorted.
list_exemplars = list()
for exemplar in sorted(exemplars):
list_exemplars.append(exemplar)
else:
# Graphemes should be sorted by frequency,
# and since they already are,
# do nothing further here with the order.
list_exemplars = exemplars
list_nfc_exemplars = map(self.ucd.normalize_nfc, list_exemplars)
if self.unittest:
return ' '.join(list_nfc_exemplars)
else:
return sldr.UnicodeSets.list2us(list_nfc_exemplars, self.ucd)
def analyze(self):
"""Analyze the found exemplars and classify them."""
self.ignore_phantoms()
self.find_punctuation()
self.save_graphemes()
self.find_numbers()
self.count_marks()
while self.need_splitting:
self.need_splitting = False
self.find_indic_matras_and_viramas()
self.find_marks_on_same_bases()
self.find_productive_marks()
self.find_second_marks()
self.parcel_ignorable()
self.parcel_frequency()
self.make_index()
def ignore_phantoms(self):
"""Ignore phantom exemplars.
Phantoms are exemplars that have been set in one of the exemplar fields
(such as main or auxiliary) initially but not seen in the actual data processed.
"""
self._main = set()
self._auxiliary = set()
self._index = set()
self._punctuation = set()
self._digits = set()
def find_punctuation(self):
"""Put punctuation into the punctuation exemplar."""
for exemplar in list(self.clusters.keys()):
if self.ucd.ispunct(exemplar.base[0]):
self._punctuation.add(exemplar.base)
del self.clusters[exemplar]
def save_graphemes(self):
"""Save the list of found graphemes."""
for exemplar, count in self.clusters.most_common():
self._graphemes.append(exemplar.text)
self._frequency.append(u'{}:{}'.format(exemplar.text, count))
def count_marks(self):
"""Count how many different bases a mark occurs on."""
for exemplar in self.clusters.keys():
for trailer in exemplar.trailers:
if not self.ucd.ismark(trailer):
continue
# Only Marks get counted (and added to self.bases_for_marks).
mark = trailer
if mark in self.bases_for_marks:
bases_for_mark = self.bases_for_marks[mark]
bases_for_mark.add(exemplar.base)
else:
bases_for_mark = set()
bases_for_mark.add(exemplar.base)
self.bases_for_marks[mark] = bases_for_mark
def find_numbers(self):
"""Numbers without diacritics go into the digits exemplar."""
for exemplar in list(self.clusters.keys()):
if self.ucd.isnumber(exemplar.base) and len(exemplar.trailers) == 0:
self._digits.add(exemplar.base)
del self.clusters[exemplar]
def split_exemplar(self, exemplar, index, count):
"""Split an exemplar into separate exemplars."""
# If the exemplar is already a separate mark,
# the base of the exemplar will be an empty string,
# and therefore no further processing is needed
# on that exemplar.
if exemplar.base == '':
return
mark = exemplar.trailers[index]
before_current_mark = exemplar.trailers[:index]
after_current_mark = exemplar.trailers[index+1:]
exemplar_mark = Exemplar('', mark)
self.clusters[exemplar_mark] += count
new_exemplar = Exemplar(exemplar.base, before_current_mark + after_current_mark)
self.clusters[new_exemplar] += count
del self.clusters[exemplar]
self.need_splitting = True
def find_indic_matras_and_viramas(self):
"""Indic matras and viramas are always separate marks."""
for exemplar in list(self.clusters.keys()):
count = self.clusters[exemplar]
for trailer_index in range(len(exemplar.trailers)):
trailer = exemplar.trailers[trailer_index]
if (self.ucd.is_never_combine(trailer) or
Char.hasBinaryProperty(trailer, UProperty.DEFAULT_IGNORABLE_CODE_POINT)):
self.split_exemplar(exemplar, trailer_index, count)
def find_marks_on_same_bases(self):
"""If a set of diacritics has the sames bases, the diacritics are separate."""
for exemplar in list(self.clusters.keys()):
count = self.clusters[exemplar]
for trailer_index in range(len(exemplar.trailers)):
trailer = exemplar.trailers[trailer_index]
if trailer in self.bases_for_marks:
# The trailer is a Mark, as it was found,
# and only Marks are in that data structure.
current_mark = trailer
current_bases = self.bases_for_marks[current_mark]
# Compare the current set of bases to all the other sets of bases.
for other_mark in self.bases_for_marks.keys():
if current_mark != other_mark:
other_bases = self.bases_for_marks[other_mark]
difference = current_bases.symmetric_difference(other_bases)
if len(difference) == 0:
self.split_exemplar(exemplar, trailer_index, count)
def find_productive_marks(self):
"""Split clusters if a mark occurs on many bases."""
for exemplar in list(self.clusters.keys()):
count = self.clusters[exemplar]
for trailer_index in range(len(exemplar.trailers)):
trailer = exemplar.trailers[trailer_index]
if trailer in self.bases_for_marks:
# The trailer is a Mark, as it was found,
# and only Marks are in that data structure.
mark = trailer
bases_for_mark = self.bases_for_marks[mark]
# If a mark has more than many_bases ...
if len(bases_for_mark) > self.many_bases:
# then the base and mark are separate exemplars.
self.split_exemplar(exemplar, trailer_index, count)
def find_second_marks(self):
"""Split clusters if a mark is a second or later stacking diacritic."""
for exemplar in list(self.clusters.keys()):
count = self.clusters[exemplar]
for trailer_index in range(len(exemplar.trailers)):
trailer = exemplar.trailers[trailer_index]
# If the mark has already been found to be a always separate mark,
# split the exemplar.
if trailer in self.always_separate_marks:
self.split_exemplar(exemplar, trailer_index, count)
# Only graphemes with more than one mark need to be looked at
# for finding stacking diacritics that are separate.
if trailer_index > 0:
current_mark_ccc = Char.getCombiningClass(trailer)
previous_mark_ccc = Char.getCombiningClass(previous_trailer)
# If a mark has the same combining class (ccc) as the previous mark,
# then the mark is a second or later stacking diacritic and is a separate mark.
# Also, if the mark has already been found to be a always separate mark,
# split the exemplar.
if current_mark_ccc == previous_mark_ccc:
self.always_separate_marks.add(trailer)
self.split_exemplar(exemplar, trailer_index, count)
previous_trailer = trailer
def parcel_ignorable(self):
"""Move Default_Ignorable_Code_Point characters to auxiliary."""
for exemplar in list(self.clusters.keys()):
for trailer in exemplar.trailers:
if trailer not in self.bases_for_marks:
# if Char.hasBinaryProperty(trailer, UProperty.DEFAULT_IGNORABLE_CODE_POINT):
# The trailer is a Default_Ignorable_Code_Point
# which needs to go in the auxiliary list.
self._auxiliary.add(trailer)
                    # Guard the delete: an exemplar with several such trailers
                    # would otherwise be removed twice, raising KeyError.
                    if exemplar in self.clusters:
                        del self.clusters[exemplar]
def parcel_frequency(self):
"""Parcel exemplars between main and auxiliary based on frequency."""
total_count = sum(self.clusters.values())
item_count = len(self.clusters)
if item_count != 0:
average = total_count / float(item_count)
else:
average = 0
frequent = average * (self.frequent / float(100))
for exemplar in self.clusters.keys():
occurs = self.clusters[exemplar]
if occurs > frequent:
self._main.add(exemplar.text)
else:
self._auxiliary.add(exemplar.text)
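    # With the default frequent = 0.1, the cutoff is 0.1% of the average cluster
    # count: e.g. an average count of 1000 puts clusters seen more than once in
    # main and the rest in auxiliary.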
def make_index(self):
"""Analyze the found exemplars for indices and classify them."""
possible_index = self._main # .union(self._auxiliary)
for exemplar in possible_index:
# An index cannot be an empty string.
# This case should not occur, but it does, uncomment the test below
# to enable the script to run without errors until the bug that is
# causing empty exemplars to be produced is fixed.
# if exemplar == '':
# continue
# An index should not be an isolated mark.
if self.ucd.ismark(exemplar[0]):
continue
# Index exemplars are uppercase.
uppercase = self.ucd.toupper(exemplar)
self._index.add(uppercase)
def allowable(self, char):
"""Make sure exemplars have the needed properties."""
# Numbers with or without diacritics need to be allowed.
if self.ucd.isnumber(char):
return True
# Exemplars must be lowercase.
if Char.isUUppercase(char):
return False
# Characters with a specific script can be exemplars.
if self.ucd.is_specific_script(char):
return True
# Some punctuation and symbols are handled as letters.
if self.ucd.is_exemplar_wordbreak(char):
return True
# Other characters must be Alphabetic.
if Char.isUAlphabetic(char):
return True
return False
def process(self, text):
"""Analyze a string."""
i = 0
text = self.ucd.normalize('NFD', text)
# Record script of each character.
for char in text:
script = Script.getScript(char)
script_code = Script.getScriptCode(script)
self.scripts[script_code] += 1
self.codes_for_scripts[script_code] = script
# Record clusters
while i < len(text):
# Look for multigraphs (from length of max_multigraph_length down to 1) character(s)
# of multigraphs already specified in a LDML file.
# Longest possible matches are looked at first.
for multigraph_length in range(self.max_multigraph_length, 0, -1):
multigraph = text[i:i + multigraph_length]
if (multigraph in self._main or
multigraph in self._auxiliary or
multigraph in self._index or
multigraph in self._punctuation):
exemplar = Exemplar(multigraph)
self.clusters[exemplar] += 1
i += multigraph_length
break
# No multigraphs were found at this position,
# so continue processing a single character
# if we have not gone beyond the end of the text.
if not i < len(text):
break
char = text[i]
# Test for punctuation.
if self.ucd.ispunct(char):
exemplar = Exemplar(char)
self.clusters[exemplar] += 1
i += 1
continue
# Find grapheme clusters.
# Ensure exemplar base has needed properties.
if not self.allowable(char):
i += 1
continue
# The current character is a base character.
base = char
# Then find the end of the cluster
# (which may consist of only base characters).
length = base_length = 1
while i + length < len(text):
trailer = text[i + length]
if Char.hasBinaryProperty(trailer, UProperty.DEFAULT_IGNORABLE_CODE_POINT):
# A Default_Ignorable_Code_Point was found, so the cluster continues.
length += 1
continue
if self.ucd.ismark(trailer):
# A Mark was found, so the cluster continues.
length += 1
# Marks such as nuktas are considered part of the base.
if self.ucd.is_always_combine(trailer):
# A Mark such as a nukta was found, so the base continues,
# as well as the cluster.
base_length += 1
base = text[i:i + base_length]
continue
else:
# No more marks, so the end of the cluster has been reached.
break
# Extract cluster
# If no nuktas have been found,
# then the base will be the single character already called base (or char).
# If no non-nukta marks have been found,
# then the trailers variable will be an empty string.
trailers = text[i + base_length:i + length]
exemplar = Exemplar(base, trailers)
self.clusters[exemplar] += 1
i += length
if __name__ == '__main__':
main()
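# A minimal usage sketch (assumes the sldr package and PyICU are importable):
#   ex = Exemplars()
#   ex.process(u'some corpus text')
#   ex.analyze()
#   print(ex.main, ex.digits, ex.script)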
| [
"[email protected]"
] | |
1481078cea1ef6095a4216468d561597aee7ba3a | f8580d2c963b6a3c34e918e0743d0a503a9584bd | /wscript | 3950282f86c51f18a2949a03fc08317732d0a76b | [] | no_license | pypy/wxpython-cffi | f59c3faeed26e6a26d0c87f4f659f93e5366af28 | 877b7e6c1b5880517456f1960db370e4bb7f5c90 | refs/heads/master | 2023-07-08T21:13:22.765786 | 2016-12-02T22:10:45 | 2016-12-02T22:10:45 | 397,124,697 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,300 | #!/usr/bin/python
#-----------------------------------------------------------------------------
# WAF script for building and installing the wxPython extension modules.
#
# Author: Robin Dunn
# Copyright: (c) 2013 by Total Control Software
# License: wxWindows License
#-----------------------------------------------------------------------------
import sys
import os
import buildtools.config
cfg = buildtools.config.Config(True)
#-----------------------------------------------------------------------------
# Options and configuration
APPNAME = 'wxPython'
VERSION = cfg.VERSION
isWindows = sys.platform.startswith('win')
isDarwin = sys.platform == "darwin"
top = '.'
out = 'build/waf'
def options(opt):
if isWindows:
opt.load('msvc')
else:
opt.load('compiler_cc compiler_cxx')
opt.load('python')
opt.add_option('--debug', dest='debug', action='store_true', default=False,
help='Turn on debug compile options.')
opt.add_option('--python', dest='python', default='', action='store',
                   help='Full path to the Python executable to use.')
opt.add_option('--wx_config', dest='wx_config', default='wx-config', action='store',
help='Full path to the wx-config script to be used for this build.')
opt.add_option('--no_magic', dest='no_magic', action='store_true', default=False,
help='Don\'t use linker magic to enable wx libs to be bundled with '
'wxPython. See build.py for more info.')
opt.add_option('--mac_arch', dest='mac_arch', default='', action='store',
help='One or more comma separated architecture names to be used for '
'the Mac builds. Should be at least a subset of the architectures '
'used by wxWidgets and Python')
opt.add_option('--msvc_arch', dest='msvc_arch', default='x86', action='store',
help='The architecture to target for MSVC builds. Supported values '
'are: "x86" or "x64"')
#opt.add_option('--msvc_ver', dest='msvc_ver', default='9.0', action='store',
# help='The MSVC version to use for the build, if multiple versions are '
# 'installed. Currently supported values are: "9.0" or "10.0"')
# TODO: The waf msvc tool has --msvc_version and --msvc_target options
# already. We should just switch to those instead of adding our own
# option names...
def configure(conf):
if isWindows:
# For now simply choose the compiler version based on the Python
# version. We have a chicken-egg problem here. The compiler needs to
# be selected before the Python stuff can be configured, but we need
# Python to know what version of the compiler to use.
# TODO: Fix this
msvc_version = '9.0' #conf.options.msvc_ver
if conf.options.python and '33' in conf.options.python:
msvc_version = '10.0'
conf.env['MSVC_VERSIONS'] = ['msvc ' + msvc_version]
conf.env['MSVC_TARGETS'] = [conf.options.msvc_arch]
conf.load('msvc')
else:
conf.load('compiler_cc compiler_cxx')
if conf.options.python:
conf.env.PYTHON = conf.options.python
conf.load('python')
conf.check_python_version(minver=(2,7,0))
if isWindows:
# Search for the Python headers without doing some stuff that could
# iccorectly fail on Windows. See my_check_python_headers below.
conf.my_check_python_headers()
else:
conf.check_python_headers()
# fetch and save the debug option
conf.env.debug = conf.options.debug
# Ensure that the headers in siplib and Phoenix's src dir can be found
conf.env.INCLUDES_WXPY = ['sip/siplib', 'src']
if isWindows:
# Windows/MSVC specific stuff
cfg.finishSetup(debug=conf.env.debug)
conf.env.INCLUDES_WX = cfg.includes
conf.env.DEFINES_WX = cfg.wafDefines
conf.env.CFLAGS_WX = cfg.cflags
conf.env.CXXFLAGS_WX = cfg.cflags
conf.env.LIBPATH_WX = cfg.libdirs
conf.env.LIB_WX = cfg.libs
conf.env.LIBFLAGS_WX = cfg.lflags
_copyEnvGroup(conf.env, '_WX', '_WXADV')
conf.env.LIB_WXADV += cfg.makeLibName('adv')
_copyEnvGroup(conf.env, '_WX', '_WXSTC')
conf.env.LIB_WXSTC += cfg.makeLibName('stc')
_copyEnvGroup(conf.env, '_WX', '_WXHTML')
conf.env.LIB_WXHTML += cfg.makeLibName('html')
_copyEnvGroup(conf.env, '_WX', '_WXGL')
conf.env.LIB_WXGL += cfg.makeLibName('gl')
_copyEnvGroup(conf.env, '_WX', '_WXWEBVIEW')
conf.env.LIB_WXWEBVIEW += cfg.makeLibName('webview')
_copyEnvGroup(conf.env, '_WX', '_WXXML')
conf.env.LIB_WXXML += cfg.makeLibName('xml', isMSWBase=True)
_copyEnvGroup(conf.env, '_WX', '_WXXRC')
conf.env.LIB_WXXRC += cfg.makeLibName('xrc')
_copyEnvGroup(conf.env, '_WX', '_WXRICHTEXT')
conf.env.LIB_WXRICHTEXT += cfg.makeLibName('richtext')
# ** Add code for new modules here
# tweak the PYEXT compile and link flags if making a --debug build
if conf.env.debug:
for listname in ['CFLAGS_PYEXT', 'CXXFLAGS_PYEXT']:
lst = conf.env[listname]
for opt in '/Ox /MD /DNDEBUG'.split():
try:
lst.remove(opt)
except ValueError:
pass
lst[1:1] = '/Od /MDd /Z7 /D_DEBUG'.split()
conf.env['LINKFLAGS_PYEXT'].append('/DEBUG')
conf.env['LIB_PYEXT'][0] += '_d'
else:
# Configuration stuff for non-Windows ports using wx-config
conf.env.CFLAGS_WX = list()
conf.env.CXXFLAGS_WX = list()
conf.env.CFLAGS_WXPY = list()
conf.env.CXXFLAGS_WXPY = list()
# finish configuring the Config object
conf.env.wx_config = conf.options.wx_config
cfg.finishSetup(conf.env.wx_config, conf.env.debug)
# Check wx-config exists and fetch some values from it
rpath = ' --no-rpath' if not conf.options.no_magic else ''
conf.check_cfg(path=conf.options.wx_config, package='',
args='--cxxflags --libs core,net' + rpath,
uselib_store='WX', mandatory=True)
# Run it again with different libs options to get different
        # sets of flags stored to use with various extension modules below.
conf.check_cfg(path=conf.options.wx_config, package='',
args='--cxxflags --libs adv,core,net' + rpath,
uselib_store='WXADV', mandatory=True)
libname = '' if cfg.MONOLITHIC else 'stc,' # workaround bug in wx-config
conf.check_cfg(path=conf.options.wx_config, package='',
args=('--cxxflags --libs %score,net' % libname) + rpath,
uselib_store='WXSTC', mandatory=True)
conf.check_cfg(path=conf.options.wx_config, package='',
args='--cxxflags --libs html,core,net' + rpath,
uselib_store='WXHTML', mandatory=True)
conf.check_cfg(path=conf.options.wx_config, package='',
args='--cxxflags --libs gl,core,net' + rpath,
uselib_store='WXGL', mandatory=True)
conf.check_cfg(path=conf.options.wx_config, package='',
args='--cxxflags --libs webview,core,net' + rpath,
uselib_store='WXWEBVIEW', mandatory=True)
if isDarwin:
conf.check_cfg(path=conf.options.wx_config, package='',
args='--cxxflags --libs core,net' + rpath,
uselib_store='WXWEBKIT', mandatory=True)
conf.check_cfg(path=conf.options.wx_config, package='',
args='--cxxflags --libs xml,core,net' + rpath,
uselib_store='WXXML', mandatory=True)
conf.check_cfg(path=conf.options.wx_config, package='',
args='--cxxflags --libs xrc,xml,core,net' + rpath,
uselib_store='WXXRC', mandatory=True)
libname = '' if cfg.MONOLITHIC else 'richtext,' # workaround bug in wx-config
conf.check_cfg(path=conf.options.wx_config, package='',
args='--cxxflags --libs %score,net' % libname + rpath,
uselib_store='WXRICHTEXT', mandatory=True)
# ** Add code for new modules here
# NOTE: This assumes that if the platform is not win32 (from
# the test above) and not darwin then we must be using the
# GTK2 port of wxWidgets. If we ever support other ports then
# this code will need to be adjusted.
if not isDarwin:
gtkflags = os.popen('pkg-config gtk+-2.0 --cflags', 'r').read()[:-1]
conf.env.CFLAGS_WX += gtkflags.split()
conf.env.CXXFLAGS_WX += gtkflags.split()
# clear out Python's default NDEBUG and make sure it is undef'd too just in case
if 'NDEBUG' in conf.env.DEFINES_PYEXT:
conf.env.DEFINES_PYEXT.remove('NDEBUG')
conf.env.CFLAGS_WXPY.append('-UNDEBUG')
conf.env.CXXFLAGS_WXPY.append('-UNDEBUG')
# Add basic debug info for all builds
conf.env.CFLAGS_WXPY.append('-g')
conf.env.CXXFLAGS_WXPY.append('-g')
# And if --debug is set turn on more detailed debug info and turn off optimization
if conf.env.debug:
conf.env.CFLAGS_WXPY.extend(['-ggdb', '-O0'])
conf.env.CXXFLAGS_WXPY.extend(['-ggdb', '-O0'])
# Remove some compile flags we don't care about, ones that we may be
# replacing ourselves anyway, or ones which may have duplicates.
flags = ['CFLAGS_PYEXT', 'CXXFLAGS_PYEXT', 'LINKFLAGS_PYEXT',
'CFLAGS_cshlib', 'CXXFLAGS_cshlib', 'LINKFLAGS_cshlib',
'CFLAGS_cxxshlib', 'CXXFLAGS_cxxshlib', 'LINKFLAGS_cxxshlib']
for key in flags:
_cleanFlags(conf, key)
# Use the same compilers that wxWidgets used
if cfg.CC:
conf.env.CC = cfg.CC.split()
if cfg.CXX:
conf.env.CXX = cfg.CXX.split()
# Some Mac-specific stuff
if isDarwin:
conf.env.MACOSX_DEPLOYMENT_TARGET = "10.5"
if conf.options.mac_arch:
conf.env.ARCH_WXPY = conf.options.mac_arch.split(',')
#import pprint
#pprint.pprint( [(k, conf.env[k]) for k in conf.env.keys()] )
#
# This is a copy of WAF's check_python_headers with some problematic stuff ripped out.
#
from waflib.Configure import conf
@conf
def my_check_python_headers(conf):
"""
Check for headers and libraries necessary to extend or embed python by using the module *distutils*.
On success the environment variables xxx_PYEXT and xxx_PYEMBED are added:
* PYEXT: for compiling python extensions
* PYEMBED: for embedding a python interpreter
"""
env = conf.env
if not env['CC_NAME'] and not env['CXX_NAME']:
conf.fatal('load a compiler first (gcc, g++, ..)')
if not env['PYTHON_VERSION']:
conf.check_python_version()
pybin = conf.env.PYTHON
if not pybin:
conf.fatal('Could not find the python executable')
v = 'prefix SO LDFLAGS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDSHARED CFLAGS'.split()
try:
lst = conf.get_python_variables(["get_config_var('%s') or ''" % x for x in v])
except RuntimeError:
conf.fatal("Python development headers not found (-v for details).")
vals = ['%s = %r' % (x, y) for (x, y) in zip(v, lst)]
conf.to_log("Configuration returned from %r:\n%r\n" % (pybin, '\n'.join(vals)))
dct = dict(zip(v, lst))
x = 'MACOSX_DEPLOYMENT_TARGET'
if dct[x]:
conf.env[x] = conf.environ[x] = dct[x]
env['pyext_PATTERN'] = '%s' + dct['SO'] # not a mistake
# Check for python libraries for embedding
all_flags = dct['LDFLAGS'] + ' ' + dct['CFLAGS']
conf.parse_flags(all_flags, 'PYEMBED')
all_flags = dct['LDFLAGS'] + ' ' + dct['LDSHARED'] + ' ' + dct['CFLAGS']
conf.parse_flags(all_flags, 'PYEXT')
if isWindows:
libname = 'python' + conf.env['PYTHON_VERSION'].replace('.', '')
# TODO: libpath will be incorrect in virtualenv's. Fix this...
libpath = [os.path.join(dct['prefix'], "libs")]
conf.env['LIBPATH_PYEMBED'] = libpath
conf.env.append_value('LIB_PYEMBED', [libname])
conf.env['LIBPATH_PYEXT'] = conf.env['LIBPATH_PYEMBED']
conf.env['LIB_PYEXT'] = conf.env['LIB_PYEMBED']
else:
result = None
for name in ('python' + env['PYTHON_VERSION'], 'python' + env['PYTHON_VERSION'].replace('.', '')):
# LIBPATH_PYEMBED is already set; see if it works.
if not result and env['LIBPATH_PYEMBED']:
path = env['LIBPATH_PYEMBED']
conf.to_log("\n\n# Trying default LIBPATH_PYEMBED: %r\n" % path)
result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBPATH_PYEMBED' % name)
if not result and dct['LIBDIR']:
path = [dct['LIBDIR']]
conf.to_log("\n\n# try again with -L$python_LIBDIR: %r\n" % path)
result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBDIR' % name)
if not result and dct['LIBPL']:
path = [dct['LIBPL']]
conf.to_log("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n")
result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in python_LIBPL' % name)
if not result:
path = [os.path.join(dct['prefix'], "libs")]
conf.to_log("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in $prefix/libs' % name)
if result:
break # do not forget to set LIBPATH_PYEMBED
if result:
env['LIBPATH_PYEMBED'] = path
env.append_value('LIB_PYEMBED', [name])
else:
conf.to_log("\n\n### LIB NOT FOUND\n")
conf.to_log("Include path for Python extensions "
"(found via distutils module): %r\n" % (dct['INCLUDEPY'],))
env['INCLUDES_PYEXT'] = [dct['INCLUDEPY']]
env['INCLUDES_PYEMBED'] = [dct['INCLUDEPY']]
# Code using the Python API needs to be compiled with -fno-strict-aliasing
if env['CC_NAME'] == 'gcc':
env.append_value('CFLAGS_PYEMBED', ['-fno-strict-aliasing'])
env.append_value('CFLAGS_PYEXT', ['-fno-strict-aliasing'])
if env['CXX_NAME'] == 'gcc':
env.append_value('CXXFLAGS_PYEMBED', ['-fno-strict-aliasing'])
env.append_value('CXXFLAGS_PYEXT', ['-fno-strict-aliasing'])
if env.CC_NAME == "msvc":
from distutils.msvccompiler import MSVCCompiler
dist_compiler = MSVCCompiler()
dist_compiler.initialize()
env.append_value('CFLAGS_PYEXT', dist_compiler.compile_options)
env.append_value('CXXFLAGS_PYEXT', dist_compiler.compile_options)
env.append_value('LINKFLAGS_PYEXT', dist_compiler.ldflags_shared)
#-----------------------------------------------------------------------------
# Build command
def build(bld):
# Ensure that the directory containing this script is on the python
# path for spawned commands so the builder and phoenix packages can be
# found.
thisdir = os.path.abspath(".")
sys.path.insert(0, thisdir)
from distutils.file_util import copy_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer, newer_group
from buildtools.config import opj, loadETG, getEtgSipCppFiles
cfg.finishSetup(bld.env.wx_config)
# update the license files
mkpath('license')
for filename in ['preamble.txt', 'licence.txt', 'licendoc.txt', 'lgpl.txt']:
copy_file(opj(cfg.WXDIR, 'docs', filename), opj('license',filename), update=1, verbose=1)
# create the package's __version__ module
open(opj(cfg.PKGDIR, '__version__.py'), 'w').write(
"# This file was generated by Phoenix's wscript.\n\n"
"VERSION_STRING = '%(VERSION)s'\n"
"MAJOR_VERSION = %(VER_MAJOR)s\n"
"MINOR_VERSION = %(VER_MINOR)s\n"
"RELEASE_NUMBER = %(VER_RELEASE)s\n\n"
"VERSION = (MAJOR_VERSION, MINOR_VERSION, RELEASE_NUMBER, '%(VER_FLAGS)s')\n"
% cfg.__dict__)
# and one for the demo folder too
open('demo/version.py', 'w').write(
"# This file was generated by Phoenix's wscript.\n\n"
"VERSION_STRING = '%(VERSION)s'\n"
% cfg.__dict__)
# copy the wx locale message catalogs to the package dir
if isWindows or isDarwin:
cfg.build_locale_dir(opj(cfg.PKGDIR, 'locale'))
# copy __init__.py
copy_file('src/__init__.py', cfg.PKGDIR, update=1, verbose=1)
# Create the build tasks for each of our extension modules.
siplib = bld(
features = 'c cxx cshlib cxxshlib pyext',
target = makeTargetName(bld, 'siplib'),
source = ['sip/siplib/apiversions.c',
'sip/siplib/bool.cpp',
'sip/siplib/descriptors.c',
'sip/siplib/objmap.c',
'sip/siplib/qtlib.c',
'sip/siplib/siplib.c',
'sip/siplib/threads.c',
'sip/siplib/voidptr.c',
],
uselib = 'WX WXPY',
)
makeExtCopyRule(bld, 'siplib')
etg = loadETG('etg/_core.py')
rc = ['src/wxc.rc'] if isWindows else []
bld(features = 'c cxx cxxshlib pyext',
target = makeTargetName(bld, '_core'),
source = getEtgSipCppFiles(etg) + rc,
uselib = 'WX WXPY',
)
makeExtCopyRule(bld, '_core')
# ** Add code for new modules here
#-----------------------------------------------------------------------------
# helpers
# Remove some unwanted flags from the given key in the context's environment
def _cleanFlags(ctx, key):
cleaned = list()
skipnext = False
for idx, flag in enumerate(ctx.env[key]):
if flag in ['-arch', '-isysroot', '-compatibility_version', '-current_version']:
skipnext = True # implicitly skips this one too
elif not skipnext:
cleaned.append(flag)
else:
skipnext = False
ctx.env[key] = cleaned
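# Illustrative sketch (not part of the original wscript): shows the
# pair-skipping behaviour of _cleanFlags above on a fake context object.
# SimpleNamespace stands in for the waf Configure context, which is an
# assumption made only for this demo.
def _demo_cleanFlags():
    from types import SimpleNamespace
    ctx = SimpleNamespace(env={'CFLAGS_PYEXT': ['-O2', '-arch', 'x86_64', '-g']})
    _cleanFlags(ctx, 'CFLAGS_PYEXT')
    # '-arch' and its argument 'x86_64' are both dropped:
    assert ctx.env['CFLAGS_PYEXT'] == ['-O2', '-g']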
def makeTargetName(bld, name):
if isWindows and bld.env.debug:
name += '_d'
return name
# Make a rule that will copy a built extension module to the in-place package
# dir so we can test locally without doing an install.
def makeExtCopyRule(bld, name):
name = makeTargetName(bld, name)
src = bld.env.pyext_PATTERN % name
tgt = 'pkg.%s' % name # just a name to be touched to serve as a timestamp of the copy
bld(rule=copyFileToPkg, source=src, target=tgt, after=name)
# This is the task function to be called by the above rule.
def copyFileToPkg(task):
from distutils.file_util import copy_file
from buildtools.config import opj
src = task.inputs[0].abspath()
tgt = task.outputs[0].abspath()
task.exec_command('touch %s' % tgt)
tgt = opj(cfg.PKGDIR, os.path.basename(src))
copy_file(src, tgt, verbose=1)
return 0
# Copy all the items in env with a matching postfix to a similarly
# named item with the dest postfix.
def _copyEnvGroup(env, srcPostfix, destPostfix):
import copy
for key in env.keys():
if key.endswith(srcPostfix):
newKey = key[:-len(srcPostfix)] + destPostfix
env[newKey] = copy.copy(env[key])
#-----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# File: /measurement_tools/flexran_cpu_utilization.py
# Repository: arled-papa/marc (license: Apache-2.0)
# -----------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Copyright (c) 2021 Arled Papa
# Author: Arled Papa <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import psutil
import logging
"""
Function that retrieves the process ID of the FlexRAN controller process and measures its CPU consumption
"""
def utilization():
logging.basicConfig(
filename='cpu_utilization',
format='%(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
for p in psutil.process_iter(attrs=['pid', 'name']):
if p.info['name'] == 'rt_controller':
pid = p.info['pid']
process_id = psutil.Process(pid)
while True:
logging.info(process_id.cpu_percent(interval=1))
if __name__ == "__main__":
utilization()
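# Illustrative sketch (not part of the original tool): reads back the
# 'cpu_utilization' log produced by utilization() and reports the mean load.
# Assumes one numeric sample per line, which is what the
# format='%(message)s' logging configuration above yields.
def mean_cpu_utilization(path='cpu_utilization'):
    with open(path) as f:
        samples = [float(line) for line in f if line.strip()]
    return sum(samples) / len(samples) if samples else 0.0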
# -----------------------------------------------------------------------------
# File: /ROS-groovy/hector_quadrotor/hector_uav_msgs/src/hector_uav_msgs/msg/_RawImu.py
# Repository: AlessioTonioni/Autonomous-Flight-ROS (no license)
# -----------------------------------------------------------------------------
"""autogenerated by genpy from hector_uav_msgs/RawImu.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class RawImu(genpy.Message):
_md5sum = "0879a838e899792bcf72ccfe7b5595ef"
_type = "hector_uav_msgs/RawImu"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
uint16[3] angular_velocity
uint16[3] linear_acceleration
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','angular_velocity','linear_acceleration']
_slot_types = ['std_msgs/Header','uint16[3]','uint16[3]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,angular_velocity,linear_acceleration
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(RawImu, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.angular_velocity is None:
self.angular_velocity = [0,0,0]
if self.linear_acceleration is None:
self.linear_acceleration = [0,0,0]
else:
self.header = std_msgs.msg.Header()
self.angular_velocity = [0,0,0]
self.linear_acceleration = [0,0,0]
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_3H.pack(*self.angular_velocity))
buff.write(_struct_3H.pack(*self.linear_acceleration))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 6
self.angular_velocity = _struct_3H.unpack(str[start:end])
start = end
end += 6
self.linear_acceleration = _struct_3H.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(self.angular_velocity.tostring())
buff.write(self.linear_acceleration.tostring())
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 6
self.angular_velocity = numpy.frombuffer(str[start:end], dtype=numpy.uint16, count=3)
start = end
end += 6
self.linear_acceleration = numpy.frombuffer(str[start:end], dtype=numpy.uint16, count=3)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_3H = struct.Struct("<3H")
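# Illustrative sketch (not part of the generated file): round-trips a RawImu
# message through serialize()/deserialize(). Assumes a ROS Python environment
# where genpy and std_msgs are importable; the field values are made up.
def _demo_rawimu_roundtrip():
    from io import BytesIO
    msg = RawImu(angular_velocity=[1, 2, 3], linear_acceleration=[4, 5, 6])
    buff = BytesIO()
    msg.serialize(buff)                       # packs header + two uint16[3] arrays
    out = RawImu().deserialize(buff.getvalue())
    assert list(out.angular_velocity) == [1, 2, 3]
    assert list(out.linear_acceleration) == [4, 5, 6]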
# -----------------------------------------------------------------------------
# File: /stocks_api/models/schemas.py
# Repository: MaxMcF/stocks_api (no license)
# -----------------------------------------------------------------------------
from marshmallow_sqlalchemy import ModelSchema
from marshmallow_sqlalchemy.fields import fields
from . import StocksInfo, Account, AccountRole
# from . import Portfolio
from .portfolio import Portfolio
class StocksInfoSchema(ModelSchema):
class Meta:
model = StocksInfo
class AccountRoleSchema(ModelSchema):
class Meta:
model = AccountRole
class AccountSchema(ModelSchema):
roles = fields.Nested(AccountRoleSchema, many=True, only='name')
class Meta:
model = Account
class PortfolioSchema(ModelSchema):
roles = fields.Nested(AccountRoleSchema, many=True, only='name')
account = fields.Nested(AccountSchema, exclude=(
'password', 'portfolios', 'roles', 'date_created', 'date_updated',
))
class Meta:
model = Portfolio
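# Illustrative sketch (not part of the original module): serialising an
# Account instance with the schema above. The 'password' field and the exact
# shape of the result are assumptions; marshmallow 2 returns a MarshalResult,
# marshmallow 3 returns a plain dict.
def dump_account(account):
    schema = AccountSchema(exclude=('password',))
    return schema.dump(account)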
# -----------------------------------------------------------------------------
# File: /analysis/Analysis_by_Educator/analysis_tanuja.py
# Repository: Colo55us/BeatO (no license)
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 13:00:34 2018
@author: BeatO
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('D:/Data_For Sugar_Educators-HOD Dashboard_2018_01_01.csv',sep=',')
#print(df['reading'].count())
df = df.loc[df.duplicated(subset='reading',keep=False),:]
#df = df.iloc[0:200]
#print(df.sort_values(['name'],ascending=1))
#print(df[df['name']=='NaN'])
df = df[df.readingtime.str.contains("Fasting")== True]
df = df[df.eduname.str.contains("Tanuja ")== True]
df = df.drop(['email','phone','deviceid','appversion','os','readingdevice','firstentrydate','lastentrydate'],axis=1)
li_userid = list(set(list(df['userid'])))
print(df.head(40))
li_m = []
li_c = []
print(len(li_userid))
counter = 0
for uid in li_userid:
li_helper = []
for index,row in df.iterrows():
if row['userid']==uid:
if row['reading']>30 and row['reading']<300:
li_helper.append(row['reading'])
df = df.drop([index])
if len(li_helper)>3:
l = np.array(np.arange(0,len(li_helper)))
z = np.polyfit(l,np.array(li_helper),1)
x = np.array(z).tolist()
if x[0]<=1E308:
li_m.append(x[0])
li_c.append(x[1])
print('slope: ',x[0],' Y-intercept:',x[1], counter,' out of ',len(li_userid),'user_id:',uid)
else:
pass
counter = counter+1
m_final = np.mean(np.array(li_m))
c_final = np.mean(np.array(li_c))
Y = []
X = []
for i in range(1,11):
y_loc = m_final*i + c_final
X.append(i)
Y.append(y_loc)
print('Mean slope=',m_final,' Mean Y-intercept=',c_final)
fig = plt.figure()
plt.plot(X,Y)
fig.suptitle('tanuja',fontsize=25)
plt.xlabel('frequency',fontsize=18)
plt.ylabel('Glucose Level',fontsize=18)
plt.show()
fig.savefig('graph/tanuja.jpg')
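# Illustrative sketch (not part of the original script): an equivalent
# per-user trend fit using groupby that avoids re-scanning the frame for
# every user. Column names match the CSV above; equivalence for users with
# more than three valid readings is an assumption.
def per_user_trends(frame):
    valid = frame[(frame['reading'] > 30) & (frame['reading'] < 300)]
    trends = {}
    for uid, grp in valid.groupby('userid'):
        if len(grp) > 3:
            m, c = np.polyfit(np.arange(len(grp)), grp['reading'].to_numpy(), 1)
            trends[uid] = (m, c)
    return trends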
# -----------------------------------------------------------------------------
# File: /deployment/sagemaker-dashboards-for-ml/cloudformation/assistants/solution-assistant/src/index.py
# Repository: smart-patrol/streamlit-application-deployment-on-aws (licenses: Apache-2.0, MIT-0)
# -----------------------------------------------------------------------------
import sys
sys.path.append('./site-packages')
# flake8: noqa: E402
from crhelper import CfnResource
import boto3
from pathlib import Path
helper = CfnResource()
@helper.create
@helper.update
def on_create(event, __):
pass
def delete_ecr_images(repository_name):
ecr_client = boto3.client("ecr")
try:
images = ecr_client.describe_images(repositoryName=repository_name)
image_details = images["imageDetails"]
if len(image_details) > 0:
image_ids = [
{"imageDigest": i["imageDigest"]} for i in image_details
]
ecr_client.batch_delete_image(
repositoryName=repository_name, imageIds=image_ids
)
print(
"Successfully deleted {} images from repository "
"called '{}'. ".format(len(image_details), repository_name)
)
else:
print(
"Could not find any images in repository "
"called '{}' not found. "
"Skipping delete.".format(repository_name)
)
except ecr_client.exceptions.RepositoryNotFoundException:
print(
"Could not find repository called '{}' not found. "
"Skipping delete.".format(repository_name)
)
def delete_sagemaker_endpoint(endpoint_name):
sagemaker_client = boto3.client("sagemaker")
try:
sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
print(
"Successfully deleted endpoint "
"called '{}'.".format(endpoint_name)
)
except sagemaker_client.exceptions.ClientError as e:
if "Could not find endpoint" in str(e):
print(
"Could not find endpoint called '{}'. "
"Skipping delete.".format(endpoint_name)
)
else:
raise e
def delete_sagemaker_endpoint_config(endpoint_config_name):
sagemaker_client = boto3.client("sagemaker")
try:
sagemaker_client.delete_endpoint_config(
EndpointConfigName=endpoint_config_name
)
print(
"Successfully deleted endpoint configuration "
"called '{}'.".format(endpoint_config_name)
)
except sagemaker_client.exceptions.ClientError as e:
if "Could not find endpoint configuration" in str(e):
print(
"Could not find endpoint configuration called '{}'. "
"Skipping delete.".format(endpoint_config_name)
)
else:
raise e
def delete_sagemaker_model(model_name):
sagemaker_client = boto3.client("sagemaker")
try:
sagemaker_client.delete_model(ModelName=model_name)
print("Successfully deleted model called '{}'.".format(model_name))
except sagemaker_client.exceptions.ClientError as e:
if "Could not find model" in str(e):
print(
"Could not find model called '{}'. "
"Skipping delete.".format(model_name)
)
else:
raise e
@helper.delete
def on_delete(event, __):
ecr_repository = event["ResourceProperties"]["ECRRepository"]
delete_ecr_images(ecr_repository)
model_name = event["ResourceProperties"]["SageMakerModel"]
delete_sagemaker_model(model_name)
delete_sagemaker_endpoint_config(model_name)
delete_sagemaker_endpoint(model_name)
def handler(event, context):
helper(event, context)
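# Illustrative sketch (not part of the original handler): the shape of the
# CloudFormation custom-resource Delete event that on_delete consumes. The
# resource names are placeholders; feeding this to on_delete directly would
# issue real AWS API calls, so it is shown for reference only.
EXAMPLE_DELETE_EVENT = {
    'RequestType': 'Delete',
    'ResourceProperties': {
        'ECRRepository': 'example-repo',    # placeholder repository name
        'SageMakerModel': 'example-model',  # placeholder model/endpoint name
    },
}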
# -----------------------------------------------------------------------------
# File: /hospital/backend/migrations/0006_remove_time_table_title.py
# Repository: woodsman1/Hospital-Booking (no license)
# -----------------------------------------------------------------------------
# Generated by Django 3.1.7 on 2021-04-08 04:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0005_slot_booked'),
]
operations = [
migrations.RemoveField(
model_name='time_table',
name='title',
),
]
# -----------------------------------------------------------------------------
# File: /v0.4.0/aplab_tool_manager.py
# Repository: lars-frogner/Astrophotography-Lab (no license)
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.font as tkfont
import webbrowser
import os
import numpy as np
import aplab_common as apc
from aplab_common import C, MessageWindow
from aplab_image_calculator import ImageCalculator
from aplab_image_simulator import ImageSimulator
from aplab_plotting_tool import PlottingTool
from aplab_image_analyser import ImageAnalyser
from aplab_fov_calculator import FOVCalculator
class ToolManager(tk.Tk):
def __init__(self, cnum, tnum, fs):
'''Initialize application.'''
tk.Tk.__init__(self) # Run parent constructor
self.container = ttk.Frame(self) # Define main frame
self.cnum = cnum # Index if camera in camera data lists. None if no camera is selected.
self.tnum = tnum
self.toolsConfigured = False # True if the non-"Image Analyser" classes are initialized
self.title('Astrophotography Lab 0.4.0') # Set window title
self.addIcon(self) # Add window icon if it exists
if not C.is_win:
s = ttk.Style()
s.theme_use('alt')
if fs == 'auto':
fs = 8 # Smallest font size
# Adjust font size according to horizontal screen resolution
if C.sw >= 1024:
if C.sw >= 1280:
if C.sw >= 1440:
if C.sw >= 1920:
fs = 12
else:
fs = 11
else:
fs = 10
else:
fs = 9
# Set font sizes
self.tt_fs = fs - 1
self.small_fs = fs
self.medium_fs = fs + 1
self.large_fs = fs + 2
# Define fonts
self.small_font = tkfont.Font(root=self, family=C.gfont, size=self.small_fs)
self.smallbold_font = tkfont.Font(root=self, family=C.gfont, size=self.small_fs, weight='bold')
self.medium_font = tkfont.Font(root=self, family=C.gfont, size=self.medium_fs, weight='bold')
self.large_font = tkfont.Font(root=self, family=C.gfont, size=self.large_fs, weight='bold')
# Configure widget styles
style = ttk.Style(None)
style.configure('TLabel', font=self.small_font, background=C.DEFAULT_BG)
style.configure('file.TLabel', font=self.small_font, background='white')
style.configure('leftselectedfile.TLabel', font=self.small_font, background='royalblue')
style.configure('rightselectedfile.TLabel', font=self.small_font, background='crimson')
style.configure('leftrightselectedfile.TLabel', font=self.small_font, background='forestgreen')
style.configure('TFrame', background=C.DEFAULT_BG)
style.configure('files.TFrame', background='white')
style.configure('TRadiobutton', font=self.small_font, background=C.DEFAULT_BG, bordercolor=C.DEFAULT_BG, activebackground=C.DEFAULT_BG)
if C.is_win:
style.configure('TButton', font=self.small_font, background=C.DEFAULT_BG)
style.configure('TMenubutton', font=self.small_font, background=C.DEFAULT_BG)
else:
style.configure('TButton', font=self.small_font)
style.configure('TMenubutton', font=self.small_font)
self.container.pack(side='top', fill='both', expand=True) # Pack main frame
# Make rows and columns expand with window
self.container.grid_rowconfigure(0, weight=1)
self.container.grid_columnconfigure(0, weight=1)
# Define attributes to keep track of active page
self.calMode = tk.IntVar()
self.simMode = tk.IntVar()
self.plMode = tk.IntVar()
self.anMode = tk.IntVar()
self.fovMode = tk.IntVar()
self.calMode.set(0)
self.simMode.set(0)
self.plMode.set(0)
self.anMode.set(1)
self.fovMode.set(0)
# Define attributes to keep track of camera end telescope name
self.varCamName = tk.StringVar()
self.varTelName = tk.StringVar()
self.varFLMod = tk.StringVar() # Displayed focal length modifier string
# Set default values
self.varCamName.set('Camera:')
self.varTelName.set('Telescope:')
self.varFLMod.set('Focal length modifier: 1x')
self.FLModVal = 1.0 # Focal length modifier value
self.avgWL = 555.0 # Assumed average wavelength for electron flux-luminance conversion
self.TLoss = 0.1 # Assumed transmission loss of optical train
# Define attributes to keep track of current flux unit
self.lumSignalType = tk.IntVar()
self.electronSignalType = tk.IntVar()
# Define attributes to keep track of current DR unit
self.stopsDRUnit = tk.IntVar()
self.dBDRUnit = tk.IntVar()
# Define attributes to keep track of current angle unit
self.degAngleUnit = tk.IntVar()
self.dmsAngleUnit = tk.IntVar()
# Set default angle unit
self.dmsAngleUnit.set(1)
self.degAngleUnit.set(0)
# Define attributes to keep track of tooltip states
self.tooltipsOn = tk.IntVar()
self.tooltipsOn.set(1 if C.TT_STATE == 'on' else 0)
self.defaultTTState = tk.IntVar()
self.defaultTTState.set(1 if C.TT_STATE == 'on' else 0)
# Setup window menu
self.menubar = tk.Menu(self)
# "Tool" menu
menuTool = tk.Menu(self.menubar, tearoff=0)
menuTool.add_checkbutton(label='Image Analyser', variable=self.anMode,
command=self.enterAnFrame)
menuTool.add_checkbutton(label='Image Calculator', variable=self.calMode,
command=self.enterCalFrame)
menuTool.add_checkbutton(label='Image Simulator', variable=self.simMode,
command=self.enterSimFrame)
menuTool.add_checkbutton(label='Plotting Tool', variable=self.plMode,
command=self.enterPlotFrame)
menuTool.add_checkbutton(label='FOV Calculator', variable=self.fovMode,
command=self.enterFOVFrame)
self.menubar.add_cascade(label='Tool', menu=menuTool)
# "File" menu
self.menuFile = tk.Menu(self.menubar, tearoff=0)
self.menuFile.add_command(label='Save image data', command=self.saveData)
self.menuFile.add_command(label='Load image data', command=self.loadData)
self.menuFile.add_command(label='Manage image data', command=self.manageData)
self.menubar.add_cascade(label='File', menu=self.menuFile)
# "Settings" menu
self.menuSettings = tk.Menu(self.menubar, tearoff=0)
# "Camera" submenu of "Settings"
self.menuCamera = tk.Menu(self.menubar, tearoff=0)
self.menuCamera.add_command(label='Change camera', command=self.changeCamera)
self.menuCamera.add_command(label='Modify camera parameters', command=self.modifyCamParams)
self.menuSettings.add_cascade(label='Camera', menu=self.menuCamera)
# "Telescope" submenu of "Settings"
self.menuTelescope = tk.Menu(self.menubar, tearoff=0)
self.menuTelescope.add_command(label='Change telescope', command=self.changeTelescope)
self.menuTelescope.add_command(label='Modify telescope parameters', command=self.modifyTelParams)
self.menuSettings.add_cascade(label='Telescope', menu=self.menuTelescope)
# Add FL modifier command
self.menuSettings.add_command(label='Change FL modifier', command=self.changeFLMod)
self.menuSettings.add_separator()
# "Signal quantity" submenu of "Settings"
menuSignalType = tk.Menu(self.menubar, tearoff=0)
menuSignalType.add_checkbutton(label='Luminance', variable=self.lumSignalType,
command=self.setLumSignalType)
menuSignalType.add_checkbutton(label='Electron flux', variable=self.electronSignalType,
command=self.setElectronSignalType)
self.menuSettings.add_cascade(label='Signal quantity', menu=menuSignalType)
# "Dynamic range unit" submenu of "Settings"
menuDRUnit = tk.Menu(self.menubar, tearoff=0)
menuDRUnit.add_checkbutton(label='stops', variable=self.stopsDRUnit,
command=self.setStopsDRUnit)
menuDRUnit.add_checkbutton(label='dB', variable=self.dBDRUnit,
command=self.setdBDRUnit)
self.menuSettings.add_cascade(label='Dynamic range unit', menu=menuDRUnit)
# "Angle unit" submenu of "Settings"
menuAngleUnit = tk.Menu(self.menubar, tearoff=0)
menuAngleUnit.add_checkbutton(label='deg/arcmin/arcsec', variable=self.dmsAngleUnit,
command=self.setDMSAngleUnit)
menuAngleUnit.add_checkbutton(label='degrees', variable=self.degAngleUnit,
command=self.setDegAngleUnit)
self.menuSettings.add_cascade(label='Angle unit', menu=menuAngleUnit)
self.menuSettings.add_separator()
# "Tooltips" submenu of "Settings"
self.menuTT = tk.Menu(self.menubar, tearoff=0)
self.menuTT.add_command(label='Toggle tooltips', command=self.toggleTooltips)
if self.tooltipsOn.get():
self.menuTT.add_command(label='Turn off as default', command=self.toogleDefaultTTState)
else:
self.menuTT.add_command(label='Turn on as default', command=self.toogleDefaultTTState)
self.menuSettings.add_cascade(label='Tooltips', menu=self.menuTT)
# Add font size command
self.menuSettings.add_command(label='Change font size', command=self.changeFS)
self.menubar.add_cascade(label='Settings', menu=self.menuSettings)
# "Help" menu
self.menuHelp = tk.Menu(self.menubar, tearoff=0)
self.menuHelp.add_command(label='User guide', command=self.showUserGuide)
self.menubar.add_cascade(label='Help', menu=self.menuHelp)
# Show menubar
self.config(menu=self.menubar)
# Some menu items are disabled on startup
self.menuFile.entryconfig(0, state='disabled')
self.menuFile.entryconfig(1, state='disabled')
# Dictionary to hold all frames
self.frames = {}
# Initialize Message Window class
frameMsg = MessageWindow(self.container, self)
self.frames[MessageWindow] = frameMsg
frameMsg.grid(row=0, column=0, sticky='NSEW')
# Initialize Image Analyser class
frameAn = ImageAnalyser(self.container, self)
self.frames[ImageAnalyser] = frameAn
frameAn.grid(row=0, column=0, sticky='NSEW')
# Resize and recenter window
apc.setupWindow(self, *C.AN_WINDOW_SIZE)
# If no camera is active
if self.cnum is None:
# Run camera selection method
if not self.changeCamera():
self.destroy()
return None
else:
self.isDSLR = C.TYPE[self.cnum] == 'DSLR' # Set new camera type
self.hasQE = C.QE[self.cnum][0] != 'NA' # Set new QE state
self.noData = C.GAIN[self.cnum][0][0] == 0 # Determine if camera data exists
self.varCamName.set('Camera: ' + C.CNAME[self.cnum])
# Show relevant camera type widgets
if self.isDSLR:
frameAn.radioCCDm.grid_forget()
frameAn.radioCCDc.grid_forget()
frameAn.labelDSLR.grid(row=0, column=0, columnspan=2, sticky='EW')
else:
frameAn.labelDSLR.grid_forget()
frameAn.varCCDType.set('mono')
frameAn.radioCCDm.grid(row=0, column=0)
frameAn.radioCCDc.grid(row=0, column=1)
# If no telescope is active
if self.tnum is None:
# Run camera selection method
if not self.changeTelescope():
self.destroy()
return None
else:
self.varTelName.set('Telescope: ' + C.TNAME[self.tnum])
# Image scale for the selected camera-telescope combination
self.ISVal = np.arctan2(C.PIXEL_SIZE[self.cnum][0]*1e-3,
C.FOCAL_LENGTH[self.tnum][0]*self.FLModVal)*180*3600/np.pi
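# Worked example of the image-scale formula above (values are illustrative
# assumptions): a 4.3 um pixel on an 800 mm focal length at 1x gives
# IS = arctan(4.3e-3 mm / 800 mm) * 180 * 3600 / pi ~ 1.11 arcsec/pixel.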
# Set default flux unit
self.lumSignalType.set(self.hasQE)
self.electronSignalType.set(not self.hasQE)
# Set default DR unit
self.stopsDRUnit.set(1)
self.dBDRUnit.set(0)
# Setup frames and add to dictionary
frameCal = ImageCalculator(self.container, self)
frameSim = ImageSimulator(self.container, self)
framePlot = PlottingTool(self.container, self)
frameFOV = FOVCalculator(self.container, self)
self.frames[ImageCalculator] = frameCal
self.frames[ImageSimulator] = frameSim
self.frames[PlottingTool] = framePlot
self.frames[FOVCalculator] = frameFOV
frameCal.grid(row=0, column=0, sticky='NSEW')
frameSim.grid(row=0, column=0, sticky='NSEW')
framePlot.grid(row=0, column=0, sticky='NSEW')
frameFOV.grid(row=0, column=0, sticky='NSEW')
self.toolsConfigured = True
# Show start page
self.showFrame(ImageAnalyser)
self.focus_force()
def showFrame(self, page):
'''Shows the given frame.'''
self.frames[page].tkraise()
def showUserGuide(self):
webbrowser.open('http://lars-frogner.github.io/Astrophotography-Lab/userguide.html')
def enterPlotFrame(self):
'''Shows the Plotting Tool frame.'''
# Do nothing if already in plotting frame
if not self.calMode.get() and not self.simMode.get() \
and not self.anMode.get() and not self.fovMode.get():
self.plMode.set(1) # Keep state from changing
return None
if self.noData:
self.frames[MessageWindow].varHeaderLabel.set('Plotting Tool')
self.showFrame(MessageWindow)
else:
self.frames[PlottingTool].varMessageLabel.set('') # Clear message label
self.showFrame(PlottingTool) # Show plot frame
# Close simulation window if it is open
try:
self.frames[ImageSimulator].topCanvas.destroy()
except:
pass
# Resize and re-center window
apc.setupWindow(self, *C.PLOT_WINDOW_SIZE)
# Configure menu items
self.menuFile.entryconfig(0, state='disabled')
self.menuFile.entryconfig(1, state='normal')
self.plMode.set(1)
self.calMode.set(0)
self.simMode.set(0)
self.anMode.set(0)
self.fovMode.set(0)
def enterCalFrame(self):
'''Shows the Image Calculator frame.'''
# Do nothing if already in calculator frame
if not self.simMode.get() and not self.plMode.get() \
and not self.anMode.get() and not self.fovMode.get():
self.calMode.set(1) # Keep state from changing
return None
if self.noData:
self.frames[MessageWindow].varHeaderLabel.set('Image Calculator')
self.showFrame(MessageWindow)
else:
self.frames[ImageCalculator].varMessageLabel.set('') # Clear message label
self.showFrame(ImageCalculator) # Show calculator frame
# Close simulation window if it is open
try:
self.frames[ImageSimulator].topCanvas.destroy()
except:
pass
# Resize and re-center window
apc.setupWindow(self, *C.CAL_WINDOW_SIZE)
# Configure menu items
self.menuFile.entryconfig(0, state='normal')
self.menuFile.entryconfig(1, state='normal')
self.calMode.set(1)
self.simMode.set(0)
self.plMode.set(0)
self.anMode.set(0)
self.fovMode.set(0)
def enterSimFrame(self):
'''Shows the Image Simulator frame.'''
# Do nothing if already in simulator frame
if not self.calMode.get() and not self.plMode.get() \
and not self.anMode.get() and not self.fovMode.get():
self.simMode.set(1) # Keep state from changing
return None
if self.noData:
self.frames[MessageWindow].varHeaderLabel.set('Image Simulator')
self.showFrame(MessageWindow)
else:
self.frames[ImageSimulator].varMessageLabel.set('') # Clear message label
self.showFrame(ImageSimulator) # Show simulator frame
# Resize and re-center window
apc.setupWindow(self, *C.SIM_WINDOW_SIZE)
# Configure menu items
self.menuFile.entryconfig(0, state='disabled')
self.menuFile.entryconfig(1, state='normal')
self.calMode.set(0)
self.simMode.set(1)
self.plMode.set(0)
self.anMode.set(0)
self.fovMode.set(0)
def enterAnFrame(self):
'''Shows the Image Analyser frame.'''
# Do nothing if already in Analyser frame
if not self.calMode.get() and not self.simMode.get() \
and not self.plMode.get() and not self.fovMode.get():
self.anMode.set(1) # Keep state from changing
return None
self.frames[ImageAnalyser].varMessageLabel.set('') # Clear message label
self.showFrame(ImageAnalyser)
# Close simulation window if it is open
try:
self.frames[ImageSimulator].topCanvas.destroy()
except:
pass
# Resize and re-center window
apc.setupWindow(self, *C.AN_WINDOW_SIZE)
# Configure menu items
self.menuFile.entryconfig(0, state='disabled')
self.menuFile.entryconfig(1, state='disabled')
self.calMode.set(0)
self.simMode.set(0)
self.plMode.set(0)
self.anMode.set(1)
self.fovMode.set(0)
def enterFOVFrame(self):
'''Shows the FOV Calculator frame.'''
# Do nothing if already in FOV frame
if not self.calMode.get() and not self.simMode.get() \
and not self.plMode.get() and not self.anMode.get():
self.fovMode.set(1) # Keep state from changing
return None
fovframe = self.frames[FOVCalculator]
fovframe.varMessageLabel.set('') # Clear message label
self.showFrame(FOVCalculator)
# Close simulation window if it is open
try:
self.frames[ImageSimulator].topCanvas.destroy()
except:
pass
# Resize and re-center window
apc.setupWindow(self, *C.FOV_WINDOW_SIZE)
fovframe.update()
fovframe.selectObject(fovframe.obj_idx)
fovframe.setFOV()
# Configure menu items
self.menuFile.entryconfig(0, state='disabled')
self.menuFile.entryconfig(1, state='disabled')
self.calMode.set(0)
self.simMode.set(0)
self.plMode.set(0)
self.anMode.set(0)
self.fovMode.set(1)
def saveData(self):
'''Creates window with options for saving image data.'''
frame = self.frames[ImageCalculator]
# Show error if no image data is calculated
if not frame.dataCalculated:
frame.varMessageLabel.set('Image data must be calculated before saving.')
frame.labelMessage.configure(foreground='crimson')
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
return None
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
self.varKeywords = tk.StringVar()
self.varSaveError = tk.StringVar()
# Construct saving window on top
self.topSave = tk.Toplevel(background=C.DEFAULT_BG)
self.topSave.title('Save image data')
self.addIcon(self.topSave)
apc.setupWindow(self.topSave, 300, 145)
labelSave = ttk.Label(self.topSave, text='Input image keywords\n(target, location, date etc.)')
entryKeywords = ttk.Entry(self.topSave, textvariable=self.varKeywords, font=self.small_font,
background=C.DEFAULT_BG, width=35)
buttonSave = ttk.Button(self.topSave, text='Save', command=self.executeSave)
labelSaveError = ttk.Label(self.topSave, textvariable=self.varSaveError)
labelSave.pack(side='top', pady=10*C.scsy, expand=True)
entryKeywords.pack(side='top', pady=5*C.scsy, expand=True)
buttonSave.pack(side='top', pady=6*C.scsy, expand=True)
labelSaveError.pack(side='top', expand=True)
entryKeywords.focus()
self.wait_window(self.topSave)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
except:
pass
def executeSave(self):
'''Saves image data to text file.'''
keywords = self.varKeywords.get() # Get user inputted keyword string
if not ',' in keywords and keywords != '':
self.topSave.destroy() # Close window
# Append image data to the text file
file = open('imagedata.txt', 'a')
frame = self.frames[ImageCalculator]
file.write('{},{},{:d},{:d},{:g},{:d},{:g},{:g},{:g},{:g},{:d},{:.3f},{:.3f},{:.3f},0\n'.format(C.CNAME[self.cnum],
keywords,
frame.gain_idx,
frame.rn_idx,
frame.exposure,
frame.use_dark,
frame.dark_input,
frame.bgn,
frame.bgl,
frame.target,
self.hasQE,
frame.df,
(self.convSig(frame.sf, True) if self.hasQE else frame.sf),
(self.convSig(frame.tf, True) if self.hasQE else frame.tf)))
file.close()
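# Example of a resulting line in imagedata.txt (illustrative values only):
# Canon EOS 550D,M31 Sep 2016,2,2,300,1,0.12,8.5,450,120,1,0.035,1.250,0.410,0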
frame.varMessageLabel.set('Image data saved.')
frame.labelMessage.configure(foreground='navy')
# Show error message if the keyword contains a ","
elif ',' in keywords:
self.varSaveError.set('The keyword cannot contain a ",".')
# Show error message if user hasn't inputted a save keyword
else:
self.varSaveError.set('Please insert a keyword.')
def loadData(self):
'''Creates a window with options for loading image data, and reads the image data file.'''
self.varLoadData = tk.StringVar()
frame = self.currentFrame()
data = [] # Holds image data
names = [] # Holds camera name
keywords = [] # Holds save ID
# Show error message if image data file doesn't exist
try:
file = open('imagedata.txt', 'r')
except IOError:
frame.varMessageLabel.set('No image data to load.')
frame.labelMessage.configure(foreground='crimson')
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
return None
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
# Read file
lines = file.read().split('\n')
file.close()
# Show error message if image data file is empty
if len(lines) == 1:
frame.varMessageLabel.set('No image data to load.')
frame.labelMessage.configure(foreground='crimson')
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
return None
# Get image data from file and store in lists
for line in lines[:-1]:
elements = line.split(',')
name = elements[0]
keyword = elements[1]
names.append(name)
keywords.append(keyword + ' (' + name + ')')
data.append(elements[2:])
self.varLoadData.set(keywords[0])
# Create loading window on top
self.topLoad = tk.Toplevel(background=C.DEFAULT_BG)
self.topLoad.title('Load image data')
apc.setupWindow(self.topLoad, 300, 135)
self.addIcon(self.topLoad)
self.topLoad.focus_force()
labelLoad = ttk.Label(self.topLoad, text='Choose image data to load:', anchor='center')
optionLoad = ttk.OptionMenu(self.topLoad, self.varLoadData, None, *keywords)
buttonLoad = ttk.Button(self.topLoad, text='Load',
command=lambda: self.executeLoad(names, keywords, data))
labelLoad.pack(side='top', pady=10*C.scsy, expand=True)
optionLoad.pack(side='top', pady=6*C.scsy, expand=True)
buttonLoad.pack(side='top', pady=14*C.scsy, expand=True)
self.wait_window(self.topLoad)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
except:
pass
def executeLoad(self, names, keywords, datalist):
'''Gets relevant loaded data and inserts to relevant widgets.'''
frame = self.currentFrame()
datanum = keywords.index(self.varLoadData.get()) # Index of data to load
name = names[datanum] # Camera name for selected save
data = datalist[datanum] # Data from selected save
# If image data is from the same camera model
if name == C.CNAME[self.cnum]:
# If Image Calculator is the active frame
if self.calMode.get():
# Set loaded data in calculator frame
frame.gain_idx = int(data[0])
frame.rn_idx = int(data[1])
frame.varISO.set(0 if not self.isDSLR else C.ISO[self.cnum][frame.gain_idx])
frame.varGain.set(C.GAIN[self.cnum][0][frame.gain_idx])
frame.varRN.set(C.RN[self.cnum][0][frame.rn_idx])
frame.varExp.set(data[2])
frame.varUseDark.set(int(data[3]))
frame.varDark.set(data[4] if int(data[3]) else '')
frame.varBGN.set(data[5] if self.isDSLR else '')
frame.varBGL.set(data[6])
frame.varTarget.set(data[7] if float(data[7]) > 0 else '')
frame.dataCalculated = False # Previously calculated data is no longer valid
frame.toggleDarkInputMode() # Change to the dark input mode that was used for the data
frame.updateSensorLabels() # Update sensor info labels in case the ISO has changed
frame.emptyInfoLabels() # Clear other info labels
frame.varMessageLabel.set('Image data loaded.')
frame.labelMessage.configure(foreground='navy')
# If Image Simulator is the active frame
elif self.simMode.get():
# Set loaded data in simulator frame
frame.gain_idx = int(data[0])
frame.rn_idx = int(data[1])
frame.varISO.set(0 if not self.isDSLR else C.ISO[self.cnum][frame.gain_idx])
frame.varGain.set(C.GAIN[self.cnum][0][frame.gain_idx])
frame.varRN.set(C.RN[self.cnum][0][frame.rn_idx])
frame.varExp.set(data[2])
frame.varDF.set(data[9] if float(data[9]) > 0 else 0)
frame.varSF.set(data[10])
frame.varTF.set(data[11] if float(data[11]) > 0 else 0)
frame.varLF.set(data[12] if float(data[12]) > 0 else '')
frame.varSubs.set(1)
if int(data[8]):
self.setLumSignalType()
else:
self.setElectronSignalType()
frame.dataCalculated = False # Previously calculated data is no longer valid
frame.updateSensorLabels() # Update sensor info labels in case the ISO has changed
frame.emptyInfoLabels() # Clear other info labels
frame.varMessageLabel.set('Image data loaded.' if int(data[3]) else \
'Note: loaded signal data does not contain a separate value for dark current.')
frame.labelMessage.configure(foreground='navy')
# If Plotting Tool is the active frame
else:
# Set loaded data in plot frame
frame.gain_idx = int(data[0])
frame.rn_idx = int(data[1])
frame.varISO.set(0 if not self.isDSLR else C.ISO[self.cnum][frame.gain_idx])
frame.varGain.set(C.GAIN[self.cnum][0][frame.gain_idx])
frame.varRN.set(C.RN[self.cnum][0][frame.rn_idx])
frame.varExp.set(data[2])
frame.varDF.set(data[9] if float(data[9]) > 0 else 0)
frame.varSF.set(data[10])
frame.varTF.set(data[11] if float(data[11]) > 0 else 0)
frame.varLF.set(data[12] if float(data[12]) > 0 else '')
if int(data[8]):
self.setLumSignalType()
else:
self.setElectronSignalType()
frame.ax.cla() # Clear plot
frame.varMessageLabel.set('Image data loaded.' if int(data[3]) else \
'Note: loaded signal data does not contain a separate value for dark current.')
frame.labelMessage.configure(foreground='navy')
# If image data is from another camera model:
# If Image Simulator or Plotting Tool is the active frame
elif (not self.calMode.get()) and int(data[8]) and self.hasQE:
# Set signal data
frame.varSF.set(data[10])
frame.varTF.set(data[11])
frame.varLF.set(data[12] if float(data[12]) > 0 else '')
self.setLumSignalType()
if self.simMode.get():
frame.dataCalculated = False # Previously calculated data is no longer valid
frame.emptyInfoLabels() # Clear info labels
else:
frame.ax.cla() # Clear plot
frame.varMessageLabel.set('Signal data loaded.')
frame.labelMessage.configure(foreground='navy')
# If Image Calculator is the active frame
else:
frame.varMessageLabel.set('Image data is from another camera model. No data loaded.')
frame.labelMessage.configure(foreground='crimson')
self.topLoad.destroy() # Close loading window
def manageData(self):
'''Creates a window with options for renaming saves or deleting saved data.'''
self.varManageData = tk.StringVar()
frame = self.currentFrame()
data = [] # Holds image data
names = [] # Holds camera name
keywords = [] # Holds save keyword
display_keywords = [] # Holds save name to display
# Show error message if data file doesn't exist
try:
file = open('imagedata.txt', 'r')
except IOError:
frame.varMessageLabel.set('No image data to manage.')
frame.labelMessage.configure(foreground='crimson')
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
return None
# Read data file
lines = file.read().split('\n')
file.close()
# Show error message if data file is empty
if len(lines) == 1:
frame.varMessageLabel.set('No image data to manage.')
frame.labelMessage.configure(foreground='crimson')
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
return None
# Get data from file and store in lists
for line in lines[:-1]:
elements = line.split(',')
name = elements[0]
keyword = elements[1]
names.append(name)
keywords.append(keyword)
display_keywords.append(keyword + ' (' + name + ')')
data.append(elements[2:])
self.varManageData.set(display_keywords[0])
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
# Setup managing window
self.topManage = tk.Toplevel(background=C.DEFAULT_BG)
self.topManage.title('Manage image data')
self.addIcon(self.topManage)
apc.setupWindow(self.topManage, 300, 170)
self.topManage.focus_force()
labelManage = ttk.Label(self.topManage, text='Choose image data:',anchor='center')
optionManage = ttk.OptionMenu(self.topManage, self.varManageData, None, *display_keywords)
frameLow = ttk.Frame(self.topManage)
labelManage.pack(side='top', pady=10*C.scsy, expand=True)
optionManage.pack(side='top', pady=6*C.scsy, expand=True)
frameLow.pack(side='top', pady=14*C.scsy, expand=True)
buttonRename = ttk.Button(frameLow, text='Rename',
command=lambda: self.executeManage(names,
keywords,
display_keywords,
data))
buttonDelete = ttk.Button(frameLow, text='Delete',
command=lambda: self.executeManage(names,
keywords,
display_keywords,
data,
mode='delete'))
buttonAddLim = ttk.Button(frameLow, text='Add limit signal',
command=lambda: self.executeManage(names,
keywords,
display_keywords,
data,
mode='add'))
buttonRename.grid(row=0, column=0, padx=(0, 5*C.scsx))
buttonDelete.grid(row=0, column=1)
buttonAddLim.grid(row=1, column=0, columnspan=2, pady=(5*C.scsy, 0))
if not self.calMode.get(): buttonAddLim.configure(state='disabled')
self.wait_window(self.topManage)
try:
self.topRename.destroy()
except:
pass
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
except:
pass
def executeManage(self, names, keywords, display_keywords, datafull, mode='rename'):
'''Deletes selected data, or creates window for renaming selected save.'''
linenum = display_keywords.index(self.varManageData.get()) # Index of relevant data
if mode == 'delete':
file = open('imagedata.txt', 'w')
# Rewrite the data file without the line containing the data selected for deleting
for i in range(linenum):
file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n' \
.format(*tuple([names[i]] + [keywords[i]] + datafull[i])))
if linenum < len(keywords):
for i in range(linenum+1, len(keywords)):
file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n' \
.format(*tuple([names[i]] + [keywords[i]] + datafull[i])))
file.close()
# Go back to an updated managing window
self.topManage.destroy()
self.manageData()
self.currentFrame().varMessageLabel.set('Image data deleted.')
self.currentFrame().labelMessage.configure(foreground='navy')
elif mode == 'add':
calframe = self.frames[ImageCalculator]
if C.CNAME[self.cnum] != names[linenum]:
calframe.varMessageLabel.set(\
'Cannot add limit signal to a file saved from another camera.')
calframe.labelMessage.configure(foreground='crimson')
return None
if not calframe.dataCalculated or calframe.tf == 0:
calframe.varMessageLabel.set(\
'Target signal must be calculated before it can be saved as limit signal.')
calframe.labelMessage.configure(foreground='crimson')
return None
file = open('imagedata.txt', 'w')
for i in range(linenum):
file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n' \
.format(*tuple([names[i]] + [keywords[i]] + datafull[i])))
file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{:.3f}\n' \
.format(*tuple([names[linenum]] + [keywords[linenum]] + datafull[linenum][:-1] \
+ [self.convSig(calframe.tf, True) if self.hasQE else calframe.tf])))
if linenum < len(keywords):
for i in range(linenum+1, len(keywords)):
file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n' \
.format(*tuple([names[i]] + [keywords[i]] + datafull[i])))
file.close()
# Go back to an updated managing window
self.topManage.destroy()
self.currentFrame().varMessageLabel.set('Limit signal value saved to the image data file.')
self.currentFrame().labelMessage.configure(foreground='navy')
else:
# Create window for inputting new save name
self.varNewname = tk.StringVar()
self.varNewnameError = tk.StringVar()
self.varNewname.set(keywords[linenum])
self.topRename = tk.Toplevel(background=C.DEFAULT_BG)
self.topRename.title('Insert new name')
self.addIcon(self.topRename)
apc.setupWindow(self.topRename, 300, 135)
self.topRename.focus_force()
labelRename = ttk.Label(self.topRename, text='Insert new name:', anchor='center')
entryRename = ttk.Entry(self.topRename, textvariable=self.varNewname, font=self.small_font,
background=C.DEFAULT_BG, width=35)
buttonRename = ttk.Button(self.topRename, text='Rename',
command=lambda: self.executeRename(names,
keywords,
datafull,
linenum))
labelRenameError = ttk.Label(self.topRename, textvariable=self.varNewnameError)
labelRename.pack(side='top', pady=10*C.scsy, expand=True)
entryRename.pack(side='top', pady=5*C.scsy, expand=True)
buttonRename.pack(side='top', pady=6*C.scsy, expand=True)
labelRenameError.pack(side='top', expand=True)
def executeRename(self, names, keywords, datafull, linenum):
'''Renames the selected data save.'''
if self.varNewname.get() != '' and not ',' in self.varNewname.get():
file = open('imagedata.txt', 'w')
# Rewrites the data file, using the new keyword for the selected data save
for i in range(linenum):
file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n' \
.format(*tuple([names[i]] + [keywords[i]] + datafull[i])))
file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n' \
.format(*tuple([names[linenum]] + [self.varNewname.get()] + datafull[linenum])))
if linenum < len(keywords):
for i in range(linenum+1, len(keywords)):
file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n' \
.format(*tuple([names[i]] + [keywords[i]] + datafull[i])))
file.close()
# Go back to an updated managing window
self.topRename.destroy()
self.topManage.destroy()
self.manageData()
self.currentFrame().varMessageLabel.set('Image data renamed.')
self.currentFrame().labelMessage.configure(foreground='navy')
# Show an error message if the new name contains a ","
elif ',' in self.varNewname.get():
self.varNewnameError.set('The new name cannot contain a ",".')
# Show an error message if the new name is an empty string
else:
self.varNewnameError.set('Please insert a new name.')
return None
def changeCamera(self, restrict='no'):
'''Create window with list of camera models.'''
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
# Setup window
self.topCamera = tk.Toplevel(background=C.DEFAULT_BG)
self.topCamera.title('Choose camera')
self.addIcon(self.topCamera)
apc.setupWindow(self.topCamera, 300, 330)
self.topCamera.focus_force()
labelCamera = tk.Label(self.topCamera, text='Choose camera:', font=self.medium_font, background=C.DEFAULT_BG)
frameSelection = ttk.Frame(self.topCamera)
labelCamera.pack(side='top', pady=(18*C.scsy, 8*C.scsy), expand=True)
frameSelection.pack(side='top', pady=10*C.scsy, expand=True)
scrollbarCamera = ttk.Scrollbar(frameSelection)
self.listboxCamera = tk.Listbox(frameSelection, height=8, width=28, font=self.small_font,
selectmode='single', yscrollcommand=scrollbarCamera.set)
scrollbarCamera.pack(side='right', fill='y')
self.listboxCamera.pack(side='right', fill='both')
self.listboxCamera.focus_force()
# Insert camera names into listbox
if restrict == 'no':
for i in range(len(C.CNAME)):
self.listboxCamera.insert(i, C.CNAME[i])
elif restrict == 'DSLR':
for i in range(len(C.CNAME)):
if C.TYPE[i] == 'DSLR': self.listboxCamera.insert(i, C.CNAME[i])
elif restrict == 'CCD':
for i in range(len(C.CNAME)):
if C.TYPE[i] == 'CCD': self.listboxCamera.insert(i, C.CNAME[i])
scrollbarCamera.config(command=self.listboxCamera.yview) # Add scrollbar to listbox
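# Preselect the currently active camera, if any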
if self.cnum is not None: self.listboxCamera.activate(self.cnum)
self.varSetDefaultC = tk.IntVar()
frameDefault = ttk.Frame(self.topCamera)
buttonChange = ttk.Button(self.topCamera, text='OK', command=self.executeCamChange)
buttonAddNew = ttk.Button(self.topCamera, text='Add new camera', command=self.addNewCamera)
frameDefault.pack(side='top', expand=True)
buttonChange.pack(side='top', pady=(10*C.scsy, 5*C.scsy), expand=True)
buttonAddNew.pack(side='top', pady=(0, 25*C.scsy), expand=True)
labelDefault = ttk.Label(frameDefault, text='Use as default:')
checkbuttonDefault = tk.Checkbutton(frameDefault, highlightbackground=C.DEFAULT_BG, background=C.DEFAULT_BG, activebackground=C.DEFAULT_BG, variable=self.varSetDefaultC)
labelDefault.grid(row=0, column=0)
checkbuttonDefault.grid(row=0, column=1)
self.cancelled = True
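# Remains True if the window is closed without confirming a selection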
self.wait_window(self.topCamera)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
except:
pass
# Return true if the window wasn't closed with the X
return not self.cancelled
def executeCamChange(self):
'''Configures widgets according to the selected new camera.'''
self.cancelled = False
self.cnum = C.CNAME.index(self.listboxCamera.get('active')) # Get index of new camera
# Calculate new image scale value if a telescope is selected
if self.tnum is not None:
self.ISVal = np.arctan2(C.PIXEL_SIZE[self.cnum][0]*1e-3,
C.FOCAL_LENGTH[self.tnum][0]*self.FLModVal)*180*3600/np.pi
# Sets the new camera name in bottom line in camera data file if "Set as default" is selected
if self.varSetDefaultC.get():
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'r')
lines = file.read().split('\n')
file.close()
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'w')
for line in lines[:-1]:
file.write(line + '\n')
file.write('Camera: ' + C.CNAME[self.cnum] + ',' + ','.join(lines[-1].split(',')[1:]))
file.close()
self.varCamName.set('Camera: ' + C.CNAME[self.cnum])
anframe = self.frames[ImageAnalyser]
self.isDSLR = C.TYPE[self.cnum] == 'DSLR' # Set new camera type
self.hasQE = C.QE[self.cnum][0] != 'NA' # Set new QE state
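# A gain value of 0 in the data file marks a camera without measured sensor data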
self.noData = C.GAIN[self.cnum][0][0] == 0
if not self.hasQE: self.setElectronSignalType()
if self.isDSLR:
anframe.radioCCDm.grid_forget()
anframe.radioCCDc.grid_forget()
anframe.labelDSLR.grid(row=0, column=0, columnspan=2, sticky='EW')
else:
anframe.labelDSLR.grid_forget()
anframe.varCCDType.set('mono')
anframe.radioCCDm.grid(row=0, column=0)
anframe.radioCCDc.grid(row=0, column=1)
if not self.toolsConfigured:
self.topCamera.destroy()
anframe.varMessageLabel.set('Camera selected.')
anframe.labelMessage.configure(foreground='navy')
return None
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
plotframe = self.frames[PlottingTool]
fovframe = self.frames[FOVCalculator]
# Reset frames to original states
calframe.setDefaultValues()
calframe.toggleDarkInputMode()
if self.tooltipsOn.get():
apc.createToolTip(calframe.labelDark, C.TTDarkNoise if self.isDSLR else C.TTDarkLevel, self.tt_fs)
apc.createToolTip(calframe.entryDark, C.TTDarkNoise if self.isDSLR else C.TTDarkLevel, self.tt_fs)
simframe.setDefaultValues()
plotframe.setDefaultValues()
anframe.clearFiles()
# Update widgets
calframe.reconfigureNonstaticWidgets()
simframe.reconfigureNonstaticWidgets()
plotframe.reconfigureNonstaticWidgets()
plotframe.toggleActiveWidgets(plotframe.plotList[0])
fovframe.update()
fovframe.selectObject(fovframe.start_idx)
fovframe.setFOV()
if self.calMode.get():
self.showFrame(MessageWindow if self.noData else ImageCalculator)
elif self.simMode.get():
self.showFrame(MessageWindow if self.noData else ImageSimulator)
elif self.plMode.get():
self.showFrame(MessageWindow if self.noData else PlottingTool)
self.topCamera.destroy() # Close change camera window
self.currentFrame().varMessageLabel.set('Camera changed.')
self.currentFrame().labelMessage.configure(foreground='navy')
def addNewCamera(self):
'''Shows a window with options for adding a new camera.'''
varCamName = tk.StringVar()
varCamType = tk.StringVar()
varPS = tk.StringVar()
varRX = tk.IntVar()
varRY = tk.IntVar()
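# Blank the resolution entries; a failed get() on empty input is caught below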
varRX.set('')
varRY.set('')
varMessageLabel = tk.StringVar()
def executeAddNew():
'''Adds the new camera to "cameradata.txt".'''
# Get inputted name of new camera
name = varCamName.get()
type = varCamType.get()
try:
ps = float(varPS.get())
except:
varMessageLabel.set('Invalid pixel size input.')
return None
try:
rx = varRX.get()
except:
varMessageLabel.set('Invalid resolution input.')
return None
try:
ry = varRY.get()
except:
varMessageLabel.set('Invalid resolution input.')
return None
if name == '' or ',' in name:
varMessageLabel.set('Invalid camera name input.')
return None
if name in C.CNAME:
varMessageLabel.set('This camera is already added.')
return None
# Read camera data file
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'r')
lines = file.read().split('\n')
file.close()
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'w')
file.write(lines[0])
# Create new line in cameradata.txt
file.write('\n' + '\n'.join(lines[1:-1]))
file.write('\n{},{},0,0,0,0,0,NA,{:g}*,{:d}*,{:d}*'.format(name, type, ps, rx, ry))
if type == 'DSLR': file.write(',0')
file.write('\n' + lines[-1])
file.close()
# Sort camera list
idx = apc.sortDataList(name, 'aplab_data{}cameradata.txt'.format(os.sep))
# Insert camera name and type to camera info lists
C.CNAME.insert(idx, name)
C.TYPE.insert(idx, type)
C.GAIN.insert(idx, [np.array([0]), np.array([1])])
C.RN.insert(idx, [np.array([0]), np.array([1])])
C.SAT_CAP.insert(idx, [[0], [1]])
C.BLACK_LEVEL.insert(idx, [[0], [1]])
C.WHITE_LEVEL.insert(idx, [[0], [1]])
C.QE.insert(idx, ['NA', 1])
C.PIXEL_SIZE.insert(idx, [ps, 1])
C.RES_X.insert(idx, [rx, 1])
C.RES_Y.insert(idx, [ry, 1])
C.ISO.insert(idx, (np.array([0])))
self.cancelled = False
self.topAddNewCam.destroy()
self.topCamera.destroy()
self.changeCamera()
# Setup window
self.topAddNewCam = tk.Toplevel(background=C.DEFAULT_BG)
self.topAddNewCam.title('Add new camera')
self.addIcon(self.topAddNewCam)
apc.setupWindow(self.topAddNewCam, 300, 200)
self.topAddNewCam.focus_force()
varCamType.set('DSLR')
ttk.Label(self.topAddNewCam, text='Please provide requested camera information:')\
.pack(side='top', pady=(15*C.scsy, 10*C.scsy), expand=True)
frameInput = ttk.Frame(self.topAddNewCam)
frameInput.pack(side='top', pady=(7*C.scsy, 10*C.scsy), expand=True)
ttk.Label(frameInput, text='Camera type: ').grid(row=0, column=0, sticky='W')
ttk.OptionMenu(frameInput, varCamType, None, 'DSLR', 'CCD').grid(row=0, column=1)
ttk.Label(frameInput, text='Camera name: ').grid(row=1, column=0, sticky='W')
ttk.Entry(frameInput, textvariable=varCamName, font=self.small_font,
background=C.DEFAULT_BG, width=20).grid(row=1, column=1)
ttk.Label(frameInput, text=u'Pixel size (in \u03bcm): ').grid(row=2, column=0, sticky='W')
ttk.Entry(frameInput, textvariable=varPS, font=self.small_font,
background=C.DEFAULT_BG, width=6).grid(row=2, column=1, sticky='W')
ttk.Label(frameInput, text='Resolution: ').grid(row=3, column=0, sticky='W')
frameRes = ttk.Frame(frameInput)
frameRes.grid(row=3, column=1, sticky='W')
ttk.Entry(frameRes, textvariable=varRX, font=self.small_font,
background=C.DEFAULT_BG, width=6).pack(side='left')
ttk.Label(frameRes, text=' x ').pack(side='left')
ttk.Entry(frameRes, textvariable=varRY, font=self.small_font,
background=C.DEFAULT_BG, width=6).pack(side='left')
ttk.Button(self.topAddNewCam, text='OK',
command=executeAddNew).pack(side='top', pady=(0, 10*C.scsy), expand=True)
ttk.Label(self.topAddNewCam, textvariable=varMessageLabel, font=self.small_font,
background=C.DEFAULT_BG).pack(side='top', pady=(0, 10*C.scsy), expand=True)
def changeTelescope(self):
'''Create window with list of telescope models.'''
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
# Setup window
self.topTelescope = tk.Toplevel(background=C.DEFAULT_BG)
self.topTelescope.title('Choose telescope or lens')
self.addIcon(self.topTelescope)
apc.setupWindow(self.topTelescope, 320, 330)
self.topTelescope.focus_force()
labelTelescope = tk.Label(self.topTelescope, text='Choose telescope or lens:',
font=self.medium_font, background=C.DEFAULT_BG)
frameSelection = ttk.Frame(self.topTelescope)
labelTelescope.pack(side='top', pady=(18*C.scsy, 8*C.scsy), expand=True)
frameSelection.pack(side='top', pady=10*C.scsy, expand=True)
scrollbarTelescope = ttk.Scrollbar(frameSelection)
self.listboxTelescope = tk.Listbox(frameSelection, height=8, width=32, font=self.small_font,
selectmode='single', yscrollcommand=scrollbarTelescope.set)
scrollbarTelescope.pack(side='right', fill='y')
self.listboxTelescope.pack(side='right', fill='both')
self.listboxTelescope.focus_force()
# Insert telescope names into listbox
for i in range(len(C.TNAME)):
self.listboxTelescope.insert(i, C.TNAME[i])
scrollbarTelescope.config(command=self.listboxTelescope.yview) # Add scrollbar to listbox
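# Preselect the currently active telescope, if any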
if self.tnum is not None: self.listboxTelescope.activate(self.tnum)
self.varSetDefaultT = tk.IntVar()
frameDefault = ttk.Frame(self.topTelescope)
buttonChange = ttk.Button(self.topTelescope, text='OK', command=self.executeTelChange)
buttonAddNew = ttk.Button(self.topTelescope, text='Add new telescope or lens',
command=self.addNewTelescope)
frameDefault.pack(side='top', expand=True)
buttonChange.pack(side='top', pady=(10*C.scsy, 5*C.scsy), expand=True)
buttonAddNew.pack(side='top', pady=(0, 25*C.scsy), expand=True)
labelDefault = ttk.Label(frameDefault, text='Use as default:')
checkbuttonDefault = tk.Checkbutton(frameDefault, highlightbackground=C.DEFAULT_BG, background=C.DEFAULT_BG, activebackground=C.DEFAULT_BG, variable=self.varSetDefaultT)
labelDefault.grid(row=0, column=0)
checkbuttonDefault.grid(row=0, column=1)
self.cancelled = True
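# Remains True if the window is closed without confirming a selection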
self.wait_window(self.topTelescope)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
except:
pass
return not self.cancelled
def executeTelChange(self):
'''Configures widgets according to the selected new telescope.'''
self.cancelled = False
self.tnum = C.TNAME.index(self.listboxTelescope.get('active')) # Get index of new telescope
# Calculate new image scale value if a camera is selected
if self.cnum is not None:
self.ISVal = np.arctan2(C.PIXEL_SIZE[self.cnum][0]*1e-3,
C.FOCAL_LENGTH[self.tnum][0]*self.FLModVal)*180*3600/np.pi
# Sets the new telescope name in bottom line in telescope data file if "Set as default" is selected
if self.varSetDefaultT.get():
file = open('aplab_data{}telescopedata.txt'.format(os.sep), 'r')
lines = file.read().split('\n')
file.close()
file = open('aplab_data{}telescopedata.txt'.format(os.sep), 'w')
for line in lines[:-1]:
file.write(line + '\n')
file.write('Telescope: ' + C.TNAME[self.tnum])
file.close()
self.varTelName.set('Telescope: ' + C.TNAME[self.tnum])
anframe = self.frames[ImageAnalyser]
if not self.toolsConfigured:
self.topTelescope.destroy()
anframe.varMessageLabel.set('Telescope selected.')
anframe.labelMessage.configure(foreground='navy')
return None
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
plotframe = self.frames[PlottingTool]
fovframe = self.frames[FOVCalculator]
calframe.setDefaultValues()
simframe.setDefaultValues()
plotframe.setDefaultValues()
anframe.clearFiles()
fovframe.update()
fovframe.selectObject(fovframe.start_idx)
fovframe.setFOV()
self.topTelescope.destroy() # Close change telescope window
self.currentFrame().varMessageLabel.set('Telescope changed.')
self.currentFrame().labelMessage.configure(foreground='navy')
def addNewTelescope(self):
'''Shows a window with options for adding a new telescope/lens.'''
varTelName = tk.StringVar()
varAp = tk.StringVar()
varFL = tk.StringVar()
varMessageLabel = tk.StringVar()
def executeAddNew():
'''Adds the new telescope to "telescopedata.txt".'''
# Get inputted name of new telescope/lens
name = varTelName.get()
try:
aperture = float(varAp.get())
except:
varMessageLabel.set('Invalid aperture input.')
return None
try:
fl = float(varFL.get())
except:
varMessageLabel.set('Invalid focal length input.')
return None
if name == '' or ',' in name:
varMessageLabel.set('Invalid telescope/lens name input.')
return None
if name in C.TNAME:
varMessageLabel.set('This telescope/lens is already added.')
return None
# Read telescope data file
file = open('aplab_data{}telescopedata.txt'.format(os.sep), 'r')
lines = file.read().split('\n')
file.close()
file = open('aplab_data{}telescopedata.txt'.format(os.sep), 'w')
file.write(lines[0])
# Create new line in telescopedata.txt
file.write('\n' + '\n'.join(lines[1:-1]))
file.write('\n{},{:g}*,{:g}*'.format(name, fl, aperture))
file.write('\n' + lines[-1])
file.close()
# Sort telescope list
idx = apc.sortDataList(name, 'aplab_data{}telescopedata.txt'.format(os.sep))
# Insert telescope name, focal length and aperture into the telescope info lists
C.TNAME.insert(idx, name)
C.FOCAL_LENGTH.insert(idx, [fl, 1])
C.APERTURE.insert(idx, [aperture, 1])
self.cancelled = False
self.topAddNewTel.destroy()
self.topTelescope.destroy()
self.changeTelescope()
# Setup window
self.topAddNewTel = tk.Toplevel(background=C.DEFAULT_BG)
self.topAddNewTel.title('Add new telescope or lens')
self.addIcon(self.topAddNewTel)
apc.setupWindow(self.topAddNewTel, 300, 220)
self.topAddNewTel.focus_force()
ttk.Label(self.topAddNewTel, text='Please provide requested\ntelescope/lens information:')\
.pack(side='top', pady=(15*C.scsy, 10*C.scsy), expand=True)
frameInput = ttk.Frame(self.topAddNewTel)
frameInput.pack(side='top', pady=(7*C.scsy, 10*C.scsy), expand=True)
ttk.Label(frameInput, text='Name: ').grid(row=0, column=0, sticky='W')
ttk.Entry(frameInput, textvariable=varTelName, font=self.small_font,
background=C.DEFAULT_BG, width=20).grid(row=0, column=1, columnspan=2)
ttk.Label(frameInput, text='Aperture: ').grid(row=1, column=0, sticky='W')
ttk.Entry(frameInput, textvariable=varAp, font=self.small_font,
background=C.DEFAULT_BG, width=12).grid(row=1, column=1, sticky='W')
ttk.Label(frameInput, text='mm').grid(row=1, column=2, sticky='W')
ttk.Label(frameInput, text='Focal length: ').grid(row=2, column=0, sticky='W')
ttk.Entry(frameInput, textvariable=varFL, font=self.small_font,
background=C.DEFAULT_BG, width=12).grid(row=2, column=1, sticky='W')
ttk.Label(frameInput, text='mm').grid(row=2, column=2, sticky='W')
ttk.Button(self.topAddNewTel, text='OK',
command=executeAddNew).pack(side='top', pady=(0, 10*C.scsy), expand=True)
ttk.Label(self.topAddNewTel, textvariable=varMessageLabel, font=self.small_font,
background=C.DEFAULT_BG).pack(side='top', pady=(0, 10*C.scsy), expand=True)
def changeFLMod(self):
'''Change focal length modifier.'''
varFLMod = tk.StringVar()
varMessageLabel = tk.StringVar()
def ok():
'''Set the new FL modifier value and update relevant widgets and parameters.'''
try:
FLModVal = float(varFLMod.get())
except ValueError:
varMessageLabel.set('Invalid input.')
return None
self.varFLMod.set('Focal length modifier: {:g}x'.format(FLModVal))
self.FLModVal = FLModVal
self.currentFrame().varMessageLabel.set('Focal length modifier changed.')
self.currentFrame().labelMessage.configure(foreground='navy')
self.ISVal = np.arctan2(C.PIXEL_SIZE[self.cnum][0]*1e-3,
C.FOCAL_LENGTH[self.tnum][0]*self.FLModVal)*180*3600/np.pi
self.frames[ImageCalculator].updateOpticsLabels()
self.frames[ImageSimulator].updateOpticsLabels()
self.frames[ImageAnalyser].updateAngle()
fovframe = self.frames[FOVCalculator]
fovframe.selectObject(fovframe.obj_idx)
fovframe.setFOV()
topChangeFLMod.destroy()
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
# Setup window
topChangeFLMod = tk.Toplevel(background=C.DEFAULT_BG)
topChangeFLMod.title('Change focal length modifier')
apc.setupWindow(topChangeFLMod, 220, 160)
self.addIcon(topChangeFLMod)
topChangeFLMod.focus_force()
ttk.Label(topChangeFLMod, text='Input the magnification factor of\nthe barlow or focal reducer:').pack(side='top', pady=(12*C.scsy, 0), expand=True)
ttk.Entry(topChangeFLMod, textvariable=varFLMod, font=self.small_font,
background=C.DEFAULT_BG, width=10).pack(side='top', pady=12*C.scsy,
expand=True)
frameButtons = ttk.Frame(topChangeFLMod)
frameButtons.pack(side='top', pady=(0, 12*C.scsy), expand=True)
ttk.Button(frameButtons, text='OK', command=ok).grid(row=0, column=0)
ttk.Button(frameButtons, text='Cancel',
command=lambda: topChangeFLMod.destroy()).grid(row=0, column=1)
ttk.Label(topChangeFLMod, textvariable=varMessageLabel,
anchor='center').pack(side='top', pady=(0, 3*C.scsy), expand=True)
self.wait_window(topChangeFLMod)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
except:
pass
def modifyCamParams(self):
'''Creates window with options for modifying camera data.'''
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
# Setup window
self.topCModify = tk.Toplevel(background=C.DEFAULT_BG)
self.topCModify.title('Modify parameters')
self.addIcon(self.topCModify)
apc.setupWindow(self.topCModify, 280, 210)
self.topCModify.focus_force()
# Show a message if no camera data exists
if self.noData:
ttk.Label(self.topCModify, text='No sensor data exists for\nthe currently active camera.\n\n' \
+ 'You can aquire sensor data\nwith the Image Analyser.').pack(side='top', pady=20*C.scsy, expand=True)
ttk.Button(self.topCModify, text='OK', command=lambda: self.topCModify.destroy())\
.pack(side='top', pady=(0, 10*C.scsy), expand=True)
self.wait_window(self.topCModify)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
except:
pass
return None
# Read camera data file
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'r')
self.lines = file.read().split('\n')
file.close()
# Store parameter values
self.currentCValues = self.lines[self.cnum + 1].split(',')
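# Indices of the currently selected gain and read noise values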
self.gain_idx = 0
self.rn_idx = 0
self.varCParam = tk.StringVar()
self.varISO = tk.IntVar()
self.varGain = tk.DoubleVar()
self.varRN = tk.DoubleVar()
self.varNewCParamVal = tk.StringVar()
self.varCurrentCParamVal = tk.StringVar()
self.varErrorModifyC = tk.StringVar()
# List of modifiable parameters
paramlist = ['Gain', 'Read noise', 'Sat. cap.', 'Black level', 'White level', 'QE',
'Pixel size']
self.isolist = self.currentCValues[11].split('-') if self.isDSLR else ['0'] # List of ISO values
self.gainlist = self.currentCValues[2].split('-') # List of gain values
self.rnlist = self.currentCValues[3].split('-') # List of read noise values
self.satcaplist = self.currentCValues[4].split('-') # List of saturation capacity values
self.bllist = self.currentCValues[5].split('-') # List of black level values
self.wllist = self.currentCValues[6].split('-') # List of white level values
self.varCParam.set(paramlist[0])
self.varISO.set(self.isolist[0])
self.varGain.set(self.gainlist[0])
self.varRN.set(self.rnlist[0])
self.varNewCParamVal.set('')
self.varCurrentCParamVal.set('Current value: ' + self.gainlist[0].split('*')[0] + ' e-/ADU' \
+ (' (modified)' if '*' in self.gainlist[0] else ''))
frameParam = ttk.Frame(self.topCModify)
labelParam = ttk.Label(frameParam, text='Parameter:', anchor='center', width=11)
optionParam = ttk.OptionMenu(frameParam, self.varCParam, None, *paramlist,
command=self.updateCamParam)
self.labelISO = ttk.Label(frameParam, text='ISO:', anchor='center', width=11)
self.optionISO = ttk.OptionMenu(frameParam, self.varISO, None, *self.isolist,
command=self.updateParamISO)
self.labelGain = ttk.Label(frameParam, text='Gain:', anchor='center', width=11)
self.optionGain = ttk.OptionMenu(frameParam, self.varGain, None, *self.gainlist,
command=self.updateParamGain)
self.labelRN = ttk.Label(frameParam, text='RN:', anchor='center', width=11)
self.optionRN = ttk.OptionMenu(frameParam, self.varRN, None, *self.rnlist,
command=self.updateParamRN)
if not C.is_win:
self.optionISO.config(width=6)
self.optionGain.config(width=6)
self.optionRN.config(width=6)
labelCurrent = ttk.Label(self.topCModify, textvariable=self.varCurrentCParamVal)
labelSet = ttk.Label(self.topCModify, text='Input new value:', anchor='center')
entryNewVal = ttk.Entry(self.topCModify, textvariable=self.varNewCParamVal,
font=self.small_font, background=C.DEFAULT_BG)
buttonSet = ttk.Button(self.topCModify, text='Set value', command=self.setNewCamParamVal)
errorModify = ttk.Label(self.topCModify, textvariable=self.varErrorModifyC, anchor='center')
frameParam.pack(side='top', pady=(10*C.scsy, 0), expand=True)
labelParam.grid(row=0, column=0)
optionParam.grid(row=1, column=0)
if self.isDSLR:
self.labelISO.grid(row=0, column=1)
self.optionISO.grid(row=1, column=1)
elif len(self.gainlist) > 1:
self.labelGain.grid(row=0, column=1)
self.optionGain.grid(row=1, column=1)
labelCurrent.pack(side='top', pady=10*C.scsy, expand=True)
labelSet.pack(side='top', expand=True)
entryNewVal.pack(side='top', pady=5*C.scsy, expand=True)
buttonSet.pack(side='top', pady=5*C.scsy, expand=True)
errorModify.pack(side='top', expand=True)
self.currentFrame().varMessageLabel.set(\
'Note: changing the value of a camera parameter will reset the application.')
self.currentFrame().labelMessage.configure(foreground='navy')
self.wait_window(self.topCModify)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
except:
pass
def updateCamParam(self, selected_param):
'''Displays the relevant parameter value when selected parameter in the optionmenu changes.'''
self.labelISO.grid_forget()
self.optionISO.grid_forget()
self.labelGain.grid_forget()
self.optionGain.grid_forget()
self.labelRN.grid_forget()
self.optionRN.grid_forget()
self.varISO.set(self.isolist[0])
self.varGain.set(self.gainlist[0])
self.varRN.set(self.rnlist[0])
if selected_param == 'Gain':
if self.isDSLR:
self.labelISO.grid(row=0, column=1)
self.optionISO.grid(row=1, column=1)
elif len(self.gainlist) > 1:
self.labelGain.grid(row=0, column=1)
self.optionGain.grid(row=1, column=1)
self.varCurrentCParamVal.set('Current value: ' + self.gainlist[0].split('*')[0] + ' e-/ADU' \
+ (' (modified)' if '*' in self.gainlist[0] else ''))
elif selected_param == 'Read noise':
if self.isDSLR:
self.labelISO.grid(row=0, column=1)
self.optionISO.grid(row=1, column=1)
elif len(self.rnlist) > 1:
self.labelRN.grid(row=0, column=1)
self.optionRN.grid(row=1, column=1)
self.varCurrentCParamVal.set('Current value: ' + self.rnlist[0].split('*')[0] + ' e-' \
+ (' (modified)' if '*' in self.rnlist[0] else ''))
elif selected_param == 'Sat. cap.':
if self.isDSLR:
self.labelISO.grid(row=0, column=1)
self.optionISO.grid(row=1, column=1)
elif len(self.gainlist) > 1:
self.labelGain.grid(row=0, column=1)
self.optionGain.grid(row=1, column=1)
self.varCurrentCParamVal.set('Current value: ' + self.satcaplist[0].split('*')[0] + ' e-' \
+ (' (modified)' if '*' in self.satcaplist[0] else ''))
elif selected_param == 'Black level':
if self.isDSLR:
self.labelISO.grid(row=0, column=1)
self.optionISO.grid(row=1, column=1)
elif len(self.gainlist) > 1:
self.labelGain.grid(row=0, column=1)
self.optionGain.grid(row=1, column=1)
self.varCurrentCParamVal.set('Current value: ' + self.bllist[0].split('*')[0] + ' ADU' \
+ (' (modified)' if '*' in self.bllist[0] else ''))
elif selected_param == 'White level':
if self.isDSLR:
self.labelISO.grid(row=0, column=1)
self.optionISO.grid(row=1, column=1)
elif len(self.gainlist) > 1:
self.labelGain.grid(row=0, column=1)
self.optionGain.grid(row=1, column=1)
self.varCurrentCParamVal.set('Current value: ' + self.wllist[0].split('*')[0] + ' ADU' \
+ (' (modified)' if '*' in self.wllist[0] else ''))
elif selected_param == 'QE':
if self.hasQE:
self.varCurrentCParamVal.set('Current value: ' + self.currentCValues[7].split('*')[0] \
+ (' (modified)' if '*' in self.currentCValues[7] else ''))
else:
self.varCurrentCParamVal.set('No value exists.')
elif selected_param == 'Pixel size':
self.varCurrentCParamVal.set('Current value: ' + self.currentCValues[8].split('*')[0] \
+ u' \u03bcm' \
+ (' (modified)' if '*' in self.currentCValues[8] else ''))
elif selected_param == 'Horizontal resolution':
self.varCurrentCParamVal.set('Current value: ' + self.currentCValues[9].split('*')[0] \
+ (' (modified)' if '*' in self.currentCValues[9] else ''))
elif selected_param == 'Vertical resolution':
self.varCurrentCParamVal.set('Current value: ' + self.currentCValues[10].split('*')[0] \
+ (' (modified)' if '*' in self.currentCValues[10] else ''))
def updateParamISO(self, selected_iso):
'''Update the label showing the current gain or read noise value when a new ISO is selected.'''
# Store index of selected iso
self.gain_idx = self.isolist.index(selected_iso)
self.rn_idx = self.gain_idx
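# For DSLRs the gain and read noise lists are indexed by ISO, so both indices follow the selected ISO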
if self.varCParam.get() == 'Gain':
self.varCurrentCParamVal.set('Current value: ' + self.gainlist[self.gain_idx].split('*')[0] \
+ ' e-/ADU' + (' (modified)' if '*' in self.gainlist[self.gain_idx] else ''))
elif self.varCParam.get() == 'Read noise':
self.varCurrentCParamVal.set('Current value: ' + self.rnlist[self.rn_idx].split('*')[0] \
+ ' e-' + (' (modified)' if '*' in self.rnlist[self.rn_idx] else ''))
elif self.varCParam.get() == 'Sat. cap.':
self.varCurrentCParamVal.set('Current value: ' + self.satcaplist[self.gain_idx].split('*')[0] \
+ ' e-' + (' (modified)' if '*' in self.satcaplist[self.gain_idx] else ''))
elif self.varCParam.get() == 'Black level':
self.varCurrentCParamVal.set('Current value: ' + self.bllist[self.gain_idx].split('*')[0] \
+ ' ADU' + (' (modified)' if '*' in self.bllist[self.gain_idx] else ''))
elif self.varCParam.get() == 'White level':
self.varCurrentCParamVal.set('Current value: ' + self.wllist[self.gain_idx].split('*')[0] \
+ ' ADU' + (' (modified)' if '*' in self.wllist[self.gain_idx] else ''))
def updateParamGain(self, selected_gain):
'''Update the label showing the current gain value when a new gain is selected.'''
self.gain_idx = self.gainlist.index(selected_gain) # Store index of selected gain
if self.varCParam.get() == 'Gain':
self.varCurrentCParamVal.set('Current value: ' + selected_gain.split('*')[0] + ' e-/ADU' \
+ (' (modified)' if '*' in selected_gain else ''))
elif self.varCParam.get() == 'Sat. cap.':
self.varCurrentCParamVal.set('Current value: ' + self.satcaplist[self.gain_idx].split('*')[0] \
+ ' e-' + (' (modified)' if '*' in self.satcaplist[self.gain_idx] else ''))
elif self.varCParam.get() == 'Black level':
self.varCurrentCParamVal.set('Current value: ' + self.bllist[self.gain_idx].split('*')[0] \
+ ' ADU' + (' (modified)' if '*' in self.bllist[self.gain_idx] else ''))
elif self.varCParam.get() == 'White level':
self.varCurrentCParamVal.set('Current value: ' + self.wllist[self.gain_idx].split('*')[0] \
+ ' ADU' + (' (modified)' if '*' in self.wllist[self.gain_idx] else ''))
def updateParamRN(self, selected_rn):
'''Update the label showing the current read noise value when a new read noise is selected.'''
self.rn_idx = self.rnlist.index(selected_rn) # Store index of selected read noise
self.varCurrentCParamVal.set('Current value: ' + selected_rn.split('*')[0] + ' e-' \
+ (' (modified)' if '*' in selected_rn else ''))
def setNewCamParamVal(self):
'''Writes new camera data file with the new value of the selected parameter.'''
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
plotframe = self.frames[PlottingTool]
newval = self.varNewCParamVal.get()
# Show error message if the new inputted value is not a number
try:
float(newval)
except ValueError:
self.varErrorModifyC.set('Invalid value. Please insert a number.')
return None
# Write new camera data file
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'w')
idx = self.cnum + 1
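# Line index of the active camera in the data file (line 0 is the header)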
file.write(self.lines[0])
for i in range(1, idx):
file.write('\n' + self.lines[i])
if self.varCParam.get() == 'Gain':
self.gainlist[self.gain_idx] = newval + '*'
file.write('\n{},{},{}'.format(','.join(self.currentCValues[:2]),
'-'.join(self.gainlist),
','.join(self.currentCValues[3:])))
C.GAIN[self.cnum][0][self.gain_idx] = float(newval)
C.GAIN[self.cnum][1][self.gain_idx] = 1
self.varCurrentCParamVal.set('Current value: ' + newval + ' e-/ADU (modified)')
elif self.varCParam.get() == 'Read noise':
self.rnlist[self.rn_idx] = newval + '*'
file.write('\n{},{},{}'.format(','.join(self.currentCValues[:3]),
'-'.join(self.rnlist),
','.join(self.currentCValues[4:])))
C.RN[self.cnum][0][self.rn_idx] = float(newval)
C.RN[self.cnum][1][self.rn_idx] = 1
self.varCurrentCParamVal.set('Current value: ' + newval + ' e- (modified)')
elif self.varCParam.get() == 'Sat. cap.':
self.satcaplist[self.gain_idx] = newval + '*'
file.write('\n{},{},{}'.format(','.join(self.currentCValues[:4]),
'-'.join(self.satcaplist),
','.join(self.currentCValues[5:])))
C.SAT_CAP[self.cnum][0][self.gain_idx] = int(newval)
C.SAT_CAP[self.cnum][1][self.gain_idx] = 1
self.varCurrentCParamVal.set('Current value: ' + newval + ' e- (modified)')
elif self.varCParam.get() == 'Black level':
self.bllist[self.gain_idx] = newval + '*'
file.write('\n{},{},{}'.format(','.join(self.currentCValues[:5]),
'-'.join(self.bllist),
','.join(self.currentCValues[6:])))
C.BLACK_LEVEL[self.cnum][0][self.gain_idx] = int(newval)
C.BLACK_LEVEL[self.cnum][1][self.gain_idx] = 1
self.varCurrentCParamVal.set('Current value: ' + newval + ' ADU (modified)')
elif self.varCParam.get() == 'White level':
self.wllist[self.gain_idx] = newval + '*'
file.write('\n{},{},{}'.format(','.join(self.currentCValues[:6]),
'-'.join(self.wllist),
','.join(self.currentCValues[7:])))
C.WHITE_LEVEL[self.cnum][0][self.gain_idx] = int(newval)
C.WHITE_LEVEL[self.cnum][1][self.gain_idx] = 1
self.varCurrentCParamVal.set('Current value: ' + newval + ' ADU (modified)')
elif self.varCParam.get() == 'QE':
file.write('\n{},{},{}'.format(','.join(self.currentCValues[:7]),
newval + '*',
','.join(self.currentCValues[8:])))
C.QE[self.cnum][0] = float(newval)
C.QE[self.cnum][1] = 1
self.varCurrentCParamVal.set('Current value: ' + newval + ' (modified)')
elif self.varCParam.get() == 'Pixel size':
file.write('\n{},{},{}'.format(','.join(self.currentCValues[:8]),
newval + '*',
','.join(self.currentCValues[9:])))
C.PIXEL_SIZE[self.cnum][0] = float(newval)
C.PIXEL_SIZE[self.cnum][1] = 1
self.varCurrentCParamVal.set('Current value: ' + newval + u' \u03bcm (modified)')
elif self.varCParam.get() == 'Horizontal resolution':
file.write('\n{},{:d}*,{}'.format(','.join(self.currentCValues[:9]),
int(float(newval)),
','.join(self.currentCValues[10:])))
C.RES_X[self.cnum][0] = int(float(newval))
C.RES_X[self.cnum][1] = 1
self.varCurrentCParamVal.set('Current value: ' + newval + ' (modified)')
elif self.varCParam.get() == 'Vertical resolution':
file.write('\n{},{:d}*'.format(','.join(self.currentCValues[:10]), int(float(newval))))
if self.isDSLR: file.write(',{}'.format(self.currentCValues[11]))
C.RES_Y[self.cnum][0] = int(float(newval))
C.RES_Y[self.cnum][1] = 1
self.varCurrentCParamVal.set('Current value: ' + newval + ' (modified)')
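# Copy the remaining camera lines unchanged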
for i in range((idx + 1), len(self.lines)):
file.write('\n' + self.lines[i])
file.close()
# Reset all frames in order for the parameter change to take effect
self.hasQE = C.QE[self.cnum][0] != 'NA'
calframe.setDefaultValues()
simframe.setDefaultValues()
plotframe.setDefaultValues()
calframe.reconfigureNonstaticWidgets()
simframe.reconfigureNonstaticWidgets()
plotframe.reconfigureNonstaticWidgets()
self.currentFrame().varMessageLabel.set('Camera parameter modified.')
self.currentFrame().labelMessage.configure(foreground='navy')
# Update widgets and attributes in the window with the new parameter value
self.optionISO.set_menu(*([None] + self.isolist))
self.optionGain.set_menu(*([None] + self.gainlist))
self.optionRN.set_menu(*([None] + self.rnlist))
if self.isDSLR: self.varISO.set(self.isolist[self.gain_idx])
self.varGain.set(self.gainlist[self.gain_idx])
self.varRN.set(self.rnlist[self.rn_idx])
self.varNewCParamVal.set('')
self.varErrorModifyC.set('')
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'r')
self.lines = file.read().split('\n')
file.close()
self.currentCValues = self.lines[self.cnum + 1].split(',')
def modifyTelParams(self):
'''Creates window with options for modifying telescope data.'''
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
# Setup window
self.topTModify = tk.Toplevel(background=C.DEFAULT_BG)
self.topTModify.title('Modify parameters')
self.addIcon(self.topTModify)
apc.setupWindow(self.topTModify, 280, 210)
self.topTModify.focus_force()
# Read telescope data file
file = open('aplab_data{}telescopedata.txt'.format(os.sep), 'r')
self.lines = file.read().split('\n')
file.close()
# Store parameter values
self.currentTValues = self.lines[self.tnum + 1].split(',')
self.varTParam = tk.StringVar()
self.varNewTParamVal = tk.StringVar()
self.varCurrentTParamVal = tk.StringVar()
self.varErrorModifyT = tk.StringVar()
# List of modifiable parameters
paramlist = ['Focal length', 'Aperture']
self.varTParam.set(paramlist[0])
self.varNewTParamVal.set('')
self.varCurrentTParamVal.set('Current value: ' + self.currentTValues[1].split('*')[0] + ' mm' \
+ (' (modified)' if '*' in self.currentTValues[1] else ''))
frameParam = ttk.Frame(self.topTModify)
labelParam = ttk.Label(frameParam, text='Parameter:', anchor='center', width=11)
optionParam = ttk.OptionMenu(frameParam, self.varTParam, None, *paramlist,
command=self.updateTelParam)
labelCurrent = ttk.Label(self.topTModify, textvariable=self.varCurrentTParamVal)
labelSet = ttk.Label(self.topTModify, text='Input new value:', anchor='center')
entryNewVal = ttk.Entry(self.topTModify, textvariable=self.varNewTParamVal,
font=self.small_font, background=C.DEFAULT_BG)
buttonSet = ttk.Button(self.topTModify, text='Set value', command=self.setNewTelParamVal)
errorModify = ttk.Label(self.topTModify, textvariable=self.varErrorModifyT, anchor='center')
frameParam.pack(side='top', pady=(10*C.scsy, 0), expand=True)
labelParam.grid(row=0, column=0)
optionParam.grid(row=1, column=0)
labelCurrent.pack(side='top', pady=10*C.scsy, expand=True)
labelSet.pack(side='top', expand=True)
entryNewVal.pack(side='top', pady=5*C.scsy, expand=True)
buttonSet.pack(side='top', pady=5*C.scsy, expand=True)
errorModify.pack(side='top', expand=True)
self.currentFrame().varMessageLabel.set(\
'Note: changing the value of a telescope/lens parameter will reset the application.')
self.currentFrame().labelMessage.configure(foreground='navy')
self.wait_window(self.topTModify)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
except:
pass
def updateTelParam(self, selected_param):
'''Displays the relevant parameter value when selected parameter in the optionmenu changes.'''
if selected_param == 'Focal length':
self.varCurrentTParamVal.set('Current value: ' + self.currentTValues[1].split('*')[0] + ' mm' \
+ (' (modified)' if '*' in self.currentTValues[1] else ''))
elif selected_param == 'Aperture':
self.varCurrentTParamVal.set('Current value: ' + self.currentTValues[2].split('*')[0] + ' mm' \
+ (' (modified)' if '*' in self.currentTValues[2] else ''))
def setNewTelParamVal(self):
'''Writes new telescope data file with the new value of the selected parameter.'''
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
plotframe = self.frames[PlottingTool]
newval = self.varNewTParamVal.get()
# Show error message if the new inputted value is not a number
try:
float(newval)
except ValueError:
self.varErrorModifyT.set('Invalid value. Please insert a number.')
return None
# Write new telescope data file
file = open('aplab_data{}telescopedata.txt'.format(os.sep), 'w')
idx = self.tnum + 1
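# Line index of the active telescope in the data file (line 0 is the header)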
file.write(self.lines[0])
for i in range(1, idx):
file.write('\n' + self.lines[i])
if self.varTParam.get() == 'Focal length':
file.write('\n{},{},{}'.format(self.currentTValues[0], newval + '*', self.currentTValues[2]))
C.FOCAL_LENGTH[self.tnum][0] = float(newval)
C.FOCAL_LENGTH[self.tnum][1] = 1
self.varCurrentTParamVal.set('Current value: ' + newval + ' mm (modified)')
elif self.varTParam.get() == 'Aperture':
file.write('\n{},{},{}'.format(self.currentTValues[0], self.currentTValues[1], newval + '*'))
C.APERTURE[self.tnum][0] = float(newval)
C.APERTURE[self.tnum][1] = 1
self.varCurrentTParamVal.set('Current value: ' + newval + ' mm (modified)')
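# Copy the remaining telescope lines unchanged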
for i in range((idx + 1), len(self.lines)):
file.write('\n' + self.lines[i])
file.close()
calframe.setDefaultValues()
simframe.setDefaultValues()
plotframe.setDefaultValues()
self.currentFrame().varMessageLabel.set('Telescope parameter modified.')
self.currentFrame().labelMessage.configure(foreground='navy')
self.varNewTParamVal.set('')
self.varErrorModifyT.set('')
file = open('aplab_data{}telescopedata.txt'.format(os.sep), 'r')
self.lines = file.read().split('\n')
file.close()
self.currentTValues = self.lines[self.tnum + 1].split(',')
def transferToSim(self):
'''Transfer relevant inputted or calculated values to widgets in the Image Simulator frame.'''
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
plotframe = self.frames[PlottingTool]
# If Image Calculator is the active frame
if self.calMode.get():
# Show error message if flux data hasn't been calculated
if not calframe.dataCalculated:
calframe.varMessageLabel.set('Data must be calculated before it can be transferred.')
calframe.labelMessage.configure(foreground='crimson')
return None
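# If requested, transfer only the target flux as the limit flux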
if calframe.varTransfLim.get():
simframe.varLF.set('{:g}'.format(self.convSig(calframe.tf, True) if self.lumSignalType.get() \
else calframe.tf))
calframe.varMessageLabel.set('Target flux transferred as limit flux to Image Simulator.')
calframe.labelMessage.configure(foreground='navy')
return None
# Set values
simframe.gain_idx = calframe.gain_idx
simframe.rn_idx = calframe.rn_idx
simframe.varISO.set(0 if not self.isDSLR else C.ISO[self.cnum][calframe.gain_idx])
simframe.varGain.set(C.GAIN[self.cnum][0][calframe.gain_idx])
simframe.varRN.set(C.RN[self.cnum][0][calframe.rn_idx])
simframe.varExp.set('{:g}'.format(calframe.exposure))
simframe.varDF.set('{:g}'.format(calframe.df))
simframe.varSF.set('{:g}'.format(self.convSig(calframe.sf, True) if self.lumSignalType.get() \
else calframe.sf))
if calframe.tf != 0:
simframe.varTF.set('{:g}'.format(self.convSig(calframe.tf, True) if self.lumSignalType.get() \
else calframe.tf))
simframe.varSubs.set(1)
simframe.dataCalculated = False # Previously calculated data is no longer valid
simframe.updateSensorLabels() # Update sensor info labels in case the ISO has changed
simframe.emptyInfoLabels() # Clear other info labels
calframe.varMessageLabel.set('Data transferred to Image Simulator.' \
if calframe.varUseDark.get() \
else 'Data transferred. Note that transferred signal ' \
+ 'data does not contain a separate value for dark current.')
calframe.labelMessage.configure(foreground='navy')
# If Plotting Tool is the active frame
elif self.plMode.get():
simframe.setDefaultValues() # Reset Image Simulator frame
# Set values that are not invalid
simframe.gain_idx = plotframe.gain_idx
simframe.rn_idx = plotframe.rn_idx
simframe.varISO.set(0 if not self.isDSLR else C.ISO[self.cnum][plotframe.gain_idx])
simframe.varGain.set(C.GAIN[self.cnum][0][plotframe.gain_idx])
simframe.varRN.set(C.RN[self.cnum][0][plotframe.rn_idx])
try:
simframe.varExp.set('{:g}'.format(plotframe.varExp.get()))
except tk.TclError:
pass
try:
simframe.varDF.set('{:g}'.format(plotframe.varDF.get()))
except tk.TclError:
pass
try:
simframe.varSF.set('{:g}'.format(plotframe.varSF.get()))
except tk.TclError:
pass
try:
simframe.varTF.set('{:g}'.format(plotframe.varTF.get()))
except tk.TclError:
pass
try:
simframe.varLF.set('{:g}'.format(plotframe.varLF.get()))
except tk.TclError:
pass
simframe.varSubs.set(1)
simframe.updateSensorLabels() # Update sensor info labels in case the ISO has changed
simframe.emptyInfoLabels() # Clear other info labels
plotframe.varMessageLabel.set('Input transferred to Image Simulator.')
plotframe.labelMessage.configure(foreground='navy')
def transferToPlot(self):
'''Transfer relevant inputted or calculated values to widgets in the Plotting Tool frame.'''
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
plotframe = self.frames[PlottingTool]
# If Image Calculator is the active frame
if self.calMode.get():
# Show error message if flux data hasn't been calculated
if not calframe.dataCalculated:
calframe.varMessageLabel.set('Data must be calculated before it can be transferred.')
calframe.labelMessage.configure(foreground='crimson')
return None
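# If requested, transfer only the target flux as the limit flux (DSLR only)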
if calframe.varTransfLim.get() and self.isDSLR:
plotframe.varLF.set('{:g}'.format(self.convSig(calframe.tf, True) if self.lumSignalType.get() \
else calframe.tf))
calframe.varMessageLabel.set('Target flux transferred as limit flux to Plotting Tool.')
calframe.labelMessage.configure(foreground='navy')
return None
# Set values
plotframe.gain_idx = calframe.gain_idx
plotframe.rn_idx = calframe.rn_idx
plotframe.varISO.set(0 if not self.isDSLR else C.ISO[self.cnum][calframe.gain_idx])
plotframe.varGain.set(C.GAIN[self.cnum][0][calframe.gain_idx])
plotframe.varRN.set(C.RN[self.cnum][0][calframe.rn_idx])
plotframe.varExp.set('{:g}'.format(calframe.exposure))
plotframe.varDF.set('{:g}'.format(calframe.df))
plotframe.varSF.set('{:g}'.format(self.convSig(calframe.sf, True) if self.lumSignalType.get() \
else calframe.sf))
plotframe.varTF.set(0 if calframe.tf == 0 \
else ('{:g}'.format(self.convSig(calframe.tf, True) if self.lumSignalType.get() \
else calframe.tf)))
plotframe.ax.cla() # Clear plot
calframe.varMessageLabel.set('Data transferred to Plotting Tool.' \
if calframe.varUseDark.get() \
else 'Data transferred. Note that transferred signal data ' \
+ 'does not contain a separate value for dark current.')
calframe.labelMessage.configure(foreground='navy')
# If Image Simulator is the active frame
elif self.simMode.get():
plotframe.setDefaultValues() # Reset Plotting Tool frame
# Set values that are not invalid
plotframe.gain_idx = simframe.gain_idx
plotframe.rn_idx = simframe.rn_idx
plotframe.varISO.set(0 if not self.isDSLR else C.ISO[self.cnum][simframe.gain_idx])
plotframe.varGain.set(C.GAIN[self.cnum][0][simframe.gain_idx])
plotframe.varRN.set(C.RN[self.cnum][0][simframe.rn_idx])
try:
plotframe.varExp.set('{:g}'.format(simframe.varExp.get()))
except tk.TclError:
pass
try:
plotframe.varDF.set('{:g}'.format(simframe.varDF.get()))
except tk.TclError:
pass
try:
plotframe.varSF.set('{:g}'.format(simframe.varSF.get()))
except tk.TclError:
pass
try:
plotframe.varTF.set('{:g}'.format(simframe.varTF.get()))
except tk.TclError:
pass
try:
plotframe.varLF.set('{:g}'.format(simframe.varLF.get()))
except tk.TclError:
pass
plotframe.ax.cla() # Clear plot
simframe.varMessageLabel.set('Input transferred to Plotting Tool.')
simframe.labelMessage.configure(foreground='navy')
def setElectronSignalType(self):
'''Use electron flux as signal quantity.'''
# Do nothing if electron flux is already used
if not self.lumSignalType.get():
self.electronSignalType.set(1)
return None
self.lumSignalType.set(0)
self.electronSignalType.set(1)
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
plotframe = self.frames[PlottingTool]
# Change unit labels
calframe.varSFLabel.set('e-/s')
calframe.varTFLabel.set('e-/s')
simframe.varSFLabel.set('e-/s')
simframe.varTFLabel.set('e-/s')
simframe.varLFLabel.set('e-/s')
plotframe.varSFLabel.set('e-/s')
plotframe.varTFLabel.set('e-/s')
plotframe.varLFLabel.set('e-/s')
# Change tooltips
if self.tooltipsOn.get():
apc.createToolTip(calframe.labelSF, C.TTSFElectron if calframe.varUseDark.get() \
or self.isDSLR else C.TTDSFElectron, self.tt_fs)
apc.createToolTip(calframe.labelSF2, C.TTSFElectron if calframe.varUseDark.get() \
or self.isDSLR else C.TTDSFElectron, self.tt_fs)
apc.createToolTip(calframe.labelTF, C.TTTFElectron, self.tt_fs)
apc.createToolTip(calframe.labelTF2, C.TTTFElectron, self.tt_fs)
apc.createToolTip(simframe.labelSF, C.TTSFElectron, self.tt_fs)
apc.createToolTip(simframe.labelTF, C.TTTFElectron, self.tt_fs)
apc.createToolTip(simframe.labelLF, C.TTLFElectron, self.tt_fs)
apc.createToolTip(simframe.entrySF, C.TTSFElectron, self.tt_fs)
apc.createToolTip(simframe.entryTF, C.TTTFElectron, self.tt_fs)
apc.createToolTip(simframe.entryLF, C.TTLFElectron, self.tt_fs)
apc.createToolTip(plotframe.labelSF, C.TTSFElectron, self.tt_fs)
apc.createToolTip(plotframe.labelTF, C.TTTFElectron, self.tt_fs)
apc.createToolTip(plotframe.labelLF, C.TTLFElectron, self.tt_fs)
apc.createToolTip(plotframe.entrySF, C.TTSFElectron, self.tt_fs)
apc.createToolTip(plotframe.entryTF, C.TTTFElectron, self.tt_fs)
apc.createToolTip(plotframe.entryLF, C.TTLFElectron, self.tt_fs)
# Change displayed flux values if they have been calculated
if calframe.dataCalculated:
calframe.varSFInfo.set('{:.3g}'.format(calframe.sf))
calframe.varTFInfo.set('-' if calframe.tf == 0 else '{:.3g}'.format(calframe.tf))
calframe.labelSF2.configure(background=C.DEFAULT_BG, foreground='black')
calframe.labelTF2.configure(background=C.DEFAULT_BG, foreground='black')
try:
sig = simframe.varSF.get()
simframe.varSF.set('{:.3g}'.format(self.convSig(sig, False)))
except:
pass
try:
sig = simframe.varTF.get()
simframe.varTF.set('{:.3g}'.format(self.convSig(sig, False)))
except:
pass
try:
sig = simframe.varLF.get()
simframe.varLF.set('{:.3g}'.format(self.convSig(sig, False)))
except:
pass
try:
sig = plotframe.varSF.get()
plotframe.varSF.set('{:.3g}'.format(self.convSig(sig, False)))
except:
pass
try:
sig = plotframe.varTF.get()
plotframe.varTF.set('{:.3g}'.format(self.convSig(sig, False)))
except:
pass
try:
sig = plotframe.varLF.get()
plotframe.varLF.set('{:.3g}'.format(self.convSig(sig, False)))
except:
pass
self.currentFrame().varMessageLabel.set(\
'Using electron flux as signal quantity. Relevant values have been converted.')
self.currentFrame().labelMessage.configure(foreground='navy')
def setLumSignalType(self):
'''Use luminance as signal quantity.'''
# Do nothing if luminance is already used
if not self.electronSignalType.get():
self.lumSignalType.set(1)
return None
if not self.hasQE:
self.lumSignalType.set(0)
self.currentFrame().varMessageLabel\
.set('Camera doesn\'t have QE data. Cannot estimate luminance.')
self.currentFrame().labelMessage.configure(foreground='crimson')
return None
self.lumSignalType.set(1)
self.electronSignalType.set(0)
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
plotframe = self.frames[PlottingTool]
# Change unit labels
calframe.varSFLabel.set(u'mag/arcsec\u00B2')
calframe.varTFLabel.set(u'mag/arcsec\u00B2')
simframe.varSFLabel.set(u'mag/arcsec\u00B2')
simframe.varTFLabel.set(u'mag/arcsec\u00B2')
simframe.varLFLabel.set(u'mag/arcsec\u00B2')
plotframe.varSFLabel.set(u'mag/arcsec\u00B2')
plotframe.varTFLabel.set(u'mag/arcsec\u00B2')
plotframe.varLFLabel.set(u'mag/arcsec\u00B2')
# Change tooltips
if self.tooltipsOn.get():
apc.createToolTip(calframe.labelSF, C.TTSFLum if calframe.varUseDark.get() \
or self.isDSLR else C.TTDSFPhoton, self.tt_fs)
apc.createToolTip(calframe.labelSF2, C.TTSFLum if calframe.varUseDark.get() \
or self.isDSLR else C.TTDSFPhoton, self.tt_fs)
apc.createToolTip(calframe.labelTF, C.TTTFLum, self.tt_fs)
apc.createToolTip(calframe.labelTF2, C.TTTFLum, self.tt_fs)
apc.createToolTip(simframe.labelSF, C.TTSFLum, self.tt_fs)
apc.createToolTip(simframe.labelTF, C.TTTFLum, self.tt_fs)
apc.createToolTip(simframe.labelLF, C.TTLFLum, self.tt_fs)
apc.createToolTip(simframe.entrySF, C.TTSFLum, self.tt_fs)
apc.createToolTip(simframe.entryTF, C.TTTFLum, self.tt_fs)
apc.createToolTip(simframe.entryLF, C.TTLFLum, self.tt_fs)
apc.createToolTip(plotframe.labelSF, C.TTSFLum, self.tt_fs)
apc.createToolTip(plotframe.labelTF, C.TTTFLum, self.tt_fs)
apc.createToolTip(plotframe.labelLF, C.TTLFLum, self.tt_fs)
apc.createToolTip(plotframe.entrySF, C.TTSFLum, self.tt_fs)
apc.createToolTip(plotframe.entryTF, C.TTTFLum, self.tt_fs)
apc.createToolTip(plotframe.entryLF, C.TTLFLum, self.tt_fs)
# Change displayed flux values if they have been calculated
if calframe.dataCalculated:
sf = self.convSig(calframe.sf, True)
calframe.varSFInfo.set('{:.4f}'.format(sf))
calframe.setLumColour(sf, calframe.labelSF2)
if calframe.tf == 0:
calframe.varTFInfo.set('-')
calframe.labelTF2.configure(background=C.DEFAULT_BG, foreground='black')
else:
tf = self.convSig(calframe.tf, True)
calframe.varTFInfo.set('{:.4f}'.format(tf))
calframe.setLumColour(tf, calframe.labelTF2)
try:
sig = simframe.varSF.get()
simframe.varSF.set('{:.4f}'.format(self.convSig(sig, True)))
except:
pass
try:
sig = simframe.varTF.get()
simframe.varTF.set('{:.4f}'.format(self.convSig(sig, True)))
except:
pass
try:
sig = simframe.varLF.get()
simframe.varLF.set('{:.4f}'.format(self.convSig(sig, True)))
except:
pass
try:
sig = plotframe.varSF.get()
plotframe.varSF.set('{:.4f}'.format(self.convSig(sig, True)))
except:
pass
try:
sig = plotframe.varTF.get()
plotframe.varTF.set('{:.4f}'.format(self.convSig(sig, True)))
except:
pass
try:
sig = plotframe.varLF.get()
plotframe.varLF.set('{:.4f}'.format(self.convSig(sig, True)))
except:
pass
self.currentFrame().varMessageLabel.set(\
'Using luminance as signal quantity. Relevant values have been converted.')
self.currentFrame().labelMessage.configure(foreground='navy')
def setdBDRUnit(self):
'''Use [dB] as unit for dynamic range.'''
# Do nothing if dB is already used
if not self.stopsDRUnit.get():
self.dBDRUnit.set(1)
return None
self.dBDRUnit.set(1)
self.stopsDRUnit.set(0)
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
calframe.varDRLabel.set('dB')
simframe.varDRLabel.set('dB')
# Convert existing DR values from stops to dB
factor = 10*np.log(2.0)/np.log(10.0)
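# 10*log10(2) ~= 3.01 dB per stop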
if calframe.dataCalculated: calframe.varDRInfo.set('{:.1f}'.format(calframe.dr*factor))
if simframe.dataCalculated: simframe.varDRInfo.set('{:.1f}'.format(simframe.dr*factor))
def setStopsDRUnit(self):
'''Use [stops] as unit for dynamic range.'''
# Do nothing if stops is already used
if not self.dBDRUnit.get():
self.stopsDRUnit.set(1)
return None
self.dBDRUnit.set(0)
self.stopsDRUnit.set(1)
calframe = self.frames[ImageCalculator]
simframe = self.frames[ImageSimulator]
calframe.varDRLabel.set('stops')
simframe.varDRLabel.set('stops')
# Redisplay existing DR values in stops (the unit they are stored in)
if calframe.dataCalculated: calframe.varDRInfo.set('{:.1f}'.format(calframe.dr))
if simframe.dataCalculated: simframe.varDRInfo.set('{:.1f}'.format(simframe.dr))
def addIcon(self, window):
'''Set icon if it exists.'''
try:
window.iconbitmap('aplab_icon.ico')
except:
pass
def currentFrame(self):
'''Returns the class corresponding to the currently active frame.'''
if self.anMode.get():
frame = self.frames[ImageAnalyser]
elif self.calMode.get():
frame = self.frames[ImageCalculator]
elif self.simMode.get():
frame = self.frames[ImageSimulator]
else:
frame = self.frames[PlottingTool]
return frame
def toggleTooltips(self):
'''Turn tooltips on or off.'''
if self.tooltipsOn.get():
self.frames[ImageCalculator].deactivateTooltips()
self.frames[ImageSimulator].deactivateTooltips()
self.frames[PlottingTool].deactivateTooltips()
self.tooltipsOn.set(0)
self.currentFrame().varMessageLabel.set('Tooltips deactivated.')
self.currentFrame().labelMessage.configure(foreground='navy')
else:
self.frames[ImageCalculator].activateTooltips()
self.frames[ImageSimulator].activateTooltips()
self.frames[PlottingTool].activateTooltips()
self.tooltipsOn.set(1)
self.currentFrame().varMessageLabel.set('Tooltips activated.')
self.currentFrame().labelMessage.configure(foreground='navy')
def toogleDefaultTTState(self):
'''Toggle whether tooltips will be shown automatically on startup.'''
self.menuTT.delete(1)
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'r')
lines = file.read().split('\n')
file.close()
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'w')
for line in lines[:-1]:
file.write(line + '\n')
if self.defaultTTState.get():
self.menuTT.insert_command(1, label='Turn on as default', command=self.toogleDefaultTTState)
file.write(lines[-1].split(',')[0] + ', Tooltips: off,' + lines[-1].split(',')[2])
self.defaultTTState.set(0)
if not self.tooltipsOn.get():
self.toggleTooltips()
self.currentFrame().varMessageLabel.set('Default tooltip state: off')
self.currentFrame().labelMessage.configure(foreground='navy')
else:
self.menuTT.insert_command(1, label='Turn off as default', command=self.toogleDefaultTTState)
file.write(lines[-1].split(',')[0] + ', Tooltips: on,' + lines[-1].split(',')[2])
self.defaultTTState.set(1)
if self.tooltipsOn.get():
self.toggleTooltips()
self.currentFrame().varMessageLabel.set('Default tooltip state: on')
self.currentFrame().labelMessage.configure(foreground='navy')
file.close()
def changeFS(self):
'''Change application font size.'''
fs_vals = [7, 8, 9, 10, 11, 12, 13, 14, 15]
varFS = tk.IntVar()
varFS.set(self.small_fs)
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
# Setup window
topFS = tk.Toplevel(background=C.DEFAULT_BG)
topFS.title('Change font size')
apc.setupWindow(topFS, 150, 130)
self.addIcon(topFS)
topFS.focus_force()
labelFS = ttk.Label(topFS, text='Choose font size:', anchor='center')
optionFS = ttk.OptionMenu(topFS, varFS, None, *fs_vals)
if not C.is_win: optionFS.config(width=4)
buttonFS = ttk.Button(topFS, text='OK', command=lambda: setNewFS(self, self.cnum, self.tnum,
varFS.get()))
labelFS.pack(side='top', pady=(12*C.scsy, 0), expand=True)
optionFS.pack(side='top', pady=12*C.scsy, expand=True)
buttonFS.pack(side='top', pady=(0, 12*C.scsy), expand=True)
self.currentFrame().varMessageLabel.set('Warning: changing font size ' \
+ 'will restart the application.')
self.currentFrame().labelMessage.configure(foreground='crimson')
self.wait_window(topFS)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
        except tk.TclError:
pass
def clearInput(self):
'''Reset input widgets in the active tool.'''
frame = self.currentFrame()
def ok():
frame.setDefaultValues()
if self.calMode.get():
frame.toggleDarkInputMode()
elif self.plMode.get():
frame.toggleActiveWidgets(frame.plotList[0])
topWarning.destroy()
self.menubar.entryconfig(1, state='disabled')
self.menubar.entryconfig(2, state='disabled')
self.menubar.entryconfig(3, state='disabled')
self.menubar.entryconfig(4, state='disabled')
# Setup window
topWarning = tk.Toplevel(background=C.DEFAULT_BG)
topWarning.title('Warning')
self.addIcon(topWarning)
apc.setupWindow(topWarning, 300, 145)
topWarning.focus_force()
ttk.Label(topWarning, text='Are you sure you want to\nclear the inputted information?').pack(side='top', pady=(20*C.scsy, 5*C.scsy), expand=True)
frameButtons = ttk.Frame(topWarning)
frameButtons.pack(side='top', expand=True, pady=(0, 10*C.scsy))
ttk.Button(frameButtons, text='Yes', command=ok).grid(row=0, column=0)
ttk.Button(frameButtons, text='Cancel',
command=lambda: topWarning.destroy()).grid(row=0, column=1)
self.wait_window(topWarning)
try:
self.menubar.entryconfig(1, state='normal')
self.menubar.entryconfig(2, state='normal')
self.menubar.entryconfig(3, state='normal')
self.menubar.entryconfig(4, state='normal')
        except tk.TclError:
pass
frame.varMessageLabel.set('Input cleared.')
frame.labelMessage.configure(foreground='navy')
def setDMSAngleUnit(self):
'''Use [deg/min/sex] as angle unit.'''
# Do nothing if DMS is already used
if not self.degAngleUnit.get():
self.dmsAngleUnit.set(1)
return None
self.dmsAngleUnit.set(1)
self.degAngleUnit.set(0)
self.frames[ImageAnalyser].updateAngle()
self.frames[FOVCalculator].setFOV()
def setDegAngleUnit(self):
'''Use [degree] as angle unit.'''
# Do nothing if deg is already used
if not self.dmsAngleUnit.get():
self.degAngleUnit.set(1)
return None
self.dmsAngleUnit.set(0)
self.degAngleUnit.set(1)
self.frames[ImageAnalyser].updateAngle()
self.frames[FOVCalculator].setFOV()
def convSig(self, val, toMag):
'''Convert between electron flux and luminance.'''
f = C.FOCAL_LENGTH[self.tnum][0] # Focal length [mm]
m = self.FLModVal # Focal length multiplier
d = C.APERTURE[self.tnum][0] # Aperture diameter [mm]
# Solid angle subtended by the aperture at the location of the sensor
omega = 2*np.pi*m*f*(1.0/(m*f) - 1.0/np.sqrt((0.5*d)**2 + (m*f)**2))
A = (C.PIXEL_SIZE[self.cnum][0]*1e-6)**2 # Pixel area [m^2]
T = 1.0 - self.TLoss # Telescope transmission factor
E = 1.986e-16/self.avgWL # Average photon energy [J]
q = C.QE[self.cnum][0] # Peak quantum efficiency
if toMag:
L_lin = val*683.0*E/(omega*A*T*q) # Luminance [cd/m^2]
L_log = -2.5*np.log10(L_lin/108000.0) # Luminance [mag/arcsec^2]
ret = L_log
else:
L_lin = 108000.0*10**(-0.4*val) # Luminance [cd/m^2]
Fe = L_lin*omega*A*T*q/(683.0*E) # Electron flux [e-/s]
ret = Fe
return ret
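
    # Note (added): the True and False branches invert the same
    # omega*A*T*q/(683*E) throughput factor, so convSig(convSig(Fe, True),
    # False) recovers the original electron flux Fe.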
def setNewFS(app, cnum, tnum, fs):
'''Change default font size and restart application.'''
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'r')
lines = file.read().split('\n')
file.close()
file = open('aplab_data{}cameradata.txt'.format(os.sep), 'w')
for line in lines[:-1]:
file.write(line + '\n')
file.write(','.join(lines[-1].split(',')[0:2]) + ', Fontsize: {:d}'.format(fs))
file.close()
app.destroy()
app = ToolManager(cnum, tnum, fs)
    app.mainloop()
"[email protected]"
] | |
8639b64d13f495471e27f7e4ecec5866ea70b372 | ad9ae5908d106201cb8d7ccec89cab7c2e188688 | /hw2/step_search.py | fce92dc9706edb296b9878d99c395a84fe48231d | [] | no_license | Howuhh/hse_optimization | 70debe571807525a61306c13f6432ab9b719cf9a | 9f24bff9f5850dae8dcf23c08f8da7f33e20fdfe | refs/heads/main | 2023-02-03T23:55:43.622498 | 2020-12-23T18:22:26 | 2020-12-23T18:22:26 | 310,073,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,404 | py | import numpy as np
from scipy.optimize import bracket, line_search
from optimize_single.golden import golden_section
from optimize_single.brent import brent
def _line_search(oracle, w, direction, optimizer, eps, max_iter):
assert w.shape == direction.shape, "diff shape for w and direction"
def f(alpha):
return oracle.value(w - alpha * direction)
xa, xb = 1, 10 # some magick numbers for which Brent works well
if f(xa) > f(xb):
xa, xb = xb, xa
brack = bracket(f, xa=xa, xb=xb)[:3]
return optimizer(f, brack, eps=eps, max_iter=max_iter)
def golden_line_search(oracle, w, direction):
# golden really expensive method, so only few iterations
return _line_search(oracle, w, direction, golden_section, 1e-5, 16)
def brent_line_search(oracle, w, direction):
return _line_search(oracle, w, direction, brent, 1e-8, 64)
def armijo_line_search(oracle, w, direction, init_alpha="mean"):
def f(alpha):
return oracle.value(w - alpha * direction)
brack = bracket(f)
x, fx = np.array(brack[:3]), np.array(brack[3:-1])
if init_alpha == "wmean":
alpha = np.mean(x * fx) # magick trick to boost armijo from 7k iterations to 2k iterations on gd
elif init_alpha == "mean":
alpha = np.mean(x) # a little less magick trick to boost armijo to 3.5k without breaking on other (not a1a) datasets
elif init_alpha == "max":
alpha = max(x)
else:
alpha = 100
c = 0.0001
fk = oracle.value(w)
grad_norm = oracle.grad(w) @ direction
i = 0
while oracle.value(w - alpha * direction) > fk + alpha * c * grad_norm and i < 10000:
alpha = alpha / 2
i += 1
return alpha
def wolfe_line_search(oracle, w, direction):
# TODO: not direction.T but gradient to wolfe
alpha = line_search(oracle.value, oracle.grad, w, -direction, direction.T)[0]
if alpha is None:
alpha = armijo_line_search(oracle, w, direction, init_alpha="max")
return alpha
def lipschitz_line_search(oracle, w, direction):
L = oracle.L # init with 1.0
dir_norm = 0.5 * direction.T @ direction
w_new = w - (1/L) * direction
while oracle.value(w_new) > oracle.value(w) - (1 / L) * dir_norm:
L = L * 2
w_new = w - (1/L) * direction
oracle.L = (L / 2)
    return 1 / oracle.L
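
# --- Added usage sketch (not part of the original module) -------------------
# How these line searches plug into a plain gradient-descent loop. The
# QuadraticOracle below is a hypothetical stand-in for the oracle objects
# assumed above: anything exposing value(w), grad(w) and a mutable Lipschitz
# estimate `L` works the same way.
class QuadraticOracle:
    def __init__(self):
        self.L = 1.0  # initial Lipschitz guess; lipschitz_line_search updates it

    def value(self, w):
        return 0.5 * w @ w

    def grad(self, w):
        return w

def gradient_descent(oracle, w0, n_steps=50):
    w = w0
    for _ in range(n_steps):
        direction = oracle.grad(w)  # step is taken along -direction, as above
        alpha = armijo_line_search(oracle, w, direction)
        w = w - alpha * direction
    return w

# e.g. gradient_descent(QuadraticOracle(), np.array([3.0, -2.0])) -> ~[0, 0]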
"[email protected]"
] | |
4afff28e71980128cc4561c1361d0d54ca414ce3 | 917d54527c85eae44dfcab13da1f583facb59c87 | /leetcode/70. Climbing Stairs.py | 524686106f213de44157d2dbef0b82bd76620239 | [] | no_license | queryor/algorithms | f73d14d18914491532b7beb9ca23146b46e038ec | 5b55e35f15c7bf098203a6aabbb7aad6b14579fa | refs/heads/master | 2021-06-19T18:38:43.462388 | 2019-08-24T03:16:58 | 2019-08-24T03:16:58 | 116,901,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | # You are climbing a stair case. It takes n steps to reach to the top.
# Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
# Note: Given n will be a positive integer.
# Example 1:
# Input: 2
# Output: 2
# Explanation: There are two ways to climb to the top.
# 1. 1 step + 1 step
# 2. 2 steps
# Example 2:
# Input: 3
# Output: 3
# Explanation: There are three ways to climb to the top.
# 1. 1 step + 1 step + 1 step
# 2. 1 step + 2 steps
# 3. 2 steps + 1 step
class Solution:
def climbStairs(self, n: int) -> int:
        # Naive recursion -- times out on LeetCode (exponential blow-up)
if n==0:
return 1
elif n==1:
return 1
else:
return self.climbStairs(n-1)+self.climbStairs(n-2)
def climbStairs1(self, n: int) -> int:
# dp
# Runtime: 48 ms, faster than 21.42% of Python3 online submissions for Climbing Stairs.
# Memory Usage: 13.3 MB, less than 5.18% of Python3 online submissions for Climbing Stairs.
if n == 0:
return 0
dp = [0 for i in range(n+1)]
dp[0] = 1
dp[1] = 1
for i in range(2,n+1):
dp[i] = dp[i-1]+dp[i-2]
        return dp[n]
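    def climbStairs2(self, n: int) -> int:
        # O(1)-space variant of the same DP (added for illustration; not part
        # of the original submission): only the last two counts are needed.
        if n == 0:
            return 0
        a, b = 1, 1  # ways to reach steps i-2 and i-1
        for _ in range(2, n + 1):
            a, b = b, a + b
        return b

# Quick check of the DP: Solution().climbStairs1(3) == 3  (1+1+1, 1+2, 2+1)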
"queryor.163.com"
] | queryor.163.com |
559c19f0eedcd31bcc48a3484331802b3dd03b6f | 96358408165144e9066d33749c436d475b3d044e | /webapp/user/forms.py | 8a6828314b3b7e1bcff7b405d1dc7fd89f0c6312 | [] | no_license | xkxixnxgx/task_for_srvhub | c742a77c5a8ffd16ec8f38297468863a9faaa3b1 | 718cc1d7aa7825dd3c614ffc5a7c24be757323c5 | refs/heads/master | 2023-06-27T21:39:16.404515 | 2021-08-02T19:09:55 | 2021-08-02T19:09:55 | 298,768,619 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Email, EqualTo, ValidationError
from webapp.user.models import User
class LoginForm(FlaskForm):
    username = StringField('Username', validators=[DataRequired()], render_kw={"class": "form-control"})
    password = PasswordField('Password', validators=[DataRequired()], render_kw={"class": "form-control"})
    remember_me = BooleanField('Remember me', default=True, render_kw={"class": "form-check-input"})
    submit = SubmitField('Submit', render_kw={"class": "btn btn-primary"})
class RegistrationForm(FlaskForm):
    username = StringField('Username', validators=[DataRequired()], render_kw={"class": "form-control"})
    password = PasswordField('Password', validators=[DataRequired()], render_kw={"class": "form-control"})
    password2 = PasswordField('Repeat password', validators=[DataRequired(), EqualTo('password')], render_kw={"class": "form-control"})
    submit = SubmitField('Submit!', render_kw={"class": "btn btn-primary"})
    def validate_username(self, username):
        users_count = User.query.filter_by(username=username.data).count()
        if users_count > 0:
            raise ValidationError('A user with this username is already registered')

class Node(object):
def __init__(self, character):
self.character = character
self.middleNode = None
self.leftNode = None
self.rightNode = None
self.value = 0
class TST(object):
def __init__(self):
self.rootNode = None
def put(self, key, value):
self.rootNode = self.putItem(self.rootNode, key, value, 0)
def putItem(self, node, key, value, index):
c = key[index]
if node == None:
node = Node(c)
if c < node.character:
node.leftNode = self.putItem(node.leftNode, key, value, index)
elif c > node.character:
node.rightNode = self.putItem(node.rightNode, key, value, index)
elif index < len(key) - 1:
node.middleNode = self.putItem(node.middleNode, key, value, index+1)
else:
node.value = value
return node
def get(self, key):
node = self.getItem(self.rootNode, key, 0)
if node == None:
return -1
return node.value
def getItem(self, node, key, index):
if node == None:
return None
c = key[index]
if c < node.character:
return self.getItem(node.leftNode, key, index)
elif c > node.character:
return self.getItem(node.rightNode, key, index)
elif index < len(key) - 1:
return self.getItem(node.middleNode, key, index+1)
else:
return node
if __name__ == "__main__":
tst = TST()
tst.put("apple", 100)
tst.put("orange", 200)
print(tst.get("orange"))

import re
import dir
import count
def extract_unigram(path):
f = open(path,'r')
data = f.read()
f.close()
return re.split('\W+', data)
def extract_unigram_dir(dir_path):
filenames = dir.filenames_in_dir(dir_path)
unigrams_list = []
for filename in filenames:
unigrams_list.append(extract_unigram(filename))
return unigrams_list
def extract_most_appeared_unigrams_dir(target_dir,count_num):
unigrams_list = extract_unigram_dir(target_dir)
counting_dict={}
for unigrams in unigrams_list:
count.count_single_list(unigrams,counting_dict)
top_list = count.most_appreared_in_dict(dict=counting_dict,top_num=count_num)
return top_list,unigrams_list
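
# Minimal usage sketch (added; assumes the project-local `dir` and `count`
# helper modules imported above are on the path):
#   top, all_unigrams = extract_most_appeared_unigrams_dir('reports/', 50)
#   # `top` holds the 50 most frequent tokens across every report in the dir.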

# coding=utf-8
from urllib.request import Request, urlopen
from urllib import error
import re
import _thread
import time
# Qiushibaike (joke site) crawler class
class QSBK:
    # Initialize state
    def __init__(self):
        self.pageIndex = 1
        self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        # Default request headers
        self.headers = { 'User-Agent':self.user_agent }
        # Buffer of jokes; each element holds one page of jokes
        self.stories=[]
        # Flag controlling whether the program keeps running
        self.enable=False
    # Fetch the page source for the given page index
    def getPage(self,pageIndex):
        try:
            url = 'http://www.qiushibaike.com/8hr/page/'+str(pageIndex)+'/'
            request=Request(url,headers=self.headers)
            response=urlopen(request)
            pageCode=response.read().decode('utf-8')
            return pageCode
        except error.URLError as e:
            print('Failed to connect to Qiushibaike; reason:',e.reason)
            return None
    # Parse one page into a list of jokes
    def getPageItems(self,pageIndex):
        pageCode=self.getPage(pageIndex)
        pageStories=[]
        if not pageCode:
            print('Page failed to load...')
            return None
        pattern = re.compile('<div class="author.*?<h2>(.*?)<.*?<span>(.*?)<.*?<i class="number">(.*?)<', re.S)
        items = re.findall(pattern, pageCode)
        for i in items:
            replaceBR=re.compile('<br/>')
            text=re.sub(replaceBR,'\n',i[1])
            # i[0] is the author, i[1] the content, i[2] the vote count
            pageStories.append([i[0].strip(),text.strip(),i[2].strip()])
        return pageStories
    # Load a new page and append its parsed jokes to the buffer
    def loadPage(self):
        # If fewer than 2 unread pages remain, load a new one
        if self.enable==True:
            if len(self.stories)<2:
                # Fetch a new page
                pageStories=self.getPageItems(self.pageIndex)
                # Store that page's jokes in the global list
                if pageStories:
                    self.stories.append(pageStories)
                    # Advance the index so the next read fetches the next page
                    self.pageIndex+=1
    # Print one joke per press of the Enter key
    def getOneStory(self,pageStories,page):
        # Iterate over the jokes on this page
        for story in pageStories:
            # Wait for user input
            input_0=input()
            # On each Enter press, decide whether a new page must be loaded
            self.loadPage()
            # Quit if the user enters Q
            if input_0=='Q':
                self.enable=False
                return
            print(u'Page %d\tAuthor: %s\tContent: %s\tVotes: %s\n' % (page,story[0],story[1],story[2]))
    # Entry point
    def start(self):
        print('Reading Qiushibaike; press Enter for a new joke, or Q to quit')
        # Set the flag so the main loop runs
        self.enable=True
        # Preload one page of content
        self.loadPage()
        # Local counter for the page currently being read
        newPage=0
        while self.enable:
            if len(self.stories)>0:
                # Take one page of jokes from the buffer
                pageStories=self.stories[0]
                # Advance the page counter
                newPage += 1
                # Drop the consumed page from the buffer, since it was taken out
                del self.stories[0]
                # Print that page's jokes
                self.getOneStory(pageStories,newPage)
spider=QSBK()
spider.start()
#page=1
#url = 'http://www.qiushibaike.com/8hr/page/'+str(page)+'/'
#user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
#headers = {'User-Agent': user_agent}
#try:
#request = Request(url,headers=headers)
#response = urlopen(request)
#content = response.read().decode('utf-8')
#pattern = re.compile('<div class="author.*?<h2>(.*?)<.*?<span>(.*?)<.*?<i class="number">(.*?)<', re.S)
#items = re.findall(pattern, content)
#for i in items:
#print(i[0], i[1], i[2])
#except error.URLError as e:
#print(e.reason)

# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class DataPipeline(object):
def process_item(self, item, spider):
return item
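
# To activate this pipeline, Scrapy's standard wiring applies (sketch; the
# dotted path is inferred from this file's location):
#   ITEM_PIPELINES = {'data.pipelines.DataPipeline': 300}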

"""
Django settings for dashboard project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'rtn#octnyvr#!y=bf!t(-fe8!x*&$4s-59@*pyfqe%_olmg2d4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core.apps.CoreConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dashboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dashboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'benchmarks', # Database name
'USER': 'lvo', # PostgreSQL username
'PASSWORD': '', # PostgreSQL password
'HOST': 'localhost', # Database server
'PORT': '', # Database port (leave blank for default)
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# TEMPLATE = (os.path.join(BASE_DIR, 'templates'),)

from __future__ import division
import csv
import json
import os
import urllib
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from decimal import Decimal
from itertools import groupby
import boto
from boto.s3.key import Key
import grequests
import requests
from flask import abort, Flask, redirect, render_template, request, session
from gevent.pywsgi import WSGIServer
from werkzeug.contrib.cache import MemcachedCache
import local
log_cache = os.path.join(os.path.dirname(__file__), 'cache')
cache = MemcachedCache([os.getenv('MEMCACHE_URL', 'localhost:11211')])
app = Flask(__name__)
servers = {
'dev': 'https://marketplace-dev.allizom.org',
'stage': 'https://marketplace.allizom.org',
'prod': 'https://marketplace.firefox.com'
}
api = {
'tiers': '/api/v1/webpay/prices'
}
regions = {
1: 'Worldwide', 2: 'US', 4: 'UK', 7: 'Brazil', 8: 'Spain', 9: 'Colombia',
10: 'Venezuela', 11: 'Poland', 12: 'Mexico', 13: 'Hungary', 14: 'Germany'
}
methods = {
0: 'operator',
1: 'card',
2: 'both'
}
regions_sorted = sorted(regions.keys())
builds = {
'jenkins': ['solitude', 'marketplace', 'marketplace-api',
'marketplace-webpay', 'amo-master', 'solitude'],
'travis': ['andymckay/receipts', 'mozilla/fireplace',
'andymckay/django-paranoia', 'andymckay/curling',
'andymckay/django-statsd']
}
statuses = {
'0': ['pending', 'null'],
'1': ['completed', 'success'],
'2': ['checked', 'info'],
'3': ['received', 'info'],
'4': ['failed', 'important'],
'5': ['cancelled', 'warning'],
}
def notify(msg, *args):
    esc = urllib.urlencode({'args': args}, doseq=True)  # repeat as ?args=a1&args=a2
url = 'https://notify.paas.allizom.org/notify/{0}/?{1}'.format(msg, esc)
requests.post(url, headers={'Authorization':
'Basic {0}'.format(local.NOTIFY_AUTH)})
@app.route('/')
def base(name=None):
return render_template('index.html', name=name)
def get_jenkins(keys, results):
reqs = []
for key in keys:
url = ('https://ci.mozilla.org/job/{0}/lastCompletedBuild/api/json'
.format(key))
reqs.append(grequests.get(url, headers={'Accept': 'application/json'}))
resps = grequests.map(reqs)
for key, resp in zip(keys, resps):
results['results'][key] = resp.json()['result'] == 'SUCCESS'
return results
def get_travis(keys, results):
reqs = []
for key in keys:
url = ('https://api.travis-ci.org/repositories/{0}.json'
.format(key))
reqs.append(grequests.get(url, headers={'Accept': 'application/json'}))
resps = grequests.map(reqs)
for key, resp in zip(keys, resps):
results['results'][key] = resp.json()['last_build_result'] == 0
return results
def get_build():
result = cache.get('build')
if not result:
result = {'when': datetime.now(), 'results': {}}
get_jenkins(builds['jenkins'], result)
get_travis(builds['travis'], result)
cache.set('build', result, timeout=60 * 5)
result['results'] = OrderedDict(sorted(result['results'].items()))
passing = all(result['results'].values())
last = bool(cache.get('last-build'))
cache.set('last-build', True)
if last is None:
cache.set('last-build', bool(passing))
elif last != passing:
#notify('builds', 'passing' if passing else 'failing')
cache.set('last-build', bool(passing))
return result, passing
@app.route('/build/')
def build():
result, passing = get_build()
if 'application/json' in request.headers['Accept']:
result['when'] = result['when'].isoformat()
return json.dumps({'all': passing, 'result': result})
return render_template('build.html', result=result, request=request,
all=passing)
def fill_tiers(result):
for tier in result['objects']:
prices = {}
for price in tier['prices']:
prices[price['region']] = price
tier['prices'] = prices
return result
@app.route('/tiers/')
@app.route('/tiers/<server>/')
def tiers(server=None):
if server:
res = requests.get('{0}{1}'.format(
servers[server], api['tiers']))
result = fill_tiers(res.json())
return render_template('tiers.html', result=result['objects'],
regions=regions, sorted=regions_sorted,
methods=methods, server=server)
return render_template('tiers.html')
def s3_get(server, src_filename, dest_filename):
conn = boto.connect_s3(local.S3_AUTH[server]['key'],
local.S3_AUTH[server]['secret'])
bucket = conn.get_bucket(local.S3_BUCKET[server])
k = Key(bucket)
k.key = src_filename
k.get_contents_to_filename(os.path.join(log_cache, dest_filename))
def list_to_dict_multiple(listy):
return reduce(lambda x, (k,v): x[k].append(v) or x, listy, defaultdict(list))
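
# e.g. list_to_dict_multiple([('USD', 1), ('EUR', 2), ('USD', 3)])
#      -> defaultdict(list, {'USD': [1, 3], 'EUR': [2]})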
@app.route('/transactions/')
@app.route('/transactions/<server>/<date>/')
def transactions(server=None, date=''):
if not session.get('mozillian'):
abort(403)
sfmt = '%Y-%m-%d'
lfmt = sfmt + 'T%H:%M:%S'
today = datetime.today()
dates = (('-1 day', (today - timedelta(days=1)).strftime(sfmt)),
('-2 days', (today - timedelta(days=2)).strftime(sfmt)))
if server and date:
date = datetime.strptime(date, sfmt)
src_filename = date.strftime(sfmt) + '.log'
dest_filename = date.strftime(sfmt) + '.' + server + '.log'
if dest_filename not in os.listdir(log_cache):
s3_get(server, src_filename, dest_filename)
src = os.path.join(log_cache, dest_filename)
with open(src) as csvfile:
rows = []
stats = defaultdict(list)
for row in csv.DictReader(csvfile):
row['created'] = datetime.strptime(row['created'], lfmt)
row['modified'] = datetime.strptime(row['modified'], lfmt)
row['diff'] = row['modified'] - row['created']
if row['diff']:
stats['diff'].append(row['diff'].total_seconds())
stats['status'].append(row['status'])
if row['currency'] and row['amount']:
stats['currencies'].append((row['currency'],
Decimal(row['amount'])))
rows.append(row)
if len(stats['diff']):
stats['mean'] = '%.2f' % (sum(stats['diff'])/len(stats['diff']))
for status, group in groupby(sorted(stats['status'])):
group = len(list(group))
perc = (group / len(stats['status'])) * 100
stats['statuses'].append((str(status), '%.2f' % perc))
stats['currencies'] = list_to_dict_multiple(stats['currencies'])
for currency, items in stats['currencies'].items():
stats['currencies'][currency] = {'items': items}
stats['currencies'][currency]['count'] = len(items)
mean = (sum(items) / len(items))
stats['currencies'][currency]['mean'] = '%.2f' % mean
return render_template('transactions.html', rows=rows,
server=server, dates=dates, stats=stats,
statuses=statuses, filename=dest_filename)
return render_template('transactions.html', dates=dates)
@app.errorhandler(500)
def page_not_found(err):
return render_template('500.html', err=err), 500
@app.errorhandler(403)
def page_not_allowed(err):
return render_template('403.html', err=err), 403
@app.route('/auth/login', methods=['POST'])
def login():
# The request has to have an assertion for us to verify
if 'assertion' not in request.form:
abort(400)
# Send the assertion to Mozilla's verifier service.
data = {'assertion': request.form['assertion'],
'audience': 'https://metaplace.paas.allizom.org/'}
resp = requests.post('https://verifier.login.persona.org/verify',
data=data, verify=True)
# Did the verifier respond?
if resp.ok:
# Parse the response
verification_data = json.loads(resp.content)
# Check if the assertion was valid
if verification_data['status'] == 'okay':
# Log the user in by setting a secure session cookie
session.update({
'email': verification_data['email'],
'mozillian': verification_data['email'] in local.MOZS
})
return 'You are logged in'
abort(500)
@app.route('/auth/logout', methods=['POST', 'GET'])
def logout():
session.update({'email': None, 'mozillian': False})
return 'You are logged out'
@app.after_request
def after_request(response):
response.headers.add('Strict-Transport-Security', 'max-age=31536000')
return response
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.secret_key = local.SECRET
http = WSGIServer(('0.0.0.0', port), app)
http.serve_forever()

# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ZingPipeline(object):
def process_item(self, item, spider):
return item

import torch
import torch.nn as nn
import torch.optim as optim
from torch import cuda
from data_loader import *
from model import *
import time
import datetime
import os
from utils.util import *
from torch.backends import cudnn
from scipy.linalg import block_diag
class Run(object):
def __init__(self, config):
self.data_loader = get_loader(config['DATA_PATH'],
crop_size=config['CROP_SIZE'], resize=config['RESIZE'],
batch_size=config['BATCH_SIZE'], dataset=config['DATASET'],
mode=config['MODE'], num_workers=config['NUM_WORKERS'])
self.config = config
self.device = torch.device("cuda:%d" % (int(config['GPU1'])) if torch.cuda.is_available() else "cpu")
print(self.device)
C = self.config['G']['CONTENT_DIM'] # The number of channels of the content feature.
n_mem = C // self.config['N_GROUP'] # The number of blocks in the coloring matrix: G, The number of elements for each block: n_members^2
self.mask = self.get_block_diagonal_mask(n_mem) # This is used in generators to make the coloring matrix the block diagonal form.
self.make_dir()
self.init_network()
self.loss = {}
print(config)
if config['LOAD_MODEL']:
self.load_pretrained_model(self.config['START'])
def get_block_diagonal_mask(self, n_member):
G = self.config['N_GROUP']
ones = np.ones((n_member,n_member)).tolist()
mask = block_diag(ones,ones)
for i in range(G-2):
mask = block_diag(mask,ones)
return torch.from_numpy(mask).to(self.device).float()
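
    # Shape illustration (added): with N_GROUP = 2 and n_member = 2 the mask is
    #   [[1, 1, 0, 0],
    #    [1, 1, 0, 0],
    #    [0, 0, 1, 1],
    #    [0, 0, 1, 1]]
    # i.e. every coloring-matrix entry outside the diagonal blocks is zeroed.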
def make_dir(self):
if not os.path.exists(self.config['MODEL_SAVE_PATH']):
os.makedirs(self.config['MODEL_SAVE_PATH'])
def init_network(self):
"""Create a generator and a discriminator."""
G_opts = self.config['G']
D_opts = self.config['D']
self.G_A = Generator(G_opts['FIRST_DIM'], G_opts['N_RES_BLOCKS'], self.mask, self.config['N_GROUP'],
G_opts['MLP_DIM'], G_opts['BIAS_DIM'], G_opts['CONTENT_DIM'], self.device)
self.G_B = Generator(G_opts['FIRST_DIM'], G_opts['N_RES_BLOCKS'], self.mask, self.config['N_GROUP'],
G_opts['MLP_DIM'], G_opts['BIAS_DIM'], G_opts['CONTENT_DIM'], self.device)
G_params = list(self.G_A.parameters()) + list(self.G_B.parameters()) # + list(blah)
self.G_optimizer = torch.optim.Adam([p for p in G_params if p.requires_grad], self.config['G_LR'], [self.config['BETA1'], self.config['BETA2']], weight_decay=self.config['WEIGHT_DECAY'])
self.G_scheduler = get_scheduler(self.G_optimizer, config)
self.G_A.apply(weights_init(self.config['INIT']))
self.G_B.apply(weights_init(self.config['INIT']))
if self.config['MODE'] == 'train':
self.D_A = Discriminator(3, D_opts)
self.D_B = Discriminator(3, D_opts)
D_params = list(self.D_A.parameters()) + list(self.D_B.parameters())
self.D_optimizer = torch.optim.Adam([p for p in D_params if p.requires_grad], self.config['D_LR'], [self.config['BETA1'], self.config['BETA2']], weight_decay=self.config['WEIGHT_DECAY'])
self.D_scheduler = get_scheduler(self.D_optimizer, config)
self.D_A.apply(weights_init('gaussian'))
self.D_B.apply(weights_init('gaussian'))
# print_network(self.G, 'G')
# print_network(self.D, 'D')
self.set_gpu()
def set_gpu(self):
def multi_gpu(gpu1, gpu2, model):
model = nn.DataParallel(model, device_ids=[gpu1, gpu2])
return model
gpu1 = int(self.config['GPU1'])
gpu2 = int(self.config['GPU2'])
if self.config['DATA_PARALLEL']:
self.G_A = multi_gpu(gpu1, gpu2, self.G_A)
self.G_B = multi_gpu(gpu1, gpu2, self.G_B)
if self.config['MODE'] == 'train':
self.D_A = multi_gpu(gpu1, gpu2, self.D_A)
self.D_B = multi_gpu(gpu1, gpu2, self.D_B)
self.G_A.to(self.device)
self.G_B.to(self.device)
if self.config['MODE'] == 'train':
self.D_A.to(self.device)
self.D_B.to(self.device)
def l1_criterion(self, input, target):
return torch.mean(torch.abs(input - target))
def reg(self, x_arr):
# whitening_reg: G,C//G,C//G
I = torch.eye(x_arr[0][0].size(1)).unsqueeze(0).to(self.device) # 1,C//G,C//G
loss = torch.FloatTensor([0]).to(self.device)
for x in x_arr:
x = torch.cat(x,dim=0) # G*(# of style),C//G,C//G
loss = loss + torch.mean(torch.abs(x-I))
return loss / len(x_arr)
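
    # The value above is the mean elementwise deviation of each
    # (C//G, C//G) whitening/coloring block from the identity, pushing the
    # learned matrices toward (block-)orthogonality.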
def model_save(self, iteration):
self.G_A = self.G_A.cpu()
self.G_B = self.G_B.cpu()
self.D_A = self.D_A.cpu()
self.D_B = self.D_B.cpu()
torch.save(self.G_A.state_dict(),
os.path.join(self.config['MODEL_SAVE_PATH'], 'G_A_%s_%d.pth' % (self.config['SAVE_NAME'],iteration)))
torch.save(self.G_B.state_dict(),
os.path.join(self.config['MODEL_SAVE_PATH'], 'G_B_%s_%d.pth' % (self.config['SAVE_NAME'],iteration)))
torch.save(self.D_A.state_dict(),
os.path.join(self.config['MODEL_SAVE_PATH'], 'D_A_%s_%d.pth' % (self.config['SAVE_NAME'],iteration)))
torch.save(self.D_B.state_dict(),
os.path.join(self.config['MODEL_SAVE_PATH'], 'D_B_%s_%d.pth' % (self.config['SAVE_NAME'],iteration)))
self.set_gpu()
def load_pretrained_model(self, iteration):
self.G_A.load_state_dict(torch.load(os.path.join(
self.config['MODEL_SAVE_PATH'], 'G_A_%s_%d.pth' % (self.config['SAVE_NAME'], iteration))))
self.G_B.load_state_dict(torch.load(os.path.join(
self.config['MODEL_SAVE_PATH'], 'G_B_%s_%d.pth' % (self.config['SAVE_NAME'], iteration))))
if self.config['MODE'] == 'train':
self.D_A.load_state_dict(torch.load(os.path.join(
self.config['MODEL_SAVE_PATH'], 'D_A_%s_%d.pth' % (self.config['SAVE_NAME'], iteration))))
self.D_B.load_state_dict(torch.load(os.path.join(
self.config['MODEL_SAVE_PATH'], 'D_B_%s_%d.pth' % (self.config['SAVE_NAME'], iteration))))
def update_learning_rate(self):
if self.G_scheduler is not None:
self.G_scheduler.step()
if self.D_scheduler is not None:
self.D_scheduler.step()
def train_ready(self):
self.G_A.train()
self.G_B.train()
self.D_A.train()
self.D_B.train()
def test_ready(self):
self.G_A.eval()
self.G_B.eval()
def clamping_alpha(self,G):
for gdwct in G.decoder.gdwct_modules:
gdwct.alpha.data.clamp_(0,1)
def update_G(self, x_A, x_B, isTrain=True):
G_A = self.G_A.module if self.config['DATA_PARALLEL'] else self.G_A
G_B = self.G_B.module if self.config['DATA_PARALLEL'] else self.G_B
self.clamping_alpha(G_A)
self.clamping_alpha(G_B)
'''
### 1st stage
# cov_reg: G,C//G,C//G
# W_reg: B*G,C//G,C//G
'''
# get content
c_A = G_A.c_encoder(x_A)
c_B = G_B.c_encoder(x_B)
# get style
s_A = G_A.s_encoder(x_A)
s_B = G_B.s_encoder(x_B)
# from A to B
x_AB, whitening_reg_AB, coloring_reg_AB = G_B(c_A, s_B)
# from B to A
x_BA, whitening_reg_BA, coloring_reg_BA = G_A(c_B, s_A)
if isTrain:
'''
### 2nd stage
'''
c_BA = G_A.c_encoder(x_BA)
c_AB = G_B.c_encoder(x_AB)
s_AB = G_B.s_encoder(x_AB)
s_BA = G_A.s_encoder(x_BA)
# from AB to A
x_ABA, whitening_reg_ABA, coloring_reg_ABA = G_A(c_AB, s_BA)
# from BA to B
x_BAB, whitening_reg_BAB, coloring_reg_BAB = G_B(c_BA, s_AB)
# from A to A
x_AA, _, _ = G_A(c_A, s_A)
# from B to B
x_BB, _, _ = G_B(c_B, s_B)
# Compute the losses
g_loss_fake = self.D_A.calc_gen_loss(x_BA) + self.D_B.calc_gen_loss(x_AB)
loss_cross_rec = self.l1_criterion(x_ABA, x_A) + self.l1_criterion(x_BAB, x_B)
loss_ae_rec = self.l1_criterion(x_AA, x_A) + self.l1_criterion(x_BB, x_B)
loss_cross_s = self.l1_criterion(s_AB, s_B) + self.l1_criterion(s_BA, s_A)
loss_cross_c = self.l1_criterion(c_AB, c_A) + self.l1_criterion(c_BA, c_B)
loss_whitening_reg = self.reg([whitening_reg_AB, whitening_reg_BA, whitening_reg_ABA, whitening_reg_BAB])
loss_coloring_reg = self.reg([coloring_reg_AB, coloring_reg_BA, coloring_reg_ABA, coloring_reg_BAB])
# Backward and optimize.
g_loss = g_loss_fake + \
self.config['LAMBDA_X_REC'] * (loss_ae_rec) + \
self.config['LAMBDA_X_CYC'] * loss_cross_rec + \
self.config['LAMBDA_S'] * loss_cross_s + \
self.config['LAMBDA_C'] * loss_cross_c + \
self.config['LAMBDA_W_REG'] * loss_whitening_reg + \
self.config['LAMBDA_C_REG'] * loss_coloring_reg
self.G_optimizer.zero_grad()
g_loss.backward()
self.G_optimizer.step()
# Logging.
self.loss['G/loss_fake'] = g_loss_fake.item()
self.loss['G/loss_cross_rec'] = self.config['LAMBDA_X_REC']* loss_cross_rec.item()
self.loss['G/loss_ae_rec'] = self.config['LAMBDA_X_REC'] * loss_ae_rec.item()
self.loss['G/loss_latent_c'] = self.config['LAMBDA_C'] * loss_cross_c.item()
self.loss['G/loss_latent_s'] = self.config['LAMBDA_S'] * loss_cross_s.item()
self.loss['G/loss_whitening_reg'] = self.config['LAMBDA_W_REG'] * loss_whitening_reg.item()
self.loss['G/loss_coloring_reg'] = self.config['LAMBDA_C_REG'] * loss_coloring_reg.item()
return (x_AB, x_BA)
def update_D(self, x_A, x_B):
c_A = self.G_A.c_encoder(x_A)
c_B = self.G_B.c_encoder(x_B)
s_A = self.G_A.s_encoder(x_A)
s_B = self.G_B.s_encoder(x_B)
x_AB, _, _ = self.G_B(c_A, s_B)
x_BA, _, _ = self.G_A(c_B, s_A)
# D loss
d_loss_a = self.D_A.calc_dis_loss(x_BA.detach(), x_A)
d_loss_b = self.D_B.calc_dis_loss(x_AB.detach(), x_B)
d_loss = d_loss_a + d_loss_b
self.D_optimizer.zero_grad()
d_loss.backward()
self.D_optimizer.step()
self.loss['D/loss'] = d_loss.item()
def train(self):
data_loader = self.data_loader
print('# iters: %d' % (len(data_loader)))
print('# data: %d' % (len(data_loader)*self.config['BATCH_SIZE']))
# Fetch fixed inputs for debugging.
data_iter = iter(data_loader)
self.train_ready()
print("Start training ~ Ayo:)!")
start_time = time.time()
for i in range(self.config['START'], self.config['NUM_ITERS']):
### Preprocess input data ###
# Fetch real images and labels.
try:
x_A, x_B = next(data_iter)
if x_A.size(0) != self.config['BATCH_SIZE'] or x_B.size(0) != self.config['BATCH_SIZE']:
x_A, x_B = next(data_iter)
except:
data_iter = iter(data_loader)
x_A, x_B = next(data_iter)
if x_A.size(0) != self.config['BATCH_SIZE'] or x_B.size(0) != self.config['BATCH_SIZE']:
x_A, x_B = next(data_iter)
x_A = x_A.to(self.device) # Input images.
x_B = x_B.to(self.device) # Exemplar images corresponding with target labels.
### Training ###
self.update_D(x_A, x_B)
(x_AB, x_BA)= \
self.update_G(x_A, x_B)
### ETC ###
if i % self.config['SAVE_EVERY'] == 0:
elapsed = time.time() - start_time
elapsed = str(datetime.timedelta(seconds=elapsed))
print('=====================================================')
print("Elapsed [{}], Iter [{}/{}]".format(
elapsed, i, self.config['NUM_ITERS']))
print('=====================================================')
print('D/loss: %.5f' % (self.loss['D/loss']))
print('G/loss_fake: %.5f' % (self.loss['G/loss_fake']))
print('G/loss_cross_rec: %.5f' % (self.loss['G/loss_cross_rec']))
print('G/loss_ae_rec: %.5f' % (self.loss['G/loss_ae_rec']))
print('G/loss_latent_s: %.5f' % (self.loss['G/loss_latent_s']))
print('G/loss_latent_c: %.5f' % (self.loss['G/loss_latent_c']))
print('G/loss_whitening_reg: %.5f' % (self.loss['G/loss_whitening_reg']))
print('G/loss_coloring_reg: %.5f' % (self.loss['G/loss_coloring_reg']))
save_img([x_A, x_AB, x_B, x_BA], self.config['SAVE_NAME'], i, 'train_results')
self.model_save(i)
if i > self.config['NUM_ITERS_DECAY']:
self.update_learning_rate()
def test(self):
print("test start")
self.test_ready()
data_loader = self.data_loader
with torch.no_grad():
for i, (x_A, x_B) in enumerate(data_loader):
x_A = x_A.to(self.device)
x_B = x_B.to(self.device)
x_AB, x_BA = \
self.update_G(x_A, x_B, isTrain=False)
save_img([x_A, x_B, x_AB, x_BA], self.config['SAVE_NAME'], i, 'test_results')
def main():
# For fast training
cudnn.benchmark = True
run = Run(config)
if config['MODE'] == 'train':
run.train()
else:
run.test()
config = get_config('configs/config.yaml')
main()
"[email protected]"
] | |
219b7662507a1b3abed61f530b5614f51208739a | 0ea49aa13a836bc01f8b9699c84891701c9b14f1 | /navermovie/crawler.py | c170e58cfdcd75e133c07293dbb57cb3e1dcd167 | [] | no_license | spyria2019/tensorsppark_1 | 8a7221715754d07982a7215d8c6aaa9d305892fe | acc4a198e2db3507879eb72416701f5048da0af4 | refs/heads/master | 2020-05-30T11:34:42.841744 | 2019-06-01T09:20:31 | 2019-06-01T09:20:31 | 189,706,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
class NaverMovie:
def __init__(self,url):
driver = webdriver.Chrome('chromedriver')
driver.get(url)
soup = BeautifulSoup(driver.page_source,'html.parser')
        print(soup)
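
# Usage sketch (illustrative URL; requires chromedriver on the PATH):
#   NaverMovie('https://movie.naver.com/movie/running/current.nhn')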
"[email protected]"
] | |
c09e44bf3f8ca85322fb2a0c4302e0c853f6287c | 670194acaaeca4eb86f60d0410b43631db2cd592 | /model/contact.py | 01d1e2615cde74bf20ad90a3e51e7922193a9082 | [] | no_license | anakomissarova/python_training | 4c2d202470e512607a51cfd9145467442a6a8151 | 663bb3e9e7eefe6a711691b05a733b393eea1e13 | refs/heads/main | 2023-02-27T08:46:07.528234 | 2021-02-08T17:15:49 | 2021-02-08T17:15:49 | 318,516,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | from sys import maxsize
class Contact:
def __init__(self, contact_id=None, firstname=None, middlename=None, lastname=None, address=None,
mobile=None, home=None, work=None, secondary=None, phones_from_homepage=None,
email1=None, email2=None, email3=None, emails_from_homepage=None):
self.contact_id = contact_id
self.firstname = firstname
self.middlename = middlename
self.address = address
self.lastname = lastname
self.mobile = mobile
self.home = home
self.work = work
self.secondary = secondary
self.phones_from_homepage = phones_from_homepage
self.email1 = email1
self.email2 = email2
self.email3 = email3
self.emails_from_homepage = emails_from_homepage
def __eq__(self, other):
return (self.contact_id == other.contact_id or self.contact_id is None or other.contact_id is None) and \
self.lastname == other.lastname and \
self.firstname == other.firstname
def __repr__(self):
return "{0}: {1} {2}".format(self.contact_id, self.lastname, self.firstname)
def compare_ids(self):
return int(self.contact_id) if self.contact_id else maxsize
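
# compare_ids is intended as a sort key, e.g.:
#   sorted(contacts, key=Contact.compare_ids)
# contacts without an id sort last (maxsize).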

from document.models import Document
from document.states import *
from django.utils import timezone
import logging
logger = logging.getLogger('u2.agent_base')
agents = []
def add_agent(agent):
agents.append(agent)
class Agent(object):
AGENT_NAME = "ABC"
AGENT_VERSION = 1
def agent_get_existing(self, agent_name, agent_version, agent_repository_url):
# check if this specific download exists already
results = Document.objects.filter(agent_name = agent_name,
#agent_version = agent_version,
agent_repository_url = agent_repository_url) #.exclude(agent_state = STATE_IN_PROGRESS)
# we exclude in progress, as that might be an indicator of previous runs where things went wrong
if len(results)>0:
logger.info("Found existing entry %s %s %s" % (agent_name, agent_version, agent_repository_url))
return results
def create_new_document(self, repository_url, jdata = {}):
d = Document()
dups = self.agent_get_existing(self.AGENT_NAME, self.AGENT_VERSION, repository_url)
if dups:
return None
# to do - Handle IN-PROGRESS dups that are now explicitely not included in agent_get_existing()
# d.agent_state = STATE_DUP
# d.agent_dup = dups[0]
# logger.info("Duplicate found %s - %s" % (thesis_url, dups[0]))
# d.save()
# continue
d.agent_name = self.AGENT_NAME
d.agent_version = self.AGENT_VERSION
d.agent_repository_url = repository_url
d.agent_date = timezone.now()
d.status = STATE_WAITING
# d.agent_json_data = json.dumps(jdata)
d.save()
return d
def import_catalog(self):
pass
def import_doc(self, doc):
pass
def __repr__(self):
return self.AGENT_NAME + "-" + str(self.AGENT_VERSION)

#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
class BlockchainTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-stopatheight=207', '-prune=1']]
def run_test(self):
self._test_getblockchaininfo()
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
assert self.nodes[0].verifychain(4, 0)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'bip9_softforks',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'initialblockdownload',
'mediantime',
'pruned',
'size_on_disk',
'softforks',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
# size_on_disk should be > 0
assert_greater_than(res['size_on_disk'], 0)
# pruneheight should be greater or equal to 0
assert_greater_than_or_equal(res['pruneheight'], 0)
# check other pruning fields given that prune=1
assert res['pruned']
assert not res['automatic_pruning']
self.restart_node(0, ['-stopatheight=207'])
res = self.nodes[0].getblockchaininfo()
# should have exact keys
assert_equal(sorted(res.keys()), keys)
self.restart_node(0, ['-stopatheight=207', '-prune=550'])
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if prune=550
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))
# check related fields
assert res['pruned']
assert_equal(res['pruneheight'], 0)
assert res['automatic_pruning']
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
chaintxstats = self.nodes[0].getchaintxstats(1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
b200_hash = self.nodes[0].getblockhash(200)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
assert_equal(chaintxstats['txcount'], 201)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
assert_equal(chaintxstats['window_block_count'], 199)
assert_equal(chaintxstats['window_tx_count'], 199)
assert_equal(chaintxstats['window_interval'], time_diff)
assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
assert_equal(chaintxstats['txcount'], 2)
assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
assert_equal(chaintxstats['window_block_count'], 0)
assert('window_tx_count' not in chaintxstats)
assert('window_interval' not in chaintxstats)
assert('txrate' not in chaintxstats)
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, 201)
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 17000),
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0),
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
assert_equal(res['total_amount'], res3['total_amount'])
assert_equal(res['transactions'], res3['transactions'])
assert_equal(res['height'], res3['height'])
assert_equal(res['txouts'], res3['txouts'])
assert_equal(res['bogosize'], res3['bogosize'])
assert_equal(res['bestblock'], res3['bestblock'])
assert_equal(res['hash_serialized_2'], res3['hash_serialized_2'])
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generate(6)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generate(1)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0)
assert_equal(self.nodes[0].getblockcount(), 207)
if __name__ == '__main__':
BlockchainTest().main()

# Given 2 int arrays, a and b, each length 3, return a new array length 2
# containing their middle elements.
def middle_way(a, b):
return [a[len(a) // 2], b[len(b) // 2]]
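
# Added checks:
#   middle_way([1, 2, 3], [4, 5, 6])  ->  [2, 5]
#   middle_way([7, 7, 7], [3, 8, 0])  ->  [7, 8]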

n = int(input())
cnt = 0
a = [int(i) for i in input().split()]
for i in range(1, n - 1):
if a[i] > a[i + 1] and a[i - 1] < a[i]:
cnt += 1
print(cnt)
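
# The loop counts strict local maxima; e.g. for input "5" and
# "1 4 2 5 3" it prints 2 (the peaks 4 and 5).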

"""
Exercise: define a generator function my_enumerate that reproduces the
behavior below, pairing each element with its index in a tuple.
list01 = [3, 4, 55, 6, 7]
for item in enumerate(list01):
    # (index, element)
print(item)
for index, element in enumerate(list01):
print(index, element)
"""
def my_enumerate(iterable_target):
index = 0
for item in iterable_target:
yield (index, item)
index +=1
# for index in range(len(iterable_target)):
# yield (index,iterable_target[index])
list01 = [3, 4, 55, 6, 7]
for index, element in my_enumerate(list01):
print(index, element)
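# Expected output:
# 0 3
# 1 4
# 2 55
# 3 6
# 4 7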

# x = [30, 20, 10, 40]
# print(min(x))
# print(max(x))
# x = (30, 20, 10, 40)
# print(min(x))
# x = {'b':20, 'c':30, 'a':10, 'd':40}
# print(min(x))
# x = {'b':20, 'c':30, 'a':10, 'd':40}
# print(min(x.values()))
# print(max(x.values()))
# x = {30, 20, 10, 40, 5}
# print(min(x))
x = range(2, 93, 4)
print(min(x))
print(max(x))
x = 'cbaBCA'
print(min(x))
x = ['apple', 'Apple', 'amazon', 'Amazon', 'windows', 'Windows', 'walmart', 'Walmart']
print(min(x))
x = [30, 20, 10, 40, 30, 20, 10, 40]
print(min(x))
x = ['apple', 'Apple', 'amazon', 'Amazon', 'windows', 'Windows', 'walmart', 'Walmart', "pen"]
print(min(x, key=len))
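# key=len compares by length, so the shortest element 'pen' wins here.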
"[email protected]"
] | |
74bc7604452f3b169df0f784597fff5355775d87 | 7486b3af4d4413a96b3e0bf76f776cd8605d7c05 | /wndnjs9878/for/bj-2741.py | 12c56cf3563afc0b51ee1369b5af4bd47d5d6d64 | [] | no_license | WonyJeong/algorithm-study | 7146e18ec9a3d7f46910e31890768b2e37f8b9b4 | dd659bf75c902800bed226d392d144b691d8e059 | refs/heads/main | 2023-03-31T14:38:47.365622 | 2021-04-02T01:35:36 | 2021-04-02T01:35:36 | 334,309,434 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | #2741
import sys
input = sys.stdin.readline
N = int(input().strip())
for i in range(1, N+1) :
print(i)
| [
"[email protected]"
] | |
d356d063de284f7c7c8e447216b2679ac3541b14 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /WixXhsdqcNHe3vTn3_8.py | cb38f9e997d78b97a2bbeb924d95adbd37e850e2 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py |
def how_bad(n):
    # Classify n by its binary 1-bit count: an even count is "Evil",
    # an odd count is "Odious", and a prime count is also "Pernicious".
    ones = bin(n).count('1')
    res = ['Evil'] if ones % 2 == 0 else ['Odious']
    if prime(ones):
        res.append('Pernicious')
    return res
def prime(num):
    # Trial division; numbers below 2 are not prime.
    if num > 1:
        for i in range(2, num):
            if num % i == 0:
                return False
        return True
    return False
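# Illustrative checks: how_bad(7) -> ['Odious', 'Pernicious'] (0b111 has three 1-bits),
# and how_bad(15) -> ['Evil'] (0b1111 has four 1-bits, and 4 is not prime).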
| [
"[email protected]"
] | |
06a5926ae454f0202fd7a070531414fbac8e60be | 799a90344c4e2e367bd79fff063ede765f816549 | /21_TTD_unit_test/mspack_test.py | 88c7ea59b246c702227b6399ef9b771bfcc5a8cb | [] | no_license | thraddash/python_tut | a49765801882003b6d3b1d6958bee476f5768470 | 5bd359f2021bb26953bcd955018dfbf6b2b6395f | refs/heads/master | 2023-03-16T10:28:56.581004 | 2021-03-05T00:08:06 | 2021-03-05T00:08:06 | 325,146,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | #!/usr/bin/env python
## Tests for the msmath module of mspack.
## Import the unittest module.
## Create class MsPackMSMathTestCase, inheriting from unittest.TestCase.
## Each test method name must start with test_ so the runner discovers it.
import unittest
from mspack import msmath
class MsPackMSMathTestCase(unittest.TestCase):
def test_sum(self):
sum = msmath.sum(8, 12)
self.assertEqual(sum, 20)
def test_subtract(self):
result = msmath.subtract(109, 9)
        self.assertEqual(result, 100)
def test_multiplication(self):
result = msmath.multiplication(9, 3)
self.assertEqual(result, 27)
def test_division(self):
result = msmath.division(50, 25)
self.assertEqual(result, 2)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2398215640eca54a212fdecfc44931e464969391 | 557a074c846ee90960fb9248ddab869a12cc5599 | /02_RM_TO.py | 779795b48d82081e1f47a04c11a70507e121be7d | [] | no_license | leezear2019/statistics | d89942724f0a08ebf830a8180a479e529c177889 | 71230524bfb2b77852736046ef6f71946afacc48 | refs/heads/master | 2022-12-10T09:52:17.187760 | 2020-08-24T01:52:27 | 2020-08-24T01:52:27 | 288,099,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | import os
import pandas as pd
import numpy as np
# Remove timed-out instances from the summary files
if __name__ == '__main__':
# root_dir = "D:/data2/out"
# res_dir = "D:/data2/sum_rm"
root_dir = "E:/alldiff"
res_dir = "E:/alldiff-"
solve_time_list = list()
delete_list = list()
# pct_files = os.listdir(pct_path)
# strbit_files = os.listdir(strbit_path)
# common_files = list(set(pct_files) & set(strbit_files))
summary_files = sorted(os.listdir(root_dir))
# summary_files.remove('.DS_Store')
# print(summary_files)
# for name in summary_files:
    # sanity-checked column layout with this sample file
sample = "E:/alldiff/AllInterval-m1-s1.csv"
data = pd.read_csv(sample)
title_list = data.columns.values.tolist()
print(title_list)
print(len(title_list))
#
result = pd.DataFrame(columns=title_list)
# solve time list
for c in title_list:
if c.startswith('time'):
solve_time_list.append(c)
print(solve_time_list)
#
for name in summary_files:
path = os.path.join(root_dir, name)
res_path = os.path.join(res_dir, name)
print(path)
# name = "/Users/lizhe/Documents/exp/sum/zzdubois.csv"
#
data = pd.read_csv(path)
# result.append()
# # print(data.columns.values.tolist())
#
# print('=====')
# print(data)
#
c1 = data[solve_time_list[0]] >= 900
c2 = data[solve_time_list[1]] >= 900
c3 = data[solve_time_list[2]] >= 900
c4 = data[solve_time_list[3]] >= 900
c5 = data[solve_time_list[4]] >= 900
c6 = data[solve_time_list[5]] >= 900
print(c1 & c2 & c3 & c4 & c5 & c6)
# data.drop(data[c1|c2|c3|c4|c5|c6].index, inplace=True)
        data.drop(data[c1 & c2 & c3 & c4 & c5 & c6].index, inplace=True)  # drop rows where all six solvers timed out (>= 900 s)
# data.drop(data[c5].index, inplace=True)
#
print(name, data.shape)
data.to_csv(res_path, index=0)
| [
"[email protected]"
] | |
9965c2f98f90ac340a098207da796b6ad09d40a5 | a69788fd661fedc8195696dec6da014f75cbfd20 | /generateRandom.py | 12cbd1a90e0ae7403509245fbed9b29a0d9eb5b4 | [] | no_license | chitrakgupta/Aggregation-Automation | 3b629180c2020588b0b9707a519cd6e788dc8d43 | ac7aad432545599d7cbb3681265efb9f84a3f0d2 | refs/heads/master | 2021-01-12T04:32:41.016181 | 2016-12-29T23:40:20 | 2016-12-29T23:40:20 | 77,646,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | #!/usr/bin/python
# This code generates random configurations for a system of pHLIPS (to study aggregation)
# Number of pHLIPs will be passed as argument
# Assume that PDB already exists with multiple pHLIPs overlaid on each other. All but one of them have to be moved randomly.
# This code will generate random coordinates, which shall then be used in VMD
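# Usage sketch (assumed from the argv handling below): python generateRandom.py <num_pHLIPs>
# Each output line is tab-separated: x, y, z, rotation axis (x|y|z), rotation angle in degrees.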
import random
import math
from sys import argv
f = open("RandomCoords.rnd","w")
n=int(argv[1])
# Loop over all pHLIPs
Rand=[]
for i in range(1,n):
# Generate 4 random numbers
Rand1 = random.random()
Rand2 = random.random()
Rand3 = random.random()
Rand4 = random.random()
# Generate r, theta, phi vectors from these numbers
rVect = 40 * Rand1 # A random number between 0 and 40
thetaVect = 180 * Rand2 # A random number between 0 and 180
phiVect = 360 * Rand3 # A random number between 0 and 360
orientVect = 360 * Rand4 # A random number between 0 and 360
# Convert theta and phi to radians
thetaVectRad = math.pi * thetaVect/180.
phiVectRad = math.pi * phiVect/180.
# Convert r, theta, phi to X, Y, Z
xVect = rVect * math.sin(thetaVectRad) * math.cos(phiVectRad)
    yVect = rVect * math.sin(thetaVectRad) * math.sin(phiVectRad)  # fixed: the second factor was sin(theta) again instead of sin(phi)
zVect = rVect * math.cos(thetaVectRad)
# Generate integer to determine axis of rotation
axisCode = random.randint(1,3)
if axisCode == 1:
axis = "x"
elif axisCode == 2:
axis = "y"
elif axisCode == 3:
axis = "z"
out = str(xVect) + "\t" + str(yVect) + "\t" + str(zVect) + "\t" + axis + "\t" + str(orientVect) + "\n"
f.write(out)
f.close()
| [
"[email protected]"
] | |
8ba564270ad71459a43f0d7255a7b2d96a8ce9ee | 5b4c803f68e52849a1c1093aac503efc423ad132 | /UnPyc/tests/tests/CFG/1/pass/pass_while+else_.py | c45c446afd64059c4f4281ff68eb69f4904d09ae | [] | no_license | Prashant-Jonny/UnPyc | 9ce5d63b1e0d2ec19c1faa48d932cc3f71f8599c | 4b9d4ab96dfc53a0b4e06972443e1402e9dc034f | refs/heads/master | 2021-01-17T12:03:17.314248 | 2013-02-22T07:22:35 | 2013-02-22T07:22:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | while 1:
pass
else:
pass
| [
"[email protected]"
] | |
ed957f9daccc09a4a2791cc23f08a27a2914369c | b8f1dda1b39e6927c5efd9d2c298e3e29043410f | /yaffs_example/testgenv5.py | 903dab18f13e0998e308d4293b9f7fed9bd86383 | [] | no_license | regehr/random-testing-book | 2181390dd1afedb5f37efe170256f650f5cf6705 | 45606f72775045b7a227a38199398da05f55652c | refs/heads/master | 2020-12-24T14:46:08.207372 | 2013-03-03T19:01:02 | 2013-03-03T19:01:02 | 7,459,867 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,457 | py | import random
import sys
import time
import testswarm as T
T.processOpts({"seed" : time.time(),
"length" : 100,
"ref" : False,
"swarm" : False,
"Pfeedback" : 0.95,
"PextendPath" : 0.05})
random.seed(int(T.opts["seed"]))
T.setupTestFile()
pathHistory = ['"/yaffs2"']
pathComps = ["alpha", "beta", "gamma", "delta", "epsilon", "zeta", "eta", "theta"]
Rpathname = lambda: '"/yaffs2' + T.any(pathComps, 0.5, "/") + '"'
def pathname():
if random.random() < float(T.opts["Pfeedback"]):
path = random.choice(pathHistory)
if random.random() < float(T.opts["PextendPath"]):
path = path[0:-1] + "/" + random.choice(pathComps) + '"'
return path
else:
return Rpathname()
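# With probability Pfeedback, pathname() reuses (and occasionally extends) a previously
# seen path, so most generated operations target files/directories that actually exist.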
def Cpathname():
path = pathname()
if path not in pathHistory:
pathHistory.append(path)
return path
dirmode = lambda: "S_IREAD|S_IWRITE|S_IEXEC" # YAFFS2 ignores directory permissions
mode = lambda: T.someOf(["S_IREAD", "S_IWRITE", "S_IEXEC"], 0.5, "|")
flag = lambda: T.someOf(["O_CREAT", "O_APPEND", "O_RDWR", "O_RDONLY", "O_WRONLY"], 0.5, "|")
NUM_BUFFERS = 4
buffer = lambda: "rw[" + str(random.randint(0,NUM_BUFFERS-1)) + "]"
MAX_BYTES = 2048*3
bytes = lambda: str(random.randint(0,MAX_BYTES-1))
offset = lambda: str(random.randint(0,MAX_BYTES-1))
whence = lambda: random.choice(["SEEK_SET", "SEEK_CUR", "SEEK_END"])
calls = {
"yaffs_freespace" : (0.05, (), [pathname]),
"yaffs_mkdir" : (0.5, (), [Cpathname, dirmode]),
"yaffs_rmdir" : (0.5, (), [pathname]),
"yaffs_rename" : (0.5, (), [pathname, Cpathname]),
"yaffs_open" : (0.5, "h", [Cpathname, flag, mode]),
"yaffs_close" : (0.5, (), ["!h"]),
"yaffs_read" : (0.5, (), ["h", buffer, bytes]),
"yaffs_write" : (0.5, (), ["h", buffer, bytes]),
"yaffs_unlink" : (0.5, (), [pathname]),
"yaffs_truncate" : (0.5, (), [pathname, offset]),
"yaffs_ftruncate" : (0.5, (), ["h", offset]),
"yaffs_lseek" : (0.5, (), ["h", offset, whence])
}
calls = T.swarm(calls)
refMap = [('"/yaffs2', 'REFPATH "'),
("yaffs_", ""),
("h[", "fd["),
("rw[", "rwRef[")]
s = 0
t = 0
tTimeout = T.opts["length"] * 5
while s < int(T.opts["length"]) and (t < tTimeout):
try:
T.addCall(calls, refMap)
s += 1
except (KeyError, IndexError):
t += 1
pass
T.finishTest()
| [
"[email protected]"
] | |
d3835d7f17c06d91ec27fb318dfab7f377ed7813 | e8742ddd09a5517a1959b76780069d89fa799118 | /12_Artificial_Intelligence/reinforcement learning/Project 2/WindyExp8Moves.py | b75e9958cc245e82f135fc347fd28edccaf3ab29 | [] | no_license | Frost-13/Portfolio | d61fe7161217a25a0fd832e0ad75bf9793cbf67d | 44f5405a5572e53b9491227779568fefa140cab3 | refs/heads/master | 2023-03-19T04:04:01.078025 | 2021-03-18T16:32:59 | 2021-03-18T16:32:59 | 223,272,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | import numpy as np
from WindyEnv8Moves import Environment
from WindyAgent8Moves import Agent
from rl_glue import RLGlue
import matplotlib.pyplot as plt
def experiment(rlg, num_runs, max_steps):
completed = np.zeros(max_steps)
for run in range(num_runs):
# set seed for reproducibility
np.random.seed(run)
# initialize RL-Glue
rlg.rl_init()
rlg.rl_start()
epi = 0
for i in range(max_steps):
reward, state, action, is_terminal = rlg.rl_step()
#print(state, action)
            if is_terminal:  # episode finished; tally it and start a new one
#print('done')
epi+=1
#print("FINISH!!!!")
rlg.rl_start()
completed[i] += epi
#rewards[i] += reward
completed /= num_runs
return completed
def main():
max_steps = 8000 # max number of steps in an episode --> 1000
num_runs = 1 # number of repetitions of the experiment --> 2000
# Create and pass agent and environment objects to RLGlue
    # this is the epsilon-optimistic approach where we explore 10% of the time
agent = Agent()
environment = Environment()
rlglue = RLGlue(environment, agent)
#del agent, environment # don't use these anymore
'''environment.env_init()
environment.env_start()
for i in range(1000):
action = int(input("enter an action: "))
environment.env_step(action)
print("State: ", environment.currentState)'''
result = experiment(rlglue, num_runs, max_steps)
    plt.plot(result, label='episodes completed', color='blue')
    plt.legend()
plt.ylabel('Episodes')
plt.xlabel('Time Steps')
plt.show()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c212f1faaba626baf4cd851eaaa3c2d6ea463b09 | 2ffd079c34cb07c738f7e5f703764fed68f2c8c0 | /Solutions/Number_of_Nodes_in_the_Sub-Tree_With_the_Same_Label.py | 86d618abeb7983b5330e4bdad148cd678500aba6 | [] | no_license | WuIFan/LeetCode | bc96355022c875bdffb39c89a2088457b97d30ab | 689a100ada757bc20334d5f0084587af3039ca7b | refs/heads/master | 2022-05-24T07:13:01.023733 | 2022-04-03T15:26:23 | 2022-04-03T15:26:23 | 202,471,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | from typing import List
import collections
class Solution:
def countSubTrees(self, n: int, edges: List[List[int]], labels: str) -> List[int]:
def dfs(node,par):
counter = collections.Counter()
for child in tree[node]:
if child == par:
continue
counter += dfs(child,node)
counter[labels[node]] += 1
ans[node] = counter[labels[node]]
return counter
tree = collections.defaultdict(list)
        for a, b in edges:
            tree[b].append(a)
            tree[a].append(b)
        # print(tree)  # debug aid; disabled so the method has no side effects
ans = [0]*n
dfs(0,None)
return ans
n = 7
edges = [[0,1],[0,2],[1,4],[1,5],[2,3],[2,6]]
labels = "abaedcd"
print(Solution().countSubTrees(n,edges,labels)) | [
"[email protected]"
] | |
d654245aa192b82d1eea814beecfb32dbbe5d459 | 5858dcab54ef266ffdc6bbd70bd4d7f529289283 | /manage.py | 674eb62449ab449a964eca3c318bff59d9ec31fc | [] | no_license | shahrukh-alizai/update-model-38 | 1bdca22c84a86c38e87f25e29a19acbda20773c4 | a0d5128a8fca79f3096542cad0a4c2c22484a9ab | refs/heads/master | 2022-12-17T05:31:32.024626 | 2020-09-10T06:44:49 | 2020-09-10T06:44:49 | 294,153,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "update_model_38.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
00c0a3e62556125bafa7a5493ae5ed909268325f | b43066d17d83c4a96a201bc70573d956d618d643 | /in_polygon.py | 14f526627e76ebbc77273ba0a4d8b2fc9b47b79f | [] | no_license | rskschrom/points_inpolygon | f2dd87a92387e95a6124e5eb73014b55ede8b378 | 1592051c4d08874190ec85b25493114df4479af5 | refs/heads/master | 2021-01-25T01:02:50.378363 | 2017-11-15T20:56:03 | 2017-11-15T20:56:03 | 94,714,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | import numpy as np
from segments import get_overlap_segments
from segments import get_coverage_fraction, poly2seg
# calculate bearings from point to segments
def dir_span(px, py, segx, segy):
l1x = segx[0,:]-px
l2x = segx[1,:]-px
l1y = segy[0,:]-py
l2y = segy[1,:]-py
dir1 = np.arctan2(l1y, l1x)
dir2 = np.arctan2(l2y, l2x)
return dir1, dir2
# convert angle pairs from [-pi, pi] to segments between 0 and 1
def angles2segments(ang1, ang2):
# convert to [0, 2*pi]
ang1[ang1<0] = ang1[ang1<0]+2.*np.pi
ang2[ang2<0] = ang2[ang2<0]+2.*np.pi
# set left and right bounds of angles
ang_left = np.copy(ang1)
ang_right = np.copy(ang2)
ang_left[ang1>ang2] = ang2[ang1>ang2]
ang_right[ang1>ang2] = ang1[ang1>ang2]
# dealias
deal_cond = (np.abs(ang_left-ang_right)>np.pi)
ang_temp = np.copy(ang_right[deal_cond])
ang_right[deal_cond] = ang_left[deal_cond]
ang_left[deal_cond] = 0.
# add additional aliased segments from ang<2.*pi to 2.*np.pi
ang_left = np.append(ang_left, ang_temp)
ang_right = np.append(ang_right, np.linspace(np.pi*2., np.pi*2., len(ang_temp)))
seg_left = ang_left/(2.*np.pi)
seg_right = ang_right/(2.*np.pi)
return seg_left, seg_right
# Determine whether a point lies inside a polygon via angular coverage: from an
# interior point, the polygon's edges span the full 0..2*pi circle of bearings.
def in_polygon(x_poly, y_poly, x_point, y_point):
# compute direction span between the point and each polygon segment
x_seg, y_seg = poly2seg(x_poly, y_poly)
dir1, dir2 = dir_span(x_point, y_point, x_seg, y_seg)
seg_left, seg_right = angles2segments(dir1, dir2)
# get overlapping segment bounds and calculate covered fraction
over_left, over_right = get_overlap_segments(seg_left, seg_right)
cf = get_coverage_fraction(over_left, over_right, 1.)
indicator = int(cf)
return indicator
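# Minimal usage sketch (illustrative; assumes the companion `segments` module is importable):
# square_x = np.array([0., 1., 1., 0.])
# square_y = np.array([0., 0., 1., 1.])
# print(in_polygon(square_x, square_y, 0.5, 0.5))  # expected 1 (inside)
# print(in_polygon(square_x, square_y, 2.0, 2.0))  # expected 0 (outside)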
| [
"[email protected]"
] | |
806f89b43434e8bd806c06d099cf4b4b3344a60a | 763378fae9820f25a6b910de65c63fb10b7c32a5 | /account/migrations/0008_auto_20201119_1926.py | 8d5d44fc2654c39ed5c675171a69d1f2049f98d9 | [] | no_license | alisamadzadeh46/Blog | c9ae193647399d1513f32b675654aec56496c3ea | 50f9b1a63b99555d1eaad3171af5e5b128641c38 | refs/heads/main | 2023-02-26T19:43:46.288622 | 2021-02-12T09:57:17 | 2021-02-12T09:57:17 | 330,210,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | # Generated by Django 3.0.5 on 2020-11-19 15:56
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('account', '0007_auto_20201119_1923'),
]
operations = [
migrations.AlterField(
model_name='user',
name='special_user',
field=models.DateTimeField(default=datetime.datetime(2020, 11, 19, 15, 56, 37, 168677, tzinfo=utc), verbose_name='کاربر ویژه تا '),
),
]
| [
"[email protected]"
] | |
ff3182b2fdd256162916662b4ffc140e8161a844 | 3e83e65ceb2e0cebc2442bdecf03b614b4dd1455 | /strategy.py | 92235394432bc3fc1223082445c9b3141a054b22 | [] | no_license | Shuso/csc148assignment2_tippy | 3c38c2f1339fcf2e96c54bdb9be971a5bc268f58 | 3dcd313d71a850bf008d331b3ba9ce55d01c5f59 | refs/heads/master | 2021-09-03T03:36:11.746185 | 2018-01-02T03:58:10 | 2018-01-02T03:58:10 | 116,357,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | class Strategy:
'''Interface to suggest moves for a GameState.
Must be subclassed to a concrete strategy. Our intention is
to provide a uniform interface for functions that suggest moves.
'''
def __init__(self, interactive=False):
'''(Strategy, bool) -> NoneType
Create new Strategy (self), prompt user if interactive.
'''
def suggest_move(self, state):
'''(Strategy, GameState) -> Move
Suggest a next move for state.
'''
raise NotImplementedError('Must be implemented in subclass')
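# Illustrative concrete strategy (added sketch; RandomStrategy and the
# possible_next_moves() accessor are assumptions, not part of this module):
# import random
# class RandomStrategy(Strategy):
#     def suggest_move(self, state):
#         return random.choice(state.possible_next_moves())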
| [
"[email protected]"
] | |
8bd17d214073140660e591f2126b3c52982f2aa6 | 86baffc71f56c51ccd75dfc9e9d15b983dbd8116 | /User-Verification-based-on-Keystroke-Dynamics-master/graphs.py | 7cc93fb32e7942dbdbcb14abd1a5b662e57c6e52 | [] | no_license | Siddharth2110/User-Verification-based-on-Keystroke-Dynamics-master | 1f371709d3d896004ea5f8fc517dff124200afa7 | 13c5235d1aa7e34d200e32ebe767ff84b1812bbc | refs/heads/main | 2023-06-09T03:05:14.013614 | 2021-06-27T05:23:54 | 2021-06-27T05:23:54 | 378,798,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | import matplotlib.pyplot as plt
import numpy as np
w=0.4
x = ["Manhattan","M_Filtered","M_Scaled","SVM","M-F-S","SVM_Filtered"]
AUC = [0.878,0.914,0.951,0.965,0.959,0.972]
EER = [0.18065765645731371,0.1484870487058271,0.11763692496206396,0.12054244703221502,0.10284572835503178,0.12557263803596627]
plt.bar(x,AUC,w,label="EER")
plt.xlabel("Models")
plt.ylabel("Values")
plt.title("Models Vs Values(Equal Error Rate)")
plt.legend()
plt.show() | [
"[email protected]"
] | |
e8becac7e006696104e0ca3829433f93a8db4c7d | 6bfaae2f3f21f76dd5b0394ef97513d4cc982a96 | /여행경로/jujoo.py | 791c764e164b2032a1afbc3e7229005945e59ad5 | [] | no_license | 5Hyeons/CodingStudy | bcfa7ffdffbd510a7ed71232d186e3eff46a61aa | d02d65484aef9707cf6f5660f730e508d8847903 | refs/heads/master | 2023-08-25T09:57:18.694973 | 2021-11-09T06:48:54 | 2021-11-09T06:48:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | from collections import defaultdict
from typing import List
def solution(tickets:List[List[str]]) -> List[str]:
'''
모든 도시를 방문할 수 없는 경우는 주어지지 않습니다.
[Example]
tickets = [["ICN", "COO"], ["ICN", "BOO"], ["COO", "ICN"], ["BOO", "DOO"]]
return : ["ICN", "COO", "ICN", "BOO", "DOO"]
'''
# route : 최종 방문 루트
# tmp : 방문 가능한 공항을 저장
route, answer = [], ["ICN"]
# 방문 가능한 공항을 기준으로 내림차순 정렬
tickets = sorted(tickets, key=lambda x: x[1], reverse = True)
# 공항별 방문가능한 공항 저장
dic = defaultdict(list)
for start, arrive in tickets:
dic[start].append(arrive)
while answer:
# 방문 공항 경로 저장
while dic[answer[-1]]:
answer.append(dic[answer[-1]].pop())
# 다음 공항이 없을 때 까지 pop
route.append(answer.pop())
return route[::-1] | [
"[email protected]"
] | |
699b9459829df8cf46f3eddb1b26c463af82d477 | 3fbb8f2a4c7d887cfb76c399568784b8e9703736 | /Analysis/Trivia/Questions/sets_with_most_colored_pips/trivia_question.py | 68b0ba25b3432ec645364c2a43f87fdd8618aa9e | [] | no_license | wally-wissner/MTGAnalysis | a36c3446bc26f54abd875347de2e947777b73862 | 697d864dc20f7aa4d15eca5ea07ed189fba40dd3 | refs/heads/master | 2023-08-14T17:57:29.470453 | 2023-07-28T03:24:12 | 2023-07-28T03:24:12 | 354,975,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | # Question: Which sets have the most colored pips by card on average?
import pandasql
from Conn import conn_mtg
from Utilities.reddit_markdown import to_reddit_markdown
query = """
SELECT
cards.name,
cards.manaCost,
cards.setCode,
sets.name AS setName
FROM
cards
LEFT JOIN
sets
ON
sets.code = cards.setCode
WHERE
cards.borderColor IN ('black', 'white')
AND sets.type IN ('core', 'expansion')
AND cards.type NOT LIKE 'Land'
ORDER BY
cards.name, sets.releaseDate
"""
df = conn_mtg.request(query)
df["coloredPips"] = df["manaCost"].apply(
lambda x: len([i for i in x.split("}{") if any(c in i for c in "WUBRG")]) if x else 0
)
df = df.groupby("setCode")["coloredPips"].mean().reset_index()
df = df.sort_values("coloredPips")
df.to_markdown("result.md")
to_reddit_markdown(df, "result.reddit")
| [
"[email protected]"
] | |
3ef1d87fad484673586ff90b7e56e018b8147665 | db801026b0da97df281f8c30f2d423397b621a8d | /auth.py | 438cdfd84a8cea00f960f165557dbcfdc30bd8b0 | [] | no_license | AnWoz/CodeMe-Advanced-Project- | 05286585f3985131fb3a095faf34f14179c5f816 | dbf2bd9b62ae82477638613f390e284f49932ccc | refs/heads/master | 2020-07-29T02:03:08.897684 | 2019-09-19T18:57:23 | 2019-09-19T18:57:23 | 209,626,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | from functools import wraps
from flask import Blueprint, request, get_flashed_messages, render_template, \
session, redirect, flash
from werkzeug.security import check_password_hash
from db_utils import get_connection
auth_bp = Blueprint('auth_endpoints', __name__)
@auth_bp.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
messages = get_flashed_messages()
return render_template('login.html', messages=messages)
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
conn = get_connection()
c = conn.cursor()
result = c.execute('SELECT * FROM users WHERE username = ?', (username,))
user_data = result.fetchone()
if user_data:
hashed_password = user_data['password']
if check_password_hash(hashed_password, password):
session['user_id'] = user_data['id']
session['username'] = user_data['username']
session['is_admin'] = bool(user_data['is_admin'])
return redirect('/')
flash('błędna nazwa użytkownika lub hasło')
return redirect('/login')
@auth_bp.route('/logout')
def logout():
session.clear()
return redirect('/login')
def login_required(view):
@wraps(view)
def wrapped_view(*args, **kwargs):
if session:
return view(*args, **kwargs)
else:
return redirect('/login')
return wrapped_view
| [
"[email protected]"
] | |
0375f7e1d7011a911aaf440aedcdb7956604a8f5 | ea3f3c03f91caf4c3c474ba764cd60a8302abbac | /study-be/api/fe.py | 8653248b0865c239fee00cf739dc9ca6f5267202 | [] | no_license | fengcms/python-restful-cms | 1c7ba03c584344ca271801ff7ebaeb8d0a4df636 | 48fb067b4bbd2c18f8db370e0a8cdc7dacdae2c6 | refs/heads/master | 2020-03-26T05:09:00.458646 | 2018-10-11T01:46:52 | 2018-10-11T01:46:52 | 144,540,397 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | # 引入 sanic 蓝图
from sanic import Blueprint
# 引入公共 RESTful 类
from core.app import listView, itemView
# 引入配置参数中的前缀参数
from config import PREFIX
from core import rest
from core.tool import ok, fail
# 配置蓝图
FIX = PREFIX['fe']
bp = Blueprint('fe', url_prefix=FIX)
# 加载默认 RESTful 接口生成路由
bp.add_route(listView.as_view(), '<name>')
bp.add_route(itemView.as_view(), '<name>/<oid>')
@bp.route('tree_channel', methods=['GET'])
async def tree_channel(request):
sourceData = rest.getList(
{'pagesize': -1, 'sort': '-sort,-id'},
'channel',
'treeChannel',
True
)
if sourceData == 1:
return fail('服务器内部错误', 500, 500)
if sourceData['total'] < 1:
return fail('您当前还没有添加任何栏目')
sourceList = sourceData['list']
def makeTree(pid, arr):
res = []
for i in arr:
if i['pid'] == pid:
rep = makeTree(i['id'], arr)
if len(rep) != 0:
i['children'] = rep
res.append(i)
return res
res = makeTree(0, sourceList)
return ok(res)
| [
"[email protected]"
] | |
1a11bb1d43304c85bb297e0400ca22bddf7c1df0 | e3e9979ecd820cc027cf6db2c382aaf633d4b0f4 | /maquinas/models.py | afb336b4fe82dfa5eef46722e2a53bf56bf7a598 | [] | no_license | raianeli25/primeiro_django | 5358c75a3ab3a998312aed0584c6abf0935b07fa | 51bc2c15ef2d0d4fc51782ee7b04f7dc59cbf2f1 | refs/heads/master | 2023-02-12T08:59:49.723854 | 2020-12-24T14:46:09 | 2020-12-24T14:46:09 | 324,173,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | from django.conf import settings
from django.db import models
from django.utils import timezone
class Maquinas(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
nome = models.CharField(max_length=200)
status = models.CharField(max_length=200)
causa = models.TextField()
data_inicio = models.DateTimeField()
data_fim = models.DateTimeField()
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.nome
| [
"[email protected]"
] | |
2740ed87141ddd5afdaad073ebd88bb153352b3f | d02844730c9e8cc6d725e7dd0a7a626c37cd877b | /indice.py | f2e72c93f7807f0af0a89eebd2907e850240a264 | [] | no_license | rosaito/crud | 9424d528224d605a62f0af51abaef80bddb2b716 | 704617dc54a2764bc944c9a4f21c7643c39f52e7 | refs/heads/master | 2020-08-10T04:32:02.506766 | 2019-10-10T19:06:05 | 2019-10-10T19:06:05 | 214,257,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | print ("------------------------------")
print ("1.Agregar")
print ("2.Eliminar")
print ("3.Listar")
print ("4.Modificar")
print ("5.Salir")
print ("------------------------------")
seleccion = None
nombre = None
apellido = None
edad = None
while seleccion != 5:
seleccion = input("Ingrese un valor: ")
if seleccion == "1":
print("Agregar")
nombre = input("Ingresa tu nombre: ")
apellido = input("Ingresa tu apellido: ")
edad = input("Ingresa tu edad: ")
print("Tus datos son: "+nombre+" "+apellido+" "+edad+" años")
elif seleccion == "2":
print("Eliminar")
print("Datos a eliminar: "+nombre+" "+apellido+" "+edad+" años")
elif seleccion == "3":
print("Listar")
print("Los datos actuales son: ")
print (nombre+" "+apellido+" "+edad+" años")
elif seleccion == "4":
print("Modificar")
elif seleccion=="5":
print("Salir")
break
exit() | [
"[email protected]"
] | |
ff5574cdf5c3905fcc2986aaf83509e96e8611c4 | f58d5752607171fb683e66c9a83781956c453514 | /Django/groceryapp/grocery_list/migrations/0003_auto_20210114_2008.py | c6ef432386e37aa0b63bf4286956e69709ca5dda | [] | no_license | Fawad2aria/full_stack | 539bc1aa5e3c05a079f3d89993cd61e5391eb442 | 49c16b46d8149b08a4685977f706de7ff27d304e | refs/heads/main | 2023-06-12T21:46:54.371277 | 2021-01-28T19:08:14 | 2021-01-28T19:08:14 | 318,842,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | # Generated by Django 3.1.5 on 2021-01-15 04:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grocery_list', '0002_groceryitem_completed'),
]
operations = [
migrations.AddField(
model_name='groceryitem',
name='completed_date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='groceryitem',
name='created_date',
field=models.DateTimeField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
a8c65f4cc9daf39f60267384c6553e8f0a21620b | 4b19fa5afa62013c5f21bc895fefaef27b6f5c57 | /lab10/client.py | e5aa777902347c9721bab7e54c1a818373ad5cd0 | [] | no_license | pramo18/CN-LAB_1BM18CS070 | f5f515edb452c056fc0096af2e0a7460c2eb39ac | 9c8d634828ba2297d729661d332a959a12bb5e1d | refs/heads/master | 2023-02-08T19:19:04.301308 | 2020-12-30T15:28:10 | 2020-12-30T15:28:10 | 300,163,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | from django.test import TestCase
# Create your tests here.
from socket import *
serverName = 'DESKTOP-PP68JI4'
serverPort = 12001
clientSocket = socket(AF_INET,SOCK_STREAM)
clientSocket.connect((serverName,serverPort))
sentence = input("Enter file name")
clientSocket.send(sentence.encode())
filecontents = clientSocket.recv(1024).decode()
print('From Server:', filecontents)
clientSocket.close()
| [
"[email protected]"
] | |
22835052eb3a974bca07f4e0f68f847f6f10579b | 91e87ed2e9a1b79cd50ff7593702c677f3e180f7 | /demo.py | ecda6650159294426e943df529394de41ae793db | [] | no_license | wmy920/autotest | 7648cc4113f386b1435cf29ce5dff1bbac90ccce | bbb4eebd03ce38ccb28e636ea2a438c282b6bee0 | refs/heads/master | 2020-08-28T01:11:36.764493 | 2019-10-27T10:39:24 | 2019-10-27T10:39:24 | 217,543,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | print("你好,刘猪恒");
print("hello,zhuzhuliu"); | [
"[email protected]"
] | |
a18970f10077564a889c1eee4c8adcc65f9f257e | 5d99fa60e72ad25b5e9f772ed13a4cabe0a5964f | /find_characters.py | 3d4feb002d66cd8a3355e1d8b6ce17e7616a8692 | [] | no_license | mpbraun2/Python_Track | 9c292ef619e803ed2f368f9726a87296b1fb23f5 | c5636c15b13a52500d46c09ea419235513f289a2 | refs/heads/master | 2021-01-25T06:25:00.805482 | 2017-06-06T22:10:15 | 2017-06-06T22:10:15 | 93,568,127 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | l = ['hello','world','my','name','is','Anna']
print l.count('e')
| [
"[email protected]"
] | |
ccc67b26317e91fe5a9d78560344de819708af3b | 1e05ec67d5959841ebe0405a52237d2c02da8db4 | /chessboardApp/models.py | 1d4099e225688209285e7f950334b13fb2ddfb79 | [] | no_license | KurtVoigt/Chess-Website | 0309086ac199064503f88bd0fecb1f4b5f10a2ef | aaeb7f203449eff698bd83cdb67397140f1f7efa | refs/heads/main | 2023-04-07T14:31:53.411220 | 2021-04-12T11:23:10 | 2021-04-12T11:23:10 | 356,155,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | from django.db import models
# Create your models here.
class Board(models.Model):
board_name = models.CharField(max_length=30)
def __str__(self):
return self.board_name
class Space(models.Model):
board = models.ForeignKey(Board, related_name="space_list", on_delete=models.CASCADE)
space_col = models.CharField(max_length=1)
space_row = models.IntegerField()
space_piece = models.CharField(max_length=6)
def __str__(self):
row = str(self.space_row)
col = str(self.space_col)
conc = col + row
return conc | [
"[email protected]"
] | |
2d917214e70c45b3846f8b9714c588817b6bad5e | 8d082640cf1eed6fed9763f121cc65a65077592e | /CoVigilantSystems/SampleGetData.py | de3992599d50ac773fa087ba93f2857122e4f9cf | [] | no_license | wangxiaoyuwf/CoVigilantSystems | 747cfd0855f93e757e5ec56be16d02a4a7233bb5 | aaa0e5fbf4b8fe22286b4487b65e9e61218f12e1 | refs/heads/master | 2022-07-14T17:26:44.825497 | 2019-12-18T22:35:55 | 2019-12-18T22:35:55 | 219,410,117 | 0 | 1 | null | 2022-06-21T23:20:39 | 2019-11-04T03:30:45 | Python | UTF-8 | Python | false | false | 2,060 | py | # This is a sample how to get data from database and convert data to a dataframe of pandas
import numpy as np
import pymysql
import atexit
import pandas as pd
from sqlalchemy import create_engine
display = pd.options.display
display.max_columns = 50
display.max_rows = 10
display.max_colwidth = 10
display.width = None
display.float_format = lambda x: '%.4f' % x
# Connect the database.
db = pymysql.connect(host='10.22.12.131', user='nonameteam', password='nonameteam', db='nonameteam',
cursorclass=pymysql.cursors.DictCursor)
engine = create_engine('mysql+pymysql://nonameteam:[email protected]:3306/nonameteam').connect()
# This function is registered below and will be called when the application exits.
def on_exit():
# Close database.
db.close()
# print('ApplicationExit! Close database.')
# Register the function to run when the application stops.
atexit.register(on_exit)
def insert_dataframe_to_sql(df: pd.DataFrame, table_name):
df.to_sql(table_name, engine, if_exists='replace')
# Query data and return a pandas.DataFrame. Table and column names can be checked in the database.
def query_data(table_name, column_name, value) -> pd.DataFrame:
    sql_command = 'Select * from ' + table_name + ' where ' + column_name + '=%s'  # NOTE: table/column names are interpolated directly; call only with trusted values
cursor = db.cursor()
cursor.execute(sql_command, value)
query_result = cursor.fetchall()
result = pd.DataFrame(list(query_result))
return result
def query_data_by_sql(sql) -> pd.DataFrame:
sql_command = sql
cursor = db.cursor()
cursor.execute(sql_command)
query_result = cursor.fetchall()
result = pd.DataFrame(list(query_result))
return result
def get_cursor():
cursor = db.cursor()
return cursor
def commit_sql():
db.commit()
# Main entry point: this block runs only when the file is executed as a script,
# not when it is imported by another module.
if __name__ == "__main__":
result = query_data('business', 'state', 'AZ')
print(result)
| [
"[email protected]"
] | |
3632c2f44e258b782198caa32baf4b6e9d198297 | b3c80ababd3206943eb94c01b8201bee360c61cd | /palindrome_num_func.py | bcd5cee95f78a03d31b8eb944436326daa74d110 | [] | no_license | rashmitallam/PythonBasics | 48aedb0f281046b88bfd23a4fe0f10f828ac707a | d29838884aac8644af2f885d22a6640d5e681803 | refs/heads/master | 2020-04-21T14:02:55.472429 | 2019-03-07T18:16:32 | 2019-03-07T18:16:32 | 169,621,476 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | #WAP to accept a number from user, and check if it is palindrome or not
def palindrome(n1):
s1=str(n1)
rev_s1=reversed(s1)
if list(s1) == list(rev_s1):
print s1+' is a palindrome'
else:
print s1+' is not a palindrome'
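    # Equivalent check (illustrative): s1 == s1[::-1]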
def main():
num1=input('Enter a number:')
palindrome(num1)
if __name__ == '__main__':
main()
'''
>>>
RESTART: C:\Users\Admin\Desktop\Python_2019\Funtions\hw_26_27_1\palindrome_num_func.py
Enter a number:1234321
1234321 is a palindrome
>>>
RESTART: C:\Users\Admin\Desktop\Python_2019\Funtions\hw_26_27_1\palindrome_num_func.py
Enter a number:435467
435467 is not a palindrome
>>>
'''
| [
"[email protected]"
] | |
031c44b6ea4589d112babdc909a1708837db2e88 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/sroberts-malwarehouse/allPythonContent.py | e144e319780a9c8cdfa936f4f86197b1e99aed73 | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,404 | py | __FILENAME__ = db_controller
import sqlalchemy
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.reflection import Inspector
from malware_sample_model import MalwareIndex
import os
import re
class MalwareDbController(object):
def __init__(self, config):
self.config = config
self.db = None
def initialize_db(self):
""" Initializes sqllite location and filename
Returns [True, False] accordingly
"""
# Get DB configuration
uri = self.config.get('database', 'uri')
try:
self.engine = create_engine(uri)
self.engine.echo = False # Try changing this to True and see what happens
self.metadata = MetaData(self.engine)
self.engine.connect()
Session = sessionmaker()
Session.configure(bind=self.engine)
self.session = Session()
return True
except Exception, err:
print "Failed to initialize DB\nPlease verify your db settings-%s" %(err)
return False
def find_sample(self, find_string):
""" Searches the malware db for samples
specified by the find_string query
"""
try:
if re.findall(r"^([a-fA-F\d]{64})$", find_string):
query = self.session.query(MalwareIndex).filter_by(sha256=find_string)
elif re.findall(r"^([a-fA-F\d]{32})$", find_string):
query = self.session.query(MalwareIndex).filter_by(md5=find_string)
else:
query = self.session.query(MalwareIndex).filter_by(name=find_string)
data = [malware_sample.__dict__ for malware_sample in query]
return data
except Exception, err:
print err
def recent(self, quantity='5'):
"""Returns a summary of the last n (default: n = 5) pieces of malware."""
try:
query = self.session.query(MalwareIndex).limit(quantity)
data = [malware_sample.__dict__ for malware_sample in query]
return data
except Exception, err:
print err
def load_db(self, report_json):
"""Load information about the sample into the index DB."""
try:
# Set the values with some reflection awesomeness
malware_sample = MalwareIndex()
for k, v in report_json.iteritems():
if hasattr(malware_sample, k):
if isinstance(v, list):
setattr(malware_sample, k, ",".join(v))
else:
setattr(malware_sample, k, v)
# Add to DB
self.session.add(malware_sample)
self.session.commit()
print "Sample %s loaded..." % report_json['name']
return True
except Exception, err:
print "Error", err
########NEW FILE########
__FILENAME__ = basic_analyzer
import os
import datetime
import utils
from extensions.plugin_base import PluginBase
class BasicAnalyzer(PluginBase):
# Parsing Custom Options
@staticmethod
def get_name():
return 'basic_analyzer'
def initialize_config(self, config):
# self.apikey = config.get(BasicAnalyzer.get_name(), "apikey")
pass
def analyze(self, malware_sample):
malware_definition = dict()
malware_definition["source"] = malware_sample.meta_source if malware_sample.meta_source else ""
malware_definition["tags"] = malware_sample.meta_tags if malware_sample.meta_tags else ""
malware_definition["notes"] = malware_sample.meta_notes if malware_sample.meta_notes else ""
# Parsing Automatically Generated Options
malware_definition["name"] = malware_sample.filename if malware_sample.filename else ""
malware_definition["datetime"] = str(datetime.datetime.now())
malware_definition["size"] = os.stat(malware_sample.analysis_sample_location).st_size
malware_definition["md5"] = malware_sample.hash_md5
malware_definition["sha256"] = malware_sample.hash_sha256
malware_definition["mimetype"] = utils.get_mimetype(malware_sample.analysis_sample_location)
malware_definition["sample_dir"] = malware_sample.analysis_sample_directory
malware_sample.malware_definition = malware_definition
self.report = malware_sample.details()
self.report_name = "analysis.rpt"
def create_report(self):
print "Creating report"
report_dir = self.analysis_report_directory
try:
if not os.path.exists(report_dir):
os.makedirs(report_dir)
super(BasicAnalyzer, self).create_report(self.report, os.path.join(report_dir, self.report_name))
except Exception, err:
print err
########NEW FILE########
__FILENAME__ = virus_total
from extensions.plugin_base import PluginBase
import urllib2
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import json
import os
class VirusTotal(PluginBase):
@staticmethod
def get_name():
return 'virustotal'
def initialize_config(self, config):
self.apikey = config.get(VirusTotal.get_name(), "apikey")
def submit_file_for_scan(self,filepath):
""" Submits a file to be scanned
Returns the JSON response
"""
# Register the streaming http handlers with urllib2
register_openers()
# Extract the file name
filename = os.path.basename(filepath)
try:
datagen, headers = multipart_encode({"name": filename, "file": open(filepath, "rb"), "apikey":self.apikey})
# Create the Request object
request = urllib2.Request("https://www.virustotal.com/vtapi/v2/file/scan", datagen, headers)
# Submit the file and read the reply
submission_info = urllib2.urlopen(request).read()
# This is the json data return upon submission
# Reference this data for the scan_id for future
# lookups.
return submission_info
except Exception, err:
print err
return
def lookup_by_hash(self, sample_hash):
query_dict = dict()
# Set URL parameters
query_dict["endpoint"] = "https://www.virustotal.com/vtapi/v2/file/report"
query_dict["apikey"] = self.apikey
query_dict["resource"] = sample_hash
url = "{endpoint}?apikey={apikey}&resource={resource}".format(**query_dict)
# Retrieve the data
v_total_json = urllib2.urlopen(url).read()
return v_total_json
def analyze(self, malware_sample):
# Lookup details for the sample on virus total
json_response = self.lookup_by_hash(malware_sample.hash_sha256)
# Set the report name
self.report_name = "virustotal_details.json"
# Save the json in a pretty format
self.report = json.dumps(json.loads(json_response), sort_keys=True,
indent=4)
def create_report(self):
print "Creating report"
dir = self.__class__.get_name()
report_dir = os.path.join(self.analysis_report_directory, dir)
try:
if not os.path.exists(report_dir):
os.makedirs(report_dir)
super(VirusTotal,self).create_report(self.report, os.path.join(report_dir,self.report_name))
except Exception, err:
print err
########NEW FILE########
__FILENAME__ = plugin_base
import utils, os
# Custom Exceptions
class PluginConfigError(Exception):
pass
class MalwareHousePluginNotFound(Exception):
def __init__(self, plugin_name):
Exception.__init__(self, "Handler for plugin [" + plugin_name + "] not found.")
return None
def get_plugin(name, config=None):
plugin = utils.first(x for x in PluginBase.plugins if x.get_name() == name)
if plugin == None:
print PluginBase.plugins
print "Not found"
return None
return plugin(config)
class MalwareHousePlugin(type):
def __init__(cls,name,bases,attrs):
# add the plugin to the our plugins list
if not hasattr(cls,'plugins'):
cls.plugins = []
else:
cls.plugins.append(cls)
class PluginBase(object):
__metaclass__ = MalwareHousePlugin
def __init__(self,config):
# Call the derived class to parse the rest of the configuration parameters
self.initialize_config(config)
def initialize_config(self,config_dict):
raise NotImplementedError
@staticmethod
def get_name():
raise NotImplementedError
def analyze(self):
pass
def create_report(self, report_data, filename):
try:
with open(filename, 'w') as report_file:
report_file.write(report_data)
print "Report successfully created"
print "Report Location: %s" %filename
except Exception, err:
print "Error: %s" %err
########NEW FILE########
__FILENAME__ = malware_manager
# !/usr/bin/env python
# encoding: utf-8
"""
malwarehouse.py
Created by Scott Roberts.
Copyright (c) 2012 TogaFoamParty Studios. All rights reserved.
"""
import utils
import db_controller
import sys
from malware_sample import MalwareSample
from argparse import ArgumentParser
import extensions.plugins
from extensions.plugin_base import get_plugin, PluginBase as plg
# Malwarehouse Options
config = utils.get_configuration("malwarehouse.cfg")
# Initialize our DB controller
db_controller = db_controller.MalwareDbController(config)
db_initialized = db_controller.initialize_db()
def check_prelim():
"""Initial setup code. Eventually this will set options."""
directory_structure = utils.initialize_environment(config)
return directory_structure
# Processes the malware sample
def malware_loader(malware_path, source, notes, tags):
sample = MalwareSample(malware_path, config, source, notes, tags)
return sample
def load_sample(args, source, notes, tags):
sample = malware_loader(args.load, source, notes, tags)
if sample:
# Process plugins
for plugin in plg.plugins:
plugin_name = plugin.get_name()
if config.get(plugin_name, 'plugin') == "On":
plg.analysis_report_directory = sample.analysis_report_directory
_plugin = get_plugin(plugin.get_name(), config)
_plugin.analyze(sample)
_plugin.create_report()
if sample.malware_definition and db_initialized:
db_controller.load_db(sample.malware_definition)
else:
print "Errors were encountered during analysis"
def delete_sample():
if not db_initialized:
print "Failed to initialize database\nPlease verify your database settings"
return
pass
def recent_samples(args):
if not db_initialized:
print "Failed to initialize database\nPlease verify your database settings"
return
args.recent = 5 if not args.recent else args.recent
data = db_controller.recent(args.recent)
for parsed_data in map(utils.parse_sqlite_result, data):
print MalwareSample.summary(parsed_data)
def find_sample(args):
if not db_initialized:
print "Failed to initialize database\nPlease verify your database settings"
return
print "> Find called with %s." % (args.find)
data = db_controller.find_sample(args.find)
for parsed_data in map(utils.parse_sqlite_result, data):
print MalwareSample.summary(parsed_data)
def main():
if not check_prelim():
sys.exit(1)
parser = ArgumentParser()
parser.add_argument("-r", "--recent",
action="store",
nargs='?',
default='5',
help="Display the newest samples (default: 5)")
parser.add_argument("-s", "--source",
action="store",
default=None,
help="Source of file")
parser.add_argument("-t", "--tags",
action="store",
default=None,
help="Any characteristics of the malware")
parser.add_argument("-n", "--notes",
action="store",
default="",
help="Notes about file")
parser.add_argument("-f", "--find",
action="store",
default="",
help="Find a sample by name, md5, or sha256")
parser.add_argument("-l", "--load",
action="store",
default="",
help="Load a malware sample for analysis")
parser.add_argument("-d", "--delete",
action="store",
default="",
help="Delete a sample by name, md5, or sha256")
args = parser.parse_args()
cli_arguments = sys.argv
# Set optional arguments
tags = args.tags if args.tags else ""
source = args.source if args.source else ""
notes = args.notes if args.notes else ""
# Process user commands
if args.find:
find_sample(args)
elif "-r" in cli_arguments:
recent_samples(args)
elif args.delete:
print "> [not implemented] Delete called with %s" % (args.delete)
elif args.load:
load_sample(args, source, notes, tags)
return True
if __name__ == "__main__":
main()
########NEW FILE########
__FILENAME__ = malware_sample
import utils
import os, pwd, shutil
import magic
import hashlib
import datetime
def get_md5(malware_path):
"""Wrapper for the usual md5 call because it's so verbose."""
return hashlib.md5(file(malware_path, 'r').read()).hexdigest()
def get_sha256(malware_path):
"""Wrapper for the usual sha256 call because it's so verbose."""
return hashlib.sha256(file(malware_path, 'r').read()).hexdigest()
def get_ssdeep(malware_path):
"""Wrapper for the usual pyssdeep call because it's so verbose."""
return "Not yet implimented"
def get_mimetype(malware_path):
"""Finds the standard mimetype for file and returns type name."""
mime = magic.Magic(mime=True)
return mime.from_file(malware_path)
def get_yara(malware_path, yara_rules):
"""Checks malware against a Yara ruleset and returns a dictionary of matched rules."""
tags = []
try:
import yara
rules = yara.compile(filepath=yara_rules, includes=True)
yara_tags = rules.match(malware_path)
for tag in yara_tags:
tags.append(str(tag))
except ImportError:
raise
except yara.Error as e:
print("Yara signature file doesn't exist.")
tags = []
return tags
class MalwareSample(object):
"""malware_sample represents a piece of malware within Malwarehouse"""
def __init__(self, malware_path, config, sample_source="", sample_notes="", yara_rules=None):
super(MalwareSample, self).__init__()
self.malware_path = malware_path
self.filename = os.path.basename(malware_path)
self.config = config
self.yara_rules = self.config.get('settings', 'yararules') if not yara_rules else yara_rules
# Hash characteristics
self.hash_md5 = get_md5(malware_path)
self.hash_sha256 = get_sha256(malware_path)
# Meta characteristics
        self.meta_tags = get_yara(malware_path, self.yara_rules)  # fixed: use the resolved rules path (with config fallback), not the raw argument
self.meta_source = sample_source
self.meta_notes = sample_notes.split(',')
self.initialize_sample_environment()
def initialize_sample_environment(self):
dirs = ["bin", "report"]
base_dir = os.path.expanduser(self.config.get('settings', 'basedir'))
sample_dir = os.path.join(base_dir, self.hash_sha256)
# Create sample root directory
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
# Create analysis directories
for dir in dirs:
os.makedirs(os.path.join(sample_dir, dir))
# Analysis characteristics
self.analysis_datetime = str(datetime.datetime.now())
self.analysis_user = pwd.getpwuid(os.getuid())[0]
self.analysis_sample_directory = sample_dir
# Malware binary location
self.analysis_sample_location = os.path.join(sample_dir, "bin", self.filename)
# Reports location
self.analysis_report_directory = os.path.join(sample_dir, "report")
# Make a copy of the file
print "Copying sample to %s" % self.analysis_sample_location
shutil.copy(self.malware_path, os.path.join(sample_dir, "bin"))
@staticmethod
def summary(malware_definition):
return "- %s (%s) - %s" % (malware_definition['name'], malware_definition['source'], malware_definition['md5'])
def details(self):
"""Prints an easy to read summary of the malware."""
try:
details = " Analysis ".center(50, "=")
details += "\ndatetime:".ljust(25) + "%s\n" % (self.malware_definition['datetime'])
details += " File ".center(50, "=")
details += "\nsample name:".ljust(25) + "%s\n" % (self.malware_definition['name'])
details += "mimetype:".ljust(25) + "%s\n" % (self.malware_definition['mimetype'])
details += "size:".ljust(25) + "%s\n" % (self.malware_definition['size'])
details += " Hashes ".center(50, "=")
details += "\nmd5:".ljust(26) + "%s\n" % (self.malware_definition['md5'])
details += "sha256:".ljust(26) + "%s\n" % (self.malware_definition['sha256'])
details += " Meta ".center(50, "=")
details += "\ntags:".ljust(26) + "%s\n" % (self.malware_definition['tags'])
details += "source:".ljust(26) + "%s\n" % (self.malware_definition['source'])
details += " Meta ".center(50, "=")
details += "\nnotes:".ljust(25) + "%s" % (self.malware_definition["notes"])
details += "\n"
details += "sample directory: %s" % self.malware_definition["sample_dir"]
return details
except Exception, err:
print "%s - %s" % (Exception.message, err)
########NEW FILE########
__FILENAME__ = malware_sample_model
from sqlalchemy import Column, Text, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MalwareIndex(Base):
__tablename__ = 'malwarehouse_index'
datetime = Column(Text, primary_key=True)
name = Column(Text)
mimetype = Column(Text)
tags = Column(Text)
size = Column(Integer)
md5 = Column(Text)
sha256 = Column(Text)
source = Column(Text)
notes = Column(Text)
########NEW FILE########
__FILENAME__ = utils
import hashlib
import magic
import pydeep
import ConfigParser
import json
import os
def get_configuration(config_file):
config = ConfigParser.SafeConfigParser()
try:
config.read(config_file)
return config
except Exception, err:
print "%s - %s" %(Exception, err)
def initialize_environment(config):
# Create malwarehouse root directory
base_dir = os.path.expanduser(config.get('settings', 'basedir'))
try:
if not os.path.exists(base_dir):
os.makedirs(base_dir)
return True
except Exception, err:
print err
return False
def parse_sqlite_result(unparsed):
"Takes the results from a SQLite query and parses it as a dictionary."
    return unparsed
    # Unreachable legacy mapping kept for reference:
    return {'datetime': unparsed[0], 'name': unparsed[1], 'mimetype': unparsed[2], 'tags': unparsed[3], 'size': unparsed[4], 'md5': unparsed[5], 'sha256': unparsed[6], 'source': unparsed[7], 'notes': unparsed[8]}
def get_json(dictionary):
return json.dumps(dictionary
)
def get_mimetype(malware_path):
"""Finds the standard mimetype for file and returns type name."""
mime = magic.Magic(mime=True)
return mime.from_file(malware_path)
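# NOTE: the two functions below take `self` and look like stray class methods
# pasted at module level; nothing in this file calls them.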
def __json__(self):
print "JSON would have been returned."
pass
def __str__(self):
return self.summary()
def first(iterable, default=None):
for item in iterable:
return item
return default
########NEW FILE########
| [
"[email protected]"
] | |
296d464904b3c0d513118488e0fbf5d27684cdcd | f3555ac66906df4bdcf35000d81d3d911f4f98fa | /lab3/zavd6.py | 721450527a313173e498b117f85661a04acf7c8c | [] | no_license | ki2chio/IA_language | 48e7f2b2a662a3be3f16b74b14c1309eb0b3d65b | e0daac3986a170823527860a811530f69ad62748 | refs/heads/master | 2023-04-24T19:59:22.570447 | 2021-05-14T14:00:15 | 2021-05-14T14:00:15 | 338,029,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('flats.csv', sep=',', decimal=',')
df["Загальна_площа"] = df.Загальна_площа.astype(float)
plot5 = sns.FacetGrid(df, col='Місто')
plot5 = plot5.map(sns.boxplot, 'Ціна')
plot6 = sns.FacetGrid(df, col='Кімнат')
plot6 = plot6.map(sns.boxplot, 'Ціна')
plt.show() | [
"[email protected]"
] | |
cef9526a60cae3d7e5f090a8960aab12a4d64957 | f766eeefa2b4b63312087ba72bb77f9b9936c46a | /Day 2/file1.py | d8a3d91c8c2165900970da2d33f93cd547741d82 | [
"MIT"
] | permissive | adamsaparudin/python-datascience | 365a84984aec7f444dc4fb9797fe69af5e80fd39 | 1b4164bb8a091f88def950f07108fe023737399c | refs/heads/master | 2020-03-30T09:51:33.034552 | 2018-11-06T14:17:16 | 2018-11-06T14:17:16 | 151,095,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | myfile = open("myfile1.txt", "w")
myfile.write("Hallo from text file \n") # \r\n untuk di windows
myfile.write("cobain nulis di file nih")
myfile.close()
| [
"[email protected]"
] | |
c3d8d42556273a8ebbb7044e03df523a2537dcc7 | 2dcd5b3f42f72a3b3c2e074c045ca0e33b682867 | /AutopilotUiPanel.py | 43dff47214422b7d3a0eb8591c63861e83601c51 | [
"Apache-2.0"
] | permissive | JustMJ/ksp_rtls_launch_to_rendezvous | 0e14035d2218d6ade42f828797e6cf4ab361e8e2 | 195ebfb5aacf1a857aaaf0a69bf071d93d887efd | refs/heads/master | 2022-12-08T17:08:04.785658 | 2020-08-21T12:41:44 | 2020-08-21T12:41:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,124 | py | from krpc_utils import TelemetryStreams
import time
import math
import numpy as np
import json
from MainUiPanel import panel_x_offset
from krpc_utils import quaternion_rotation
from Autopilot import Autopilot
class AutopilotUiPanel:
def __init__(self, connection):
self.autopilot = None
self.canvas = connection.ui.stock_canvas
self.connection = connection
self.screen_size = self.canvas.rect_transform.size
self.ut_previous_control_update = 0.0
self.ut_previous_status_update = 0.0
self.ut_stream = connection.add_stream(getattr, connection.space_center, 'ut')
self.panel = self.canvas.add_panel()
self.panel.rect_transform.size = (600, 300)
self.panel.rect_transform.position = (panel_x_offset, -200)
self.panel.rect_transform.anchor = (0.0, 1.0)
self.panel.rect_transform.pivot = (0.0, 1.0)
self.panel.visible = False
self.label_status = self.panel.add_text("")
self.label_status.rect_transform.position = (25, -60)
self.label_status.rect_transform.size = (500, 300)
self.label_status.rect_transform.anchor = (0.0, 1.0)
self.label_status.rect_transform.pivot = (0.0, 1.0)
self.label_status.color = (1,1,1)
self.label_status.alignment = connection.ui.TextAnchor.upper_left
self.label_status.size = 15
self.label_status.font = 'Courier New'
self.label_status.style = connection.ui.FontStyle.bold
self.button_start = self.panel.add_button("Start")
self.button_start.rect_transform.position = (20, -20)
self.button_start.rect_transform.size = (70, 30)
self.button_start.rect_transform.anchor = (0.0, 1.0)
self.button_start.rect_transform.pivot = (0.0, 1.0)
self.button_start_clicked = connection.add_stream(getattr,
self.button_start, 'clicked')
self.button_disable = self.panel.add_button("Disable")
self.button_disable.rect_transform.position = (100, -20)
self.button_disable.rect_transform.size = (70, 30)
self.button_disable.rect_transform.anchor = (0.0, 1.0)
self.button_disable.rect_transform.pivot = (0.0, 1.0)
self.button_disable_clicked = connection.add_stream(getattr,
self.button_disable, 'clicked')
self.restore_state()
def save_state(self):
with open('autopilot_state.json', 'w') as f:
json.dump({'is_running': self.is_running()}, f)
def restore_state(self):
is_running = False
try:
with open('autopilot_state.json', 'r') as f:
data = json.load(f)
                if data.get('is_running', False):
is_running = True
        except Exception:  # missing or corrupt state file; start disabled
pass
if is_running:
self.autopilot = Autopilot(self.connection, self.ut_stream)
def is_running(self):
return self.autopilot is not None
def update(self):
if self.button_start_clicked():
self.button_start.clicked = False
if not self.is_running():
self.autopilot = Autopilot(self.connection, self.ut_stream)
self.save_state()
if self.button_disable_clicked():
self.button_disable.clicked = False
if self.is_running():
self.autopilot = None
self.save_state()
        # Timed updates: run the control step at ~20 Hz and refresh the status text at ~2 Hz
if self.is_running():
ut = self.ut_stream()
if ut > self.ut_previous_control_update + 1.0/20:
delta_t = ut - self.ut_previous_control_update
self.autopilot.control_update(ut, delta_t)
self.ut_previous_control_update = ut
if ut > self.ut_previous_status_update + 0.5:
delta_t = ut - self.ut_previous_status_update
self.label_status.content = self.autopilot.get_status_update(ut, delta_t)
self.ut_previous_status_update = ut
if self.autopilot.should_disable:
self.autopilot = None
self.save_state()
exit()
| [
""
] | |
106c2742f861e9c94c9d1c6cc3ffaa7dbf2321b7 | 90376bf430217279fc391e51f5309434474d05fe | /Django-EyePay/webpersonal/core/serializers.py | aa58686d36bee15d804519b0cc83ab7459c42e7b | [] | no_license | OscarUtreras/EyePay | 8ea6f7f0666b13b82d878f83aeec90996a387f31 | 726d8eda4edc161d6c1d5d61a619933538d49aa5 | refs/heads/master | 2020-04-07T05:49:37.452113 | 2018-11-26T18:33:22 | 2018-11-26T18:33:22 | 158,111,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | from rest_framework import serializers
from .models import Product
class ProductSerializer(serializers.ModelSerializer):
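    # Exposes only the Product name and category fields through the REST API.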
class Meta:
model = Product
fields = ('name', 'category') | [
"[email protected]"
] | |
bd0c71fbc2d44ac2a1abad9d43a97abaf92e7854 | 0c48e242fcacab32a71fdf3075f386ce109edf55 | /generation3/bots/theoretical_bot.py | f8a1c07f977c546dbe6852e1293a3b51e6d81591 | [] | no_license | webclinic017/Master-Insight-Trading-Bot-2 | 6a39c029264d832e913b0739499c252f86169850 | dcc0fa8fd1e2dbd3fa4cad0ee3618db88eade480 | refs/heads/master | 2023-05-29T22:25:53.090971 | 2021-06-18T14:10:51 | 2021-06-18T14:10:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | from typing import Dict
from generation3.bots.bot_processing_hub.backtester.BotBackTester import \
BotBackTester
from generation3.bots.bot_processing_hub.refined_data_for_bot import RefinedData
from generation3.ensemble_trading_bot import SignalProvider
import numpy as np
class TheoreticalBot(SignalProvider):
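    """Replays the ground-truth buy/sell signals from the refined dataset,
    i.e. a perfect-information baseline for the backtester."""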
    buy_signal: np.ndarray
    sell_signal: np.ndarray
def __init__(
self,
b_t_x, b_t_y, b_v_x,
s_t_x, s_t_y, s_v_x,
metadata: Dict
):
self.buy_signal = metadata["true_buy_signal"]
self.sell_signal = metadata["true_sell_signal"]
self.buy_signal = RefinedData.get_dataset_common(
None,
buy_sell_dataset='b'
)[3].to_numpy()
self.buy_signal = self.buy_signal.reshape(len(self.buy_signal), )
self.sell_signal = RefinedData.get_dataset_common(
None,
buy_sell_dataset='s'
)[3].to_numpy()
self.sell_signal = self.sell_signal.reshape(len(self.sell_signal), )
    def get_buy_signal(self) -> np.ndarray:
        return self.buy_signal
    def get_sell_signal(self) -> np.ndarray:
        return self.sell_signal
if __name__ == "__main__":
b_t_x, b_t_y, b_v_x, true_buy_signal, _, _ = RefinedData.get_dataset_common(
None,
buy_sell_dataset='b'
)
s_t_x, s_t_y, s_v_x, true_sell_signal, _, _ = RefinedData.get_dataset_common(
None,
buy_sell_dataset='s'
)
bbt = BotBackTester(
TheoreticalBot(
None, None, None,
None, None, None,
{
'true_buy_signal': true_buy_signal,
'true_sell_signal': true_sell_signal
}
),
list(b_v_x["Adj Close"]),
buy_treshold=(0.5, 1.0),
sell_treshold=(0.9, 1.0),
spread=0,
lot_size=0.1,
pip_definition=0.0001,
profit_per_pip_per_lot=10
)
bbt.back_test()
bbt.print_stats()
| [
"[email protected]"
] | |
51a83e3c760b46a993dcee7eb086c4bcee6ec984 | c1f95744e8ca3110630365c92fe74962a0c989da | /app/master_sound/models.py | 3f03e85e2d939a499f3b61c2c545a908bc37ea02 | [
"MIT"
] | permissive | PlatziMasterSound/MasterSound-Backend | 35ab0b0a0b3903762a34b56322e0e5f2330320d3 | a534aa2da238d5b284769a6e9dee43669d86017f | refs/heads/master | 2023-02-18T14:55:26.095957 | 2021-01-23T07:50:19 | 2021-01-23T07:50:19 | 309,891,679 | 0 | 0 | MIT | 2021-01-23T07:50:20 | 2020-11-04T04:57:16 | Python | UTF-8 | Python | false | false | 6,035 | py | from datetime import datetime
import requests
from app.db import db, BaseModelMixin
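# Association tables backing the many-to-many relations declared below
# (album<->artist, playlist<->song, user<->played song).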
albums_artists = db.Table(
'albums_artists',
db.Model.metadata,
db.Column('albums_artists_id', db.Integer, primary_key=True),
db.Column('artist_id', db.Integer, db.ForeignKey('artists.artist_id', ondelete='CASCADE'), nullable=False),
db.Column('album_id', db.Integer, db.ForeignKey('albums.album_id', ondelete='CASCADE'), nullable=False)
)
playlists_songs = db.Table(
'playlists_songs',
db.Model.metadata,
db.Column('playlists_songs_id', db.Integer, primary_key=True),
db.Column('playlist_id', db.Integer, db.ForeignKey('playlists.playlist_id', ondelete='CASCADE'), nullable=False),
db.Column('song_id', db.Integer, db.ForeignKey('songs.song_id', ondelete='CASCADE'), nullable=False)
)
played_songs = db.Table(
'played_songs',
db.Column('played_song_id', db.Integer, primary_key=True),
db.Column('song_id', db.Integer, db.ForeignKey('songs.song_id', ondelete='CASCADE',), nullable=False),
db.Column('user_id', db.Integer, db.ForeignKey('users.user_id', ondelete='CASCADE',), nullable=False),
db.Column('created_at', db.DateTime, default=datetime.utcnow),
db.Column('updated_at', db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow),
db.Column('active', db.Integer, default=1)
)
class Country(db.Model, BaseModelMixin):
__tablename__ = 'countries'
country_id = db.Column(db.Integer, primary_key=True)
iso = db.Column(db.String(2), nullable=False)
name = db.Column(db.String(45), nullable=False)
spanish_name = db.Column(db.String(45), nullable=False)
users = db.relationship('User', back_populates='country', cascade='all, delete, delete-orphan', passive_deletes=True)
class User(db.Model, BaseModelMixin):
__tablename__ = 'users'
user_id = db.Column(db.Integer, primary_key=True)
given_name = db.Column(db.String(30), nullable=False)
last_name = db.Column(db.String(30), nullable=False)
email = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(60), nullable=False)
country_id = db.Column(db.Integer, db.ForeignKey('countries.country_id', ondelete='CASCADE'), nullable=False)
image_url = db.Column(db.String(100), nullable=False)
sex = db.Column(db.String(1), nullable=False)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
active = db.Column(db.Integer, nullable=False, default=1)
country = db.relationship('Country', back_populates='users')
played_songs = db.relationship('Song', back_populates='playing_users', secondary=played_songs, passive_deletes=True)
playlists = db.relationship('Playlist', back_populates='user')
class Album(db.Model, BaseModelMixin):
__tablename__ = 'albums'
album_id = db.Column(db.Integer, primary_key=True)
cover_image_url = db.Column(db.String(100))
spt_album_id = db.Column(db.String(30), nullable=False, unique=True)
album_name = db.Column(db.String(150), nullable=False)
songs = db.relationship('Song', back_populates='album', cascade='all, delete, delete-orphan', passive_deletes=True)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
active = db.Column(db.Integer, default=1)
artists = db.relationship('Artist', back_populates='albums', secondary=albums_artists, cascade='all, delete')
class Artist(db.Model, BaseModelMixin):
__tablename__ = 'artists'
artist_id = db.Column(db.Integer, primary_key=True)
spt_artist_id = db.Column(db.String(30), nullable=False, unique=True)
artist_name = db.Column(db.String(50), nullable=False)
cover_image_url = db.Column(db.String(100))
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
active = db.Column(db.Integer, default=1)
albums = db.relationship('Album', back_populates='artists', secondary=albums_artists, passive_deletes=True)
class Song(db.Model, BaseModelMixin):
__tablename__ = 'songs'
song_id = db.Column(db.Integer, primary_key=True)
spt_song_id = db.Column(db.String(30), nullable=False, unique=True)
song_name = db.Column(db.String(120), nullable=False)
album_id = db.Column(db.Integer, db.ForeignKey('albums.album_id', ondelete='CASCADE'), nullable=False)
order_number = db.Column(db.Integer, nullable=False)
duration = db.Column(db.String(6), nullable=False)
sound_url = db.Column(db.String(200), nullable=False)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
active = db.Column(db.Integer, default=1)
album = db.relationship('Album', back_populates='songs')
playlists = db.relationship('Playlist', back_populates='songs', secondary=playlists_songs, cascade='all, delete')
playing_users = db.relationship('User', back_populates='played_songs', secondary=played_songs, cascade='all, delete')
class Playlist(db.Model, BaseModelMixin):
__tablename__ = 'playlists'
playlist_id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id', ondelete='CASCADE'), nullable=False)
playlist_name = db.Column(db.String(50), nullable=False)
favourite = db.Column(db.Integer, default=0)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
active = db.Column(db.Integer, default=1)
user = db.relationship('User', back_populates='playlists')
songs = db.relationship('Song', back_populates='playlists', secondary=playlists_songs, passive_deletes=True)
| [
"[email protected]"
] | |
a9aee755e05f92b025c1289803993fcb90208dab | d294701a6365f279ff4368d64d83c6935f25cfc2 | /ITEnergy/urls.py | 4e333c2540a5674a1f8e70002641a729306ea06d | [] | no_license | ifdotpy/ITEnergy | 5e03c9bbb8cba2b9a1dcfea06a01c2cc05baaecd | 0c3b4563069b4d36f32a7a43df6fdede62e8177a | refs/heads/master | 2021-06-19T10:34:57.008354 | 2017-03-22T07:49:38 | 2017-03-22T07:49:38 | 84,626,384 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | """ITEnergy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
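# Everything except /admin/ is delegated to the cafe app's URLconf.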
urlpatterns = [
url(r'^', include('cafe.urls')),
url(r'^admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
580f645cc7cf7570e831fe6cc77214eaf6905599 | 14e94be35e02db4259347b30f6f4da3310303be4 | /notebooks/develop/config.py | e85cc1765202c6f46372af4e42963ebad7ec0fac | [] | no_license | ditmo/litness-test | f2038b9ca9ae80cb271af1727efa652896b86609 | 6e57f2b0058e304f758ae2e39e51d239260b51f4 | refs/heads/master | 2022-10-24T21:55:56.490081 | 2020-06-17T19:40:10 | 2020-06-17T19:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | # Version: Ready for midpoint review
from os import path
import os
# Getting the parent directory of this file. That will function as the project home.
PROJECT_HOME = path.dirname(path.dirname(path.abspath(__file__)))
# Logging
LOGGING_CONFIG = path.join(PROJECT_HOME, 'config/logging.conf')
# Billboard charts parameters
START_YEAR = 2000
END_YEAR = 2020
RAPSONG_TOPX = 25
HOT100_TOPX = 40
# Acquire and process config
# MAX_RECORDS_READ = 100
BB_HOT100_LOCATION = path.join(PROJECT_HOME,
'data/bb_hot100_{}_to_{}.json'.format(START_YEAR, END_YEAR))
BB_RAPSONG_LOCATION = path.join(PROJECT_HOME,
'data/bb_rapsong_{}_to_{}.json'.format(START_YEAR, END_YEAR))
SPOTIFY_LOCATION = path.join(PROJECT_HOME,
'data/spotify_{}_to_{}.csv'.format(START_YEAR, END_YEAR))
# Spotify API keys
SPOTIFY_CID = os.environ.get("SPOTIFY_CID")
SPOTIFY_SECRET = os.environ.get("SPOTIFY_SECRET")
# S3
S3_BUCKET_NAME = os.environ.get("AWS_BUCKET")
BB_HOT100_NAME = 'bb_hot100_{}_to_{}.json'.format(START_YEAR, END_YEAR)
BB_RAPSONG_NAME = 'bb_rapsong_{}_to_{}.json'.format(START_YEAR, END_YEAR)
SPOTIFY_NAME = 'spotify_{}_to_{}.csv'.format(START_YEAR, END_YEAR)
# Boolean variable for creation of local database instead of on RDS
OFFLINE_DB_FLAG = True
# SQLite database
DATABASE_PATH = path.join(PROJECT_HOME, 'data/billboard_spotify.db')
SQLITE_ENGINE = 'sqlite:////{}'.format(DATABASE_PATH)
# MySQL database
CONN_TYPE = "mysql+pymysql"
USER = os.environ.get("MYSQL_USER")
PASSWORD = os.environ.get("MYSQL_PASSWORD")
HOST = os.environ.get("MYSQL_HOST")
PORT = os.environ.get("MYSQL_PORT")
DATABASE_NAME = os.environ.get("DATABASE_NAME")
MYSQL_ENGINE = "{}://{}:{}@{}:{}/{}".format(CONN_TYPE, USER, PASSWORD, HOST, PORT, DATABASE_NAME)
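# Yields mysql+pymysql://user:password@host:port/db_name, with all
# credentials sourced from environment variables.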
# Modeling
SPLIT_SEED = 64
FEATURE_NAMES = ['danceability', 'energy','loudness', 'acousticness', 'speechiness',
'instrumentalness', 'liveness', 'valence', 'tempo','duration_ms',
'key_A', 'key_Ab', 'key_B', 'key_Bb', 'key_C', 'key_D', 'key_Db', 'key_E',
'key_Eb', 'key_F', 'key_G', 'key_Gb','mode_major']
RFC_PARAMS = {
'n_estimators': 200,
'min_samples_split': 5,
'min_samples_leaf': 4,
'max_features': 'auto',
'max_depth': 10,
'bootstrap': True,
'random_state': 64}
MODEL_PATH = path.join(PROJECT_HOME, 'model/random_forest_classifier.pkl')
MODEL_METRICS_PATH = path.join(PROJECT_HOME, 'model/test_metrics.yaml')
FEAT_IMP_PATH = path.join(PROJECT_HOME, 'model/feature_importance.csv')
| [
"[email protected]"
] | |
cc38a92a3398b14185effb892690b20baefc270b | 35da81a9b06f388b7dd7d2ab588e6f8ebd1f04db | /Day2/Day2_Set .intersection() Operation.py | 7206583b1b178412cc758f5500933e76d2f694e4 | [] | no_license | Swati1910/Innomatics_Internship | 4434f4422072a4025ade079c515da352114033a6 | 2e91a616b91ff3f68f666cd6991fd26f7e3df565 | refs/heads/main | 2023-02-27T13:44:52.008790 | 2021-02-10T07:01:42 | 2021-02-10T07:01:42 | 331,275,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
n1 = input()
set1=set(input().split(" "))
n2= input()
set2=set(input().split(" "))
set3=set1.intersection(set2)
print (len(set3))
| [
"[email protected]"
] | |
9a08c6976c23db467a6aff1288eaa39c38ef7341 | 5e6260548f3f91eb7eddb1a23d569b1f280e122d | /pdf-table-extract-master/src/pdftableextract/scripts.py | fb80c860b6391a7dfc86b33785cc61ea22f2b5c0 | [
"MIT"
] | permissive | frankcode101/PDFProcessing | 198d708508b77eb9550c4d4e513d07ed867b25eb | 21520aea2f38700dd282f0920b3ec0eb2ad107d3 | refs/heads/master | 2021-09-07T21:29:13.563605 | 2018-03-01T11:19:13 | 2018-03-01T11:19:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,624 | py | import argparse
import sys
import logging
import subprocess
from core import process_page, output
import core
#-----------------------------------------------------------------------
def procargs() :
p = argparse.ArgumentParser( description="Finds tables in a PDF page.")
p.add_argument("-i", dest='infile', help="input file" )
p.add_argument("-o", dest='outfile', help="output file", default=None,
type=str)
p.add_argument("--greyscale_threshold","-g", help="grayscale threshold (%%)", type=int, default=25 )
p.add_argument("-p", type=str, dest='page', required=True, action="append",
help="a page in the PDF to process, as page[:firstrow:lastrow]." )
p.add_argument("-c", type=str, dest='crop',
help="crop to left:top:right:bottom. Paints white outside this "
"rectangle." )
p.add_argument("--line_length", "-l", type=float, default=0.17 ,
help="line length threshold (length)" )
p.add_argument("--bitmap_resolution", "-r", type=int, default=300,
help="resolution of internal bitmap (dots per length unit)" )
p.add_argument("-name", help="name to add to XML tag, or HTML comments")
p.add_argument("-pad", help="imitial image pading (pixels)", type=int,
default=2 )
p.add_argument("-white",action="append",
help="paint white to the bitmap as left:top:right:bottom in length units."
"Done before painting black" )
p.add_argument("-black",action="append",
help="paint black to the bitmap as left:top:right:bottom in length units."
"Done after poainting white" )
p.add_argument("-bitmap", action="store_true",
help = "Dump working bitmap not debuging image." )
p.add_argument("-checkcrop", action="store_true",
help = "Stop after finding croping rectangle, and output debuging "
"image (use -bitmap).")
p.add_argument("-checklines", action="store_true",
help = "Stop after finding lines, and output debuging image." )
p.add_argument("-checkdivs", action="store_true",
help = "Stop after finding dividors, and output debuging image." )
p.add_argument("-checkcells", action="store_true",
help = "Stop after finding cells, and output debuging image." )
p.add_argument("-colmult", type=float, default=1.0,
help = "color cycling multiplyer for checkcells and chtml" )
p.add_argument("-boxes", action="store_true",
help = "Just output cell corners, don't send cells to pdftotext." )
p.add_argument("-t", choices=['cells_csv','cells_json','cells_xml',
'table_csv','table_html','table_chtml','table_list'],
default="cells_xml",
help = "output type (table_chtml is colorized like '-checkcells') "
"(default cells_xml)" )
p.add_argument("--whitespace","-w", choices=['none','normalize','raw'], default="normalize",
help = "What to do with whitespace in cells. none = remove it all, "
"normalize (default) = any whitespace (including CRLF) replaced "
"with a single space, raw = do nothing." )
p.add_argument("--traceback","--backtrace","-tb","-bt",action="store_true")
return p.parse_args()
def main():
try:
args = procargs()
imain(args)
except IOError as e:
if args.traceback:
raise
sys.exit("I/O Error running pdf-table-extract: {0}".format(e))
except OSError as e:
print("An OS Error occurred running pdf-table-extract: Is `pdftoppm` installed and available?")
if args.traceback:
raise
sys.exit("OS Error: {0}".format(e))
except subprocess.CalledProcessError as e:
if args.traceback:
raise
sys.exit("Error while checking a subprocess call: {0}".format(e))
except Exception as e:
if args.traceback:
raise
sys.exit(e)
def imain(args):
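    # In the check/debug modes each page renders diagnostic output;
    # otherwise the cells from every requested page are accumulated and
    # written out in the format selected by -t.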
cells = []
if args.checkcrop or args.checklines or args.checkdivs or args.checkcells:
for pgs in args.page :
success = process_page(args.infile, pgs,
bitmap=args.bitmap,
checkcrop=args.checkcrop,
checklines=args.checklines,
checkdivs=args.checkdivs,
checkcells=args.checkcells,
whitespace=args.whitespace,
boxes=args.boxes,
greyscale_threshold=args.greyscale_threshold,
page=args.page,
crop=args.crop,
line_length=args.line_length,
bitmap_resolution=args.bitmap_resolution,
name=args.name,
pad=args.pad,
white=args.white,
black=args.black, outfilename=args.outfile)
else:
for pgs in args.page :
cells.extend(process_page(args.infile, pgs,
bitmap=args.bitmap,
checkcrop=args.checkcrop,
checklines=args.checklines,
checkdivs=args.checkdivs,
checkcells=args.checkcells,
whitespace=args.whitespace,
boxes=args.boxes,
greyscale_threshold=args.greyscale_threshold,
page=args.page,
crop=args.crop,
line_length=args.line_length,
bitmap_resolution=args.bitmap_resolution,
name=args.name,
pad=args.pad,
white=args.white,
black=args.black))
filenames = dict()
if args.outfile is None:
args.outfile = sys.stdout
filenames["{0}_filename".format(args.t)] = args.outfile
output(cells, args.page, name=args.name, infile=args.infile, output_type=args.t, **filenames)
| [
"[email protected]"
] | |
998ba8ac50e4d7278a16930ba907e750e9566b42 | 529ea79ac80a2d0b359fbbe420f51b5c19f7d40c | /8K-means/K-Means/kmeansplusplus_ys.py | ea981128ad27d59814d8dfc8fe6bdc9cca59567a | [] | no_license | ys1305/ML-hand | 2a6a16704b5632775f28f586dd6969fd5dc8c85f | 8e1c3a4a1e34f329a4d6bd70d561c1988325e57c | refs/heads/master | 2020-06-18T06:03:28.098924 | 2019-07-10T11:14:23 | 2019-07-10T11:14:23 | 196,189,128 | 18 | 5 | null | null | null | null | UTF-8 | Python | false | false | 6,081 | py | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import colors
from scipy import io as spio
from scipy import misc # image operations
import numbers
from matplotlib.font_manager import FontProperties
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14) # fixes garbled CJK glyphs in matplotlib plots on Windows
def distance(point1, point2):
    # Euclidean distance (row-wise over samples)
return np.sqrt(np.sum(np.square(point1 - point2), axis=1))
# return np.sqrt(np.sum(np.power(point1 - point2,2)))
def check_random_state(seed):
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
random_state = check_random_state(None)
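# check_random_state mirrors scikit-learn's helper of the same name:
# it normalizes None / int / RandomState into a RandomState instance.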
# k-means++ initialization: spreads the initial centers apart to speed up clustering
# The first center is picked uniformly at random
def k_means_plus_plus(dataset,k):
    n_samples, n_features = dataset.shape
    centers = np.empty((k, n_features))
    # n_local_trials is the number of candidate points sampled per new center
    n_local_trials = None
    if n_local_trials is None:
        n_local_trials = 2 + int(np.log(k))
    # first center, picked at random
    center_id = random_state.randint(n_samples)
    centers[0] = dataset[center_id]
    # closest_dist_sq holds each sample's distance to its nearest chosen center
    # e.g. with 3 centers, closest_dist_sq =
    # [min(dists of sample 1 to the 3 centers), min(dists of sample 2 ...), ... min(dists of sample n ...)]
    closest_dist_sq = distance(centers[0, np.newaxis], dataset)
    # np.newaxis adds a dimension to the original array
    # current_pot is the sum of all of those nearest distances
    current_pot = closest_dist_sq.sum()
    for c in range(1, k):
        # draw n_local_trials random values, scaled onto [0, current_pot)
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        # candidates are sampled with probability proportional to distance,
        # then the one minimizing the total nearest distance is kept
        # np.cumsum([1,2,3,4]) = [1, 3, 6, 10], i.e. running totals of the values so far
        # np.searchsorted locates where the random rand_vals fall inside np.cumsum(closest_dist_sq)
        # candidate_ids are the indices of the candidate centers
        candidate_ids = np.searchsorted(np.cumsum(closest_dist_sq), rand_vals)
        print(candidate_ids)
        # best_candidate: the best candidate center so far
        # best_pot: the distance total computed for the best candidate
        # best_dist_sq: the distance list computed for the best candidate
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Euclidean distance from every sample to this candidate
            distance_to_candidate = distance(dataset[candidate_ids[trial], np.newaxis], dataset)
            # compute the candidate's distance list new_dist_sq and its total new_pot
            # closest_dist_sq: each sample's distance to the already-chosen centers
            # new_dist_sq: each sample's nearest distance over all centers (chosen ones + this candidate)
            # with one more center, a sample's nearest-center distance may shrink
            new_dist_sq = np.minimum(closest_dist_sq, distance_to_candidate)
            new_pot = new_dist_sq.sum()
            # keep the candidate with the smallest new_pot
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq
        centers[c] = dataset[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq
    return centers
##################### Simpler version -- easier to follow
# but it can occasionally produce a poor initial pick, e.g.
# [[3. 0. ]
# [0. 0. ]
# [3.1 3.1]]
def distance1(point1, point2):
    # Euclidean distance (scalar, between two points)
return np.sqrt(np.sum(np.power(point1 - point2,2)))
# For a single sample, find the distance to its nearest cluster center
def nearest(point, cluster_centers):
    min_dist = np.inf
    m = np.shape(cluster_centers)[0] # number of cluster centers initialized so far
    for i in range(m):
        # distance between point and each cluster center
        d = distance1(point, cluster_centers[i, ])
        # keep the shortest distance
        if min_dist > d:
            min_dist = d
    return min_dist
# Choose cluster centers that are as far apart from each other as possible
def get_centroids(dataset, k):
    m, n = np.shape(dataset)
    cluster_centers = np.zeros((k , n))
    index = np.random.randint(0, m)
    # index = random_state.randint(0,m)
    # returns a random integer in the range [low, high)
    # print(index)
    cluster_centers[0] = dataset[index]
    # 2. Initialize a list of distances
    d = [0.0 for _ in range(m)]
    for i in range(1, k):
        sum_all = 0
        for j in range(m):
            # 3. For each sample, find its nearest cluster center
            d[j] = nearest(dataset[j], cluster_centers[0:i])
            # 4. Sum all of the nearest distances
            sum_all += d[j]
        # 5. Draw a random value in [0, sum_all)
        # print(d)
        sum_all *= np.random.rand()
        # np.searchsorted finds where sum_all falls inside np.cumsum(d);
        # equivalent to the commented-out step 6 below
        candidate_ids = np.searchsorted(np.cumsum(d), sum_all)
        cluster_centers[i] = dataset[candidate_ids]
        # ## 6. Pick a distant sample as the next cluster center
        # for j, di in enumerate(d):
        #     sum_all=sum_all - di
        #     if sum_all > 0:
        #         continue
        #     cluster_centers[i] = dataset[j]
        #     break
    return cluster_centers
data = np.array([[0.,0.],
[0.1,0.1],[0.2,0.2],[3.0,0.0],[3.1,3.1],[3.2,3.2],[9.0,9.0],[9.1,9.1],[9.2,9.2]
])
print(data)
print(k_means_plus_plus(data,3))
print(get_centroids(data,3)) | [
"[email protected]"
] | |
63d06648d0f943bcb273b11a78c85cba43cade4c | 8438015063bde3b5f87b26351946b51754bf5984 | /kuaforler/migrations/0012_kuafor_gun2.py | 21de3ec07e8970d9b0600875effac49bd56a0425 | [] | no_license | tekin7/Django-web-project | a624ea540093cbbd0069cb95cb2a7fd2713337bf | f07db2509ba78bb79c59db528bafc5fab84bc668 | refs/heads/master | 2020-08-13T16:34:15.588018 | 2019-10-14T09:26:19 | 2019-10-14T09:26:19 | 215,001,430 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,020 | py | # Generated by Django 2.2.2 on 2019-07-12 14:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20190704_1501'),
('kuaforler', '0011_auto_20190710_0238'),
]
operations = [
migrations.CreateModel(
name='kuafor_gun2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sekiz', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('sekizB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('dokuz', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('dokuzB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('on', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onbir', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onbirB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('oniki', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onikiB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onuc', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onucB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('ondort', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('ondortB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onbes', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onbesB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onalti', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onaltiB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onyedi', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onyediB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onsekiz', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('onsekizB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('ondokuz', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('ondokuzB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('yirmi', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('yirmiB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('yirmibir', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('yirmibirB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('yirmiiki', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('yirmiikiB', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('yirmiuc', models.CharField(default='btn btn-default btn-lg btn-success', max_length=128)),
('coiffeur', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.coiffeur_profil')),
],
),
]
| [
"[email protected]"
] | |
9a2c0ef6b579c25560ad7c30076fc90d3e7a4a86 | 6ffea7adf2543159d7a15cc01fbf09de9c67e202 | /Data_Structures/Implement_Graph.py | 41c32a15c5d1fbf8946533b24384c546b765c436 | [] | no_license | jzingh98/SelfOrganizingANN | 35ea6da87cfcd640ba311257a5ebe0879b502dea | a615fe350e398635558444e3cf8b5bac9794bfb9 | refs/heads/master | 2022-11-30T13:34:33.136915 | 2020-08-03T11:55:20 | 2020-08-03T11:55:20 | 259,671,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | import random as randGraph
class Neuron:
def __init__(self):
self.incomingEdges = []
self.outgoingEdges = []
        self.inputVal = 0
        self.outputVal = 0
class Axon:
def __init__(self, startNode=None, endNode=None):
self.startNode = startNode
self.endNode = endNode
self.weight = randGraph.uniform(0, 1)
class Brain:
def __init__(self):
self.allNeurons = []
self.allAxons = []
self.startNeuron = None
def initializeBrain(self):
self.startNeuron = inputSpawnNeuron = Neuron()
middleSpawnNeuron = Neuron()
outputSpawnNeuron = Neuron()
self.formConnection(inputSpawnNeuron, middleSpawnNeuron)
self.formConnection(middleSpawnNeuron, outputSpawnNeuron)
self.allNeurons.extend([inputSpawnNeuron, middleSpawnNeuron, outputSpawnNeuron])
def formConnection(self, node1, node2):
newAxon = Axon(node1, node2)
node1.outgoingEdges.append(newAxon)
node2.incomingEdges.append(newAxon)
self.allAxons.append(newAxon)
| [
"[email protected]"
] | |
47fdee74bceac6d74ab494199f3383d8023ae23b | 5f483737707ca77cc1fde79bbe65289648f42062 | /python zajecia AI/zaj2/zad4.py | b93f7ef6cf1c2cb81d0daa7828b2ca844cec95f2 | [] | no_license | Yirogu/EasyPython | 82b8edeffac7de1f531d5d170cd620e444e90001 | c6700446037602a2c69fa1a5a5af09ae049326e7 | refs/heads/master | 2020-05-01T05:15:00.253559 | 2019-05-08T19:39:12 | 2019-05-08T19:39:12 | 177,296,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,907 | py | from treelib import Tree
import time
#
# def duplicate_node_path_check(tree, node):
# check_node = tree.get_node(node)
# current_node = check_node
#
#
# while not current_node.is_root():
# current_node = tree.parent(current_node.identifier)
# if check_node.tag == current_node.tag:
# return True
# return False
def duplicate_node_path_check(tree, node, tag):
    current_node = node
    while not current_node.is_root():
        current_node = tree.parent(current_node.identifier)
        if current_node.tag == tag:
            #print("from function: node.tag", node.tag)
            return True
    return False
def reachable_states(state):
if state == "Gdansk":
return [["Gdynia",24],["Koscierzyna",58],["Tczew",33],["Elblag",63]]
if state == "Gdynia":
return [["Gdansk",24],["Lebork",60],["Wladyslawowo",33]]
if state == "Koscierzyna":
return [["Chojnice", 70], ["Bytów", 40], ["Lebork", 58], ["Gdansk", 58], ["Tczew", 59]]
if state == "Tczew":
return [["Elblag", 53], ["Gdansk", 33], ["Koscierzyna", 59]]
if state == "Elblag":
return [["Tczew", 53], ["Gdansk", 63]]
if state == "Hel":
return [["Wladyslawowo", 35]]
if state == "Wladyslawowo":
return [["Gdynia", 42], ["Leba", 63]]
if state == "Leba":
return [["Ustka", 64], ["Lebork", 29], ["Wladyslawowo", 66]]
if state == "Lebork":
return [["Leba", 29], ["Slupsk", 55], ["Koscierzyna", 58], ["Gdynia", 60]]
if state == "Ustka":
return [["Slupsk", 21], ["Gdansk", 64]]
if state == "Slupsk":
return [["Ustka", 21], ["Lebork", 55], ["Bytow", 70]]
if state == "Bytow":
return [["Chojnice", 65], ["Koscierzyna", 40], ["Slupsk", 70]]
if state == "Chojnice":
return [["Bytow", 65], ["Koscierzyna", 70]]
return []
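# Hard-coded road map of towns in Pomerania, Poland; edge weights are
# road distances in km (cf. the "kms" in the goal message below).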
def uniform_cost_search(start_state,target_state):
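    # Uniform-cost (Dijkstra-style) search: fifo_queue is re-sorted by
    # accumulated path cost on every iteration, so despite its name it
    # behaves as a priority queue.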
    # to build the tree we need an id for every node;
    # it will be incremented as nodes are added
    id = 0
    # put the start state into the tree (as the root) and into the queue
    tree = Tree()
    current_node = tree.create_node(start_state,id,data = 0)
    fifo_queue = []
    fifo_queue.append(current_node)
    # loop searching for a path to the goal state,
    # with a cap on the maximum number of nodes (id < 200000)
    while id<200000:
        # if the queue is empty, the goal state cannot be reached
        # to dump the queue: print(fifo_queue)
        if len(fifo_queue) == 0:
            tree.show()
            print("failed to reach the target state")
            return 1
        # otherwise take the cheapest state from the queue
        fifo_queue = sorted(fifo_queue, key=lambda x: x.data)
        current_node = fifo_queue[0]
        # if this state is the goal, finish successfully
        if current_node.tag == target_state:
            tree.show()
            print("the target state "+str(current_node.tag)+" with id ="+str(current_node.identifier)+" has been reached after "+str(current_node.data)+" kms!")
            return 0
        # if it is not the goal state, remove it from the queue
        del(fifo_queue[0])
        # and then append the states reachable from it
        # to the end of the queue and to the tree
        for elem in reachable_states(current_node.tag):
            if not duplicate_node_path_check(tree, current_node, elem[0]):
                id += 1
                new_elem = tree.create_node(elem[0], id, parent=current_node.identifier)
                new_elem.data = current_node.data + elem[1]
                fifo_queue.append(new_elem)
print("time limit exceeded")
#print(breadth_first_search("Tczew","Gdansk"))
start = time.time()
uniform_cost_search("Gdansk","Ustka")
end = time.time()
print(end - start)
# IF
# the target state Gdynia with id = 6 has been reached!
# 0.0006566047668457031
# Switch
| [
"[email protected]"
] | |
6dd6412d066314b6ca240a2fc1a9e76e145e2952 | 6a686f67fce7562f29ec318b6d0e2bbf1f505b72 | /Cap12_Estruturas_de_dados/9.2_Listas/2020/lists_tuplespy | 5b73db187eb1f88ad4d9d85a06a025bd18c464e7 | [] | no_license | frclasso/1st_Step_Python_Fabio_Classo | dd7f4b8f03f6992112fc9afb59c347d2989e612c | e7424143c432b5894ac30f816040145e129c1a2d | refs/heads/master | 2023-07-31T14:10:14.934518 | 2021-09-09T22:25:10 | 2021-09-09T22:25:10 | 138,339,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 11 09:12:45 2018
@author: fabio
"""
courses = ['History', 'Math', 'Physics','CompSci' ]
#indexing
#print(courses[0])
#print(courses[1])
#print(courses[2])
#print(courses[3])
# Slincing
#print(courses[0:5])
#print(courses[:5])
#print(courses[2:])
# Add items
#courses.append('Art')
#courses.insert(0, 'Biology')
# Add items from another list
courses_2 = ['Chemistry', 'Art']
#courses.insert(0, courses_2)
#print(courses[0])
courses.extend(courses_2)
# Removing items
# Usisng remove method
#courses.remove('Math')
#Using pop method (last object)
#courses.pop()
# Using del method
#del courses[0]
# Sorting lists
#courses.sort()
#courses.sort(reverse=True)
nums = [6,1,4,2,5,3,0,9,8,7]
#nums.sort()
#nums.sort(reverse=True) # reverses the order
#print(nums)
# Sorting a new list, using sorted method
new_courses = sorted(courses)
#print(new_courses)
#print(min(nums))
#print(max(nums))
#print(sum(nums))
#print(courses.index('CompSci'))
# Membership operators
#print('Medicine' in courses)
#print('Math' in courses)
#print(courses)
# for loop
#for course in courses:
# print(course)
#for index, course in enumerate(courses, start=1):
# print(index, course)
# Join
#courses_str = ', '.join(courses)
#print(courses_str)
#new_list = courses_str.split(', ')
#print(new_list)
# Mutable list
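# list2 = list1 copies the reference, not the data, so a mutation made
# through either name is visible through both.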
#list1 = ['History', 'Math', 'Physics', 'CompSci', 'Chemistry', 'Art']
#list2 = list1
#print(list1)
#print(list2)
#list1[0] = 'Bio' # changes both lists
#print(list1)
#print(list2)
# Tuples
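# NOTE: the literals below are actually lists; a real tuple uses parentheses,
# e.g. t = ('History', 'Math'), and t[0] = 'Bio' raises TypeError because
# tuples are immutable.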
#tuple_1 = ['History', 'Math', 'Physics', 'CompSci', 'Chemistry', 'Art']
#tuple_2 = tuple_1
#print(tuple_1)
#print(tuple_2)
#tuple_1[0] = 'Bio' # changes both lists
#
#print(tuple_1)
#print(tuple_2)
| [
"[email protected]"
] | ||
0bd86c05a544ec74fd6e21d361d37cbed3a88ef9 | c8c664de604f293b19e9cb3637aeb2c2f47dbde1 | /creating-project/application/table/models.py | b530454cde7fdb5a70b36021184ca1f51a522490 | [] | no_license | s-suchkov/dj-homeworks | 9c92ec7579b833f688afdad44e44e23611e7e506 | ef607951b84b9f79d2a6cf8cff7b8d4e83acc0ef | refs/heads/master | 2020-06-30T03:26:39.062667 | 2019-09-20T16:04:49 | 2019-09-20T16:04:49 | 200,708,401 | 0 | 0 | null | 2019-08-05T18:29:03 | 2019-08-05T18:29:02 | null | UTF-8 | Python | false | false | 375 | py | from django.db import models
import csv
class Fields(models.Model):
name = models.TextField()
width = models.IntegerField()
id = models.IntegerField(primary_key=True)
class Path_csv(models.Model):
file = models.FileField()
@staticmethod
def get_path(file):
with open(file) as f:
reader = csv.reader(f)
return reader
| [
"[email protected]"
] | |
80aea4f4f9b70bd805e7c2612cee79ad03c1e53c | 5bae449c697f6f75d76376700cacb4ef547a4ec1 | /report/urls.py | b007e7d19abd79aa70f98c24504e6de0dfa2430f | [] | no_license | shanirok/fluffy-palm-tree | 3e4e0060ecbba47cf2c1d64782efc290fdf5717b | f7213cb9e010ef62e215e1df46816cc4a955f316 | refs/heads/master | 2021-01-16T18:10:21.010843 | 2017-09-09T13:47:24 | 2017-09-09T13:47:24 | 100,044,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | from django.conf.urls import url
from . import views
from getriddb.models import Inventoryitem, Customer, Pickup
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^customers/$', views.CustomerListView.as_view(), name='customers'),
url(r'^pickups/$', views.PickupListView.as_view(), name='pickups'),
url(r'^items/$', views.ItemListView.as_view(), name='items'),
url(r'^customers/(?P<customer_id>\d+)$', views.CustomerPickupList, name='customer-pickup'),
url(r'^customers/(?P<customer_id>\d+)/donated$', views.CustomerDonatedItemList, name='customer-donated-item'),
url(r'^customers/(?P<customer_id>\d+)/sold$', views.CustomerSoldItemList, name='customer-sold-item'),
url(r'^customers/(?P<customer_id>\d+)/(?P<pickup_id>\d+)$', views.PickupItemList, name='pickup-item'),
]
# url(r'^$', views.customer_list, name='customer_list'),
# url(r'^$', views.pickup_list, name='pickup_list'),
# # ex: /polls/
# url(r'^index$', views.IndexView.as_view(), name='index'),
# # ex: /polls/5/
# url(r'^(?P<customer_id>[0-9]+)/$', views.detail, name='detail'),
# #url(r'^(?P<item_id>[0-9]+)/$', views.detail, name='detail'),
# # ex: /polls/5/results/
# url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
# #url(r'^(?P<item_id>[0-9]+)/results/$', views.results, name='results'),
# # ex: /polls/5/vote/
# url(r'^(?P<item_id>[0-9]+)/vote/$', views.vote, name='vote'),
| [
"[email protected]"
] | |
6804237b0d91a7bcc876f5811d01e0b7064a663d | 965a7d9f81c051b9f56ea08fe048a3935f10ced6 | /lclbindings/lclpython/unit1.py | 53ba0c352b17d58d1c32c84c91516a87ef115fdd | [] | no_license | mabudrais/lazarus-ccr | 1fd074078d04c869fe0a5a5140a1871b66e5c16d | be1510ff5bb5adae34fa91781c61f43650779f04 | refs/heads/master | 2020-12-25T11:41:40.836166 | 2015-08-14T07:26:17 | 2015-08-14T07:26:17 | 40,607,784 | 0 | 0 | null | 2015-08-12T15:16:48 | 2015-08-12T15:16:48 | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from TButtonunit import*
from TMemounit import*
from TEditunit import*
class TForm1:
def __init__(self):
self.Button1=TButton()
self.Memo1=TMemo()
self.Edit1=TEdit()
def onlic(self):
print self.Button1.Width
self.Button1.setWidth(self.Button1.getWidth()+10)
self.Button1.setTop(self.Button1.getTop()+8)
self.Button1.setCaption(('hello'))
self.Memo1.getLines().Add('lll')
print self.Memo1.getLines().getStrings(0)
self.Edit1.setCaption(self.Memo1.getLines().getStrings(0))
| [
"[email protected]"
] | |
22e9f05bcf84b1140a015082cd24f39a7aec9b5a | 0106b5f304bed732756605f8a28524fa571d4099 | /Linked List/Palindrome LinkedList.py | bf29e9adf068784dedc7e4f896eb95a9404b30ca | [] | no_license | Svastikkka/DS-AND-ALGO | 8338d6f4641e8a369840af455087a5630cdad5cd | 95a6bec319df19753d382c800f4c78c0f0bb256d | refs/heads/master | 2023-03-03T06:13:52.335488 | 2023-02-28T08:11:09 | 2023-02-28T08:11:09 | 296,348,959 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | class Node:
def __init__(self,data):
self.data=data
self.next=None
def LinkedList(arr):
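    # Builds a singly linked list from the integers in arr, stopping at the
    # -1 sentinel; returns the head node (or None for empty input).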
head=None
tail=None
if len(arr)<1:
return
else:
for i in arr:
if i ==-1:
break
NewNode = Node(i)
if head == None:
head=NewNode
tail=NewNode
else:
tail.next=NewNode
tail=NewNode
return head
def printLL(head):
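    # Collects the node values into a list and prints 'true' if the
    # sequence reads the same forwards and backwards (palindrome check).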
arr=[]
while head is not None:
arr.append(head.data)
head=head.next
if arr[0:]==arr[::-1]:
print('true')
else:
print('false')
t=int(input())
for i in range(t):
arr=list(map(int,input().split()))
printLL(LinkedList(arr)) | [
"[email protected]"
] |