hexsha stringlengths 40–40 | size int64 5–2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3–248 | max_stars_repo_name stringlengths 5–125 | max_stars_repo_head_hexsha stringlengths 40–78 | max_stars_repo_licenses listlengths 1–10 | max_stars_count int64 1–191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24–24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24–24 ⌀ | max_issues_repo_path stringlengths 3–248 | max_issues_repo_name stringlengths 5–125 | max_issues_repo_head_hexsha stringlengths 40–78 | max_issues_repo_licenses listlengths 1–10 | max_issues_count int64 1–67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24–24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24–24 ⌀ | max_forks_repo_path stringlengths 3–248 | max_forks_repo_name stringlengths 5–125 | max_forks_repo_head_hexsha stringlengths 40–78 | max_forks_repo_licenses listlengths 1–10 | max_forks_count int64 1–105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24–24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24–24 ⌀ | content stringlengths 5–2.06M | avg_line_length float64 1–1.02M | max_line_length int64 3–1.03M | alphanum_fraction float64 0–1 | count_classes int64 0–1.6M | score_classes float64 0–1 | count_generators int64 0–651k | score_generators float64 0–1 | count_decorators int64 0–990k | score_decorators float64 0–1 | count_async_functions int64 0–235k | score_async_functions float64 0–1 | count_documentation int64 0–1.04M | score_documentation float64 0–1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
19ebb2a3f5203d8e575a8e0bab417177a0a48924 | 5,010 | py | Python | third_party/unidecode/x0bd.py | asysc2020/contentbox | 5c155976e0ce7ea308d62293ab89624d97b21d09 | ["Apache-2.0"] | 39 | 2015-06-10T23:18:07.000Z | 2021-10-21T04:29:06.000Z | third_party/unidecode/x0bd.py | asysc2020/contentbox | 5c155976e0ce7ea308d62293ab89624d97b21d09 | ["Apache-2.0"] | 2 | 2016-08-22T12:38:10.000Z | 2017-01-26T18:37:33.000Z | third_party/unidecode/x0bd.py | asysc2020/contentbox | 5c155976e0ce7ea308d62293ab89624d97b21d09 | ["Apache-2.0"] | 26 | 2015-06-10T22:09:15.000Z | 2021-06-27T15:45:15.000Z | data = (
'bols', # 0x00
'bolt', # 0x01
'bolp', # 0x02
'bolh', # 0x03
'bom', # 0x04
'bob', # 0x05
'bobs', # 0x06
'bos', # 0x07
'boss', # 0x08
'bong', # 0x09
'boj', # 0x0a
'boc', # 0x0b
'bok', # 0x0c
'bot', # 0x0d
'bop', # 0x0e
'boh', # 0x0f
'bwa', # 0x10
'bwag', # 0x11
'bwagg', # 0x12
'bwags', # 0x13
'bwan', # 0x14
'bwanj', # 0x15
'bwanh', # 0x16
'bwad', # 0x17
'bwal', # 0x18
'bwalg', # 0x19
'bwalm', # 0x1a
'bwalb', # 0x1b
'bwals', # 0x1c
'bwalt', # 0x1d
'bwalp', # 0x1e
'bwalh', # 0x1f
'bwam', # 0x20
'bwab', # 0x21
'bwabs', # 0x22
'bwas', # 0x23
'bwass', # 0x24
'bwang', # 0x25
'bwaj', # 0x26
'bwac', # 0x27
'bwak', # 0x28
'bwat', # 0x29
'bwap', # 0x2a
'bwah', # 0x2b
'bwae', # 0x2c
'bwaeg', # 0x2d
'bwaegg', # 0x2e
'bwaegs', # 0x2f
'bwaen', # 0x30
'bwaenj', # 0x31
'bwaenh', # 0x32
'bwaed', # 0x33
'bwael', # 0x34
'bwaelg', # 0x35
'bwaelm', # 0x36
'bwaelb', # 0x37
'bwaels', # 0x38
'bwaelt', # 0x39
'bwaelp', # 0x3a
'bwaelh', # 0x3b
'bwaem', # 0x3c
'bwaeb', # 0x3d
'bwaebs', # 0x3e
'bwaes', # 0x3f
'bwaess', # 0x40
'bwaeng', # 0x41
'bwaej', # 0x42
'bwaec', # 0x43
'bwaek', # 0x44
'bwaet', # 0x45
'bwaep', # 0x46
'bwaeh', # 0x47
'boe', # 0x48
'boeg', # 0x49
'boegg', # 0x4a
'boegs', # 0x4b
'boen', # 0x4c
'boenj', # 0x4d
'boenh', # 0x4e
'boed', # 0x4f
'boel', # 0x50
'boelg', # 0x51
'boelm', # 0x52
'boelb', # 0x53
'boels', # 0x54
'boelt', # 0x55
'boelp', # 0x56
'boelh', # 0x57
'boem', # 0x58
'boeb', # 0x59
'boebs', # 0x5a
'boes', # 0x5b
'boess', # 0x5c
'boeng', # 0x5d
'boej', # 0x5e
'boec', # 0x5f
'boek', # 0x60
'boet', # 0x61
'boep', # 0x62
'boeh', # 0x63
'byo', # 0x64
'byog', # 0x65
'byogg', # 0x66
'byogs', # 0x67
'byon', # 0x68
'byonj', # 0x69
'byonh', # 0x6a
'byod', # 0x6b
'byol', # 0x6c
'byolg', # 0x6d
'byolm', # 0x6e
'byolb', # 0x6f
'byols', # 0x70
'byolt', # 0x71
'byolp', # 0x72
'byolh', # 0x73
'byom', # 0x74
'byob', # 0x75
'byobs', # 0x76
'byos', # 0x77
'byoss', # 0x78
'byong', # 0x79
'byoj', # 0x7a
'byoc', # 0x7b
'byok', # 0x7c
'byot', # 0x7d
'byop', # 0x7e
'byoh', # 0x7f
'bu', # 0x80
'bug', # 0x81
'bugg', # 0x82
'bugs', # 0x83
'bun', # 0x84
'bunj', # 0x85
'bunh', # 0x86
'bud', # 0x87
'bul', # 0x88
'bulg', # 0x89
'bulm', # 0x8a
'bulb', # 0x8b
'buls', # 0x8c
'bult', # 0x8d
'bulp', # 0x8e
'bulh', # 0x8f
'bum', # 0x90
'bub', # 0x91
'bubs', # 0x92
'bus', # 0x93
'buss', # 0x94
'bung', # 0x95
'buj', # 0x96
'buc', # 0x97
'buk', # 0x98
'but', # 0x99
'bup', # 0x9a
'buh', # 0x9b
'bweo', # 0x9c
'bweog', # 0x9d
'bweogg', # 0x9e
'bweogs', # 0x9f
'bweon', # 0xa0
'bweonj', # 0xa1
'bweonh', # 0xa2
'bweod', # 0xa3
'bweol', # 0xa4
'bweolg', # 0xa5
'bweolm', # 0xa6
'bweolb', # 0xa7
'bweols', # 0xa8
'bweolt', # 0xa9
'bweolp', # 0xaa
'bweolh', # 0xab
'bweom', # 0xac
'bweob', # 0xad
'bweobs', # 0xae
'bweos', # 0xaf
'bweoss', # 0xb0
'bweong', # 0xb1
'bweoj', # 0xb2
'bweoc', # 0xb3
'bweok', # 0xb4
'bweot', # 0xb5
'bweop', # 0xb6
'bweoh', # 0xb7
'bwe', # 0xb8
'bweg', # 0xb9
'bwegg', # 0xba
'bwegs', # 0xbb
'bwen', # 0xbc
'bwenj', # 0xbd
'bwenh', # 0xbe
'bwed', # 0xbf
'bwel', # 0xc0
'bwelg', # 0xc1
'bwelm', # 0xc2
'bwelb', # 0xc3
'bwels', # 0xc4
'bwelt', # 0xc5
'bwelp', # 0xc6
'bwelh', # 0xc7
'bwem', # 0xc8
'bweb', # 0xc9
'bwebs', # 0xca
'bwes', # 0xcb
'bwess', # 0xcc
'bweng', # 0xcd
'bwej', # 0xce
'bwec', # 0xcf
'bwek', # 0xd0
'bwet', # 0xd1
'bwep', # 0xd2
'bweh', # 0xd3
'bwi', # 0xd4
'bwig', # 0xd5
'bwigg', # 0xd6
'bwigs', # 0xd7
'bwin', # 0xd8
'bwinj', # 0xd9
'bwinh', # 0xda
'bwid', # 0xdb
'bwil', # 0xdc
'bwilg', # 0xdd
'bwilm', # 0xde
'bwilb', # 0xdf
'bwils', # 0xe0
'bwilt', # 0xe1
'bwilp', # 0xe2
'bwilh', # 0xe3
'bwim', # 0xe4
'bwib', # 0xe5
'bwibs', # 0xe6
'bwis', # 0xe7
'bwiss', # 0xe8
'bwing', # 0xe9
'bwij', # 0xea
'bwic', # 0xeb
'bwik', # 0xec
'bwit', # 0xed
'bwip', # 0xee
'bwih', # 0xef
'byu', # 0xf0
'byug', # 0xf1
'byugg', # 0xf2
'byugs', # 0xf3
'byun', # 0xf4
'byunj', # 0xf5
'byunh', # 0xf6
'byud', # 0xf7
'byul', # 0xf8
'byulg', # 0xf9
'byulm', # 0xfa
'byulb', # 0xfb
'byuls', # 0xfc
'byult', # 0xfd
'byulp', # 0xfe
'byulh', # 0xff
)
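# Lookup sketch (an assumption about how Unidecode consumes these tables, not
# part of this file): module x0bd.py covers code points U+BD00..U+BDFF, and the
# tuple is indexed by the low byte of the code point, e.g.:
#   data[ord(u'\ubd04') & 0xFF]   # index 0x04 -> 'bom'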
| 19.343629 | 20 | 0.436128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,461 | 0.690818 |
19ecf7e3e7e37b889b168edf93836870ccf82afb | 55 | py | Python | social/backends/azuread.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | ["BSD-3-Clause"] | 1,987 | 2015-01-01T16:12:45.000Z | 2022-03-29T14:24:25.000Z | social/backends/azuread.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | ["BSD-3-Clause"] | 731 | 2015-01-01T22:55:25.000Z | 2022-03-10T15:07:51.000Z | virtual/lib/python3.6/site-packages/social/backends/azuread.py | dennismwaniki67/awards | 80ed10541f5f751aee5f8285ab1ad54cfecba95f | ["MIT"] | 1,082 | 2015-01-01T16:27:26.000Z | 2022-03-22T21:18:33.000Z | from social_core.backends.azuread import AzureADOAuth2
| 27.5 | 54 | 0.890909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
19ed8ee16410261911df594fb0af9ff20f20ca7e | 6,556 | py | Python | pystitchy/grid.py | iht/Stitchy-Studio | f7faf846d7ce498ef5945caaff2b09f9108e2919 | ["MIT"] | 1 | 2021-02-28T17:27:16.000Z | 2021-02-28T17:27:16.000Z | pystitchy/grid.py | iht/Stitchy-Studio | f7faf846d7ce498ef5945caaff2b09f9108e2919 | ["MIT"] | null | null | null | pystitchy/grid.py | iht/Stitchy-Studio | f7faf846d7ce498ef5945caaff2b09f9108e2919 | ["MIT"] | null | null | null | # Copyright (c) 2012 Israel Herraiz <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import wx
import numpy
from numpy import zeros
class Grid:
def __init__ (self):
self._xcells = 120
self._ycells = 80
self._xsize = 1200
self._ysize = 800
self._xoffset = self._xsize / self._xcells * 5
self._yoffset = self._xoffset
self._zoom_factor = 100
self._init_matrix ()
def _init_matrix (self):
self._cells = zeros ((self._xcells, self._ycells), dtype=numpy.bool)
self._colors = {}
for x in range (self._xcells):
for y in range (self._ycells):
self._colors[(x,y)] = []
def decrease_zoom (self):
self._xsize = self._xsize - self._zoom_factor
self._ysize = self._ysize - self._zoom_factor
self._xoffset = self._xsize / self._xcells * 5
self._yoffset = self._xoffset
def increase_zoom (self):
self._xsize = self._xsize + self._zoom_factor
self._ysize = self._ysize + self._zoom_factor
self._xoffset = self._xsize / self._xcells * 5
self._yoffset = self._xoffset
def get_size (self):
return (self._xsize + self._xoffset, self._ysize + self._yoffset)
def draw_grid(self, dc):
step = self._xsize / self._xcells
boldstep = step * 10
# Vertical lines
dc.SetPen (wx.Pen(wx.LIGHT_GREY, 1))
for x in range(self._xcells+1):
xsize = x*step
ysize = step * self._ycells
dc.DrawLine(self._xoffset + xsize, self._yoffset, xsize + self._xoffset, ysize + self._yoffset)
# Draw bold lines
dc.SetPen (wx.Pen(wx.BLACK,1))
for x in range((self._xcells)/10+1):
xsize = x*boldstep
ysize = step * self._ycells
dc.DrawLine(xsize + self._xoffset, self._yoffset, xsize + self._xoffset, ysize + self._yoffset)
# Horizontal lines
dc.SetPen (wx.Pen(wx.LIGHT_GREY, 1))
for y in range(self._ycells+1):
ysize = y*step
xsize = self._xcells*step
dc.DrawLine(self._xoffset, ysize + self._yoffset, xsize + self._xoffset, ysize + self._yoffset)
# Draw bold lines
dc.SetPen (wx.Pen(wx.BLACK,1))
for y in range((self._ycells)/10+1):
ysize = y*boldstep
xsize = self._xcells*step
dc.DrawLine(self._xoffset, ysize + self._yoffset, xsize + self._xoffset, ysize + self._yoffset)
for x in range(self._xcells):
for y in range(self._ycells):
if self._cells[x][y]:
self._paint_cell (x, y, dc, self._colors[(x,y)][-1])
def add_cell (self, xcell, ycell, dc, color, erase):
if not erase:
if xcell >= 0 and ycell >= 0 and xcell < self._xcells and ycell < self._ycells:
self._cells[xcell][ycell] = True
if not len(self._colors[(xcell,ycell)]):
self._colors[(xcell,ycell)].append(color)
elif self._colors[(xcell,ycell)][-1] != color:
self._colors[(xcell,ycell)].append(color)
self._paint_cell (xcell, ycell, dc, color)
else:
if xcell >= 0 and ycell >= 0 and xcell < self._xcells and ycell < self._ycells:
self._cells[xcell][ycell] = False
if not len(self._colors[(xcell,ycell)]):
self._colors[(xcell,ycell)].append(None)
elif self._colors[(xcell,ycell)][-1]:
self._colors[(xcell,ycell)].append(None)
self._paint_cell (xcell, ycell, dc, None, erase)
return len(self._colors[(xcell,ycell)])-1
def get_color_by_mouse (self, x, y):
step = self._xsize / self._xcells
xcell = int((x - self._xoffset)/step)
ycell = int((y - self._yoffset)/step)
try:
c = self._colors[(xcell, ycell)][-1]
if c:
            # Return a copy of the colour; otherwise consecutive entries in this
            # cell's history would all reference the same wx.Colour object,
            # since Python assigns object references rather than copies
r, g, b = c.Get()
return wx.Colour(r, g, b)
else:
return c
except KeyError:
return None
except IndexError:
return None
def get_color_by_index (self, xcell, ycell, i):
return self._colors[(xcell,ycell)][i]
def mouse2cell (self, mousex, mousey):
step = self._xsize / self._xcells
xcell = int((mousex - self._xoffset)/step)
ycell = int((mousey - self._yoffset)/step)
return (xcell, ycell)
def cell2mouse (self, xcell, ycell):
step = self._xsize / self._xcells
mousex = int(xcell*step + self._xoffset)
mousey = int(ycell*step + self._yoffset)
return (mousex, mousey)
def _paint_cell (self, xcell, ycell, dc, color, erase = False):
step = self._xsize / self._xcells
px = xcell * step + self._xoffset
py = ycell * step + self._yoffset
if not erase:
dc.SetPen (wx.Pen(color))
dc.SetBrush (wx.Brush (color))
else:
dc.SetPen (wx.WHITE_PEN)
dc.SetBrush (wx.WHITE_BRUSH)
dc.DrawRectangle(px + 1,py + 1,step - 1,step - 1)
| 33.968912 | 107 | 0.585265 | 5,395 | 0.82291 | 0 | 0 | 0 | 0 | 0 | 0 | 1,301 | 0.198444 |
19eeab362b9fb0e2d6b801d0a756ec8fc09dd20a | 2,837 | py | Python | usps_tools/exceptions.py | pedrovagner/usps-tools | 6a241fda35db6590684a534b9c3cf78a589ea09d | ["MIT"] | null | null | null | usps_tools/exceptions.py | pedrovagner/usps-tools | 6a241fda35db6590684a534b9c3cf78a589ea09d | ["MIT"] | null | null | null | usps_tools/exceptions.py | pedrovagner/usps-tools | 6a241fda35db6590684a534b9c3cf78a589ea09d | ["MIT"] | null | null | null | import traceback
from typing import Optional
from .i18n import _
class UspsToolsException(Exception):
"""
Base class for all errors.
"""
def __init__(self, message: Optional[str] = None, origin: Exception = None):
"""
:param message:
:param origin:
"""
super().__init__(message or _("An error happened in UspsTools."))
self.origin = origin
@property
def traceback_msg(self) -> str:
"""
:return:
"""
return traceback.format_exc()
class ConnectionFail(UspsToolsException):
"""
    Connection error with the server.
"""
def __init__(self, message: str = None, origin: Exception = None):
"""
:param message:
:param origin:
"""
        msg = str(origin) if origin else message if message else "Connection error."
super().__init__(msg, origin)
class XmlLoadError(UspsToolsException):
"""
    Could not create the object from the response XML.
"""
def __init__(self, message: str = None, origin: Exception = None):
"""
:param message:
:param origin:
"""
        msg = str(origin) if origin else message if message else "Could not instantiate the object from the response XML."
super().__init__(msg, origin)
class XmlResponseError(UspsToolsException):
"""
    Response did not return valid XML.
"""
def __init__(self, message: str = None, origin: Exception = None):
"""
:param message:
:param origin:
"""
        msg = str(origin) if origin else message if message else "Response did not return valid XML."
super().__init__(msg, origin)
class Timeout(UspsToolsException):
"""
    The server took too long to respond to the request.
"""
def __init__(self, message: str = None, origin: Exception = None):
"""
:param message:
:param origin:
"""
        msg = str(origin) if origin else message if message else "Timeout error."
super().__init__(msg, origin)
class ValidationError(UspsToolsException):
"""
    Validation error (marshmallow.exceptions.ValidationError).
"""
def __init__(self, message: str = None, origin: Exception = None):
"""
:param message:
:param origin:
"""
msg = str(origin) if origin else message if message else _("Validation error.")
super().__init__(msg, origin)
class NotFoundError(UspsToolsException):
"""
    Lookup did not find the object.
"""
def __init__(self, message: str = None, origin: Exception = None):
"""
:param message:
:param origin:
"""
        msg = str(origin) if origin else message if message else "Object not found."
super().__init__(msg, origin)
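# Minimal usage sketch (hypothetical caller code, not part of this module):
# wrapping the low-level exception in `origin` keeps the original error
# available for logging via `traceback_msg`.
#
#   try:
#       response = session.get(url, timeout=5)   # `session` and `url` assumed
#   except Exception as exc:
#       raise ConnectionFail(origin=exc)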
| 27.813725 | 114 | 0.601692 | 2,768 | 0.969867 | 0 | 0 | 124 | 0.043448 | 0 | 0 | 1,093 | 0.382971 |
19effa59bdd92c4854c56be758df2693cacdcb3d | 1,158 | py | Python | scraper/engine.py | pesya/scraper | c088dc3dc613fec94e297ac71302d2305b44b14c | ["BSD-3-Clause"] | null | null | null | scraper/engine.py | pesya/scraper | c088dc3dc613fec94e297ac71302d2305b44b14c | ["BSD-3-Clause"] | null | null | null | scraper/engine.py | pesya/scraper | c088dc3dc613fec94e297ac71302d2305b44b14c | ["BSD-3-Clause"] | null | null | null | import sys
import csv
import requests
from parsel import Selector
from scraper.parser import get_features_from_item
start_url = 'http://www.world-art.ru/animation/rating_top.php'
SIGN_STDOUT = '-'
FORMAT_CSV = 'csv'
FORMAT_JL = 'jl'
def parse(url: str, out_path: str, out_format: str):
"""
gets link and returns the response
"""
response = requests.get(url)
assert response.status_code == 200, f'bad status code: {response.status_code}'
response_html = Selector(response.text)
links_to_films = response_html.xpath('//td[@class="review"]/a[@class="review"]/@href').getall()
out_file = sys.stdout if out_path == SIGN_STDOUT else open(out_path, 'w', buffering=1, newline='')
for link in links_to_films:
item_response = requests.get(link)
        assert item_response.status_code == 200, f'bad status code: {item_response.status_code}'
item = get_features_from_item(item_response)
if out_format == FORMAT_CSV:
            item_writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
item_writer.writerow(item.values())
out_file.close()
return
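# Example invocation (a sketch based on the module constants above, not part
# of the original file): scrape the top-rated listing and stream CSV to stdout.
if __name__ == '__main__':
    parse(start_url, SIGN_STDOUT, FORMAT_CSV)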
| 28.243902 | 103 | 0.69171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.224525 |
19f24b3bd880d9e6bed48acb8886bc868d4be2dd | 14,592 | py | Python | gui/python/photogate/photogate/photogate_app.py | iorodeo/photogate_software | c9a97dc3da644fe093397dd11024825ba0d79519 | ["Apache-2.0"] | 1 | 2020-07-23T19:02:50.000Z | 2020-07-23T19:02:50.000Z | gui/python/photogate/photogate/photogate_app.py | iorodeo/photogate_software | c9a97dc3da644fe093397dd11024825ba0d79519 | ["Apache-2.0"] | null | null | null | gui/python/photogate/photogate/photogate_app.py | iorodeo/photogate_software | c9a97dc3da644fe093397dd11024825ba0d79519 | ["Apache-2.0"] | null | null | null | from __future__ import print_function
import os
import sys
import serial.tools.list_ports
from PyQt4 import QtCore
from PyQt4 import QtGui
from photogate_ui import Ui_PhotogateMainWindow
from photogate_serial import PhotogateDevice
from photogate_serial import getListOfPorts
import dependency_hack
try:
import scipy.io
HAVE_SCIPY_IO = True
except ImportError:
HAVE_SCIPY_IO = False
class PhotogateMainWindow(QtGui.QMainWindow, Ui_PhotogateMainWindow):
PORT_MONITOR_TIMER_DT = 1000
DATA_ACQUISITION_TIMER_DT = 10
NOT_AVAILABLE_STR = ''
FILE_TYPE_TO_FILTER = {
'.txt' : "Text (*.txt);;All files (*.*)",
'.mat' : "Mat (*.mat);;All files (*.*)",
'.csv' : "CSV (*.csv);;All files (*.*)",
}
def __init__(self,parent=None):
super(PhotogateMainWindow,self).__init__(parent)
self.setupUi(self)
self.connectWidgets()
self.initialize()
def connectWidgets(self):
self.connectPushButton.pressed.connect(self.connectPressed_Callback)
self.connectPushButton.clicked.connect(self.connectClicked_Callback)
self.resetPushButton.clicked.connect(self.resetClicked_Callback)
self.actionSaveMat.triggered.connect(self.saveMat_Callback)
self.actionSaveTxt.triggered.connect(self.saveTxt_Callback)
self.actionSaveCsv.triggered.connect(self.saveCsv_Callback)
def main(self):
self.show()
self.raise_()
def initialize(self):
self.dev = None
self.lastDataDict = None
self.userHome = os.getenv('USERPROFILE')
if self.userHome is None:
self.userHome = os.getenv('HOME')
self.lastSaveDir = self.userHome
self.populatePortComboBox()
self.updateWidgetEnabled()
self.setupTimers()
self.portMonitorTimer.start()
self.statusbar.showMessage('Not Connected')
def setupTimers(self):
self.portMonitorTimer = QtCore.QTimer(self)
self.portMonitorTimer.setInterval(self.PORT_MONITOR_TIMER_DT)
self.portMonitorTimer.timeout.connect(self.portMonitorTimer_Callback)
self.dataAcquisitionTimer = QtCore.QTimer(self)
self.dataAcquisitionTimer.setInterval(self.DATA_ACQUISITION_TIMER_DT)
self.dataAcquisitionTimer.setSingleShot(True); # Serial communications sets the pacing
self.dataAcquisitionTimer.timeout.connect(self.dataAcquisitionTimer_Callback)
def portMonitorTimer_Callback(self):
self.populatePortComboBox()
def dataAcquisitionTimer_Callback(self):
if self.dev is not None:
try:
dataDict = self.dev.getData()
except:
self.statusbar.showMessage('Data Acquistion Error')
dataDict = {}
if dataDict:
self.setRunTime(dataDict)
self.setMode(dataDict)
self.setStatusbarMessage(dataDict)
if self.lastDataDict is not None:
if self.lastDataDict['running']:
self.setDataText(dataDict)
else:
self.setDataText(dataDict)
self.lastDataDict = dataDict
self.updateSaveWidgetEnabled()
self.dataAcquisitionTimer.start()
def connectPressed_Callback(self):
if self.dev is None:
self.connectPushButton.setText('Disconnect')
self.connectPushButton.setFlat(True)
self.portComboBox.setEnabled(False)
self.statusbar.showMessage('Connecting...')
def connectClicked_Callback(self):
if self.dev is None:
self.connectDevice()
else:
self.disconnectDevice()
self.updateWidgetEnabled()
self.connectPushButton.setFlat(False)
def resetClicked_Callback(self):
if self.dev is not None:
self.dev.reset()
def fileSaveDialog(self,fileExt):
dialog = QtGui.QFileDialog()
dialog.setFileMode(QtGui.QFileDialog.AnyFile)
if os.path.isdir(self.lastSaveDir):
saveDir = self.lastSaveDir
else:
saveDir = self.userHome
fileNameFullPath = dialog.getSaveFileName(
None,
'Select {0} file'.format(fileExt),
saveDir,
self.FILE_TYPE_TO_FILTER[fileExt],
options=QtGui.QFileDialog.DontUseNativeDialog,
)
fileNameFullPath = str(fileNameFullPath)
if fileNameFullPath:
# Extract last save path and check for file extension
filePath, fileName = os.path.split(fileNameFullPath)
self.lastSaveDir = filePath
fileName = autoAddFileExtension(fileName,fileExt)
fileNameFullPath = os.path.join(filePath,fileName)
return fileNameFullPath
def saveMat_Callback(self):
if not HAVE_SCIPY_IO:
return
fileExt = '.mat'
dataDict = self.lastDataDict
fileNameFullPath = self.fileSaveDialog(fileExt)
if fileNameFullPath:
matDict = self.getMatDict(dataDict)
scipy.io.savemat(fileNameFullPath,matDict)
def getMatDict(self,dataDict):
matDict = {}
for i, photogateDict in enumerate(dataDict['photogates']):
if photogateDict['isConnected']:
entryTime, exitTime, timeInGate = getPhotogateTimes(photogateDict)
matDict['photogate{0}'.format(i)] = {
'entryTime' : entryTime,
'exitTime' : exitTime,
'timeInGate' : timeInGate,
}
if dataDict['operatingMode'] == 'TWO_PHOTOGATE':
timeBetweenGates = getTimeBetweenGates(dataDict['photogates'])
matDict['timeBetweenGates'] = timeBetweenGates
return matDict
def saveTxt_Callback(self):
fileExt = '.txt'
dataStr = self.getDataStr(self.lastDataDict)
fileNameFullPath = self.fileSaveDialog(fileExt)
if fileNameFullPath:
with open(fileNameFullPath,'w') as f:
f.write(dataStr)
def saveCsv_Callback(self):
fileExt = '.csv'
dataDict = self.lastDataDict
fileNameFullPath = self.fileSaveDialog(fileExt)
if fileNameFullPath:
csvDataStr = self.getCsvDataStr(dataDict)
with open(fileNameFullPath,'w') as f:
f.write(csvDataStr)
def getCsvDataStr(self,dataDict):
dataStrList = ['field,value']
for i,photogateDict in enumerate(dataDict['photogates']):
if photogateDict['isConnected']:
entryTime, exitTime, timeInGate = getPhotogateTimes(photogateDict)
dataStrList.append('"photogate {0} enter",{1}'.format(i,entryTime))
dataStrList.append('"photogate {0} exit",{1}'.format(i,exitTime))
dataStrList.append('"photogate {0} in-gate",{1}'.format(i,timeInGate))
if dataDict['operatingMode'] == 'TWO_PHOTOGATE':
timeBetweenGates = getTimeBetweenGates(dataDict['photogates'])
dataStrList.append('"between gates",{0}'.format(timeBetweenGates))
dataStr = '\n'.join(dataStrList)
return dataStr
def connectDevice(self):
port = str(self.portComboBox.currentText())
try:
self.dev = PhotogateDevice(port)
        except Exception as e:
msgTitle = 'Connection Error'
msgText = 'unable to connect to device: {0}'.format(str(e))
QtGui.QMessageBox.warning(self,msgTitle, msgText)
self.connectPushButton.setText('Connect')
self.statusbar.showMessage('Not Connected')
self.dev = None
if self.dev is not None:
self.portMonitorTimer.stop()
self.dataAcquisitionTimer.start()
self.statusbar.showMessage('Connected')
def disconnectDevice(self):
if self.dev is not None:
try:
self.dev.close()
except:
pass
self.dev = None
self.dataAcquisitionTimer.stop()
self.portMonitorTimer.start()
self.connectPushButton.setText('Connect')
self.statusbar.showMessage('Not Connected')
def updateWidgetEnabled(self):
if self.dev is None:
self.portComboBox.setEnabled(True)
self.infoFrame.setEnabled(False)
self.dataText.setEnabled(False)
self.resetPushButton.setEnabled(False)
else:
self.portComboBox.setEnabled(False)
self.infoFrame.setEnabled(True)
self.dataText.setEnabled(True)
self.resetPushButton.setEnabled(True)
self.updateSaveWidgetEnabled()
def updateSaveWidgetEnabled(self):
if self.lastDataDict is not None:
if not self.lastDataDict['running'] and not self.lastDataDict['timeout']:
if HAVE_SCIPY_IO:
self.actionSaveMat.setEnabled(True)
self.actionSaveTxt.setEnabled(True)
self.actionSaveCsv.setEnabled(True)
else:
self.actionSaveMat.setEnabled(False)
self.actionSaveTxt.setEnabled(False)
self.actionSaveCsv.setEnabled(False)
else:
self.actionSaveMat.setEnabled(False)
self.actionSaveTxt.setEnabled(False)
self.actionSaveCsv.setEnabled(False)
def populatePortComboBox(self):
currPort = str(self.portComboBox.currentText())
self.portComboBox.clear()
portNameList = getListOfPorts()
for portName in portNameList:
self.portComboBox.addItem(portName)
try:
index = portNameList.index(currPort)
except ValueError:
index = 0
self.portComboBox.setCurrentIndex(index)
def setRunTime(self,dataDict):
runTime = uSecToSec(float(dataDict['runTime']))
runTimeLabelStr = 'Time: {0:2.2f}'.format(runTime)
self.timeLabel.setText(runTimeLabelStr)
def setMode(self,dataDict):
modeRaw = dataDict['operatingMode']
if modeRaw == 'TWO_PHOTOGATE':
mode = 'Two Photogates'
elif modeRaw == 'ONE_PHOTOGATE':
mode = 'One Photogate'
elif modeRaw == 'NO_PHOTOGATE':
mode = 'No Photogates'
else:
mode = 'Unknown'
modeLabelStr = 'Mode: {0}'.format(mode)
self.modeLabel.setText(modeLabelStr)
def setStatusbarMessage(self,dataDict):
running = dataDict['running']
timeout = dataDict['timeout']
if running:
message = 'Running'
else:
if timeout:
message = 'Stopped, Timeout'
else:
message = 'Measurement Complete'
self.statusbar.showMessage(message)
def getDataStr(self,dataDict):
dataStrList = []
infoStr = 'Timing Data (units = s, resolution = 10us)'
dataStrList.append(infoStr)
dataStrList.append(' ')
# Add photogate data
for i, photogateDict in enumerate(dataDict['photogates']):
if photogateDict['isConnected']:
dataStrList.append('Photogate: {0}'.format(i+1))
photogateDataStr = self.getPhotogateDataStr(photogateDict)
dataStrList.append(photogateDataStr)
dataStrList.append(' ')
# Add time between gates if in two photogate mode
if dataDict['operatingMode'] == 'TWO_PHOTOGATE':
if dataDict['running'] or dataDict['timeout']:
timeBetweenStr = self.NOT_AVAILABLE_STR
else:
timeBetween = getTimeBetweenGates(dataDict['photogates'])
timeBetweenStr = '{0:1.5f}'.format(timeBetween)
timeBetweenStr = 'between-gates: {0}'.format(timeBetweenStr)
dataStrList.append(timeBetweenStr)
dataStr = '\n'.join(dataStrList)
return dataStr
def setDataText(self,dataDict):
dataStr = self.getDataStr(dataDict)
self.dataText.clear()
self.dataText.setPlainText(dataStr)
def getPhotogateDataStr(self,photogateDict, indent=2):
indentStr = ' '*indent
dataStrList = []
entryTime, exitTime, timeInGate = getPhotogateTimes(photogateDict)
# Add entry time
if photogateDict['hasEntryTime']:
entryTimeStr = '{0:1.5f}'.format(entryTime)
else:
entryTimeStr = self.NOT_AVAILABLE_STR
entryTimeStr = '{0}enter: {1}'.format(indentStr, entryTimeStr)
dataStrList.append(entryTimeStr)
# Add exit time
if photogateDict['hasExitTime']:
exitTimeStr ='{0:1.5f}'.format(exitTime)
else:
exitTimeStr = self.NOT_AVAILABLE_STR
exitTimeStr = '{0}exit: {1}'.format(indentStr, exitTimeStr)
dataStrList.append(exitTimeStr)
# Add time in-gate
if photogateDict['isDone']:
timeInGateStr = '{0:1.5f}'.format(timeInGate)
else:
timeInGateStr = self.NOT_AVAILABLE_STR
timeInGateStr = '{0}in-gate: {1}'.format(indentStr, timeInGateStr)
dataStrList.append(timeInGateStr)
dataStr = '\n'.join(dataStrList)
return dataStr
# Utility functions
# -----------------------------------------------------------------------------
def getPhotogateTimes(photogateDict):
entryTime = uSecToSec(float(photogateDict['entryTime']))
exitTime = uSecToSec(float(photogateDict['exitTime']))
timeInGate = exitTime - entryTime
return entryTime, exitTime, timeInGate
def getTimeBetweenGates(photogateList):
entryTime0 = uSecToSec(float(photogateList[0]['entryTime']))
entryTime1 = uSecToSec(float(photogateList[1]['entryTime']))
timeBetween = entryTime1 - entryTime0
return timeBetween
def autoAddFileExtension(fileName,autoExt):
fileNameBase, fileNameExt = os.path.splitext(fileName)
if not fileNameExt:
# Only add extension if there isn't one already
fileName = '{0}{1}'.format(fileNameBase,autoExt)
return fileName
def uSecToSec(value):
return (1.0e-6)*value
def runPhotogateApp():
app = QtGui.QApplication(sys.argv)
mainWindow = PhotogateMainWindow()
mainWindow.main()
app.exec_()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
runPhotogateApp()
| 37.22449 | 94 | 0.614172 | 13,025 | 0.892612 | 0 | 0 | 0 | 0 | 0 | 0 | 1,714 | 0.117462 |
19f364dac17ba32accfedb9fef8b6459dc8369f0 | 114 | py | Python | playground/step2/test1.py | jhson989/jhML | eb8b76d3b47df858e82cd971bb32794e12de4747 | ["Apache-2.0"] | null | null | null | playground/step2/test1.py | jhson989/jhML | eb8b76d3b47df858e82cd971bb32794e12de4747 | ["Apache-2.0"] | null | null | null | playground/step2/test1.py | jhson989/jhML | eb8b76d3b47df858e82cd971bb32794e12de4747 | ["Apache-2.0"] | null | null | null |
from core import Variable
from operation import *
a = Variable(2)
b = square(a)
c = square(b)
print(c.data)
| 8.769231 | 25 | 0.675439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
19f3a16361bc6bc5804201c9d2a4c2d8f966a4a0 | 106 | py | Python | sharepoint/__init__.py | nessalc/python-sharepoint | d264cf5be56c8f9f619a4f72fd039c167cd01ba8 | ["MIT"] | 1 | 2019-06-03T03:16:44.000Z | 2019-06-03T03:16:44.000Z | sharepoint/__init__.py | nessalc/python-sharepoint | d264cf5be56c8f9f619a4f72fd039c167cd01ba8 | ["MIT"] | null | null | null | sharepoint/__init__.py | nessalc/python-sharepoint | d264cf5be56c8f9f619a4f72fd039c167cd01ba8 | ["MIT"] | null | null | null | name = 'sharepoint'
from .sharepoint import SharePointSite
__author__ = 'James Classen'
__version__ = '0.0.2' | 21.2 | 38 | 0.792453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.320755 |
19f6250f9d15cae4fb338cfbac1c36e435b2c1ca | 3,188 | py | Python | third_party/nkata/tests/transformvideo_test.py | google/offline-content-packager | 5a023eeeed4973e452309b434a59ce745487fdd6 | ["Apache-2.0"] | 32 | 2016-05-31T13:01:46.000Z | 2022-03-18T11:17:36.000Z | third_party/nkata/tests/transformvideo_test.py | google/offline-content-packager | 5a023eeeed4973e452309b434a59ce745487fdd6 | ["Apache-2.0"] | null | null | null | third_party/nkata/tests/transformvideo_test.py | google/offline-content-packager | 5a023eeeed4973e452309b434a59ce745487fdd6 | ["Apache-2.0"] | 29 | 2016-06-08T18:11:00.000Z | 2021-09-28T04:14:34.000Z | # Copyright 2015 The Offline Content Packager Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import makedirs
from os.path import dirname
from os.path import isdir
from os.path import join
import shutil
import tempfile
import unittest
import jinja2
from scripts.transformations import VideoTransformation
import yaml
class VideoTestCase(unittest.TestCase):
def setUp(self):
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.tracking_code = "123456"
self.video_subtitle = "test subtitle"
self.video_summary = "test summary"
self.video_name = "test_video"
self.setUpMetadata()
self.JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(self.src_dir),
extensions=["jinja2.ext.autoescape"],
autoescape=False)
def setUpTemplate(self, template, content):
template = join(self.src_dir, template)
template_dir = dirname(template)
if not isdir(template_dir):
makedirs(template_dir)
with open(template, "w") as f:
f.write(content)
def setUpMetadata(self):
self.meta_data_content = {
"title": "test video",
"description": "test description",
"sub_title": "test subtitle",
"tags": "",
"image_src": ""
}
self.metadata_file = join(self.src_dir, "video.yaml")
f = open(self.metadata_file, "w")
yaml.dump(self.meta_data_content, f)
self.meta_data = {self.video_name: self.metadata_file}
def createInstance(self):
return VideoTransformation(self.tracking_code, self.JINJA_ENVIRONMENT)
def tearDown(self):
shutil.rmtree(self.src_dir)
shutil.rmtree(self.dst_dir)
def test_generate_html(self):
html_name = "test_output.html"
video_source = "/test/file/path/video_source.avi"
video_type = "video/test"
video_info = ("video_title", "video_subtitle", "video_description")
template_content = ("{{ video_name }} / {{ video_type}} /"
" {{ video_source }} / {{ tracking_code }}")
expected_output = "%s / %s / %s / %s" % (self.video_name, video_type,
video_source, self.tracking_code)
self.setUpTemplate("templates/video.html", template_content)
transformation = self.createInstance()
video_detail = (self.video_name, video_source, video_type, video_info)
transformation.generate_html(self.dst_dir, html_name, video_detail, None)
# assert the output
with open(join(self.dst_dir, "html_files", html_name), "r") as f:
output = f.read()
self.assertEquals(output, expected_output)
if __name__ == "__main__":
unittest.main()
| 32.20202 | 78 | 0.69542 | 2,283 | 0.716123 | 0 | 0 | 0 | 0 | 0 | 0 | 1,074 | 0.336888 |
19f6287b8eec32e7e9ec1cec1c39636f68949b75 | 2,308 | py | Python | src/E_get_mpns_v8_analyses_answers/query_mpns_v8_name_relationships.py | feiphoon/mpns-pipeline | d34a8609dc4cb04ccc3f5c9b79a52bfeecdb38f6 | ["MIT"] | 1 | 2022-03-28T10:46:58.000Z | 2022-03-28T10:46:58.000Z | src/E_get_mpns_v8_analyses_answers/query_mpns_v8_name_relationships.py | feiphoon/mpns-pipeline | d34a8609dc4cb04ccc3f5c9b79a52bfeecdb38f6 | ["MIT"] | null | null | null | src/E_get_mpns_v8_analyses_answers/query_mpns_v8_name_relationships.py | feiphoon/mpns-pipeline | d34a8609dc4cb04ccc3f5c9b79a52bfeecdb38f6 | ["MIT"] | null | null | null | from pathlib import Path
from pyspark.sql import SparkSession, functions as f
from pyspark.sql.dataframe import DataFrame
# Monkeypatch in case I don't use Spark 3.0
def transform(self, f):
return f(self)
DataFrame.transform = transform
def query_mpns_v8_name_relationships(
input_filepath: str,
output_filepath: str,
sample_run: bool,
) -> None:
spark = SparkSession.builder.appName(
"query_mpns_v8_name_relationships"
).getOrCreate()
if sample_run:
name_relationships_df: DataFrame = spark.read.json(input_filepath)
else:
name_relationships_df: DataFrame = spark.read.parquet(input_filepath)
plants_with_minimum_relationships_df: DataFrame = name_relationships_df.transform(
lambda df: get_plants_with_minimum_relationships(df)
)
write_query_results(plants_with_minimum_relationships_df, output_filepath)
def write_query_results(df: DataFrame, output_filepath: Path) -> None:
output_filepath_parent: Path = Path(output_filepath).parents[0]
output_filepath_parent.mkdir(parents=True, exist_ok=True)
# Coalesce to 1 JSON file
df.coalesce(1).write.format("json").mode("overwrite").save(output_filepath)
def get_plants_with_minimum_relationships(df: DataFrame) -> DataFrame:
df = df.filter(
(f.col("synonym_count") >= 1)
& (f.col("scm_non_scientific_name_count") >= 1)
& (f.col("com_non_scientific_name_count") >= 1)
& (f.col("pha_non_scientific_name_count") >= 1)
)
return df.sort(df.synonym_count.asc(), df.scm_com_pha.asc())
# Sample/demo purposes
mpns_v8_name_relationships_filepath: str = (
"data/analysis/mpns/sample_mpns_v8/name_relationships/"
)
output_filepath: str = "data/analysis/mpns/sample_mpns_v8/name_relationships/query/"
query_mpns_v8_name_relationships(
input_filepath=mpns_v8_name_relationships_filepath,
output_filepath=output_filepath,
sample_run=True,
)
# # Real data
# mpns_v8_name_relationships_filepath: str = (
# "data/analysis/mpns/mpns_v8/name_relationships/"
# )
# output_filepath: str = "data/analysis/mpns/mpns_v8/name_relationships/query/"
# query_mpns_v8_name_relationships(
# input_filepath=mpns_v8_name_relationships_filepath,
# output_filepath=output_filepath,
# sample_run=False,
# )
| 30.368421 | 86 | 0.747834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 717 | 0.310659 |
19f803a96ec5d364efd732c5edf09bf82c3dfe31 | 124 | py | Python | library_homework/my_project/my_program.py | Tommy3121173/tommy | 429aefb377f84a1d49e85f825a32ac2c160ebc85 | ["MIT"] | null | null | null | library_homework/my_project/my_program.py | Tommy3121173/tommy | 429aefb377f84a1d49e85f825a32ac2c160ebc85 | ["MIT"] | null | null | null | library_homework/my_project/my_program.py | Tommy3121173/tommy | 429aefb377f84a1d49e85f825a32ac2c160ebc85 | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed May 23 16:56:57 2018
@author: tommy_mizuki
"""
import my_library
my_library.my_func(1, 2) | 11.272727 | 35 | 0.645161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.717742 |
19f8e4fcaecd9a3968eed26a324bf80026d1583f | 246 | py | Python | algorithm/python/BAEKJOON_1436.py | cjsrhd94/TIL | b91bab7d99d10c63f91af0790cb28ec3d228b68b | ["MIT"] | 1 | 2021-08-19T06:23:00.000Z | 2021-08-19T06:23:00.000Z | algorithm/python/BAEKJOON_1436.py | cjsrhd94/TIL | b91bab7d99d10c63f91af0790cb28ec3d228b68b | ["MIT"] | null | null | null | algorithm/python/BAEKJOON_1436.py | cjsrhd94/TIL | b91bab7d99d10c63f91af0790cb28ec3d228b68b | ["MIT"] | null | null | null | n = int(input())
count = 0
number = 0
while True:
    if '666' in str(number): # if the string form of the number contains '666', increment count
count += 1
    if count == n: # when count reaches the input n, print the n-th such number and stop
print(number)
break
number += 1 | 24.6 | 68 | 0.565041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.477987 |
19f91845aaff11955f6b430aa3684474c464bf80 | 3,599 | py | Python | cacheTraceAnalysis/plot/reqRate.py | Thesys-lab/cacheWorkloadAnalysisOSDI20 | cfc5bbb5c8d909571546c78c247561c9db449469 | ["Apache-2.0"] | 6 | 2020-11-12T07:51:02.000Z | 2022-03-27T20:20:01.000Z | cacheTraceAnalysis/plot/reqRate.py | Thesys-lab/InMemoryCachingWorkloadAnalysis | 5f6f9f7e29a164478f3fc28eb64c170bbbafdec7 | ["Apache-2.0"] | null | null | null | cacheTraceAnalysis/plot/reqRate.py | Thesys-lab/InMemoryCachingWorkloadAnalysis | 5f6f9f7e29a164478f3fc28eb64c170bbbafdec7 | ["Apache-2.0"] | 1 | 2021-12-31T01:16:09.000Z | 2021-12-31T01:16:09.000Z | """ plot request rate
"""
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
from utils.common import *
def _cal_req_rate(trace_reader, window):
metadata_name = "reqRateList_w{}_{}.pickle".format(window, trace_reader.trace_path.split("/")[-1])
loaded = load_metadata(metadata_name)
if loaded is not None:
return loaded
start_ts = -1
req_cnt_list, obj_cnt_list, req_Gbps_list, obj_Gbps_list = [], [], [], []
req_cnt, obj_cnt, req_byte, obj_byte = 0, 0, 0, 0
seen_obj = set()
for req in trace_reader:
if start_ts == -1:
start_ts = req.real_time
req_cnt += req.cnt
req_byte += req.req_size
if req.obj_id not in seen_obj:
obj_cnt += 1
obj_byte += req.req_size
seen_obj.add(req.obj_id)
if (req.real_time - start_ts)//window > len(req_cnt_list):
req_cnt_list.append(req_cnt/window)
obj_cnt_list.append(obj_cnt/window)
req_Gbps_list.append(req_byte/GB/window*8)
obj_Gbps_list.append(obj_byte/GB/window*8)
req_cnt, obj_cnt, req_byte, obj_byte = 0, 0, 0, 0
seen_obj.clear()
trace_reader.reset()
save_metadata((req_cnt_list, obj_cnt_list, req_Gbps_list, obj_Gbps_list), metadata_name)
return req_cnt_list, obj_cnt_list, req_Gbps_list, obj_Gbps_list
def plot_req_rate(trace_reader, window, plot_type=("cnt", "byte")):
COLOR = JPlot.get_color(2)
req_cnt_list, obj_cnt_list, req_Gbps_list, obj_Gbps_list = _cal_req_rate(trace_reader, window)
ret_dict = {
"mean_req_cnt": sum(req_cnt_list)/len(req_cnt_list),
"mean_obj_cnt": sum(obj_cnt_list)/len(obj_cnt_list),
"mean_req_Gbps": sum(req_Gbps_list)/len(req_Gbps_list),
"mean_obj_Gbps": sum(obj_Gbps_list)/len(obj_Gbps_list),
}
if "cnt" in plot_type or plot_type == "cnt":
plt.plot([i*window/3600 for i in range(len(req_cnt_list))], [i/1000 for i in req_cnt_list], nomarker=True, label="request", color=next(COLOR), linewidth=1)
plt.plot([i*window/3600 for i in range(len(obj_cnt_list))], [i/1000 for i in obj_cnt_list], nomarker=True, label="object", color=next(COLOR), linewidth=1)
plt.xlabel("Time (Hour)")
plt.ylabel("Request rate (K QPS)")
plt.legend()
plt.savefig("{}/{}_reqRateCnt_w{}.png".format(FIG_DIR, trace_reader.trace_path.split("/")[-1], window), no_save_plot_data=True)
plt.clf()
COLOR = JPlot.get_color(2)
if "byte" in plot_type or plot_type == "byte":
y1, y2, ylabel = req_Gbps_list, obj_Gbps_list, "Request rate (Gbps)"
if sum(req_Gbps_list)/len(req_Gbps_list) < 1:
y1 = [i*1024 for i in req_Gbps_list]
y2 = [i*1024 for i in obj_Gbps_list]
ylabel = "Request rate (Mbps)"
plt.plot([i*window/3600 for i in range(len(req_Gbps_list))], y1, nomarker=True, color=next(COLOR), label="request", linewidth=1)
plt.plot([i*window/3600 for i in range(len(obj_Gbps_list))], y2, nomarker=True, color=next(COLOR), label="object", linewidth=1)
plt.xlabel("Time (Hour)")
plt.ylabel(ylabel)
plt.legend()
plt.savefig("{}/{}_reqRateTraffic_w{}.png".format(FIG_DIR, trace_reader.trace_path.split("/")[-1], window), no_save_plot_data=True)
plt.clf()
return ret_dict
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("--trace", type=str, help="trace path")
ap.add_argument("--type", type=str, default="cnt", help="plot type")
ap.add_argument("--window", type=int, default=60, help="the size of window in sec")
p = ap.parse_args()
plot_req_rate(TwrShortBinTraceReader(p.trace), p.window, plot_type=(p.type, ))
| 38.698925 | 159 | 0.689358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.119478 |
19f928aecd4ff7011c0373aab909dea8913438c6 | 1,373 | py | Python | config.py | ShallweJohn/MonsterBlog | f3bd0bdab99af6ba06b7b8fb0eaa6770115fc9c5 | ["MIT"] | null | null | null | config.py | ShallweJohn/MonsterBlog | f3bd0bdab99af6ba06b7b8fb0eaa6770115fc9c5 | ["MIT"] | 3 | 2021-03-18T20:36:25.000Z | 2021-09-07T23:54:49.000Z | config.py | ShallweJohn/MonsterBlog | f3bd0bdab99af6ba06b7b8fb0eaa6770115fc9c5 | ["MIT"] | null | null | null | import os
import redis
import logging
class Config(object):
#配置数据库连接
SQLALCHEMY_DATABASE_URI = 'mysql://root:[email protected]:3306/data_monster'
SQLALCHEMY_TRACK_MODIFICATIONS = False
#配置redis数据库
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
# 配置flask_session
SESSION_TYPE = 'redis'
SESSION_USE_SIGNER = True
SESSION_REDIS = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
PERMENANT_SESSION_LIFETIME = 86400 * 7
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.163.com')
MAIL_PORT = int(os.environ.get('MAIL_PORT', '465'))
MAIL_USE_SSL = True
MAIL_USERNAME = '[email protected]'
MAIL_PASSWORD = 'jiangizhang92'
MONSTER_MAIL_SUBJECT_PREFIX = '[MONSTER]'
MONSTER_MAIL_SENDER = 'MONSTER Admin <[email protected]>'
MONSTER_ADMIN = os.environ.get('MONSTER_ADMIN')
@staticmethod
def init_app(app):
pass
    # Configure the logging level
LOG_LEVEL = logging.DEBUG
class ProductionConfig(Config):
DEBUG = False
LOG_LEVEL = logging.ERROR
class DevelopmentConfig(Config):
DEBUG = True
LOG_LEVEL = logging.DEBUG
config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
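# Typical consumption sketch (assumed application-factory code, not part of
# this file):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(config['development'])
#   config['development'].init_app(app)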
| 19.614286 | 81 | 0.699927 | 1,226 | 0.867657 | 0 | 0 | 49 | 0.034678 | 0 | 0 | 373 | 0.263977 |
19fac7af0c83f21b636a9b1fa9c53ac1705d1cfb | 5,097 | py | Python | utils.py | sjenni/DeepBilevel | 9db6c9d81188e891104677a7ffc4b045421fb097 | ["MIT"] | 8 | 2019-10-23T12:16:13.000Z | 2020-11-16T02:20:28.000Z | utils.py | sjenni/DeepBilevel | 9db6c9d81188e891104677a7ffc4b045421fb097 | ["MIT"] | null | null | null | utils.py | sjenni/DeepBilevel | 9db6c9d81188e891104677a7ffc4b045421fb097 | ["MIT"] | 4 | 2020-02-06T14:54:47.000Z | 2020-10-25T03:03:04.000Z | import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
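# Shape sketch for the argument above (illustrative names, not from the repo):
# with two towers and variables v0, v1, `tower_grads` looks like
#   [[(g0_t0, v0), (g1_t0, v1)],   # gradients computed on tower/GPU 0
#    [(g0_t1, v0), (g1_t1, v1)]]   # gradients computed on tower/GPU 1
# and zip(*tower_grads) regroups the tuples per variable before averaging.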
def montage_tf(imgs, num_h, num_w):
"""Makes a montage of imgs that can be used in image_summaries.
Args:
imgs: Tensor of images
num_h: Number of images per column
num_w: Number of images per row
Returns:
A montage of num_h*num_w images
"""
imgs = tf.unstack(imgs)
img_rows = [None] * num_h
for r in range(num_h):
img_rows[r] = tf.concat(axis=1, values=imgs[r * num_w:(r + 1) * num_w])
montage = tf.concat(axis=0, values=img_rows)
return tf.expand_dims(montage, 0)
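# Usage sketch (assumed summary code, not part of this module): tile a batch
# of 32 images into a 4x8 grid for TensorBoard.
#   tf.summary.image('examples', montage_tf(images[:32], num_h=4, num_w=8))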
def remove_missing(var_list, model_path):
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, dict):
var_dict = var_list
else:
var_dict = {var.op.name: var for var in var_list}
available_vars = {}
for var in var_dict:
if reader.has_tensor(var):
available_vars[var] = var_dict[var]
else:
logging.warning(
'Variable %s missing in checkpoint %s', var, model_path)
var_list = available_vars
return var_list
def assign_from_checkpoint_fn(model_path, var_list, ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of `Variable` objects or a dictionary mapping names in the
checkpoint to the correspoing variables to initialize. If empty or None,
it would return no_op(), None.
ignore_missing_vars: Boolean, if True it would ignore variables missing in
the checkpoint with a warning instead of failing.
reshape_variables: Boolean, if True it would automatically reshape variables
which are of different shape then the ones stored in the checkpoint but
which have the same number of elements.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation.
Raises:
ValueError: If the checkpoint specified at `model_path` is missing one of
the variables in `var_list`.
"""
if ignore_missing_vars:
var_list = remove_missing(var_list, model_path)
saver = tf_saver.Saver(var_list, reshape=reshape_variables)
def callback(session):
saver.restore(session, model_path)
return callback
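# Usage sketch (hypothetical training setup; the checkpoint path is
# illustrative):
#   init_fn = assign_from_checkpoint_fn('/tmp/model.ckpt', tf.model_variables(),
#                                       ignore_missing_vars=True)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       init_fn(sess)   # restores the variables found in the checkpoint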
def get_variables_to_train(trainable_scopes=None):
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if trainable_scopes is None:
variables_to_train = tf.trainable_variables()
else:
scopes = [scope.strip() for scope in trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
print('Variables to train: {}'.format([v.op.name for v in variables_to_train]))
return variables_to_train
def get_checkpoint_path(checkpoint_dir):
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if not ckpt:
print("No checkpoint in {}".format(checkpoint_dir))
return None
return ckpt.model_checkpoint_path
| 36.148936 | 84 | 0.673141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,455 | 0.481656 |
19fb5606842651b084278976b916a2b6ffe5d1f9 | 328 | py | Python | configuration/mmd_configuration.py | matthewfaw/mixnmatch | 11b923b941198c02e380011853beb70169f024ac | ["Apache-2.0"] | 1 | 2020-11-16T04:59:48.000Z | 2020-11-16T04:59:48.000Z | configuration/mmd_configuration.py | matthewfaw/mixnmatch | 11b923b941198c02e380011853beb70169f024ac | ["Apache-2.0"] | null | null | null | configuration/mmd_configuration.py | matthewfaw/mixnmatch | 11b923b941198c02e380011853beb70169f024ac | ["Apache-2.0"] | 1 | 2019-12-03T21:43:02.000Z | 2019-12-03T21:43:02.000Z |
class MMDConfiguration:
def __init__(self,
mmd_rbf_gamma,
mmd_rbf_ncomponents,
mmd_representative_set_size):
        self.mmd_rbf_gamma = mmd_rbf_gamma
        self.mmd_rbf_ncomponents = mmd_rbf_ncomponents
        self.mmd_representative_set_size = mmd_representative_set_size
| 32.8 | 68 | 0.685976 | 326 | 0.993902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
19fb9cf0c33a5df90f7ff935997f2b53b4510673 | 1,234 | py | Python | paralleldomain/model/annotation/polygon_2d.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | ["Apache-2.0"] | 10 | 2021-11-17T17:23:49.000Z | 2022-03-18T09:51:23.000Z | paralleldomain/model/annotation/polygon_2d.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | ["Apache-2.0"] | 3 | 2021-12-02T17:16:20.000Z | 2022-01-07T12:47:13.000Z | paralleldomain/model/annotation/polygon_2d.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | ["Apache-2.0"] | 2 | 2022-03-09T07:03:54.000Z | 2022-03-23T15:53:48.000Z | from dataclasses import dataclass
from typing import List
from paralleldomain.model.annotation.common import Annotation
from paralleldomain.model.annotation.polyline_2d import Polyline2D
@dataclass
class Polygon2D(Polyline2D):
"""A closed polygon made a collection of 2D Lines.
Args:
lines: :attr:`~.Polygon2D.lines`
class_id: :attr:`~.Polygon2D.class_id`
instance_id: :attr:`~.Polygon2D.instance_id`
attributes: :attr:`~.Polygon2D.attributes`
Attributes:
lines: Ordered list of :obj:`Line2D` instances
class_id: Class ID of the polygon. Can be used to lookup more details in :obj:`ClassMap`.
instance_id: Instance ID of annotated object. Can be used to cross-reference with
other instance annotation types, e.g., :obj:`InstanceSegmentation2D` or :obj:`InstanceSegmentation3D`.
If unknown defaults to -1.
attributes: Dictionary of arbitrary object attributes.
"""
...
@dataclass
class Polygons2D(Annotation):
"""Collection of 2D Polygons
Args:
polygons: :attr:`~.Polygons2D.polygons`
Attributes:
polygons: Ordered list of :obj:`Polygon2D` instances
"""
polygons: List[Polygon2D]
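# Construction sketch (assumes Line2D/Point2D constructors from the polyline
# module; their exact signatures are an assumption, not confirmed here):
#   line = Line2D(start=Point2D(x=0.0, y=0.0), end=Point2D(x=1.0, y=0.0))
#   polygon = Polygon2D(lines=[line], class_id=1, instance_id=-1, attributes={})
#   annotation = Polygons2D(polygons=[polygon])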
| 29.380952 | 114 | 0.691248 | 1,018 | 0.824959 | 0 | 0 | 1,040 | 0.842788 | 0 | 0 | 911 | 0.73825 |
19fd46480858b4a1d5b5836cc3a46a14d32272f9 | 828 | py | Python | tests/backup_bsps.py | LaudateCorpus1/bsp_tool | e8c2489ac3bda5a4467f1dce220a76bbf4ce5b19 | ["MIT"] | null | null | null | tests/backup_bsps.py | LaudateCorpus1/bsp_tool | e8c2489ac3bda5a4467f1dce220a76bbf4ce5b19 | ["MIT"] | null | null | null | tests/backup_bsps.py | LaudateCorpus1/bsp_tool | e8c2489ac3bda5a4467f1dce220a76bbf4ce5b19 | ["MIT"] | null | null | null | import os
import shutil
import sys
from maplist import installed_games
backup_dir = "F:/bsps"
if len(sys.argv) == 2:
backup_dir = sys.argv[1]
print(f"Making backups in '{backup_dir}'")
i = 0
for base_dir, game_dir in installed_games:
i += 1
print(f"Backing up ({i}/{len(installed_games)}) {game_dir}...")
for map_dir in installed_games[(base_dir, game_dir)]:
src_dir = os.path.join(base_dir, game_dir, map_dir)
dest_dir = os.path.join(backup_dir, game_dir, map_dir)
os.makedirs(dest_dir, exist_ok=True)
        try:
            shutil.copytree(src_dir, dest_dir, dirs_exist_ok=True)
        except (shutil.Error, FileNotFoundError) as err:
            # note the missed file(s) and continue
            print(f"*** ERROR *** {err}")
| 30.666667 | 67 | 0.642512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.219807 |
19fe235467f017d20a959660a872441f0b170a74 | 770 | py | Python | infobip_channels/whatsapp/models/response/get_templates.py | infobip-community/infobip-api-python-sdk | 5ffc5ab877ee1748aa29391f991c8c5324387487 | ["MIT"] | null | null | null | infobip_channels/whatsapp/models/response/get_templates.py | infobip-community/infobip-api-python-sdk | 5ffc5ab877ee1748aa29391f991c8c5324387487 | ["MIT"] | null | null | null | infobip_channels/whatsapp/models/response/get_templates.py | infobip-community/infobip-api-python-sdk | 5ffc5ab877ee1748aa29391f991c8c5324387487 | ["MIT"] | null | null | null | from typing import List, Optional
from pydantic import AnyHttpUrl
from infobip_channels.core.models import CamelCaseModel, ResponseBase
class Button(CamelCaseModel):
text: str
type: str
phone_number: Optional[str] = None
url: Optional[AnyHttpUrl] = None
class Header(CamelCaseModel):
format: str
text: Optional[str] = None
class Structure(CamelCaseModel):
header: Optional[Header] = None
body: str
footer: Optional[str] = None
type: str
buttons: Optional[List[Button]] = None
class Template(CamelCaseModel):
id: str
business_account_id: int
name: str
language: str
status: str
category: str
structure: Structure
class GetTemplatesResponseOK(ResponseBase):
templates: List[Template]
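# Construction sketch (illustrative values; ResponseBase may require extra
# fields that are omitted here as an assumption):
#   structure = Structure(body="Hello {{1}}", type="BODY")
#   template = Template(id="123", business_account_id=1, name="welcome",
#                       language="en", status="APPROVED",
#                       category="MARKETING", structure=structure)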
| 19.25 | 69 | 0.711688 | 617 | 0.801299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
19ff517f6d368213182e5f5031c40842eae17a49 | 1,391 | py | Python | examples/server.py | fhamborg/Giveme5W | b5f49712654ab466e605716b4cd9f8dce9bcdd88 | ["Apache-2.0"] | 16 | 2018-03-28T11:20:11.000Z | 2020-09-17T19:39:25.000Z | examples/server.py | fhamborg/Giveme5W | b5f49712654ab466e605716b4cd9f8dce9bcdd88 | ["Apache-2.0"] | 3 | 2018-03-15T10:17:29.000Z | 2018-05-16T13:14:28.000Z | examples/server.py | fhamborg/Giveme5W | b5f49712654ab466e605716b4cd9f8dce9bcdd88 | ["Apache-2.0"] | 6 | 2018-05-08T12:53:51.000Z | 2021-09-25T03:21:02.000Z | import logging
from flask import Flask, request, jsonify
from extractor.document import Document
from extractor.five_w_extractor import FiveWExtractor
app = Flask(__name__)
log = logging.getLogger(__name__)
host = None
port = 5000
debug = False
options = None
extractor = FiveWExtractor()
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log.addHandler(ch)
log.setLevel(logging.DEBUG)
def run():
log.info("starting server on port %i", port)
app.run(host, port, debug)
log.info("server has stopped")
@app.route('/extract', methods=['GET', 'POST'])
def extract():
json_article = request.get_json()
if not json_article:
log.warning("received no article")
return jsonify({"error": "no article defined"})
article = {}
if json_article.get('title'):
article['title'] = json_article.get('title')
article['description'] = json_article.get('description')
article['text'] = json_article.get('text')
else:
article['title'] = json_article['articletext']
article['description'] = None
article['text'] = None
log.debug("retrieved raw article for extraction: %s", json_article['title'])
document = Document(article['title'], article['description'], article['text'])
extractor.parse(document)
return jsonify(document.questions)
if __name__ == "__main__":
run()
| 26.245283 | 82 | 0.675054 | 0 | 0 | 0 | 0 | 827 | 0.594536 | 0 | 0 | 300 | 0.215672 |
19ffa347e490ab19819ef9b329ffa153417391c5 | 826 | py | Python | CPAC/utils/tests/test_symlinks.py | Lawreros/C-PAC | ce26ba9a38cbd401cd405150eeed23b805007724 | ["BSD-3-Clause"] | 125 | 2015-03-04T09:14:46.000Z | 2022-03-29T07:46:12.000Z | CPAC/utils/tests/test_symlinks.py | Lawreros/C-PAC | ce26ba9a38cbd401cd405150eeed23b805007724 | ["BSD-3-Clause"] | 1,018 | 2015-01-04T16:01:29.000Z | 2022-03-31T19:23:09.000Z | CPAC/utils/tests/test_symlinks.py | Lawreros/C-PAC | ce26ba9a38cbd401cd405150eeed23b805007724 | ["BSD-3-Clause"] | 117 | 2015-01-10T08:05:52.000Z | 2022-01-18T05:16:51.000Z | import os
import tempfile
import pkg_resources as p
from CPAC.utils.symlinks import create_symlinks
mocked_outputs = \
p.resource_filename(
"CPAC",
os.path.join(
'utils',
'tests',
'test_symlinks-outputs.txt'
)
)
def test_symlinks():
temp_dir = tempfile.mkdtemp(suffix='test_symlinks')
paths = []
with open(mocked_outputs, 'r') as f:
for path in f.readlines():
path = path.strip()
if path:
paths += [path]
create_symlinks(
temp_dir,
'sym_links',
'pipeline_benchmark-FNIRT', '1019436_1', paths
)
print("Links created at", temp_dir)
# TODO test the generated links
# Normal resource case
# Several resources within same key case
# QC case | 19.666667 | 55 | 0.579903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.282082 |
c200bfcfb3506f7b5d5aa61e676f674b8d4fef20 | 14,488 | py | Python | web/migrations/0001_initial.py | jmason-ebi/pdx | aec38d74a78c907041332f4623c01047f45f3f0a | [
"Apache-2.0"
]
| null | null | null | web/migrations/0001_initial.py | jmason-ebi/pdx | aec38d74a78c907041332f4623c01047f45f3f0a | [
"Apache-2.0"
]
| null | null | null | web/migrations/0001_initial.py | jmason-ebi/pdx | aec38d74a78c907041332f4623c01047f45f3f0a | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-09 15:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DataSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('name', models.CharField(max_length=200)),
('description', models.TextField()),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='HostStrain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('name', models.TextField()),
('accession', models.TextField()),
('humanized', models.TextField()),
('humanization_protocol', models.TextField()),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='ImplantationSite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('name', models.CharField(max_length=255)),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='ImplantationType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('name', models.CharField(max_length=255)),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='Marker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('gene', models.CharField(max_length=255)),
('details', models.TextField(blank=True, null=True)),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='Patient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('external_id', models.CharField(blank=True, max_length=10)),
('sex', models.CharField(blank=True, max_length=10, null=True)),
('age', models.IntegerField(blank=True, null=True)),
('race', models.TextField(blank=True, null=True)),
('ethnicity', models.TextField(blank=True, null=True)),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='PatientSnapshot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('age', models.IntegerField(blank=True, null=True)),
('stage', models.TextField(blank=True, null=True)),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.Patient')),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='PdxStrain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('external_id', models.CharField(max_length=100)),
('passage_number', models.CharField(blank=True, max_length=25, null=True)),
('lag_time', models.CharField(blank=True, max_length=25, null=True)),
('doubling_time', models.CharField(blank=True, max_length=25, null=True)),
('metastases', models.SmallIntegerField(blank=True, choices=[(1, 'Yes'), (0, 'No'), (3, 'Unknown')], default=None, null=True)),
('data_source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.DataSource')),
('host_strain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.HostStrain')),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='Regime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('regime', models.TextField()),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('result', models.TextField()),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='Treatment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('drug', models.TextField()),
('dose', models.TextField()),
('regime', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='web.Regime')),
('response', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='web.Response')),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='Tumor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('source_tumor_id', models.CharField(blank=True, max_length=255, null=True)),
('tumor_type', models.CharField(blank=True, max_length=255, null=True)),
('diagnosis', models.CharField(blank=True, max_length=255, null=True)),
('tissue_of_origin', models.CharField(blank=True, max_length=100, null=True)),
('classification', models.CharField(blank=True, max_length=100, null=True)),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='TumorHistology',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('description', models.TextField()),
('image', models.ImageField(upload_to='histology')),
],
options={
'verbose_name_plural': 'Tumor Histology Images',
},
),
migrations.CreateModel(
name='Validation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('status', models.CharField(blank=True, max_length=100, null=True)),
('result', models.TextField()),
('pdx_strain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.PdxStrain')),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.AddField(
model_name='tumor',
name='histology',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='web.TumorHistology'),
),
migrations.AddField(
model_name='tumor',
name='patient_snapshot',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='web.PatientSnapshot'),
),
migrations.AddField(
model_name='pdxstrain',
name='human_tumor',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='human_tumor', to='web.Tumor'),
),
migrations.AddField(
model_name='pdxstrain',
name='implantation_site',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='web.ImplantationSite'),
),
migrations.AddField(
model_name='pdxstrain',
name='implantation_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='web.ImplantationType'),
),
migrations.AddField(
model_name='pdxstrain',
name='mouse_tumor',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='mouse_tumor', to='web.Tumor'),
),
migrations.AddField(
model_name='pdxstrain',
name='treatment',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='web.Treatment'),
),
migrations.AddField(
model_name='patient',
name='prior_treatment',
field=models.ManyToManyField(to='web.Treatment'),
),
migrations.AddField(
model_name='marker',
name='tumor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.Tumor'),
),
]
| 50.835088 | 143 | 0.574752 | 14,262 | 0.984401 | 0 | 0 | 0 | 0 | 0 | 0 | 2,708 | 0.186913 |
c202c2c6ef86a127b7a659f1ab70e457fb054b54 | 4,799 | py | Python | dserve/__init__.py | JIC-CSB/dserve | 5f20d9de8ffb52f98ef9c68b327fe1ca9fcee17e | [
"MIT"
]
| null | null | null | dserve/__init__.py | JIC-CSB/dserve | 5f20d9de8ffb52f98ef9c68b327fe1ca9fcee17e | [
"MIT"
]
| null | null | null | dserve/__init__.py | JIC-CSB/dserve | 5f20d9de8ffb52f98ef9c68b327fe1ca9fcee17e | [
"MIT"
]
| null | null | null | """Script for running the dserve server."""
import os
from flask import (
Flask,
jsonify,
send_file,
abort,
request,
)
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
@app.route("/")
@cross_origin()
def root():
content = {
"_links": {
"self": {"href": "/"},
"items": {"href": "/items"},
"overlays": {"href": "/overlays"}
},
"uuid": app._dataset._admin_metadata["uuid"],
"dtool_version": app._dataset._admin_metadata["dtool_version"],
"name": app._dataset._admin_metadata["name"],
"creator_username": app._dataset._admin_metadata["creator_username"],
}
return jsonify(content)
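# Example (illustrative): the root route returns a HAL-style document, e.g.
#   curl http://localhost:5000/
#   -> {"_links": {"self": {"href": "/"}, ...}, "uuid": "...", "name": "...", ...}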
def items_root():
items = []
for i in app._dataset.manifest["file_list"]:
item = {
"_links": {"self": {"href": "/items/{}".format(i["hash"])}},
"identifier": i["hash"],
}
items.append(item)
content = {
"_links": {
"self": {"href": "/items"},
},
"_embedded": {
"items": items,
}
}
return jsonify(content)
def specific_item(identifier):
try:
app._dataset.item_from_identifier(identifier)
except KeyError:
abort(404)
content = {
"_links": {
"self": {"href": "/items/{}".format(identifier)},
"content": {"href": "/items/{}/raw".format(identifier)},
"overlays": {"href": "/items/{}/overlays".format(identifier)},
},
}
overlays = app._dataset.access_overlays()
for overlay_name, overlay in overlays.items():
content[overlay_name] = overlay[identifier]
return jsonify(content)
@app.route("/items")
@app.route("/items/<identifier>")
@cross_origin()
def items(identifier=None):
if identifier is None:
return items_root()
else:
return specific_item(identifier)
@app.route("/items/<identifier>/raw")
@cross_origin()
def raw_item(identifier):
try:
item = app._dataset.item_from_identifier(identifier)
except KeyError:
abort(404)
item_path = os.path.join(
app._dataset._abs_path,
app._dataset.data_directory,
item["path"]
)
return send_file(item_path, item["mimetype"])
@app.route("/items/<identifier>/overlays")
@cross_origin()
def item_overlays(identifier):
try:
app._dataset.item_from_identifier(identifier)
except KeyError:
abort(404)
content = {
"_links": {
"self": {"href": "/items/{}/overlays".format(identifier)},
},
}
overlays = app._dataset.access_overlays()
for overlay_name in overlays.keys():
href = "/overlays/{}/{}".format(overlay_name, identifier)
content["_links"][overlay_name] = {"href": href}
return jsonify(content)
@app.route("/overlays/<overlay>/<identifier>", methods=["GET", "PUT"])
@cross_origin()
def item_overlay_content(overlay, identifier):
overlays = app._dataset.access_overlays()
try:
requested_overlay = overlays[overlay]
requested_overlay[identifier]
except KeyError:
abort(404)
if request.method == "PUT":
if not request.is_json:
abort(422)
new_value = request.get_json()
requested_overlay[identifier] = new_value
try:
app._dataset.persist_overlay(
overlay, requested_overlay, overwrite=True)
except KeyError:
abort(405)
return "", 201
elif request.method == "GET":
value = requested_overlay[identifier]
return jsonify(value)
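# Example (illustrative): a PUT with a JSON body updates one overlay value, e.g.
#   curl -X PUT http://localhost:5000/overlays/<overlay>/<identifier> \
#        -H 'Content-Type: application/json' -d '"new value"'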
def overlay_root():
overlays = app._dataset.access_overlays()
content = {
"_links": {
"self": {"href": "/overlays"}},
}
for overlay_name in overlays.keys():
value = {"href": "/overlays/{}".format(overlay_name)}
content["_links"][overlay_name] = value
return jsonify(content)
def specific_overlay(overlay_name):
overlays = app._dataset.access_overlays()
try:
overlay = overlays[overlay_name]
except KeyError:
abort(404)
return jsonify(overlay)
def create_new_overlay(overlay_name):
empty_overlay = app._dataset.empty_overlay()
try:
app._dataset.persist_overlay(overlay_name, empty_overlay)
except IOError:
abort(409)
return "", 201
@app.route("/overlays")
@app.route("/overlays/<overlay_name>", methods=["GET", "PUT"])
@cross_origin()
def overlays(overlay_name=None):
if overlay_name is None:
return overlay_root()
else:
if request.method == "PUT":
            return create_new_overlay(overlay_name)
elif request.method == "GET":
return specific_overlay(overlay_name)
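# A minimal launch sketch (illustrative): a dataset object exposing the manifest
# and overlays must be attached before serving; the constructor below is assumed,
# not verified against the dtool API:
#   from dtoolcore import DataSet
#   app._dataset = DataSet.from_path("/path/to/dataset")
#   app.run(host="0.0.0.0", port=5000)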
| 25.526596 | 77 | 0.600542 | 0 | 0 | 0 | 0 | 2,794 | 0.582205 | 0 | 0 | 761 | 0.158575 |
c203136ec3038930bc5926aaf959f30e095e46a5 | 1,610 | py | Python | kkutil/security.py | kaka19ace/kkutils | 1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1 | [
"MIT"
]
| 1 | 2015-12-13T18:42:52.000Z | 2015-12-13T18:42:52.000Z | kkutil/security.py | kaka19ace/kkutil | 1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1 | [
"MIT"
]
| null | null | null | kkutil/security.py | kaka19ace/kkutil | 1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
util regex tool
refs:
http://www.symantec.com/connect/articles/detection-sql-injection-and-cross-site-scripting-attacks
"""
import re
INJECTION_REGEX = re.compile(
r"(%27)|(\')|(\-\-)|(%23)|(#)|" # Regex for detection of SQL meta-characters
r"\w*((%27)|(\'))\s+((%6F)|o|(%4F))((%72)|r|(%52))\s*|" # Modified regex for detection of SQL meta-characters eg: ' or 1 = 1' detect word 'or',
r"((%3D)|(=))[^\n]*((%27)|(\')|(\-\-)|(%3B)|(;))" # Regex for typical SQL Injection attack eg: '= 1 --'
r"((%27)|(\'))union|" # Regex for detecting SQL Injection with the UNION keyword
r"((%27)|(\'))select|" # Regex for detecting SQL Injection with the UNION keyword
r"((%27)|(\'))insert|" # Regex for detecting SQL Injection with the UNION keyword
r"((%27)|(\'))update|" # Regex for detecting SQL Injection with the UNION keyword
r"((%27)|(\'))drop", # Regex for detecting SQL Injection with the UNION keyword
re.IGNORECASE
)
CSS_ATTACK_REGEX = re.compile(r"((%3C)|<)((%2F)|/)*[a-z0-9%]+((%3E)|>)", re.IGNORECASE)
CSS_IMG_SRC_ATTACK_REGEX = re.compile(
r"((%3C)|<)((%69)|i|(%49))((%6D)|m|(%4D))((%67)|g|(%47))[^\n]+((%3E)|>)",
re.IGNORECASE
)
CSS_PARANOID_ATTACK_REGEX = re.compile("((%3C)|<)[^\n]+((%3E)|>)", re.IGNORECASE)
def is_injection_string(s):
return True if INJECTION_REGEX.match(s) else False
def is_css_attack_string(s):
    if CSS_ATTACK_REGEX.match(s) or \
CSS_IMG_SRC_ATTACK_REGEX.match(s) or \
CSS_PARANOID_ATTACK_REGEX.match(s):
return True
return False
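if __name__ == "__main__":
    # Quick illustrative self-checks of the detectors above.
    assert is_injection_string("' or 1=1 --")
    assert is_css_attack_string("<script>alert(1)</script>")
    assert not is_injection_string("hello world")
    print("security regex sanity checks passed")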
| 35 | 148 | 0.608075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,026 | 0.637267 |
c204bfd19101390dbf534e7049d9b49aef3685e3 | 1,520 | py | Python | update_eeprom_rc.py | rkojedzinszky/thermo-sensor | f0b5aa6dbf231b566e00a683c5bb1551569d2463 | [
"BSD-3-Clause"
]
| 2 | 2019-04-25T17:38:02.000Z | 2020-03-03T22:50:04.000Z | update_eeprom_rc.py | rkojedzinszky/thermo-sensor | f0b5aa6dbf231b566e00a683c5bb1551569d2463 | [
"BSD-3-Clause"
]
| null | null | null | update_eeprom_rc.py | rkojedzinszky/thermo-sensor | f0b5aa6dbf231b566e00a683c5bb1551569d2463 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
REGISTERS = {
'IOCFG2': 0x00,
'IOCFG1': 0x01,
'IOCFG0': 0x02,
'FIFOTHR': 0x03,
'SYNC1': 0x04,
'SYNC0': 0x05,
'PKTLEN': 0x06,
'PKTCTRL1': 0x07,
'PKTCTRL0': 0x08,
'ADDR': 0x09,
'CHANNR': 0x0A,
'FSCTRL1': 0x0B,
'FSCTRL0': 0x0C,
'FREQ2': 0x0D,
'FREQ1': 0x0E,
'FREQ0': 0x0F,
'MDMCFG4': 0x10,
'MDMCFG3': 0x11,
'MDMCFG2': 0x12,
'MDMCFG1': 0x13,
'MDMCFG0': 0x14,
'DEVIATN': 0x15,
'MCSM2': 0x16,
'MCSM1': 0x17,
'MCSM0': 0x18,
'FOCCFG': 0x19,
'BSCFG': 0x1A,
'AGCCTRL2': 0x1B,
'AGCCTRL1': 0x1C,
'AGCCTRL0': 0x1D,
'WOREVT1': 0x1E,
'WOREVT0': 0x1F,
'WORCTRL': 0x20,
'FREND1': 0x21,
'FREND0': 0x22,
'FSCAL3': 0x23,
'FSCAL2': 0x24,
'FSCAL1': 0x25,
'FSCAL0': 0x26,
'RCCTRL1': 0x27,
'RCCTRL0': 0x28,
'FSTEST': 0x29,
'PTEST': 0x2A,
'AGCTEST': 0x2B,
'TEST2': 0x2C,
'TEST1': 0x2D,
'TEST0': 0x2E,
'PATABLE': 0x3E,
}
if __name__ == '__main__':
import sys
import re
with open('eeprom', 'r+b') as fh:
fh.seek(20)
for line in sys.stdin:
if re.match('^\s*#', line):
continue
m = re.match('(?P<reg>\w+)\s+(?P<value>[0-9a-fA-F]+)', line)
if not m:
continue
m = m.groupdict()
fh.write(chr(REGISTERS[m['reg']]))
fh.write(chr(int(m['value'], 16)))
fh.write(b"\xff" * (512 - fh.tell()))
| 20.540541 | 72 | 0.484211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 505 | 0.332237 |
c205b5f889cdcc188c5b89c3efa9505bfb938fe3 | 384 | py | Python | UsefulLink.py | qyu6/TAILab | 6c3e7a7e2e49f7c673ab46b90c1568a96cce75b7 | [
"Apache-2.0"
]
| 1 | 2022-01-10T15:14:55.000Z | 2022-01-10T15:14:55.000Z | UsefulLink.py | qyu6/TAILab | 6c3e7a7e2e49f7c673ab46b90c1568a96cce75b7 | [
"Apache-2.0"
]
| null | null | null | UsefulLink.py | qyu6/TAILab | 6c3e7a7e2e49f7c673ab46b90c1568a96cce75b7 | [
"Apache-2.0"
]
| null | null | null | '''
@func: module that stores useful links
@create:2021.10.20
'''
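# Usage sketch (illustrative): import and call from a Streamlit app entry point,
# then launch with `streamlit run app.py`:
#   from UsefulLink import usefullink
#   usefullink()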
def usefullink():
import streamlit as st
    st.write('[1]. Online LaTeX equation editor')
st.write('https://latex.codecogs.com/eqneditor/editor.php?lang=zh-cn')
    st.write('[2]. Badges (logos) to decorate a GitHub README.md')
st.write('https://shields.io/')
    st.write("[3]. Reputedly the world's best translator")
st.write('https://www.deepl.com/translator') | 25.6 | 74 | 0.65625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.669767 |
c2065e5fc7e61fdabd4ab6fd12c1ead2ad9d477a | 78,713 | py | Python | htdeblur/acquisition/motion.py | zfphil/htdeblur | ac557284f9913292721a6b9f943ff9b921043978 | [
"BSD-3-Clause"
]
| 2 | 2020-01-16T18:30:55.000Z | 2020-02-06T08:33:51.000Z | htdeblur/acquisition/motion.py | zfphil/htdeblur | ac557284f9913292721a6b9f943ff9b921043978 | [
"BSD-3-Clause"
]
| null | null | null | htdeblur/acquisition/motion.py | zfphil/htdeblur | ac557284f9913292721a6b9f943ff9b921043978 | [
"BSD-3-Clause"
]
| null | null | null | # Copyright 2017 Regents of the University of California
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time, copy, collections, math, json
import numpy as np
import scipy as sp
import matplotlib
from matplotlib import pyplot as plt
import llops as yp
# Custom scale bar object
from matplotlib_scalebar.scalebar import ScaleBar
# Libwallerlab imports
from llops import display
from llops import Roi
class StopAndStareAcquisition():
# Initialization
def __init__(self, hardware_controller_list, system_metadata,
illumination_type='bf',
illumination_sequence=None,
frame_spacing_mm=1,
object_size_mm=(0.5, 0.5),
reuse_illumination_sequence=True,
max_exposure_time_s=2,
exposure_time_pad_s=0.0,
velocity_mm_s=None,
exposure_time_s=None,
debug=False,
trigger_mode='software',
motion_acceleration_mm_s_2=1e3,
flip_pathway=False,
acquisition_timeout_s=3,
illumination_na_pad=0.03,
illumination_color={'w': 127},
settle_time_s=0):
# Parse options
self.illumination_type = illumination_type
self.settle_time_s = settle_time_s
self.object_size_mm = object_size_mm
self.frame_spacing_mm = frame_spacing_mm
self.flip_pathway = flip_pathway
self.exposure_time_pad_s = exposure_time_pad_s
self.debug = debug
self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2
self.velocity_mm_s = velocity_mm_s
self.max_exposure_time_s = max_exposure_time_s
self.illumination_na_pad = illumination_na_pad
self.illumination_color = illumination_color
self.acquisition_timeout_s = acquisition_timeout_s
# Define controller objects, which act as hardware interfaces.
# These should be in an ordered dictionary because the order which they
# are initialized matters when using a mix of hardware and software triggering.
self.hardware_controller_list = collections.OrderedDict()
# First add hardware triggered elements so they perform their set-up before we trigger software elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'hardware':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Then, add software triggered elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'software':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
        # Check that a camera controller is present
assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!'
# Store metadata object
self.metadata = system_metadata
# Ensure we have all necessary metadata for basic acquisition
assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.'
assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.'
assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.'
# Update effective pixel size (for scale bar)
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag)
# Trigger Constants
self.TRIG_MODE_EVERY_PATTERN = 1
self.TRIG_MODE_ITERATION = -1
self.TRIG_MODE_START = -2
# Frame state time sequence, will default to a sequence of one exposure time per frame if left as None
self.time_sequence_s = None
self.exposure_time_s = None
self.hardware_sequence_timing = None
        # Turn off fast sequencing for illumination by default since this is only available with certain LED arrays
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].use_fast_sequence = False
self.metadata.type = 'stop and stare'
assert 'illumination' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable light source'
assert 'position' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable positioning device'
# Generate motion pathway
self.hardware_controller_list['position'].state_sequence = self.genStopAndStarePathwayRaster(
self.object_size_mm, self.frame_spacing_mm)
# Generate illumination sequence
        illumination_pattern_sequence = [self.illumination_type] * \
            len(self.hardware_controller_list['position'].state_sequence)
        self.hardware_controller_list['illumination'].state_sequence = self.genMultiContrastSequence(
            illumination_pattern_sequence)
# Tell device not to use feedback
self.hardware_controller_list['illumination'].trigger_wait_flag = False
self.hardware_controller_list['illumination'].command('trs.0.500.0')
self.hardware_controller_list['illumination'].command('trs.1.500.0')
self.hardware_controller_list['position'].goToPosition((0,0))
self.hardware_controller_list['position'].command('ENCODER X 1')
self.hardware_controller_list['position'].command('ENCODER Y 1')
self.hardware_controller_list['position'].command('ENCW X 100')
self.hardware_controller_list['position'].command('ENCW Y 100')
def acquire(self, exposure_time_ms=50):
# Allocate memory for frames
if self.hardware_controller_list['camera'].isSequenceRunning():
self.hardware_controller_list['camera'].sequenceStop()
self.hardware_controller_list['camera'].setBufferSizeMb(
20 * len(self.hardware_controller_list['position'].state_sequence))
# Set camera exposure
self.hardware_controller_list['camera'].setExposure(exposure_time_ms / 1e3)
self.hardware_controller_list['camera'].setTriggerMode('hardware')
self.hardware_controller_list['camera'].runSequence()
self.hardware_controller_list['illumination'].bf()
        # Snap one image to ensure all acquisitions are started
self.hardware_controller_list['camera'].snap()
# generate frame_list
t0 = time.time()
frames_acquired = 0
frame_list = []
for frame in yp.display.progressBar(self.hardware_controller_list['position'].state_sequence, name='Frames Acquired'):
pos = frame['states']
x = pos[0][0]['value']['x']
y = pos[0][0]['value']['y']
self.hardware_controller_list['position'].goToPosition((x, y), blocking=True)
time.sleep(self.settle_time_s)
frame_list.append(self.hardware_controller_list['camera'].snap())
frames_acquired += 1
# print('Acquired %d of %d frames' % (frames_acquired, len(self.hardware_controller_list['position'].state_sequence)))
t_acq_sns = time.time() - t0
print("Acquisition took %.4f seconds" % (t_acq_sns))
# Create dataset
from htdeblur.mddataset import MotionDeblurDataset
dataset = MotionDeblurDataset()
# Assign acquisition time
self.metadata.acquisition_time_s = t_acq_sns
        # Apply simple geometric transformations (convert the list of frames to an array first)
        frame_list = np.asarray(frame_list)
        if self.metadata.camera.transpose:
            frame_list = frame_list.transpose(0, 2, 1)
        if self.metadata.camera.flip_x:
            frame_list = np.flip(frame_list, 2)
        if self.metadata.camera.flip_y:
            frame_list = np.flip(frame_list, 1)
# Assign
dataset.frame_list = [frame for frame in frame_list]
# Set frame state list
self.n_frames = len(self.hardware_controller_list['position'].state_sequence)
frame_state_list = []
for frame_index in range(self.n_frames):
single_frame_state_list = {}
# Loop over hardware controllers and record their state sequences
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index]
# Record time_sequence_s
single_frame_state_list['time_sequence_s'] = [0]
# Add to list of all frames
frame_state_list.append(single_frame_state_list)
dataset.metadata = self.metadata
dataset.type = 'stop_and_stare'
dataset.frame_state_list = frame_state_list
return dataset
def genStopAndStarePathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=1, include_minor_axis=False):
# Determine major axis
if major_axis is None:
major_axis = np.argmax(np.asarray(object_size_mm))
if object_size_mm[0] == object_size_mm[1]:
major_axis = 1
        # Determine number of measurements
measurement_count = np.ceil(np.asarray(object_size_mm) / np.asarray(frame_spacing_mm)
                                    ).astype(int)  # two components in x and y
# Determine slightly smaller frame spacing for optimal coverage of object
frame_spacing_mm = (object_size_mm[0] / measurement_count[0], object_size_mm[1] / measurement_count[1])
# Error checking
assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
# This variable will be populated by the loop below
raster_segments = np.zeros((measurement_count[0] * 2, 2))
# Generate raster points
raster_end_point_list = []
pathway = []
linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning
for row in np.arange(measurement_count[0]):
if row % 2 == 0:
for index, col in enumerate(range(measurement_count[1])):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
else:
for index, col in enumerate(reversed(range(measurement_count[1]))):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
linear_segment_index += 1
# make the center the mean of the pathway
path_means = []
for path in pathway:
path_mean = ((path['y_start']), (path['x_start']))
path_means.append(path_mean)
# mean = np.sum(np.asarray(path_means), axis=1) / len(path_means)
mean = np.sum(np.asarray(path_means), axis=0) / len(path_means)
for path in pathway:
path['x_start'] -= mean[1]
path['x_end'] -= mean[1]
path['y_start'] -= mean[0]
path['y_end'] -= mean[0]
# return pathway
state_sequence = []
for path in pathway:
# Store common information about this frame
common_state_dict = {}
common_state_dict['frame_time'] = self.hardware_controller_list['camera'].getExposure()
common_state_dict['led_update_rate_us'] = None
common_state_dict['linear_segment_index'] = None
common_state_dict['frame_distance'] = 0
common_state_dict['exposure_distance'] = 0
common_state_dict['velocity'] = self.velocity_mm_s
common_state_dict['acceleration'] = self.motion_acceleration_mm_s_2
common_state_dict['n_blur_positions_exposure'] = 1
common_state_dict['position_delta_x_mm'] = 0
common_state_dict['position_delta_y_mm'] = 0
path_dict = {'value': {'time_index' : 0,
'x': path['x_start'],
'y': path['y_start']}}
state_sequence.append({'states' : [[path_dict]], 'common' : common_state_dict})
        return state_sequence
def plotPathway(self):
sequence_list = self.hardware_controller_list['position'].state_sequence
point_list_start = []
point_list_end = []
for sequence in sequence_list:
start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y'])
end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y'])
point_list_start.append(start_pos)
point_list_end.append(end_pos)
point_list_start = np.asarray(point_list_start)
point_list_end = np.asarray(point_list_end)
plt.figure()
for index in range(len(point_list_start)):
plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b')
plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r')
plt.plot([point_list_start[index, 0], point_list_end[index, 0]],
[point_list_start[index, 1], point_list_end[index, 1]], c='y')
plt.xlabel('Position X (mm)')
plt.ylabel('Position Y (mm)')
plt.title('Pathway (b is start, y/o is end)')
plt.gca().invert_yaxis()
def genMultiContrastSequence(self, illumination_pattern_sequence, n_acquisitions=1,
darkfield_annulus_width_na=0.1):
led_list = np.arange(self.metadata.illumination.state_list.design.shape[0])
bf_mask = self.metadata.illumination.state_list.design[:, 0] ** 2 \
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (
self.metadata.objective.na + self.illumination_na_pad) ** 2
led_list_bf = led_list[bf_mask]
led_list_df = led_list[~bf_mask]
led_list_an = led_list[~bf_mask & (self.metadata.illumination.state_list.design[:, 0] ** 2
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (self.metadata.objective.na + darkfield_annulus_width_na) ** 2)]
illumination_sequence = []
self.pattern_type_list = []
pattern_dict = {'dpc.top': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] > 0]),
'dpc.bottom': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] < 0]),
'dpc.left': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 0] > 0]),
'dpc.right': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 0] < 0]),
'single': [0],
'bf': np.ndarray.tolist(led_list_bf),
'df': np.ndarray.tolist(led_list_df),
'an': np.ndarray.tolist(led_list_an),
'full': np.ndarray.tolist(led_list)
}
# DPC does not flicker patterns within frames
n_time_points_per_frame = 1
illumination_state_list = []
# Write image sequence to list
for acquisition_index in range(n_acquisitions):
# Loop over DPC patterns (frames)
for frame_index, pattern in enumerate(illumination_pattern_sequence):
single_frame_state_list_illumination = []
                # Loop over time points (irrelevant for DPC)
for time_index in range(n_time_points_per_frame):
time_point_state_list = []
                    # Loop over LEDs in the current pattern
for led_idx in pattern_dict[pattern]:
values_dict = {}
for color_name in self.illumination_color:
values_dict[color_name] = self.illumination_color[color_name]
led_dict = {
'index': int(led_idx),
'time_index': 0,
'value': values_dict
}
# Append this to list with elements for each interframe time point
time_point_state_list.append(led_dict)
# Append to frame_dict
single_frame_state_list_illumination.append(time_point_state_list)
# Define illumination sequence
illumination_state_list.append({'states' : single_frame_state_list_illumination, 'common' : {}})
# Define illumination list
self.state_list = self.metadata.illumination.state_list.design
return illumination_state_list
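# A minimal usage sketch for StopAndStareAcquisition (controller and metadata
# objects are application-specific; the names below are illustrative):
#   acq = StopAndStareAcquisition([camera, stage, led_array], system_metadata,
#                                 object_size_mm=(1.0, 1.0), frame_spacing_mm=(0.5, 0.5))
#   dataset = acq.acquire(exposure_time_ms=50)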
class MotionDeblurAcquisition():
# Initialization
def __init__(self, hardware_controller_list, system_metadata,
illumination_sequence=None,
motion_path_type='linear',
use_l1_distance_for_motion_calculations=True,
blur_vector_method='pseudo_random',
kernel_pulse_count=150,
saturation_factor=1.0,
frame_spacing_mm=1,
object_size_mm=(0.5, 0.5),
reuse_illumination_sequence=True,
max_exposure_time_s=2,
max_velocity_mm_s=40.0,
max_led_update_rate_us=0.01,
exposure_time_pad_s=0.0,
velocity_mm_s=None,
exposure_time_s=None,
debug=False,
motion_acceleration_mm_s_2=1e3,
extra_run_up_time_s=0,
flip_pathway=False,
segment_delay_s=0,
initial_auto_exposure=False,
acquisition_timeout_s=3,
illumination_sequence_count=1,
illumination_na_pad=0.03,
illumination_color={'w': 127},
only_store_first_and_last_position=True):
# Parse options
self.motion_path_type = motion_path_type
self.object_size_mm = object_size_mm
self.frame_spacing_mm = frame_spacing_mm
self.flip_pathway = flip_pathway
self.use_l1_distance_for_motion_calculations = use_l1_distance_for_motion_calculations
self.velocity_mm_s = velocity_mm_s
self.exposure_time_pad_s = exposure_time_pad_s
self.debug = debug
self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2
self.max_led_update_rate_us = max_led_update_rate_us
self.max_exposure_time_s = max_exposure_time_s
self.max_velocity_mm_s = max_velocity_mm_s
self.illumination_na_pad = illumination_na_pad
self.saturation_factor = saturation_factor
self.reuse_illumination_sequence = reuse_illumination_sequence
self.blur_vector_method = blur_vector_method
self.kernel_pulse_count = kernel_pulse_count
self.illumination_color = illumination_color
self.extra_run_up_time_s = extra_run_up_time_s
self.initial_auto_exposure = initial_auto_exposure
self.acquisition_timeout_s = acquisition_timeout_s
self.segment_delay_s = segment_delay_s
self.only_store_first_and_last_position = only_store_first_and_last_position
self.illumination_sequence = illumination_sequence
self.illumination_sequence_count = illumination_sequence_count
# Define controller objects, which act as hardware interfaces.
# These should be in an ordered dictionary because the order which they
# are initialized matters when using a mix of hardware and software triggering.
self.hardware_controller_list = collections.OrderedDict()
# First add hardware triggered elements so they perform their set-up before we trigger software elements
for controller in hardware_controller_list:
if hasattr(controller, 'trigger_mode'):
                if controller.trigger_mode == 'hardware':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
        # Then, add the remaining (software-triggered) elements; hardware-triggered ones are simply re-registered
for controller in hardware_controller_list:
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
        # Check that a camera controller is present
assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!'
# Store metadata object
self.metadata = system_metadata
# Ensure we have all necessary metadata for basic acquisition
assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.'
assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.'
assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.'
# Update effective pixel size (for scale bar)
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag)
# Trigger Constants
self.TRIG_MODE_EVERY_PATTERN = 1
self.TRIG_MODE_ITERATION = -1
self.TRIG_MODE_START = -2
# Frame state time sequence, will default to a sequence of one exposure time per frame if left as None
self.time_sequence_s = None
self.exposure_time_s = None
self.hardware_sequence_timing = None
        # Turn off fast sequencing for illumination by default since this is only available with certain LED arrays
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].use_fast_sequence = False
# Set metadata type
self.metadata.type = 'motiondeblur'
assert 'illumination' in self.hardware_controller_list, 'Motion deblur object requires programmable light source'
assert 'position' in self.hardware_controller_list, 'Motion deblur object requires motion stage'
# Initialize state_sequence
self.state_sequence = []
# Generate position sequence
self.hardware_controller_list['position'].state_sequence, self.time_sequence_s = self.genMotionPathway(
pathway_type=self.motion_path_type, frame_spacing_mm=frame_spacing_mm)
# Generate illumination sequence
self.hardware_controller_list['illumination'].state_sequence = self.genMotionIlluminationSequenceRandom(illumination_sequence=illumination_sequence,
sequence_count=self.illumination_sequence_count)
# Set up subframe captures
self.subframe_capture_count = len(self.hardware_controller_list['illumination'].state_sequence[0])
self.force_preload_all_frames = True
self.hardware_controller_list['position'].continuous_states_between_frames = True
# Configure illuination to use fast sequence updating if specified in options
self.hardware_controller_list['illumination'].use_fast_sequence = True
# Set bit depth
self.illumination_sequence_bit_depth = 1
# Set extra options for position controller
self.hardware_controller_list['position'].extra_run_up_time_s = self.extra_run_up_time_s
# Calculate effective pixel size if it hasn't already been calculated
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / \
(self.metadata.objective.mag * self.metadata.system.mag)
def preAcquire(self):
''' This method sets up the camera for an acquisition '''
        # Check that the lengths of the motion, illumination, pupil, and focal sequences are the same (or None)
frame_counts = []
for hardware_controller_name in list(self.hardware_controller_list):
# Get controller object from dictionary
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
# Reset Controller
hardware_controller.reset()
# Get number of frames in sequence. If there is no sequence, remove this element from hw_controller_list
                if hardware_controller.type != 'camera':
if hardware_controller.state_sequence is not None:
frame_counts.append(len(hardware_controller.state_sequence))
else:
self.hardware_controller_list.pop(hardware_controller_name)
else:
# Remove this controller from the list
                if hardware_controller_name != 'camera':
del self.hardware_controller_list[hardware_controller_name]
# Turn on hardware triggering for initialization
self.hardware_controller_list['camera'].setTriggerMode('hardware')
# Set illumination parameters
if 'illumination' in self.hardware_controller_list:
# self.hardware_controller_list['illumination'].setColor(self.illumination_color)
self.hardware_controller_list['illumination'].setSequenceBitDepth(
self.illumination_sequence_bit_depth)
# Ensure all hardware elements have the same number of frames
if len(frame_counts) > 0:
if not np.sum(np.mean(np.asarray(frame_counts)) == np.asarray(frame_counts)) == len(frame_counts):
raise ValueError('Sequence lengths are not the same (or None).')
else:
self.n_frames = frame_counts[0]
else:
raise ValueError('No sequence provided!')
# Initialize frame_list
self.frame_list = np.zeros((self.n_frames,
self.hardware_controller_list['camera'].getImageHeight(), self.hardware_controller_list['camera'].getImageWidth()), dtype=np.uint16)
# Apply simple geometric transformations
if self.metadata.camera.transpose:
self.frame_list = self.frame_list.transpose(0, 2, 1)
if self.metadata.camera.flip_x:
self.frame_list = np.flip(self.frame_list, 2)
if self.metadata.camera.flip_y:
self.frame_list = np.flip(self.frame_list, 1)
# Generate frame_state_list
frame_state_list = []
if self.time_sequence_s is None:
self.time_sequence_s = []
for _ in range(self.n_frames):
self.time_sequence_s.append([0])
# Loop over frames
for frame_index in range(self.n_frames):
single_frame_state_list = {}
# Loop over hardware controllers and record their state sequences
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index]
# Record time_sequence_s
single_frame_state_list['time_sequence_s'] = self.time_sequence_s[frame_index]
# Add to list of all frames
frame_state_list.append(single_frame_state_list)
self.frame_state_list = frame_state_list
# Perform auto-exposure if user desires
if self.initial_auto_exposure:
# Illuminate with first pattern
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].sequenceReset()
self.hardware_controller_list['illumination'].time_sequence_s = [[0]]
self.hardware_controller_list['illumination'].preloadSequence(0)
self.hardware_controller_list['illumination'].sequenceStep()
# Small delay to ensure illumination gets updated
time.sleep(0.1)
# Run Auto-Exposure
self.hardware_controller_list['camera'].autoExposure()
# Set camera memory footprint
if (self.hardware_controller_list['camera'].getBufferTotalCapacity() < self.frame_list.shape[0]):
self.frame_size_mb = int(
np.ceil(float(self.frame_list.shape[0] / 1e6) * float(self.frame_list.shape[1]) * float(self.frame_list.shape[2]) * 2))
print('Allocating %dmb for frames' % self.frame_size_mb)
self.hardware_controller_list['camera'].setBufferSizeMb(self.frame_size_mb)
assert self.hardware_controller_list['camera'].getBufferTotalCapacity(
) >= self.frame_list.shape[0], 'Buffer size too small!'
# Store initial time (acquisition start)
t0 = time.time()
# Tell camera to start waiting for frames
self.hardware_controller_list['camera'].runSequence()
# Keep track of how many images we have acquired
self.total_frame_count = 0
def acquire(self,
dataset=None,
reset_devices=False):
'''
        This is a generic acquisition method, where LEDs are updated according to the sequence variable.
'''
# Call preacquire. which initializes hardware and variables
self.preAcquire()
        # Determine which frames can be preloaded before serial acquisition. If each frame has only one state, we assume all frames can be preloaded; but if the state of any hardware element changes within any frame, we assume the frames cannot be preloaded.
frame_count = 0
linear_segment_list = []
for frame_state in self.hardware_controller_list['position'].state_sequence:
if frame_state['common']['linear_segment_index'] >= 0:
frame_count += 1
if frame_state['common']['linear_segment_index'] not in linear_segment_list:
linear_segment_list.append(frame_state['common']['linear_segment_index'])
print("Found %d segments and %d frames" % (len(linear_segment_list), frame_count))
t_start = time.time()
for linear_segment_index in linear_segment_list:
self.frames_to_acquire = []
# Determine which linear segments to run
for frame_index, frame_state in enumerate(self.hardware_controller_list['position'].state_sequence):
if frame_state['common']['linear_segment_index'] == linear_segment_index:
self.frames_to_acquire += [frame_index]
self.n_frames_to_acquire = len(self.frames_to_acquire)
x_start = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[0]]['states'][0][0]['value']['x']
y_start = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[0]]['states'][0][0]['value']['y']
x_end = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[-1]]['states'][0][0]['value']['x']
y_end = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[-1]]['states'][0][0]['value']['y']
print('Starting linear segment %d which has %d frames moving from (%.4f, %.4f)mm to (%.4f, %.4f)mm' %
(linear_segment_index, self.n_frames_to_acquire, x_start, y_start, x_end, y_end))
frame_has_multiple_states = []
for frame_index in self.frames_to_acquire:
number_of_states_in_current_frame = 0
for hardware_controller_name in self.hardware_controller_list:
                    if hardware_controller_name != 'camera' and self.hardware_controller_list[hardware_controller_name].state_sequence is not None:
# Check if this frame can be preloaded (if it has more than one state, it can't be preloaded)
number_of_states_in_current_frame = max(number_of_states_in_current_frame, len(
self.hardware_controller_list[hardware_controller_name].state_sequence[frame_index]['states']))
# Check that the length of time_sequence_s matches the max number of state changes within this frame
if number_of_states_in_current_frame > 1:
frame_has_multiple_states.append(True)
assert self.time_sequence_s is not None, "time_sequence_s can not be None if any frame has multiple states!"
                    assert len(self.time_sequence_s[frame_index]) == number_of_states_in_current_frame, "time_sequence_s for frame %d is of wrong length!" % frame_index
else:
frame_has_multiple_states.append(False)
            # Determine if the entire multi-frame sequence can be preloaded (True when no frame has more than one system state, e.g. LED pattern, or when preloading is forced)
all_frames_will_be_preloaded = (not any(frame_has_multiple_states)) or self.force_preload_all_frames
# Determine optimal exposure time for all frames
if self.exposure_time_s is not None:
self.hardware_controller_list['camera'].setExposure(self.exposure_time_s)
elif self.time_sequence_s is not None and max(self.time_sequence_s[0]) > 0:
frame_exposures = []
for frame_index in range(self.n_frames_to_acquire):
frame_exposures.append(max(self.time_sequence_s[frame_index]))
self.exposure_time_s = sum(frame_exposures) / (self.n_frames_to_acquire)
self.hardware_controller_list['camera'].setExposure(self.exposure_time_s)
else:
self.exposure_time_s = self.hardware_controller_list['camera'].getExposure()
# Check that exposure time is correct
assert abs(self.exposure_time_s - self.hardware_controller_list['camera'].getExposure(
)) < 1e-3, "Desired exposure time %.2f is not equal to device exposure %.2f. This is probably a MM issue" % (self.exposure_time_s, self.hardware_controller_list['camera'].getExposure())
# print('Using exposure time %.2fs (%d ms)' % (self.exposure_time_s, int(self.exposure_time_s * 1000)))
# Check that time_sequence_s for multiple frames exists if there are inter-frame state changes
if (not any(frame_has_multiple_states)) or self.time_sequence_s is None:
self.time_sequence_s = [self.exposure_time_s]
# Configure hardware triggering
trigger_output_settings = [0, 0]
trigger_input_settings = [0, 0]
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hasattr(hardware_controller, 'trigger_mode') and 'hardware' in hardware_controller.trigger_mode:
# Check that trigger pins are configured
assert hardware_controller.trigger_pin is not None, 'Trigger pin must be configured for hardware triggering!'
# Determine if we're performing preloadable acquisitions or not
if self.subframe_capture_count > 1:
if self.reuse_illumination_sequence:
if hardware_controller_name == 'camera':
if self.illumination_sequence_count == 1:
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
else:
trigger_output_settings[hardware_controller.trigger_pin] = len(self.hardware_controller_list['position'].state_sequence[0]['states']) // self.illumination_sequence_count
trigger_input_settings[hardware_controller.trigger_pin] = len(self.hardware_controller_list['position'].state_sequence[0]['states']) // self.illumination_sequence_count
elif hardware_controller_name == 'position':
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
else:
if hardware_controller_name == 'camera':
trigger_output_settings[hardware_controller.trigger_pin] = self.subframe_capture_count
trigger_input_settings[hardware_controller.trigger_pin] = self.subframe_capture_count
elif hardware_controller_name == 'position':
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
# Case where there is only one system state wihtin each frame (trigger each frame)
elif all_frames_will_be_preloaded:
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_EVERY_PATTERN
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_EVERY_PATTERN
# Case where we only want to trigger on first frame. This is probably not a good default.
else:
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
# Check that this hardware controller is ready for a sequence, if it is sequencable.
if hardware_controller.state_sequence is not None:
# Reset controller sequence to initial state
hardware_controller.sequenceReset()
time.sleep(0.1)
# Wait until initialization is complete
initialization_wait_time = 0
for hardware_controller_name in self.hardware_controller_list:
while not self.hardware_controller_list[hardware_controller_name].isReadyForSequence():
time.sleep(0.05)
initialization_wait_time += 0.05
if initialization_wait_time > self.acquisition_timeout_s:
                    raise ValueError('Pre-acquisition isReadyForSequence timeout for %s' % hardware_controller_name)
# Tell the hardware controller about the acquisition time sequence
if len(hardware_controller.state_sequence) == len(self.time_sequence_s):
hardware_controller.time_sequence_s = [self.time_sequence_s[i] for i in self.frames_to_acquire]
else:
hardware_controller.time_sequence_s = [
[self.hardware_controller_list['camera'].getExposure()]] * self.n_frames_to_acquire
# Set up triggering for hardware acquision
self.hardware_controller_list['illumination'].trigger_output_settings = trigger_output_settings
self.hardware_controller_list['illumination'].trigger_input_settings = trigger_input_settings
# Determine which sequences get preloaded
if all_frames_will_be_preloaded: # One system state per acquisition
frame_preload_sequence = [-1] # Preload all frames at once
else:
frame_preload_sequence = range(self.n_frames_to_acquire) # Preload each frame serially
# Loop over frames to capture (may only execute once if we're preloading all frames)
for preload_index in frame_preload_sequence:
# Loop over hardware controllers, preload, and determine necessary exposure time (if using inter-frame state changes)
for hardware_controller_name in self.hardware_controller_list:
# If we're using the motion stage, calculate the mechanical delay
if hardware_controller_name == 'position':
# Get velocity and acceleration from state sequence
if preload_index == -1:
index = 0
else:
index = preload_index
velocity = self.hardware_controller_list[hardware_controller_name].state_sequence[0]['common']['velocity']
acceleration = self.hardware_controller_list[hardware_controller_name].acceleration
jerk = self.hardware_controller_list[hardware_controller_name].jerk
# Calculate spin-up time and distance
# http://www.wolframalpha.com/input/?i=v+%3D+t+*+(a+%2B+0.5*j+*+t)+solve+for+t
# http://www.wolframalpha.com/input/?i=v+%3D+t+*+(a+%2B+(1%2F8)*j+*+t)+solve+for+t
# Good reference:
# http://www.et.byu.edu/~ered/ME537/Notes/Ch5.pdf
# Total period
if False:
# First period (acceleration of acceleration)
t_1 = acceleration / jerk
# x_1 = 1/6 * jerk * t_1 ** 3
x_1 = acceleration ** 2 / (6 * jerk) * t_1
# v_1 = 1/2 * jerk * t_1 ** 2
v_1 = acceleration ** 2 / (2 * jerk)
# Second period (linear region)
dv = velocity - 2 * v_1
assert dv > 0
t_2 = dv / acceleration
x_2 = v_1 * t_2 + 1/2 * acceleration * t_2 ** 2
v_2 = velocity - v_1
                        # Third period (deceleration of acceleration)
t_3 = acceleration / jerk
x_3 = (v_2 + acceleration ** 2 / (3 * jerk)) * t_3
v_3 = v_1
# Calculate spin-up distance and time
spin_up_time_s = t_1 + t_2 + t_3
spin_up_distance_mm = x_1 + x_2 + x_3
assert (v_1 + v_2 + v_3 - velocity) < 1e-1, "Calculated velocity is %.4f, desired is %.4f" % (v_1 + v_2 + v_3, velocity)
else:
spin_up_time_s = velocity / acceleration
spin_up_distance_mm = 1/2 * acceleration * spin_up_time_s ** 2
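                        # Worked check with assumed values: v = 10 mm/s, a = 100 mm/s^2
                        # -> spin_up_time_s = 10 / 100 = 0.1 s
                        # -> spin_up_distance_mm = 0.5 * 100 * 0.1**2 = 0.5 mm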
# Add extra spin_up time
spin_up_time_s += self.extra_run_up_time_s
spin_up_distance_mm += self.extra_run_up_time_s * velocity
# spin_up_distance_mm = 0
spin_up_time_s = max(spin_up_time_s, 0.0001)
self.hardware_controller_list['illumination'].setupTriggering(self.hardware_controller_list['position'].trigger_pin, int(
                        self.hardware_controller_list['position'].trigger_pulse_width_us), int(spin_up_time_s * 1e6))  # convert spin-up time to microseconds
                    # Tell motion stage to offset its positions by these amounts
self.hardware_controller_list['position'].preload_run_up_distance_mm = spin_up_distance_mm
else:
# no delay for other components
self.hardware_controller_list[hardware_controller_name].trigger_start_delay_s = 0
                if hardware_controller_name != 'camera' and self.hardware_controller_list[hardware_controller_name].state_sequence is not None:
                    if hardware_controller_name != 'illumination' or linear_segment_index == 0:
if hardware_controller_name == 'illumination' and self.reuse_illumination_sequence:
self.hardware_controller_list[hardware_controller_name].preloadSequence(0)
else:
state_sequence_used = [
self.hardware_controller_list[hardware_controller_name].state_sequence[i] for i in self.frames_to_acquire]
self.hardware_controller_list[hardware_controller_name].preloadSequence(
preload_index, state_sequence=state_sequence_used)
if preload_index < 0 or self.reuse_illumination_sequence:
frames_to_wait_for = self.n_frames_to_acquire # wait for all frames
else:
frames_to_wait_for = 1
# Set trigger frame time based on first pathway TODO: This is a hack
if 'position' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].trigger_frame_time_s[self.hardware_controller_list['camera']
.trigger_pin] = self.hardware_controller_list['position'].state_sequence[0]['common']['frame_time'] * 1e6
# Tell stage to start moving
self.hardware_controller_list['position'].runSequence()
if linear_segment_index == 0:
t_start = time.time()
# Tell illumination to start moving
if self.reuse_illumination_sequence:
self.hardware_controller_list['illumination'].runSequence(
n_acquisitions=1 * self.n_frames_to_acquire)
else:
self.hardware_controller_list['illumination'].runSequence(n_acquisitions=1)
# Wait for frames to be captured
t_frame = time.time()
frame_count = 0
while frame_count < frames_to_wait_for:
if self.total_frame_count + frame_count == frames_to_wait_for:
break
else:
if self.total_frame_count + frame_count == self.hardware_controller_list['camera'].getBufferSizeFrames():
time.sleep(0.01)
if (time.time() - t_frame) > self.acquisition_timeout_s:
print(self.hardware_controller_list['illumination'].response())
raise ValueError('Acquisition timeout (Total frame count: %d, Buffer size: %d, preload index %d, frames to wait for: %d)' % (
self.total_frame_count, self.hardware_controller_list['camera'].getBufferSizeFrames(), preload_index, frames_to_wait_for))
else:
if ((self.total_frame_count + frame_count) % int((self.n_frames) / min(10, self.n_frames_to_acquire))) == 0:
print('Acquired %d of %d frames' % (
self.hardware_controller_list['camera'].getBufferSizeFrames(), self.n_frames_to_acquire))
frame_count = self.hardware_controller_list['camera'].getBufferSizeFrames(
) - self.total_frame_count
self.total_frame_count = self.hardware_controller_list['camera'].getBufferSizeFrames()
t_frame = time.time()
# Get sequence timing information
time.sleep(0.1)
print(self.hardware_controller_list['illumination'].response())
# Wait for hardware to stop
for hardware_controller_name in self.hardware_controller_list:
while not self.hardware_controller_list[hardware_controller_name].isReadyForSequence():
time.sleep(0.05)
self.sequence_timing_dict = {}
# Reset sequences
for hardware_controller_name in self.hardware_controller_list:
            if hardware_controller_name != 'camera':
self.hardware_controller_list[hardware_controller_name].sequenceReset()
# Let user know we're finished
print('Finished linear segment %d' % linear_segment_index)
time.sleep(self.segment_delay_s)
t_acq = time.time() - t_start
self.metadata.acquisition_time_s = t_acq
print("Acquisition took %.4f seconds" % (t_acq))
# Call post-acquire functions
dataset = self.postAcquire(dataset=dataset, reset_devices=reset_devices)
# Return
return dataset
def postAcquire(self, dataset=None, reset_devices=True):
"""Post-acquisition steps for resetting hardware and preparing dataset."""
# Stop acquisition
# self.hardware_controller_list['camera'].sequenceStop()
# Parse dataset
if dataset is None:
from htdeblur.mddataset import MotionDeblurDataset
dataset = MotionDeblurDataset()
# Read frames and timestamps from buffer
(self.frame_list, elapsed_frame_time_ms) = self.hardware_controller_list['camera'].readFramesFromBuffer()
# Apply simple geometric transformations
if self.metadata.camera.transpose:
self.frame_list = self.frame_list.transpose(0, 2, 1)
if self.metadata.camera.flip_x:
self.frame_list = np.flip(self.frame_list, 2)
if self.metadata.camera.flip_y:
self.frame_list = np.flip(self.frame_list, 1)
# Let user know we're finished
print('Read frames from buffer.')
# Store camera timing in a standardized timing dict
self.sequence_timing_dict = {}
self.sequence_timing_dict['sequence_timing'] = []
for frame_index, frame_time in enumerate(elapsed_frame_time_ms):
timing_dict = {'trigger_number' : 0, 'acquisition_number' : frame_index, 'camera_start_time_us' : frame_time * 1000}
self.sequence_timing_dict['sequence_timing'].append(timing_dict)
# Reset all hardware elements
if reset_devices:
for hardware_controller_name in self.hardware_controller_list:
self.hardware_controller_list[hardware_controller_name].reset()
if self.only_store_first_and_last_position:
for frame_state in self.frame_state_list[1:]:
frame_state['position']['states'] = [frame_state['position']['states'][0], frame_state['position']['states'][-1]]
# Remove repeated illumination patterns and time_sequence_s if we used the same illumination for each pulse
if self.reuse_illumination_sequence:
for frame_state in self.frame_state_list[1:]:
frame_state['time_sequence_s'] = 'see_frame_#1'
frame_state['illumination'] = 'see_frame_#1'
        # Illuminate with brightfield to indicate we're finished
self.hardware_controller_list['illumination'].bf()
self.hardware_controller_list['position'].goToPosition((0,0))
        # Save results to a Dataset object
dataset.frame_list = self.frame_list
dataset.frame_state_list = self.frame_state_list
dataset.metadata = self.metadata
dataset.type = 'motion_deblur'
# Return
return dataset
def genMotionPathway(self, n_acquisitions=1, pathway_type='raster', frame_spacing_mm=1.):
'''
This function generates a few example motion pathways.
'''
        if pathway_type == 'raster':
            pathway = self.genMotionPathwayRaster(self.object_size_mm, self.frame_spacing_mm)
        elif (pathway_type == 'linear') or (pathway_type == 'linear_x'):
# predefine linear y sequence
n_frames = int(math.ceil(self.object_size_mm[1] / self.frame_spacing_mm[1]))
pathway = []
for frame_index in range(n_frames):
pathway.append({'x_start': frame_index * self.frame_spacing_mm[1],
'x_end': (frame_index + 1) * self.frame_spacing_mm[1],
'y_start': 0, 'y_end': 0, 'linear_segment_index': 0})
elif pathway_type in ['linear_y']:
# predefine linear y sequence
n_frames = int(np.ceil(self.object_size_mm[0] / self.frame_spacing_mm[0]))
pathway = []
for frame_index in range(n_frames):
pathway.append({'y_start': -frame_index * self.frame_spacing_mm[0],
'y_end': -(frame_index + 1) * self.frame_spacing_mm[0],
'x_start': 0, 'x_end': 0, 'linear_segment_index': 0})
        elif pathway_type == 'linear_diag':
# predefine linear y sequence
n_frames = int(np.ceil(self.object_size_mm[0] / self.frame_spacing_mm[0]))
pathway = []
for frame_index in range(n_frames):
pathway.append({'y_start': frame_index * self.frame_spacing_mm[0],
'y_end': (frame_index + 1) * self.frame_spacing_mm[0],
'x_start': frame_index * self.frame_spacing_mm[0],
'x_end': (frame_index + 1) * self.frame_spacing_mm[0],
'linear_segment_index': 0})
else:
raise ValueError('Pathway type %s is not implemented.' % pathway_type)
# make the center the mean of the pathway
path_xmin = 1e8
path_ymin = 1e8
path_xmax = -1e8
path_ymax = -1e8
for path in pathway:
path_xmin = min(path_xmin, min([path['x_start'], path['x_end']]))
path_xmax = max(path_xmax, max([path['x_start'], path['x_end']]))
path_ymin = min(path_ymin, min([path['y_start'], path['y_end']]))
path_ymax = max(path_ymax, max([path['y_start'], path['y_end']]))
mean = ((path_ymax + path_ymin) / 2, (path_xmax + path_xmin) / 2)
for path in pathway:
path['x_start'] = path['x_start'] - mean[1]
path['x_end'] = path['x_end'] - mean[1]
path['y_start'] = path['y_start'] - mean[0]
path['y_end'] = path['y_end'] - mean[0]
# Flip pathway if user desired
if self.flip_pathway:
for path in pathway:
path['x_start'] *= -1
path['x_end'] *= -1
path['y_start'] *= -1
path['y_end'] *= -1
position_state_list = []
time_sequence_s = []
# Write image sequence to list
for acquisition_index in range(n_acquisitions):
# Loop over DPC patterns (frames)
for frame_index, position in enumerate(pathway):
# define distance in terms of l1 or l2 distance
distance_l2 = float(np.sqrt((position['x_end'] - position['x_start'])
** 2 + (position['y_end'] - position['y_start']) ** 2))
distance_l1 = float(abs(position['x_end'] - position['x_start']) +
abs(position['y_end'] - position['y_start']))
if self.use_l1_distance_for_motion_calculations:
position['frame_distance'] = int(round(distance_l1 * 1000)) / 1000 # round to nearest um
else:
position['frame_distance'] = int(round(distance_l2 * 1000)) / 1000 # round to nearest um
                # Determine number of quantifiable positions in pathway
position['n_blur_positions_frame'] = int(
math.floor(position['frame_distance'] / (self.metadata.system.eff_pixel_size_um / 1000)))
# Determine necessary velocity
if self.velocity_mm_s is not None:
position['velocity_mm_s'] = self.velocity_mm_s
else:
position['velocity_mm_s'] = self.max_velocity_mm_s # Use fastest speed possible
# Calculate time between frames
position['frame_time_s'] = position['frame_distance'] / position['velocity_mm_s'] # t = x / v
# Determine camera exposure time for this frame
position['exposure_time_s'] = int(math.floor((self.hardware_controller_list['camera'].calcExposureTimeFromBusyTime(
position['frame_time_s']) - self.exposure_time_pad_s) * 1000)) / 1000 # round to nearest ms
# Determine LED update rate
dx_pixel = position['frame_distance'] / position['n_blur_positions_frame']
dt_pixel_raw = dx_pixel / position['velocity_mm_s']
position['led_update_rate_us'] = math.ceil(dt_pixel_raw * 1e6) # Round up to integer us
# Determine new velocity (ps / update rate)
new_velocity_mm_s = (self.metadata.system.eff_pixel_size_um / 1e3) / (position['led_update_rate_us'] / 1e6)
if self.debug > 0:
print('Reducing velocity to %.4f mm/s from %.4f mm/s to match illumination update rate of %d us' % (new_velocity_mm_s, position['velocity_mm_s'], position['led_update_rate_us']))
position['velocity_mm_s'] = new_velocity_mm_s
# Update frame time based on velocity
position['frame_time_s'] = position['frame_distance'] / position['velocity_mm_s']
# Determine number of pixels in exposure time
position['n_blur_positions_exposure'] = math.floor(position['exposure_time_s'] / (position['led_update_rate_us'] / 1e6))
# Determine the distance traveled during the exposure time
position['exposure_distance'] = position['n_blur_positions_exposure'] * position['led_update_rate_us'] / 1e6 * position['velocity_mm_s']
# Store acceleration
position['acceleration_mm_s_2'] = self.motion_acceleration_mm_s_2
# Print information about this pattern
if self.debug > 0:
print('Segment %d, index %d will require %d blur positions per frame (%d during exposure), %.2fms exposure time (%.2fms total frame time), scan %.2fmm (%.2fmm with exposure), move at %.2fmm/s, and update speed %dus' %
(position['linear_segment_index'], frame_index, position['n_blur_positions_frame'],position['n_blur_positions_exposure'], 1000. * position['exposure_time_s'], 1000. * position['frame_time_s'], position['frame_distance'], position['exposure_distance'], position['velocity_mm_s'], position['led_update_rate_us']))
# Check that all blur parameters are valid
assert position['led_update_rate_us'] >= self.max_led_update_rate_us, "LED Array update rate (%dms) < max update rate (%dms)" % (
position['led_update_rate_us'], self.max_led_update_rate_us)
assert position['exposure_time_s'] <= self.max_exposure_time_s, "Exposure time (%.3fs) > max_exposure_time_s (%.3f)" % (
position['exposure_time_s'], self.max_exposure_time_s)
assert position['velocity_mm_s'] <= self.max_velocity_mm_s, "Velocity (%.3fs) > max_velocity_mm_s (%.3f)" % (
position['velocity_mm_s'], self.max_velocity_mm_s)
# List for this positions
single_frame_state_list_position = []
single_frame_time_sequence_s = []
# Determine movement direction
direction = np.asarray((position['y_end'] - position['y_start'],
position['x_end'] - position['x_start']))
direction /= np.linalg.norm(direction)
# Store common information about this frame
common_state_dict = {}
common_state_dict['frame_time'] = position['frame_time_s']
common_state_dict['led_update_rate_us'] = position['led_update_rate_us']
common_state_dict['linear_segment_index'] = position['linear_segment_index']
common_state_dict['frame_distance'] = position['frame_distance']
common_state_dict['exposure_distance'] = position['exposure_distance']
common_state_dict['velocity'] = position['velocity_mm_s']
common_state_dict['acceleration'] = position['acceleration_mm_s_2']
common_state_dict['n_blur_positions_exposure'] = position['n_blur_positions_exposure']
common_state_dict['position_delta_x_mm'] = direction[1] * position['velocity_mm_s'] * position['led_update_rate_us'] / 1e6
common_state_dict['position_delta_y_mm'] = direction[0] * position['velocity_mm_s'] * position['led_update_rate_us'] / 1e6
                # Loop over time points (irrelevant for DPC)
for time_index in range(position['n_blur_positions_exposure']):
time_point_state_list = []
                    x = position['x_start'] + direction[1] * abs(common_state_dict['position_delta_x_mm']) * time_index
                    y = position['y_start'] + direction[0] * abs(common_state_dict['position_delta_y_mm']) * time_index
# Append this to list with elements for each interframe time point
time_point_state_list.append({'time_index': time_index,
'value': {'x': x, 'y': y}})
# Append to frame_dict
single_frame_state_list_position.append(time_point_state_list)
single_frame_time_sequence_s.append((time_index + 1) * position['led_update_rate_us'] / 1e6)
# Define illumination sequence
position_state_list.append({'states' : single_frame_state_list_position, 'common' : common_state_dict})
# Define time_sequence
time_sequence_s.append(single_frame_time_sequence_s)
# for state in position_state_list:
# print(state['states'][0][0]['value']['x'] - state['states'][-1][0]['value']['x'])
return (position_state_list, time_sequence_s)
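    # Worked units check for the LED-rate/velocity matching above (assumed values):
    # eff_pixel_size_um = 0.5 and v = 10 mm/s give dt_pixel_raw = 0.0005 mm / (10 mm/s) = 50 us,
    # so led_update_rate_us = ceil(50) = 50 and the re-derived velocity stays at 10 mm/s.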
def genMotionPathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=None, include_minor_axis=False):
# Hard-code major axis since the rest of the code doesn't respect it for now
_major_axis = 1
        # Determine number of measurements
        measurement_count = np.ceil(np.asarray(object_size_mm) / np.asarray(frame_spacing_mm)).astype(int)  # two components in x and y
# Error checking
assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
# If number of measurements along major axis is odd, center this row
offset = [0, 0]
offset[_major_axis] -= frame_spacing_mm[_major_axis] / 2
# Generate raster points
raster_end_point_list = []
pathway = []
linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning
for row in np.arange(measurement_count[0]):
if row % 2 == 0:
for index, col in enumerate(range(measurement_count[1])):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col + offset[1],
'y_start': frame_spacing_mm[0] * row + offset[0],
'x_end': frame_spacing_mm[1] * (col + 1) + offset[1],
'y_end': frame_spacing_mm[0] * row + offset[0],
'linear_segment_index': linear_segment_index})
# Add minor stride
if row < (measurement_count[0] - 1) and include_minor_axis:
pathway.append({'x_start': frame_spacing_mm[1] * (measurement_count[1] - 1) + offset[1],
'y_start': frame_spacing_mm[0] * row + offset[0],
'x_end': frame_spacing_mm[1] * (measurement_count[1] - 1) + offset[1],
'y_end': frame_spacing_mm[0] * (row + 1) + offset[0],
'linear_segment_index': -1 * (linear_segment_index + 1)})
else:
for index, col in enumerate(reversed(range(measurement_count[1]))):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col - offset[1],
'y_start': frame_spacing_mm[0] * row - offset[0],
'x_end': frame_spacing_mm[1] * (col - 1) - offset[1],
'y_end': frame_spacing_mm[0] * row - offset[0],
'linear_segment_index': linear_segment_index})
# Add minor stride
if row < (measurement_count[0] - 1) and include_minor_axis:
pathway.append({'x_start': - offset[1],
'y_start': frame_spacing_mm[0] * row - offset[0],
'x_end': 0 - offset[1],
'y_end': frame_spacing_mm[0] * (row + 1) - offset[0],
'linear_segment_index': -1 * (linear_segment_index + 1)})
linear_segment_index += 1
print('Generated motion pathway with %d linear segments' % (linear_segment_index))
return pathway
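    # Shape check with assumed sizes: object_size_mm=(2, 3) and frame_spacing_mm=(1, 1)
    # give measurement_count = [2, 3]: even rows sweep x forward and odd rows sweep back
    # (a serpentine raster), with a half-frame offset applied along the major axis.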
def plotPathway(self):
sequence_list = self.hardware_controller_list['position'].state_sequence
point_list_start = []
point_list_end = []
for sequence in sequence_list:
start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y'])
end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y'])
point_list_start.append(start_pos)
point_list_end.append(end_pos)
point_list_start = np.asarray(point_list_start)
point_list_end = np.asarray(point_list_end)
plt.figure()
for index in range(len(point_list_start)):
plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b')
plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r')
plt.plot([point_list_start[index, 0], point_list_end[index, 0]],
[point_list_start[index, 1], point_list_end[index, 1]], c='y')
plt.xlabel('Position X (mm)')
plt.ylabel('Position Y (mm)')
plt.title('Pathway (b is start, y/o is end)')
plt.gca().invert_yaxis()
def genMotionIlluminationSequenceRandom(self, sequence_count=1,
illumination_sequence=None):
led_list = np.arange(self.metadata.illumination.state_list.design.shape[0])
bf_mask = self.metadata.illumination.state_list.design[:, 0] ** 2 \
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (
self.metadata.objective.na + self.illumination_na_pad) ** 2
illumination_state_list = []
linear_segments_processed = {}
# Loop over DPC patterns (frames)
for frame_index, frame_position_dict in enumerate(self.hardware_controller_list['position'].state_sequence):
frame_position_list = frame_position_dict['states']
# Get number of positions in blur kernel from this frame. Divide into subsequences
pattern_count = len(frame_position_list) // sequence_count
# Determine the number of non-zero illumination positions
pattern_count_used = int(round(pattern_count * self.saturation_factor))
# Place patterns at the END of the full sequence
pattern_count_start = 0
# Get linear segment index
current_segment_index = frame_position_dict['common']['linear_segment_index']
if not self.reuse_illumination_sequence or frame_index == 0:
blur_vector_full = []
# Generate several blur vectors
for _ in range(sequence_count):
# Use provided illumination seqence if given
if illumination_sequence:
blur_vector = illumination_sequence
else:
blur_vector = np.zeros(pattern_count)
# Generate blur vector
blur_vector = np.zeros(pattern_count)
if self.blur_vector_method == 'strobe':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start + pattern_count_used // 2] = 1
elif self.blur_vector_method == 'center':
blur_vector = np.zeros(pattern_count)
# Determine distance traveled within this frame (including readout time)
                        frame_pixel_count = round(frame_position_dict['common']['frame_distance'] / (self.metadata.system.eff_pixel_size_um / 1000))
                        exposure_pixel_count = round(frame_position_dict['common']['exposure_distance'] / (self.metadata.system.eff_pixel_size_um / 1000))
if not frame_pixel_count // 2 < exposure_pixel_count:
print("WARNING: Camera will not expose during center flash (%d pixels, %d pixels used of %d pixels total)" % (frame_pixel_count // 2, exposure_pixel_count, pattern_count))
blur_vector[pattern_count_used] = 1
else:
# Set center position to be on
blur_vector[frame_pixel_count // 2] = 1
elif self.blur_vector_method == 'start_end':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + pattern_count_used - 1] = 1
elif self.blur_vector_method == 'start_middle_end':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + pattern_count_used // 2] = 1
blur_vector[pattern_count_start + pattern_count_used - 1] = 1
elif self.blur_vector_method == 'tens':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + 10] = 1
blur_vector[pattern_count_start + 20] = 1
blur_vector[pattern_count_start + 30] = 1
blur_vector[pattern_count_start + 40] = 1
elif self.blur_vector_method == 'twenties':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start + 0] = 1
blur_vector[pattern_count_start + 20] = 1
blur_vector[pattern_count_start + 40] = 1
blur_vector[pattern_count_start + 60] = 1
blur_vector[pattern_count_start + 80] = 1
blur_vector[pattern_count_start + 100] = 1
blur_vector[pattern_count_start + 120] = 1
blur_vector[pattern_count_start + 140] = 1
blur_vector[pattern_count_start + 160] = 1
blur_vector[pattern_count_start + 180] = 1
elif self.blur_vector_method == 'quarters':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + pattern_count_used // 4] = 1
blur_vector[pattern_count_start + pattern_count_used // 2] = 1
blur_vector[pattern_count_start + pattern_count_used // 2 + pattern_count_used // 4] = 1
blur_vector[pattern_count_start + pattern_count_used - 1] = 1
elif self.blur_vector_method == 'random':
blur_vector[pattern_count_start:pattern_count_start +
pattern_count_used] = np.random.rand(pattern_count_used)
elif self.blur_vector_method == 'constant':
blur_vector[pattern_count_start:pattern_count_start +
pattern_count_used] = np.ones(pattern_count_used)
elif self.blur_vector_method in ['coded', 'pseudo_random']:
if self.kernel_pulse_count is not None:
pulse_count = self.kernel_pulse_count
else:
pulse_count = pattern_count_used // 2
from htdeblur import blurkernel
blur_vector_tmp, kappa = blurkernel.vector(pulse_count, kernel_length=pattern_count_used)
blur_vector[pattern_count_start:pattern_count_start + pattern_count_used] = blur_vector_tmp
else:
raise ValueError('Invalid blur kernel method: %s' % self.blur_vector_method)
# Append to blur_vector_full
blur_vector_full += list(blur_vector)
# Ensure the pattern is the correct length
if len(blur_vector_full) < len(frame_position_list):
blur_vector_full += [0] * (len(frame_position_list) - len(blur_vector_full))
elif len(blur_vector_full) > len(frame_position_list):
raise ValueError
# Assign
linear_segments_processed[str(frame_index)] = blur_vector_full
else:
blur_vector_full = linear_segments_processed['0']
single_frame_state_list_illumination = []
            # Loop over time points (irrelevant for DPC)
for time_index, illumination_value in enumerate(blur_vector_full):
time_point_state_list = []
# Loop over DPC patterns (which are themselves frames)
# for led_number in led_list[bf_mask]:
led_number = -1
values_dict = {}
for color_name in self.illumination_color:
values_dict[color_name] = self.illumination_color[color_name] * illumination_value
led_dict = {
'index': int(led_number),
'time_index': time_index,
'value': values_dict
}
# Append this to list with elements for each interframe time point
time_point_state_list.append(led_dict)
# Append to frame_dict
single_frame_state_list_illumination.append(time_point_state_list)
# Define illumination sequence
illumination_state_list.append({'states' : single_frame_state_list_illumination, 'common' : {}})
return(illumination_state_list)
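    # Note on blur_vector_method values handled above: 'strobe', 'center', 'start_end',
    # 'start_middle_end', 'tens', 'twenties', 'quarters', 'random', 'constant', and
    # 'coded'/'pseudo_random' (the last two draw a coded kernel from htdeblur.blurkernel).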
| 54.284828 | 757 | 0.612656 | 76,891 | 0.976853 | 0 | 0 | 0 | 0 | 0 | 0 | 19,483 | 0.247519 |
c2094cbd00b0292a602f2ea788a9486c162b5e7e | 2,053 | py | Python | leetcode/weekly150/last_substring.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | ["Unlicense"] | 2 | 2018-01-18T11:01:36.000Z | 2021-12-20T18:14:48.000Z | leetcode/weekly150/last_substring.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | ["Unlicense"] | null | null | null | leetcode/weekly150/last_substring.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | ["Unlicense"] | null | null | null |
'''
https://leetcode.com/contest/weekly-contest-150/problems/last-substring-in-lexicographical-order/
SA algorithm mostly copied from https://cp-algorithms.com/string/suffix-array.html
Status: TLE (time limit exceeded) -- most likely Python list overhead rather than the algorithm itself
'''
class SuffixArray:
def __init__(self, s):
self.s = s
self.n = len(s)
self.p = [0] * self.n
self.c = [0] * self.n
c = self.preprocess()
self.process(c)
def preprocess(self):
counter = [0] * 260
for c in self.s:
counter[ord(c)] += 1
for i in range(1, len(counter)):
counter[i] += counter[i - 1]
for i in range(self.n):
c = ord(self.s[i])
counter[c] -= 1
self.p[counter[c]] = i
c = 0
self.c[0] = c
for i in range(1, self.n):
if self.s[self.p[i]] != self.s[self.p[i - 1]]:
c += 1
self.c[self.p[i]] = c
return c + 1
def process(self, c):
cn = [0] * self.n
i = 0
pn = [0] * self.n
while (1 << i) < self.n:
for j in range(self.n):
pn[j] = self.p[j] - (1 << i)
if pn[j] < 0: pn[j] += self.n
counter = [0] * c
for j in range(self.n):
counter[self.c[pn[j]]] += 1
for j in range(1, c):
counter[j] += counter[j - 1]
for j in range(self.n - 1, -1, -1):
counter[self.c[pn[j]]] -= 1
self.p[counter[self.c[pn[j]]]] = pn[j]
cn[self.p[0]] = 0
c = 1
for j in range(1, self.n):
a = [self.c[self.p[j]], self.c[(self.p[j] + (1 << i)) % self.n]]
b = [self.c[self.p[j - 1]], self.c[(self.p[j - 1] + (1 << i)) % self.n]]
if a != b: c += 1
cn[self.p[j]] = c - 1
self.c, cn = cn, self.c
i += 1
class Solution:
def lastSubstring(self, s: str) -> str:
sa = SuffixArray(s)
return s[sa.p[-1]:]
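# Minimal local check (hedged; LeetCode invokes lastSubstring() directly):
#   Solution().lastSubstring("leetcode")  # -> "tcode"
# Note: the sort is over cyclic rotations (no sentinel is appended), so inputs whose
# rotations tie, e.g. "abab", deserve a separate test.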
| 31.584615 | 97 | 0.431076 | 1,827 | 0.889917 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.107647 |
c20c7d9e299f07af3208c0a8aedd483571769bbb | 18,555 | py | Python | schemagen/schemagen.py | GoZaddy/SchemaGen | c8374382f6b52ad3cec398c77fd5bc90fe891818 | ["MIT"] | 3 | 2021-03-26T22:51:41.000Z | 2021-03-27T15:17:24.000Z | schemagen/schemagen.py | GoZaddy/SchemaGen | c8374382f6b52ad3cec398c77fd5bc90fe891818 | ["MIT"] | null | null | null | schemagen/schemagen.py | GoZaddy/SchemaGen | c8374382f6b52ad3cec398c77fd5bc90fe891818 | ["MIT"] | null | null | null |
from antlr4 import *
from .antlr import GraphQLLexer, GraphQLListener, GraphQLParser
from .codegen import CodegenTool, Class, String, ClassInstance, IfElse, If, Method, Expr, Variable
import re
from math import floor
from datetime import datetime
from .utils import strip_string_quotes, camel_case_to_snake_case, process_input_value_definition
from .errors import ParsingError
GraphQLParser = GraphQLParser.GraphQLParser
graphene = 'graphene'
built_in_scalars = [
'Int',
'Float',
'String',
'Boolean',
'ID',
'Date',
'Datetime',
    'Time',
'Decimal',
'JSONString',
'Base64',
]
class SchemaGen(GraphQLListener.GraphQLListener):
"""
SchemaGen is the entry point through which the package is used.
Attributes:
input_file: a string containing the name of the GraphQL schema file
output_file: an optional string containing the name of the file to which the result of the code generation should be written to.
"""
def __init__(self, input_file: str, output_file: str = None):
if output_file is None:
output_file = input_file.split(sep='.')[0] + '_' + str(floor(datetime.now().timestamp())) + '.py'
        is_valid_file_name = re.match(r"\w+\.py$", output_file)
if is_valid_file_name is None:
raise Exception('File is not a python file')
self.output_file = output_file
self.input_file = input_file
self.codegen = CodegenTool(output_file=self.output_file)
super().__init__()
def enterTypeDefinition(self, ctx: GraphQLParser.TypeDefinitionContext):
for child in ctx.children:
# type definition is for an Object Type Definition
if isinstance(child, GraphQLParser.ObjectTypeDefinitionContext) or isinstance(child,
GraphQLParser.InterfaceTypeDefinitionContext):
is_object_type = isinstance(child, GraphQLParser.ObjectTypeDefinitionContext)
is_interface = isinstance(child, GraphQLParser.InterfaceTypeDefinitionContext)
type_class = Class(name=child.name().getText(), add_init_method=False)
if is_object_type:
type_class.base_class = "ObjectType"
elif is_interface:
type_class.base_class = "Interface"
is_mutation = False
if type_class.name == 'Mutation':
is_mutation = True
is_object_type = False
meta_class = Class(name='meta')
# create map for methods to be resolved
methods_to_be_resolved = {}
# get type description
desc = child.description()
if desc:
meta_class.add_class_variable('description', String(strip_string_quotes(desc.getText())))
# get implemented interfaces
if is_object_type or is_mutation:
if child.implementsInterfaces() is not None:
interfaces = child.implementsInterfaces().getText().split(sep='implements')
interfaces = interfaces[1].split(sep='&')
interface_string = ''
for i in interfaces:
interface_string = interface_string + i + ','
meta_class.add_class_variable('interfaces', f"({interface_string})")
# get fields of the ObjectType or Interface
if child.fieldsDefinition():
fields = child.fieldsDefinition().fields
if not is_mutation:
for field in fields:
# get field name and type
field_name = camel_case_to_snake_case(field.name().getText())
field_type = field.type_().getText()
field_required = False
# get field description
field_desc = field.description()
if field_desc is not None:
field_desc = String(strip_string_quotes(field_desc.getText()))
else:
field_desc = ''
if is_interface:
if field_type.lower() == type_class.name.lower():
field_type = 'lambda: ' + field_type
# if field is a required field
if field_type[len(field_type) - 1] == '!':
field_required = True
field_code = ClassInstance('Field', field_type[:-1], required=True)
else:
field_code = ClassInstance('Field', field_type)
# if field is a list type
if field.type_().listType() is not None:
list_type_named_type = field.type_().listType().type_().getText()
if is_interface:
if list_type_named_type.lower() == type_class.name.lower():
list_type_named_type = 'lambda: ' + list_type_named_type
if list_type_named_type[len(list_type_named_type) - 1] == '!':
field_code = ClassInstance('List',
str(ClassInstance('NonNull', list_type_named_type[:-1])),
required=field_required)
else:
field_code = ClassInstance('List', list_type_named_type, required=field_required)
# get field arguments
if is_object_type:
args = field.argumentsDefinition()
args_string = []
if args is not None:
args = args.args
for arg in args:
# add info to method_to_be_resolved map
if field_name not in methods_to_be_resolved:
methods_to_be_resolved[field_name] = [arg.name().getText()]
else:
methods_to_be_resolved[field_name].append(arg.name().getText())
processed_arg = process_input_value_definition(arg)
args_string.append(
f"{String(processed_arg['name'])}: {str(processed_arg['arg_impl'])}")
field_code.add_kwarg('args', "{" + ', '.join(args_string) + "}")
if field_desc != '':
field_code.add_kwarg(key='description', value=field_desc)
type_class.class_variables[field_name] = str(field_code)
else:
for field in fields:
# get field name and type
field_name = camel_case_to_snake_case(field.name().getText())
field_type = field.type_().getText()
field_required = False
field_class = Class(field.name().getText(), add_init_method=False, base_class='Mutation')
argument_class = Class(name='arguments')
# get field description
field_desc = field.description()
if field_desc is not None:
field_desc = String(strip_string_quotes(field_desc.getText()))
else:
field_desc = ''
# if field is a required field
if field_type[len(field_type) - 1] == '!':
field_required = True
field_code = ClassInstance('Field', field_type[:-1], required=True)
else:
field_code = ClassInstance('Field', field_type)
# if field is a list type
if field.type_().listType() is not None:
list_type_named_type = field.type_().listType().type_().getText()
if list_type_named_type[len(list_type_named_type) - 1] == '!':
field_code = ClassInstance('List',
str(ClassInstance('NonNull', list_type_named_type[:-1])),
required=field_required)
else:
field_code = ClassInstance('List', list_type_named_type, required=field_required)
# get field arguments
args = field.argumentsDefinition()
arg_list = []
if args is not None:
args = args.args
for arg in args:
processed_arg = process_input_value_definition(arg)
argument_class.add_class_variable(processed_arg['name'],
str(processed_arg['arg_impl']))
arg_list.append(processed_arg['name'])
field_class.add_sub_class(argument_class)
field_class.add_method(
method=Method(
name='mutate',
arguments=['root', 'info'] + arg_list
)
)
if field_desc != '':
field_code.add_kwarg(key='description', value=field_desc)
# write mutation classes for the mutation's fields
self.codegen.write_class(field_class)
type_class.class_variables[field_name] = str(field_code)
# add resolver methods
if not is_mutation:
for method in methods_to_be_resolved:
type_class.add_method(method_name='resolve_' + method,
arguments_names=['info'] + methods_to_be_resolved[method])
if type_class.name == 'Query':
for var in type_class.class_variables:
if var not in methods_to_be_resolved:
type_class.add_method(method_name='resolve_' + var, arguments_names=['info'])
if len(meta_class.class_variables) != 0:
type_class.add_sub_class(meta_class)
self.codegen.write_class(type_class)
# type definition is for an EnumTypeDefinition
elif isinstance(child, GraphQLParser.EnumTypeDefinitionContext):
enum_class = Class(name=child.name().getText(), base_class="Enum", add_init_method=False)
meta_class = Class(name='meta')
# get enum description
desc = child.description()
if desc:
meta_class.add_class_variable('description', String(strip_string_quotes(desc.getText())))
# get fields of the Enum
fields = child.enumValuesDefinition().fields
fields_and_desc = {}
for field in fields:
# get field name and type
enum_value = field.enumValue().getText()
# get enum description
field_desc = field.description()
if field_desc is not None:
field_desc = String(strip_string_quotes(field_desc.getText()))
else:
field_desc = ''
if field_desc != '':
# do something
fields_and_desc[enum_value] = field_desc
# add enums as class variables to main class
enum_class.add_class_variable(enum_value, String(enum_value))
if fields_and_desc:
# add enums description
method = Method(
name='description',
decorators=['@property'],
arguments=[]
)
if_else = IfElse(
indent_level=method.get_indent_level() + 1,
else_action=[Expr("pass")],
)
for i in fields_and_desc:
if_else.add_elif(If(
expr=Expr(f"self == {enum_class.name}.{i}"),
action=[Expr(f"return {fields_and_desc[i]}")]
))
method.set_body([if_else])
enum_class.add_method(method=method)
if len(meta_class.class_variables) != 0:
enum_class.add_sub_class(meta_class)
self.codegen.write_class(enum_class)
# type definition is for an EnumTypeDefinition
elif isinstance(child, GraphQLParser.ScalarTypeDefinitionContext):
if child.name().getText().capitalize() in built_in_scalars:
continue
scalar_class = Class(name=child.name().getText(), base_class="Scalar", add_init_method=False)
desc = child.description()
if desc is not None:
scalar_class.description = strip_string_quotes(desc.getText())
serialize_method = Method(
name='serialize',
arguments=['val'],
decorators=['@staticmethod'],
body=[Expr('# write method body'), Expr('pass')],
is_static=True
)
parse_literal_method = Method(
name='parse_literal',
arguments=['node'],
decorators=['@staticmethod'],
body=[Expr('# write method body'), Expr('pass')],
is_static=True
)
parse_value_method = Method(
name='parse_value',
arguments=['value'],
decorators=['@staticmethod'],
body=[Expr('# write method body'), Expr('pass')],
is_static=True
)
scalar_class.add_method(method=serialize_method)
scalar_class.add_method(method=parse_literal_method)
scalar_class.add_method(method=parse_value_method)
self.codegen.write_class(scalar_class)
elif isinstance(child, GraphQLParser.UnionTypeDefinitionContext):
union_class = Class(name=child.name().getText(), base_class='Union')
meta_class = Class(name='Meta')
unions = child.unionMemberTypes().getText()
if unions[0] == '=':
unions = unions[1:]
unions = ', '.join(unions.split(sep='|'))
meta_class.add_class_variable(variable_name='types', variable_value=f"({unions})")
desc = child.description()
if desc is not None:
meta_class.add_class_variable(variable_name='description',
variable_value=String(strip_string_quotes(desc.getText())))
union_class.add_sub_class(meta_class)
self.codegen.write_class(union_class)
print(unions)
elif isinstance(child, GraphQLParser.InputObjectTypeDefinitionContext):
type_class = Class(name=child.name().getText(), base_class="InputObjectType", add_init_method=False)
meta_class = Class(name='meta')
# get type description
desc = child.description()
if desc:
meta_class.add_class_variable('description', String(strip_string_quotes(desc.getText())))
# get fields
if child.inputFieldsDefinition():
fields = child.inputFieldsDefinition().fields
for field in fields:
processed_ivd = process_input_value_definition(field)
type_class.add_class_variable(processed_ivd['name'], str(processed_ivd['arg_impl']))
if len(meta_class.class_variables) != 0:
type_class.add_sub_class(meta_class)
self.codegen.write_class(type_class)
else:
print(type(child))
def enterSchemaDefinition(self, ctx: GraphQLParser.SchemaDefinitionContext):
schema_obj = ClassInstance('Schema')
fields = ctx.fields
for field in fields:
schema_obj.add_kwarg(strip_string_quotes(field.operationType().getText()),
strip_string_quotes(field.namedType().getText()))
var = Variable(
name='schema',
value=schema_obj
)
self.codegen.write_variable(var)
def __call__(self):
try:
self.codegen.import_package(package=graphene, mode=2, object='*')
input_stream = FileStream(self.input_file)
lexer = GraphQLLexer.GraphQLLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = GraphQLParser(stream)
tree = parser.document()
walker = ParseTreeWalker()
walker.walk(self, tree)
except Exception as err:
raise ParsingError(str(err))
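# Hedged usage sketch (assumes a GraphQL SDL file named "schema.graphql" exists locally):
#   gen = SchemaGen("schema.graphql", output_file="schema_out.py")
#   gen()  # parses the SDL and writes graphene ObjectType/Enum/Scalar/Union classes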
| 46.156716 | 136 | 0.492643 | 17,935 | 0.966586 | 0 | 0 | 0 | 0 | 0 | 0 | 2,159 | 0.116357 |
c20cac9dd66122173bfd30ba53957fea5bb5307b | 2,231 | py | Python | app/api/views.py | rickywang432/flask | c956dee6c7dfbb57a5fcd247d23af37e20b96da7 | ["MIT"] | null | null | null | app/api/views.py | rickywang432/flask | c956dee6c7dfbb57a5fcd247d23af37e20b96da7 | ["MIT"] | 1 | 2021-06-02T02:01:38.000Z | 2021-06-02T02:01:38.000Z | app/api/views.py | rickywang432/flask | c956dee6c7dfbb57a5fcd247d23af37e20b96da7 | ["MIT"] | null | null | null |
from flask import Flask, request, jsonify, Blueprint
from flask_marshmallow import Marshmallow
from app.models import User, Group, Role
from app import ma
api = Blueprint('api', __name__)
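# Hedged registration sketch (normally done in the app factory; module path assumed):
#   from app.api.views import api
#   app.register_blueprint(api, url_prefix='/api')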
class UserSchema(ma.Schema):
class Meta:
# Fields to expose
fields = ('id', 'confirmed','first_name','last_name', 'email', 'active')
user_schema = UserSchema()
users_schema = UserSchema(many=True)
class GroupSchema(ma.Schema):
users = ma.Nested(UserSchema, many=True)
class Meta:
# Fields to expose
fields = ('id', 'name','users')
group_schema = GroupSchema()
groups_schema = GroupSchema(many=True)
class RoleSchema(ma.Schema):
users = ma.Nested(UserSchema, many=True)
class Meta:
# Fields to expose
fields = ('id', 'name','default','permissions','users')
role_schema = RoleSchema()
roles_schema = RoleSchema(many=True)
@api.route("/user", methods=["GET"])
def get_user():
all_users = User.query.all()
result = users_schema.dump(all_users)
return jsonify(result)
# endpoint to get user detail by id
@api.route('/user/<int:id>', methods=["GET"])
def user_detail(id):
user = User.query.get(id)
return user_schema.jsonify(user)
@api.route("/group", methods=["GET"])
def get_group():
all_groups = Group.query.all()
result = groups_schema.dump(all_groups)
return jsonify(result)
# endpoint to get group detail by id
@api.route('/group/<int:id>', methods=["GET"])
def group_detail_id(id):
group = Group.query.get(id)
return group_schema.jsonify(group)
@api.route('/group/<string:name>', methods=["GET"])
def group_detail_name(name):
group = Group.query.filter_by(name=name).first()
return group_schema.jsonify(group)
@api.route("/role", methods=["GET"])
def get_role():
all_roles = Role.query.all()
result = roles_schema.dump(all_roles)
return jsonify(result)
# endpoint to get group detail by id
@api.route('/role/<int:id>', methods=["GET"])
def role_detail_id(id):
role = Role.query.get(id)
return role_schema.jsonify(role)
@api.route('/role/<string:name>', methods=["GET"])
def role_detail_name(name):
role = Role.query.filter_by(name=name).first()
    return role_schema.jsonify(role)
| 26.247059 | 80 | 0.685791 | 491 | 0.220081 | 0 | 0 | 1,217 | 0.545495 | 0 | 0 | 429 | 0.19229
c20d8ed82808f42c1ce9f7452c5668af8015a2b5 | 2,335 | py | Python | setup.py | maljovec/samply | 9364c2f671c02cb7bab484c0e856a0a0ca6ecc40 | ["BSD-3-Clause"] | null | null | null | setup.py | maljovec/samply | 9364c2f671c02cb7bab484c0e856a0a0ca6ecc40 | ["BSD-3-Clause"] | 2 | 2019-02-21T00:28:36.000Z | 2019-11-09T04:35:39.000Z | setup.py | maljovec/samplers | 9364c2f671c02cb7bab484c0e856a0a0ca6ecc40 | ["BSD-3-Clause"] | null | null | null |
"""
Setup script for samply
"""
from setuptools import setup
import re
extra_args = {}
def get_property(prop, project):
"""
Helper function for retrieving properties from a project's
__init__.py file
@In, prop, string representing the property to be retrieved
@In, project, string representing the project from which we will
retrieve the property
@Out, string, the value of the found property
"""
result = re.search(
r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop),
open(project + "/__init__.py").read(),
)
return result.group(1)
VERSION = get_property("__version__", "samply")
def long_description():
""" Reads the README.rst file and extracts the portion tagged between
specific LONG_DESCRIPTION comment lines.
"""
description = ""
recording = False
with open("README.rst") as f:
for line in f:
if "END_LONG_DESCRIPTION" in line:
return description
elif "LONG_DESCRIPTION" in line:
recording = True
continue
            if recording:
                description += line
    return description
# Consult here: https://packaging.python.org/tutorials/distributing-packages/
setup(
name="samply",
packages=["samply"],
version=VERSION,
description="A library for computing samplings in arbitrary dimensions",
long_description=long_description(),
author="Dan Maljovec",
author_email="[email protected]",
license="BSD",
test_suite="samply.tests",
url="https://github.com/maljovec/samply",
download_url="https://github.com/maljovec/samply/archive/"
+ VERSION
+ ".tar.gz",
keywords=[""],
# Consult here: https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: C++",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Mathematics",
],
setup_requires=["scipy", "numpy", "sklearn", "pyDOE", "ghalton"],
install_requires=["scipy", "numpy", "sklearn", "pyDOE", "ghalton"],
python_requires=">=2.7, <4",
)
| 29.935897 | 77 | 0.615418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,334 | 0.571306 |
c20db92c5e61a54ef4ff2401b5df9360bca3d9b1 | 4,353 | py | Python | 数据结构实践课/实验3/文本格式化.py | TD21forever/hdu-term-project-helper | f42f553efd1d7b59162d3fc793ac14ae30850efd | ["Apache-2.0"] | 17 | 2021-01-09T06:49:09.000Z | 2022-02-23T01:36:20.000Z | 数据结构实践课/实验3/文本格式化.py | TD21forever/hdu-term-project-helper | f42f553efd1d7b59162d3fc793ac14ae30850efd | ["Apache-2.0"] | null | null | null | 数据结构实践课/实验3/文本格式化.py | TD21forever/hdu-term-project-helper | f42f553efd1d7b59162d3fc793ac14ae30850efd | ["Apache-2.0"] | 1 | 2021-06-22T12:56:16.000Z | 2021-06-22T12:56:16.000Z |
# -*- coding: utf-8 -*-
# @Author: TD21forever
# @Date: 2018-11-14 15:41:57
# @Last Modified by: TD21forever
# @Last Modified time: 2018-11-15 16:50:48
input_path = 'input.txt'  # path to the input file; reopened on each run
# Preprocessing: normalize the spacing around punctuation in the raw article string
def preprocess(article):
article = article.strip()
article = article.replace(",", ", ")
article = article.replace(" ,", ",")
article = article.replace(".", ". ")
article = article.replace(" .", ".")
article = article.replace("?", "? ")
article = article.replace(" ?", "?")
return article
def operate(line_num=5, word_in_line=55, margin=2, heading_len=3, footing_len=3, start_page_num=1, file=None):
    flag = 0
    if file is None:
        file = open(input_path, 'r')  # open the input file
    article = file.read()  # read the whole input file into one string
    file.close()
    f = open('out.txt', 'a')  # output file; closed at the end of this function
article = preprocess(article)
    word_list = article.split()  # split into individual words
    str_info = " ".join(word_list)  # re-join with single spaces to collapse runs of whitespace
str_info = str_info.replace("@", "\n @")
start = 0
end = word_in_line
while end < len(str_info):
        for i in range(heading_len):  # blank lines at the top of the page
            print("\n",end="",file = f)
        for one in range(line_num):  # one iteration per text line on the page
line = str_info[start:end]
temp = end
if end<=len(str_info):
if str_info[temp-1] != " " or str_info[temp-1] not in word_list:#如果一行的最后一个不是空格说明那个单词被拆开了
# 另一个条件是防止出现as被分开的这种情况
while str_info[temp] != " ":#下一行的第一个字母不是空格,就把这个字母加到上一行的末尾
line = line + (str_info[temp])
temp+=1
line = line + (str_info[temp])#temp最后移到空格,空格放在上一行的末尾
end = temp+1
print(" "*margin,end="",file = f)#每一行开头的空格
print(line,file = f)#打印一页
start = end
end+=word_in_line
            elif one+1 < line_num:  # the page's line quota is not yet reached; flush the remaining text
                line = str_info[start:]
                print(" "*margin,end="",file=f)  # left margin
                print(line,file = f)  # print the final (partial) line
                flag = 1
                for i in range(footing_len):  # blank lines at the bottom of the page
if footing_len >=3 :
if i==1:
print(" "*((word_in_line+margin)//2),str(start_page_num),end = "",file = f)
print("\n",end = "",file = f)
break
            else:  # this page is full, so start a new page
                for i in range(footing_len):  # blank lines at the bottom of the page
if footing_len >=3 :
if i==1:
print(" "*((word_in_line+margin)//2),str(start_page_num),end = "",file = f)
print("\n",end = "",file = f)
                for i in range(heading_len):  # blank lines at the top of the page
print("\n",end="",file = f)
line = str_info[start:]
print(" "*margin,end="",file = f)#每一行开头的空格
print(line,file = f)#打印一页
for i in range(footing_len):#底部空格
if footing_len >=3 :
if i==1:
print(" "*((word_in_line+margin)//2),str(start_page_num+1),end = "",file = f)
print("\n",end = "",file = f)
flag = 1
if flag == 1:
break
        for i in range(footing_len):  # blank lines at the bottom of the page
if footing_len >=3 :
if i==1:
print(" "*((word_in_line+margin)//2),str(start_page_num),end = "",file = f)
print("\n",end = "",file = f)
        start_page_num+=1
    f.close()
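# Hedged usage sketch: format input.txt at 4 lines/page, 40 chars/line, margin of 2:
#   operate(line_num=4, word_in_line=40, margin=2, heading_len=3, footing_len=3, start_page_num=1)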
if __name__ == '__main__':
    while True:
        print("Welcome to the text formatting tool")
        print("The parameters you can set are:\n1. lines of text per page\n2. maximum characters of text per line\n3. fixed spaces before the text on each line\n4. blank lines at the top of each page\n5. blank lines at the bottom of each page\n6. starting page number\n")
        ans = "no"
        ans = input("Use the default parameters 5,55,2,3,3,1? Enter yes or no: ")
        if ans == 'yes':
            operate()
        else:
            print("Please enter the parameters\n")
            a = int(input("1. lines of text per page: "))
            b = int(input("2. maximum characters of text per line: "))
            if b>80:
                b = int(input("Maximum characters per line must not exceed 80; please re-enter: "))
            c = int(input("3. fixed spaces before the text on each line: "))
            d = int(input("4. blank lines at the top of each page: "))
            e = int(input("5. blank lines at the bottom of each page: "))
            ff = int(input("6. starting page number: "))
            operate(a,b,c,d,e,ff)
        again = input("Format the input again? Enter yes or no: ")
        if again != 'yes':
            break
| 34.824 | 107 | 0.464278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,649 | 0.321255 |
c210287e380e114135144808518dac8414c8a7fc | 1,405 | py | Python | authors/apps/authentication/tests/test_models.py | C3real-kill3r/binary-jungle-backend | 5333138fbce901e75accf5487b10990979afa571 | ["MIT"] | null | null | null | authors/apps/authentication/tests/test_models.py | C3real-kill3r/binary-jungle-backend | 5333138fbce901e75accf5487b10990979afa571 | ["MIT"] | 8 | 2020-02-12T03:04:07.000Z | 2022-03-12T00:07:31.000Z | authors/apps/authentication/tests/test_models.py | C3real-kill3r/binary-jungle-backend | 5333138fbce901e75accf5487b10990979afa571 | ["MIT"] | null | null | null |
from django.test import TestCase
from authors.apps.authentication.models import (
User
)
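# These tests run under Django's test runner, e.g. (app label assumed from the imports):
#   python manage.py test authors.apps.authentication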
class UserModelTest(TestCase):
"""
    Tests for the User model class: the ability to create a user and a superuser.
"""
def test_create_user(self):
"""
        Checks whether a user can be created with a username, email and password
:return:
"""
self.assertIsInstance(
User.objects.create_user(username="username", email="[email protected]", password="password"), User)
def test_cannot_create_user_without_email(self):
"""
Ensure a user cannot be created without an email
:return:
"""
with self.assertRaises(TypeError):
User.objects.create_user(username="username", password="password", email=None)
def test_create_superuser(self):
"""
Ensure a superuser can be created
:return:
"""
user = User.objects.create_superuser(username="admin", email="[email protected]", password="password")
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
def test_cannot_create_superuser_without_password(self):
"""
Ensures a superuser must have a password
:return:
"""
with self.assertRaises(TypeError):
User.objects.create_superuser(username="admin", email="[email protected]")
| 30.543478 | 112 | 0.64484 | 1,308 | 0.930961 | 0 | 0 | 0 | 0 | 0 | 0 | 570 | 0.405694 |
c213671e056f4ccf87fd3dab05b33b16957f6f48 | 5,908 | py | Python | simulations/MonteCarlo_function.py | chengning-zhang/Statistical-methods-for-combining-multiple-tests-a-Wrapper- | 42b5aabf6a0619fa7fe18a034926236d133a35d8 | ["MIT"] | 1 | 2020-11-24T02:58:58.000Z | 2020-11-24T02:58:58.000Z | simulations/MonteCarlo_function.py | chengning-zhang/Combining-multiple-tests-Wrapper- | 42b5aabf6a0619fa7fe18a034926236d133a35d8 | ["MIT"] | null | null | null | simulations/MonteCarlo_function.py | chengning-zhang/Combining-multiple-tests-Wrapper- | 42b5aabf6a0619fa7fe18a034926236d133a35d8 | ["MIT"] | 4 | 2019-12-05T05:34:07.000Z | 2022-03-24T09:59:26.000Z |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Created on Nov 23, 2020
@author: Chengning Zhang
"""
## simulation for Scenario A: generate X0 and X1.
def MonteCarlo_1(T, n0, n1, u0, u1, sigma0, sigma1, log_bool = False):
"""simulation for first scenario: multivarite normal with equal variance
T: number of simulation
n0: sample size of class 0
n1: sample size of class 1
"""
AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time
methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr']
for i in range(T):
    ### one Monte Carlo simulation of size n0 + n1
#i = 10
np.random.seed(seed= 100*i+ 4*i)
X0 = multivariate_normal(u0, sigma0, size = n0)
X1 = multivariate_normal(u1, sigma1, size = n1)
if log_bool:
X0 = np.exp(X0)
X1 = np.exp(X1)
#
X = np.concatenate([X0,X1])
y = [0] * n0
y.extend([1]*n1); y = np.array(y) ## X,y is one simulation
X = pd.DataFrame(data = X); y = pd.Series(y)
## within that particular MC simulation, do 10 folds CV
cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42)
    AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # one entry per fold
#
for folder, (train_index, val_index) in enumerate(cv.split(X, y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = y.iloc[train_index],y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
for method in methods:
model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC_folds[method].append(auc)
#print(AUC_folds)
for key, val in AUC_folds.items():
AUC[key].append( np.mean(np.array(val) ))
print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()})
return AUC
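# Hedged usage sketch for Scenario A (assumed toy parameters; two markers, equal covariance):
#   u0, u1 = [0.0, 0.0], [0.8, 0.8]
#   sigma = [[1.0, 0.3], [0.3, 1.0]]
#   auc_by_method = MonteCarlo_1(T=100, n0=50, n1=50, u0=u0, u1=u1, sigma0=sigma, sigma1=sigma)
# MonteCarlo_2 and MonteCarlo_3 below follow the same pattern with (T, n, u, sigma).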
## Simulation scenario B: generate X first, then generate Bernoulli y via logit(P(Y=1|X)) = 10 * (sin(pi*x1) + ...)
def MonteCarlo_2(T, n, u, sigma):
  """Simulation for Scenario B: generate X from a multivariate normal, then y via logit(P(y=1|X)) = 10 * (sin(pi*x1) + ... + sin(pi*xp))
  T: number of simulations
n: sample size
u: mean for X
sigma: variance for X
"""
AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time
methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr']
for i in range(T):
    ### one Monte Carlo simulation of size n
#i = 10
print(i)
np.random.seed(seed= 100*i+ 4*i)
X = multivariate_normal(u, sigma, size = n)
X_trans = [ 10*sum(list(map(lambda x: np.sin(np.pi*x) , ele))) for ele in X]
p = list(map(lambda x: 1 / (1 + np.exp(-x)), X_trans))
y = bernoulli.rvs(p, size= n)
X = pd.DataFrame(data = X); y = pd.Series(y)
## within that particular MC simulation, do 10 folds CV
cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42)
    AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # one entry per fold
#
for folder, (train_index, val_index) in enumerate(cv.split(X, y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = y.iloc[train_index],y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
for method in methods:
model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC_folds[method].append(auc)
#print(AUC_folds)
for key, val in AUC_folds.items():
AUC[key].append( np.mean(np.array(val) ))
print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()})
return AUC
## Simulation scenario C: generate X first, then generate Bernoulli y via logit(P(Y=1|X)) = x1 - x2 - x3 + (x1-x2)^2 - x4^4
def MonteCarlo_3(T, n, u, sigma):
  """Simulation for Scenario C: generate X from a multivariate normal, then y via logit(P(y=1|X)) = x1 - x2 - x3 + (x1-x2)^2 - x4^4
  T: number of simulations
n: sample size
"""
AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time
methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr']
for i in range(T):
    ### one Monte Carlo simulation of size n
np.random.seed(seed= 100*i+ 4*i)
X = multivariate_normal(u, sigma, size = n); #X = np.exp(X)
X_trans = [ele[0] - ele[1] - ele[2]+ (ele[0] - ele[1])**2 - ele[3]**4 for ele in X] ## x1 - x2 - x3 + (x1-x2)^2 - x4^4
p = list(map(lambda x: 1 / (1 + np.exp(-x)), X_trans))
y = bernoulli.rvs(p, size= n)
X = pd.DataFrame(data = X); y = pd.Series(y)
## within that particular MC simulation, do 10 folds CV
cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42)
    AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # one entry per fold
#
for folder, (train_index, val_index) in enumerate(cv.split(X, y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = y.iloc[train_index],y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
for method in methods:
model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC_folds[method].append(auc)
#print(AUC_folds)
for key, val in AUC_folds.items():
AUC[key].append( np.mean(np.array(val) ))
print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()})
return AUC
| 46.15625 | 128 | 0.609682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,954 | 0.330738 |
c213c3cc512fab07ba3d806bd3d3286525745450 | 389 | py | Python | crawler/robo_proxy.py | xliangwu/com.caveup.machine_learn | 793131c4767f45d468a813752c07d02f623a7b99 | ["Apache-2.0"] | 1 | 2018-09-19T06:27:14.000Z | 2018-09-19T06:27:14.000Z | crawler/robo_proxy.py | xliangwu/com.caveup.machine_learn | 793131c4767f45d468a813752c07d02f623a7b99 | ["Apache-2.0"] | null | null | null | crawler/robo_proxy.py | xliangwu/com.caveup.machine_learn | 793131c4767f45d468a813752c07d02f623a7b99 | ["Apache-2.0"] | null | null | null |
import requests
def pages_crawler():
http_header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
}
url = r'https://robo.datayes.com/v2/indicator_library'
response = requests.get(url, headers=http_header)
print(response.text)
if __name__ == '__main__':
pages_crawler()
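# Note: for unattended crawling a request timeout is advisable (a sketch, not
# in the original; requests.get accepts a timeout in seconds):
# response = requests.get(url, headers=http_header, timeout=10)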
| 24.3125 | 135 | 0.676093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.467866 |
c2145a28b8098d26c67f49818369dff92c2ac06b | 11,662 | py | Python | apiosintDS/apiosintDS.py | davidonzo/apiosintDS | b5bb1c42e1a3d984a69e8794a4c5da6969dcd917 | [
"MIT"
]
| 13 | 2019-10-15T06:54:49.000Z | 2022-03-28T23:23:29.000Z | apiosintDS/apiosintDS.py | davidonzo/apiosintDS | b5bb1c42e1a3d984a69e8794a4c5da6969dcd917 | [
"MIT"
]
| 1 | 2019-11-12T15:00:53.000Z | 2019-11-14T09:37:46.000Z | apiosintDS/apiosintDS.py | davidonzo/apiosintDS | b5bb1c42e1a3d984a69e8794a4c5da6969dcd917 | [
"MIT"
]
| 4 | 2019-12-05T05:34:07.000Z | 2022-03-24T09:59:26.000Z | import sys
import logging
import pytz
logging.basicConfig(format='%(levelname)s: %(message)s')
if (sys.version_info < (3, 0)):#NO MORE PYTHON 2!!! https://pythonclock.org/
logging.error(" ########################### ERROR ###########################")
logging.error(" =============================================================")
logging.error(" Invalid python version detected: "+str(sys.version_info[0])+"."+str(sys.version_info[1]))
logging.error(" =============================================================")
logging.error(" It seems your are still using python 2 even if you should")
logging.error(" now it will be retire next 2020.")
logging.error(" For more info please read https://pythonclock.org/")
logging.error(" =============================================================")
logging.error(" Try again typing: python3 /path/to/"+sys.argv[0])
logging.error(" =============================================================")
logging.error(" ########################### ERROR ###########################")
exit(0)
import tempfile
import argparse
import os
import requests
import re
import json
italyTZ = pytz.timezone("Europe/Rome")
from apiosintDS.modules import listutils, dosearch
try:
from urllib.parse import urlparse
except ImportError as ierror:
logging.error(ierror)
logging.error("To run this script you need to install the \"urllib\" module")
logging.error("Try typing: \"pip3 install urllib3\"")
exit(0)
try:
import validators
except ImportError as e:
logging.error(e)
logging.error("To run this script you need to install the \"validators\" module")
logging.error("Try typing: \"pip3 install validators\"")
exit(0)
import platform
if platform.system() not in ['Linux']:
logging.warning("Script not testes on "+platform.system()+" systems. Use at your own risks.")
scriptinfo = {"scriptname": "DigitalSide-API",
"majorversion": "1",
"minorversion": "8.3",
"license": "MIT",
"licenseurl": "https://raw.githubusercontent.com/davidonzo/Threat-Intel/master/LICENSE",
"author": "Davide Baglieri",
"mail": "info[at]digitalside.it",
"pgp": "30B31BDA",
"fingerprint": "0B4C F801 E8FF E9A3 A602 D2C7 9C36 93B2 30B3 1BDA",
"git": "https://github.com/davidonzo/Threat-Intel/blob/master/tools/DigitalSide-API/v1",
"DSProjectHP": "https://osint.digitalside.it",
"DSGitHubHP": "https://github.com/davidonzo/Threat-Intel"}
def checkfile(file):
if os.path.isfile(file) == False:
msg = "File not found: %r." % file
raise argparse.ArgumentTypeError(msg)
else:
        with open(file) as fp:
            lines = [line.rstrip('\n') for line in fp]
if len(lines) == 0:
msg2 = "File is empty or unreadable: %r." % file
raise argparse.ArgumentTypeError(msg2)
return lines
def writablefile(file):
if os.path.isfile(file) == True:
msg = "File %r already exists. Please, delete it first." % file
raise argparse.ArgumentTypeError(msg)
else:
try:
f = open(file, "w+")
f.close()
except:
msg2 = "File is empty or unreadable: %r." % file
raise argparse.ArgumentTypeError(msg2)
return file
def writablecache(tmpdir):
if os.path.isfile(tmpdir):
msg = "%r seems to be a file, not a directory." % tmpdir
raise argparse.ArgumentTypeError(msg)
elif os.path.exists(tmpdir) == False:
msg = "%r directory not found." % tmpdir
raise argparse.ArgumentTypeError(msg)
elif os.access(tmpdir, os.W_OK) == False:
msg = "%r directory not found." % tmpdir
raise argparse.ArgumentTypeError(msg)
return tmpdir
def filebspath(directory, file):
_BSR = os.path.abspath(os.path.dirname(__file__))
return os.path.join(_BSR, directory, file)
def info():
htext = scriptinfo["scriptname"]+" v."+scriptinfo["majorversion"]+"."+scriptinfo["minorversion"]+"."
htext += "\nOn demand query API for OSINT.digitalside.it project.\n"
htext += "You can query for souspicious domains, urls and IPv4.\n\n"
htext += "For more information read the README.md file and the JSON schema hosted on GitHub.com:\n"
htext += " - "+scriptinfo["git"]+"/README.md\n"
htext += " - "+scriptinfo["git"]+"/schema.json\n"
htext += "\n"
htext += "This file is part of the OSINT.digitalside.it project.\n"
htext += "For more information about the project please visit the following links:\n"
htext += " - "+scriptinfo["DSProjectHP"]+"\n"
htext += " - "+scriptinfo["DSGitHubHP"]+"\n"
htext += "\n"
htext += "This software is released under the "+scriptinfo["license"]+" license\n"
htext += " - "+scriptinfo["licenseurl"]+"\n"
htext += "\n"
htext += "Coded with love by\n "+scriptinfo["author"]+" <"+scriptinfo["mail"]+">\n"
htext += " PGP "+scriptinfo["pgp"]+"\n"
htext += " Fingerprint "+scriptinfo["fingerprint"]
htext += "\n"
return htext
def schema():
try:
schema = open(filebspath('schema', 'schema.json'), "r")
content = schema.read()
schema.close()
return content
except IOError as e:
logging.error(e)
logging.error("Unable to load schema file.")
exit(1)
def request(entities=list, cache=False, cachedirectory=None, clearcache=False, verbose=False, *args, **kwargs):
if isinstance(entities, list):
        if clearcache and not cache:
logging.error("Unable to clear cache with cache disabled. Please set the cache to 'True'")
exit(1)
        if cachedirectory and not cache:
logging.error("Unable to use a cache directory with the cache option disabled. Please set the cache to 'True'")
exit(1)
if cache and not cachedirectory:
logging.error("When using apiosintDS as python library, you always have to specify the temporary files directory to be used.")
exit(1)
if cache:
try:
writablecache(cachedirectory)
except Exception as clearcacheerror:
logging.error(clearcacheerror)
exit(1)
lutils = listutils.listutils(None, entities, cache, cachedirectory, clearcache)
makelist = lutils.prepareLists()
if isinstance(makelist, dict):
            search = dosearch.dosearch(makelist, verbose)
            results = search.prepareResults()
if isinstance(results, dict):
return results
else:
logging.error("create_request must return a dict.")
else:
logging.error("create_request must return a dict.")
else:
logging.error("entities must be an instance of list.")
exit(1)
def main():
parserdescription = scriptinfo["scriptname"]+" v."+scriptinfo["majorversion"]+"."+scriptinfo["minorversion"]+"."
parserdescription +=" On demand query API for OSINT.digitalside.it project."
parserdescription +=" You can query for souspicious domains, urls and IPv4."
parser = argparse.ArgumentParser(description=parserdescription)
parser.add_argument("-e","--entity", type=str, action="store", metavar="[IPv4|domain|url|hash]", dest="ITEM", help="Single item to search. Supported entities are IPv4/FQDN/URLs and file hashes in md5, sha1 or sha256. It can't be used in combination with the --file option.", default=None)
parser.add_argument("-f","--file", type=checkfile, action="store", metavar="/path/to/file.txt", dest="FILE", help="Path to file containing entities to search. Supported entities are IPv4/FQDN/URLs. It can't be used in combination with the --entity option.", default=None)
parser.add_argument("-o", "--output", type=writablefile, action="store", metavar="/path/to/output.json", dest="OUTPUT", help="Path to output file (/path/to/output.json). If not specified the output will be redirect to the STDOUT.", default=None)
parser.add_argument("-v", "--verbose", action="store_true", dest="VERBOSE", help="Include unmatched results in report.")
parser.add_argument("-c","--cache", action="store_true", dest="CACHE", help="Enable cache mode. Downloaded lists will be stored a won't be downloaded for the next 4 hours.")
parser.add_argument("-cd","--cachedirectory", type=writablecache, action="store", metavar="/path/to/cachedir", dest="DIRECTORY", help="The cache directory where the script check for cached lists files and where them will be stored on cache creation or update. Must be specified the same every script run unless your are using the system temp directory. Default is '"+tempfile.gettempdir()+"'", default=tempfile.gettempdir())
parser.add_argument("-cc","--clearcache", action="store_true", dest="CLEARCACHE", help="Force the script to download updated lists even if the 3 hours timeout has not yet been reached. Must be used in combination with --cache.")
parser.add_argument("-i","--info", action="store_true", dest="INFO", help="Print information about the program.")
parser.add_argument("-s","--schema", action="store_true", dest="SCHEMA", help="Display the response json schema.")
try:
args = parser.parse_args()
if (args.INFO):
sys.stdout.write(info())
exit(1)
if (args.SCHEMA):
try:
schema = open(filebspath('schema', 'schema.json'), "r")
for schemaline in schema.readlines():
sys.stdout.write(schemaline)
schema.close()
exit(0)
except IOError as e:
logging.error(e)
logging.error("Unable to load schema file.")
exit(1)
if (args.ITEM == None) and (args.FILE == None):
parser.error("No targets selected! Please, specify one option between --entity and --file.\nTry option -h or --help.")
exit(1)
elif (args.ITEM != None) and (args.FILE != None):
parser.error("Too much targets selected! Sorry, you can't specify both options --entity and --file.\nTry option -h or --help.")
exit(1)
elif args.CLEARCACHE and not args.CACHE:
args.CLEARCACHE = False
logging.warning("Expected -c or --cache option declared. Ignoring all cache settings.\nTry option -h or --help.")
lutils = listutils.listutils(args.ITEM, args.FILE, args.CACHE, args.DIRECTORY, args.CLEARCACHE)
makelist = lutils.prepareLists()
if isinstance(makelist, dict):
            search = dosearch.dosearch(makelist, args.VERBOSE)
            results = search.prepareResults()
if isinstance(results, dict):
output = json.dumps(results, indent=4, separators=(",", ": "))
if args.OUTPUT == None:
sys.stdout.write(output)
else:
fileoutput = open(args.OUTPUT, "w+")
fileoutput.write(output)
fileoutput.close()
logging.info("Output saved in file: "+args.OUTPUT)
else:
logging.error("'results' is not an dict. Quit!")
else:
logging.error("'makelist' is not an dict. Quit!")
except argparse.ArgumentError as e:
logging.error(e)
parser.error("Unexpected Error.\nTry option -h or --help.")
exit(2)
if __name__ == '__main__':
main()
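# Example library usage (a sketch; the entity values and the cache directory
# are illustrative, and it is assumed the package exposes request() at import
# time -- the keyword arguments follow the request() signature above):
# import apiosintDS
# report = apiosintDS.request(entities=["example.com", "8.8.8.8"],
#                             cache=True, cachedirectory="/tmp", verbose=True)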
| 51.149123 | 428 | 0.607271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,918 | 0.421712 |
c215424b83f6d390aecbd0efbacc856a6c8e7a5e | 38 | py | Python | sqlcouch/exceptions.py | dimagi/sqlcouch | ff264740d223185d80e0bf8ea11f6f3c8f3d7e7b | [
"BSD-3-Clause"
]
| 1 | 2016-03-01T10:20:20.000Z | 2016-03-01T10:20:20.000Z | sqlcouch/exceptions.py | dimagi/sqlcouch | ff264740d223185d80e0bf8ea11f6f3c8f3d7e7b | [
"BSD-3-Clause"
]
| null | null | null | sqlcouch/exceptions.py | dimagi/sqlcouch | ff264740d223185d80e0bf8ea11f6f3c8f3d7e7b | [
"BSD-3-Clause"
]
| null | null | null | class NoMoreData(Exception):
pass
| 12.666667 | 28 | 0.736842 | 37 | 0.973684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c2160b83bdfd16bb5fd59f1cfbfcbb7c7d36395f | 3,327 | py | Python | 5-3_stock inventory.py | hkrsmk/python | 1ee1b0adc911b62af3911428f441c6c59e1b345f | [
"Unlicense"
]
| null | null | null | 5-3_stock inventory.py | hkrsmk/python | 1ee1b0adc911b62af3911428f441c6c59e1b345f | [
"Unlicense"
]
| null | null | null | 5-3_stock inventory.py | hkrsmk/python | 1ee1b0adc911b62af3911428f441c6c59e1b345f | [
"Unlicense"
]
| null | null | null | #Stock inventory control system.
def menu():
print("""1. Add New Stock
2. Update existing stock
3. Sell stock, even though 2 will work too
8. Display Inventory
9. Exit""")
while True:
try:
choice = int(input("Please select an option"))
break
except:
print("Invalid choice, please try again")
return choice
#======================================= 1 ===========================
def newStock():
newstock = input("Enter new stock name")
if newstock in myStock:
print("Stock already there")
else:
myStock[newstock]=0
print("new stock", newstock.center(10, ' '), "added")
#======================================= 2 ===========================
def addVolume():
stock_bought = input("Enter stock name you're buying")
if stock_bought not in myStock:
print("Stock ain't there. add first")
else:
while True:
try:
qty = int(input("How many? positive for buy. negative for sell"))
myStock[stock_bought] += qty
print(stock_bought, "is now", myStock[stock_bought])
break
except:
print("Invalid quantity!")
#======================================= 3 ============================
def sell():
selling = input("Stock name you're selling?")
if selling not in myStock:
print("You don't have this?")
elif myStock[selling]<=0:
print(selling.center(10, ' '), "outta stock")
else:
while True:
try:
qty = int(input("how many sold?"))
if myStock[selling] < qty:
print("u selling > you have, not allowed!")
raise "Error"
myStock[selling] -= qty
print(selling, "is now", myStock[selling])
break
except:
print("Invalid qty")
#main prog below
choice = 0
myStock = {}
#empty dictionary for myStock
try:
infile = open("myStock.txt","r")
read1LineStock = infile.readline()
#read first line
while read1LineStock !=" ":
#while the file has not ended,
myStock[read1LineStock.split(",")[0]] = int(read1LineStock.split(",")[1])
read1LineStock = infile.readline()
print(myStock)
    #place item 0 of the split-up line as the item name in myStock,
    #and the number in item 1 of the split-up line (int() ignores the '\n')
    #as the quantity in myStock.
    #e.g. the line "apple,1" gives myStock['apple'] = 1
    #then, read the next line.
infile.close()
except:
print("Welcome to the stock management system!")
while choice != 9:
choice = menu()
#rmb to return choice to the global choice.
#the choice inside menu() is a LOCAL choice.
if choice ==1:
newStock()
elif choice ==2:
addVolume()
elif choice ==3:
sell()
#======================================= 8 ===========================
elif choice ==8:
print(myStock)
#======================================= 9 ===========================
print("Have a noice day")
| 30.522936 | 87 | 0.479411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,468 | 0.441238 |
c21a45651bf528b945d22bdb962a7e3a45ad0e4d | 91 | py | Python | backend/gunicorn_config_worker.py | matan-h/futurecoder | 5117cbab7ed2bc41fe9d4763038d9c4a0aba064e | [
"MIT"
]
| null | null | null | backend/gunicorn_config_worker.py | matan-h/futurecoder | 5117cbab7ed2bc41fe9d4763038d9c4a0aba064e | [
"MIT"
]
| 1 | 2022-02-28T01:35:27.000Z | 2022-02-28T01:35:27.000Z | backend/gunicorn_config_worker.py | matan-h/futurecoder | 5117cbab7ed2bc41fe9d4763038d9c4a0aba064e | [
"MIT"
]
| null | null | null | bind = "0.0.0.0:5000"
threads = 10
worker_class = "gthread"
accesslog = '-'
errorlog = '-'
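# Notes (standard gunicorn settings): "bind" is the listen address:port,
# "threads" is the per-worker thread count used by the "gthread" worker class,
# and setting accesslog/errorlog to '-' routes the logs to stdout/stderr.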
| 15.166667 | 24 | 0.626374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.318681 |
c21a8492971d5deb4f24b54f0d01b958dad6c817 | 1,780 | py | Python | 2017/day23.py | andypymont/adventofcode | 912aa48fc5b31ec9202fb9654380991fc62afcd1 | [
"MIT"
]
| null | null | null | 2017/day23.py | andypymont/adventofcode | 912aa48fc5b31ec9202fb9654380991fc62afcd1 | [
"MIT"
]
| null | null | null | 2017/day23.py | andypymont/adventofcode | 912aa48fc5b31ec9202fb9654380991fc62afcd1 | [
"MIT"
]
| null | null | null | """
2017 Day 23
https://adventofcode.com/2017/day/23
"""
from typing import Dict
import aocd # type: ignore
class Program:
def __init__(self, text: str):
self.registers: Dict[str, int] = {}
self.commands = text.split("\n")
self.position = 0
self.mul_count = 0
def get(self, key: str) -> int:
try:
return int(key)
except ValueError:
return self.registers.get(key, 0)
def run_command(self, pos: int) -> None:
command = self.commands[pos]
instruction, *args = command.split(" ")
if instruction == "set":
self.registers[args[0]] = self.get(args[1])
elif instruction == "sub":
self.registers[args[0]] = self.get(args[0]) - self.get(args[1])
elif instruction == "mul":
self.registers[args[0]] = self.get(args[0]) * self.get(args[1])
self.mul_count += 1
elif instruction == "jnz":
if self.get(args[0]) != 0:
self.position += self.get(args[1]) - 1
def run(self) -> None:
while self.position < len(self.commands):
self.run_command(self.position)
self.position += 1
def prime(number: int) -> bool:
for factor in range(2, (number // 2) + 1):
if number % factor == 0:
return False
return True
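# Trial division up to number // 2; note it returns True for numbers below 2,
# which never occur for the range used below. E.g. prime(17) -> True,
# prime(21) -> False.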
def run_program() -> int:
return sum(1 for b in range(107900, 124901, 17) if not prime(b))
def main() -> None:
"""
Calculate and output the solutions based on the real puzzle input.
"""
data = aocd.get_data(year=2017, day=23)
program = Program(data)
program.run()
print(f"Part 1: {program.mul_count}")
print(f"Part 2: {run_program()}")
if __name__ == "__main__":
main()
| 25.797101 | 75 | 0.561798 | 1,094 | 0.614607 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.13764 |
c21ace7559f52cf54fe988e11522102469f04048 | 1,641 | py | Python | src/simulator/wsn/test.py | liuliuliu0605/Federated-Learning-PyTorch | 04169455917ae50a8fea2dabd756a0ca1774e5d5 | [
"MIT"
]
| null | null | null | src/simulator/wsn/test.py | liuliuliu0605/Federated-Learning-PyTorch | 04169455917ae50a8fea2dabd756a0ca1774e5d5 | [
"MIT"
]
| null | null | null | src/simulator/wsn/test.py | liuliuliu0605/Federated-Learning-PyTorch | 04169455917ae50a8fea2dabd756a0ca1774e5d5 | [
"MIT"
]
| null | null | null | import sys
# explicit imports for names used below (logging, np); they may also be
# re-exported by the wildcard imports that follow
import logging
import numpy as np
from sklearn.datasets import make_blobs
from src.simulator.wsn.network import Network
from src.simulator.wsn.utils import *
from src.simulator.wsn.fcm import *
from src.simulator.wsn.direct_communication import *
from src.utils import complete, star
seed = 1
np.random.seed(seed)
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
traces = {}
topo = complete(cf.NB_CLUSTERS)
# topo = independent(cf.NB_CLUSTERS)
# topo = star(cf.NB_CLUSTERS)
# topo = ring(cf.NB_CLUSTERS)
centers = [[50, 225], [25, 110], [125, 20], [220, 80], [200, 225]]
X, y = make_blobs(n_samples=100, centers=centers, n_features=2,
random_state=seed, cluster_std=15)
network = Network(init_nodes=X, topo=topo)
# network = Network(topo=topo)
for routing_topology in ['FCM']:#, 'DC']:
network.reset()
routing_protocol_class = eval(routing_topology)
network.init_routing_protocol(routing_protocol_class())
# traces[routing_topology] = network.simulate()
for i in range(1000):
print("--------Round %d--------"% i)
network.activate_mix()
traces[routing_topology] = network.simulate_one_round()
network.deactivate_mix()
        if len(network.get_alive_nodes()) == 0:
break
# plot_clusters(network)
# plot_time_of_death(network)
# print(network.energy_dis)
# print(network.energy_dis['inter-comm']/ network.energy_dis['intra-comm'])
print("All death round: ", i)
print("First death round: ", network.first_depletion)
print("Energy:", network.energy_dis)
plot_traces(traces) | 32.176471 | 80 | 0.672151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.258988 |
c21c3b472b61858775a3801d8a7ee0aff0f5536a | 4,149 | py | Python | src/dewloosh/geom/cell.py | dewloosh/dewloosh-geom | 5c97fbab4b68f4748bf4309184b9e0e877f94cd6 | [
"MIT"
]
| 2 | 2021-12-11T17:25:51.000Z | 2022-01-06T15:36:27.000Z | src/dewloosh/geom/cell.py | dewloosh/dewloosh-geom | 5c97fbab4b68f4748bf4309184b9e0e877f94cd6 | [
"MIT"
]
| null | null | null | src/dewloosh/geom/cell.py | dewloosh/dewloosh-geom | 5c97fbab4b68f4748bf4309184b9e0e877f94cd6 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numpy as np
from numpy import ndarray
from dewloosh.math.array import atleast1d
from dewloosh.math.utils import to_range
from .celldata import CellData
from .utils import jacobian_matrix_bulk, points_of_cells, pcoords_to_coords_1d
class PolyCell(CellData):
NNODE = None
NDIM = None
def __init__(self, *args, topo: ndarray=None, i: ndarray=None, **kwargs):
if isinstance(topo, ndarray):
kwargs['nodes'] = topo
if isinstance(i, ndarray):
kwargs['id'] = i
super().__init__(*args, **kwargs)
def jacobian_matrix(self, *args, dshp=None, ecoords=None, topo=None, **kwargs):
ecoords = self.local_coordinates(topo=topo) if ecoords is None else ecoords
return jacobian_matrix_bulk(dshp, ecoords)
def jacobian(self, *args, jac=None, **kwargs):
return np.linalg.det(jac)
def points_of_cells(self, *args, target=None, **kwargs):
assert target is None
topo = kwargs.get('topo', self.nodes.to_numpy())
coords = kwargs.get('coords', self.pointdata.x.to_numpy())
return points_of_cells(coords, topo)
def local_coordinates(self, *args, **kwargs):
frames = kwargs.get('frames', self.frames.to_numpy())
topo = kwargs.get('_topo', self.nodes.to_numpy())
coords = self.pointdata.x.to_numpy()
return points_of_cells(coords, topo, local_axes=frames)
def coords(self, *args, **kwargs):
return self.points_of_cells(*args, **kwargs)
class PolyCell1d(PolyCell):
NDIM = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# NOTE The functionality of `pcoords_to_coords_1d` needs to be generalized
# for higher order cells.
def points_of_cells(self, *args, points=None, cells=None, target='global',
rng=None, flatten=False, **kwargs):
if isinstance(target, str):
assert target.lower() in ['global', 'g']
else:
raise NotImplementedError
topo = kwargs.get('topo', self.nodes.to_numpy())
coords = kwargs.get('coords', self.pointdata.x.to_numpy())
ecoords = points_of_cells(coords, topo)
if points is None and cells is None:
return ecoords
# points or cells is not None
if cells is not None:
cells = atleast1d(cells)
conds = np.isin(cells, self.id.to_numpy())
cells = atleast1d(cells[conds])
if len(cells) == 0:
return {}
ecoords = ecoords[cells]
topo = topo[cells]
else:
cells = np.s_[:]
if points is None:
points = np.array(self.lcoords()).flatten()
rng = [-1, 1]
else:
rng = np.array([0, 1]) if rng is None else np.array(rng)
points, rng = to_range(points, source=rng, target=[0, 1]).flatten(), [0, 1]
datacoords = pcoords_to_coords_1d(points, ecoords) # (nE * nP, nD)
if not flatten:
nE = ecoords.shape[0]
nP = points.shape[0]
datacoords = datacoords.reshape(nE, nP, datacoords.shape[-1]) # (nE, nP, nD)
# values : (nE, nP, nDOF, nRHS) or (nE, nP * nDOF, nRHS)
if isinstance(cells, slice):
# results are requested on all elements
data = datacoords
elif isinstance(cells, Iterable):
data = {c : datacoords[i] for i, c in enumerate(cells)}
else:
raise TypeError("Invalid data type <> for cells.".format(type(cells)))
return data
class PolyCell2d(PolyCell):
NDIM = 2
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class PolyCell3d(PolyCell):
NDIM = 3
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
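# Note: NDIM records the topological dimension of the cell (1 = line-like,
# 2 = surface, 3 = solid); NNODE is left None on the base class and is
# expected to be filled in by concrete element subclasses.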
| 33.192 | 89 | 0.577489 | 3,721 | 0.896843 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.093275 |
c2221b8872b6350f052296a7af3215fb075a5795 | 30 | py | Python | src/python/src/rmq/items/__init__.py | halimov-oa/scrapy-boilerplate | fe3c552fed26bedb0618c245ab923aa34a89ac9d | [
"MIT"
]
| 34 | 2019-12-13T10:31:39.000Z | 2022-03-09T15:59:07.000Z | src/python/src/rmq/items/__init__.py | halimov-oa/scrapy-boilerplate | fe3c552fed26bedb0618c245ab923aa34a89ac9d | [
"MIT"
]
| 49 | 2020-02-25T19:41:09.000Z | 2022-02-27T12:05:25.000Z | src/python/src/rmq/items/__init__.py | halimov-oa/scrapy-boilerplate | fe3c552fed26bedb0618c245ab923aa34a89ac9d | [
"MIT"
]
| 23 | 2019-12-23T15:19:42.000Z | 2022-03-09T16:00:15.000Z | from .rmq_item import RMQItem
| 15 | 29 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c22246e42a11a496e2843439e4ad4abd332a1d57 | 968 | py | Python | softlearning/environments/mujoco_safety_gym/envs/fetch/slide.py | anyboby/mbpo | 98b75cb4cb13a2640fce1fbe1ddef466b864342e | [
"MIT"
]
| 5 | 2020-02-12T17:09:09.000Z | 2021-09-29T16:06:40.000Z | softlearning/environments/mujoco_safety_gym/envs/fetch/slide.py | anyboby/mbpo | 98b75cb4cb13a2640fce1fbe1ddef466b864342e | [
"MIT"
]
| 10 | 2020-08-31T02:50:02.000Z | 2022-02-09T23:36:43.000Z | softlearning/environments/mujoco_safety_gym/envs/fetch/slide.py | anyboby/mbpo | 98b75cb4cb13a2640fce1fbe1ddef466b864342e | [
"MIT"
]
| 2 | 2022-03-15T01:45:26.000Z | 2022-03-15T06:46:47.000Z | import os
import numpy as np
from gym import utils
from mujoco_safety_gym.envs import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join('fetch', 'slide.xml')
class FetchSlideEnv(fetch_env.FetchEnvNew, utils.EzPickle):
def __init__(self, reward_type='sparse'):
initial_qpos = {
'robot0:slide0': 0.05,
'robot0:slide1': 0.48,
'robot0:slide2': 0.0,
'object0:joint': [1.7, 1.1, 0.41, 1., 0., 0., 0.],
}
fetch_env.FetchEnvNew.__init__(
self, MODEL_XML_PATH, has_object=True, block_gripper=True, n_substeps=20,
gripper_extra_height=-0.02, target_in_the_air=False, target_offset=np.array([0.4, 0.0, 0.0]),
obj_range=0.1, target_range=0.3, distance_threshold=0.05, additional_objects=False,
            number_of_objects=0, initial_qpos=initial_qpos, reward_type=reward_type)
utils.EzPickle.__init__(self)
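# Example construction (a sketch; all kwargs other than reward_type are fixed
# in __init__ above, and reset() is the usual gym entry point):
# env = FetchSlideEnv(reward_type='sparse')
# obs = env.reset()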
| 37.230769 | 105 | 0.66219 | 760 | 0.785124 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.143595 |
c222e22c9b1710ce4667ef563dce67f96dc33915 | 163 | py | Python | packages/raspi_ip/setup.py | atoy322/PiDrive | 8758f4b5dae4a0187ce0a769c4146628c88015de | [
"MIT"
]
| null | null | null | packages/raspi_ip/setup.py | atoy322/PiDrive | 8758f4b5dae4a0187ce0a769c4146628c88015de | [
"MIT"
]
| 2 | 2021-09-21T06:32:58.000Z | 2021-09-22T23:15:18.000Z | packages/raspi_ip/setup.py | atoy322/PiDrive | 8758f4b5dae4a0187ce0a769c4146628c88015de | [
"MIT"
]
| null | null | null | from setuptools import setup, find_packages
setup(
name="raspi_ip",
version="1.0.0",
author="atoy322",
description="",
long_description=""
)
| 14.818182 | 43 | 0.644172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.184049 |
c223b2854d4053fb4e412891092b11e58745c844 | 339 | py | Python | kiteconnect/__version__.py | AnjayGoel/pykiteconnect | e33833a86d3e6483f2cff1be8bed74f40d5771c5 | [
"MIT"
]
| 1 | 2022-02-05T08:43:37.000Z | 2022-02-05T08:43:37.000Z | kiteconnect/__version__.py | AnjayGoel/pykiteconnect | e33833a86d3e6483f2cff1be8bed74f40d5771c5 | [
"MIT"
]
| null | null | null | kiteconnect/__version__.py | AnjayGoel/pykiteconnect | e33833a86d3e6483f2cff1be8bed74f40d5771c5 | [
"MIT"
]
| null | null | null | __title__ = "open_kite_connect"
__description__ = "Fork of the official Kite Connect python client, allowing free access to the api."
__url__ = "https://kite.trade"
__download_url__ = "https://github.com/AnjayGoel/pykiteconnect"
__version__ = "4.0.0"
__author__ = "Anjay Goel"
__author_email__ = "[email protected]"
__license__ = "MIT"
| 37.666667 | 101 | 0.766962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.625369 |
c223dd7e30b36ebfa0f41bf3e5a06ae1a6e0b5cd | 1,679 | py | Python | CrsData/pipelines.py | DivineEnder/CrsData | cd0cf14e79b4a3bbf7347b8612a5b67e2a185208 | [
"MIT"
]
| null | null | null | CrsData/pipelines.py | DivineEnder/CrsData | cd0cf14e79b4a3bbf7347b8612a5b67e2a185208 | [
"MIT"
]
| null | null | null | CrsData/pipelines.py | DivineEnder/CrsData | cd0cf14e79b4a3bbf7347b8612a5b67e2a185208 | [
"MIT"
]
| null | null | null | # @Author: DivineEnder
# @Date: 2018-03-08 22:24:45
# @Email: [email protected]
# @Last modified by: DivineEnder
# @Last modified time: 2018-03-11 01:25:41
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from dotenv import load_dotenv, find_dotenv
from Utils import connection_utils as glc
from psycopg2.extensions import AsIs
import os
class CrsdataPipeline(object):
def open_spider(self, spider):
# Load the environment file
# This load makes sure passwords are not stored on github
# Basically here for security reasons
load_dotenv(find_dotenv())
# Generate a new database connection (will default to credentials loaded from the env file)
self.connection = glc.open_new_connection(host = os.environ.get("DBHOST"), port = os.environ.get("DBPORT"), user = os.environ.get("DBUSER"), password = os.environ.get("DBPASS"), dbname = os.environ.get("DBNAME"))
# Generate a new cursor from the previously generated connection
self.cursor = glc.open_new_cursor(self.connection)
def process_item(self, course, spider):
        # input()  # debugging pause, intentionally disabled
# Insert course into database
self.cursor.execute("""INSERT INTO courses (%s) VALUES %s ON CONFLICT (crn) DO NOTHING""",(
AsIs(','.join(course.keys())),
tuple([course[field] for field in course.keys()])
))
# Commit all changes to the database
self.connection.commit()
return course
def close_spider(self, spider):
# Close DB connection and cursor
glc.close_cursor(self.cursor)
glc.close_connection(self.connection)
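# Example settings.py entry enabling this pipeline (the dotted path is an
# assumption based on this module's location; the priority 300 is arbitrary):
# ITEM_PIPELINES = {"CrsData.pipelines.CrsdataPipeline": 300}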
| 34.265306 | 214 | 0.731983 | 1,181 | 0.703395 | 0 | 0 | 0 | 0 | 0 | 0 | 876 | 0.521739 |
c224e7c1cff16812960fb4cd9afab8ab99e06afc | 2,227 | py | Python | index_to_csv.py | grenzi/photoindexer | d10b3b6f347168706dc9c2673a29102fd73f31e1 | [
"Apache-2.0"
]
| null | null | null | index_to_csv.py | grenzi/photoindexer | d10b3b6f347168706dc9c2673a29102fd73f31e1 | [
"Apache-2.0"
]
| null | null | null | index_to_csv.py | grenzi/photoindexer | d10b3b6f347168706dc9c2673a29102fd73f31e1 | [
"Apache-2.0"
]
| null | null | null | import os
import json
from enum import Enum
from datetime import datetime,date
import logging
import pathlib
from tqdm import tqdm
from datastructures import Volume, IndexedFile,load_index_if_exists, save_index
from os import listdir
from os.path import isfile, join
import itertools
import csv
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
###############################################################################
index_dir = os.path.join(os.getcwd(), 'index')
logger.info('finding index files')
indexfiles = [f for f in listdir(index_dir) if isfile(join(index_dir, f)) and f.endswith('json')]
columns = ['VolumeName', 'VolumeSerialNumber', 'Directory', 'Name', 'InodeNumber', 'Modified On', 'Created On', 'SHA256']
exif_columns=set()
logger.info('parsing index files')
#Pass 1 = collect keys
for index_file in indexfiles:
index = load_index_if_exists(os.path.join(index_dir, index_file))
for vol in index:
for ixf in vol.files:
if ixf.EXIF is not None:
for i in ixf.EXIF.keys():
exif_columns.add(i)
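# Two passes are needed because the CSV header (the fixed columns plus the
# union of all EXIF keys) is only known after every index file has been
# scanned once.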
logger.info('writing csv')
#Pass 2 = write header
with open(os.path.join(os.getcwd(), 'index.csv'), mode='w', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerow(columns+list(exif_columns))
#and now rows
for index_file in indexfiles:
index = load_index_if_exists(os.path.join(index_dir, index_file))
for vol in index:
for ixf in vol.files:
row = [
vol.VolumeName,
vol.VolumeSerialNumber,
ixf.Directory,
ixf.Name,
ixf.st_ino,
ixf.st_mtime.strftime("%c"),
ixf.st_ctime.strftime("%c"),
ixf.SHA256
]
for ec in exif_columns:
                    row.append(ixf.EXIF.get(ec) if ixf.EXIF is not None else None) # EXIF can be None, as pass 1 shows
writer.writerow(row) | 35.349206 | 122 | 0.58599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.173776 |
c2253045dcaa56a5991a62320574be6662b1c519 | 1,056 | py | Python | tests/test_wrapper.py | waysup/Jike-Metro | b8ead80dddd5d695784c5587edfd8df87c55a4e6 | [
"MIT"
]
| 193 | 2018-04-04T02:27:51.000Z | 2022-03-14T03:26:44.000Z | tests/test_wrapper.py | BeiFenKu/Jike-Metro | e97fd0a751dca28a39d0e9fb94fbd696d5ee07b3 | [
"MIT"
]
| 16 | 2018-04-04T05:58:15.000Z | 2021-01-08T02:56:57.000Z | tests/test_wrapper.py | BeiFenKu/Jike-Metro | e97fd0a751dca28a39d0e9fb94fbd696d5ee07b3 | [
"MIT"
]
| 24 | 2018-04-06T09:34:58.000Z | 2021-03-02T02:10:07.000Z | import unittest
from collections import namedtuple
from jike.objects.wrapper import *
class TestWrapper(unittest.TestCase):
def setUp(self):
self.Test = namedtuple('Test', ['id', 'content', 'other', 'none'])
def test_repr_namedtuple(self):
self.Test.__repr__ = repr_namedtuple
test = self.Test(**{'id': 'a', 'content': 'b', 'other': 'c', 'none': None})
self.assertEqual(repr(test), 'Test(id=a, content=b)')
def test_str_namedtuple(self):
self.Test.__str__ = str_namedtuple
test = self.Test(**{'id': 'a', 'content': 'b', 'other': 'c', 'none': None})
self.assertEqual(str(test), 'Test(id=a, content=b, other=c)')
def test_namedtuple_with_defaults(self):
Test = namedtuple_with_defaults(self.Test)
test = Test(**{'id': 'a', 'content': 'b', 'other': 'c'})
self.assertEqual(test.id, 'a')
self.assertEqual(test.content, 'b')
self.assertEqual(test.other, 'c')
self.assertIsNone(test.none)
if __name__ == '__main__':
unittest.main()
| 34.064516 | 83 | 0.61553 | 918 | 0.869318 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.194129 |
c225d7cd38555d8a71f34fd96c413aa41e8e84be | 10,125 | py | Python | storm_control/hal4000/illumination/illuminationChannelUI.py | shiwei23/STORM6 | 669067503ebd164b575ce529fcc4a9a3f576b3d7 | [
"MIT"
]
| 47 | 2015-02-11T16:05:54.000Z | 2022-03-26T14:13:12.000Z | storm_control/hal4000/illumination/illuminationChannelUI.py | shiwei23/STORM6 | 669067503ebd164b575ce529fcc4a9a3f576b3d7 | [
"MIT"
]
| 110 | 2015-01-30T03:53:41.000Z | 2021-11-03T15:58:44.000Z | storm_control/hal4000/illumination/illuminationChannelUI.py | shiwei23/STORM6 | 669067503ebd164b575ce529fcc4a9a3f576b3d7 | [
"MIT"
]
| 61 | 2015-01-09T18:31:27.000Z | 2021-12-21T13:07:51.000Z | #!/usr/bin/env python
"""
The various ChannelUI classes.
Hazen 04/17
"""
import os
from PyQt5 import QtCore, QtWidgets
def loadStyleSheet(name):
text = ""
with open(os.path.join(os.path.dirname(__file__), name)) as fp:
text += fp.read()
return text
class ChannelUI(QtWidgets.QFrame):
"""
A QWidget for displaying the UI elements associated with
an illumination channel.
"""
onOffChange = QtCore.pyqtSignal(object)
powerChange = QtCore.pyqtSignal(int)
def __init__(self, name = "", color = None, **kwds):
super().__init__(**kwds)
self.enabled = True
# FIXME: These styles could be better..
self.disabled_style = loadStyleSheet("disabled_style.qss")
self.enabled_style = "QFrame { background-color: rgb(" + color + ");}\n"
self.enabled_style += loadStyleSheet("enabled_style.qss")
self.setFixedWidth(50)
self.setLineWidth(2)
self.setStyleSheet(self.enabled_style)
self.main_layout = QtWidgets.QVBoxLayout(self)
self.main_layout.setContentsMargins(0,0,0,0)
self.main_layout.setSpacing(1)
# Text label.
self.wavelength_label = QtWidgets.QLabel(self)
self.wavelength_label.setText(name)
self.wavelength_label.setAlignment(QtCore.Qt.AlignCenter)
self.main_layout.addWidget(self.wavelength_label)
# Container for the power slider (if any).
self.slider_widget = QtWidgets.QWidget(self)
#
# FIXME: This is a mistake if none of the channels have a power
# slider.
#
self.slider_widget.setMinimumHeight(150)
self.slider_layout = QtWidgets.QVBoxLayout(self.slider_widget)
self.slider_layout.setContentsMargins(0,0,0,0)
self.slider_layout.setSpacing(1)
self.main_layout.addWidget(self.slider_widget)
# Power on/off radio button.
self.on_off_button = QtWidgets.QRadioButton(self)
self.main_layout.addWidget(self.on_off_button)
self.main_layout.setAlignment(self.on_off_button, QtCore.Qt.AlignCenter)
# Spacer at the bottom.
self.spacer_item = QtWidgets.QSpacerItem(1, 1,
QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self.main_layout.addItem(self.spacer_item)
# Connect signals
self.on_off_button.clicked.connect(self.handleOnOffChange)
def disableChannel(self):
"""
Disables all the UI elements of the channel.
"""
self.setOnOff(False)
self.setStyleSheet(self.disabled_style)
self.setFrameShadow(QtWidgets.QFrame.Sunken)
self.on_off_button.setCheckable(False)
self.enabled = False
def enableChannel(self, was_on = False):
"""
Enables all the UI elements of the channel.
"""
self.setStyleSheet(self.enabled_style)
self.setFrameShadow(QtWidgets.QFrame.Raised)
self.on_off_button.setCheckable(True)
self.setOnOff(was_on)
self.enabled = True
def getAmplitude(self):
if self.on_off_button.isChecked():
return 1.0
else:
return 0.0
def handleOnOffChange(self, on_off):
"""
Called when the on/off radio button is pressed.
"""
self.onOffChange.emit(on_off)
def isEnabled(self):
return self.enabled
def isOn(self):
return self.on_off_button.isChecked()
def newSettings(self, on, power):
self.setOnOff(on)
def remoteIncPower(self, power_inc):
pass
def remoteSetPower(self, new_power):
if self.enabled:
if (new_power > 0.5):
self.setOnOff(True)
else:
self.setOnOff(False)
def setOnOff(self, state):
if (state != self.on_off_button.isChecked()):
self.on_off_button.setChecked(state)
self.handleOnOffChange(state)
def setupButtons(self, button_data):
pass
def startFilm(self):
self.on_off_button.setEnabled(False)
def stopFilm(self):
self.on_off_button.setEnabled(True)
class ChannelUIAdjustable(ChannelUI):
"""
A QWidget for displaying the UI elements associated with
an adjustable illumination channel.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.buttons = []
self.max_amplitude = 1
self.min_amplitude = 0
# Current power label.
self.power_label = QtWidgets.QLabel(self.slider_widget)
self.power_label.setAlignment(QtCore.Qt.AlignCenter)
self.slider_layout.addWidget(self.power_label)
# Slider for controlling the power.
self.powerslider = QtWidgets.QSlider(self.slider_widget)
self.powerslider.setOrientation(QtCore.Qt.Vertical)
self.powerslider.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Expanding)
self.slider_layout.addWidget(self.powerslider)
# FIXME: If I knew what I was doing I should be able to do this
# using the stylesheet?
self.powerslider.setFixedWidth(25)
self.slider_layout.setAlignment(self.powerslider, QtCore.Qt.AlignHCenter)
def configureSlider(self, minimum, maximum):
"""
This is called once we have obtained amplitude functionality
that backs the slider. The functionality sets the range
for the slider.
"""
self.max_amplitude = maximum
self.min_amplitude = minimum
self.powerslider.setMaximum(maximum)
self.powerslider.setMinimum(minimum)
page_step = 0.1 * (maximum - minimum)
if (page_step > 1.0):
self.powerslider.setPageStep(page_step)
self.powerslider.setSingleStep(1)
#
# Why 2? We need the initial value to be a number that is not
# the default power, otherwise the slider text won't get updated
# at start-up.
#
self.setAmplitude(2)
self.powerslider.valueChanged.connect(self.handleAmplitudeChange)
def disableChannel(self):
super().disableChannel()
self.powerslider.setEnabled(False)
for button in self.buttons:
button.setEnabled(False)
def enableChannel(self, was_on = False):
super().enableChannel(was_on)
self.powerslider.setEnabled(True)
for button in self.buttons:
button.setEnabled(True)
def getAmplitude(self):
return self.powerslider.value()
def handleAmplitudeChange(self, amplitude):
self.powerChange.emit(amplitude)
def newSettings(self, on, power):
self.setOnOff(on)
self.setAmplitude(power)
def remoteIncPower(self, power_inc):
if self.enabled:
self.setAmplitude(self.powerslider.value() + power_inc)
def remoteSetPower(self, new_power):
if self.enabled:
self.setAmplitude(new_power)
def setAmplitude(self, amplitude):
if (amplitude != self.powerslider.value()):
self.powerslider.setValue(amplitude)
def setupButtons(self, button_data):
# Remove spacer at the end.
self.main_layout.removeItem(self.spacer_item)
# Make sure we have enough buttons.
while (len(self.buttons) < len(button_data)):
new_button = PowerButton(parent = self)
new_button.powerChange.connect(self.setAmplitude)
self.layout().addWidget(new_button)
self.buttons.append(new_button)
#self.cur_y += 22
# Hide all the buttons.
for button in self.buttons:
button.hide()
# Set text and value of the buttons we'll use & show them.
amp_range = float(self.max_amplitude - self.min_amplitude)
for i in range(len(button_data)):
self.buttons[i].setText(button_data[i][0])
self.buttons[i].setValue(int(round(button_data[i][1] * amp_range + self.min_amplitude)))
self.buttons[i].show()
# Add spacer again.
self.main_layout.addItem(self.spacer_item)
# Resize based on number of visible buttons.
#self.setFixedSize(48, 248 + 22 * len(button_data))
def updatePowerText(self, new_text):
self.power_label.setText(new_text)
class PowerButton(QtWidgets.QPushButton):
"""
A push button specialized for amplitude / power control.
"""
powerChange = QtCore.pyqtSignal(int)
def __init__(self, **kwds):
super().__init__(**kwds)
self.value = 0.0
self.clicked.connect(self.handleClicked)
def handleClicked(self, boolean):
self.powerChange.emit(self.value)
def setValue(self, value):
self.value = value
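# Example construction (a sketch; needs a running QApplication, and the
# wavelength label "750" and RGB triple are illustrative -- `color` is spliced
# into an "rgb(...)" stylesheet string by ChannelUI above):
# channel = ChannelUIAdjustable(name = "750", color = "255, 0, 0")
# channel.configureSlider(0, 100)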
#
# The MIT License
#
# Copyright (c) 2017 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 32.041139 | 100 | 0.645136 | 8,704 | 0.859654 | 0 | 0 | 0 | 0 | 0 | 0 | 2,737 | 0.270321 |
c229b5ef4f038beb67f6eb13b6306e08e27df0c9 | 4,051 | py | Python | 3-compressor/compress.py | JamesG3/Search-Engine | 790f32c7833eb82d9b01b84af634e650ef7a9e75 | [
"BSD-2-Clause"
]
| null | null | null | 3-compressor/compress.py | JamesG3/Search-Engine | 790f32c7833eb82d9b01b84af634e650ef7a9e75 | [
"BSD-2-Clause"
]
| null | null | null | 3-compressor/compress.py | JamesG3/Search-Engine | 790f32c7833eb82d9b01b84af634e650ef7a9e75 | [
"BSD-2-Clause"
]
| null | null | null | import sys
import struct
from io import FileIO, BufferedWriter
import S9Compressor as S9
BLOCKSIZE = (64*1024) // 4 # number of Ints per 64 KiB block
LexiPos = 0 # record the current position for new lexicon writing
lexiconBuffer = []
IIBuffer = []
WriteThreshold = 0
def docidPrepare(docidList): # delta-encode: keep the first docID, then store gaps
    res = [docidList[0]]
    for i in range(1, len(docidList)):
res.append(docidList[i] - docidList[i-1])
return res
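# e.g. docidPrepare([3, 7, 12]) -> [3, 4, 5]: the first docID is kept verbatim
# and every later entry stores the gap to its predecessor.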
def blockPacker(chunks, chunksInfo):
TotalSize = 0 # total size for this term's inverted index
blocks = [] # [[metadata, block], [metadata, block], ...]
metadata = [0] # [metadata size, [last docid, chunk size], [last docid, chunk size], ...]
currentTotalSize = 1 # number of Int, initialize 1 because of the metadata size
block = []
    for i in range(len(chunksInfo)):
if (currentTotalSize + 2 + chunksInfo[i][0]) <= BLOCKSIZE:
currentTotalSize += (2 + chunksInfo[i][0])
metadata[0] += 1
metadata.append(chunksInfo[i][::-1])
block.append(chunks[i])
else: # when current block is full
blocks.append([metadata,block]) # add block to blocks
TotalSize += currentTotalSize
metadata = [0] # initialize
currentTotalSize = 1
block = []
currentTotalSize += (2 + chunksInfo[i][0])
metadata[0] += 1
metadata.append(chunksInfo[i][::-1])
block.append(chunks[i])
blocks.append([metadata,block])
TotalSize += currentTotalSize
return TotalSize, blocks
def compress(docidList, freqList):
docLen = len(docidList)
chunks = [] # [chunk, chunk, ...]
chunksInfo = [] # [[chunksize, lastdocId], ...]
    for i in range(docLen//128 + 1):
tmpdocidList = docidList[i*128 : (i+1)*128]
tmpfreqList = freqList[i*128 : (i+1)*128]
if len(tmpdocidList) == 0:
break
lastdocId = tmpdocidList[-1]
tmpLen = len(tmpdocidList)
tmpdocidList = docidPrepare(tmpdocidList)
tmpList = tmpdocidList + tmpfreqList
chunk = S9.encoder(tmpList, tmpLen * 2)
chunks.append(chunk)
chunksInfo.append([len(chunk), lastdocId])
return blockPacker(chunks, chunksInfo)
def writeLexicon():
# outLexFile = open("newLexicon.txt", 'a')
with BufferedWriter(FileIO("newLexicon.txt", "a")) as outLexFile:
for item in lexiconBuffer:
outLexFile.write(item[0] + ':' + str(item[1]) + ',' + str(item[2])+ ',' + str(item[3]))
outLexFile.write('\n')
del lexiconBuffer[:]
outLexFile.close()
return
def writeNewII():
# newII = open("InvertIndex.txt", 'ab')
with BufferedWriter(FileIO("InvertIndex.txt", "ab")) as newII:
def writeByte(Integer):
return newII.write(struct.pack('I', Integer)[::-1])
for blocks in IIBuffer:
for block in blocks:
writeByte(block[0][0]) # write size of metadata header
for number in [item for sublist in block[0][1:] for item in sublist]: # flatten the tmpList
writeByte(number)
for number in [item for sublist in block[1] for item in sublist]: # flatten the list
writeByte(number)
del IIBuffer[:]
newII.close()
return
def main():
global LexiPos
global WriteThreshold
LexFile = open("Lexicon.txt", 'r')
iiFile = open("origInvertedIndex.txt","rb")
for line in LexFile:
docidList = []
freqList = []
lexInfo = line.split(":")
IndexInfo = lexInfo[-1].split(",")
IndexInfo = [int(item) for item in IndexInfo] # [head, tail, number of docs]
iiFile.seek(4*IndexInfo[0],0)
        for i in range(IndexInfo[-1]):
docidList.append(struct.unpack('<I', iiFile.read(4)[::-1])[0])
freqList.append(struct.unpack('<I', iiFile.read(4)[::-1])[0])
compressedII = compress(docidList, freqList) # compress data into blocks
TotalSize, blocks = compressedII[0], compressedII[1]
IIBuffer.append(blocks)
lexiconBuffer.append([lexInfo[0], LexiPos, LexiPos+TotalSize, IndexInfo[-1]])
LexiPos += TotalSize
WriteThreshold += TotalSize
if WriteThreshold > 10000000:
print "writing..."
WriteThreshold = 0 # reset WriteThreshold
writeLexicon()
writeNewII()
writeLexicon()
writeNewII()
iiFile.close()
LexFile.close()
main() | 26.827815 | 96 | 0.674155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 809 | 0.199704 |
c22ad6cee4570624757675e6c7ad19a18a8249f2 | 5,028 | py | Python | DataProcess/ultimate_subimage.py | EmmaAlexander/possum-tools | 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | [
"MIT"
]
| 5 | 2021-11-18T13:27:30.000Z | 2021-12-05T00:15:33.000Z | DataProcess/ultimate_subimage.py | EmmaAlexander/possum-tools | 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | [
"MIT"
]
| null | null | null | DataProcess/ultimate_subimage.py | EmmaAlexander/possum-tools | 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | [
"MIT"
]
| null | null | null | #CASA script to create cutouts of fits cubes
directoryA = '/Volumes/TARDIS/Work/askap/'
directoryB = '/Volumes/NARNIA/pilot_cutouts/'
import os #needed for the os.system() calls at the end of the loop
import numpy as np
#imsubimage and exportfits below are CASA tasks, available when this script
#runs inside a CASA session
sources=np.loadtxt('/Users/emma/GitHub/possum-tools/DataProcess/pilot_sources.txt',dtype='str')
for i in range(0,sources.shape[0]):
objectname=sources[i,0]
POSSUMSB=sources[i,3]
EMUSB=sources[i,4]
ra=sources[i,1]
dec=sources[i,2]
sourcecentre=ra+','+dec
fov=sources[i,6]#arcsec
print(objectname)
region='centerbox[['+sourcecentre+'], ['+fov+'arcsec, '+fov+'arcsec]]'
possum_outfile=directoryB+objectname+'/'+objectname+'_POSSUM.fits'
emu_outfile=directoryB+objectname+'/'+objectname+'_EMU.fits'
#POSSUM
if POSSUMSB == '5038':
#this is the Early Science data
possum_cont_filename = '/Volumes/NARNIA/PawseySync/DRAGN_1_0p8_A/DRAGN_1_0p8_A/image.i.SB5038.cont.restored.fits'
else:
possum_cont_filename = directoryA +'fullfields/image.i.SB'+POSSUMSB+'.cont.taylor.0.restored.fits'
if POSSUMSB == '10035':
print('Skipping POSSUM: bad SB10035')
else:
imsubimage(imagename=possum_cont_filename,outfile='possum_cont_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='possum_cont_temp',fitsimage=possum_outfile,overwrite=True)
#cubes
i_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.i.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
q_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.q.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
u_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.u.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
imsubimage(imagename=i_filename,outfile='i_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_filename,outfile='q_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_filename,outfile='u_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_im_temp',fitsimage=objectname+'_POSSUM_i.fits',overwrite=True)
exportfits(imagename='q_im_temp',fitsimage=objectname+'_POSSUM_q.fits',overwrite=True)
exportfits(imagename='u_im_temp',fitsimage=objectname+'_POSSUM_u.fits',overwrite=True)
#EMU
if EMUSB != 'NaN':
if EMUSB=='10083':
i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10083.contcube.conv.fits'
q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10083.contcube.conv.fits'
u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10083.contcube.conv.fits'
cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10083.cont.taylor.0.restored.conv.fits'
imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True)
exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True)
exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True)
exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True)
elif EMUSB=='10635':
i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10635.contcube.v2.conv.fits'
q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10635.contcube.v2.conv.fits'
u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10635.contcube.v2.conv.fits'
cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10635.cont.taylor.0.restored.fits'
imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True)
exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True)
exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True)
exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True)
else:
#no cubes
emu_filename= directoryA +'fullfields/image.i.SB'+EMUSB+'.cont.taylor.0.restored.fits'
imsubimage(imagename=emu_filename,outfile='emu_cont_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='emu_cont_temp',fitsimage=emu_outfile,overwrite=True)
os.system("rm -r emu_cont_temp")
#tidy up
os.system("rm -r *_temp")
os.system("mv *{}* {}/".format(objectname,objectname))
| 57.136364 | 134 | 0.793755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,054 | 0.408512 |
c22b8b9f23f5fc7b3cfeba52a978e7ba6441ed61 | 92 | py | Python | cv_comparison_slider_window/__init__.py | Kazuhito00/cv-comparison-slider-window | 215cd91c1832b419af9fb99b484ce8c2a9e79a37 | [
"MIT"
]
| 2 | 2020-11-14T09:09:02.000Z | 2020-11-14T10:54:57.000Z | cv_comparison_slider_window/__init__.py | Kazuhito00/cv-comparison-slider-window | 215cd91c1832b419af9fb99b484ce8c2a9e79a37 | [
"MIT"
]
| null | null | null | cv_comparison_slider_window/__init__.py | Kazuhito00/cv-comparison-slider-window | 215cd91c1832b419af9fb99b484ce8c2a9e79a37 | [
"MIT"
]
| null | null | null | from cv_comparison_slider_window.cv_comparison_slider_window import CvComparisonSliderWindow | 92 | 92 | 0.956522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c22bb60421f79ce3a2d29c04e35af61e67fb09d3 | 1,207 | py | Python | telegram_ecommerce/filters/decorators.py | Anonylions/telegram_ecommerce | f5382886bbebf607c735e2f451774c56df8d6011 | [
"MIT"
]
| 10 | 2020-11-20T20:55:52.000Z | 2022-02-10T20:25:45.000Z | telegram_ecommerce/filters/decorators.py | Anonylions/telegram_ecommerce | f5382886bbebf607c735e2f451774c56df8d6011 | [
"MIT"
]
| 1 | 2022-02-16T10:28:18.000Z | 2022-02-16T10:35:31.000Z | telegram_ecommerce/filters/decorators.py | Anonylions/telegram_ecommerce | f5382886bbebf607c735e2f451774c56df8d6011 | [
"MIT"
]
| 8 | 2021-05-01T01:13:09.000Z | 2022-03-13T14:00:01.000Z | from ..language import get_text
from ..database.query import (
user_exist,
is_admin)
END = -1
def warning_the_user_that_already_have_an_account(update, context):
text = get_text("user_have_account", context)
update.message.reply_text(text)
return END
def warning_the_user_that_he_dont_have_an_account(update, context):
text = get_text("user_dont_have_account", context)
update.message.reply_text(text)
return END
def execute_if_user_exist(callback):
def execute_warning_if_user_dont_exist(update, context):
user_id = update.effective_user.id
if user_exist(user_id):
return callback(update, context)
else:
return warning_the_user_that_he_dont_have_an_account(
update, context)
return execute_warning_if_user_dont_exist
def execute_if_user_dont_exist(callback):
def execute_warning_if_user_exist(update, context):
user_id = update.effective_user.id
if user_exist(user_id):
return warning_the_user_that_already_have_an_account(
update, context)
else:
return callback(update, context)
return execute_warning_if_user_exist
| 26.822222 | 67 | 0.71831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.035626 |
c22fbe148dfbc37e36952003c17c1b1180d11337 | 63 | py | Python | albow/demo/openGL/__init__.py | hasii2011/albow-python-3 | 04b9d42705b370b62f0e49d10274eebf3ac54bc1 | [
"MIT"
]
| 6 | 2019-04-30T23:50:39.000Z | 2019-11-04T06:15:02.000Z | albow/demo/openGL/__init__.py | hasii2011/albow-python-3 | 04b9d42705b370b62f0e49d10274eebf3ac54bc1 | [
"MIT"
]
| 73 | 2019-05-12T18:43:14.000Z | 2021-04-13T19:19:03.000Z | albow/demo/openGL/__init__.py | hasii2011/albow-python-3 | 04b9d42705b370b62f0e49d10274eebf3ac54bc1 | [
"MIT"
]
| null | null | null | """"
This package contains the OpenGL demonstration classes
""" | 21 | 54 | 0.761905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 1 |
c2306615617cec84564c5dcb8ee8a144809be27e | 1,640 | py | Python | openhab2/scripts/readNilan.py | starze/openhab2 | e4eeeecd829cdf286372067bd61561e63fed6e1a | [
"MIT"
]
| 10 | 2017-04-04T08:28:54.000Z | 2021-02-24T04:36:07.000Z | openhab2/scripts/readNilan.py | starze/openhab2 | e4eeeecd829cdf286372067bd61561e63fed6e1a | [
"MIT"
]
| 2 | 2017-04-18T13:33:12.000Z | 2018-06-05T21:27:18.000Z | openhab2/scripts/readNilan.py | starze/openhab2 | e4eeeecd829cdf286372067bd61561e63fed6e1a | [
"MIT"
]
| 7 | 2017-04-17T18:02:19.000Z | 2020-09-25T21:28:08.000Z | #!/usr/bin/env python3
# -*- coding: ISO-8859-1 -*-
# https://github.com/starze/openhab2
# https://github.com/roggmaeh/nilan-openhab
import minimalmodbus
import serial
import os, sys
import csv
import httplib2
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 30, mode='rtu') # port name, slave address (in decimal)
instrument.serial.baudrate = 19200 # Baud
instrument.serial.bytesize = 8
instrument.serial.parity = serial.PARITY_EVEN
instrument.serial.stopbits = 1
instrument.serial.timeout = 2 # seconds
#instrument.debug = True
h = httplib2.Http()
with open('nilan_modbus.csv') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for row in reader:
if row['Register Type'] == "Input":
fc = 4
elif row['Register Type'] == "Holding":
            fc = 3
        else:
            continue # unknown register type: skip the row, otherwise fc would be undefined
if row['Unit'] == "text" or row['Unit'] == "ascii":
strRet = instrument.read_string(int(row['Address']), numberOfRegisters=1, functioncode=fc)
lst = list(strRet)
strRet = lst[1] + lst[0]
elif row['Scale'] == "100":
strRet = instrument.read_register(int(row['Address']), numberOfDecimals=2, functioncode=fc)
else:
strRet = instrument.read_register(int(row['Address']), numberOfDecimals=0, functioncode=fc)
if row['Unit'] == "%" or row['Unit'] == "°C":
print("%s: %s %s" % (row['Name'], strRet, row['Unit']))
h.request("http://localhost:8080/rest/items/" + row['Name'] + "/state", "PUT", body=str(strRet))
else:
print("%s: %s" % (row['Name'], strRet))
h.request("http://localhost:8080/rest/items/" + row['Name'] + "/state", "PUT", body=str(strRet))
| 34.166667 | 109 | 0.675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 523 | 0.318708 |
c230b7732d9a3dd108e45e13abd94ad053baac7e | 2,316 | py | Python | face_signin/prepare_training.py | sribs/FaceRecognition | 68284173195d55f32a353fe3d78a53c25fbf1363 | ["Apache-2.0"] | null | null | null | face_signin/prepare_training.py | sribs/FaceRecognition | 68284173195d55f32a353fe3d78a53c25fbf1363 | ["Apache-2.0"] | null | null | null | face_signin/prepare_training.py | sribs/FaceRecognition | 68284173195d55f32a353fe3d78a53c25fbf1363 | ["Apache-2.0"] | null | null | null |
import cv2
import numpy as np
import os
def prepare_training_data(data_folder_path):
#------STEP-1--------
#get the directories (one directory for each subject) in data folder
dirs = sorted(os.listdir(data_folder_path))
#print(dirs)
faces = []
labels = []
for label,count in zip(dirs,range(len(dirs))):
subject_dir_path = data_folder_path+"/"+label
for image_name in os.listdir(subject_dir_path):
#ignore system files like .DS_Store
if image_name.startswith("."):
                continue
#build image path
#sample image path = training-data/s1/1.pgm
image_path = subject_dir_path + "/" + image_name
#read image
image = cv2.imread(image_path)
#display an image window to show the image
#print("Training label :",label)
cv2.waitKey(100)
#detect face
face, rect = detect_face(image)
#------STEP-4--------
#for the purpose of this tutorial
#we will ignore faces that are not detected
if face is not None:
#add face to list of faces
faces.append(face)
#add label for this face
labels.append(count)
print("Data Prepared for Training")
cv2.destroyAllWindows()
cv2.waitKey(1)
cv2.destroyAllWindows()
return faces, labels
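# Illustrative follow-up, not part of the original module; assumes the
# opencv-contrib build, which ships the cv2.face module:
# faces, labels = prepare_training_data('training-data')
# recognizer = cv2.face.LBPHFaceRecognizer_create()
# recognizer.train(faces, np.array(labels))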
def detect_face(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#load OpenCV face detector, I am using LBP which is fast
#there is also a more accurate but slow: Haar classifier
face_cascade = cv2.CascadeClassifier('opencv-files/lbpcascade_frontalface.xml')
#let's detect multiscale images(some images may be closer to camera than others)
#result is a list of faces
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
#if no faces are detected then return original img
if (len(faces) == 0):
return None, None
#under the assumption that there will be only one face,
#extract the face area
    x, y, w, h = faces[0]
    #return only the face part of the image (rows are indexed by y/h, columns by x/w)
    return gray[y:y+h, x:x+w], faces[0]
| 31.297297 | 85 | 0.593264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 926 | 0.399827 |
c230e009f1c0351446c02fccceb30b7ead29e784 | 138 | py | Python | randomselection.py | Ristinoa/cs257 | e7c31f995d08661114d868a55448c628b4cc9327 | ["MIT"] | null | null | null | randomselection.py | Ristinoa/cs257 | e7c31f995d08661114d868a55448c628b4cc9327 | ["MIT"] | null | null | null | randomselection.py | Ristinoa/cs257 | e7c31f995d08661114d868a55448c628b4cc9327 | ["MIT"] | null | null | null |
"randompicker.py"
import random
"A very short practice program designed
to spit out a random, user-determined
sample of input names"
| 13.8 | 39 | 0.775362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.123188 |
c2318081600b41f253e54a78d1001f4ddb857e30 | 15,873 | py | Python | fisspy/analysis/tdmap.py | SNU-sunday/FISS-PYTHON | f79420debef476a904356d42542cb6472990bb2f | ["BSD-2-Clause"] | 3 | 2017-02-18T06:42:08.000Z | 2021-01-05T04:15:08.000Z | fisspy/analysis/tdmap.py | SNU-sunday/fisspy | f79420debef476a904356d42542cb6472990bb2f | ["BSD-2-Clause"] | 1 | 2019-06-30T10:35:27.000Z | 2019-06-30T10:35:27.000Z | fisspy/analysis/tdmap.py | SNU-sunday/FISS-PYTHON | f79420debef476a904356d42542cb6472990bb2f | ["BSD-2-Clause"] | 1 | 2017-02-23T05:24:13.000Z | 2017-02-23T05:24:13.000Z |
from __future__ import absolute_import, division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from fisspy.analysis.filter import FourierFilter
from interpolation.splines import LinearSpline
from matplotlib.animation import FuncAnimation
import astropy.units as u
from astropy.time import Time
__author__ = "Juhyung Kang"
__email__ = "[email protected]"
class TDmap:
"""
Make Time-Distance map for given slit position
Parameters
----------
data : `~numpy.ndarray`
3-dimensional data array (time, y, x).
    header : `~astropy.io.fits.header.Header`
Header of data.
tarr : `~numpy.ndarray`, optional
Array of time (unit: second).
filterRange : `list`, optional
List of range of Fourier bandpass filters
Returns
-------
td : `~fisspy.analysis.tdmap.TDmap`
A new time distance class object.
Examples
--------
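    >>> # Illustrative call, assuming a (time, y, x) cube and a matching header:
    >>> td = TDmap(data, header, tarr=tarr)
    >>> td.imshow(R=5, angle=30)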
"""
def __init__(self, data, header, tarr=None, filterRange=None, cmap=None):
self.data = data
self.header = header
self.nx = self.header['naxis1']
self.ny = self.header['naxis2']
self.nt = self.header['naxis3']
self.dx = self.header['cdelt1']
self.dy = self.header['cdelt2']
self.dt = self.header['cdelt3']
self.rx = self.header['crval1']
self.ry = self.header['crval2']
self.cmap = cmap
if not np.any(tarr):
tarr = np.arange(0, self.nt*self.dt, self.dt)
self._tarr = tarr
self.Time = Time(self.header['sttime']) + tarr*u.second
self.extent = [self.rx-self.nx/2*self.dx,
self.rx+self.nx/2*self.dx,
self.ry-self.ny/2*self.dy,
self.ry+self.ny/2*self.dy]
self._xarr = np.linspace(self.extent[0]+self.dx*0.5,
self.extent[1]-self.dx*0.5,
self.nx)
self._yarr = np.linspace(self.extent[2]+self.dy*0.5,
self.extent[3]-self.dy*0.5,
self.ny)
self.smin = [self._tarr[0],
self.extent[2]+0.5*self.dy,
self.extent[0]+0.5*self.dx]
self.smax = [self._tarr[-1],
self.extent[3]-0.5*self.dy,
self.extent[1]-0.5*self.dx]
self.order = [self.nt, self.ny, self.nx]
self._tname = ['ori']
if not filterRange:
self.nfilter = 1
self.fdata = np.empty([1, self.nt, self.ny, self.nx])
else:
self.nfilter = len(filterRange)+1
self.fdata = np.empty([self.nfilter, self.nt, self.ny, self.nx])
for n, fR in enumerate(filterRange):
self._tname += ['%.1f - %.1f mHZ'%(fR[0], fR[1])]
self.fdata[n+1] = FourierFilter(self.data, self.nt,
self.dt*1e-3, fR)
self.fdata[0] = self.data
self.interp = []
for data in self.fdata:
self.interp += [LinearSpline(self.smin, self.smax,
self.order, data)]
def get_TD(self, R, xc, yc, angle):
self.R = R
self.xc = xc
self.yc = yc
self.angle = angle
ang = np.deg2rad(self.angle)
nl = int(np.ceil(2*R/self.dx))
self.x1 = -R*np.cos(ang) + xc
self.x2 = R*np.cos(ang) + xc
self.y1 = -R*np.sin(ang) + yc
self.y2 = R*np.sin(ang) + yc
x = np.linspace(self.x1, self.x2, nl)
y = np.linspace(self.y1, self.y2, nl)
oiarr = np.empty([nl, self.nt, 3])
oiarr[:,:,0] = self._tarr
oiarr[:,:,1] = y[:,None]
oiarr[:,:,2] = x[:,None]
iarr = oiarr.reshape([nl*self.nt, 3])
td = self.interp[self.filterNum-1](iarr)
return td.reshape([nl, self.nt])
def imshow(self, R=5, xc=None, yc=None, angle=0, t=0,
filterNum=1, fps=10, cmap=plt.cm.gray,
interpolation='bilinear'):
try:
plt.rcParams['keymap.back'].remove('left')
plt.rcParams['keymap.forward'].remove('right')
except:
pass
if not xc:
xc = self.rx
if not yc:
yc = self.ry
self.R = self._R0 = R
self.angle = self._angle0 = angle
self.xc = self._xc0 = xc
self.yc = self._yc0 = yc
self.filterNum = self._filterNum0 = filterNum
self.t = self._t0 = t
self.fps = fps
self.pause = 'ini'
self.pos = []
self.mark = []
self.hlines = []
tpix = np.abs(self._tarr-self.t).argmin()
self.td = self.get_TD(R,xc,yc,angle)
self.tdextent = [self._tarr[0]-0.5*self.dt,
self._tarr[-1]+0.5*self.dt,
-self.R,
self.R]
if not self.cmap:
self.cmap = cmap
self.fig= plt.figure(figsize=[14,9])
self.fig.canvas.set_window_title('%s ~ %s'%(self.Time[0], self.Time[-1]))
gs = gridspec.GridSpec(5, self.nfilter)
self.axTD = self.fig.add_subplot(gs[3:, :])
self.axTD.set_xlabel('Time (sec)')
self.axTD.set_ylabel('Distance (arcsec)')
self.axTD.set_title('%i: %s, '
'Time: %s, '
'tpix: %i'%(filterNum, self._tname[filterNum-1],
self.Time[tpix].value,
tpix))
self.imTD = self.axTD.imshow(self.td,
extent=self.tdextent,
origin='lower',
cmap=self.cmap,
interpolation=interpolation)
self.axRaster = []
self.im = []
for i in range(self.nfilter):
if i == 0:
self.axRaster += [self.fig.add_subplot(gs[:3, i])]
self.axRaster[i].set_xlabel('X (arcsec)')
self.axRaster[i].set_ylabel('Y (arcsec)')
else:
self.axRaster += [self.fig.add_subplot(gs[:3, i],
sharex=self.axRaster[0],
sharey=self.axRaster[0])]
self.axRaster[i].tick_params(labelleft=False, labelbottom=False)
self.axRaster[i].set_title('%i: %s'%(i+1, self._tname[i]))
self.im += [self.axRaster[i].imshow(self.fdata[i, tpix],
extent=self.extent,
origin='lower',
cmap=self.cmap,
interpolation=interpolation)]
self.slit = self.axRaster[filterNum-1].plot([self.x1, self.x2],
[self.y1, self.y2],
color='k')[0]
self.center = self.axRaster[filterNum-1].scatter(self.xc, self.yc,
100, marker='+',
c='k')
self.top = self.axRaster[filterNum-1].scatter(self.x2, self.y2, 100,
marker='+', c='b', label='%.1f'%self.R)
self.bottom = self.axRaster[filterNum-1].scatter(self.x1, self.y1, 100,
marker='+', c='r',
label='-%.1f'%self.R)
self.tslit = self.axTD.axvline(self.t, ls='dashed', c='lime')
self.leg = self.axRaster[filterNum-1].legend()
self.axTD.set_aspect(adjustable='box', aspect='auto')
self.imTD.set_clim(self.fdata[filterNum-1,0].min(),
self.fdata[filterNum-1,0].max())
self.fig.tight_layout()
self.fig.canvas.mpl_connect('key_press_event', self._onKey)
plt.show()
def _onKey(self, event):
if event.key == 'up':
if self.angle < 360:
self.angle += 1
else:
self.angle = 1
elif event.key == 'down':
if self.angle > 0:
self.angle -=1
else:
self.angle = 359
elif event.key == 'right':
if self.t < self._tarr[-1]:
self.t += self.dt
else:
self.t = self._tarr[0]
elif event.key == 'left':
if self.t > self._tarr[0]:
self.t -= self.dt
else:
self.t = self._tarr[-1]
elif event.key == 'ctrl+right':
if self.xc < self._xarr[-1]:
self.xc += self.dx
else:
self.xc = self._xarr[0]
elif event.key == 'ctrl+left':
if self.xc > self._xarr[0]:
self.xc -= self.dx
else:
self.xc = self._xarr[-1]
elif event.key == 'ctrl+up':
if self.yc < self._yarr[-1]:
self.yc += self.dy
else:
self.yc = self._yarr[0]
elif event.key == 'ctrl+down':
if self.yc > self._yarr[0]:
self.yc -= self.dy
else:
self.yc = self._yarr[-1]
elif event.key == 'ctrl++':
self.R += self.dx
elif event.key == 'ctrl+-':
self.R -= self.dx
elif event.key == ' ' and event.inaxes in self.axRaster:
self.xc = event.xdata
self.yc = event.ydata
elif event.key == ' ' and event.inaxes == self.axTD:
self.t = event.xdata
elif event.key == 'x' and event.inaxes == self.axTD:
self.pos += [event.ydata]
ang = np.deg2rad(self.angle)
xp = self.pos[-1]*np.cos(ang) + self.xc
yp = self.pos[-1]*np.sin(ang) + self.yc
self.mark += [self.axRaster[self.filterNum-1].scatter(xp, yp, 100,
marker='+',
c='lime')]
self.hlines += [self.axTD.axhline(self.pos[-1], ls='dashed', c='lime')]
elif event.key == 'enter':
if self.pause == 'ini':
self.ani = FuncAnimation(self.fig, self._chTime,
frames=self._tarr,
blit=False,
interval=1e3/self.fps,
repeat=True)
# cache_frame_data=False)
self.pause = False
else:
self.pause ^= True
if self.pause:
self.ani.event_source.stop()
else:
self.ani.event_source.start(1e3/self.fps)
for iid in range(self.nfilter):
if event.key == 'ctrl+%i'%(iid+1):
self.filterNum = iid+1
tpix = np.abs(self._tarr-self.t).argmin()
self.changeSlit(self.R, self.xc, self.yc, self.angle)
self.axTD.set_title('%i: %s, '
'Time: %s, '
'tpix: %i'%(self.filterNum, self._tname[self.filterNum-1],
self.Time[tpix].value,
tpix))
self._filterNum0 = self.filterNum
self.imTD.set_clim(self.im[self.filterNum-1].get_clim())
if self.xc != self._xc0 or self.yc != self._yc0 or \
self.angle != self._angle0 or self.R != self._R0:
self.changeSlit(self.R, self.xc, self.yc, self.angle)
self._R0 = self.R
self._xc0 = self.xc
self._yc0 = self.yc
self._angle0 = self.angle
if self.t != self._t0:
self._chTime(self.t)
self._t0 = self.t
self.fig.canvas.draw_idle()
def changeSlit(self, R, xc, yc, angle):
td = self.get_TD(R, xc, yc, angle)
self.tdextent[2] = -R
self.tdextent[3] = R
self.axTD.set_ylim(-R, R)
ang = np.deg2rad(self.angle)
if self.filterNum != self._filterNum0:
self.leg.remove()
self.slit.remove()
self.bottom.remove()
self.center.remove()
self.top.remove()
self.slit = self.axRaster[self.filterNum-1].plot([self.x1, self.x2],
[self.y1, self.y2],
color='k')[0]
self.center = self.axRaster[self.filterNum-1].scatter(self.xc,
self.yc, 100, marker='+', c='k')
self.top = self.axRaster[self.filterNum-1].scatter(self.x2,
self.y2, 100,
marker='+', c='b', label='%.1f'%self.R)
self.bottom = self.axRaster[self.filterNum-1].scatter(self.x1,
self.y1, 100,
marker='+', c='r',
label='-%.1f'%self.R)
for n, pos in enumerate(self.pos):
self.mark[n].remove()
xp = pos*np.cos(ang) + self.xc
yp = pos*np.sin(ang) + self.yc
self.mark[n] = self.axRaster[self.filterNum-1].scatter(xp, yp, 100,
marker='+',
c='lime')
else:
self.slit.set_xdata([self.x1, self.x2])
self.slit.set_ydata([self.y1, self.y2])
self.bottom.set_offsets([self.x1, self.y1])
self.top.set_offsets([self.x2, self.y2])
self.center.set_offsets([self.xc, self.yc])
# change marker
for n, pos in enumerate(self.pos):
xp = pos*np.cos(ang) + self.xc
yp = pos*np.sin(ang) + self.yc
self.mark[n].set_offsets([xp, yp])
self.hlines[n].set_ydata(pos)
self.top.set_label('%.1f'%self.R)
self.bottom.set_label('-%.1f'%self.R)
self.imTD.set_data(td)
self.leg = self.axRaster[self.filterNum-1].legend()
def _chTime(self, t):
self.t = t
tpix = np.abs(self._tarr-t).argmin()
self.axTD.set_title('%i: %s, '
'Time: %s, '
'tpix: %i'%(self.filterNum, self._tname[self.filterNum-1],
self.Time[tpix].value,
tpix))
self.tslit.set_xdata(self.t)
for n, im in enumerate(self.im):
im.set_data(self.fdata[n, tpix])
def set_clim(self, cmin, cmax, frame):
self.im[frame-1].set_clim(cmin, cmax)
if self.filterNum == frame:
self.imTD.set_clim(cmin, cmax)
def remove_Mark(self):
for n in range(len(self.pos)):
self.mark[n].remove()
self.hlines[n].remove()
self.pos = []
self.mark = []
self.hlines = []
def savefig(self, filename, **kwargs):
        self.fig.savefig(filename, **kwargs)
def saveani(self, filename, **kwargs):
fps = kwargs.pop('fps', self.fps)
self.ani.save(filename, fps=fps, **kwargs) | 41.015504 | 86 | 0.449001 | 15,476 | 0.974989 | 0 | 0 | 0 | 0 | 0 | 0 | 1,279 | 0.080577 |
c231926cf5107bb89588302bb3bc85d7ec967624 | 2,806 | py | Python | Graphy/setup.py | andrepbento/OpenTracingProcessor | 9e4b01cb59cecbfa04af8d5d93e3b7deb76d9ee6 | [
"MIT"
]
| 4 | 2021-03-06T13:50:58.000Z | 2022-03-28T15:17:07.000Z | Graphy/setup.py | andrepbento/OpenTracingProcessor | 9e4b01cb59cecbfa04af8d5d93e3b7deb76d9ee6 | [
"MIT"
]
| null | null | null | Graphy/setup.py | andrepbento/OpenTracingProcessor | 9e4b01cb59cecbfa04af8d5d93e3b7deb76d9ee6 | [
"MIT"
]
| null | null | null | """
Author: André Bento
Date last modified: 26-02-2019
"""
import subprocess
import sys
from os.path import dirname, abspath, join
from setuptools import find_packages, Command, setup
from setuptools.command.test import test as TestCommand
this_dir = abspath(dirname(__file__))
NAME = 'graphy'
VERSION = '0.0.1'
# Readme
with open(join(this_dir, 'README.md'), encoding='utf-8') as file:
readme = file.read()
# License
with open(join(this_dir, 'LICENSE'), encoding='utf-8') as file:
license_file = file.read()
# Requirements
with open(join(this_dir, 'requirements.txt')) as file:
requirements = file.read().splitlines()
class Install(Command):
user_options = [
['pip3', 'install', '-r', 'requirements.txt']
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run_install(self):
for command in self.user_options:
subprocess.run(command)
class Run(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print()
from graphy.app import Graphy
Graphy.run()
class Test(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
err = pytest.main(self.pytest_args)
sys.exit(err)
setup(
name=NAME,
version=VERSION,
description='A micro-services system monitor command line program in Python.',
long_description=readme,
# long_description_content_type='text/markdown',
url='https://github.com/andrepbento/MScThesis/tree/master/Graphy',
author='André Bento',
author_email='[email protected]',
license=license_file,
classifiers=[
# How mature is this project? Common values are
# 1 - Project setup
# 2 - Prototype
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 2 - Prototype',
'Intended Audience :: Developers',
'Topic :: Observing and Controlling Performance in Micro-services',
'License :: MIT License',
'Programming Language :: Python :: 3.6',
],
keywords='cli',
packages=find_packages(exclude=('tests*', 'docs')),
install_requires=requirements,
tests_require=['pytest'],
extras_require={
'test': ['coverage', 'pytest', 'pytest-cov'],
},
cmdclass={
'install': Install,
'run': Run,
'test': Test
},
)
| 24.189655 | 82 | 0.62794 | 993 | 0.353632 | 0 | 0 | 0 | 0 | 0 | 0 | 882 | 0.314103 |
c232029579d8b288e2ac9ed43b03f0690df1e9c2 | 1,317 | py | Python | polaris/polaris/sep24/tzinfo.py | yuriescl/django-polaris | 8806d0e4e8baaddbffbceb3609786d2436b8abe1 | [
"Apache-2.0"
]
| 81 | 2019-11-16T21:47:22.000Z | 2022-02-17T07:35:02.000Z | polaris/polaris/sep24/tzinfo.py | yuriescl/django-polaris | 8806d0e4e8baaddbffbceb3609786d2436b8abe1 | [
"Apache-2.0"
]
| 491 | 2019-11-10T23:44:30.000Z | 2022-03-20T00:25:02.000Z | polaris/polaris/sep24/tzinfo.py | yuriescl/django-polaris | 8806d0e4e8baaddbffbceb3609786d2436b8abe1 | [
"Apache-2.0"
]
| 89 | 2019-11-18T21:31:01.000Z | 2022-03-28T13:47:41.000Z | import pytz
from datetime import datetime, timedelta, timezone
from rest_framework.decorators import api_view, parser_classes, renderer_classes
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from rest_framework.request import Request
from rest_framework.response import Response
from django.contrib.sessions.backends.db import SessionStore
from polaris.utils import render_error_response, getLogger
logger = getLogger(__name__)
@api_view(["POST"])
@parser_classes([JSONParser])
@renderer_classes([JSONRenderer])
def post_tzinfo(request: Request) -> Response:
if not (
request.data.get("sessionId") and request.data.get("sessionOffset") is not None
):
return render_error_response("missing required parameters")
now = datetime.now(timezone.utc)
offset = timedelta(minutes=request.data["sessionOffset"])
zone = None
for tz in map(pytz.timezone, pytz.all_timezones_set):
if now.astimezone(tz).utcoffset() == offset:
zone = tz.zone
break
if not zone:
return render_error_response("no timezones matched with offset")
session = SessionStore(session_key=request.data["sessionId"])
session["timezone"] = zone
session.save()
return Response({"status": "ok", "tz": zone})
| 34.657895 | 87 | 0.741838 | 0 | 0 | 0 | 0 | 833 | 0.632498 | 0 | 0 | 147 | 0.111617 |
c2321c74ae596a68d5084730c6df5fe1a40a8090 | 1,615 | py | Python | utils/fundoptutils.py | joshualee155/FundOptimizer | da842de6c99f89c767d03c9ef1b392237b726a3f | [
"MIT"
]
| 2 | 2021-01-03T00:46:51.000Z | 2021-09-01T02:48:51.000Z | utils/fundoptutils.py | joshualee155/FundOptimizer | da842de6c99f89c767d03c9ef1b392237b726a3f | [
"MIT"
]
| null | null | null | utils/fundoptutils.py | joshualee155/FundOptimizer | da842de6c99f89c767d03c9ef1b392237b726a3f | [
"MIT"
]
| 1 | 2021-08-28T11:04:00.000Z | 2021-08-28T11:04:00.000Z | import pandas as pd
import datetime as dt
class FundType( object ):
OF = 'Open Ended Fund'
ETF = 'Exchange Traded Fund'
LOF = 'Listed Open Ended Fund'
MMF = 'Money Market Fund'
def getFundType( fundCode ):
fundTypeDf = pd.read_csv( 'refData/fund_list.csv', names = [ 'fundCode', 'fundType' ] )
fundTypeDf[ 'fundCode' ] = fundTypeDf[ 'fundCode' ].apply( lambda x: str(x).zfill(6) )
fundTypeDf.drop_duplicates( subset = [ 'fundCode' ], inplace = True )
fundTypeDf.set_index( 'fundCode', drop = True, inplace = True )
try:
sType = fundTypeDf[ 'fundType' ][ fundCode ]
if sType == 'OF':
return FundType.OF
elif sType == 'ETF':
return FundType.ETF
elif sType == 'LOF':
return FundType.LOF
elif sType == 'MMF':
return FundType.MMF
else:
raise NameError( "Unknown fund type %s" % sType )
except KeyError:
return FundType.OF
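# Unknown fund codes fall back to FundType.OF via the KeyError branch above,
# e.g. getFundType('999999') -> FundType.OF when the code (hypothetical here)
# is absent from refData/fund_list.csv.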
def str2date( sDate ):
"""
Convert a string date to datetime.date
"""
try:
dateTime = dt.datetime.strptime( sDate, "%Y%m%d" )
except ValueError:
dateTime = dt.datetime.strptime( sDate, "%Y-%m-%d" )
return dateTime.date()
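# e.g. str2date("20200101") and str2date("2020-01-01") both return datetime.date(2020, 1, 1)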
def getHolidays( startDate, endDate ):
"""
Return China exchange holidays ( non-trading days ) from `startDate` to `endDate`
"""
with open( 'refData/holidays.txt', 'r' ) as f:
holidays = f.read().strip().split('\n')
holidays = [ date for date in map( str2date, holidays ) if date >= startDate and date <= endDate ]
return holidays
| 30.471698 | 102 | 0.596285 | 160 | 0.099071 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.256347 |
c2329a7b9e06911a1ed82d81214f1385b352823d | 398 | py | Python | Http-api-auth0-jwt/src/list-all-unicorns.py | JimmyDqv/blogs-and-sessions-code | 737a2e88f3fd84bd8426be609f7474374d4ac4d6 | [
"MIT"
]
| 2 | 2021-07-08T10:31:11.000Z | 2022-01-07T23:04:31.000Z | Http-api-auth0-jwt/src/list-all-unicorns.py | JimmyDqv/blogs-and-sessions-code | 737a2e88f3fd84bd8426be609f7474374d4ac4d6 | [
"MIT"
]
| null | null | null | Http-api-auth0-jwt/src/list-all-unicorns.py | JimmyDqv/blogs-and-sessions-code | 737a2e88f3fd84bd8426be609f7474374d4ac4d6 | [
"MIT"
]
| null | null | null | import json
def lambda_handler(event, context):
unicorns = [
{
"name": "Gaia",
"gift": "Speed"
},
{
"name": "Magestic",
"gift": "Magic"
},
{
"name": "Sparkles",
"gift": "Glitter"
}
]
return {
'statusCode': 200,
'body': json.dumps(unicorns)
}
| 15.92 | 36 | 0.371859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.258794 |
c232b3257fa969c4deba44d282906664d6091820 | 253 | py | Python | examples/http_server.py | srossross/uvio | f4d55ad5ea5900a2a8b9c1249484ed621dc30055 | [
"MIT"
]
| 3 | 2016-03-23T08:12:03.000Z | 2018-10-06T02:46:54.000Z | examples/http_server.py | srossross/uvio | f4d55ad5ea5900a2a8b9c1249484ed621dc30055 | [
"MIT"
]
| null | null | null | examples/http_server.py | srossross/uvio | f4d55ad5ea5900a2a8b9c1249484ed621dc30055 | [
"MIT"
]
| null | null | null | import uvio
async def route(req, res):
    pass
async def handler(req, res):
    await route(req, res)
res.end("Yes")
@uvio.run
async def main():
server = await uvio.http.listen(handler, host='127.0.0.1', port=80)
print("server", server)
| 14.882353 | 71 | 0.632411 | 0 | 0 | 0 | 0 | 129 | 0.509881 | 224 | 0.885375 | 24 | 0.094862 |
c2334b533c25c85bcfe5823b2fbd3fe0b9cec5f6 | 7,933 | py | Python | ui/django_site_v2/data_cube_ui/utils/dc_mosaic.py | ceos-seo/Data_Cube_v2 | 81c3be66153ea123b5d21cf9ec7f59ccb7a2050a | [
"Apache-2.0"
]
| 27 | 2016-08-16T18:22:47.000Z | 2018-08-25T17:18:15.000Z | ui/django_site_v2/data_cube_ui/utils/dc_mosaic.py | data-cube/CEOS-cube | 31baeba08d8e8470c4663c18aaf9056431d9c49f | [
"Apache-2.0"
]
| null | null | null | ui/django_site_v2/data_cube_ui/utils/dc_mosaic.py | data-cube/CEOS-cube | 31baeba08d8e8470c4663c18aaf9056431d9c49f | [
"Apache-2.0"
]
| 27 | 2016-08-26T18:14:40.000Z | 2021-12-24T08:41:29.000Z |
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gdal, osr
import collections
import gc
import numpy as np
import xarray as xr
from datetime import datetime
import collections
from collections import OrderedDict
import datacube
from . import dc_utilities as utilities
# Author: KMF
# Creation date: 2016-06-14
# Modified by: AHDS
# Last modified date:
def create_mosaic_iterative(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None):
"""
Description:
Creates a most recent - oldest mosaic of the input dataset. If no clean mask is given,
the 'cf_mask' variable must be included in the input dataset, as it will be used
to create a clean mask
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain
coordinates: time, latitude, longitude
variables: variables to be mosaicked
If user does not provide a clean_mask, dataset_in must also include the cf_mask
variable
Optional Inputs:
clean_mask (nd numpy array with dtype boolean) - true for values user considers clean;
if user does not provide a clean mask, one will be created using cfmask
no_data (int/float) - no data pixel value; default: -9999
Output:
dataset_out (xarray.Dataset) - mosaicked data with
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
dataset_in = dataset_in.drop('cf_mask')
#masks data with clean_mask. all values that are clean_mask==False are set to nodata.
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
for index in reversed(range(len(clean_mask))):
dataset_slice = dataset_in.isel(time=index).astype("int16").drop('time')
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
#clear out the params as they can't be written to nc.
dataset_out.attrs = OrderedDict()
else:
for key in list(dataset_in.data_vars):
dataset_out[key].values[dataset_out[key].values==-9999] = dataset_slice[key].values[dataset_out[key].values==-9999]
return dataset_out
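# Illustrative call of the helper above (argument names taken from its signature):
# mosaic = create_mosaic_iterative(dataset_in, clean_mask=mask, no_data=-9999)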
def create_median_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None):
"""
Description:
Method for calculating the median pixel value for a given dataset.
-----
Input:
dataset_in (xarray dataset) - the set of data with clouds and no data removed.
Optional Inputs:
no_data (int/float) - no data value.
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
dataset_in = dataset_in.drop('cf_mask')
#required for np.nan
dataset_in = dataset_in.astype("float64")
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
dataset_out = dataset_in.isel(time=0).drop('time').copy(deep=True)
dataset_out.attrs = OrderedDict()
# Loop over every key.
for key in list(dataset_in.data_vars):
dataset_in[key].values[dataset_in[key].values==no_data] = np.nan
dataset_out[key].values = np.nanmedian(dataset_in[key].values, axis=0)
        dataset_out[key].values[np.isnan(dataset_out[key].values)] = no_data
return dataset_out.astype('int16')
def create_max_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None):
"""
Description:
Method for calculating the pixel value for the max ndvi value.
-----
Input:
dataset_in (xarray dataset) - the set of data with clouds and no data removed.
Optional Inputs:
no_data (int/float) - no data value.
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
dataset_in = dataset_in.drop('cf_mask')
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
for timeslice in range(clean_mask.shape[0]):
dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice,::]] = -1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
#clear out the params as they can't be written to nc.
dataset_out.attrs = OrderedDict()
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values]
return dataset_out
def create_min_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None):
"""
Description:
Method for calculating the pixel value for the min ndvi value.
-----
Input:
dataset_in (xarray dataset) - the set of data with clouds and no data removed.
Optional Inputs:
no_data (int/float) - no data value.
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
dataset_in = dataset_in.drop('cf_mask')
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
for timeslice in range(clean_mask.shape[0]):
dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice,::]] = 1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
#clear out the params as they can't be written to nc.
dataset_out.attrs = OrderedDict()
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values]
return dataset_out
| 40.682051 | 173 | 0.704399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,318 | 0.418253 |
c234a2bf9d847b0178d0e12fe82918d472e89c91 | 2,014 | py | Python | plotter.py | keshavbantu/covclass | e27cfb4ff8e7e6f076c3429aa1c4696e173bc3a4 | [
"MIT"
]
| null | null | null | plotter.py | keshavbantu/covclass | e27cfb4ff8e7e6f076c3429aa1c4696e173bc3a4 | [
"MIT"
]
| null | null | null | plotter.py | keshavbantu/covclass | e27cfb4ff8e7e6f076c3429aa1c4696e173bc3a4 | [
"MIT"
]
| null | null | null | import cleaner as dataStream
import plotly.graph_objects as go
import plotly.io as pio
#DONUT PLOT - CONDITIONS -----------------------------------------
labels = ['Diabetes','Hypertension','Coronary Heart(D)','Chronic Kidney(D)','No Conditions','Obstructive Pulmonary(D)']
values = dataStream.PIEList
fig_cond = go.Figure(data=[go.Pie(labels=labels, values=values, hole=.3)])
#fig_cond.show()
pio.write_html(fig_cond, file="templates/cond.html")
#GROUP BAR PLOT - SYMPTOMS ---------------------------------------
symplabel=['Symptoms']
fig_symp = go.Figure(data=[
go.Bar(name='Fever', x=symplabel, y=dataStream.Fever),
go.Bar(name='Cough', x=symplabel, y=dataStream.Cough),
go.Bar(name='Breathlessness', x=symplabel, y=dataStream.Breathlessness),
go.Bar(name='Severe Acute Respiratory Syndrome', x=symplabel, y=dataStream.SARI),
go.Bar(name='Influenza-like Illness', x=symplabel, y=dataStream.ILI),
go.Bar(name='Asymptomatic', x=symplabel, y=dataStream.NONE_sym)
])
fig_symp.update_layout(barmode='group')
#fig_symp.show()
pio.write_html(fig_symp, file="templates/symp.html")
#STACK BAR PLOT - AGE DATA ------------------------------------------
fig_age = go.Figure()
fig_age.add_trace(go.Bar(
y=['0 to 10', '10 to 20', '20 to 30','30 to 40', '40 to 50', '50 to 60','60 to 70', '70 to 80', '80 to 90','90 to 100'],
x=dataStream.maleAgeList,
name='Male Deaths',
orientation='h',
marker=dict(
color='rgba(61, 112, 242, 0.6)',
line=dict(color='rgba(61, 112, 242, 1.0)', width=2)
)
))
fig_age.add_trace(go.Bar(
y=['0 to 10', '10 to 20', '20 to 30','30 to 40', '40 to 50', '50 to 60','60 to 70', '70 to 80', '80 to 90','90 to 100'],
x=dataStream.femaleAgeList,
name='Female Deaths',
orientation='h',
marker=dict(
color='rgba(242, 61, 221, 0.6)',
line=dict(color='rgba(242, 61, 221, 1.0)', width=2)
)
))
fig_age.update_layout(barmode='stack')
#fig_age.show()
pio.write_html(fig_age, file="templates/age.html")
 | 38 | 124 | 0.627607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 874 | 0.433962 |
c235b37d33733193984303077c70e9f3d941faa4 | 1,847 | py | Python | pyglare/scene/objects.py | keyvank/pyglare | 9e26ae444ff4481f0f50d7344d2a5a881d04fe64 | [
"MIT"
]
| 6 | 2017-01-13T22:32:55.000Z | 2022-03-27T22:19:49.000Z | pyglare/scene/objects.py | keyvank/pyglare | 9e26ae444ff4481f0f50d7344d2a5a881d04fe64 | [
"MIT"
]
| 1 | 2016-09-13T17:59:41.000Z | 2016-09-13T18:05:20.000Z | pyglare/scene/objects.py | keyvank/pyglare | 9e26ae444ff4481f0f50d7344d2a5a881d04fe64 | [
"MIT"
]
| null | null | null | from ..math import geometry as geo
from ..image.color import Color
import math
class Material:
def __init__(self,color,diffuse_rate,specular_rate,specular_exponent,reflection_rate):
self.color = color
self.diffuse_rate = diffuse_rate
self.specular_rate = specular_rate
self.specular_exponent = specular_exponent
self.reflection_rate = reflection_rate
class Object:
def __init__(self,material):
self.material=material
def intersection(self,ray):
'''Considering intersection point is: landa * ray, returns landa if there is intersection or None'''
pass
def normal_at(self,position):
'''Returns normal vector of this shape on a position'''
pass
def color_at(self,position):
pass
class Plane(Object):
def __init__(self,material,normal,intercept):
super().__init__(material)
self.math_repr = geo.Plane(normal,intercept)
def intersection(self,ray):
return self.math_repr.intersection(ray)
def normal_at(self,position):
return self.math_repr.normal
def color_at(self,position):
return self.material.color
class CheckerboardUpPlane(Plane):
def __init__(self,material,intercept,cell_size,cell_color):
super().__init__(material,geo.Vector(0,1,0),intercept)
self.cell_size = cell_size
self.cell_color = cell_color
def color_at(self,position):
checker=math.floor(position.x/self.cell_size)+math.floor(position.z/self.cell_size)
if checker%2 == 0:
return self.material.color
else:
return self.cell_color
class Sphere(Object):
def __init__(self,material,position,radius):
super().__init__(material)
self.math_repr = geo.Sphere(position,radius)
def intersection(self,ray):
return self.math_repr.intersection(ray)
def normal_at(self,position):
return (position-self.math_repr.position).normalize()
def color_at(self,position):
return self.material.color
| 24.959459 | 102 | 0.7634 | 1,757 | 0.951272 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.08392 |
c235c83aedce86f0591eb9d244db1ef5424b59b5 | 1,401 | py | Python | Led.py | Zico56/raspberry-gt500 | 85e29ec8bb604fab9c0eb37b63e85b8058baf2b2 | [
"Xnet",
"X11"
]
| null | null | null | Led.py | Zico56/raspberry-gt500 | 85e29ec8bb604fab9c0eb37b63e85b8058baf2b2 | [
"Xnet",
"X11"
]
| null | null | null | Led.py | Zico56/raspberry-gt500 | 85e29ec8bb604fab9c0eb37b63e85b8058baf2b2 | [
"Xnet",
"X11"
]
| null | null | null | import time
from tkinter import *
from PIL import Image, ImageTk
from Configuration import config
class Led:
imageOn = Image.open(config.get('APPLICATION', 'LED_ON_IMG'))
imageOff = Image.open(config.get('APPLICATION', 'LED_OFF_IMG'))
STATE_ON = "ON"
STATE_OFF = "OFF"
def __init__(self, parent):
self.state = Led.STATE_OFF
self.img = ImageTk.PhotoImage(Led.imageOff)
self.label = Label(parent, image=self.img, bg="black")
self.label.image = self.img
self.label.pack()
def swithOn(self):
self.img = ImageTk.PhotoImage(Led.imageOn)
self.label.configure(image=self.img)
self.state = Led.STATE_ON
def swithOff(self):
self.img = ImageTk.PhotoImage(Led.imageOff)
self.label.configure(image=self.img)
self.state = Led.STATE_OFF
# deprecated: use swithOn/swithOff methods instead
'''
def changeColor(self):
if ( self.state == Led.STATE_OFF):
self.img = ImageTk.PhotoImage(Led.imageOn)
self.label.configure(image=self.img)
self.state = Led.STATE_ON
elif ( self.state == Led.STATE_ON):
self.img = ImageTk.PhotoImage(Led.imageOff)
self.label.configure(image=self.img)
self.state = Led.STATE_OFF
else:
raise Exception('Unknow led state')
''' | 31.840909 | 67 | 0.615989 | 1,302 | 0.929336 | 0 | 0 | 0 | 0 | 0 | 0 | 590 | 0.421128 |
c236c320912188a01c92278b510292a2d1855a42 | 249 | py | Python | tests/accounts/model/test_social_security_number.py | Hyaxia/Bank-DDD-CQRS-ES | 116e3eb3e93d549c1da53e6d506ab47667d77445 | [
"MIT"
]
| 8 | 2020-10-27T09:46:20.000Z | 2022-01-27T12:16:48.000Z | tests/accounts/model/test_social_security_number.py | Hyaxia/Bank-DDD-CQRS-ES | 116e3eb3e93d549c1da53e6d506ab47667d77445 | [
"MIT"
]
| null | null | null | tests/accounts/model/test_social_security_number.py | Hyaxia/Bank-DDD-CQRS-ES | 116e3eb3e93d549c1da53e6d506ab47667d77445 | [
"MIT"
]
| 2 | 2021-05-29T08:11:48.000Z | 2021-07-26T04:44:53.000Z | import pytest
from bank_ddd_es_cqrs.accounts import SocialSecurityNumber
def test_social_security_number_throws_app_exception_with_status_422_if_too_much_digits():
with pytest.raises(ValueError) as e:
SocialSecurityNumber(1324352351)
| 27.666667 | 90 | 0.84739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c236d3b1e5bb73ed1d08dc25325aad2b8f8b0b9e | 358 | py | Python | setup.py | jjakimoto/rl_traders.py | d5411c96d49ba6a54751d12cdd11974e5cc1a8aa | [
"MIT"
]
| 2 | 2018-10-07T14:16:32.000Z | 2019-01-28T00:14:29.000Z | setup.py | jjakimoto/rl_traders.py | d5411c96d49ba6a54751d12cdd11974e5cc1a8aa | [
"MIT"
]
| null | null | null | setup.py | jjakimoto/rl_traders.py | d5411c96d49ba6a54751d12cdd11974e5cc1a8aa | [
"MIT"
]
| 1 | 2019-11-05T00:51:20.000Z | 2019-11-05T00:51:20.000Z | from setuptools import setup
from setuptools import find_packages
setup(name='rl_traders',
version='0.1.0',
description='Reinforcement Learning for Trading',
url='https://github.com/jjakimoto/rl_traders.git',
author='jjakimoto',
author_email='[email protected]',
license='MIT',
packages=find_packages()
)
| 27.538462 | 56 | 0.678771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.388268 |
c239846032333fb5d26b1c1eb5b5c8a5cf233d15 | 2,219 | py | Python | Music/__init__.py | izazkhan8293/Musicheu | 9cd33a71868b8b850d6fd78eaac05dda0713b7cc | [
"Apache-2.0"
]
| null | null | null | Music/__init__.py | izazkhan8293/Musicheu | 9cd33a71868b8b850d6fd78eaac05dda0713b7cc | [
"Apache-2.0"
]
| null | null | null | Music/__init__.py | izazkhan8293/Musicheu | 9cd33a71868b8b850d6fd78eaac05dda0713b7cc | [
"Apache-2.0"
]
| null | null | null | from pyrogram import Client
import asyncio
import importlib
import time
import uvloop
from aiohttp import ClientSession
from motor.motor_asyncio import AsyncIOMotorClient as MongoClient
from pyrogram import Client
from pyrogram import Client as Bot
from Music import config
from Music.config import API_ID, API_HASH, BOT_TOKEN, MONGO_DB_URI, SUDO_USERS, LOG_GROUP_ID, OWNER_ID
def initialize():
global dbb
dbb = {}
initialize()
MONGODB_CLI = MongoClient(MONGO_DB_URI)
db = MONGODB_CLI.wbb
SUDOERS = SUDO_USERS
OWNER = OWNER_ID
async def load_sudoers():
global SUDOERS
sudoersdb = db.sudoers
sudoers = await sudoersdb.find_one({"sudo": "sudo"})
sudoers = [] if not sudoers else sudoers["sudoers"]
for user_id in SUDOERS:
if user_id not in sudoers:
sudoers.append(user_id)
await sudoersdb.update_one(
{"sudo": "sudo"}, {"$set": {"sudoers": sudoers}}, upsert=True
)
SUDOERS = (SUDOERS + sudoers) if sudoers else SUDOERS
loop = asyncio.get_event_loop()
loop.run_until_complete(load_sudoers())
Music_START_TIME = time.time()
loop = asyncio.get_event_loop()
BOT_ID = 0
BOT_NAME = ""
BOT_USERNAME = ""
ASSID = 0
ASSNAME = ""
ASSUSERNAME = ""
ASSMENTION = ""
app = Client(
'MusicBot',
API_ID,
API_HASH,
bot_token=BOT_TOKEN,
)
aiohttpsession = ClientSession()
client = Client(config.SESSION_NAME, config.API_ID, config.API_HASH)
def all_info(app, client):
global BOT_ID, BOT_NAME, BOT_USERNAME
global ASSID, ASSNAME, ASSMENTION, ASSUSERNAME
getme = app.get_me()
getme1 = client.get_me()
BOT_ID = getme.id
ASSID = getme1.id
if getme.last_name:
BOT_NAME = getme.first_name + " " + getme.last_name
else:
BOT_NAME = getme.first_name
BOT_USERNAME = getme.username
ASSNAME = (
f"{getme1.first_name} {getme1.last_name}"
if getme1.last_name
else getme1.first_name
)
ASSUSERNAME = getme1.username
ASSMENTION = getme1.mention
app.start()
client.start()
all_info(app, client)
| 28.448718 | 102 | 0.708878 | 0 | 0 | 0 | 0 | 0 | 0 | 473 | 0.213159 | 112 | 0.050473 |
c23a870064fefb4e740984ad848e886ea4aa0cd9 | 9,372 | py | Python | test.py | ZJianjin/Traffic4cast2020_lds | 6cb76e885a9539e485c055222be77f41a559c507 | [
"Apache-2.0"
]
| 3 | 2020-12-10T13:43:08.000Z | 2021-01-17T04:36:34.000Z | test.py | ZJianjin/Traffic4cast2020_lds | 6cb76e885a9539e485c055222be77f41a559c507 | [
"Apache-2.0"
]
| null | null | null | test.py | ZJianjin/Traffic4cast2020_lds | 6cb76e885a9539e485c055222be77f41a559c507 | [
"Apache-2.0"
]
| null | null | null | import random
from random import shuffle
import numpy as np
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import yaml
import re
import os
import glob
import shutil
import sys
import copy
import h5py
from net_all import *
from trainer_all import *
season = None
use_mask = True
use_flip = False
use_time = True
model_name = 'neta'
train_winter = ['-01-', '-02-', '-03-']
train_summer = ['-05-', '-04-', '-06-']
test_winter = ['-11-', '-12-']
test_summer = ['-07-', '-08-', '-09-', '-10-']
SEED = 0
num_train_file = 285
num_frame_per_day = 288
num_frame_before = 12
num_frame_sequence = 24
target_frames = [0, 1, 2, 5, 8, 11]
num_sequence_per_day = num_frame_per_day - num_frame_sequence + 1
height = 495
width = 436
num_channel = 9
num_channel_discretized = 8 # 4 * 2
visual_input_channels = 115 # 12 * 8
visual_output_channels = 6 * 8 # 6 * 8
vector_input_channels = 1 # start time point
import json
#
n = 1
s = 255
e = 85
w = 170
tv = 16
##############################Set the path##############################################
data_root = './data'
model_root = './jianjzhmodelstest'
log_root = './output'
##############################Set the path##############################################
#
target_city = 'ISTANBUL' # ['BERLIN', 'MOSCOW', 'ISTANBUL']
# test_start_index_list = np.array([ 18, 57, 114, 174, 222], np.int32) # 'BERLIN'
# test_start_index_list = np.array([ 45, 102, 162, 210, 246], np.int32) # 'Moscow' # 'Istanbul'
input_static_data_path = data_root + '/' + target_city + '/' + target_city + '_static_2019.h5'
input_mask_data_path = data_root + '/maskdata/'
input_train_data_folder_path = data_root + '/' + target_city + '/training'
input_val_data_folder_path = data_root + '/' + target_city + '/validation'
input_test_data_folder_path = data_root + '/' + target_city + '/testing'
save_model_path = model_root + '/' + target_city + str(season) + str(use_flip) + str(use_mask)
summary_path = log_root + '/' + target_city + str(season) + str(use_flip) + str(use_mask)
#
batch_size_test = 5
learning_rate = 3e-4
load_model_path = model_root + '/' + 'ISTANBULneta'
# load_model_path = ''
is_training = False
# premodel = os.path.join(model_root, 'BERLINneta', 'model-58000.cptk')
global_step = 60000
def write_data(data, filename):
f = h5py.File(filename, 'w', libver='latest')
dset = f.create_dataset('array', shape=(data.shape), data=data, compression='gzip', compression_opts=9)
f.close()
def get_data_filepath_list(input_data_folder_path):
data_filepath_list = []
for filename in os.listdir(input_data_folder_path):
if filename.split('.')[-1] != 'h5':
continue
data_filepath_list.append(os.path.join(input_data_folder_path, filename))
data_filepath_list = sorted(data_filepath_list)
return data_filepath_list
def get_static_data(input_static_data_path):
fr = h5py.File(input_static_data_path, 'r')
data = fr['array'].value / 255.0
return data
def get_mask_data(input_mask_data_path, city):
map_0 = np.load(input_mask_data_path + city + 'map_0.npy')
map_1 = np.load(input_mask_data_path + city + 'map_1.npy')
map_2 = np.load(input_mask_data_path + city + 'map_2.npy')
map_3 = np.load(input_mask_data_path + city + 'map_3.npy')
result = np.concatenate([map_0, map_0, map_1, map_1, map_2, map_2, map_3, map_3], axis=-1)
return result
if __name__ == '__main__':
random.seed(SEED)
np.random.seed(SEED)
tf.set_random_seed(SEED)
trainer = Trainer(height, width, visual_input_channels, visual_output_channels, vector_input_channels,
learning_rate,
save_model_path, load_model_path, summary_path, is_training, use_mask, model_name)
tf.reset_default_graph()
test_data_filepath_list = get_data_filepath_list(input_test_data_folder_path)
if season == 'winter':
tmp = []
for i in test_data_filepath_list:
if any([j in i for j in test_winter]):
tmp.append(i)
        test_data_filepath_list = tmp  # assign back to the list that is actually used below
elif season == 'summer':
tmp = []
for i in test_data_filepath_list:
if any([j in i for j in test_summer]):
tmp.append(i)
        test_data_filepath_list = tmp  # assign back to the list that is actually used below
print('test_data_filepath_list\t', len(test_data_filepath_list), )
test_output_filepath_list = list()
for test_data_filepath in test_data_filepath_list:
filename = test_data_filepath.split('/')[-1]
test_output_filepath_list.append('output/' + target_city + '/' + target_city + '_test' + '/' + filename)
static_data = get_static_data(input_static_data_path)
mask_data = get_mask_data(input_mask_data_path, target_city)
try:
if not os.path.exists('output'):
os.makedirs('output')
if not os.path.exists('output/' + target_city):
os.makedirs('output/' + target_city)
if not os.path.exists('output/' + target_city + '/' + target_city + '_test'):
os.makedirs('output/' + target_city + '/' + target_city + '_test')
except Exception:
print('output path not made')
exit(-1)
with open('test_data.json') as f:
test_json = json.load(f)
for i in range(len(test_data_filepath_list)):
file_path = test_data_filepath_list[i]
out_file_path = test_output_filepath_list[i]
fr = h5py.File(file_path, 'r')
a_group_key = list(fr.keys())[0]
data = fr[a_group_key]
# assert data.shape[0] == num_frame_per_day
data = np.array(data, np.uint8)
test_data_batch_list = []
test_data_time_list = []
test_data_mask_list = []
batch_size_test = data.shape[0]
for j in range(batch_size_test):
test_data_time_list.append(float(j) / float(num_frame_per_day))
data_sliced = data[:, :, :, :, :num_channel]
if use_time:
for time_dict in test_json:
time_data = list(time_dict.keys())[0]
if time_data in file_path:
time_data = time_dict[time_data]
break
time_id = np.ones_like(data_sliced)[:, :, :, :, :1]
for m in range(len(time_data)):
for n in range(num_frame_before):
time_id[m, n] = time_id[m, n] * (time_data[m] + n) / 288.0 * 255.0
data_sliced = np.concatenate([data_sliced, time_id], axis=-1)
data_mask = (np.max(data_sliced, axis=4) == 0)
test_data_mask_list = data_mask[:, :, :, :]
test_data_batch_list.append(data_sliced)
test_data_time_list = np.asarray(test_data_time_list, np.float32)
input_time = np.reshape(test_data_time_list, (batch_size_test, 1))
test_data_mask = test_data_mask_list
input_data = np.concatenate(test_data_batch_list, axis=0).astype(np.float32)
input_data[:, :, :, :, :] = input_data[:, :, :, :, :] / 255.0
input_data = np.moveaxis(input_data, 1, -1).reshape((batch_size_test, height, width, -1))
static_data_tmp = np.tile(static_data, [batch_size_test, 1, 1, 1])
input_data = np.concatenate([input_data, static_data_tmp], axis=-1)
# input_data_mask = np.zeros((batch_size_test, num_frame_before, height, width, num_channel_discretized), np.bool)
# input_data_mask[test_data_mask[:, :num_frame_before, :, :], :] = True
# input_data_mask = np.moveaxis(input_data_mask, 1, -1).reshape((batch_size_test, height, width, -1))
# input_data[input_data_mask] = -1.0
true_label_mask = np.ones((batch_size_test, height, width, visual_output_channels), dtype=np.float32)
if use_mask:
orig_label_mask = np.tile(mask_data, [1, 1, 1, len(target_frames)])
else:
orig_label_mask = np.ones((batch_size_test, height, width, visual_output_channels), dtype=np.float32)
prediction_list = []
# print(input_data.shape)
# assert 0
import scipy.misc as misc
# trainer.load_model(premodel)
# print('load model')
for b in range(batch_size_test):
run_out_one = trainer.infer(input_data[b, :, :, :][np.newaxis, :, :, :],
input_time[b, :][np.newaxis, :],
true_label_mask[b, :, :, :][np.newaxis, :, :, :], global_step)
prediction_one = run_out_one['predict']
prediction_list.append(prediction_one)
# print(input_data[b,:,:,:].shape)
# for t in range(3):
# misc.imsave('output_'+str(b)+'_'+str(t)+'.png', np.reshape(prediction_one, [495, 436, 3, 8])[:, :, t, 0])
# assert 0
prediction = np.concatenate(prediction_list, axis=0)
prediction = np.moveaxis(np.reshape(prediction, (
batch_size_test, height, width, num_channel_discretized, len(target_frames),)), -1, 1)
prediction = prediction.astype(np.float32) * 255.0
prediction = np.rint(prediction)
prediction = np.clip(prediction, 0.0, 255.0).astype(np.uint8)
assert prediction.shape == (batch_size_test, len(target_frames), height, width, num_channel_discretized)
write_data(prediction, out_file_path)
| 37.94332 | 122 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,666 | 0.177764 |
c23bc080151d66518c85923b1ce1c8be7c0ff949 | 3,037 | py | Python | python/python-010/rds.py | suzuxander/suzuxander_samples | 736224dae91b432ef3ec796f5eda23417865f142 | [
"MIT"
]
| null | null | null | python/python-010/rds.py | suzuxander/suzuxander_samples | 736224dae91b432ef3ec796f5eda23417865f142 | [
"MIT"
]
| null | null | null | python/python-010/rds.py | suzuxander/suzuxander_samples | 736224dae91b432ef3ec796f5eda23417865f142 | [
"MIT"
]
| null | null | null | from troposphere import Template, Ref, Parameter, GetAtt
from troposphere.ec2 import SecurityGroup
from troposphere.rds import DBSubnetGroup, DBInstance
def create_rds_template():
template = Template()
vpc = template.add_parameter(
parameter=Parameter(
title='Vpc',
Type='String'
)
)
subnet_a = template.add_parameter(
parameter=Parameter(
title='SubnetA',
Type='String'
)
)
subnet_b = template.add_parameter(
parameter=Parameter(
title='SubnetB',
Type='String'
)
)
master_user_name = template.add_parameter(
parameter=Parameter(
title='DBMasterUserName',
Type='String'
)
)
master_user_password = template.add_parameter(
parameter=Parameter(
title='DBMasterUserPassword',
Type='String'
)
)
storage_size = template.add_parameter(
parameter=Parameter(
title='StorageSize',
Default='20',
Type='String'
)
)
instance_class = template.add_parameter(
parameter=Parameter(
title='InstanceClass',
Default='db.t2.micro',
Type='String'
)
)
engine_version = template.add_parameter(
parameter=Parameter(
title='EngineVersion',
Default='5.7.26',
Type='String'
)
)
security_group = template.add_resource(
resource=SecurityGroup(
title='SampleSecurityGroup',
GroupDescription='sample-rds',
SecurityGroupIngress=[
{
'IpProtocol': 'tcp',
'FromPort': 3306,
'ToPort': 3306,
'CidrIp': '0.0.0.0/0',
}
],
VpcId=Ref(vpc)
)
)
db_subnet_group = template.add_resource(
resource=DBSubnetGroup(
title='SampleDBSubnetGroup',
DBSubnetGroupDescription='sample-rds',
DBSubnetGroupName='sample-rds',
SubnetIds=[Ref(subnet_a), Ref(subnet_b)]
)
)
template.add_resource(
resource=DBInstance(
title='SampleDBInstance',
DBSubnetGroupName=Ref(db_subnet_group),
# VPCSecurityGroups=[Ref(security_group)],
VPCSecurityGroups=[GetAtt(security_group, 'GroupId')],
AllocatedStorage=Ref(storage_size),
DBInstanceClass=Ref(instance_class),
DBInstanceIdentifier='sample-rds',
DBName='sample_rds',
Engine='mysql',
EngineVersion=Ref(engine_version),
MasterUsername=Ref(master_user_name),
MasterUserPassword=Ref(master_user_password),
PubliclyAccessible=True
)
)
with open('./rds.yml', mode='w') as file:
file.write(template.to_yaml())
if __name__ == '__main__':
create_rds_template()
| 25.957265 | 66 | 0.55186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.148502 |
c23f39dcaa9bc21fb37ef18d6de38e47058d0da3 | 514 | py | Python | hlwtadmin/migrations/0044_gigfinderurl_ignore_periods.py | Kunstenpunt/havelovewilltravel | 6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83 | [
"Apache-2.0"
]
| 1 | 2020-10-16T16:29:01.000Z | 2020-10-16T16:29:01.000Z | hlwtadmin/migrations/0044_gigfinderurl_ignore_periods.py | Kunstenpunt/havelovewilltravel | 6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83 | [
"Apache-2.0"
]
| 365 | 2020-02-03T12:46:53.000Z | 2022-02-27T17:20:46.000Z | hlwtadmin/migrations/0044_gigfinderurl_ignore_periods.py | Kunstenpunt/havelovewilltravel | 6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83 | [
"Apache-2.0"
]
| null | null | null | # Generated by Django 3.0.7 on 2021-01-26 09:57
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hlwtadmin', '0043_auto_20210126_0833'),
]
operations = [
migrations.AddField(
model_name='gigfinderurl',
name='ignore_periods',
field=django.contrib.postgres.fields.ArrayField(base_field=models.DateTimeField(), blank=True, null=True, size=2),
),
]
| 25.7 | 126 | 0.659533 | 383 | 0.745136 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.219844 |
c23fbfd17a95b6fdf7b229efd815b011116431cc | 1,855 | py | Python | conanfile.py | ltjax/nativefiledialog-extended | 2dc958b98f41e081189e84b56f9f250e1b30f92e | [
"Zlib"
]
| null | null | null | conanfile.py | ltjax/nativefiledialog-extended | 2dc958b98f41e081189e84b56f9f250e1b30f92e | [
"Zlib"
]
| null | null | null | conanfile.py | ltjax/nativefiledialog-extended | 2dc958b98f41e081189e84b56f9f250e1b30f92e | [
"Zlib"
]
| null | null | null | from conans import ConanFile, CMake, tools
class NativeFileDialogExtendedConan(ConanFile):
name = "nativefiledialog-extended"
version = "1.0"
license = "zlib"
author = "Marius Elvert [email protected]"
url = "https://github.com/ltjax/nativefiledialog-extended"
description = "Small C and C++ libraries that portably invoke native file open, folder select and save dialogs."
topics = ("file-dialog",)
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = {"shared":False}
generators = "cmake"
exports_sources = "src/*", "test/*", "CMakeLists.txt"
def _configured_cmake(self):
cmake = CMake(self)
cmake.configure(source_folder=".", defs={})
return cmake
def build(self):
self._configured_cmake().build()
def package(self):
self._configured_cmake().install()
def package_info(self):
self.cpp_info.libs = ["nfd"]
if self.settings.os == "Linux":
self._add_libraries_from_pc("gtk+-3.0")
elif self.settings.os == "Macos":
frameworks = ["AppKit"]
for framework in frameworks:
self.cpp_info.exelinkflags.append("-framework {0}".format(framework))
self.cpp_info.sharedlinkflags.append("-framework {0}".format(framework))
def _add_libraries_from_pc(self, library):
pkg_config = tools.PkgConfig(library)
libs = [lib[2:] for lib in pkg_config.libs_only_l] # cut -l prefix
lib_paths = [lib[2:] for lib in pkg_config.libs_only_L] # cut -L prefix
self.cpp_info.libs.extend(libs)
self.cpp_info.libdirs.extend(lib_paths)
self.cpp_info.sharedlinkflags.extend(pkg_config.libs_only_other)
self.cpp_info.exelinkflags.extend(pkg_config.libs_only_other) | 40.326087 | 116 | 0.654447 | 1,810 | 0.975741 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.233423 |
c24130645b33d6b4c145bae50da7d266149801e6 | 960 | py | Python | hackerearth/Algorithms/New World/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
]
| 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerearth/Algorithms/New World/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
]
| null | null | null | hackerearth/Algorithms/New World/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
]
| null | null | null | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
import bisect
def check(arr, x, ln, val):
count = 0
i = 0
while i < ln - 1:
index = bisect.bisect(arr, arr[i] + x) - 1
if index > i:
i = index
count += 1
else:
return False
return count <= val
t = int(input())
for _ in range(t):
n, k = map(int, input().strip().split())
stones = list(map(int, input().strip().split()))
low = 1
high = stones[-1] - stones[0] # Location of all stones are given in ascending order.
while low <= high:
mid = (low + high) // 2
if check(stones, mid, n, k):
high = mid - 1
else:
low = mid + 1
print(low)
| 24 | 94 | 0.527083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.341667 |
c2416fc1e551162c64c074b04f41a960aa792575 | 712 | py | Python | core/departments/urls.py | IvanRch/bsuphys | 105715cde8fc8e9a42019ed4b650fe00b94fa132 | [
"Apache-2.0"
]
| 1 | 2022-01-04T07:04:46.000Z | 2022-01-04T07:04:46.000Z | core/departments/urls.py | IvanRch/bsuphys | 105715cde8fc8e9a42019ed4b650fe00b94fa132 | [
"Apache-2.0"
]
| 1 | 2020-02-17T19:06:03.000Z | 2020-02-17T19:06:03.000Z | core/departments/urls.py | IvanRch/bsuphys | 105715cde8fc8e9a42019ed4b650fe00b94fa132 | [
"Apache-2.0"
]
| 1 | 2021-07-08T13:21:04.000Z | 2021-07-08T13:21:04.000Z | from django.urls import path, re_path
from . import views
app_name = "departments"
urlpatterns = [
    path("", views.departmentList, name="department_list"),
path(
"<slug:department>/",
views.department_detail,
name="department_detail",
),
path(
"<slug:department>/staff/",
views.department_detail_staff,
        name="department_detail_staff",
),
path(
"<slug:department>/thesis/",
views.department_detail_thesis,
        name="department_detail_thesis",
),
path(
"<slug:department>/directions/",
views.department_detail_directions,
        name="department_detail_directions",
),
] | 25.428571 | 72 | 0.573034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.300562 |
c2456834188f5aaff78f04e88343303b398c8b26 | 432 | py | Python | 18th May Assignments/case study 1/question_5.py | JangirSumit/data_science | a1957122f8a4c66e3b4c7b7c93a74c53a2db1fe4 | [
"MIT"
]
| 15 | 2019-05-05T04:48:42.000Z | 2022-02-15T12:08:33.000Z | 18th May Assignments/case study 1/question_5.py | JangirSumit/data_science | a1957122f8a4c66e3b4c7b7c93a74c53a2db1fe4 | [
"MIT"
]
| null | null | null | 18th May Assignments/case study 1/question_5.py | JangirSumit/data_science | a1957122f8a4c66e3b4c7b7c93a74c53a2db1fe4 | [
"MIT"
]
| 53 | 2019-11-10T05:09:25.000Z | 2022-03-28T01:26:32.000Z | # 5. How do you Count The Number Of Times Each Value Appears In An Array Of Integers?
# [0, 5, 4, 0, 4, 4, 3, 0, 0, 5, 2, 1, 1, 9]
# Answer should be array([4, 2, 1, 1, 3, 2, 0, 0, 0, 1]) which means 0 comes 4 times, 1 comes 2 times, 2 comes 1 time, 3 comes 1 time and so on.
array = [0, 5, 4, 0, 4, 4, 3, 0, 0, 5, 2, 1, 1, 9]
count_array_elements = [array.count(a) for a in set(array)]
print(count_array_elements)
| 43.2 | 144 | 0.601852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.664352 |
c246213af42c94a04ec5e4409ced7cd800cf77ef | 4,957 | py | Python | pvp-tic-tac-toe.py | lsfidelis/pvp-tic-tac-toe | 6ed73e5a053cc22a092c7b56d774503f57a859ab | [
"MIT"
]
| null | null | null | pvp-tic-tac-toe.py | lsfidelis/pvp-tic-tac-toe | 6ed73e5a053cc22a092c7b56d774503f57a859ab | [
"MIT"
]
| null | null | null | pvp-tic-tac-toe.py | lsfidelis/pvp-tic-tac-toe | 6ed73e5a053cc22a092c7b56d774503f57a859ab | [
"MIT"
]
| null | null | null | from time import sleep
print("Welcome to Tic Tac Toe! \nWe'll be playing in a sec, but, first..")
general_board = {'7': ' ', '8': ' ', '9': ' ',
'4': ' ', '5': ' ', '6': ' ',
'1': ' ', '2': ' ', '3': ' '}
# prints board structure
def show_board(board):
print('\t\t', board['7'], '|', board['8'], '|', board['9'])
print('\t\t', '--+---+--')
print('\t\t', board['4'], '|', board['5'], '|', board['6'])
print('\t\t', '--+---+--')
print('\t\t', board['1'], '|', board['2'], '|', board['3'])
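# The board uses numpad-style coordinates, i.e. the printed layout is:
#
#    7 | 8 | 9
#   --+---+--
#    4 | 5 | 6
#   --+---+--
#    1 | 2 | 3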
# Choose which player goes first
def game():
while True:
        player = input("Choose which player goes first: (X/O) ").strip().upper()
        if player not in ("X", "O"):
print('\nInvalid choice. Try again.')
continue
else:
break
# Validates index to insert player symbol (X/O)
turns_count = 0
while True:
if turns_count == 9:
print("\t\n ***** GAME OVER! It's a Tie! *****")
break
show_board(general_board)
print(f"\nIt's {player}'s turn.", end=' ')
        move = input('Move to which place? ').strip()
        if move in general_board:
if general_board[move] == ' ':
turns_count += 1
general_board[move] = player
elif general_board[move] != ' ':
print('\n ---> Place already filled. Try again.\n')
continue
else:
print('\n ---> Invalid place. Try again.\n')
continue
        # Check for a win; the earliest possible win is on the 5th move
        if turns_count >= 5:
            winning_lines = (
                ('1', '2', '3'), ('4', '5', '6'), ('7', '8', '9'),  # horizontals
                ('1', '4', '7'), ('2', '5', '8'), ('3', '6', '9'),  # verticals
                ('1', '5', '9'), ('7', '5', '3'),                   # diagonals
            )
            if any(general_board[a] == general_board[b] == general_board[c] != ' '
                   for a, b, c in winning_lines):
                show_board(general_board)
                print(f'\t***** {player} is the winner! *****')
                print('-' * 35)
                break
# Changes player turn
if player == 'X':
player = 'O'
elif player == 'O':
player = 'X'
# Clear the board and reset the game
def restart():
while True:
        reset_game = input('Do you want to continue? (y/n) ').strip().lower()
        if reset_game not in ('y', 'n'):
            print(f'\nError: {reset_game} is not a valid choice. Please, try again.')
            continue
        elif reset_game == 'n':
            break
        elif reset_game == 'y':
print('\033[31m----- Restarting Tic Tac Toe... -----\033[m')
sleep(1)
for keys in general_board.keys():
general_board[keys] = ' '
game()
if __name__ == "__main__":
game()
restart()
| 35.92029 | 89 | 0.435747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,456 | 0.293726 |
c24663b502469b48e008fb30a563fba0b901fd18 | 7,119 | py | Python | total_tolles_ferleihsystem/auth_providers/ldap_auth_provider.py | spethso/Verleihsystem-TTF | 39179f9ac5b07f5106e555f82f3c9011d33805bd | [
"MIT"
]
| 1 | 2019-03-17T08:11:14.000Z | 2019-03-17T08:11:14.000Z | total_tolles_ferleihsystem/auth_providers/ldap_auth_provider.py | spethso/Verleihsystem-TTF | 39179f9ac5b07f5106e555f82f3c9011d33805bd | [
"MIT"
]
| 60 | 2018-06-12T14:46:50.000Z | 2020-11-16T00:50:37.000Z | total_tolles_ferleihsystem/auth_providers/ldap_auth_provider.py | FIUS/ttf-backend | 39179f9ac5b07f5106e555f82f3c9011d33805bd | [
"MIT"
]
| 1 | 2019-12-02T19:25:59.000Z | 2019-12-02T19:25:59.000Z | """
Auth Providers which provides LDAP login
"""
from typing import List, Dict
from ldap3 import Connection, Server, AUTO_BIND_TLS_BEFORE_BIND, SUBTREE
from ldap3.core.exceptions import LDAPSocketOpenError, LDAPBindError
from ..login import LoginProvider
from .. import APP, AUTH_LOGGER
class LDAPAuthProvider(LoginProvider, provider_name="LDAP"):
"""
Login Provider with connection to LDAP Server
"""
    ldap_uri: str  # The URL of the LDAP server
port: int #The port of the ldap server. Use None for default.
ssl: bool #Whether to use ssl for the connection.
start_tls: bool #Whether to upgrade connection with StartTLS once bound.
user_search_base: str #The search base for users.
group_search_base: str #The search base for groups.
user_rdn: str #The RDN for users.
user_uid_field: str # The field of a user, which is the name, that is i the group_membership_field
group_membership_field: str #The field of a group, which contains the username
moderator_filter: str #A moderator must match this filter
    admin_filter: str  # An administrator must match this filter
moderator_group_filter: str # A moderator must be in at least one of the matched groups
    admin_group_filter: str  # An admin must be in at least one of the matched groups
server: Server = None
known_users: Dict[str, bool]
def __init__(self):
        self.ldap_uri: str = APP.config["LDAP_URI"]  # The URL of the LDAP server
self.port: int = APP.config["LDAP_PORT"] #The port of the ldap server. Use None for default.
self.ssl: bool = APP.config["LDAP_SSL"] #Whether to use ssl for the connection.
self.start_tls: bool = APP.config["LDAP_START_TLS"] #Whether to upgrade connection with StartTLS once bound.
self.user_search_base: str = APP.config["LDAP_USER_SEARCH_BASE"] #The search base for users.
self.group_search_base: str = APP.config["LDAP_GROUP_SEARCH_BASE"] #The search base for groups.
self.user_rdn: str = APP.config["LDAP_USER_RDN"] #The RDN for users.
# The field of a user, which is the name, that is i the group_membership_field
self.user_uid_field: str = APP.config["LDAP_USER_UID_FIELD"]
#The field of a group, which contains the username
self.group_membership_field: str = APP.config["LDAP_GROUP_MEMBERSHIP_FIELD"]
self.moderator_filter: str = APP.config["LDAP_MODERATOR_FILTER"] #A moderator must match this filter
        self.admin_filter: str = APP.config["LDAP_ADMIN_FILTER"]  # An administrator must match this filter
# A moderator must be in at least one of the matched groups
self.moderator_group_filter: str = APP.config["LDAP_MODERATOR_GROUP_FILTER"]
        # An admin must be in at least one of the matched groups
self.admin_group_filter: str = APP.config["LDAP_ADMIN_GROUP_FILTER"]
self.server: Server = None
self.known_users = {}
def init(self) -> None:
self.server = Server(self.ldap_uri, port=self.port, use_ssl=self.ssl)
def valid_user(self, user_id: str) -> bool:
return True
@classmethod
def combine_filters(cls, filters: List[str]) -> str:
"""
Combines the given filters with a or
"""
non_empty_filters = list(filter(None, filters))
if not non_empty_filters:
return ""
elif len(non_empty_filters) == 1:
return non_empty_filters.pop()
else:
return "(|" + ''.join(non_empty_filters) + ")"
def valid_password(self, user_id: str, password: str) -> bool:
try:
user_str = self.user_rdn + "=" + user_id + "," + self.user_search_base
with Connection(self.server,
user=user_str,
password=password,
auto_bind=AUTO_BIND_TLS_BEFORE_BIND,
read_only=True) as conn:
user_base_filter = "(" + self.user_rdn + "=" + user_id + ")"
user_filter = user_base_filter
all_users_filter = self.combine_filters([self.moderator_filter, self.admin_filter])
if all_users_filter:
user_filter = "(&" + all_users_filter + user_base_filter + ")"
if not conn.search(self.user_search_base,
user_filter,
search_scope=SUBTREE,
attributes=[self.user_uid_field]):
AUTH_LOGGER.info("User %s is not in the user filter", user_id)
return False
user_uid = str(conn.entries.pop()[self.user_uid_field])
group_base_filter = "(" + self.group_membership_field + "=" + user_uid + ")"
group_filter = group_base_filter
all_groups_filter = self.combine_filters([self.moderator_group_filter, self.admin_group_filter])
if all_groups_filter:
group_filter = "(&" + all_groups_filter + group_base_filter + ")"
if not conn.search(self.group_search_base, group_filter, search_scope=SUBTREE):
AUTH_LOGGER.info("User %s is not in any group of the group filter", user_id)
return False
admin_user_filter = user_base_filter
all_admin_users_filter = self.combine_filters([self.admin_filter])
if all_admin_users_filter:
admin_user_filter = "(&" + all_admin_users_filter + user_base_filter + ")"
admin_group_filter = group_base_filter
all_admin_groups_filter = self.combine_filters([self.admin_group_filter])
if all_admin_groups_filter:
admin_group_filter = "(&" + all_admin_groups_filter + group_base_filter + ")"
in_admin_user_filter = conn.search(self.user_search_base,
admin_user_filter,
search_scope=SUBTREE)
in_admin_group_filter = conn.search(self.group_search_base,
admin_group_filter,
search_scope=SUBTREE)
if (in_admin_user_filter and in_admin_group_filter):
self.known_users[user_id] = True
else:
self.known_users[user_id] = False
AUTH_LOGGER.debug("Valid login from user %s. User in admin user filter: %s. User in admin group: %s",
user_id, str(in_admin_user_filter), str(in_admin_group_filter))
return True
except LDAPSocketOpenError as error:
raise ConnectionError("Unable to connect to LDAP Server.") from error
except LDAPBindError:
return False
return False
def is_admin(self, user_id: str) -> bool:
return self.known_users[user_id]
def is_moderator(self, user_id: str) -> bool:
return True
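# Hedged configuration sketch -- only the key names are taken from __init__
# above; all values are invented examples:
#
#   LDAP_URI = "ldaps://ldap.example.org"
#   LDAP_PORT = None              # None selects the scheme default
#   LDAP_SSL = True
#   LDAP_START_TLS = False
#   LDAP_USER_SEARCH_BASE = "ou=people,dc=example,dc=org"
#   LDAP_USER_RDN = "uid"
#   LDAP_GROUP_SEARCH_BASE = "ou=groups,dc=example,dc=org"
#   LDAP_GROUP_MEMBERSHIP_FIELD = "memberUid"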
| 47.46 | 118 | 0.61975 | 6,828 | 0.959123 | 0 | 0 | 409 | 0.057452 | 0 | 0 | 1,822 | 0.255935 |
c247338889dd4aef3193b428e74aac5424652e3f | 4,117 | py | Python | md2html.py | osfans/yancheng | 1f5cec75c8d97006f8b2ee4b1b36b7dc78930ef0 | [
"Apache-2.0"
]
| 4 | 2017-01-26T03:25:24.000Z | 2019-04-15T14:11:46.000Z | md2html.py | osfans/yancheng | 1f5cec75c8d97006f8b2ee4b1b36b7dc78930ef0 | [
"Apache-2.0"
]
| 1 | 2016-12-02T04:26:31.000Z | 2016-12-05T05:02:39.000Z | md2html.py | osfans/xu | 1f5cec75c8d97006f8b2ee4b1b36b7dc78930ef0 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
import re, os, glob
template = """
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
<style>
body {
font-family: PMingLiu, HanaMinA, HanaMinB, Helvetica, arial, sans-serif;
writing-mode: vertical-rl;
-webkit-writing-mode: vertical-rl; }
.sm {
margin: 20px 0 10px;
padding: 0;
font-weight: bold;
font-size: 30px;
border-left: 1px solid #cccccc;
margin: 0 5px;
cursor: text;
position: static;
clear: both;
text-align: right;
}
.sd, .sd2, .zy, .zi, .zi1, .yi {
font-size: 10px;
text-align: center;
cursor: text;
float: left;
margin-left: 10px;
margin-right: 10px;
line-height: 10px;
letter-spacing: 0.35em;
}
.sd, .sd2 {
margin-right: 25px;
clear: both;
}
.sd2 {
margin-right: 20px;
}
.zi, .zi1 {
padding-top: 20px;
padding-bottom: 10px;
font-size: 20px;
line-height: 20px;
}
.zi1 {
padding-top: 10px;
}
.yi {
min-height: 40px;
text-align: left;
line-height: 12px;
margin-right: 8px;
}
.clear {
clear: both;
}
</style>
<title>徐氏類音字彙</title>
</head>
<body>
%s
</body>
</html>
"""
lines = list()
def append(fmt, s):
#print(s)
lines.append(fmt % s)
def parse(s):
s = s.strip().strip("`").replace("〜", "—").replace("~", "—").replace("※", "").replace(" ", "")
if "(" in s:
        s = re.sub(r"(.[\?=]?)((.+?))", r'<a title="\2">\1</a>', s)
return s
def break_yi(yi):
n = len(yi)
if 0 < n < 4:
yi = yi + (4-n) * " "
n = 4
if n > 0 and '<' not in yi:
yi = yi[:(n+1)//2]+"<br/>"+yi[(n+1)//2:]
return yi
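# Example: break_yi("abcd") -> "ab<br/>cd"; inputs shorter than four
# characters are first padded to length four with spaces, so the two
# vertical columns of the definition text stay aligned.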
def md2html(filename):
sm = ""
sd = ""
zi_count = 0
zi_single = ""
lines.clear()
for line in open(filename, encoding="U8"):
line = line.strip()
if line:
if line.startswith(">") or line.startswith("---") :
continue
if line.startswith("##"):
line = line[2:].strip()
if line == sd:
continue
sd = line
zi_count = 0
elif line.startswith("#"):
line = line[1:].strip()
if line == sm:
continue
sm = line
append("<div class=sm>%s</div>", sm)
else:
zi, yi= "", ""
if line.startswith("`"):
yi = line #無字
elif line.count("`") == 2:
zi, yi = line.split("`", 1)
if zi or yi:
zi = parse(zi)
yi = parse(yi)
if not yi:
zi_single += zi
continue
if zi:
zi = zi_single + zi
zi_single = ""
yi = break_yi(yi)
zi_count+=1
if zi_count == 1:
sd_title = sd
if not zi:
sd_title = yi
yi = ""
if len(sd_title) == 2:
sd_title = sd[0]+"<br/>" + sd[1]
append("<div class=sd2>%s</div>", sd_title)
else:
append("<div class=sd>%s</div>", sd_title)
append("<div class=zy><div class=zi1>%s</div><div class=yi>%s</div></div>",(zi, yi))
else:
append("<div class=zy><div class=zi>%s</div><div class=yi>%s</div></div>",(zi, yi))
target = open("docs/" + os.path.basename(filename).replace(".md", ".html"), "w", encoding="U8")
target.write(template % ("\n".join(lines)))
target.close()
def copy_readme():
target = open("README.md", "w", encoding="U8")
target.write(open("wiki/Home.md", encoding="U8").read().replace("/osfans/xu/wiki/", "https://osfans.github.io/xu/"))
target.close()
copy_readme()
for filename in glob.glob("wiki/??.md"):
md2html(filename)
| 24.360947 | 120 | 0.459072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,646 | 0.396531 |
c2478e02ca506e0323d992197336faa1570d5c97 | 2,460 | py | Python | plugin_hide_run_panel/__init__.py | Holt59/modorganizer-python_plugins | f3404b1c3d9b8f5a6aa2133b47f7fc0218c18dc9 | [
"MIT"
]
| null | null | null | plugin_hide_run_panel/__init__.py | Holt59/modorganizer-python_plugins | f3404b1c3d9b8f5a6aa2133b47f7fc0218c18dc9 | [
"MIT"
]
| null | null | null | plugin_hide_run_panel/__init__.py | Holt59/modorganizer-python_plugins | f3404b1c3d9b8f5a6aa2133b47f7fc0218c18dc9 | [
"MIT"
]
| null | null | null | # -*- encoding: utf-8 -*-
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtWidgets import QMainWindow, QFrame
import mobase
class HideRunPanelPlugin(mobase.IPlugin):
_runFrame: QFrame
def __init__(self):
super().__init__()
def init(self, organizer: mobase.IOrganizer):
""" For a IPlugin, the only place where things can be done is in init(). """
self._organizer = organizer
# mobase.IOrganizer has a few callbacks available:
# onUserInterfaceInitialized is called when the UI has been initialized, so
# we can use it to retrieve the actual run frame from the main window. Trying
# to access the main window in init() would not work since the main window is
# not yet created.
self._organizer.onUserInterfaceInitialized(self._onUiInit)
# We add a callback when plugin change to hide/show the run frame depending
# on the setting.
self._organizer.onPluginSettingChanged(self._onPluginSettingChanged)
return True
def _onUiInit(self, mainWindow: QMainWindow):
self._runFrame = mainWindow.findChild(QFrame, "startGroup") # type: ignore
self._onPluginSettingChanged(
self.name(),
"visible",
None,
self._organizer.pluginSetting(self.name(), "visible"),
)
def _onPluginSettingChanged(
self, pluginName: str, pluginKey: str, oldValue, newValue
):
""" Note: oldValue and newValue are Union type (mobase.MoVariant), but
MovVariant is not actually in mobase, so we cannot specify it currently. """
if pluginName == self.name() and pluginKey == "visible":
self._runFrame.setVisible(newValue) # type: ignore
def name(self):
return "Hide Run Panel"
def author(self):
return "Holt59"
def description(self):
return self._tr("Hide the run panel")
def version(self):
return mobase.VersionInfo(1, 0, 0, mobase.ReleaseType.final)
def isActive(self):
return True
def settings(self):
# We have a single setting indicating if the run frame should be visible or
# not. We use this in _onPluginSettingChanged.
return [mobase.PluginSetting("visible", "run frame visible", False)]
def _tr(self, str):
return QCoreApplication.translate("HideRunPanelPlugin", str)
def createPlugin():
return HideRunPanelPlugin()
| 31.948052 | 85 | 0.660976 | 2,271 | 0.923171 | 0 | 0 | 0 | 0 | 0 | 0 | 925 | 0.376016 |
c24ab458d07762596a9a0b958ea5ceac2489021a | 164 | py | Python | nnet/learning_rate_func/__init__.py | zhaoyan1117/NeuralNet | a0343dd469e981bf9b4f18db0209ca9bfaf58c4f | [
"BSD-2-Clause"
]
| null | null | null | nnet/learning_rate_func/__init__.py | zhaoyan1117/NeuralNet | a0343dd469e981bf9b4f18db0209ca9bfaf58c4f | [
"BSD-2-Clause"
]
| null | null | null | nnet/learning_rate_func/__init__.py | zhaoyan1117/NeuralNet | a0343dd469e981bf9b4f18db0209ca9bfaf58c4f | [
"BSD-2-Clause"
]
| null | null | null | from ._inv_prop_lr import InvPropLR
from ._constant_lr import ConstantLR
from ._step_size_lr import StepSizeLR
from ._dynamic_step_size_lr import DynamicStepSizeLR
| 32.8 | 52 | 0.878049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c24d4c5a8f9125c9ef834c785c10d1d380869f30 | 8,645 | py | Python | src/utils/strava.py | adrigrillo/endomondo-strava-migrator | 398ff4a0db4a8a5a3a4f0d8fb53157ffeeb88079 | [
"MIT"
]
| 2 | 2020-12-08T20:51:38.000Z | 2021-01-03T20:42:10.000Z | src/utils/strava.py | adrigrillo/endomondo-strava-migrator | 398ff4a0db4a8a5a3a4f0d8fb53157ffeeb88079 | [
"MIT"
]
| 1 | 2020-12-08T21:09:50.000Z | 2020-12-08T21:30:35.000Z | src/utils/strava.py | adrigrillo/endomondo-strava-migrator | 398ff4a0db4a8a5a3a4f0d8fb53157ffeeb88079 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
utils/strava.py
=================
Utility class to Strava API
"""
import json
import time
from configparser import ConfigParser, NoOptionError
from datetime import datetime
from pathlib import Path
from typing import Tuple
from loguru import logger
from stravalib import Client, exc
from utils.parameters import SECRET
from utils.constants import CONFIG_PATH, CODE_ID_FILE_NAME, TOKEN_FILE_NAME
from utils.files_handler import check_folder
from utils.parameters import STRAVA, CLIENT_ID
def get_client_id(app_config: ConfigParser) -> int:
""" Obtains the client ID from the configuration file.
Args:
app_config (ConfigParser): app configuration.
Returns:
int: client id from the configuration file.
Raises:
NoOptionError: If the `client_id` key is not
present in the configuration.
ValueError: If the client id is not an integer.
"""
try:
client_id = app_config.getint(STRAVA, CLIENT_ID)
except NoOptionError:
raise ValueError('The client id has not been set in the configuration.')
except ValueError:
logger.exception('Invalid client id format.')
raise
return client_id
def get_secret(app_config: ConfigParser) -> str:
""" Obtains the secret from the configuration file.
Args:
app_config (ConfigParser): app configuration.
Returns:
str: secret from the configuration file.
Raises:
NoOptionError: If the `secret` key is not
present in the configuration.
"""
try:
secret = app_config.get(STRAVA, SECRET)
except NoOptionError:
        raise ValueError('The secret has not been set in the configuration.')
return secret
def get_strava_token_from_code_id(config: ConfigParser) -> str:
""" Method that interchange the temporary authentication code obtained
when `src/request_auth.py` is executed. The method reads the file
`config/code_id.txt` that contains the temporal authentication and generates
the POST request to obtain the final access token which is saved in
`config/token.json`.
This method requires the Strava application `client_id` and `secret` that
has to be set in the configuration file (`config/config.ini`).
Args:
config (ConfigParser): app configuration.
Returns:
str: Strava access token.
Raises:
        ValueError: If no valid temporary authentication code is found.
"""
code_id_path = Path(CONFIG_PATH, CODE_ID_FILE_NAME)
if not code_id_path.is_file():
        raise ValueError('The file with the temporary authentication code (`config/code_id.txt`) '
                         'was NOT found. Execute `request_auth.py` to obtain the temporary access.')
with open(code_id_path, 'r') as file:
        logger.debug('The file with the temporary authentication code (`config/code_id.txt`) '
                     'was found.')
code_id = file.read()
if not code_id:
        raise ValueError('No valid temporary access code found. Rerun `request_auth.py` '
                         'to obtain the temporary access.')
client = Client()
token = client.exchange_code_for_token(client_id=get_client_id(config),
client_secret=get_secret(config),
code=code_id)
logger.debug('Obtained access until {}:\n'
'- token: {}.'
'- refresh token: {}.',
datetime.utcfromtimestamp(int(token['expires_at'])).strftime('%d-%m-%Y %H:%M:%S'),
token['access_token'], token['refresh_token'])
# Save JSON with the response
save_path = Path(check_folder(CONFIG_PATH), TOKEN_FILE_NAME)
with open(save_path, 'w') as file:
logger.info('Writing token information to `{}`.', save_path)
json.dump(token, file, indent=4)
return token['access_token']
def get_strava_client(config: ConfigParser) -> Client:
""" Checks the authentication token and generates the Strava client.
Args:
config (ConfigParser): app configuration.
Returns:
        Client: a Strava client configured with the authentication token.
"""
token_file_path = Path(check_folder(CONFIG_PATH), TOKEN_FILE_NAME)
if token_file_path.is_file():
logger.debug('The token info file (`config/token.json`) was found.')
with open(token_file_path, 'r') as file:
token_data = json.load(file)
token = token_data.get('access_token')
# If the file exists but no access token found, check against the temporary auth
if not token:
logger.warning('The token info file (`config/token.json`) was found'
' but the access token could not be read.')
token = get_strava_token_from_code_id(config)
else:
logger.info('The token info file (`config/token.json`) was NOT found. '
                    'Retrieving from the temporary authentication code.')
token = get_strava_token_from_code_id(config)
client = Client(access_token=token)
return client
def upload_activity(client: Client, activity_type: str, file_path: Path) -> bool:
""" Helper method to upload the activity to Strava. This method will handle
the different possibilities when uploading an activity.
Args:
client (Client): configured Strava client.
activity_type (str): Strava activity string.
file_path (Path): Path to the `*.tcx` activity file.
Returns:
bool: True if the activity have been uploaded successfully. False otherwise.
Raises:
RateLimitExceeded: When the API limits have been reached. Generally when
            more than 1000 requests have been made during the day.
ConnectionError: When it has been impossible to connect the Strava servers.
Exception: Unknown exceptions that will be logged in detail.
"""
try:
activity_file = open(file_path, 'r')
client.upload_activity(
activity_file=activity_file,
data_type='tcx',
activity_type=activity_type,
private=False
)
except exc.ActivityUploadFailed:
logger.exception('Error uploading the activity `{}`.', file_path.stem)
return False
except exc.RateLimitExceeded:
logger.exception('Exceeded the API rate limit.')
raise
except ConnectionError:
logger.exception('No internet connection.')
raise
except Exception:
logger.exception('Unknown exception')
raise
# If no error return true
    logger.debug('Activity `{}` uploaded successfully.', file_path.stem)
return True
def handle_rate_limit(start_time: float, requests: int) -> Tuple[float, int]:
""" Method to handle the 15 minutes API limit. This method will check the
elapsed time since the first request and the number of them. Three cases
are possible:
- Less than 15 minutes elapsed from the first request and less than 100
requests -> continue.
- More than 15 minutes elapsed from the first request and less than 100
requests -> reset timer and request number to count from 0 again.
- Less than 15 minutes elapsed from the first request but more than 100
requests -> sleep until the 15 minutes block is over and reset timer
and request number to count from 0 again.
Args:
start_time (float): timestamp of the first request of the block.
requests (int): number of request done in the block.
Returns:
float, int: updated start time and number of requests following the
possible cases.
"""
requests += 1
elapsed_time = time.time() - start_time
if elapsed_time <= 60 * 15:
if requests >= 100:
remaining_time_stopped = 60 * 15 - elapsed_time
mins, secs = divmod(remaining_time_stopped, 60)
            logger.warning('The number of allowed requests per 15 minutes has '
                           'been reached. Sleeping for {:0.0f} minutes, {:0.1f} seconds.',
mins, secs)
time.sleep(remaining_time_stopped)
# Reset values. Include petition to be processed
logger.info('Waiting time elapsed. Continuing with the process.')
requests = 1
start_time = time.time()
else:
logger.debug('15 minutes have been elapsed. Resetting requests and time.')
# Reset values. Include petition to be processed
requests = 1
start_time = time.time()
return start_time, requests
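# Hedged usage sketch of the 15-minute limit helper (variable names are
# illustrative, not taken from this module):
#
#   block_start, block_requests = time.time(), 0
#   for path in activity_files:
#       upload_activity(client, 'run', path)
#       block_start, block_requests = handle_rate_limit(block_start, block_requests)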
| 36.020833 | 99 | 0.65587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,769 | 0.551648 |
df9a160281e97721997326dd0b0903a52cd73273 | 5,293 | py | Python | train_synthText.py | skyatmoon/Detailed-Handwriting-detection | 1eb7ba8087290cbdd3fbc2c092fbdbc2b715fc9c | [
"MIT"
]
| 1 | 2020-12-08T01:24:34.000Z | 2020-12-08T01:24:34.000Z | train_synthText.py | skyatmoon/Detailed-Handwriting-detection | 1eb7ba8087290cbdd3fbc2c092fbdbc2b715fc9c | [
"MIT"
]
| null | null | null | train_synthText.py | skyatmoon/Detailed-Handwriting-detection | 1eb7ba8087290cbdd3fbc2c092fbdbc2b715fc9c | [
"MIT"
]
| null | null | null | """
Author: brooklyn
train with synthText
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import os
from net.craft import CRAFT
import sys
from utils.cal_loss import cal_synthText_loss
from dataset.synthDataset import SynthDataset
import argparse
from eval import eval_net
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='CRAFT Train Fine-Tuning')
parser.add_argument('--gt_path', default='/media/brooklyn/EEEEE142EEE10425/SynthText/gt.mat', type=str, help='SynthText gt.mat')
parser.add_argument('--synth_dir', default='/media/brooklyn/EEEEE142EEE10425/SynthText', type=str, help='SynthText image dir')
parser.add_argument('--label_size', default=96, type=int, help='target label size')
parser.add_argument('--batch_size', default=16, type=int, help='training data batch size')
parser.add_argument('--test_batch_size', default=16, type=int, help='test data batch size')
parser.add_argument('--test_interval', default=40, type=int, help='test interval')
parser.add_argument('--max_iter', default=50000, type=int, help='max iteration')
parser.add_argument('--lr', default=0.0001, type=float, help='initial learning rate')
parser.add_argument('--epochs', default=500, type=int, help='training epochs')
parser.add_argument('--test_iter', default=10, type=int, help='test iteration')
args = parser.parse_args()
image_transform = transforms.Compose([
transforms.Resize((args.label_size * 2, args.label_size * 2)),
transforms.ToTensor()
])
label_transform = transforms.Compose([
transforms.Resize((args.label_size,args.label_size)),
transforms.ToTensor()
])
def train(net, epochs, batch_size, test_batch_size, lr, test_interval, max_iter, model_save_path, save_weight=True):
train_data = SynthDataset(image_transform=image_transform,
label_transform=label_transform,
file_path=args.gt_path,
image_dir=args.synth_dir)
steps_per_epoch = 1000
    # Use a subset of SynthText as the training set
train_num = batch_size * steps_per_epoch
train_data = torch.utils.data.Subset(train_data, range(train_num))
    # Split into training and validation sets
    train_num = len(train_data)
    test_iter = args.test_iter  # number of validation batches
val_num = test_batch_size * test_iter
train_data, val_data = torch.utils.data.random_split(train_data, [train_num - val_num, val_num])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=test_batch_size, shuffle=False)
criterion = nn.MSELoss(reduction='none')
optimizer = optim.Adam(net.parameters(), lr=lr)
for epoch in range(epochs):
print('epoch = ', epoch)
for i, (images, labels_region, labels_affinity, _) in enumerate(train_loader):
iter = epoch * steps_per_epoch + i
            # Decay the learning rate every 10000 iterations
if iter != 0 and iter % 10000 == 0:
for param in optimizer.param_groups:
param['lr'] *= 0.8
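                    # e.g. with lr = 1e-4: iteration 10000 -> 8e-5,
                    # 20000 -> 6.4e-5, 30000 -> 5.12e-5 (x0.8 per decay)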
images = images.to(device)
labels_region = labels_region.to(device)
labels_affinity = labels_affinity.to(device)
labels_region = torch.squeeze(labels_region, 1)
labels_affinity = torch.squeeze(labels_affinity, 1)
            # Forward pass
y, _ = net(images)
score_text = y[:, :, :, 0]
score_link = y[:, :, :, 1]
            # Combined region/affinity loss with OHEM
loss = cal_synthText_loss(criterion, score_text, score_link, labels_region, labels_affinity, device)
            # Backward pass
            optimizer.zero_grad()  # clear accumulated gradients
            loss.backward()  # compute gradients
            optimizer.step()  # update weights
            # Log loss and the current learning rate (read from the optimizer,
            # since it is decayed in place above)
            current_lr = optimizer.param_groups[0]['lr']
            if i % 10 == 0:
                print('i = ', i, ': loss = ', loss.item(), ' lr = ', current_lr)
            # Compute validation loss
            if i != 0 and i % test_interval == 0:
                test_loss = eval_net(net, val_loader, criterion, device)
                print('test: i = ', i, 'test_loss = ', test_loss, 'lr = ', current_lr)
if save_weight:
torch.save(net.state_dict(), model_save_path + 'epoch_' + str(epoch) + '_iter' + str(i) + '.pth')
            # Save the final model
if iter == max_iter:
if save_weight:
torch.save(net.state_dict(), model_save_path + 'final.pth')
if __name__ == "__main__":
batch_size = args.batch_size
test_batch_size = args.test_batch_size
    epochs = args.epochs  # number of passes over the dataset
    lr = args.lr  # learning rate
    test_interval = args.test_interval  # evaluation interval
max_iter = args.max_iter
    net = CRAFT(pretrained=True)  # CRAFT model
net = net.to(device)
model_save_prefix = 'checkpoints/craft_netparam_'
try:
train(net=net,
batch_size=batch_size,
test_batch_size=test_batch_size,
lr=lr,
test_interval=test_interval,
max_iter=max_iter,
epochs=epochs,
model_save_path=model_save_prefix)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED1.pth')
print('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 37.807143 | 128 | 0.642736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 992 | 0.18132 |
df9b4ebedd02514962424a1cc0a1b5aae502b670 | 1,896 | py | Python | friendcircle/models.py | jossafossa/Project24_backend | bb5cc91d21c9f93034b85b3e94e829f7ab33c565 | [
"MIT"
]
| null | null | null | friendcircle/models.py | jossafossa/Project24_backend | bb5cc91d21c9f93034b85b3e94e829f7ab33c565 | [
"MIT"
]
| 9 | 2019-12-04T23:15:59.000Z | 2022-02-10T09:08:38.000Z | friendcircle/models.py | jossafossa/Project24_backend | bb5cc91d21c9f93034b85b3e94e829f7ab33c565 | [
"MIT"
]
| null | null | null | from django.db import models
class FriendCircle(models.Model):
name = models.CharField(blank=True, max_length=255)
description = models.CharField(blank=True, max_length=1000)
interests = models.ManyToManyField('interests.Interest', blank=True)
members = models.ManyToManyField(
'users.CustomUser',
through='friendcircle.FriendCircleMembership',
through_fields=('friendcircle', 'user'),
related_name='memberships',
)
def __str__(self):
return self.name
# Keeps track of FriendCircle memberships
class FriendCircleMembership(models.Model):
user = models.ForeignKey('users.CustomUser', on_delete=models.CASCADE)
friendcircle = models.ForeignKey('friendcircle.FriendCircle', on_delete=models.CASCADE)
startdate = models.DateTimeField(auto_now_add=True)
enddate = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.user.name + " member at " + self.friendcircle.name
class Meta:
unique_together = (('user', 'friendcircle'))
MATCH_STATUS = (
('O', 'Not swiped',),
('V', 'Swiped Right',),
('X', 'Swiped Left',),
)
# Keeps track of matches. If both parties swiped right, the user can be added to FriendCircleMembership
class FriendCircleMatcher(models.Model):
user = models.ForeignKey('users.CustomUser', on_delete=models.CASCADE)
user_match_status = models.CharField(max_length=1,
choices=MATCH_STATUS,
default="O")
friendcircle = models.ForeignKey('friendcircle.FriendCircle', on_delete=models.CASCADE)
friendcircle_match_status = models.CharField(max_length=1,
choices=MATCH_STATUS,
default="O")
def __str__(self):
return self.user.email + " + " + self.friendcircle.name
class Meta:
unique_together = (('user', 'friendcircle'))
| 35.773585 | 103 | 0.68038 | 1,611 | 0.849684 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.239451 |
df9d6d03fbed45db8f46a22336474ebb4831783c | 474 | py | Python | components/collector/tests/source_collectors/jira/test_issues.py | m-zakeri/quality-time | 531931f0d8d4f5d262ea98445868158e41d268da | [
"Apache-2.0"
]
| null | null | null | components/collector/tests/source_collectors/jira/test_issues.py | m-zakeri/quality-time | 531931f0d8d4f5d262ea98445868158e41d268da | [
"Apache-2.0"
]
| null | null | null | components/collector/tests/source_collectors/jira/test_issues.py | m-zakeri/quality-time | 531931f0d8d4f5d262ea98445868158e41d268da | [
"Apache-2.0"
]
| null | null | null | """Unit tests for the Jira issues collector."""
from .base import JiraTestCase
class JiraIssuesTest(JiraTestCase):
"""Unit tests for the Jira issue collector."""
METRIC_TYPE = "issues"
async def test_issues(self):
"""Test that the issues are returned."""
issues_json = dict(total=1, issues=[self.issue()])
response = await self.get_response(issues_json)
self.assert_measurement(response, value="1", entities=[self.entity()])
| 29.625 | 78 | 0.679325 | 391 | 0.824895 | 0 | 0 | 0 | 0 | 271 | 0.57173 | 144 | 0.303797 |
df9e429f72ebf0471ad51a2d2296ecb2934b944d | 1,485 | py | Python | cf_xarray/tests/test_coding.py | rcaneill/cf-xarray | 210e997ab5e550e411ec1a4e789aac28e77bacff | [
"Apache-2.0"
]
| null | null | null | cf_xarray/tests/test_coding.py | rcaneill/cf-xarray | 210e997ab5e550e411ec1a4e789aac28e77bacff | [
"Apache-2.0"
]
| null | null | null | cf_xarray/tests/test_coding.py | rcaneill/cf-xarray | 210e997ab5e550e411ec1a4e789aac28e77bacff | [
"Apache-2.0"
]
| null | null | null | import numpy as np
import pandas as pd
import pytest
import xarray as xr
import cf_xarray as cfxr
@pytest.mark.parametrize(
"mindex",
[
pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("lat", "lon")),
pd.MultiIndex.from_arrays(
[["a", "b", "c", "d"], [1, 2, 4, 10]], names=("lat", "lon")
),
pd.MultiIndex.from_arrays(
[["a", "b", "b", "a"], [1, 2, 1, 2]], names=("lat", "lon")
),
],
)
@pytest.mark.parametrize("idxnames", ["foo", "landpoint", ("landpoint",), None])
def test_compression_by_gathering_multi_index_roundtrip(mindex, idxnames):
dim = "foo" if idxnames == "foo" else "landpoint"
dataset = xr.Dataset(
data_vars={"landsoilt": (dim, np.random.randn(4), {"foo": "bar"})},
coords={
dim: (dim, mindex, {"long_name": "land point number"}),
"coord1": (dim, [1, 2, 3, 4], {"foo": "baz"}),
},
attrs={"dataset": "test dataset"},
)
dataset.lat.attrs["standard_name"] = "latitude"
dataset.lon.attrs["standard_name"] = "longitude"
encoded = cfxr.encode_multi_index_as_compress(dataset, idxnames)
roundtrip = cfxr.decode_compress_to_multi_index(encoded, idxnames)
assert "compress" not in roundtrip[dim].encoding
xr.testing.assert_identical(roundtrip, dataset)
dataset[dim].attrs["compress"] = "lat lon"
with pytest.raises(ValueError):
cfxr.encode_multi_index_as_compress(dataset, idxnames)
| 34.534884 | 80 | 0.60404 | 0 | 0 | 0 | 0 | 1,383 | 0.931313 | 0 | 0 | 298 | 0.200673 |
dfa1fd750ebe41f9f5e5dbc785b717257dc70d9d | 1,908 | py | Python | slack_bolt/adapter/socket_mode/base_handler.py | hirosassa/bolt-python | befc3a1463f3ac8dbb780d66decc304e2bdf3e7a | [
"MIT"
]
| 504 | 2020-08-07T05:02:57.000Z | 2022-03-31T14:32:46.000Z | slack_bolt/adapter/socket_mode/base_handler.py | hirosassa/bolt-python | befc3a1463f3ac8dbb780d66decc304e2bdf3e7a | [
"MIT"
]
| 560 | 2020-08-07T01:16:06.000Z | 2022-03-30T00:40:56.000Z | slack_bolt/adapter/socket_mode/base_handler.py | hirosassa/bolt-python | befc3a1463f3ac8dbb780d66decc304e2bdf3e7a | [
"MIT"
]
| 150 | 2020-08-07T09:41:14.000Z | 2022-03-30T04:54:51.000Z | """The base class of Socket Mode client implementation.
If you want to build asyncio-based ones, use `AsyncBaseSocketModeHandler` instead.
"""
import logging
import signal
import sys
from threading import Event
from slack_sdk.socket_mode.client import BaseSocketModeClient
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_bolt import App
from slack_bolt.util.utils import get_boot_message
class BaseSocketModeHandler:
app: App # type: ignore
client: BaseSocketModeClient
def handle(self, client: BaseSocketModeClient, req: SocketModeRequest) -> None:
"""Handles Socket Mode envelope requests through a WebSocket connection.
Args:
client: this Socket Mode client instance
req: the request data
"""
raise NotImplementedError()
def connect(self):
"""Establishes a new connection with the Socket Mode server"""
self.client.connect()
def disconnect(self):
"""Disconnects the current WebSocket connection with the Socket Mode server"""
self.client.disconnect()
def close(self):
"""Disconnects from the Socket Mode server and cleans the resources this instance holds up"""
self.client.close()
def start(self):
"""Establishes a new connection and then blocks the current thread
to prevent the termination of this process.
If you don't want to block the current thread, use `#connect()` method instead.
"""
self.connect()
if self.app.logger.level > logging.INFO:
print(get_boot_message())
else:
self.app.logger.info(get_boot_message())
if sys.platform == "win32":
# Ctrl+C etc does not work on Windows OS
# see https://bugs.python.org/issue35935 for details
signal.signal(signal.SIGINT, signal.SIG_DFL)
Event().wait()
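        # Hedged usage sketch (SocketModeHandler is a concrete subclass shipped
        # with slack_bolt; the token value is illustrative):
        #
        #   handler = SocketModeHandler(app, "xapp-...")
        #   handler.start()  # blocks the current thread until interrupted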
| 32.896552 | 101 | 0.678197 | 1,492 | 0.781971 | 0 | 0 | 0 | 0 | 0 | 0 | 892 | 0.467505 |
dfa2ba545c720071817fb0691cb4e7c5aad3c2a5 | 8,344 | py | Python | project/pfasst/transfer_tools.py | amit17133129/pyMG-2016 | b82a60811bb0a8b91d8793c47177a240221f9176 | [
"BSD-2-Clause"
]
| 2 | 2016-04-04T15:20:50.000Z | 2020-08-01T19:28:55.000Z | project/pfasst/transfer_tools.py | amit17133129/pyMG-2016 | b82a60811bb0a8b91d8793c47177a240221f9176 | [
"BSD-2-Clause"
]
| 1 | 2020-10-02T05:44:45.000Z | 2020-10-02T05:44:45.000Z | project/pfasst/transfer_tools.py | amit17133129/pyMG-2016 | b82a60811bb0a8b91d8793c47177a240221f9176 | [
"BSD-2-Clause"
]
| 11 | 2016-03-26T18:37:06.000Z | 2020-10-01T19:44:55.000Z | # coding=utf-8
import numpy as np
import scipy.interpolate as intpl
import scipy.sparse as sprs
def to_sparse(D, format="csc"):
"""
Transform dense matrix to sparse matrix of return_type
bsr_matrix(arg1[, shape, dtype, copy, blocksize]) Block Sparse Row matrix
coo_matrix(arg1[, shape, dtype, copy]) A sparse matrix in COOrdinate format.
csc_matrix(arg1[, shape, dtype, copy]) Compressed Sparse Column matrix
csr_matrix(arg1[, shape, dtype, copy]) Compressed Sparse Row matrix
dia_matrix(arg1[, shape, dtype, copy]) Sparse matrix with DIAgonal storage
dok_matrix(arg1[, shape, dtype, copy]) Dictionary Of Keys based sparse matrix.
lil_matrix(arg1[, shape, dtype, copy]) Row-based linked list sparse matrix
:param D: Dense matrix
:param format: how to save the sparse matrix
:return: sparse version
"""
if format == "bsr":
return sprs.bsr_matrix(D)
elif format == "coo":
return sprs.coo_matrix(D)
elif format == "csc":
return sprs.csc_matrix(D)
elif format == "csr":
return sprs.csr_matrix(D)
elif format == "dia":
return sprs.dia_matrix(D)
elif format == "dok":
return sprs.dok_matrix(D)
elif format == "lil":
return sprs.lil_matrix(D)
else:
return to_dense(D)
def to_dense(D):
if sprs.issparse(D):
return D.toarray()
elif isinstance(D, np.ndarray):
return D
def next_neighbors_periodic(p, ps, k, T=None):
"""
    This function gives for a value p the k points next to it which are found
    in the vector ps, taking periodicity into account.
:param p: value
:param ps: ndarray, vector where to find the next neighbors
:param k: integer, number of neighbours
    :return: tuple of lists, the indices of the k next neighbors and the
        corresponding (periodically shifted) coordinates
"""
if T is None:
T = ps[-1]-2*ps[0]+ps[1]
p_bar = p - np.floor(p/T)*T
ps = ps - ps[0]
distance_to_p = []
for tk in ps:
d1 = tk+T-p_bar
d2 = tk-p_bar
d3 = tk-T-p_bar
min_d = min([np.abs(d1), np.abs(d2), np.abs(d3)])
if np.abs(d1) == min_d:
distance_to_p.append(d1)
elif np.abs(d2) == min_d:
distance_to_p.append(d2)
else:
distance_to_p.append(d3)
distance_to_p = np.asarray(distance_to_p)
    value_index = [(d, i) for i, d in enumerate(distance_to_p)]
    # sort by absolute distance (`sorted(..., cmp=...)` is Python 2 only)
    value_index_sorted_by_abs = sorted(value_index, key=lambda s: np.abs(s[0]))
    if k % 2 == 1:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k+1], key=lambda s: s[0])[:k]
    else:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k], key=lambda s: s[0])
    return [s[1] for s in value_index_sorted_by_sign], [s[0] + p for s in value_index_sorted_by_sign]
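# Worked example: ps = [0, 0.25, 0.5, 0.75], T = 1.0, p = 0.9, k = 2 ->
# indices [3, 0] with periodically shifted coordinates [0.75, 1.0], i.e.
# the node 0.0 is seen as 1.0 across the periodic boundary.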
def next_neighbors(p, ps, k):
"""
    This function gives for a value p the k points next to it which are found
    in the vector ps
:param p: value
:param ps: ndarray, vector where to find the next neighbors
:param k: integer, number of neighbours
:return: ndarray, with the k next neighbors
"""
distance_to_p = np.abs(ps-p)
# zip it
value_index = []
for d,i in zip(distance_to_p, range(distance_to_p.size)):
value_index.append((d,i))
# sort by distance
value_index_sorted = sorted(value_index, key=lambda s: s[0])
# take first k indices with least distance and sort them
return sorted(map(lambda s: s[1], value_index_sorted[0:k]))
def continue_periodic_array(arr,nn,T):
nn = np.asarray(nn)
d_nn = nn[1:]-nn[:-1]
if np.all(d_nn == np.ones(nn.shape[0]-1)):
return arr[nn]
else:
cont_arr = [arr[nn[0]]]
shift = 0.
for n,d in zip(nn[1:],d_nn):
if d != 1:
shift = -T
cont_arr.append(arr[n]+shift)
return np.asarray(cont_arr)
def restriction_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
"""
We construct the restriction matrix between two 1d grids, using lagrange interpolation.
:param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
:param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
:param k: order of the restriction
:return: a restriction matrix
"""
M = np.zeros((coarse_grid.size, fine_grid.size))
n_g = coarse_grid.size
for i, p in zip(range(n_g), coarse_grid):
if periodic:
nn, cont_arr = next_neighbors_periodic(p, fine_grid, k, T)
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(cont_arr, np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
else:
nn = next_neighbors(p, fine_grid, k)
# construct the lagrange polynomials for the k neighbors
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(fine_grid[nn], np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
return to_sparse(M, return_type)
def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
"""
We construct the interpolation matrix between two 1d grids, using lagrange interpolation.
:param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
:param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
:param k: order of the restriction
:return: a interpolation matrix
"""
M = np.zeros((fine_grid.size, coarse_grid.size))
n_f = fine_grid.size
for i, p in zip(range(n_f), fine_grid):
if periodic:
nn,cont_arr = next_neighbors_periodic(p, coarse_grid, k, T)
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(cont_arr, np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
else:
nn = next_neighbors(p, coarse_grid, k)
# construct the lagrange polynomials for the k neighbors
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(coarse_grid[nn], np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
return to_sparse(M, return_type)
def kron_on_list(matrix_list):
"""
:param matrix_list: a list of sparse matrices
:return: a matrix
"""
if len(matrix_list) == 2:
return sprs.kron(matrix_list[0], matrix_list[1])
elif len(matrix_list) == 1:
return matrix_list[0]
else:
return sprs.kron(matrix_list[0], kron_on_list(matrix_list[1:]))
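# e.g. kron_on_list([A, B, C]) evaluates sprs.kron(A, sprs.kron(B, C)),
# extending the 1d transfer operators above to tensor-product grids.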
def matrixN(tau, rows=-1, last_value=1.0):
n = tau.shape[0]
if rows == -1:
rows = n
N = np.zeros((rows, n))
# construct the lagrange polynomials
circulating_one = np.asarray([1.0]+[0.0]*(n-1))
lag_pol = []
for i in range(n):
lag_pol.append(intpl.lagrange(tau, np.roll(circulating_one, i)))
N[:, i] = -np.ones(rows)*lag_pol[-1](last_value)
return N
def interpolate_to_t_end(nodes_on_unit, values):
"""
    Assume Gauss-Legendre nodes: we are interested in the value at the end of
    the interval, but we know only the values in the interior of the interval.
    We compute the value at the endpoint by Lagrange interpolation.
:param nodes_on_unit: nodes transformed to the unit interval
:param values: values on those nodes
:return: interpolation to the end of the interval
"""
n = nodes_on_unit.shape[0]
circulating_one = np.asarray([1.0]+[0.0]*(n-1))
lag_pol = []
result = np.zeros(values[0].shape)
for i in range(n):
lag_pol.append(intpl.lagrange(nodes_on_unit, np.roll(circulating_one, i)))
result += values[i]*lag_pol[-1](1.0)
return result
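# Sanity check (hedged): sampling f(x) = x at the two Gauss-Legendre nodes of
# [0, 1], with the node values stored as numpy arrays, interpolate_to_t_end
# returns 1.0, since Lagrange interpolation reproduces polynomials of
# degree < n exactly.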
| 36.920354 | 111 | 0.628715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,896 | 0.347076 |
dfa3a2fa2289a9c892b09c29ede2ebe39a3dd0c8 | 7,266 | py | Python | python/trees/rbtree_graphviz.py | rcanepa/cs-fundamentals | b362fc206417501e53a5739df1edf7568901eef8 | [
"MIT"
]
| null | null | null | python/trees/rbtree_graphviz.py | rcanepa/cs-fundamentals | b362fc206417501e53a5739df1edf7568901eef8 | [
"MIT"
]
| null | null | null | python/trees/rbtree_graphviz.py | rcanepa/cs-fundamentals | b362fc206417501e53a5739df1edf7568901eef8 | [
"MIT"
]
| null | null | null | """rbtree_graphviz.py - create a graphviz representation of a LLRBT.
The purpose of this module is to visually show how the shape of a LLRBT
changes when keys are inserted in it. For every insert, sub graph (tree)
is added to the main graph.
`initialization_list` holds the values that are inserted in the tree.
This list can be changed for a list of anything that can be compared
with > == <. For example, with `initialization_list = range(50)` keys
from 0 to 49 will be inserted in the tree.
Consider that for every key, a graph is going to be generated.
"""
from graphviz import Digraph
from trees.rbtree import LLRBT, is_red
NODE_SHAPE = "circle"
NONE_NODE_SHAPE = "point"
TITLE_SHAPE = "box"
RED_COLOR = "#b8000f"
DEFAULT_GRAPH_NODE_ATTR = {
"shape": NODE_SHAPE,
"color": "black",
"style": "filled",
"fillcolor": "#cfd3d6",
}
RED_NODE_ATTR = {
"fontcolor": "white",
"fillcolor": RED_COLOR
}
DEFAULT_GRAPH_EDGE_ATTR = {
"color": "black",
"arrowhead": "vee",
"style": "solid",
}
def add_node(graph, node):
"""Add `node` to `graph`. `node` is a tuple with the
following shape:
(node_id, {<node attributes>}, {<graph's node attributes>})
^ ^ ^
string see graphviz documentation"""
node_id, node_attr, graph_node_attr = node
graph.node(node_id, **node_attr, **graph_node_attr)
return graph
def add_edge(graph, edge):
"""Add edge from `edge[0]` to `edge[1]` to `graph`. `edge` is
a tuple with the following shape:
(source_node_id, destiny_node_id, {<graph's edge attributes>})
^ ^ ^
string string see graphviz documentation"""
source_node_id, destiny_node_id, graph_edge_attr = edge
graph.edge(source_node_id, destiny_node_id, **graph_edge_attr)
return graph
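# Hedged mini-example of the two helpers above (node ids invented):
#
#   g = Digraph()
#   add_node(g, ("7", {"label": "7"}, RED_NODE_ATTR))  # red node
#   add_node(g, ("3", {"label": "3"}, {}))             # default styling
#   add_edge(g, ("7", "3", {}))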
def generate_graph(tree, initialization_list, format="pdf"):
if initialization_list is None or len(initialization_list) == 0:
raise Exception("You can't generate a graph with an empty tree.")
if not isinstance(tree, LLRBT):
raise Exception("You need to provide an instance of a Leaf Leaning Red Black Tree (LLRBT).")
for value in initialization_list:
tree.insert(value)
graph = Digraph(format="pdf",
node_attr=DEFAULT_GRAPH_NODE_ATTR,
edge_attr=DEFAULT_GRAPH_EDGE_ATTR)
# Iterate over all keys and create nodes and edges.
for idx, node in enumerate(tree.pre_order_traversal()):
node_id = str(node.value)
node_label = str(node.value)
if is_red(node):
add_node(graph, (node_id, {"label": node_label}, RED_NODE_ATTR))
else:
add_node(graph, (node_id, {"label": node_label}, {}))
# Create edge between node and its left child.
if node.left:
node_left_id = str(node.left.value)
add_edge(graph, (node_id, node_left_id, {}))
# Node doesn't have a left child so we put a dot in its place.
else:
null_node_value = "left-null-" + str(idx)
add_node(graph, (null_node_value, {}, {"shape": NONE_NODE_SHAPE}))
add_edge(graph, (node_id, null_node_value, {}))
# Create edge between node and its right child.
if node.right:
node_right_id = str(node.right.value)
add_edge(graph, (node_id, node_right_id, {}))
# Node doesn't have a left child so we put a dot in its place.
else:
null_node_value = "right-null-" + str(idx)
add_node(graph, (null_node_value, {}, {"shape": NONE_NODE_SHAPE}))
add_edge(graph, (node_id, null_node_value, {}))
return graph
def generate_graph_per_insert(tree, initialization_list, format="pdf"):
if initialization_list is None or len(initialization_list) == 0:
raise Exception("You can't generate a graph with an empty tree.")
if not isinstance(tree, LLRBT):
raise Exception("You need to provide an instance of a Leaf Leaning Red Black Tree (LLRBT).")
main_graph = Digraph(format=format,
node_attr=DEFAULT_GRAPH_NODE_ATTR,
edge_attr=DEFAULT_GRAPH_EDGE_ATTR)
main_graph.attr(rankdir="TB", newrank="true") # print sub graph from top to bottom
# For every key to be inserted, create a sub graph representing
# the tree after the insertion.
for graph_number, value in enumerate(initialization_list):
tree.insert(value)
# Create sub graph.
sub_graph_name = "cluster_" + str(graph_number)
with main_graph.subgraph(name=sub_graph_name) as sub_graph:
sub_graph.attr(label="Inserting = " + str(value), fontsize="12")
# Iterate over all keys and fill the sub graph.
for idx, node in enumerate(tree.pre_order_traversal()):
node_id = str(graph_number) + "." + str(node.value)
node_label = str(node.value)
if is_red(node):
add_node(sub_graph, (node_id, {"label": node_label}, RED_NODE_ATTR))
else:
add_node(sub_graph, (node_id, {"label": node_label}, {}))
# Create edge between node and its left child.
if node.left:
node_left_id = str(graph_number) + "." + str(node.left.value)
# Paint edge red if the left child is red.
                    if is_red(node.left):
                        add_edge(sub_graph, (node_id, node_left_id, {"color": RED_COLOR}))
                    else:
                        add_edge(sub_graph, (node_id, node_left_id, {}))
# Node doesn't have a left child so we put a dot in its place.
else:
null_node_id = str(graph_number) + "-left-null-" + str(idx)
add_node(sub_graph, (null_node_id, {}, {"shape": NONE_NODE_SHAPE}))
add_edge(sub_graph, (node_id, null_node_id, {}))
# Create edge between node and its right child.
if node.right:
node_right_id = str(graph_number) + "." + str(node.right.value)
# Paint edge red if the right child is red.
                    if is_red(node.right):
                        add_edge(sub_graph, (node_id, node_right_id, {"color": RED_COLOR}))
                    else:
                        add_edge(sub_graph, (node_id, node_right_id, {}))
# Node doesn't have a left child so we put a dot in its place.
else:
null_node_id = str(graph_number) + "-right-null-" + str(idx)
add_node(sub_graph, (null_node_id, {}, {"shape": NONE_NODE_SHAPE}))
add_edge(sub_graph, (node_id, null_node_id, {}))
return main_graph
if __name__ == "__main__":
initialization_list = ["Z", "W", "F", "D", "S", "E", "A", "R", "C", "H", "X", "M", "P", "L"]
# initialization_list = ["A", "B", "C", "D"]
tree = LLRBT()
# graph = generate_graph(tree, initialization_list)
graph = generate_graph_per_insert(tree, initialization_list)
print(graph.source)
graph.render("trees/rbtree.gv", view=True)
| 38.648936 | 100 | 0.597991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,603 | 0.358244 |
dfa4028f5508da847e15896d92223ad5641ce047 | 87 | py | Python | sampledata/exceptions.py | jespino/sampledata | 18682f878787f296adc90eb3ab6d236a863954bf | [
"BSD-3-Clause"
]
| 3 | 2015-06-19T08:50:50.000Z | 2019-05-30T07:40:59.000Z | sampledata/exceptions.py | jespino/sampledata | 18682f878787f296adc90eb3ab6d236a863954bf | [
"BSD-3-Clause"
]
| 4 | 2015-06-19T09:20:09.000Z | 2017-03-10T17:15:21.000Z | sampledata/exceptions.py | jespino/sampledata | 18682f878787f296adc90eb3ab6d236a863954bf | [
"BSD-3-Clause"
]
| 4 | 2015-06-19T07:25:27.000Z | 2022-02-15T10:09:12.000Z | class ParameterError(Exception):
pass
class NotChoicesFound(Exception):
pass
| 12.428571 | 33 | 0.747126 | 83 | 0.954023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
dfa5cb1adcaf33702f7f2c3cd145a0c36382a865 | 97 | py | Python | students/K33421/Samoshchenkov_Alexei/lr_2/hw_system/apps.py | Vivasus/ITMO_ICT_WebDevelopment_2020-2021 | 833d7ac1d40d9f7456a3c6b104a0e53c652d0035 | [
"MIT"
]
| null | null | null | students/K33421/Samoshchenkov_Alexei/lr_2/hw_system/apps.py | Vivasus/ITMO_ICT_WebDevelopment_2020-2021 | 833d7ac1d40d9f7456a3c6b104a0e53c652d0035 | [
"MIT"
]
| null | null | null | students/K33421/Samoshchenkov_Alexei/lr_2/hw_system/apps.py | Vivasus/ITMO_ICT_WebDevelopment_2020-2021 | 833d7ac1d40d9f7456a3c6b104a0e53c652d0035 | [
"MIT"
]
| null | null | null | from django.apps import AppConfig
class HwSystemConfig(AppConfig):
name = 'hw_system'
| 16.166667 | 34 | 0.721649 | 56 | 0.57732 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.113402 |
dfa771b70b06ebdb06698f8a6ef450643663f3e9 | 5,721 | py | Python | azure_sftp_plugin/hooks/adls_gen2_hook.py | christo-olivier/airflow_azure_sftp_plugin | 1d483be6419744909af2fa940cd50880ed8d5890 | [
"Apache-2.0"
]
| null | null | null | azure_sftp_plugin/hooks/adls_gen2_hook.py | christo-olivier/airflow_azure_sftp_plugin | 1d483be6419744909af2fa940cd50880ed8d5890 | [
"Apache-2.0"
]
| null | null | null | azure_sftp_plugin/hooks/adls_gen2_hook.py | christo-olivier/airflow_azure_sftp_plugin | 1d483be6419744909af2fa940cd50880ed8d5890 | [
"Apache-2.0"
]
| null | null | null | from pathlib import Path
from typing import Generator
from airflow.hooks.base_hook import BaseHook
from azure.storage.filedatalake import FileSystemClient
from azure.storage.filedatalake._generated.models._models_py3 import (
StorageErrorException,
)
class ADLSGen2Hook(BaseHook):
"""
Hook to interact with the Azure Data Lake Gen 2 storage service.
:param container: Name of the ADLS Gen 2 container to be used.
:param azure_data_lake_conn_id: Reference to the Azure Data Lake connection.
storage_account_name and storage_account_key
should be in the `login` and `password`
fields of the azure data lake connection.
"""
def __init__(
self, container: str, azure_data_lake_conn_id: str = "azure_data_lake_default"
):
self.container = container
self.conn_id = azure_data_lake_conn_id
self.connection = self.get_conn()
def __enter__(self):
return self
def __exit__(self, *args):
self.connection.close()
def get_conn(self) -> FileSystemClient:
"""
Return an Azure Data Lake Service Client object.
:return: FileSystemClient
"""
conn = self.get_connection(self.conn_id)
file_system_client = FileSystemClient(
account_url=f"https://{conn.login}.dfs.core.windows.net",
file_system_name=self.container,
credential=conn.password,
)
return file_system_client
def check_for_file(self, file_path: str, recursive: bool = True) -> bool:
"""
        Check if a file exists on Azure Data Lake.
:param file_path: File path on Azure Data Lake.
:param recursive: Specify if the path should be traversed recursively.
:return: bool
"""
try:
return file_path in self.list_files(path=file_path, recursive=recursive)
except StorageErrorException:
return False
def list_files(
self, path: str, recursive: bool = True
) -> Generator[str, None, None]:
"""
List files in an Azure Data Lake Store Gen 2 container.
:param path: The path in the container that needs to be listed.
:param recursive: Specify if the path should be traversed recursively.
:return: Generator
"""
        # Build a generator of path names (rather than path objects) and
        # yield values from it.
yield from (
path.name
for path in self.connection.get_paths(path=path, recursive=recursive)
)
def delete_file(self, remote_path: str) -> None:
"""
Delete a file from ADLS.
:param remote_path: Remote path where the file is located on ADLS.
:return: None
"""
self.connection.delete_file(file=remote_path)
def download_file(
self, local_path: str, remote_path: str, overwrite: bool = True
) -> None:
"""
Download a file from ADLS to the local path.
:param local_path: Local path where the file is to be downloaded to.
:param remote_path: Remote path where the file is located on ADLS.
:param overwrite: Should the file be overwritten if it already exists in
the local path.
:return: None
"""
        # If overwriting is not allowed, raise an exception when the local
        # file already exists.
if not overwrite:
path = Path(local_path)
if path.exists():
msg = f"`{local_path}` already exists and overwrite is set to False."
raise FileExistsError(msg)
# Check the file exists on ADLS
if not self.check_for_file(file_path=remote_path, recursive=False):
raise FileNotFoundError(f"`{remote_path}` does not exist.")
with open(local_path, "wb") as fout:
file_client = self.connection.get_file_client(file_path=remote_path)
download = file_client.download_file()
download.readinto(fout)
def upload_file(
self, local_path: str, remote_path: str, overwrite: bool = True
) -> None:
"""
Upload a file from the local path to the remote path on Azure Data Lake.
:param local_path: Local path where the file is located.
:param remote_path: Remote path where the file is to be uploaded to.
:param overwrite: Should the file be overwritten if it already exists in
the remote path.
:return: None
"""
        # If `overwrite` is not True then check to see if the file exists and
        # raise an exception if it does.
        # NB this is required as the current Microsoft SDK does not provide
        # an elegant way of uploading files that don't exist on ADLS yet. It
        # raises a generic error which is too broad. The only way to deal
        # with this is to manually check if a file already exists and raise an
        # exception if the user specifies `overwrite` to be False.
if not overwrite:
if self.check_for_file(file_path=remote_path, recursive=False):
raise FileExistsError(f"`{remote_path}` already exists on ADLS.")
# As above, overwrite is set to True as otherwise new files will fail
# to upload to ADLS. The previous check will make sure no existing files
# are overwritten if the user does not want this to happen.
with open(local_path, "rb") as fin:
file_client = self.connection.get_file_client(file_path=remote_path)
file_client.upload_data(fin.read(), overwrite=True)
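# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes an Airflow
# connection holding the storage account name/key and a container that exists;
# the container name and paths below are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    with ADLSGen2Hook(container="example-container") as hook:
        hook.upload_file("/tmp/report.csv", "reports/report.csv", overwrite=True)
        for name in hook.list_files("reports", recursive=True):
            print(name)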
| 39.729167 | 86 | 0.632582 | 5,463 | 0.954903 | 606 | 0.105926 | 0 | 0 | 0 | 0 | 3,004 | 0.525083 |
dfa9f05edc79136d5654d284a464ccb459169f40 | 536 | py | Python | phr/ciudadano/migrations/0044_ciudadano_codigo_asegurado.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
]
| null | null | null | phr/ciudadano/migrations/0044_ciudadano_codigo_asegurado.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
]
| null | null | null | phr/ciudadano/migrations/0044_ciudadano_codigo_asegurado.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-14 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ciudadano', '0043_ciudadanorn_numero_dni_ciudadano'),
]
operations = [
migrations.AddField(
model_name='ciudadano',
name='codigo_asegurado',
field=models.CharField(blank=True, max_length=24, null=True, verbose_name='Código de asegurado'),
),
]
| 26.8 | 109 | 0.654851 | 379 | 0.705773 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.320298 |
dfad270ef93b37ed6df9bcf779f6cf41ac7ec78e | 2,499 | py | Python | graphtiny/service.py | Canicio/pyqtgraph-tiny | b88ebe8a2e6ad860ca4857b527adccbbde14851d | [
"MIT"
]
| 1 | 2018-03-17T12:36:56.000Z | 2018-03-17T12:36:56.000Z | graphtiny/service.py | Canicio/pyqtgraph-tiny | b88ebe8a2e6ad860ca4857b527adccbbde14851d | [
"MIT"
]
| 1 | 2017-08-08T18:31:31.000Z | 2017-08-08T18:31:31.000Z | graphtiny/service.py | Canicio/graphtiny | b88ebe8a2e6ad860ca4857b527adccbbde14851d | [
"MIT"
]
| null | null | null | from time import sleep
import pyqtgraph as pg
import threading
from graphtiny.api import IChart, IDataStreamWindow
from graphtiny.domain import DataStreamWindow, Chart
class FuncThread(threading.Thread):
def __init__(self, t, *a) -> None:
self._t = t
self._a = a
threading.Thread.__init__(self)
def run(self) -> None:
self._t(*self._a)
class ChartService(IChart):
def set_data_stream(self, chart: Chart, x, y) -> None:
chart.x[chart.ptr] = x
chart.y[chart.ptr] = y
chart.ptr += 1
class DataStreamWindowService(IDataStreamWindow):
def launch_window(self, window: DataStreamWindow) -> None:
calculating_thread = FuncThread(self.__raise_thread_with_window, window)
calculating_thread.start()
sleep(1)
def __raise_thread_with_window(self, window: DataStreamWindow) -> None:
window.qapp = pg.mkQApp()
        window.win = pg.GraphicsWindow()  # creating the window also shows it
if window.background_color:
window.win.setBackground(window.background_color)
if window.coordinate_system_color:
pg.setConfigOption('foreground', window.coordinate_system_color)
i = 0
for chart in window.charts_list:
if i % window.columns_display == 0 and i >= window.columns_display:
window.win.nextRow()
chart.plot = window.win.addPlot()
if chart.downsampling:
chart.plot.setDownsampling(mode=chart.downsampling)
if chart.clipToView:
chart.plot.setClipToView(True)
if chart.left_label:
if chart.left_label_units:
chart.plot.setLabel('left', chart.left_label, chart.left_label_units)
else:
chart.plot.setLabel('left', chart.left_label)
if chart.bottom_label:
if chart.bottom_label_units:
chart.plot.setLabel('bottom', chart.bottom_label, chart.bottom_label_units)
else:
chart.plot.setLabel('bottom', chart.bottom_label)
chart.curve = chart.plot.plot()
if chart.line_color:
chart.curve.setPen(chart.line_color)
i += 1
while window.win.isVisible():
# refresh data
for chart in window.charts_list:
chart.curve.setData(chart.x[:chart.ptr], chart.y[:chart.ptr])
window.qapp.processEvents()
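# Hedged usage sketch (not part of the original module). It assumes Chart and
# DataStreamWindow can be constructed with defaults and that Chart preallocates
# the x/y buffers consumed by set_data_stream; values below are illustrative.
if __name__ == "__main__":
    window = DataStreamWindow()
    chart = Chart()
    window.charts_list.append(chart)
    DataStreamWindowService().launch_window(window)  # opens the Qt window
    chart_service = ChartService()
    for step in range(200):
        chart_service.set_data_stream(chart, step, step * step)
        sleep(0.01)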
| 34.232877 | 95 | 0.612645 | 2,321 | 0.928772 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.027611 |
dfad2ce40cf4b3e7c6bdab613bdf207aa9161bc1 | 2,576 | py | Python | backend/app/app/api/deps.py | totalhack/zillion-web | e567c04d3564aec8105d54533d318b79d943c9c6 | [
"MIT"
]
| 3 | 2020-10-01T11:28:02.000Z | 2020-10-31T15:35:51.000Z | backend/app/app/api/deps.py | totalhack/zillion-web | e567c04d3564aec8105d54533d318b79d943c9c6 | [
"MIT"
]
| 1 | 2022-02-09T04:19:20.000Z | 2022-02-09T13:56:40.000Z | backend/app/app/api/deps.py | totalhack/zillion-web | e567c04d3564aec8105d54533d318b79d943c9c6 | [
"MIT"
]
| null | null | null | from typing import Generator, Dict, Any
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import jwt
from pydantic import ValidationError
from sqlalchemy.orm import Session
from tlbx import json, pp
from zillion.configs import load_warehouse_config, zillion_config
from zillion.model import Warehouses
from zillion.warehouse import Warehouse
from app import app
from app import crud, models, schemas
from app.core import security
from app.core.config import settings
from app.db.session import SessionLocal
reusable_oauth2 = OAuth2PasswordBearer(
tokenUrl=f"{settings.API_V1_STR}/login/access-token"
)
warehouses = {}
@app.on_event("startup")
async def init_warehouses():
global warehouses
warehouses = get_warehouses()
def get_db() -> Generator:
try:
db = SessionLocal()
yield db
finally:
db.close()
def get_warehouses() -> Dict[str, Any]:
"""NOTE: this assumes Zillion Web DB is same as Zillion DB"""
global warehouses
if warehouses:
# TODO: cache control?
return warehouses
print("Building warehouses...")
db = SessionLocal()
try:
result = db.query(Warehouses).all()
for row in result:
warehouses[row.id] = Warehouse.load(row.id)
pp(warehouses)
return warehouses
finally:
db.close()
def get_current_user(
db: Session = Depends(get_db), token: str = Depends(reusable_oauth2)
) -> models.User:
try:
payload = jwt.decode(
token, settings.SECRET_KEY, algorithms=[security.ALGORITHM]
)
token_data = schemas.TokenPayload(**payload)
except (jwt.JWTError, ValidationError):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Could not validate credentials",
)
user = crud.user.get(db, id=token_data.sub)
if not user:
raise HTTPException(status_code=404, detail="User not found")
return user
def get_current_active_user(
current_user: models.User = Depends(get_current_user),
) -> models.User:
if not crud.user.is_active(current_user):
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
def get_current_active_superuser(
current_user: models.User = Depends(get_current_user),
) -> models.User:
if not crud.user.is_superuser(current_user):
raise HTTPException(
status_code=400, detail="The user doesn't have enough privileges"
)
return current_user
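# Hedged usage sketch (not part of the original module): how an endpoint would
# typically consume these dependencies. Shown commented out because routes
# belong in the API modules, not here; the path is an illustrative assumption.
# @app.get(f"{settings.API_V1_STR}/users/me", response_model=schemas.User)
# def read_current_user(
#     current_user: models.User = Depends(get_current_active_user),
# ) -> models.User:
#     return current_user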
| 27.115789 | 77 | 0.69604 | 0 | 0 | 112 | 0.043478 | 109 | 0.042314 | 84 | 0.032609 | 263 | 0.102096 |
dfaff48e9268da391e5354a533c983a642617b56 | 517 | py | Python | matching_operators.py | DeadManPoe/PyMatcher | fa0301033d6a9476920519c867bc5e66db7793b7 | [
"MIT"
]
| null | null | null | matching_operators.py | DeadManPoe/PyMatcher | fa0301033d6a9476920519c867bc5e66db7793b7 | [
"MIT"
]
| null | null | null | matching_operators.py | DeadManPoe/PyMatcher | fa0301033d6a9476920519c867bc5e66db7793b7 | [
"MIT"
]
| null | null | null | def match_plus(string, matching_value):
matches = match_star(string, matching_value)
return matches != 0, matches
def match_star(string, matching_value):
found_occurrences = 0
    for char in string:
if not match_identity(char, matching_value):
return found_occurrences
found_occurrences += 1
return found_occurrences
def match_identity(string, matching_value):
if matching_value == '.':
return string != ''
return string == matching_value
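# Hedged usage sketch (not in the original file): expected behaviour of the
# matchers on sample input.
if __name__ == "__main__":
    assert match_identity("a", ".")                # '.' matches any non-empty char
    assert match_star("aaab", "a") == 3            # counts leading matches only
    assert match_plus("aaab", "a") == (True, 3)    # '+' needs at least one match
    assert match_plus("baaa", "a") == (False, 0)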
| 28.722222 | 52 | 0.694391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.009671 |
dfb0e4025d32f8743112eeea3ef16b5393035552 | 212 | py | Python | BufferStockModel/run.py | bbardoczy/ConsumptionSavingNotebooks | 91811f784ec61fe2f11f8c9e0e172d085574f57c | [
"MIT"
]
| 1 | 2022-03-09T14:43:29.000Z | 2022-03-09T14:43:29.000Z | BufferStockModel/run.py | bbardoczy/ConsumptionSavingNotebooks | 91811f784ec61fe2f11f8c9e0e172d085574f57c | [
"MIT"
]
| null | null | null | BufferStockModel/run.py | bbardoczy/ConsumptionSavingNotebooks | 91811f784ec61fe2f11f8c9e0e172d085574f57c | [
"MIT"
]
| null | null | null | from BufferStockModel import BufferStockModelClass
updpar = dict()
updpar["Np"] = 1500
updpar["Nm"] = 1500
updpar["Na"] = 1500
model = BufferStockModelClass(name="baseline",solmethod="egm",**updpar)
model.test()
| 26.5 | 71 | 0.745283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.127358 |
dfb2125e655f351b14d7a2e313cfea92c5b3d51d | 4,629 | py | Python | pcie_bw.py | pcie-bench/pcie-model | 5bb1a71684c51f4bbbab2b9673c6bbc3dcf57b11 | [
"Apache-2.0"
]
| 30 | 2018-12-05T22:02:26.000Z | 2022-03-13T17:09:51.000Z | pcie_bw.py | pcie-bench/pcie-model | 5bb1a71684c51f4bbbab2b9673c6bbc3dcf57b11 | [
"Apache-2.0"
]
| null | null | null | pcie_bw.py | pcie-bench/pcie-model | 5bb1a71684c51f4bbbab2b9673c6bbc3dcf57b11 | [
"Apache-2.0"
]
| 13 | 2018-12-28T14:31:48.000Z | 2022-02-25T11:24:36.000Z | #! /usr/bin/env python3
#
## Copyright (C) 2015-2018 Rolf Neugebauer. All rights reserved.
## Copyright (C) 2015 Netronome Systems, Inc. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""A simple script to generate data for PCIe and ethernet bandwidth estimates"""
import sys
from optparse import OptionParser
from model import pcie, eth, mem_bw
# pylint: disable=too-many-locals
OUT_FILE = "pcie_bw.dat"
def main():
"""Main"""
usage = """usage: %prog [options]"""
parser = OptionParser(usage)
parser.add_option('--mps', dest='MPS', type="int", action='store',
default=256,
help='Set the maximum payload size of the link')
parser.add_option('--mrrs', dest='MRRS', type="int", action='store',
default=512,
help='Set the maximum read request size of the link')
parser.add_option('--rcb', dest='RCB', type="int", action='store',
default=64,
help='Set the read completion boundary of the link')
parser.add_option('--lanes', dest='lanes', type="string", action='store',
default='x8',
help='Set num lanes (x2, x4, x8, x16, or x32)')
parser.add_option('--gen', dest='gen', type="string", action='store',
default='gen3',
help='Set PCIe version (gen1, gen2, gen3, gen4, or gen5)')
parser.add_option('--addr', dest='addr', type="int", action='store',
default=64,
help='Set the number of address bits (32 or 64)')
parser.add_option('--ecrc', dest='ecrc', type="int", action='store',
default=0,
help='Use ECRC (0 or 1)')
parser.add_option('-o', '--outfile', dest='FILE',
default=OUT_FILE, action='store',
help='File where to write the data to')
(options, _) = parser.parse_args()
pciecfg = pcie.Cfg(version=options.gen,
lanes=options.lanes,
addr=options.addr,
ecrc=options.ecrc,
mps=options.MPS,
mrrs=options.MRRS,
rcb=options.RCB)
print("PCIe Config:")
pciecfg.pp()
ethcfg = eth.Cfg('40GigE')
tlp_bw = pciecfg.TLP_bw
bw_spec = pcie.BW_Spec(tlp_bw, tlp_bw, pcie.BW_Spec.BW_RAW)
dat = open(options.FILE, "w")
dat.write("\"Payload(Bytes)\" "
"\"PCIe Write BW\" "
"\"PCIe Write Trans/s\" "
"\"PCIe Read BW\" "
"\"PCIe Read Trans/s\" "
"\"PCIe Read/Write BW\" "
"\"PCIe Read/Write Trans/s\" "
"\"40G Ethernet BW\" "
"\"40G Ethernet PPS\" "
"\"40G Ethernet Frame time (ns)\" "
"\n")
for size in range(1, 1500 + 1):
wr_bw = mem_bw.write(pciecfg, bw_spec, size)
rd_bw = mem_bw.read(pciecfg, bw_spec, size)
rdwr_bw = mem_bw.read_write(pciecfg, bw_spec, size)
wr_trans = (wr_bw.tx_eff * 1000 * 1000 * 1000 / 8) / size
rd_trans = (rd_bw.rx_eff * 1000 * 1000 * 1000 / 8) / size
rdwr_trans = (rdwr_bw.tx_eff * 1000 * 1000 * 1000 / 8) / size
if size >= 64:
eth_bw = ethcfg.bps_ex(size) / (1000 * 1000 * 1000.0)
eth_pps = ethcfg.pps_ex(size)
eth_lat = 1.0 * 1000 * 1000 * 1000 / eth_pps
dat.write("%d %.2f %.1f %.2f %.1f %.2f %.1f %.2f %d %.2f\n" %
(size,
wr_bw.tx_eff, wr_trans,
rd_bw.rx_eff, rd_trans,
rdwr_bw.tx_eff, rdwr_trans,
eth_bw, eth_pps, eth_lat))
else:
dat.write("%d %.2f %.1f %.2f %.1f %.2f %.1f\n" %
(size,
wr_bw.tx_eff, wr_trans,
rd_bw.rx_eff, rd_trans,
rdwr_bw.tx_eff, rdwr_trans))
dat.close()
if __name__ == '__main__':
sys.exit(main())
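# Hedged usage note (not in the original file): an example invocation using the
# flags defined above, writing the data file read by the plotting step.
#   ./pcie_bw.py --gen gen3 --lanes x8 --mps 256 --mrrs 512 --rcb 64 -o pcie_bw.dat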
| 38.575 | 80 | 0.534241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,777 | 0.383884 |