max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
leetcode/2141.py | ShengyuanWang/ShengyuanWang.github.io | 1 | 12798651 | class Solution:
def maxRunTime(self, n: int, batteries: List[int]) -> int:
        # Binary search on the answer: the best possible runtime is bounded by sum(batteries) // n.
        left, right, ans = 0, sum(batteries) // n, 0
        while left <= right:
            mid = (left + right) // 2
            # mid minutes is feasible if the capacities, each clipped to mid, can supply n computers.
            total = 0
            for cap in batteries:
                total += min(cap, mid)
            if total >= n * mid:
                ans = mid
                left = mid + 1
            else:
                right = mid - 1
return ans | 3.046875 | 3 |
src/Data.py | Abdulla-binissa/Matrix | 0 | 12798652 | import random
from random import randint
class State():
def __init__(self):
self.dictionary = {} #{(row, col): (pieceIMG, brightness)}
def addDrop(self, width, top):
screenTop = top - 1
screenLeft = -width // 2
screenRight = width // 2
column = random.randint(screenLeft, screenRight)
self.dictionary[screenTop, column] = (1, 255)
def update(self, screenBottom):
tailSize = 10
keys = self.dictionary.keys()
        for cellPos in reversed(list(keys)):  # list() first so reversed() works on any Python 3 and the dict can change during the loop
cellIMG = self.dictionary[cellPos][0]
cellOpacity = self.dictionary[cellPos][1]
#Update Cell
opacity = cellOpacity - tailSize if cellOpacity >= tailSize else 0
cellIMG = cellIMG if randint(0, (opacity//20)**2) <= 1 else randint(0,4)
self.dictionary[cellPos] = (cellIMG, opacity)
# Add white to next bottom
if cellPos[0] <= screenBottom:
nextCell = (cellPos[0] + 1, cellPos[1])
if nextCell not in self.dictionary:
self.dictionary[nextCell] = (randint(0,4), 255)
# Deleting cells
if cellOpacity < tailSize:
#del temp[cellPos]
self.dictionary.pop(cellPos)
| 3.21875 | 3 |
generate-excel/src/__init__.py | spencercjh/SpringCloudCrabScore | 1 | 12798653 | import logging
import sys
import traceback
from flask import Flask, jsonify
def create_app(script_info=None):
# instantiate the app
app = Flask(
__name__, template_folder='../templates'
)
# set config
app.logger.setLevel(logging.INFO)
from src.controller import excel_service
app.register_blueprint(excel_service, url_prefix='/')
@app.route('/healthcheck')
def healthcheck():
return jsonify("ok")
# shell context for flask cli
@app.shell_context_processor
def ctx():
return {'app': app}
@app.errorhandler(Exception)
def _error(error):
trace = traceback.format_exc()
status_code = getattr(error, 'status_code', 400)
response_dict = dict(getattr(error, 'payload', None) or ())
response_dict['message'] = str(error)
response_dict['traceback'] = trace
response = jsonify(response_dict)
response.status_code = status_code
traceback.print_exc(file=sys.stdout)
return response
return app
| 2.328125 | 2 |
jprq/tunnel_tcp.py | AbduazizZiyodov/jprq-python-client | 9 | 12798654 | <reponame>AbduazizZiyodov/jprq-python-client
import sys
import ssl
import json
import certifi
import threading
import websockets
from rich import print as pretty_print
from .tcp import Client
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port):
async with websockets.connect(ws_uri, ssl=ssl_context) as websocket:
message = json.loads(await websocket.recv())
if message.get("warning"):
pretty_print(
f"[bold yellow]WARNING: {message['warning']}", file=sys.stderr)
if message.get("error"):
pretty_print(
f"[bold yellow]ERROR: {message['error']}", file=sys.stderr)
return
local_server_host = '127.0.0.1'
public_server_port = message["public_server_port"]
private_server_port = message["private_server_port"]
pretty_print(f"{'Tunnel Status:':<25}[bold green]Online")
pretty_print(
f"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}")
client = Client(
remote_server_host=remote_server_host,
remote_server_port=private_server_port,
local_server_host=local_server_host,
local_server_port=local_server_port,
)
while True:
message = json.loads(await websocket.recv())
pretty_print("[bold green]INFO: [bold white] New Connection +1")
threading.Thread(
target=client.process,
args=(message, websocket)
).start()
| 2.78125 | 3 |
ex087.py | honeyhugh/PythonCurso | 0 | 12798655 | <reponame>honeyhugh/PythonCurso<filename>ex087.py
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
par = []
maior = 0
for l in range(0, 3):
for c in range(0, 3):
matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]: '))
if matriz[l][c] % 2 == 0:
par.append(matriz[l][c])
print('=' * 30)
for l in range(0, 3):
for c in range(0, 3):
print(f'[{matriz[l][c]:^5}]', end='')
print()
soma = matriz[0][2] + matriz[1][2] + matriz[2][2]
print('=' * 30)
print(f'A soma de todos os valores pares digitados foi {sum(par)}')
print(f'A soma dos valores da terceira coluna foi {soma}')
print(f'E o maior valor da segunda linha foi {max(matriz[1][:])}')
| 3.671875 | 4 |
streamlit/utils/ui.py | T-Sumida/ObjectDetection-Streamlit | 1 | 12798656 | # -*- coding:utf-8 -*-
from typing import Optional, Tuple, List
import cv2
import numpy as np
import streamlit as st
from PIL import Image
from utils.model import MODEL_TYPE, draw_bboxes
def description(header: str, description: str):
"""show description
Args:
header (str): header message
description (str): description text
"""
st.subheader(header)
st.markdown(description)
def object_detector_ui() -> Tuple[int, str, float]:
"""show object detector ui in sidebar
Returns:
Tuple[int, str, float]: [number of threads, model type string, threshold]
"""
st.sidebar.markdown("# Model Config")
num_thread = st.sidebar.slider("Number of Thread", 1, 4, 1, 1)
confidence_threshold = st.sidebar.slider(
"Confidence threshold", 0.0, 1.0, 0.5, 0.01)
model_type = st.sidebar.radio("Model Type", MODEL_TYPE)
return num_thread, model_type, confidence_threshold
def upload_image() -> Optional[np.ndarray]:
"""show upload image area
Returns:
Optional[np.ndarray]: uploaded image
"""
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "JPG"])
if uploaded_file is not None:
file_bytes = np.asarray(
bytearray(uploaded_file.read()), dtype=np.uint8)
image = cv2.imdecode(file_bytes, 1)
return image
else:
return None
def show_image(image: np.ndarray, bboxes: List, scores: List, classes: List, detect_num: int, elapsed_time: int):
"""show processed image.
Args:
image (np.ndarray): original image
bboxes (List): detected bounding box
scores (List): detected score
classes (List): detected class names
detect_num (int): number of detection
elapsed_time (int): processing time
"""
image = draw_bboxes(image, bboxes, scores, classes, detect_num)
image = cv2pil(image)
st.image(image, caption='Uploaded Image.', use_column_width=True)
st.markdown("**elapsed time : " + str(elapsed_time) + "[msec]**")
pass
def cv2pil(image: np.ndarray) -> Image:
"""cv2 image to PIL image
Args:
image (np.ndarray): cv2 image
Returns:
Image: PIL image
"""
new_image = image.copy()
    if new_image.ndim == 2:  # grayscale
        pass
    elif new_image.shape[2] == 3:  # color
        new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB)
    elif new_image.shape[2] == 4:  # with alpha channel
new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA)
new_image = Image.fromarray(new_image)
return new_image
| 2.890625 | 3 |
DoubanTop250/top250.py | qinyunkone/Web-Crawler | 2 | 12798657 | from urllib import request, error
from fake_useragent import UserAgent
import re
import time
def request_(url):
try:
ua = UserAgent()
headers = {'User-Agent': ua.chrome}
req = request.Request(url, headers=headers)
return request.urlopen(req).read().decode('utf-8')
    except error.URLError as e:
return e.reason
def parse_(html):
ol = re.search('<ol class="grid_view">(.*?)</ol>', html, re.S).group(0)
content = ('<li>.*?<em class="">(\d+)</em>.*?class="hd".*?href="(.*?)".*?class="title">(.*?)</span>.*?' +
'property="v:average">(.*?)</span>.*?</li>')
matchlist = re.compile(content, re.S).findall(ol)
for match in matchlist:
yield {
'rank' : match[0],
'src' : match[1],
'name' : match[2],
'score' : match[3]
}
def main():
url = 'https://movie.douban.com/top250?start={}'
for page in range(10):
start = page*25
html = request_(url.format(start))
time.sleep(0.5)
for match in parse_(html):
print(match)
if __name__ == '__main__':
main()
| 2.609375 | 3 |
fdf-cpp/test/unzipall.py | valgarn/fraud-detection-framework | 0 | 12798658 | <gh_stars>0
import os
import sys
import uuid
import zipfile
def extract(source, destination):
z = zipfile.ZipFile(source)
for f in z.namelist():
if(f.upper().endswith(".JPG") or f.upper().endswith(".JPEG")):
with open(os.path.join(destination, "{}.jpg".format(str(uuid.uuid4()))), "wb") as outfile:
outfile.write(z.read(f))
extract("/media/val/Sources/OnSource/servicerequests/servicerequests-20170223T162149Z-004.zip", "/media/val/SSD/temp")
| 2.734375 | 3 |
tune_hyperopt/__init__.py | fugue-project/tune | 14 | 12798659 | # flake8: noqa
from tune_hyperopt.optimizer import HyperoptLocalOptimizer
| 1.085938 | 1 |
morm/model.py | neurobin/python-morm | 4 | 12798660 | <gh_stars>1-10
"""Model.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright © <NAME> <https://github.com/neurobin/>'
__license__ = '[BSD](http://www.opensource.org/licenses/bsd-license.php)'
__version__ = '0.0.1'
import inspect
import typing
from typing import Optional, Dict, List, Tuple, TypeVar, Union, Any, Iterator
from collections import OrderedDict
import copy
from abc import ABCMeta
from asyncpg import Record # type: ignore
from morm.exceptions import ItemDoesNotExistError
from morm.fields.field import Field, FieldValue
from morm.types import Void
import morm.meta as mt # for internal use
# morm.db must not be imported here.
Meta = mt.Meta # For client use
class _FieldNames():
"""Access field names
"""
def __init__(self, func):
self.__dict__['func'] = func
def __getattr__(self, k):
return self.__dict__['func'](k)
def __setattr__(self, k, v):
raise NotImplementedError
class ModelType(type):
Meta: typing.ClassVar # fixing mypy error: "ModelType" has no attribute "Meta"
def __new__(mcs, class_name: str, bases: tuple, attrs: dict):
# Ensure initialization is only performed for subclasses of Model
# excluding Model class itself.
parents = tuple(b for b in bases if isinstance(b, ModelType))
if not parents:
return super().__new__(mcs, class_name, bases, attrs)
classcell = attrs.pop('__classcell__', None)
class _Meta_(mt.Meta): pass
meta = attrs.pop('Meta', _Meta_)
if not inspect.isclass(meta): #TEST: Meta is restricted as a class
raise TypeError(f"Name 'Meta' is reserved for a class to pass configuration or metadata of a model. Error in model '{class_name}'")
_class_ = super().__new__(mcs, 'x_' + class_name, parents, attrs)
BaseMeta = getattr(_class_, 'Meta', _Meta_)
meta_attrs = {}
def _set_meta_attr(k, v, mutable=False, inherit=True, internal=False):
try:
given_value = getattr(meta, k)
if internal:
raise ValueError(f"'{k}' is a reserved attribute for class Meta. Error in model '{class_name}'")
given_type = type(given_value)
required_type = type(v)
if not given_type is required_type:
raise TypeError(f"Invalid type {given_type} given for attribute '{k}' in class '{class_name}.Meta'. Required {required_type}.")
meta_attrs[k] = given_value
except AttributeError:
if inherit:
v = getattr(BaseMeta, k, v)
# mutable values can be changed by other class meta change
if mutable:
meta_attrs[k] = copy.deepcopy(v)
else:
meta_attrs[k] = v
_set_meta_attr('proxy', False)
_set_meta_attr('pk', 'id')
_set_meta_attr('ordering', ())
_set_meta_attr('fields_up', ())
_set_meta_attr('fields_down', ())
_set_meta_attr('exclude_fields_up', ())
_set_meta_attr('exclude_fields_down', ())
_set_meta_attr('exclude_values_up', {'':()}, mutable=True)
_set_meta_attr('exclude_values_down', {'':()}, mutable=True)
_set_meta_attr('_field_defs_', {}, internal=True, mutable=True)
if meta_attrs['proxy']:
#proxy model inherits everything
try:
meta_attrs['db_table'] = BaseMeta.db_table
meta_attrs['abstract'] = BaseMeta.abstract
except AttributeError:
raise TypeError(f"This model '{class_name}' can not be a proxy model. It does not have a valid base or super base non-proxy model")
else:
_set_meta_attr('abstract', False, inherit=False)
if meta_attrs['abstract']:
meta_attrs['db_table'] = Void
else:
_set_meta_attr('db_table', class_name, inherit=False)
new_attrs = {}
# dict is ordered, officially from python 3.7
for n, v in _class_.__dict__.items():
if isinstance(v, Field):
if n.startswith('_'):
raise AttributeError(f"Invalid field name '{n}' in model '{class_name}'. \
Field name must not start with underscore.")
if meta_attrs['proxy'] and n in attrs:
raise ValueError(f"Proxy model '{class_name}' can not define new field: {n}")
v.name = n
# v.sql_conf.conf['table_name'] = meta_attrs['db_table'] # Field must not contain table_name, because it is void when model is abstract and it gets inherited.
meta_attrs['_field_defs_'][n] = v
elif n in attrs:
new_attrs[n] = attrs[n]
# we do this after finalizing meta_attr
def _get_field_name(n: str) -> str:
if n in meta_attrs['_field_defs_']:
return n
else:
raise AttributeError(f"No such field '{n}' in model '{class_name}'")
meta_attrs['f'] = _FieldNames(_get_field_name)
MetaClass = mt.MetaType('Meta', (mt.Meta,), meta_attrs)
new_attrs['Meta'] = MetaClass
if classcell is not None:
new_attrs['__classcell__'] = classcell
return super().__new__(mcs, class_name, bases, new_attrs)
def __setattr__(self, k, v):
raise NotImplementedError("You can not set model attributes outside model definition.")
def __delattr__(self, k):
raise NotImplementedError("You can not delete model attributes outside model definition.")
def _is_valid_key_(self, k:str, fields:Tuple[str], exclude_keys:Tuple[str]) -> bool:
"""Returns True if the key is valid considering include/exclude keys
"""
if k in exclude_keys: return False
if fields and k not in fields: return False
return True
def _is_valid_down_key_(self, k: str) -> bool:
"""Returns True if the key is valid considering include/exclude down keys
"""
return self._is_valid_key_(k, self.Meta.fields_down, self.Meta.exclude_fields_down)
def _is_valid_up_key_(self, k: str) -> bool:
"""Returns True if the key is valid considering include/exclude up keys
"""
return self._is_valid_key_(k, self.Meta.fields_up, self.Meta.exclude_fields_up)
def _is_valid_value_(self, k: str, v: Any, exclude_values: Dict[str, Tuple[Any]]) -> bool:
"""Returns True if the value for the key is valid considering exclude values
"""
if v is Void:
return False
if k in exclude_values:
if v in exclude_values[k]:
return False
if '' in exclude_values and v in exclude_values['']:
return False
return True
def _is_valid_up_value_(self, k: str, v: Any) -> bool:
"""Returns True if the value for the key is valid considering exclude up values
"""
return self._is_valid_value_(k, v, self.Meta.exclude_values_up)
def _is_valid_down_value_(self, k: str, v: Any) -> bool:
"""Returns True if the value for the key is valid considering exclude down values
"""
return self._is_valid_value_(k, v, self.Meta.exclude_values_down)
def _is_valid_down_(self, k: str, v: Any) -> bool:
"""Check whether the key and value is valid for down (data retrieval)
"""
return self._is_valid_down_key_(k) and self._is_valid_down_value_(k, v)
def _is_valid_up_(self, k: str, v: Any) -> bool:
"""Check whether the key and value is valid for up (data update)
"""
return self._is_valid_up_key_(k) and self._is_valid_up_value_(k, v)
def _get_all_fields_(self) -> Dict[str, Field]:
"""Get all fields on model without applying any restriction.
Returns:
Dict[str, Field]: Dictionary of all fields
"""
return self.Meta._field_defs_
def _check_field_name_(self, n: str) -> str:
"""Return the field name if exists else raise AttributeError
Args:
n (str): field name
Raises:
AttributeError: if field name does not exist
Returns:
str: field name
"""
if n in self.Meta._field_defs_:
return n
else:
raise AttributeError(f"No such field `{n}` in model `{self.__name__}`")
def _get_fields_(self, up=False) -> Iterator[str]:
"""Yields field names that pass include/exclude criteria
Args:
up (bool, optional): up criteria or down criteria. Defaults to False (down).
Yields:
str: field name
"""
if up:
fields = self.Meta.fields_up
exclude_keys = self.Meta.exclude_fields_up
else:
fields = self.Meta.fields_down
exclude_keys = self.Meta.exclude_fields_down
all_fields = self._get_all_fields_()
for k in all_fields:
if not self._is_valid_key_(k, fields, exclude_keys):
continue
yield k
def _get_FieldValue_data_valid_(self, data: dict, up=False) -> Iterator[Tuple[str, Any]]:
"""Yields valid key,value pairs from data.
Validity is checked against include/exclude key/value criteria.
Args:
data (dict): data to be validated.
up (bool, optional): whether up (data update) or down (data retrieval). Defaults to False.
Yields:
Iterator[Tuple[str, Any]]: Yields key, value pair
"""
if up:
exclude_values = self.Meta.exclude_values_up
fields = self.Meta.fields_up
exclude_fields = self.Meta.exclude_fields_up
else:
exclude_values = self.Meta.exclude_values_down
fields = self.Meta.fields_down
exclude_fields = self.Meta.exclude_fields_down
# new_data = type(data)()
for k,v in data.items():
if not self._is_valid_key_(k, fields, exclude_fields):
continue
if not self._is_valid_value_(k, v.value, exclude_values):
continue
yield k, v
# def _get_data_for_valid_values_(self, data, up=False, gen=False):
# if up:
# exclude_values = self.Meta.exclude_values_up
# else:
# exclude_values = self.Meta.exclude_values_down
# new_data = type(data)()
# for k,v in data.items():
# if not self._is_valid_value_(k, v, exclude_values):
# continue
# if gen:
# yield k, v
# else:
# new_data[k] = v
# if not gen:
# return new_data
def _get_db_table_(self) -> str:
"""Get db table name for model
"""
return self.Meta.db_table
def _is_abstract_(self) -> bool:
"""Whether it's an abstract model or not
"""
return self.Meta.abstract
def _is_proxy_(self) -> bool:
"""Whether it is a proxy model or not
"""
return self.Meta.proxy
def _get_pk_(self) -> str:
"""Get primary column name
"""
return self.Meta.pk
def _get_ordering_(self, quote: str) -> Iterator[Tuple[str, str]]:
"""Yield each ordering from model parsed and converted to column, direction
direction is either `ASC` or `DESC`
Args:
quote (str): Quote to apply to the column
Yields:
Iterator[Tuple[str, str]]: Yields column, direction
"""
ordering = self.Meta.ordering
direction = 'ASC'
for o in ordering:
if o.startswith('-'):
direction = 'DESC'
o = o[1:]
elif o.startswith('+'):
o = o[1:]
o = f"{quote}{o}{quote}"
yield o, direction
class ModelBase(metaclass=ModelType):
"""Base Model for all models.
Do not inherit from this class, use Model instead.
Raises:
TypeError: When invalid type is encountered
AttributeError: When misspelled fields are tried to set.
"""
class Meta:
"""Meta that holds metadata for model
"""
# The following needs to be defined here, not in meta.Meta
# meta.Meta is used in client Models, thus everything
# included there will be blindly inherited, while these are passed
# through the metaclasses __new__ methods and processed accordingly
# to determine which one should be inherited and which one should not.
pk = 'id'
'''Primary key'''
db_table = Void
abstract = True
proxy = False
ordering = ()
fields_up = ()
fields_down = ()
exclude_fields_up = ()
exclude_fields_down = ()
exclude_values_up = {'':()}
exclude_values_down = {'':()}
#internal
_field_defs_: Dict[str, Field]
_fields_: Dict[str, FieldValue]
_fromdb_: List[str]
def __init__(self, *args, **kwargs):
class Meta:
_fields_: Dict[str, FieldValue] = {}
_fromdb_: List[str] = []
# super(ModelBase, self).__setattr__('Meta', Meta)
self.__dict__['Meta'] = Meta
for k, v in self.__class__.Meta._field_defs_.items():
self.Meta._fields_[k] = FieldValue(v)
for arg in args:
try:
arg_items = arg.items()
except AttributeError:
raise TypeError(f"Invalid argument type ({type(arg)}) to Model __init__ method. Expected: dictionary or keyword argument")
for k,v in arg_items:
setattr(self, k, v)
for k,v in kwargs.items():
setattr(self, k, v)
def __iter__(self):
"""Iter through k, v where k is field name and v is field value
Yields:
tuple: field_name, field_value
"""
for k, f in self.Meta._fields_.items():
if self.__class__._is_valid_down_(k, f.value):
yield k, f.value
def __delattr__(self, k):
fields = self.Meta._fields_
if k in fields:
fields[k].delete_value()
else:
super().__delattr__(k)
def __getattr__(self, k):
Meta = self.__dict__['Meta']
fields = Meta._fields_
if k in fields:
v = fields[k].value
if self.__class__._is_valid_down_(k, v):
return v
raise AttributeError(f'Invalid attempt to access field `{k}`. It is excluded using either exclude_fields_down or exclude_values_down in {self.__class__.__name__} Meta class. Or it does not have any valid value.')
raise AttributeError
def __setattr__(self, k: str, v):
if k == 'Meta':
            raise AttributeError(f"Name '{k}' is reserved. You should not try to change it.")
if k.startswith('_'):
if k.endswith('_'):
raise AttributeError('_<name>_ such names are reserved for predefined methods.')
self.__dict__[k] = v
return
fields = self.Meta._fields_
if k not in fields:
            raise AttributeError(f"No such field ('{k}') in model '{self.__class__.__name__}'")
# v = fields[k].clean(v)
# super().__setattr__(k, v)
if self.__class__._is_valid_up_(k, v):
if k in self.Meta._fromdb_:
fields[k]._ignore_first_change_count_ = True
self.Meta._fromdb_.remove(k)
fields[k].value = v
elif k in self.Meta._fromdb_:
self.Meta._fromdb_.remove(k)
else:
raise AttributeError(f'Can not set field `{k}`. It is excluded using either exclude_fields_up or exclude_values_up in {self.__class__.__name__} Meta class. Or you are trying to set an invalid value: {v}')
def __repr__(self):
reprs = []
for k, v in self:
reprs.append(f'{k}={repr(v)}')
body = ', '.join(reprs)
return f'{self.__class__.__name__}({body})'
async def _pre_save_(self, db):
"""Pre-save hook.
Override to run pre save cleanup.
Args:
db (DB): db handle.
"""
pass
async def _pre_delete_(self, db):
"""Pre-delete hook.
Override to run pre delete cleanup.
Args:
db (DB): db handle.
"""
pass
    async def _post_save_(self, db):
        """Post-save hook.
Override to run post save cleanup.
Args:
db (DB): db handle.
"""
pass
    async def _post_delete_(self, db):
        """Post-delete hook.
Override to run post delete cleanup.
Args:
db (DB): db handle.
"""
pass
async def _pre_insert_(self, db):
"""Pre-insert hook.
Override to run pre insert cleanup.
Args:
db (DB): db handle.
"""
pass
async def _pre_update_(self, db):
"""Pre-update hook.
Override to run pre update cleanup.
Args:
db (DB): db handle.
"""
pass
    async def _post_insert_(self, db):
        """Post-insert hook.
Override to run post insert cleanup.
Args:
db (DB): db handle.
"""
pass
    async def _post_update_(self, db):
        """Post-update hook.
Override to run post update cleanup.
Args:
db (DB): db handle.
"""
pass
class Model(ModelBase):
"""Base model to be inherited by other models.
It's more than a good practice to define a Base model first:
```python
from morm.model import Model
from morm.datetime import timestamp
class Base(Model):
class Meta:
pk = 'id' # setting primary key, it is defaulted to 'id'
abstract = True
# postgresql example
id = Field('SERIAL', sql_onadd='PRIMARY KEY NOT NULL')
created_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', sql_alter=('ALTER TABLE "{table}" ALTER COLUMN "{column}" SET DEFAULT NOW()',))
updated_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', value=timestamp)
```
Then a minimal model could look like this:
```python
class User(Base):
name = Field('varchar(65)')
email = Field('varchar(255)')
password = Field('varchar(255)')
```
An advanced model could look like:
```python
import random
def get_rand():
return random.randint(1, 9)
class User(Base):
class Meta:
db_table = 'myapp_user'
abstract = False # default is False
proxy = False # default is False
# ... etc...
# see morm.meta.Meta for supported meta attributes.
name = Field('varchar(65)')
email = Field('varchar(255)')
password = Field('varchar(255)')
profession = Field('varchar(255)', default='Unknown')
random = Field('integer', default=get_rand) # function can be default
```
## Initialize a model instance
keyword arguments initialize corresponding fields according to
the keys.
Positional arguments must be dictionaries of
keys and values.
Example:
```python
User(name='<NAME>', profession='Teacher')
User({'name': '<NAME>', 'profession': 'Teacher'})
User({'name': '<NAME>', 'profession': 'Teacher'}, age=34)
User({'name': '<NAME>', 'profession': 'Teacher', 'active': True}, age=34)
```
Raises:
TypeError: If invalid type of argument is provided.
## Special Model Meta attribute `f`:
You can access field names from `ModelClass.Meta.f`.
This allows a spell-safe way to write the field names. If you
misspell the name, you will get `AttributeError`.
```python
f = User.Meta.f
my_data = {
f.name: '<NAME>', # safe from spelling mistake
f.profession: 'Teacher', # safe from spelling mistake
'hobby': 'Gardenning', # unsafe from spelling mistake
}
```
"""
class Meta:
# The following needs to be defined here, not in meta.Meta
# meta.Meta is used in client Models, thus everything
# included there will be blindly inherited, while these are passed
# through the metaclasses __new__ methods and processed accordingly
# to determine which one should be inherited and which one should not.
abstract = True
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
| 1.945313 | 2 |
sql_demo.py | Piyuyang/Flask_demo | 0 | 12798661 | from datetime import datetime
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
class MySQLConfig(object):
SQLALCHEMY_DATABASE_URI = "mysql://root:[email protected]:3306/toutiao"
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = True
app.config.from_object(MySQLConfig)
# Create the SQLAlchemy handle that manages the database
db = SQLAlchemy(app)
class User(db.Model):
"""
    Basic user information
"""
__tablename__ = 'user_basic'
class STATUS:
ENABLE = 1
DISABLE = 0
    id = db.Column('user_id', db.Integer, primary_key=True, doc='user ID')
    mobile = db.Column(db.String, doc='mobile number')
    password = db.Column(db.String, doc='password')
    name = db.Column('user_name', db.String, doc='nickname')
    profile_photo = db.Column(db.String, doc='avatar')
    last_login = db.Column(db.DateTime, doc='last login time')
    is_media = db.Column(db.Boolean, default=False, doc='whether this is a self-media (creator) account')
    is_verified = db.Column(db.Boolean, default=False, doc='whether real-name verified')
    introduction = db.Column(db.String, doc='introduction')
    certificate = db.Column(db.String, doc='certification')
    article_count = db.Column(db.Integer, default=0, doc='number of posts')
    following_count = db.Column(db.Integer, default=0, doc='number of users followed')
    fans_count = db.Column(db.Integer, default=0, doc='number of followers (fans)')
    like_count = db.Column(db.Integer, default=0, doc='total likes received')
    read_count = db.Column(db.Integer, default=0, doc='total reads')
    account = db.Column(db.String, doc='account')
    email = db.Column(db.String, doc='email')
    status = db.Column(db.Integer, default=1, doc='status: whether the account is enabled')
    # Use extra relationship attributes to make the related objects explicit
profile = db.relationship('UserProfile', uselist=False)
follows = db.relationship('Relation')
    # Alternatively, use primaryjoin to spell out how the two tables relate, again via an extra relationship attribute
# profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False)
class UserProfile(db.Model):
"""
    User profile table
"""
__tablename__ = 'user_profile'
class GENDER:
MALE = 0
FEMALE = 1
    # Use a ForeignKey to make the relation between the two tables explicit
    id = db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True, doc='user ID')
    # id = db.Column('user_id', db.Integer, primary_key=True, doc='user ID')
    gender = db.Column(db.Integer, default=0, doc='gender')
    birthday = db.Column(db.Date, doc='birthday')
    real_name = db.Column(db.String, doc='real name')
    id_number = db.Column(db.String, doc='ID card number')
    id_card_front = db.Column(db.String, doc='ID card front photo')
    id_card_back = db.Column(db.String, doc='ID card back photo')
    id_card_handheld = db.Column(db.String, doc='photo holding the ID card')
    ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='creation time')
    utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='update time')
    register_media_time = db.Column(db.DateTime, doc='self-media registration time')
    area = db.Column(db.String, doc='area')
    company = db.Column(db.String, doc='company')
    career = db.Column(db.String, doc='occupation')
class Relation(db.Model):
"""
    User relation table
"""
__tablename__ = 'user_relation'
class RELATION:
DELETE = 0
FOLLOW = 1
BLACKLIST = 2
    id = db.Column('relation_id', db.Integer, primary_key=True, doc='primary key ID')
    # user_id = db.Column(db.Integer, doc='user ID')
    user_id = db.Column(db.Integer, db.ForeignKey('user_basic.user_id'), doc='user ID')
    target_user_id = db.Column(db.Integer, doc='target user ID')
    relation = db.Column(db.Integer, doc='relation')
    ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='creation time')
    utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='update time')
target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)')
# Query: which users does the user with mobile number 13912345678 follow? (return user IDs)
# SELECT user_basic.user_id,user_relation.target_user_id FROM
# user_basic INNER JOIN user_relation ON user_basic.user_id = user_relation.user_id
# WHERE user_basic.mobile = '13912345678'
# User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all()
# Query: which users follow the user with ID 1? (return user names)
# SELECT user_basic.user_name FROM
# user_basic INNER JOIN user_relation
# ON user_basic.user_id=user_relation.target_user_id
# WHERE user_basic.user_id=1
# Relation.query.join(Relation.target_user).options(contains_eager(Relation.target_user).load_only(User.name),load_only(Relation.target_user_id)).filter(User.id==1).all()
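# Hedged sketch, not in the original file: the two commented-out ORM queries above,
# written out with the imports they rely on. Assumes a Flask-SQLAlchemy app context;
# note the model defines the relationship as User.follows (the comment above says User.follow).
from sqlalchemy.orm import load_only, contains_eager

def users_followed_by(mobile='13912345678'):
    # user IDs that the user with the given mobile number follows
    return User.query.join(User.follows) \
        .options(load_only(User.id),
                 contains_eager(User.follows).load_only(Relation.target_user_id)) \
        .filter(User.mobile == mobile, Relation.relation == Relation.RELATION.FOLLOW) \
        .all()

def follower_names_of(user_id=1):
    # names of the users that follow the user with the given id
    return Relation.query.join(Relation.target_user) \
        .options(contains_eager(Relation.target_user).load_only(User.name),
                 load_only(Relation.target_user_id)) \
        .filter(User.id == user_id) \
        .all()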
| 2.546875 | 3 |
src/app_conf.py | kb2ma/nethead-ui | 0 | 12798662 | """
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import app_server_conf
"""Default configuration settings for NetHead UI"""
# Pathname for log files.
LOGGING_PATHNAME = 'nethead.log'
| 1.117188 | 1 |
app/migrations/0015_rename_users_list_user.py | djyasin/GrocerEase | 1 | 12798663 | # Generated by Django 4.0.1 on 2022-01-22 15:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0014_alter_list_users'),
]
operations = [
migrations.RenameField(
model_name='list',
old_name='users',
new_name='user',
),
]
| 1.710938 | 2 |
Strip_Method.py | BeenashPervaiz/Command_Line_Task | 0 | 12798664 | <filename>Strip_Method.py
name = " Pervaiz "
dots = " ........."
print(name.lstrip() + dots) #lstrip Method
print(name.rstrip() + dots) #rstrip Method
print(name.strip() + dots) #strip Method
print(name.replace(" ", "") + dots) #Replace Method | 3.234375 | 3 |
micromelon/_robot_comms/_rover_read_cache.py | timmyhadwen/mm-pymodule | 3 | 12798665 | <reponame>timmyhadwen/mm-pymodule
from enum import Enum
import time
from ._comms_constants import MicromelonType as OPTYPE
class BUFFER_POSITIONS(Enum):
ULTRASONIC = 0
ACCL = 2
GYRO = 8
COLOUR_ALL = 20
TIME_OF_FLIGHT = 50
BATTERY_VOLTAGE = 54
BATTERY_PERCENTAGE = 56
PERCENTAGE_PADDING = 57
BATTERY_CURRENT = 58
GYRO_ACCUM = 60
class BUFFER_SIZES(Enum):
ULTRASONIC = 2
ACCL = 6
GYRO = 12
COLOUR_ALL = 30
TIME_OF_FLIGHT = 4
BATTERY_VOLTAGE = 2
BATTERY_PERCENTAGE = 1
PERCENTAGE_PADDING = 1
BATTERY_CURRENT = 2
GYRO_ACCUM = 12
class RoverReadCache:
def __init__(self) -> None:
self._allSensors = None
self._lastUpdatedTime = 0
self._useByInterval = (
0.25 # cached values older than 0.25 seconds will be ignored
)
self._startAndSizeIndexForOpType = {
OPTYPE.ULTRASONIC.value: (
BUFFER_POSITIONS.ULTRASONIC.value,
BUFFER_SIZES.ULTRASONIC.value,
),
OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value),
OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value),
OPTYPE.COLOUR_ALL.value: (
BUFFER_POSITIONS.COLOUR_ALL.value,
BUFFER_SIZES.COLOUR_ALL.value,
),
OPTYPE.TIME_OF_FLIGHT.value: (
BUFFER_POSITIONS.TIME_OF_FLIGHT.value,
BUFFER_SIZES.TIME_OF_FLIGHT.value,
),
OPTYPE.BATTERY_VOLTAGE.value: (
BUFFER_POSITIONS.BATTERY_VOLTAGE.value,
BUFFER_SIZES.BATTERY_VOLTAGE.value,
),
OPTYPE.STATE_OF_CHARGE.value: (
BUFFER_POSITIONS.BATTERY_PERCENTAGE.value,
BUFFER_SIZES.BATTERY_PERCENTAGE.value,
),
OPTYPE.CURRENT_SENSOR.value: (
BUFFER_POSITIONS.BATTERY_CURRENT.value,
BUFFER_SIZES.BATTERY_CURRENT.value,
),
OPTYPE.GYRO_ACCUM.value: (
BUFFER_POSITIONS.GYRO_ACCUM.value,
BUFFER_SIZES.GYRO_ACCUM.value,
),
}
def updateAllSensors(self, data):
self._allSensors = data
self._lastUpdatedTime = time.time()
def setUseByInterval(self, seconds):
if seconds <= 0:
raise Exception(
"Use by interval for RoverReadCache must be a positive non-zero number"
)
self._useByInterval = seconds
def invalidateCache(self):
self._allSensors = None
def readCache(self, opType):
if (
not self._allSensors
or time.time() - self._lastUpdatedTime > self._useByInterval
):
return None
if isinstance(opType, Enum):
opType = opType.value
if opType in self._startAndSizeIndexForOpType:
indices = self._startAndSizeIndexForOpType[opType]
return self._allSensors[indices[0] : indices[0] + indices[1]]
return None
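# Hedged usage sketch, not part of the original module. The relative imports keep this
# file from running standalone, so the example stays in comments; raw_packet stands in
# for a real 72-byte "all sensors" buffer.
# cache = RoverReadCache()
# cache.updateAllSensors(raw_packet)
# cache.readCache(OPTYPE.ULTRASONIC)   # bytes at offset 0, length 2, or None if stale
# cache.setUseByInterval(0.5)          # accept cached readings up to 0.5 seconds old
# cache.invalidateCache()              # forces the next readCache() to return None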
| 2.5625 | 3 |
Data Science With Python/13-introduction-to-data-visualization-with-python/04-analyzing-time-series-and-images/08-cumulative-distribution-function-from-an-image-historgram.py | aimanahmedmoin1997/DataCamp | 3 | 12798666 | '''
Cumulative Distribution Function from an image histogram
A histogram of a continuous random variable is sometimes called a Probability Distribution Function (or PDF). The area under a PDF (a definite integral) is called a Cumulative Distribution Function (or CDF). The CDF quantifies the probability of observing certain pixel intensities.
Your task here is to plot the PDF and CDF of pixel intensities from a grayscale image. You will use the grayscale image of Hawkes Bay, New Zealand (originally by <NAME>, modified by User:Konstable, via Wikimedia Commons, CC BY 2.0). This time, the 2D array image will be pre-loaded and pre-flattened into the 1D array pixels for you.
The histogram option cumulative=True permits viewing the CDF instead of the PDF.
Notice that plt.grid('off') switches off distracting grid lines.
The command plt.twinx() allows two plots to be overlayed sharing the x-axis but with different scales on the y-axis.
INSTRUCTIONS
70XP
First, use plt.hist() to plot the histogram of the 1-D array pixels in the bottom subplot.
Use the histogram options bins=64, range=(0,256), and normed=False.
Use the plotting options alpha=0.4 and color='red' to make the overlayed plots easier to see.
Second, use plt.twinx() to overlay plots with different vertical scales on a common horizontal axis.
Third, call plt.hist() again to overlay the CDF in the bottom subplot.
Use the histogram options bins=64, range=(0,256), and normed=True.
This time, also use cumulative=True to compute and display the CDF.
Also, use alpha=0.4 and color='blue' to make the overlayed plots easier to see.
'''
# Load the image into an array: image
image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')
# Display image in top subplot using color map 'gray'
plt.subplot(2,1,1)
plt.imshow(image, cmap='gray')
plt.title('Original image')
plt.axis('off')
# Flatten the image into 1 dimension: pixels
pixels = image.flatten()
# Display a histogram of the pixels in the bottom subplot
plt.subplot(2,1,2)
pdf = plt.hist(pixels, bins=64, range=(0,256), normed=False,
color='red', alpha=0.4)
plt.grid('off')
# Use plt.twinx() to overlay the CDF in the bottom subplot
plt.twinx()
# Display a cumulative histogram of the pixels
cdf = plt.hist(pixels, bins=64, range=(0,256),
cumulative=True, normed=True,
color='blue', alpha=0.4)
# Specify x-axis range, hide axes, add title and display plot
plt.xlim((0,256))
plt.grid('off')
plt.title('PDF & CDF (original image)')
plt.show()
| 3.359375 | 3 |
src/models/heads/__init__.py | takedarts/skipresnet | 3 | 12798667 | <gh_stars>1-10
from .mobilenet import MobileNetV2Head, MobileNetV3Head
from .nfnet import NFHead
from .none import NoneHead
from .pre_activation import PreActivationHead
from .swin import SwinHead
from .vit import ViTHead
| 1.03125 | 1 |
ce_tools-TESTER.py | ArezalGame89/Corn-Engine | 0 | 12798668 | ####################################################=
###################CE_TOOLS TEST####################
####################################################=
# A functional tester for the famous Corn Engine Utillity "CE_tools.py"
# This can also be used as a template on how to make a tester correctly
import ce_tools
from ce_tools import debugFound, systemc
systemc.system("cls" if systemc.name=='nt' else 'clear')
print("Corn Engine Tools Tester (CE_tools.py)")
print("This tools tests all functions up to", ce_tools.ce_tools_ver)
while True:
poop = input("Where would you like to go?: ")
if poop == "help":
systemc.startfile(ce_tools.help)
elif poop == "randomNum":
print("This define prints a random number")
print("using random randint between 0 to infinity")
ce_tools.randomNum()
elif poop == "request hello":
print("this program says hi to you in different ways")
print("Check out cetHelp.txt for more info on that")
ce_tools.RequestHello()
elif poop == "roll a dice":
print("The dice function rolls a random dice")
ce_tools.rollADice()
elif poop == "exit":
        raise SystemExit  # actually exit; a bare SystemExit() call only creates the exception
elif poop == "Debug":
ce_tools.debugFound() # exclusive for testers only
else:
print(ce_tools.wrongInputTester) # uses the text from ce_tools | 3.171875 | 3 |
louvain_to_gephi_graphx.py | ErathosthemesAmmoro/track-communities | 12 | 12798669 | #
# Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import re
import sys
from subprocess import call
table = sys.argv[1]
garbage = open("garbage.out","w")
v = 'output/graphx/level_0_vertices'
os.system("cat " + v + "/part-* > " + v + "/output")
f = open(v + '/output','r')
o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w')
nodeMap = {}
for line in f:
id = re.search(r'\(([a-zA-Z0-9]+)', line).group(1)
name = re.search(r'(name):([a-zA-Z0-9\-]+)', line).group(2)
comm = re.search(r'(communityName):([a-zA-Z0-9\-]+)', line).group(2)
nodeMap[id] = name
o.write(name + '\t' + comm + '\n')
f.close()
o.close()
call("hadoop fs -mkdir /tmp/trackcomms/" + table + "/output/graphx/comm_1", stdout=garbage, shell=True)
call("hadoop fs -put louvain_to_gephi/graphx/community_itr_1.nodes /tmp/trackcomms/" + table + "/output/graphx/comm_1", stdout=garbage, shell=True)
f = open('edgelist.tsv','r')
o = open('louvain_to_gephi/graphx/graph_itr_0.edges','w')
for line in f:
if len(line.split('\t')) == 3:
source,weight,edgelist = line.split('\t')
edgelist = edgelist.strip().split(',')
for e in edgelist:
o.write('\t'.join((source,e.split(':')[0],e.split(':')[1])) + '\n')
o.close()
f.close()
# Here's the looping piece
i = 1
v = 'output/graphx/level_'+str(i)+'_vertices'
e = 'output/graphx/level_'+str(i)+'_edges'
while os.path.exists(e):
os.system("cat " + v + "/part-* > " + v + "/output")
os.system("cat " + e + "/part-* > " + e + "/output")
level = str(i+1)
f = open(v + '/output','r')
o = open('louvain_to_gephi/graphx/community_itr_' + level + '.nodes','w')
for line in f:
id = re.search(r'\(([a-zA-Z0-9]+)', line).group(1)
name = re.search(r'(name):([a-zA-Z0-9\-]+)', line).group(2)
comm = re.search(r'(communityName):([a-zA-Z0-9\-]+)', line).group(2)
nodeMap[id] = name
o.write(name + '\t' + comm + '\n')
f.close()
o.close()
call("hadoop fs -mkdir /tmp/trackcomms/" + table + "/output/graphx/comm_" + level, stdout=garbage, shell=True)
call("hadoop fs -put louvain_to_gephi/graphx/community_itr_" + level + ".nodes /tmp/trackcomms/" + table + "/output/graphx/comm_" + level, stdout=garbage, shell=True)
f = open(e + '/output','r')
o = open('louvain_to_gephi/graphx/graph_itr_' + str(i) + '.edges','w')
for line in f:
match = re.search(r'Edge\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\)', line)
o.write('\t'.join((nodeMap[match.group(1)],nodeMap[match.group(2)], match.group(3))) + '\n')
o.close()
f.close()
i = i + 1
v = 'output/graphx/level_'+str(i)+'_vertices'
e = 'output/graphx/level_'+str(i)+'_edges'
| 1.890625 | 2 |
src/board/models.py | woongchoi84/BLEX | 1 | 12798670 | <reponame>woongchoi84/BLEX
import requests
import datetime
import random
from django.db import models
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from tagging.fields import TagField
font_mapping = {
'Noto Sans' : 'noto',
'RIDIBatang' : 'ridi',
'Noto Sans Serif' : 'serif'
}
theme_mapping = {
'Default' : '',
'Dark Mode' : 'dark',
'Violet' : 'purple',
'Green & Blue' : 'glue'
}
grade_mapping = {
'blogger' : 'blogger-gray',
'contributor' : 'contributor-green',
'supporter' : 'supporter-orange',
'sponsor' : 'sponsor-ff69b4',
'partner' : 'partner-blueviolet',
'master' : 'master-purple'
}
def randstr(length):
rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ'
rstr_len = len(rstr) - 1
result = ''
for i in range(length):
result += rstr[random.randint(0, rstr_len)]
return result
def parsedown(text):
data = {'md': text.encode('utf-8')}
res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data)
return res.text
def avatar_path(instance, filename):
dt = datetime.datetime.now()
return 'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1]
def title_image_path(instance, filename):
dt = datetime.datetime.now()
return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1]
def create_notify(user, post, infomation):
new_notify = Notify(user=user, post=post, infomation=infomation)
new_notify.save()
"""
class Team(models.Model):
name = models.CharField(max_length=15, unique=True)
owner = models.ForeignKey('auth.User')
member = models.ManyToManyField(User, related_name='members', blank=True)
bio = models.TextField(max_length=500, blank=True)
about = models.TextField(blank=True)
avatar = models.ImageField(blank=True, upload_to=team_logo_path)
class TeamPost(models.Model):
pass
class TeamCategory(models.Model):
pass
"""
"""
class Request(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
post = models.ForeignKey('board.Post', on_delete = models.CASCADE)
comment = models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE)
content = models.TextField(blank=True)
is_apply = models.BooleanField(default=False)
created_date = models.DateTimeField(default=timezone.now)
"""
"""
class MiddleComment(models.Model):
pass
"""
class History(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
post = models.ForeignKey('board.Post', on_delete = models.CASCADE)
created_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.user.username
class Grade(models.Model):
name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.name
class Font(models.Model):
name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.name
class Theme(models.Model):
color = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.color
class Config(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
agree_email = models.BooleanField(default=False)
agree_history = models.BooleanField(default=False)
post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True)
post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return self.user.username
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True)
grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True)
exp = models.IntegerField(default=0)
bio = models.TextField(max_length=500, blank=True)
avatar = models.ImageField(blank=True,upload_to=avatar_path)
github = models.CharField(max_length=15, blank=True)
twitter = models.CharField(max_length=15, blank=True)
youtube = models.CharField(max_length=30, blank=True)
facebook = models.CharField(max_length=30, blank=True)
instagram = models.CharField(max_length=15, blank=True)
homepage = models.CharField(max_length=100, blank=True)
def __str__(self):
return self.user.username
def total_subscriber(self):
return self.subscriber.count()
def save(self, *args, **kwargs):
try:
this = Profile.objects.get(id=self.id)
if this.avatar != self.avatar:
this.avatar.delete(save=False)
except:
pass
super(Profile, self).save(*args, **kwargs)
class Follow(models.Model):
class Meta:
db_table = 'board_user_follow'
auto_created = True
following = models.ForeignKey(Profile, on_delete=models.CASCADE)
follower = models.ForeignKey(User, on_delete=models.CASCADE)
created_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.post.title
class Post(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=50)
url = models.SlugField(max_length=50, unique=True, allow_unicode=True)
image = models.ImageField(blank=True, upload_to=title_image_path)
text_md = models.TextField()
text_html = models.TextField()
trendy = models.IntegerField(default=0)
view_cnt = models.IntegerField(default=0)
hide = models.BooleanField(default=False)
notice = models.BooleanField(default=False)
block_comment = models.BooleanField(default=False)
likes = models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True)
tag = TagField()
created_date = models.DateTimeField(default=timezone.now)
updated_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post_detail', args=[self.author, self.url])
def total_likes(self):
return self.likes.count()
def save(self, *args, **kwargs):
try:
this = Post.objects.get(id=self.id)
if this.image != self.image:
this.image.delete(save=False)
except:
pass
super(Post, self).save(*args, **kwargs)
class PostLikes(models.Model):
class Meta:
db_table = 'board_post_likes'
auto_created = True
post = models.ForeignKey(Post, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
created_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.post.title
class Comment(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
post = models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE)
text = models.TextField(max_length=300)
edit = models.BooleanField(default=False)
created_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.text
class Notify(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
is_read = models.BooleanField(default=False)
infomation = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.infomation
class Series(models.Model):
owner = models.ForeignKey('auth.User', on_delete=models.CASCADE)
name = models.CharField(max_length=50, unique=True)
url = models.SlugField(max_length=50, unique=True, allow_unicode=True)
posts = models.ManyToManyField(Post, related_name='postlist', blank=True)
created_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('series_list', args=[self.owner, self.url]) | 2.078125 | 2 |
testsuite/modulegraph-dir/renamed_package/__init__.py | xoviat/modulegraph2 | 9 | 12798671 | from sys import path as the_path
| 1.140625 | 1 |
02.Button/09.SwitchFun.py | sarincr/Python-App-Development-using-Kivy | 1 | 12798672 | <gh_stars>1-10
from kivy.app import App
from kivy.uix.switch import Switch
class SwitchApp(App):
def build(self):
switch = Switch()
switch.bind(active=self.switch_state)
return switch
def switch_state(self, instance, value):
print('Switch is', value)
SwitchApp().run()
| 2.546875 | 3 |
python/559.maximum-depth-of-n-ary-tree.py | fengbaoheng/leetcode | 1 | 12798673 | <reponame>fengbaoheng/leetcode
#
# @lc app=leetcode.cn id=559 lang=python3
#
# [559] Maximum Depth of N-ary Tree
#
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
class Solution:
    # Recursively compute the depth of each subtree
def maxDepth(self, root: 'Node') -> int:
if root is None:
return 0
        # No children: only the root level, so the depth is 1
if root.children is None or len(root.children) == 0:
return 1
        # Recurse into every subtree and take the maximum depth
return 1 + max(map(self.maxDepth, root.children))
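# Hedged usage sketch, not part of the original solution file.
# Builds a small three-level tree by hand and prints its depth.
if __name__ == "__main__":
    leaf = Node(5, [])
    root = Node(1, [Node(3, [leaf]), Node(2, []), Node(4, [])])
    print(Solution().maxDepth(root))  # expected output: 3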
| 3.28125 | 3 |
conf/gunicorn.ini.py | tkosht/wsgis | 0 | 12798674 | # import multiprocessing
bind = "0.0.0.0:8000"
# workers = multiprocessing.cpu_count() * 2 + 1
workers = 2
threads = 2
backlog = 4096
| 1.890625 | 2 |
src/healthvaultlib/tests/methods/test_getservicedefinition.py | rajeevs1992/pyhealthvault | 1 | 12798675 | from healthvaultlib.tests.testbase import TestBase
from healthvaultlib.methods.getservicedefinition import GetServiceDefinition
class TestGetServiceDefinition(TestBase):
def test_getservicedefinition(self):
method = GetServiceDefinition(['platform', 'shell', 'topology',
'xml-over-http-methods', 'meaningful-use'])
method.execute(self.connection)
self.assertIsNotNone(method.response)
self.assertIsNotNone(method.response.service_definition.platform)
self.assertIsNotNone(method.response.service_definition.shell)
self.assertNotEqual(len(method.response.service_definition.xml_method), 0)
self.assertNotEqual(len(method.response.service_definition.common_schema), 0)
self.assertNotEqual(len(method.response.service_definition.instances), 0)
self.assertIsNotNone(method.response.service_definition.meaningful_use)
self.assertIsNotNone(method.response.service_definition.updated_date)
| 2.328125 | 2 |
sta/980004006.py | IntSPstudio/vslst-python | 0 | 12798676 | #|==============================================================|#
# Made by IntSPstudio
# Project Visual Street
# ID: 980004006
# Twitter: @IntSPstudio
#|==============================================================|#
#SYSTEM
import os
import sys
#import time
import turtle
import math
#ALG
#Ympyrän kehän koko
def calcCircleRl(rlRadius):
#2PIR
output = 2*pi*rlRadius
return output
#Laskee piiraan kehän koon
def calcCircleSliceRl(rlAngle,rlRadius):
output = rlAngle/360*pi*rlRadius*2
return output
#CONTENT SCREEN
contentscreen = turtle.Screen()
contentscreen.bgcolor("black")
#TURTLE
julle = turtle.Turtle()
julle.color("white")
julle.speed(5)
#INPUT
scriptFle = sys.argv[0]
scriptCircleRadius = sys.argv[1]
scriptCircleSliceAngle = sys.argv[2]
#BASIC VRB
#systemContinuity =1
pi = math.pi
inputCircleRadius = int(scriptCircleRadius)
inputCircleSliceAngle = int(scriptCircleSliceAngle)
inputCircleRl = calcCircleRl(inputCircleRadius)
inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius)
#CLEAR SCREEN
os.system("cls")
#PRINT DATA
print(" Radius:", inputCircleRadius)
print(" Slice:", scriptCircleSliceAngle)
print("Circle Rl:", inputCircleRl)
print(" Slice Rl:", inputCircleSliceRl)
print(" %Rld:", inputCircleSliceRl / inputCircleRl *100)
#ACTION
#Start position
julle.penup()
julle.forward(inputCircleRadius)
julle.left(90)
julle.pendown()
#Circle
julle.circle(inputCircleRadius)
#Slice
julle.pendown()
julle.left(90)
julle.forward(inputCircleRadius)
julle.right(180 - inputCircleSliceAngle)
julle.forward(inputCircleRadius)
julle.right(180)
julle.forward(inputCircleRadius)
#Wait
contentscreen.mainloop()
os.system("cls") | 3.09375 | 3 |
OpenCV/goalTracker.py | cyamonide/FRC-2017 | 0 | 12798677 | <filename>OpenCV/goalTracker.py
import os
import numpy as np
import cv2
from networktables import NetworkTable
os.system("sudo bash /home/pi/vision/init.sh")
NetworkTable.setIPAddress("roboRIO-4914-FRC.local")
NetworkTable.setClientMode()
NetworkTable.initialize()
table = NetworkTable.getTable("HookContoursReport")
COLOR_MIN = np.array([60, 80, 80])
COLOR_MAX = np.array([85, 255, 255])
VIEW_ANGLE = 60 * 360 / 6.283185307 # (for lifecam 3000)
FOV_PIXEL = 320
HOOK_CAM_ID = 0
BOIL_CAM_ID = 1
DEBUG = False
# HOOK_TARGET_LENGTH = 51 # width of retroreflective tape, in cm
MIN_HOOK_AREA = 150
MIN_BOIL_AREA = 100
def cart2pol(a):
x = a[0]
y = a[1]
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return([rho, phi])
def pol2cart(a):
rho = a[0]
phi = a[1]
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return([x, y])
def trackHook():
# read image from camera, resize to 320x240, convert to HSV
ret, frame = cap.read()
frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
if DEBUG:
cv2.imshow('hsv', hsv)
cv2.imshow('brg', frame)
# threshold image based on HSV range provided by COLOR_MIN and COLOR_MAX
frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX)
if DEBUG:
cv2.imshow('frame', frame)
# find contours based on thresholded image
_, contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# creates array of contours larger than given min area
filteredContours = []
for i in range(0, len(contours)):
if cv2.contourArea(contours[i]) > MIN_HOOK_AREA:
filteredContours.append(contours[i])
# finds most rightward (highest x-val) contour from filtered contours
if len(filteredContours) > 0:
# default index and x value
iTargetContour = 0;
maxRightness = 0;
# searches for index of most rightward contours
for i in range(0, len(filteredContours)):
# analyze centre X
M = cv2.moments(filteredContours[i])
cX = int(M["m10"] / M["m00"])
if cX > maxRightness:
maxRightness = cX
iTargetContour = i
targetContour = filteredContours[iTargetContour]
M = cv2.moments(targetContour)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
print(cX, " ", cY)
table.putNumber('cX', cX)
table.putNumber('cY', cY)
else: # if no contours found
table.putNumber('cX', -1)
table.putNumber('cY', -1)
# end of trackHook()
cap = cv2.VideoCapture(HOOK_CAM_ID)
while True:
trackHook()
if DEBUG:
cap.release()
cv2.destroyAllWindows()
| 2.484375 | 2 |
sonnenblume/account/dal_models.py | Sylver11/sonnenblume | 0 | 12798678 | from sqlalchemy.orm import Session, noload
from sqlalchemy.future import select
from sqlalchemy import update
from uuid import UUID
from . import models, schemas
class UserDAL():
def __init__(self, db_session: Session):
self.db_session = db_session
async def get_user_by_email(self, email: str):
stmt = select(models.User).\
where(models.User.email == email).\
options(noload('*'))
q = await self.db_session.execute(stmt)
return q.scalars().first()
async def get_user_by_uuid(self, user_uuid: UUID):
stmt = select(models.User).\
where(models.User.uuid == user_uuid).\
options(noload('*'))
q = await self.db_session.execute(stmt)
return q.scalars().first()
async def create_user(self, user: schemas.UserCreate):
new_user = models.User(
email=user.email,
firstname=user.firstname,
secondname=user.secondname,
)
new_user.set_password(<PASSWORD>)
self.db_session.add(new_user)
return await self.db_session.flush()
| 2.5625 | 3 |
source_code_tokenizer/languages/c/regex.py | matteogabburo/source-code-tokenizer | 1 | 12798679 | LIST_KEYWORDS = [
r"_Alignas",
r"_Alignof",
r"_Atomic",
r"_Bool",
r"_Complex",
r"_Decimal128",
r"_Decimal32",
r"_Decimal64",
r"_Generic",
r"_Imaginary",
r"_Noreturn",
r"_Static_assert",
r"_Thread_local",
r"asm",
r"auto",
r"break",
r"case",
r"char",
r"const",
r"continue",
r"default",
r"define",
r"double",
r"do",
r"elif",
r"else",
r"endif",
r"enum",
r"error",
r"extern",
r"float",
r"fortran",
r"for",
r"goto",
r"ifdef",
r"ifndef",
r"if",
r"include",
r"inline",
r"int",
r"line",
r"long",
r"pragma",
r"register",
r"restrict",
r"return",
r"short",
r"signed",
r"sizeof",
r"static",
r"struct",
r"switch",
r"typedef",
r"undef",
r"union",
r"unsigned",
r"void",
r"volatile",
r"while",
]
# cleaning regex
RM_INDENT = r"(//[^\n]*\n)|(\s*^\s*)"
RM_MULTIPLE_SPACES = r"[^\n\S]+"
# Language keywords
_REGEX_KEYWORD = "|".join([r"{}(?!\w)".format(kw) for kw in LIST_KEYWORDS])
KEYWORD = "(?P<KW>" + _REGEX_KEYWORD + ")"
# variable names
_REGEX_NAME = r"[^\d\W]\w*"
NAME = r"(?P<NAME>" + _REGEX_NAME + ")"
# match every not-[\w] character (for example it match +,-,*,-,...) except spaces
_REGEX_OP = r"[^\s\w]"
OP = r"(?P<OP>" + _REGEX_OP + ")"
# numbers regex
_INT_SUFFIX = r"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?"
_NUMBER_OCT = r"[0][0-7]+" + _INT_SUFFIX
_NUMBER_HEX = r"[0][xX][\da-fA-F]+" + _INT_SUFFIX
_NUMBER_BIN = r"[0][bB][0-1]+" + _INT_SUFFIX
_NUMBER_INT = r"[^0\D][\d]*" + _INT_SUFFIX
_NUMBER_SCI = r"[\d]+[\.]?[\d]*[eE][+-]?[\d]+"
_NUMBER_FLO1 = r"[\d]+[\.]?[\d]*"
_NUMBER_FLO2 = r"[\.][\d]+"
_NUMBER_FLO = _NUMBER_FLO1 + "|" + _NUMBER_FLO2
_REGEX_NUMBER = "|".join(
[_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO]
)
NUMBER = r"(?P<NUMBER>" + _REGEX_NUMBER + ")"
# comments regex
_COMMENT = r"//[^\n]*"
_STRING_CB = r"(?P<ERROR_{}>\Z)" # catastrofic backtracking if a string is not closed
_COMMENT_MULTILINE = r"/\*(.|[\r\n])*?(\*/|" + _STRING_CB.format("COMM_M") + ")"
_REGEX_COMMENT = _COMMENT + "|" + _COMMENT_MULTILINE
COMMENT = r"(?P<COMMENT>" + _REGEX_COMMENT + ")"
# string regex
_STRING_CB = r"(?P<ERROR_{}>\Z)" # catastrofic backtracking if a string is not closed
_STR_HEADERS = ["L", "u8", "u", "U"]
_STRING_PREFIX = r"(" + "|".join(_STR_HEADERS) + ")?"
_REGEX_STRING = (
_STRING_PREFIX + r'"(\\\n|\\"|\\\\|[^"]|.\n])*("|' + _STRING_CB.format("STR") + ")"
)
STRING = r"(?P<STRING>" + _REGEX_STRING + ")"
# char regex
_CHAR_CB = r"(?P<ERROR_{}>\Z)" # catastrofic backtracking if a string is not closed
_CHAR_HEADERS = []
_CHAR_PREFIX = "" # no char literals for C
_REGEX_CHAR = (
_CHAR_PREFIX
+ r"'(\\(\\|'|\"|\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\s|\w){0,1}('|"
+ _CHAR_CB.format("CHR")
+ ")"
)
CHAR = r"(?P<CHAR>" + _REGEX_CHAR + ")"
FULL_CREGEX = "|".join([COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME])
class CRegex:
def __init__(self):
self.regex_groups = [COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME]
def get_full_regex(self):
return FULL_CREGEX
def get_clean_indent_regex(self):
return RM_INDENT
def get_remove_doublespaces_regex(self):
return RM_MULTIPLE_SPACES
def get_str_headers(self):
return _STR_HEADERS
def get_chr_headers(self):
return _CHAR_HEADERS
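
# Illustrative usage sketch (added for clarity; the C snippet and variable names
# are hypothetical, the pattern itself comes from the definitions above): the
# combined regex can drive a simple tokenizer, with match.lastgroup naming the
# token class (KW, NUMBER, OP, NAME, COMMENT, ...).
if __name__ == "__main__":
    import re

    _tokenizer = re.compile(CRegex().get_full_regex())
    for _match in _tokenizer.finditer("int x = 42; // answer"):
        print(_match.lastgroup, repr(_match.group()))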
| 2.390625 | 2 |
svg_ultralight/strings/svg_strings.py | ShayHill/svg_ultralight | 1 | 12798680 | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""Explicit string formatting calls for arguments that aren't floats or strings.
:author: <NAME>
:created: 10/30/2020
The `string_conversion` module will format floats or strings. Some other formatters can
make things easier.
"""
from typing import Iterable, Tuple
from ..string_conversion import format_number
def svg_color_tuple(rgb_floats):
"""
Turn an rgb tuple (0-255, 0-255, 0-255) into an svg color definition.
:param rgb_floats: (0-255, 0-255, 0-255)
:return: "rgb(128,128,128)"
"""
r, g, b = (round(x) for x in rgb_floats)
return f"rgb({r},{g},{b})"
def svg_ints(floats: Iterable[float]) -> str:
"""
Space-delimited ints
:param floats: and number of floats
:return: each float rounded to an int, space delimited
"""
return " ".join(str(round(x)) for x in floats)
def svg_float_tuples(tuples: Iterable[Tuple[float, float]]) -> str:
"""
Space-delimited tuples
:param tuples: [(a, b), (c, d)]
:return: "a,b c,d"
"""
tuples = [",".join(format_number(x) for x in y) for y in tuples]
return " ".join(tuples)
| 4.25 | 4 |
2020/day4/day4.py | andrejkurusiov/advent-of-code-2020 | 0 | 12798681 | import re
# initial data input
infilename = "./day4.txt"
# required fields for checking
required = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
def readfile():
with open(infilename, "rt", encoding="utf-8") as file:
inlist = [line.strip() for line in file]
return inlist
def parse_input(inlist=readfile()):
data_list = [] # list of dictionaries
# artificially add empty item in order to mark the end of the last document
inlist.append("")
dic = {}
for item in inlist:
if item: # not an empty line => belongs to the same document
for item in item.split():
keyvalue = item.split(":")
dic[keyvalue[0]] = keyvalue[1]
else: # starts new document
data_list.append(dic)
dic = {}
return data_list
# 2 valid passports for part 1:
testinput = [
"ecl:gry pid:860033327 eyr:2020 hcl:#fffffd \
byr:1937 iyr:2017 cid:147 hgt:183cm",
"",
"iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 \
hcl:#cfa07d byr:1929",
"",
"hcl:#ae17e1 iyr:2013 \
eyr:2024 \
ecl:brn pid:760753108 byr:1931 \
hgt:179cm",
"",
"hcl:#cfa07d eyr:2025 pid:166559648 \
iyr:2011 ecl:brn hgt:59in",
]
# --- Part One ---
"""
The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
According to the above rules, your improved system would report 2 valid passports.
Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
"""
def extra_check(doc: dict, extra):
# returns True if all checks pass
def n_digits_check(txt: str, start: int, end: int, n=4):
# check for n-digits ranges
return len(txt) == n and int(txt) in range(start, end + 1)
def hgt_check(txt):
# hgt (Height) - a number followed by either cm or in:
# If cm, the number must be at least 150 and at most 193.
# If in, the number must be at least 59 and at most 76.
pat = re.compile(r"(\d+)(cm|in)") # compile regex
tuples = re.search(pat, txt)
if not tuples: # if correct pattern not found
return False
num, unit = int(tuples.group(1)), tuples.group(2)
if unit == "cm":
ok = num in range(150, 193 + 1)
elif unit == "in":
ok = num in range(59, 76 + 1)
else:
ok = False
return ok
def hcl_check(txt):
pat = re.compile(r"#[a-f0-9]{6}") # compile regex
        return re.search(pat, txt) is not None
def ecl_check(txt):
return txt in ("amb blu brn gry grn hzl oth").split()
def pid_check(txt):
return txt.isdigit() and len(txt) == 9
if not extra:
return True
# checking extra rules
return (
n_digits_check(doc["byr"], 1920, 2002)
and n_digits_check(doc["iyr"], 2010, 2020)
and n_digits_check(doc["eyr"], 2020, 2030)
and hgt_check(doc["hgt"])
and hcl_check(doc["hcl"])
and ecl_check(doc["ecl"])
and pid_check(doc["pid"])
)
def analyse(doclist, required, extra=False) -> int:
# returns the number of valid documents according to fields listed in 'required' dictionary
valid = 0
nreq = len(required)
for doc in doclist:
fields_found = 0
        for r in required: # check if all required fields are found in the document
if r in doc:
fields_found += 1
if fields_found == nreq and extra_check(doc, extra):
valid += 1
return valid
def part1(inlist=testinput) -> int:
# returns number of valid documents
return analyse(parse_input(inlist), required)
# --- Part Two ---
"""
You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values:
byr valid: 2002
byr invalid: 2003
hgt valid: 60in
hgt valid: 190cm
hgt invalid: 190in
hgt invalid: 190
hcl valid: #123abc
hcl invalid: #123abz
hcl invalid: 123abc
ecl valid: brn
ecl invalid: wat
pid valid: 000000001
pid invalid: 0123456789
__Here are some invalid passports:
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
__Here are some valid passports:
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional.
"""
# 2 valid passports for part 2:
testinput2 = [
"eyr:1972 cid:100 \
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926",
"",
"iyr:2019 \
hcl:#602927 eyr:1967 hgt:170cm \
ecl:grn pid:012533040 byr:1946",
"",
"pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 \
hcl:#623a2f",
"",
"eyr:2029 ecl:blu cid:129 byr:1989 \
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm",
"",
"eyr:2029 ecl:blu cid:129 byr:1989 \
iyr:2014 pid:89605653z hcl:#a97842 hgt:165cm",
]
def part2(inlist=testinput2) -> int:
return analyse(parse_input(inlist), required, extra=True)
# --- MAIN ---
if __name__ == "__main__":
# if no parameter for part X - test input is used
print("Part1. Number of valid passports:", part1(readfile()))
print("Part2. Number of valid passports:", part2(readfile()))
| 3.203125 | 3 |
tests/test_finite_automaton.py | caiopo/kleeneup | 0 | 12798682 | <filename>tests/test_finite_automaton.py
from kleeneup import FiniteAutomaton, Sentence, State, Symbol
def test_copy():
a, b = Symbol('a'), Symbol('b')
A, B = State('A'), State('B')
transitions = {
(A, a): {B},
(A, b): {A},
(B, a): {A},
(B, b): {B},
}
fa1 = FiniteAutomaton(transitions, A, {A})
fa2 = fa1.copy()
assert fa1 is not fa2
def test_evaluate():
a = Symbol('a')
A, B = State('A'), State('B')
transitions = {
(A, a): {B},
(B, a): {A},
}
fa = FiniteAutomaton(transitions, A, {A})
assert not fa.evaluate(Sentence('aaa'))
assert fa.evaluate(Sentence(''))
assert fa.evaluate(Sentence('aa'))
def test_union():
a, b = Symbol('a'), Symbol('b')
A, B = State('A'), State('B')
fa_1 = FiniteAutomaton({(A, a): B}, A, {B})
C, D = State('C'), State('D')
fa_2 = FiniteAutomaton({(C, b): D}, C, {D})
fa_union = FiniteAutomaton.union(fa_1, fa_2)
assert not fa_1.evaluate(Sentence('b'))
assert not fa_2.evaluate(Sentence('a'))
assert fa_union.evaluate(Sentence('a'))
assert fa_union.evaluate(Sentence('b'))
assert not fa_union.evaluate(Sentence(''))
assert not fa_union.evaluate(Sentence('ab'))
assert not fa_union.evaluate(Sentence('ba'))
def test_concatenate():
a = Symbol('a')
A = State('A')
B = State('B')
fa_1 = FiniteAutomaton({(A, a): B}, A, {B})
C = State('C')
D = State('D')
fa_2 = FiniteAutomaton({(C, a): D}, C, {D})
fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2)
assert not fa_concat.evaluate(Sentence(''))
assert not fa_concat.evaluate(Sentence('a'))
assert fa_concat.evaluate(Sentence('aa'))
assert not fa_concat.evaluate(Sentence('aaa'))
def test_complete():
a = Symbol('a')
b = Symbol('b')
A = State('A')
B = State('B')
transitions = {
(A, a): {A},
(A, b): {B},
}
fa = FiniteAutomaton(transitions, A, {B})
fa.complete()
def test_negate():
a = Symbol('a')
b = Symbol('b')
A = State('A')
B = State('B')
transitions = {
(A, a): {A},
(A, b): {B},
}
fa = FiniteAutomaton(transitions, A, {B})
assert not fa.evaluate(Sentence(''))
assert not fa.evaluate(Sentence('aaa'))
assert fa.evaluate(Sentence('ab'))
n_fa = fa.negate()
assert n_fa.evaluate(Sentence(''))
assert n_fa.evaluate(Sentence('aaa'))
assert not n_fa.evaluate(Sentence('ab'))
def test_remove_unreachable():
a = Symbol('a')
b = Symbol('b')
A = State('A')
B = State('B')
C = State('C')
D = State('D')
E = State('E')
F = State('F')
G = State('G')
H = State('H')
transitions = {
(A, a): {G},
(A, b): {B},
(B, a): {F},
(B, b): {E},
(C, a): {C},
(C, b): {G},
(D, a): {A},
(D, b): {H},
(E, a): {E},
(E, b): {A},
(F, a): {B},
(F, b): {C},
(G, a): {G},
(G, b): {F},
(H, a): {H},
(H, b): {D},
}
fa = FiniteAutomaton(transitions, A, [A, D, G])
fa.remove_unreachable_states()
assert len(fa.states) == 6
def test_minimize():
a = Symbol('a')
b = Symbol('b')
A = State('A')
B = State('B')
C = State('C')
D = State('D')
E = State('E')
F = State('F')
G = State('G')
H = State('H')
transitions = {
(A, a): {G},
(A, b): {B},
(B, a): {F},
(B, b): {E},
(C, a): {C},
(C, b): {G},
(D, a): {A},
(D, b): {H},
(E, a): {E},
(E, b): {A},
(F, a): {B},
(F, b): {C},
(G, a): {G},
(G, b): {F},
(H, a): {H},
(H, b): {D},
}
fa = FiniteAutomaton(transitions, A, [A, D, G])
fa = fa.minimize()
assert len(fa.states) == 3
| 2.71875 | 3 |
data.py | Goteia000/simple_dcf_model | 2 | 12798683 | <filename>data.py
from util import retrieve_from
# 3b732d31142b79d4e8d659612f55181a
class API_Data:
def __init__(self, ticker, apikey='2bd6ce6f77da18e51c3e254ed9060702', url='https://financialmodelingprep.com', verbose=True):
self.url = url
self.ticker = ticker
self.verbose = verbose
self.say('Initiate data module...')
self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey))
        print('Retrieving data of {} from {}'.format(self.profile[0]['companyName'], url))
self.say('Loading Income statements...')
self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker, apikey))
self.say('Loading Balance Sheet...')
self.balance_sheets = retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker, apikey))
self.say('Loading Statements of Cash Flow...')
self.statements_of_cash_flow = retrieve_from('{}/api/v3/cash-flow-statement/{}?apikey={}'.format(url, ticker, apikey))
self.say('Loading Quote...')
self.quote = retrieve_from('{}/api/v3/quote/{}?apikey={}'.format(url, ticker, apikey))
def net_receivable(self, period=0):
return self.balance_sheets[period]['netReceivables']
def account_payable(self, period=0):
return self.balance_sheets[period]['accountPayables']
def inventory(self, period=0):
return self.balance_sheets[period]['inventory']
def revenue(self, period=0):
return self.income_statements[period]['revenue']
def gross_profit_ratio(self, period=0):
return self.income_statements[period]['grossProfitRatio']
def operating_cash_flow(self, period=0):
return self.statements_of_cash_flow[period]['operatingCashFlow']
def total_liabilities(self, period=0):
return self.balance_sheets[period]['totalLiabilities']
def ebitda(self, period=0):
return self.income_statements[period]['ebitda']
def depreciation_and_amortization(self, period=0):
return self.statements_of_cash_flow[period]['depreciationAndAmortization']
def working_capital(self, period=0):
return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities']
def capital_expenditure(self, period=0):
return self.statements_of_cash_flow[period]['capitalExpenditure']
def cash_and_cash_equivalent(self, period=0):
return self.balance_sheets[period]['cashAndCashEquivalents']
def stock_price(self, period=0):
return self.quote[0]['price']
@property
def company_name(self):
return self.profile[0]['companyName']
@property
def beta(self):
return self.profile[0]['beta']
@property
def share_outstanding(self):
return self.quote[0]['sharesOutstanding']
@property
def tax_rate(self):
period = 0
while self.income_statements[period]['incomeTaxExpense'] < 0:
period = period + 1
return self.income_statements[period]['incomeTaxExpense'] / self.income_statements[period]['incomeBeforeTax']
    def say(self, content):
        if self.verbose:
            print('[DATA MODULE] {}'.format(content))
if __name__ == "__main__":
data = API_Data('F')
print(data.net_receivable(1)) | 2.71875 | 3 |
meteocat_api_client/xarxes/pronostic/__init__.py | herrera-lu/meteocat-api-client | 0 | 12798684 | from ..pronostic.pronostic import Pronostic
| 1.0625 | 1 |
main_store/migrations/0002_auto_20201116_1214.py | Melto007/medical_store_project | 0 | 12798685 | <filename>main_store/migrations/0002_auto_20201116_1214.py<gh_stars>0
# Generated by Django 3.1.1 on 2020-11-16 06:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_store', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='product_data',
name='product_name',
field=models.CharField(blank=True, max_length=2000, null=True),
),
]
| 1.34375 | 1 |
PWGPP/FieldParam/fitsol.py | maroozm/AliPhysics | 114 | 12798686 | #!/usr/bin/env python
debug = True # enable trace
def trace(x):
global debug
if debug: print(x)
trace("loading...")
from itertools import combinations, combinations_with_replacement
from glob import glob
from math import *
import operator
from functools import reduce  # reduce is not a builtin in Python 3
from os.path import basename
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.feature_selection
import datetime
def prec_from_pathname(path):
if '2k' in path: return 0.002
elif '5k' in path: return 0.005
    else: raise AssertionError('Unknown field strength: %s' % path)
# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]
def combinatrial_vars(vars_str='xyz', length=3):
term_list = []
for l in range(length):
term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])
return term_list
# product :: a#* => [a] -> a
def product(xs):
return reduce(operator.mul, xs, 1) # foldl in Haskell
# (XYZ, "xx") -> XX
def term(dataframe, vars_str):
return product(map(lambda x: dataframe[x], list(vars_str)))
# (f(X), Y) -> (max deviation, max%, avg dev, avg%)
def deviation_stat(fX, Y, prec=0.005):
dev = np.abs(fX - Y)
(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))
(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)
return (max_dev, max_pct, avg_dev, avg_pct)
# IO Df
def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):
sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']
df = pd.read_csv(path, sep=' ', names=sample_cols)
if cylindrical_axis:
df['r'] = np.sqrt(df.x**2 + df.y**2)
df['p'] = np.arctan2(df.y, df.x)
df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)
df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)
df['Br'] = df.Bt * np.cos(df.Bpsi)
df['Bp'] = df.Bt * np.sin(df.Bpsi)
if absolute_axis:
df['X'] = np.abs(df.x)
df['Y'] = np.abs(df.y)
df['Z'] = np.abs(df.z)
for var in genvars:
df[var] = term(df, var)
return df
def choose(vars, df1, df2):
X1 = df1.loc[:, vars].as_matrix()
X2 = df2.loc[:, vars].as_matrix()
return (X1, X2)
# IO ()
def run_analysis_for_all_fields():
sample_set = glob("dat_z22/*2k*.sample.dat")
test_set = glob("dat_z22/*2k*.test.dat")
#print(sample_set, test_set)
assert(len(sample_set) == len(test_set) and len(sample_set) > 0)
result = pd.DataFrame()
for i, sample_file in enumerate(sample_set):
trace("run_analysis('%s', '%s')" % (sample_file, test_set[i]))
df = run_analysis(sample_file, test_set[i])
result = result.append(df, ignore_index=True)
write_header(result)
def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',
test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):
global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result
precision = prec_from_pathname(sample_file)
assert(precision == prec_from_pathname(test_file))
xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims
trace("reading training samples... " + sample_file)
df = load_samples(sample_file, genvars=xvars_full)
trace("reading test samples..." + test_file)
test = load_samples(test_file, genvars=xvars_full)
trace("linear regression fit...")
lr = sklearn.linear_model.LinearRegression()
#ri = sklearn.linear_model.RidgeCV()
#la = sklearn.linear_model.LassoCV()
fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)
#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']
#xvars = ["xx", "yy", "zz", 'x', 'y', 'z', 'xzz', 'yzz']
#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']
#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']
yvars = ['Bx', 'By', 'Bz']
#yvars = ['Bz']
(Y, Ytest) = choose(yvars, df, test)
#(Y, Ytest) = (df['Bz'], test['Bz'])
xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power
(X, Xtest) = choose(xvars, df, test)
for y in yvars:
fs.fit(X, df[y])
res = pd.DataFrame({ "term": xvars, "rank": fs.ranking_ })
trace(y)
trace(res.sort_values(by = "rank"))
#xvars=list(res.sort_values(by="rank")[:26]['term'])
lr.fit(X, Y)
trace(', '.join(yvars) + " = 1 + " + ' + '.join(xvars))
test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#for i in range(len(yvars)):
# arr = [lr.intercept_[i]] + lr.coef_[i]
# arr = [ str(x) for x in arr ]
# print(yvars[i] + " = { " + ', '.join(arr) + " }")
# print("deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))
(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
trace("linear regression R^2 [train data]: %.8f" % sample_score)
trace("linear regression R^2 [test data] : %.8f" % test_score)
return pd.DataFrame(
{ "xvars": [xvars],
"yvars": [yvars],
"max_dev": [test_dev[0]],
"max%": [test_dev[1]],
"avg_dev": [test_dev[2]],
"avg%": [test_dev[3]],
"sample_score": [sample_score],
"score": [test_score],
"coeffs": [lr.coef_],
"intercept": [lr.intercept_],
"sample_file": [sample_file],
"test_file": [test_file],
"precision": [precision],
"volume_id": [volume_id_from_path(sample_file)]
})
def volume_id_from_path(path):
return basename(path)\
.replace('.sample.dat', '')\
.replace('-', '_')
def get_location_by_volume_id(id):
if 'its' in id: r_bin = 0
if 'tpc' in id: r_bin = 1
if 'tof' in id: r_bin = 2
if 'tofext' in id: r_bin = 3
if 'cal' in id: r_bin = 4
z_bin = int(id.split('_')[1][1:]) # "tofext2k_z0_q4" -> 0
if 'q1' in id: quadrant = 0
if 'q2' in id: quadrant = 1
if 'q3' in id: quadrant = 2
if 'q4' in id: quadrant = 3
return r_bin, z_bin, quadrant
def write_header(result):
#result.to_csv("magfield_params.csv")
#result.to_html("magfield_params.html")
print("# This file was generated from sysid.py at " + str(datetime.datetime.today()))
print("# " + ', '.join(result.iloc[0].yvars) + " = 1 + " + ' + '.join(result.iloc[0].xvars))
print("# barrel r: 0 < its < 80 < tpc < 250 < tof < 400 < tofext < 423 < cal < 500")
print("# barrel z: -550 < z < 550")
print("# phi: 0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi")
print("# header: Rbin Zbin Quadrant Nval_per_compoment(=20)")
print("# data: Nval_per_compoment x floats")
#print("# R^2: coefficient of determination in multiple linear regression. [0,1]")
print("")
for index, row in result.iterrows():
#print("// ** %s - R^2 %s" % (row.volume_id, row.score))
print("#" + row.volume_id)
r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)
print("%s %s %s 20" % (r_bin, z_bin, quadrant))
for i, yvar in enumerate(row.yvars):
name = row.volume_id #+ '_' + yvar.lower()
print("# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i]))
coef = [row['intercept'][i]] + list(row['coeffs'][i])
arr = [ "%.5e" % x for x in coef ]
body = ' '.join(arr)
#decl = "const double[] %s = { %s };\n" % (name, body)
#print(decl)
print(body)
print("")
#write_header(run_analysis())
run_analysis_for_all_fields()
#for i in range(10):
# for xvars in combinations(xvars_full, i+1):
#(X, Xtest) = choose(xvars, df, test)
#lr.fit(X, Y)
#ri.fit(X, Y)
#la.fit(X, Y)
#fs.fit(X, Y)
#print xvars
#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
#print("linear R^2[sample] %.8f" % sample_score)
#print("linear R^2[test] %.8f" % test_score)
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))
#print("lasso R^2[sample] %.8f" % sample_score2)
#print("lasso R^2[test] %.8f" % test_score2)
#print(la.coef_)
#for i in range(len(yvars)):
# print(yvars[i])
# print(pd.DataFrame({"Name": xvars, "Params": lr.coef_[i]}).sort_values(by='Params'))
# print("+ %e" % lr.intercept_[i])
#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)
#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)
#print("[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % sample_dev)
#print("[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev )
#print("lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev2 )
| 2.625 | 3 |
Examples/example_magnetic_symmetry.py | DanPorter/Dans_Diffaction | 22 | 12798687 | <gh_stars>10-100
"""
Dans_Diffraction Examples
Load space groups and look at the information contained.
"""
import sys,os
import numpy as np
import matplotlib.pyplot as plt # Plotting
cf = os.path.dirname(__file__)
sys.path.insert(0,os.path.join(cf, '..'))
import Dans_Diffraction as dif
f = '../Dans_Diffraction/Structures/LaMnO3.mcif'
f = "../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif"
cif = dif.readcif(f)
ops = cif['_space_group_symop_magn_operation.xyz']
cen = cif['_space_group_symop_magn_centering.xyz']
print('Symmetry Operations (%d):' % len(ops))
print(ops)
print('Centring Operations (%d):' % len(cen))
print(cen)
# Combine operations with centring
allops = dif.fc.gen_symcen_ops(ops, cen)
# Convert to magnetic symmetry
magops = dif.fc.symmetry_ops2magnetic(allops)
print('\n%35s (%d) | %-40s' % ('Symmetry Operations', len(allops), 'Magnetic operations'))
for n in range(len(allops)):
print('%40s | %-40s' % (allops[n], magops[n]))
sym, mag, tim = dif.fc.cif_symmetry(cif)
print('\ncif_symmetry')
for n in range(len(sym)):
print('%40s | %+d | %-40s' % (sym[n], tim[n], mag[n])) | 2.828125 | 3 |
distances/migrations/0011_auto_20170602_1044.py | tkettu/rokego | 0 | 12798688 | <filename>distances/migrations/0011_auto_20170602_1044.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-02 07:44
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('distances', '0010_auto_20170519_1604'),
]
operations = [
migrations.AlterField(
model_name='dates',
name='startDate',
field=models.DateField(default=datetime.datetime(2017, 5, 26, 10, 44, 7, 194576)),
),
]
| 1.5 | 2 |
blog/urls.py | yanfreitas/Django-blog-project | 0 | 12798689 | from django.urls import path
from blog import views
from blog.views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
CommentDeleteView,
UserPostListView,
LikeView,
)
urlpatterns = [
path('', PostListView.as_view(), name='index'),
path('post/new/', views.PostCreateView.as_view(), name='post-create'),
path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'),
path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'),
path('user/<str:username>/', UserPostListView.as_view(), name='user-post'),
path('post/comment/<int:pk>/delete/', views.CommentDeleteView.as_view(), name='comment-delete'),
path('post/<int:pk>/like/', LikeView, name='like-post'),
] | 2.09375 | 2 |
benchmarking/pfire_benchmarking/analysis_routines.py | willfurnass/pFIRE | 9 | 12798690 | <filename>benchmarking/pfire_benchmarking/analysis_routines.py
#!/usr/bin/env python3
""" Mathematical analysis functions for image and map comparison
"""
from collections import namedtuple
from textwrap import wrap
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as sps
from tabulate import tabulate
from .image_routines import load_image, load_map
MIResult = namedtuple("mi_result", ['mi', 'hist'])
def calculate_entropy(prob_dist):
r"""
Calculate Shannon entropy of the provided probability distribution
Shannon Entropy is defined as
    $H(X) = -\sum_x p_X(x)\log_2{p_X(x)}$
"""
# First disregard all values where p_X == 0 to avoid nans from log(p_X)
normed_prob_dist = prob_dist[prob_dist > 0]
normed_prob_dist /= normed_prob_dist.sum()
entropy = -np.sum(normed_prob_dist * np.log2(normed_prob_dist))
return entropy
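
# Worked example (added for clarity): for a fair coin, p = [0.5, 0.5], the formula
# gives H = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1 bit, so
# calculate_entropy(np.array([0.5, 0.5])) returns 1.0.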
def calculate_mutual_information(data1, data2, resolution=50,
return_hist=False):
r"""
Calculate mutual information using Shannon entropy of provided data.
Mutual Information is defined as:
MI(X, Y) = H(X) + H(Y) - H(X,Y)
Where H(X), H(Y) and H(X,Y) are the Shannon entropies of the probabilities
and the joint probability of the data.
N.B it is assumed that the two datasets are independent.
Returns a tuple of MI(X,Y), H(X), H(Y), H(X,Y)
"""
jointmax = max(data1.max(), data2.max())
# First calculate probability density
bin_edges = np.linspace(0, 1, num=resolution)
prob1_2, _, _ = np.histogram2d(data1.flatten()/jointmax,
data2.flatten()/jointmax,
bins=bin_edges, density=True)
prob1 = np.sum(prob1_2, axis=1)
prob2 = np.sum(prob1_2, axis=0)
entropy1 = calculate_entropy(prob1)
entropy2 = calculate_entropy(prob2)
entropy1_2 = calculate_entropy(prob1_2)
mutual_information = entropy1 + entropy2 - entropy1_2
if return_hist:
return (mutual_information, entropy1, entropy2, entropy1_2, prob1_2)
else:
return (mutual_information, entropy1, entropy2, entropy1_2)
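
# Illustrative sanity check (added for clarity; the variable names below are
# hypothetical): passing the same array twice yields a diagonal joint histogram,
# so H(X,Y) ~= H(X) and the mutual information collapses to the marginal entropy.
#     rng = np.random.default_rng(0)
#     img = rng.random((64, 64))
#     mi, h1, h2, h12 = calculate_mutual_information(img, img)
#     # mi is approximately h1 (and h2), so the ratio mi / min(h1, h2) is ~1.0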
def plot_2dhist(data, path, title):
""" Helper function to plot 2d histogram and return rst inclusion command.
"""
plt.matshow(data, origin='lower', cmap='gray')
plt.title("\n".join(wrap(title, 40)))
plt.savefig(path)
plt.close()
return ".. image:: {}\n".format(os.path.basename(path))
def calculate_proficiency(alpha, beta):
""" Calculate proficiency (normalized mutual information) of an image pair
"""
alpha_data = load_image(alpha)
beta_data = load_image(beta)
res = calculate_mutual_information(alpha_data, beta_data, return_hist=True)
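    # Normalise MI by the smaller marginal entropy so the score lies in [0, 1].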
prof = res[0]/min(res[1], res[2])
return MIResult(prof, res[-1])
def compare_image_results(fixed_path, moved_path, accepted_path,
pfire_path, fig_dir=None, cmpname="accepted"):
"""Compare ShIRT and pFIRE registered images
"""
if fig_dir:
os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True)
else:
fig_dir = os.path.normpath('.')
mi_start = calculate_proficiency(fixed_path, moved_path)
mi_accepted = calculate_proficiency(fixed_path, accepted_path)
mi_pfire = calculate_proficiency(fixed_path, pfire_path)
mi_comparison = calculate_proficiency(accepted_path, pfire_path)
res_table = [("Normalized mutual information (proficiency):", ""),
("Fixed vs. Moved:", "{:.3f}".format(mi_start.mi)),
("{} vs. Fixed:".format(cmpname), "{:.3f}".format(mi_accepted.mi)),
("pFIRE vs. Fixed:", "{:.3f}".format(mi_pfire.mi)),
("pFIRE vs. {}:".format(cmpname), "{:.3f}\n".format(mi_comparison.mi))]
print(tabulate(res_table, headers="firstrow", tablefmt='grid') + "\n")
rst_output = []
rst_output.append(tabulate(res_table, headers="firstrow", tablefmt="rst"))
rst_output.append("") # table must be followed by blank line
image_rst = []
if fig_dir:
image_rst.append(plot_2dhist(
mi_start.hist, os.path.join(fig_dir, "prereg.png"),
"Fixed vs. Moved normalized mutual information: "
"{:0.3f}".format(mi_start.mi)))
image_rst.append(plot_2dhist(
mi_accepted.hist, os.path.join(fig_dir, "accepted.png"),
"{} vs. Fixed normalized mutual information: "
"{:0.3f}".format(cmpname, mi_accepted.mi)))
image_rst.append(plot_2dhist(
mi_pfire.hist, os.path.join(fig_dir, "pfire.png"),
"pFIRE vs Fixed normalized mutual information: "
"{:0.3f}".format(mi_pfire.mi)))
image_rst.append(plot_2dhist(
mi_comparison.hist, os.path.join(fig_dir, "comparison.png"),
"pFIRE vs. {} normalized mutual information: "
"{:0.3f}".format(cmpname, mi_comparison.mi)))
return ("\n".join(rst_output), "\n".join(image_rst))
def compare_map_results(cmp_map_path, pfire_map_path, fig_dir=None,
cmpname='Accepted'):
"""Compare ShIRT and pFIRE displacement maps
"""
if fig_dir:
os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True)
cmp_map = load_map(cmp_map_path)
pfire_map = load_map(pfire_map_path)
table_entries = [("Map coefficients of determination (R^2), by dimension:", "")]
image_entries = []
for didx, dim in enumerate(['X', 'Y', 'Z']):
try:
corr = sps.linregress(cmp_map[didx].flatten(),
pfire_map[didx].flatten())[2]
table_entries.append(("{}:".format(dim), "{:0.3}".format(corr**2)))
if fig_dir:
savepath = os.path.join(fig_dir, "map_{}.png".format(dim.lower()))
plt.plot(cmp_map[didx].flatten(), marker='x', ls='none',
label=cmpname)
plt.plot(pfire_map[didx].flatten(), marker='+', ls='none',
label="pFIRE")
plt.title("Map {} component, R^2={:0.3}".format(dim, corr**2))
plt.legend()
plt.savefig(savepath)
plt.close()
image_entries.append(".. image:: {}"
"".format(os.path.basename(savepath)))
except IndexError:
break
print(tabulate(table_entries, headers="firstrow", tablefmt="grid"))
table = tabulate(table_entries, headers="firstrow", tablefmt="rst")
return (table, "\n".join(image_entries))
| 2.671875 | 3 |
fonts/FreeSerifItalic9pt7b.py | cnobile2012/Python-TFT | 0 | 12798691 | FreeSerifItalic9pt7bBitmaps = [
0x11, 0x12, 0x22, 0x24, 0x40, 0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82, 0x20,
0x98, 0x24, 0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11, 0x04, 0x83, 0x20,
0x1C, 0x1B, 0x99, 0x4D, 0x26, 0x81, 0xC0, 0x70, 0x1C, 0x13, 0x49, 0xA4,
0xDA, 0xC7, 0xC1, 0x00, 0x80, 0x1C, 0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0,
0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26, 0x20, 0x99,
0x84, 0x3C, 0x03, 0x80, 0x6C, 0x06, 0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66,
0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3, 0x87, 0xCE, 0xFA, 0x08, 0x21, 0x08,
0x61, 0x8C, 0x30, 0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00, 0x10, 0x40, 0x82,
0x0C, 0x30, 0xC3, 0x0C, 0x61, 0x84, 0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E,
0x6A, 0x93, 0x08, 0x08, 0x04, 0x02, 0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10,
0x08, 0x00, 0x56, 0xF0, 0xF0, 0x03, 0x02, 0x06, 0x04, 0x08, 0x08, 0x10,
0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E, 0x0C, 0x8C, 0x6C, 0x36, 0x1F, 0x0F,
0x07, 0x87, 0xC3, 0x61, 0xB1, 0x88, 0x83, 0x80, 0x04, 0x70, 0xC3, 0x08,
0x21, 0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02, 0x06,
0x0C, 0x08, 0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0,
0x38, 0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F, 0x00, 0x01, 0x01, 0x81, 0x41,
0x61, 0x21, 0x11, 0x18, 0x88, 0xFF, 0x02, 0x03, 0x01, 0x00, 0x0F, 0x84,
0x04, 0x03, 0x80, 0x60, 0x18, 0x0C, 0x06, 0x03, 0x03, 0x03, 0x1E, 0x00,
0x01, 0x83, 0x87, 0x07, 0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61, 0xB1,
0x88, 0xC3, 0xC0, 0x7F, 0x40, 0x80, 0x80, 0x40, 0x40, 0x60, 0x20, 0x20,
0x10, 0x10, 0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0,
0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0, 0x0E, 0x19, 0x8C, 0x6C,
0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03, 0x06, 0x0C, 0x00, 0x33,
0x00, 0x00, 0xCC, 0x33, 0x00, 0x00, 0x44, 0x48, 0x01, 0x83, 0x86, 0x1C,
0x0C, 0x03, 0x80, 0x30, 0x07, 0x00, 0x80, 0xFF, 0x80, 0x00, 0x00, 0x0F,
0xF8, 0xC0, 0x1C, 0x03, 0x80, 0x70, 0x18, 0x38, 0x70, 0xC0, 0x80, 0x00,
0x3C, 0x8C, 0x18, 0x30, 0xC3, 0x0C, 0x20, 0x40, 0x80, 0x06, 0x00, 0x0F,
0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39, 0x91,
0xCC, 0x93, 0x3B, 0x0E, 0x00, 0x1F, 0x80, 0x01, 0x00, 0x60, 0x14, 0x04,
0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F, 0x80, 0x3F,
0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4,
0x19, 0x86, 0x7F, 0x80, 0x07, 0x91, 0x86, 0x30, 0x26, 0x02, 0x60, 0x0C,
0x00, 0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0,
0x63, 0x82, 0x0C, 0x30, 0x31, 0x81, 0x8C, 0x0C, 0x40, 0x66, 0x07, 0x30,
0x31, 0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80,
0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8,
0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10,
0x30, 0x02, 0x00, 0x60, 0x0F, 0x80, 0x07, 0x91, 0x87, 0x30, 0x26, 0x02,
0x60, 0x0C, 0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0,
0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06,
0x0C, 0x18, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C, 0x78, 0x1E, 0x18, 0x20,
0xC1, 0x83, 0x04, 0x18, 0x30, 0x41, 0x87, 0x80, 0x0F, 0x81, 0x80, 0x80,
0xC0, 0x60, 0x20, 0x30, 0x18, 0x0C, 0x04, 0x36, 0x1E, 0x00, 0x3E, 0x78,
0x61, 0x82, 0x10, 0x31, 0x01, 0xB0, 0x0E, 0x00, 0x58, 0x06, 0x60, 0x33,
0x01, 0x0C, 0x18, 0x61, 0xE7, 0xC0, 0x3E, 0x01, 0x80, 0x20, 0x0C, 0x01,
0x80, 0x30, 0x04, 0x01, 0x80, 0x30, 0x04, 0x0D, 0x83, 0x7F, 0xE0, 0x1C,
0x07, 0x0C, 0x0E, 0x0C, 0x14, 0x14, 0x1C, 0x14, 0x2C, 0x16, 0x4C, 0x26,
0x48, 0x26, 0x98, 0x27, 0x18, 0x27, 0x10, 0x42, 0x30, 0xF4, 0x7C, 0x38,
0x78, 0x60, 0x83, 0x04, 0x2C, 0x41, 0x22, 0x09, 0x10, 0x4D, 0x84, 0x28,
0x21, 0x41, 0x06, 0x10, 0x21, 0xE1, 0x00, 0x07, 0x83, 0x18, 0xC1, 0xB0,
0x36, 0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C, 0x1E, 0x00,
0x3F, 0xC1, 0x9C, 0x21, 0x8C, 0x31, 0x86, 0x31, 0x87, 0xE1, 0x80, 0x30,
0x04, 0x01, 0x80, 0x78, 0x00, 0x07, 0x83, 0x18, 0xC1, 0x98, 0x36, 0x07,
0xC0, 0xF0, 0x1E, 0x06, 0xC0, 0xD8, 0x31, 0x04, 0x13, 0x01, 0x80, 0x70,
0xB7, 0xE0, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3F, 0x04, 0xC1,
0x98, 0x31, 0x84, 0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0,
0x38, 0x07, 0x00, 0x60, 0x0C, 0x43, 0x10, 0xC6, 0x62, 0x70, 0x7F, 0xE9,
0x8E, 0x31, 0x04, 0x01, 0x80, 0x30, 0x06, 0x00, 0x80, 0x30, 0x06, 0x00,
0x80, 0x7E, 0x00, 0x7C, 0xF3, 0x02, 0x30, 0x46, 0x04, 0x60, 0x46, 0x04,
0x40, 0x8C, 0x08, 0xC0, 0x8C, 0x10, 0xE3, 0x03, 0xC0, 0xF8, 0xEC, 0x0C,
0x81, 0x18, 0x43, 0x08, 0x62, 0x0C, 0x81, 0x90, 0x14, 0x03, 0x00, 0x60,
0x08, 0x00, 0xFB, 0xCE, 0x43, 0x0C, 0x86, 0x11, 0x8C, 0x43, 0x38, 0x86,
0xB2, 0x0D, 0x24, 0x1C, 0x50, 0x38, 0xA0, 0x21, 0x80, 0x42, 0x01, 0x04,
0x00, 0x3E, 0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60, 0x06, 0x00,
0xB0, 0x13, 0x02, 0x18, 0x61, 0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8,
0x34, 0x05, 0x01, 0x80, 0x40, 0x30, 0x0C, 0x03, 0x03, 0xE0, 0x3F, 0xE4,
0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40, 0x18, 0x06, 0x05,
0x81, 0x7F, 0xE0, 0x0E, 0x10, 0x20, 0x81, 0x02, 0x04, 0x10, 0x20, 0x40,
0x82, 0x04, 0x08, 0x1C, 0x00, 0x81, 0x04, 0x18, 0x20, 0xC1, 0x04, 0x08,
0x20, 0x41, 0x38, 0x20, 0x82, 0x08, 0x41, 0x04, 0x10, 0xC2, 0x08, 0x20,
0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8,
0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08,
0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00,
0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0, 0xC4, 0x78, 0x01, 0x80, 0x40, 0x60,
0x20, 0xF1, 0x89, 0x8C, 0xC4, 0xC2, 0x63, 0x33, 0xAE, 0xE0, 0x0E, 0x65,
0x8B, 0x2F, 0x98, 0x31, 0x3C, 0x01, 0xE0, 0x40, 0x08, 0x02, 0x00, 0x40,
0x3E, 0x03, 0x00, 0x40, 0x08, 0x01, 0x00, 0x60, 0x0C, 0x01, 0x00, 0x20,
0x04, 0x01, 0x00, 0xC0, 0x00, 0x1E, 0x19, 0xD8, 0xCC, 0xE1, 0xC3, 0x01,
0xE0, 0xBC, 0x82, 0x41, 0x31, 0x0F, 0x00, 0x38, 0x08, 0x04, 0x02, 0x03,
0x39, 0x6C, 0xC6, 0x46, 0x63, 0x21, 0x11, 0xB8, 0xE0, 0x30, 0x00, 0xE2,
0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0, 0x83, 0x04, 0x08, 0x10,
0x60, 0x81, 0x02, 0x04, 0x70, 0x38, 0x10, 0x10, 0x10, 0x37, 0x22, 0x24,
0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73, 0x32, 0x26, 0x64, 0x4C, 0xDE, 0x77,
0x39, 0x5E, 0xCC, 0xCC, 0xCE, 0x66, 0x62, 0x22, 0x11, 0x11, 0xB9, 0x8E,
0x77, 0x3B, 0x33, 0x62, 0x62, 0x42, 0x4D, 0xCE, 0x0F, 0x18, 0xD8, 0x7C,
0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31, 0x8C, 0x43, 0x31, 0x88,
0x62, 0x30, 0xF0, 0x60, 0x10, 0x04, 0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C,
0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0, 0x76, 0x50,
0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C,
0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C,
0x9D, 0xEE, 0x62, 0xC4, 0x89, 0xA3, 0x47, 0x0C, 0x10, 0xE2, 0x2C, 0x44,
0xD8, 0x9D, 0x23, 0xA4, 0x65, 0x0C, 0xC1, 0x10, 0x19, 0x95, 0x43, 0x01,
0x80, 0xC0, 0xA0, 0x91, 0x8E, 0x70, 0x88, 0x46, 0x23, 0x20, 0x90, 0x50,
0x28, 0x18, 0x08, 0x08, 0x08, 0x18, 0x00, 0x3F, 0x42, 0x04, 0x08, 0x10,
0x20, 0x40, 0x72, 0x0E, 0x08, 0x61, 0x04, 0x30, 0x86, 0x08, 0x61, 0x04,
0x30, 0xC3, 0x8F, 0x00, 0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82,
0x04, 0x1C, 0x30, 0x40, 0x83, 0x04, 0x08, 0x20, 0x60, 0x99, 0x8E ]
FreeSerifItalic9pt7bGlyphs = [
[ 0, 0, 0, 5, 0, 1 ], # 0x20 ' '
[ 0, 4, 12, 6, 1, -11 ], # 0x21 '!'
[ 6, 5, 4, 6, 3, -11 ], # 0x22 '"'
[ 9, 10, 12, 9, 0, -11 ], # 0x23 '#'
[ 24, 9, 15, 9, 1, -12 ], # 0x24 '$'
[ 41, 14, 12, 15, 1, -11 ], # 0x25 '%'
[ 62, 12, 12, 14, 1, -11 ], # 0x26 '&'
[ 80, 2, 4, 4, 3, -11 ], # 0x27 '''
[ 81, 6, 15, 6, 1, -11 ], # 0x28 '('
[ 93, 6, 15, 6, 0, -11 ], # 0x29 ')'
  [ 105, 6, 8, 9, 3, -11 ], # 0x2A '*'
[ 111, 9, 9, 12, 1, -8 ], # 0x2B '+'
[ 122, 2, 4, 5, 0, -1 ], # 0x2C ','
[ 123, 4, 1, 6, 1, -3 ], # 0x2D '-'
[ 124, 2, 2, 5, 0, -1 ], # 0x2E '.'
[ 125, 8, 12, 5, 0, -11 ], # 0x2F '/'
[ 137, 9, 13, 9, 1, -12 ], # 0x30 '0'
[ 152, 6, 13, 9, 1, -12 ], # 0x31 '1'
[ 162, 8, 12, 9, 1, -11 ], # 0x32 '2'
[ 174, 9, 12, 9, 0, -11 ], # 0x33 '3'
[ 188, 9, 12, 9, 0, -11 ], # 0x34 '4'
[ 202, 9, 12, 9, 0, -11 ], # 0x35 '5'
[ 216, 9, 13, 9, 1, -12 ], # 0x36 '6'
[ 231, 9, 12, 9, 1, -11 ], # 0x37 '7'
[ 245, 9, 13, 9, 1, -12 ], # 0x38 '8'
[ 260, 9, 13, 9, 0, -12 ], # 0x39 '9'
[ 275, 4, 8, 4, 1, -7 ], # 0x3A ':'
  [ 279, 4, 10, 4, 1, -7 ], # 0x3B ';'
[ 284, 9, 9, 10, 1, -8 ], # 0x3C '<'
[ 295, 9, 5, 12, 2, -6 ], # 0x3D '='
[ 301, 9, 9, 10, 1, -8 ], # 0x3E '>'
[ 312, 7, 12, 8, 2, -11 ], # 0x3F '?'
[ 323, 13, 12, 14, 1, -11 ], # 0x40 '@'
[ 343, 11, 11, 12, 0, -10 ], # 0x41 'A'
[ 359, 11, 12, 11, 0, -11 ], # 0x42 'B'
[ 376, 12, 12, 11, 1, -11 ], # 0x43 'C'
[ 394, 13, 12, 13, 0, -11 ], # 0x44 'D'
[ 414, 12, 12, 10, 0, -11 ], # 0x45 'E'
[ 432, 12, 12, 10, 0, -11 ], # 0x46 'F'
[ 450, 12, 12, 12, 1, -11 ], # 0x47 'G'
[ 468, 14, 12, 13, 0, -11 ], # 0x48 'H'
[ 489, 7, 12, 6, 0, -11 ], # 0x49 'I'
[ 500, 9, 12, 8, 0, -11 ], # 0x4A 'J'
[ 514, 13, 12, 12, 0, -11 ], # 0x4B 'K'
[ 534, 11, 12, 10, 0, -11 ], # 0x4C 'L'
[ 551, 16, 12, 15, 0, -11 ], # 0x4D 'M'
[ 575, 13, 12, 12, 0, -11 ], # 0x4E 'N'
[ 595, 11, 12, 12, 1, -11 ], # 0x4F 'O'
[ 612, 11, 12, 10, 0, -11 ], # 0x50 'P'
[ 629, 11, 15, 12, 1, -11 ], # 0x51 'Q'
[ 650, 11, 12, 11, 0, -11 ], # 0x52 'R'
[ 667, 10, 12, 8, 0, -11 ], # 0x53 'S'
[ 682, 11, 12, 11, 2, -11 ], # 0x54 'T'
[ 699, 12, 12, 13, 2, -11 ], # 0x55 'U'
[ 717, 11, 12, 12, 2, -11 ], # 0x56 'V'
[ 734, 15, 12, 16, 2, -11 ], # 0x57 'W'
[ 757, 12, 12, 12, 0, -11 ], # 0x58 'X'
[ 775, 10, 12, 11, 2, -11 ], # 0x59 'Y'
[ 790, 11, 12, 10, 0, -11 ], # 0x5A 'Z'
[ 807, 7, 15, 7, 0, -11 ], # 0x5B '['
[ 821, 6, 12, 9, 2, -11 ], # 0x5C '\'
[ 830, 6, 15, 7, 1, -11 ], # 0x5D ']'
[ 842, 8, 7, 8, 0, -11 ], # 0x5E '^'
[ 849, 9, 1, 9, 0, 2 ], # 0x5F '_'
[ 851, 3, 3, 5, 2, -11 ], # 0x60 '`'
[ 853, 9, 8, 9, 0, -7 ], # 0x61 'a'
[ 862, 9, 12, 9, 0, -11 ], # 0x62 'b'
[ 876, 8, 8, 7, 0, -7 ], # 0x63 'c'
[ 884, 9, 12, 9, 0, -11 ], # 0x64 'd'
[ 898, 7, 8, 7, 0, -7 ], # 0x65 'e'
[ 905, 11, 17, 8, -1, -12 ], # 0x66 'f'
[ 929, 9, 12, 8, 0, -7 ], # 0x67 'g'
[ 943, 9, 12, 9, 0, -11 ], # 0x68 'h'
[ 957, 4, 12, 4, 1, -11 ], # 0x69 'i'
[ 963, 7, 16, 5, -1, -11 ], # 0x6A 'j'
[ 977, 8, 12, 8, 0, -11 ], # 0x6B 'k'
[ 989, 4, 12, 5, 1, -11 ], # 0x6C 'l'
[ 995, 13, 8, 13, 0, -7 ], # 0x6D 'm'
[ 1008, 8, 8, 9, 0, -7 ], # 0x6E 'n'
[ 1016, 9, 8, 9, 0, -7 ], # 0x6F 'o'
[ 1025, 10, 12, 8, -1, -7 ], # 0x70 'p'
[ 1040, 9, 12, 9, 0, -7 ], # 0x71 'q'
[ 1054, 7, 8, 7, 0, -7 ], # 0x72 'r'
[ 1061, 7, 8, 6, 0, -7 ], # 0x73 's'
[ 1068, 5, 9, 4, 0, -8 ], # 0x74 't'
[ 1074, 8, 8, 9, 1, -7 ], # 0x75 'u'
[ 1082, 7, 8, 8, 1, -7 ], # 0x76 'v'
[ 1089, 11, 8, 12, 1, -7 ], # 0x77 'w'
[ 1100, 9, 8, 8, -1, -7 ], # 0x78 'x'
[ 1109, 9, 12, 9, 0, -7 ], # 0x79 'y'
[ 1123, 8, 9, 7, 0, -7 ], # 0x7A 'z'
  [ 1132, 6, 15, 7, 1, -11 ], # 0x7B '{'
[ 1144, 1, 12, 5, 2, -11 ], # 0x7C '|'
  [ 1146, 7, 16, 7, 0, -12 ], # 0x7D '}'
[ 1160, 8, 3, 10, 1, -5 ] ] # 0x7E '~'
FreeSerifItalic9pt7b = [
FreeSerifItalic9pt7bBitmaps,
FreeSerifItalic9pt7bGlyphs,
0x20, 0x7E, 22 ]
# Approx. 1835 bytes
| 1.03125 | 1 |
travello/migrations/0004_auto_20200607_1817.py | KaushikAlwala/COVID-19---a-DBMS-approach | 0 | 12798692 | # Generated by Django 3.0.6 on 2020-06-07 12:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('travello', '0003_travel_history2'),
]
operations = [
migrations.RenameField(
model_name='medical_history',
old_name='Bronchitis',
new_name='bronchitis',
),
migrations.RenameField(
model_name='medical_history',
old_name='COPD',
new_name='copd',
),
migrations.RenameField(
model_name='medical_history',
old_name='Diabetes_mellitus',
new_name='diabetes_mellitus',
),
migrations.RenameField(
model_name='medical_history',
old_name='HIV_AIDS',
new_name='hiv_aids',
),
migrations.RenameField(
model_name='medical_history',
old_name='Ischemic_heart_disease',
new_name='ischemic_heart_disease',
),
migrations.RenameField(
model_name='medical_history',
old_name='Kidney_Disease',
new_name='kidney_disease',
),
migrations.RenameField(
model_name='medical_history',
old_name='Stroke',
new_name='stroke',
),
]
| 1.765625 | 2 |
src/leetcode_1996_the_number_of_weak_characters_in_the_game.py | sungho-joo/leetcode2github | 0 | 12798693 | <filename>src/leetcode_1996_the_number_of_weak_characters_in_the_game.py
# @l2g 1996 python3
# [1996] The Number of Weak Characters in the Game
# Difficulty: Medium
# https://leetcode.com/problems/the-number-of-weak-characters-in-the-game
#
# You are playing a game that contains multiple characters,
# and each of the characters has two main properties: attack and defense.
# You are given a 2D integer array properties where properties[i] = [attacki,
# defensei] represents the properties of the ith character in the game.
# A character is said to be weak if any other character has both attack and defense levels strictly greater than this character's attack and defense levels.
# More formally,
# a character i is said to be weak if there exists another character j where attackj > attacki and defensej > defensei.
# Return the number of weak characters.
#
# Example 1:
#
# Input: properties = [[5,5],[6,3],[3,6]]
# Output: 0
# Explanation: No character has strictly greater attack and defense than the other.
#
# Example 2:
#
# Input: properties = [[2,2],[3,3]]
# Output: 1
# Explanation: The first character is weak because the second character has a strictly greater attack and defense.
#
# Example 3:
#
# Input: properties = [[1,5],[10,4],[4,3]]
# Output: 1
# Explanation: The third character is weak because the second character has a strictly greater attack and defense.
#
#
# Constraints:
#
# 2 <= properties.length <= 10^5
# properties[i].length == 2
# 1 <= attacki, defensei <= 10^5
#
#
from typing import List
class Solution:
def numberOfWeakCharacters(self, properties: List[List[int]]) -> int:
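        # Sort by attack descending and, within equal attack, by defense ascending,
        # so characters with the same attack can never count each other as weak.
        # Scanning in this order, any character whose defense is below the running
        # maximum defense must follow someone with strictly greater attack and
        # defense, and is therefore weak.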
properties.sort(key=lambda x: (-x[0], x[1]))
ans = 0
curr_max = 0
for _, d in properties:
if d < curr_max:
ans += 1
else:
curr_max = d
return ans
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_1996.py")])
| 4.15625 | 4 |
pydsstools/examples/example4.py | alai-arpas/pydsstools | 54 | 12798694 | <reponame>alai-arpas/pydsstools
'''
Read irregular time-series data
'''
from pydsstools.heclib.dss import HecDss
dss_file = "example.dss"
pathname = "/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/"
with HecDss.Open(dss_file) as fid:
ts = fid.read_ts(pathname,regular=False,window_flag=0)
print(ts.pytimes)
print(ts.values)
print(ts.nodata)
print(ts.empty)
| 2.234375 | 2 |
src/fuzzingtool/decorators/plugin_meta.py | NESCAU-UFLA/FuzzyingTool | 0 | 12798695 | # Copyright (c) 2020 - present <NAME> <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ..core.bases.base_plugin import Plugin
from ..utils.consts import FuzzType
from ..exceptions import MetadataException
def plugin_meta(cls: Plugin) -> Plugin:
"""Decorator to check for plugin metadata on a plugin class
@type cls: Plugin
@param cls: The class that call this decorator
"""
_check_mandatory_meta(cls)
if not cls.__author__:
raise MetadataException(f"Author cannot be empty on plugin {cls.__name__}")
if cls.__params__:
_check_params_meta(cls)
if not cls.__desc__:
raise MetadataException(
f"Description cannot be blank on plugin {cls.__name__}"
)
if cls.__type__ is not None and cls.__type__ not in [
value for key, value in vars(FuzzType).items() if not key.startswith("__")
]:
raise MetadataException(
f"Plugin type should be None or a valid FuzzType on plugin {cls.__name__}"
)
if not cls.__version__:
raise MetadataException(f"Version cannot be blank on plugin {cls.__name__}")
return cls
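
# Illustrative sketch (added for clarity; the class name and values are hypothetical):
# a plugin that passes the checks above would declare its metadata roughly like this.
#
#     @plugin_meta
#     class ExamplePlugin(Plugin):
#         __author__ = ("example author",)
#         __params__ = {"metavar": "PAYLOAD", "type": str}
#         __desc__ = "Short description of what the plugin does"
#         __type__ = None  # None is allowed; otherwise a valid FuzzType value
#         __version__ = "1.0"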
def _check_mandatory_meta(cls: Plugin) -> None:
"""Checks the mandatory metadata into the plugin decorator
@type cls: Plugin
@param cls: The class with the plugin metadata
"""
metadata = ['__author__', '__params__',
'__desc__', '__type__', '__version__']
class_attr = vars(cls)
for meta in metadata:
if meta not in class_attr:
raise MetadataException(
f"Metadata {meta} not specified on plugin {cls.__name__}"
)
def _check_params_meta(cls: Plugin) -> None:
"""Checks the parameter metadata into the plugin decorator
@type cls: Plugin
@param cls: The class with the plugin metadata
"""
if (type(cls.__params__) is not dict):
raise MetadataException("The parameters must be a "
f"dictionary on plugin {cls.__name__}")
param_dict_keys = cls.__params__.keys()
for key in ['metavar', 'type']:
if key not in param_dict_keys:
raise MetadataException(f"Key {key} must be in parameters "
f"dict on plugin {cls.__name__}")
if not cls.__params__[key]:
raise MetadataException(f"Value of {key} cannot be empty in "
f"parameters dict on plugin {cls.__name__}")
if cls.__params__['type'] is list:
if 'cli_list_separator' not in param_dict_keys:
raise MetadataException("The key 'cli_list_separator' must be present "
"when parameter type is list "
f"on plugin {cls.__name__}")
if not cls.__params__['cli_list_separator']:
raise MetadataException("Value of 'cli_list_separator' "
f"cannot be blank on {cls.__name__}")
| 1.90625 | 2 |
app.py | heminsatya/free_notes | 0 | 12798696 | <filename>app.py
# Dependencies
from aurora import Aurora
# Instantiate the root app
root = Aurora()
# Run the root app
if __name__ == '__main__':
root.run()
| 1.570313 | 2 |
_states/ctags.py | mdavezac/pepper | 0 | 12798697 | def run(name, fields="+l", exclude=None, ctags="/usr/local/bin/ctags",
creates='tags'):
from os.path import join
if fields is None:
fields = []
elif isinstance(fields, str):
fields = [fields]
fields = " --fields=".join([""] + fields)
if exclude is None:
exclude = []
elif isinstance(exclude, str):
exclude = [exclude]
exclude = " --exclude=".join([""] + exclude)
cmd = "{ctags} -R {fields} {exclude} .".format(ctags=ctags, fields=fields,
exclude=exclude)
return __states__['cmd.run'](
name=cmd, cwd=name, creates=join(name, creates))
| 2.296875 | 2 |
setup.py | Cray-HPE/manifestgen | 0 | 12798698 | # MIT License
#
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from setuptools import setup
with open('requirements.txt', encoding='utf-8') as reqs_file:
REQUIREMENTS = reqs_file.read().splitlines()
setup(
name='manifestgen',
description="Loftsman manifest generator",
packages=['manifestgen'],
include_package_data=True,
    install_requires=REQUIREMENTS,
entry_points='''
[console_scripts]
manifestgen=manifestgen.generate:main
'''
)
| 1.398438 | 1 |
examples/zhexiantu.py | lora-chen/DeepCTR | 0 | 12798699 | print( -1.7280e+01 ) | 1.335938 | 1 |
dropup/up.py | Mr8/dropup | 0 | 12798700 | <reponame>Mr8/dropup
#!/usr/bin/env python
# encoding: utf-8
import upyun
import os
from .config import UPYUNCONFIG
from .trans import TranslatorIf
class UpyunCli(TranslatorIf):
'''Implemented of up yun client, inhanced from Translator'''
BUCKETNAME = UPYUNCONFIG.BUCKETNAME
def __init__(self):
self.operator = None
def login(self, *arg, **wargs):
print arg
user, pwd = arg
try:
self.operator = upyun.UpYun(self.BUCKETNAME, user,
pwd, timeout = 30, endpoint = upyun.ED_AUTO)
except Exception, e:
print '[ERROR]Login error:%s' %str(e)
return
return True
def upload(self, localPath, remotePath):
        if not localPath or not os.path.exists(localPath):
            print '[ERROR]Local file %s does not exist' %localPath
            return
if remotePath.endswith('/'):
remotePath += os.path.basename(localPath)
with open(localPath, 'rb') as fp:
try:
self.operator.mkdir(os.path.dirname(remotePath))
if not self.operator.put(remotePath, fp):
print '[ERROR] upload file %s error' %localPath
return
except Exception, e:
print '[ERROR] upload file except:%s' %str(e)
return
print '[INFO]Upload file %s success!' %localPath
def download(self, remotePath, localPath):
with open(localPath, 'wb') as fp:
try:
if not self.operator.get(remotePath, fp):
print '[ERROR]Download file %s failed' %remotePath
return
except Exception, e:
print '[ERROR]Download file error:%s' %str(e)
return
print '[INFO]Download file %s success' %remotePath
| 2.5 | 2 |
01-first-flask-app/dynamic_routing_2.py | saidulislam/flask-bootcamp-2 | 0 | 12798701 | """An example application to demonstrate Dynamic Routing"""
from flask import Flask
app = Flask(__name__)
@app.route("/")
def home():
""""View for the Home page of the Website"""
return "Welcome to the HomePage!"
@app.route('/square/<int:number>')
def show_square(number):
"""View that shows the square of the number passed by URL"""
return f"Square of {str(number)} is: {(number * number)}"
if __name__ == '__main__':
app.run(debug=True)
| 3.984375 | 4 |
logitch/discord_log.py | thomaserlang/logitch | 0 | 12798702 | try:
import discord
except ImportError:
raise Exception('''
The discord libary must be installed manually:
pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip
''')
import logging, asyncio, json, aiohttp
from dateutil.parser import parse
from datetime import datetime
from logitch import config, db
class Client(discord.Client):
async def on_connect(self):
if not hasattr(self, 'ahttp'):
self.ahttp = aiohttp.ClientSession()
self.db = await db.Db().connect(self.loop)
async def on_socket_response(self, data):
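        # Gateway opcode 0 (DISPATCH) carries the actual event payloads; all other
        # opcodes (heartbeats, hello, etc.) are ignored here.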
if data['op'] != 0:
return
msg = data['d']
try:
if data['t'] == 'MESSAGE_CREATE':
if 'content' not in msg:
return
if msg['type'] != 0:
return
await self.db.execute('''
INSERT INTO discord_entries
(id, server_id, channel_id, created_at, message, attachments, user, user_id, user_discriminator, member_nick) VALUES
(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
''', (
msg['id'],
msg['guild_id'],
msg['channel_id'],
parse(msg['timestamp']).replace(tzinfo=None),
msg['content'],
json.dumps(msg['attachments']) if msg['attachments'] else None,
msg['author']['username'],
msg['author']['id'],
msg['author']['discriminator'],
msg['member']['nick'] if 'nick' in msg['member'] else None,
))
elif data['t'] == 'MESSAGE_UPDATE':
if 'content' not in msg:
return
if msg['type'] != 0:
return
await self.db.execute('''
INSERT INTO discord_entry_versions
(entry_id, created_at, message, attachments)
SELECT
id, ifnull(updated_at, created_at), message, attachments
FROM discord_entries WHERE id=%s;
''', (msg['id'],)
)
await self.db.execute('''
UPDATE discord_entries SET
updated_at=%s,
message=%s,
attachments=%s
WHERE
id=%s;
''', (
parse(msg['edited_timestamp']).replace(tzinfo=None),
msg['content'],
json.dumps(msg['attachments']) if msg['attachments'] else None,
msg['id'],
))
elif data['t'] == 'MESSAGE_DELETE':
await self.db.execute('''
UPDATE discord_entries SET
deleted="Y",
deleted_at=%s
WHERE
id=%s;
''',
(datetime.utcnow(), msg['id'],)
)
except:
logging.exception('on_socket_response')
def main():
bot = Client()
bot.run(config['discord']['token'], bot=config['discord']['bot'])
if __name__ == '__main__':
from logitch import config_load, logger
config_load()
logger.set_logger('discord.log')
main() | 2.53125 | 3 |
bin/Package/PipPackageBase.py | Inokinoki/craft | 0 | 12798703 | from BuildSystem.PipBuildSystem import *
from Package.PackageBase import *
from Packager.PackagerBase import *
from Package.VirtualPackageBase import *
from Source.MultiSource import *
class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase):
"""provides a base class for pip packages"""
def __init__(self):
CraftCore.log.debug("PipPackageBase.__init__ called")
PackageBase.__init__(self)
if self.subinfo.svnTarget():
self.__class__.__bases__ += (MultiSource,)
MultiSource.__init__(self)
else:
self.__class__.__bases__ += (VirtualPackageBase,)
VirtualPackageBase.__init__(self)
PipBuildSystem.__init__(self)
PackagerBase.__init__(self)
# from PackagerBase
def createPackage(self):
return True
def preArchive(self):
return True
| 2.359375 | 2 |
2_Advanced_Images_3_TransferLearningAndFIneTuning2.py | BrunoDatoMeneses/TensorFlowTutorials | 0 | 12798704 | <reponame>BrunoDatoMeneses/TensorFlowTutorials
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
BATCH_SIZE = 32
IMG_SIZE = (160, 160)
train_dataset = image_dataset_from_directory(train_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
validation_dataset = image_dataset_from_directory(validation_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
class_names = train_dataset.class_names
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
val_batches = tf.data.experimental.cardinality(validation_dataset)
test_dataset = validation_dataset.take(val_batches // 5)
validation_dataset = validation_dataset.skip(val_batches // 5)
print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
data_augmentation = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
])
for image, _ in train_dataset.take(1):
plt.figure(figsize=(10, 10))
first_image = image[0]
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
plt.imshow(augmented_image[0] / 255)
plt.axis('off')
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
#rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1)
# Create the base model from the pre-trained model MobileNet V2
IMG_SHAPE = IMG_SIZE + (3,)
base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF')
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
base_model.trainable = True
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
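
# A possible continuation (a sketch, not taken from the original script): after freezing the
# lower layers, fine-tuning would typically recompile with a small learning rate and train
# for a few more epochs, e.g.
#
#   base_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
#                      loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
#                      metrics=['accuracy'])
#   base_model.fit(train_dataset, epochs=10, validation_data=validation_dataset)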
| 2.984375 | 3 |
utils/__init__.py | Ilya-koala/VSL_Bot | 0 | 12798705 | <filename>utils/__init__.py
from . import misc
from .db_api.json_db import Database
from .notify_admins import on_startup_notify
from .voice_recognition import voice_to_text
from .pages import *
| 1.09375 | 1 |
15/15.py | hiszm/python-train | 0 | 12798706 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 12 13:21:44 2021
@author: hiszm
"""
from sys import argv
script,filename = argv
txt = open (filename)
print ("Here's your file %r:" % filename)
print (txt.read())
print ("Type the filename again:")
file_again = input ("> ")
txt_again = open (file_again)
print (txt_again.read()) | 3.859375 | 4 |
mt/base/ndarray.py | inteplus/mtbase | 0 | 12798707 | <gh_stars>0
'''Useful functions dealing with numpy array.'''
import numpy as _np
__all__ = ['ndarray_repr']
def ndarray_repr(a):
'''Gets a one-line representation string for a numpy array.
Parameters
----------
a : numpy.ndarray
input numpy array
Returns
-------
str
a short representation string for the array
'''
if not isinstance(a, _np.ndarray):
raise TypeError("An ndarray expected. Got '{}'.".format(type(a)))
if a.size > 20:
return "ndarray(shape={}, dtype={}, min={}, max={}, mean={}, std={})".format(a.shape, a.dtype, a.min(), a.max(), a.mean(), a.std())
return "ndarray({}, dtype={})".format(repr(a.tolist()), a.dtype)
| 3.453125 | 3 |
normalize_shape.py | alexandor91/Data-Generation-Tool | 22 | 12798708 | '''
Normalize shapenet obj files to [-0.5, 0.5]^3.
author: ynie
date: Jan, 2020
'''
import sys
sys.path.append('.')
from data_config import shape_scale_padding, \
shapenet_path, shapenet_normalized_path
import os
from multiprocessing import Pool
from tools.utils import append_dir, normalize_obj_file
from tools.read_and_write import write_json, load_data_path
from settings import cpu_cores
def recursive_normalize(input_path, output_path):
'''
Normalize *.obj file recursively
:param input_path:
:param output_path:
:return:
'''
input_path = os.path.abspath(input_path)
output_path = os.path.abspath(output_path)
for root, _, files in os.walk(input_path, topdown=True):
for file in files:
input_file_path = os.path.join(root, file)
output_file_path = input_file_path.replace(input_path, output_path)
if not os.path.exists(os.path.dirname(output_file_path)):
os.makedirs(os.path.dirname(output_file_path))
if not file.endswith('.obj'):
if os.path.exists(output_file_path):
os.remove(output_file_path)
os.symlink(input_file_path, output_file_path)
continue
else:
# write obj file
size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json'
if os.path.exists(output_file_path) and os.path.exists(size_centroid_file):
continue
total_size, centroid = normalize_obj_file(input_file_path, output_file_path,
padding=shape_scale_padding)
size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()}
write_json(size_centroid_file, size_centroid)
def normalize(obj_path):
'''
normalize shapes
:param obj_path: ShapeNet object path
:return:
'''
cat, obj_file = obj_path.split('/')[3:5]
input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i')
output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o')
recursive_normalize(input_cat_dir, output_cat_dir)
if __name__ == '__main__':
if not os.path.exists(shapenet_normalized_path):
os.mkdir(shapenet_normalized_path)
all_objects = load_data_path(shapenet_path)
p = Pool(processes=cpu_cores)
p.map(normalize, all_objects)
p.close()
p.join() | 2.25 | 2 |
lochlanandcatherinecom/urls.py | Lochlan/LochlanAndCatherine.com | 1 | 12798709 | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from rsvps.views import GuestRsvpView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="home.html"), name='home'),
url(r'^about/$', TemplateView.as_view(template_name="about.html"), name='about'),
url(r'^event/$', TemplateView.as_view(template_name="event.html"), name='event'),
url(r'^registry/$', TemplateView.as_view(template_name="registry.html"), name='registry'),
url(r'^rsvp/(?P<pk>[0-9]+)/$', GuestRsvpView.as_view()),
url(r'^admin/', include(admin.site.urls)),
]
| 1.742188 | 2 |
test.py | latte488/smth-smth-v2 | 2 | 12798710 | import toml
import random
def main():
filename = 'test'
n_hidden = 64
mutual_infos = []
for i in range(n_hidden):
mutual_infos.append(random.random())
mutual_info_dict = {}
for i in range(n_hidden):
mutual_info_dict[f'{i:04}'] = mutual_infos[i]
with open(f'mutual_info_{filename}.toml', 'w') as f:
toml_str = toml.dump(mutual_info_dict, f)
print(toml_str)
main()
| 2.5 | 2 |
app/mocks/bme280.py | mygulamali/pi-sensors | 0 | 12798711 | from mocks.base import Base
class BME280(Base):
@property
def humidity(self) -> float: # %
return 100.0 * self.value
@property
def pressure(self) -> float: # hPa
return 1013.25 * self.value
@property
def temperature(self) -> float: # ºC
return 100.0 * self.value
| 2.4375 | 2 |
PythonExercicio/jogodavelha.py | fotavio16/PycharmProjects | 0 | 12798712 | import random
# Ask the player to choose a letter, O or X
def escolhaLetraJogador():
l = ""
while l != "O" and l != "X":
l = str(input('Escolha a letra que prefere jogar (O ou X): ')).upper()
if l == "O":
letras = ['O', "X"]
else:
letras = ['X', "O"]
return letras
# Randomly decide who goes first
def iniciaJogador():
if random.randint(1,2) == 1:
return True
else:
return False
def criaTabuleiro():
t = []
t.append('')
for i in range(9):
t.append(' ')
return t
# Display the board
def mostraTabuleiro(posi):
print(" | | ")
print(' {} | {} | {} '.format(posi[7],posi[8],posi[9]))
print(" | | ")
print("-----------")
print(" | | ")
print(' {} | {} | {} '.format(posi[4], posi[5], posi[6]))
print(" | | ")
print("-----------")
print(" | | ")
print(' {} | {} | {} '.format(posi[1], posi[2], posi[3]))
print(" | | ")
letras = escolhaLetraJogador()
vezJogador = iniciaJogador()
#tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X']
tabuleiro = criaTabuleiro()
mostraTabuleiro(tabuleiro)
# Player's turn
    # Display the board
    # Read the player's move
# Computer's turn
    # Decide the computer's move
        # 1) Make a winning move if one is available
        # 2) Block the player from winning on the next move
        # 3) Play the corners
        # 4) Play the center
        # 5) Play the sides
# Check whether there is a winner
# Check whether the game is a draw
# Ask whether the player wants to play again
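
# A minimal sketch of the "check whether there is a winner" step above, assuming the same
# 1-9 board layout that mostraTabuleiro() uses:
def verificaVencedor(t, letra):
    # All eight winning lines of the 3x3 board (rows, columns, diagonals).
    linhas = [(7, 8, 9), (4, 5, 6), (1, 2, 3),
              (7, 4, 1), (8, 5, 2), (9, 6, 3),
              (7, 5, 3), (9, 5, 1)]
    return any(t[a] == t[b] == t[c] == letra for a, b, c in linhas)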
| 3.921875 | 4 |
model.py | yanjingmao/NC_paper_source_code | 0 | 12798713 | import torch
import torch.nn as nn
class SeparableConv2D(nn.Module):
'''
Definition of Separable Convolution.
'''
def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'):
super(SeparableConv2D, self).__init__()
depthwise_conv_out_channels = in_channels * depth_multiplier
self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode)
self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False)
def forward(self, x):
x = self.depthwise_conv(x)
output = self.pointwise_conv(x)
return output
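
# A minimal shape check for SeparableConv2D; the tensor sizes below are arbitrary example values.
def _separable_conv_shape_check():
    x = torch.randn(1, 16, 32, 32)
    conv = SeparableConv2D(16, 32, kernel_size=3, padding=1)
    assert conv(x).shape == (1, 32, 32, 32)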
class Block1(nn.Module):
'''
Definition of Block 1.
'''
def __init__(self, in_channels):
super(Block1, self).__init__()
self.conv1 = nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 64, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.out_channels = 64
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = torch.relu(x)
return x
class Block2(nn.Module):
'''
Definition of Block 2.
'''
def __init__(self, in_channels):
super(Block2, self).__init__()
self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2), bias=False)
self.r_bn1 = nn.BatchNorm2d(128)
self.conv1 = SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(128)
self.conv2 = SeparableConv2D(128, 128, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(128)
self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)
self.out_channels = 128
def forward(self, x):
# Shortcut
rx = self.r_conv1(x)
rx = self.r_bn1(rx)
# Main way
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.mp3(x)
# Confluence
x = x + rx
return x
class Block3(nn.Module):
'''
Definition of Block 3.
'''
def __init__(self, in_channels):
super(Block3, self).__init__()
self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2), bias=False)
self.r_bn1 = nn.BatchNorm2d(256)
self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(256)
self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)
self.out_channels = 256
def forward(self, x):
# Shortcut
rx = self.r_conv1(x)
rx = self.r_bn1(rx)
# Main way
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.mp3(x)
# Confluence
x = x + rx
return x
class Block4(nn.Module):
'''
Definition of Block 4.
'''
def __init__(self, in_channels):
super(Block4, self).__init__()
self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(256)
self.conv3 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(256)
self.out_channels = 256
def forward(self, x):
# Shortcut
rx = x
# Main way
x = torch.relu(x)
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = torch.relu(x)
x = self.conv3(x)
x = self.bn3(x)
# Confluence
x = x + rx
return x
class Block5(nn.Module):
'''
Definition of Block 5.
'''
def __init__(self, in_channels):
super(Block5, self).__init__()
self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2), bias=False)
self.r_bn1 = nn.BatchNorm2d(512)
self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
self.conv2 = SeparableConv2D(256, 512, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(512)
self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)
self.out_channels = 512
def forward(self, x):
# Shortcut
rx = self.r_conv1(x)
rx = self.r_bn1(rx)
# Main way
x = torch.relu(x)
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.mp3(x)
# Confluence
x = x + rx
return x
class Block6(nn.Module):
'''
Definition of Block 6.
'''
def __init__(self, in_channels):
super(Block6, self).__init__()
self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(1024)
self.conv2 = SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(2048)
self.out_channels = 2048
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = torch.relu(x)
return x
class Network(nn.Module):
'''
Definition of the whole network with Block[1-6] utilized.
'''
def __init__(self, in_channels, num_classes, num_middle_layers=4):
super(Network, self).__init__()
self.block1 = Block1(in_channels)
self.block2 = Block2(self.block1.out_channels)
self.block3 = Block3(self.block2.out_channels)
assert num_middle_layers >= 0, f'Invalid number of layers, {num_middle_layers}'
if num_middle_layers != 0:
self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)])
self.block5 = Block5(self.block4_lst[0].out_channels)
else:
self.block5 = Block5(self.block3.out_channels)
self.block6 = Block6(self.block5.out_channels)
self.avg = nn.AdaptiveAvgPool2d(1)
self.final = nn.Linear(self.block6.out_channels, num_classes)
def forward(self, x):
x = self.block1(x)
x = self.block2(x) # half-sized length and high
x = self.block3(x) # half-sized length and high
        for block4 in getattr(self, 'block4_lst', []):
            x = block4(x)
x = self.block5(x) # half-sized length and high
x = self.block6(x)
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.final(x)
return x
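
# A minimal sketch showing how the network is instantiated and what it returns; the argument
# values below are arbitrary examples.
def _network_forward_check():
    net = Network(in_channels=3, num_classes=10, num_middle_layers=2)
    logits = net(torch.randn(2, 3, 64, 64))
    assert logits.shape == (2, 10)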
| 3.25 | 3 |
trump/extensions/source/tx-bbfetch/__init__.py | Equitable/trump | 8 | 12798714 | <reponame>Equitable/trump
from bbfetchext import * | 0.820313 | 1 |
dtask/task/apps.py | ysjiang4869/python_task | 0 | 12798715 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class TaskConfig(AppConfig):
name = 'task'
| 1.140625 | 1 |
units/jit_optimizations/image_loader_to_dataframe.py | dereina/pypreprocessing | 0 | 12798716 | <filename>units/jit_optimizations/image_loader_to_dataframe.py
import imageio
import utils
import os
import pandas as pd
import units.unit as unit
import time
from numba import njit, jit
@jit(cache=False, forceobj = True)
def getMetaData(append_by_name_meta_data, meta_data_extensions):
out = []
for entry in append_by_name_meta_data:
for extension in meta_data_extensions:
out.append((entry, extension))
return tuple(out)
def printTime(length, now):
print(length)
print("Ellapsed: " +str( time.time()-now))
@jit(cache=True)
def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors):
print(folders)
now = time.time()
print("eiiii youuuuu ")
row_list = []
for i in range(len(folders)):
path = folders[i]
print("new path:")
print(path)
count = 0
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)):
if count % 100 == 0:
printTime(count, now)
#i+=1
#if i > 5:
# break
img_name, imgext = os.path.splitext(name)
shape= (1,1)
if prevent_load_errors:
shape = utils.checkImageFromFileGetShape(path+"/"+name)
if load_errors is not None:
if shape == (1,1,3):
load_errors.append(path+"/"+name)
continue
else:
if imgext not in image_extensions:
print("Not a file image: ", name)
continue
#print(path+"/"+name)
#print("path: "+path)
#savepath = context.origin+"/"+path+"/"+name
#if prepend_category:
# if not name.startswith(path):
#print("preprend")
# os.rename(context.origin+"/"+path+"/"+name, context.origin+"/"+path+"/"+path+"-"+name)
# savepath = context.origin+"/"+path+"/"+path+"-"+name
# name = path+"-"+name
#if fix_bmp: #resave images if there is a problem with bmp header that won't load on some libraries but with imageio
# imageio.imwrite(savepath, img)
meta_data_list = []
#for entry in self.append_by_name_meta_data:
# for meta_data in utils.getFileNamesFrom(entry):
# name_meta, ext = os.path.splitext(meta_data)
# if img_name == name_meta:
#print(meta_data)
# meta_data_list.append(entry+"/"+meta_data)
#for entry in append_by_name_meta_data:
# for extension in meta_data_extensions:
# for filename in utils.getFileIfExistFrom(entry, img_name + extension):
#print(entry+"/"+filename)
# meta_data_list.append(entry+"/"+filename)
for (entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions):
filename = utils.getFileIfExistFrom(entry, img_name + extension)
if filename:
meta_data_list.append(entry+"/"+filename)
shape = (1,1)
neww = [path, path+"/"+name, name, shape[0] * shape[1], shape[1], shape[0], meta_data_list]
#row_df = pd.Series(neww)
#print(neww)
count += 1
row_list.append(neww)
#df.append(row_df, ignore_index=True)
#print(df)
#df.reset_index(drop=True)
#df.loc[len(df.index)]= neww
#except Exception as e:
# print(e)
# raise
#print(context.origin+"/"+path+"/"+name)
print("loaded: ", len(row_list))
print(load_errors)
return row_list | 2.34375 | 2 |
utils/trainer.py | niqbal996/ViewAL | 126 | 12798717 | import os
import torch
import constants
from utils.misc import get_learning_rate
from utils.summary import TensorboardSummary
from utils.loss import SegmentationLosses
from utils.calculate_weights import calculate_weights_labels
from torch.utils.data import DataLoader
import numpy as np
from utils.metrics import Evaluator
from tqdm import tqdm
import random
class Trainer:
def __init__(self, args, model, train_set, val_set, test_set, class_weights, saver):
self.args = args
self.saver = saver
self.saver.save_experiment_config()
self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)
self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, "train"))
self.train_writer = self.train_summary.create_summary()
self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, "validation"))
self.val_writer = self.val_summary.create_summary()
self.model = model
self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)}
train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
{'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]
if args.use_balanced_weights:
weight = torch.from_numpy(class_weights.astype(np.float32))
else:
weight = None
if args.optimizer == 'SGD':
print('Using SGD')
self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov)
elif args.optimizer == 'Adam':
print('Using Adam')
self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay)
else:
raise NotImplementedError
self.lr_scheduler = None
if args.use_lr_scheduler:
if args.lr_scheduler == 'step':
print('Using step lr scheduler')
self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(",")], gamma=0.1)
self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type)
self.evaluator = Evaluator(train_set.num_classes)
self.best_pred = 0.0
def training(self, epoch):
train_loss = 0.0
self.model.train()
num_img_tr = len(self.train_dataloader)
tbar = tqdm(self.train_dataloader, desc='\r')
visualization_index = int(random.random() * len(self.train_dataloader))
vis_img, vis_tgt, vis_out = None, None, None
self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch)
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
image, target = image.cuda(), target.cuda()
self.optimizer.zero_grad()
output = self.model(image)
loss = self.criterion(output, target)
loss.backward()
self.optimizer.step()
train_loss += loss.item()
tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch)
if i == visualization_index:
vis_img, vis_tgt, vis_out = image, target, output
self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch)
if constants.VISUALIZATION:
self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch)
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
print('Loss: %.3f' % train_loss)
print('BestPred: %.3f' % self.best_pred)
def validation(self, epoch, test=False):
self.model.eval()
self.evaluator.reset()
ret_list = []
if test:
tbar = tqdm(self.test_dataloader, desc='\r')
else:
tbar = tqdm(self.val_dataloader, desc='\r')
test_loss = 0.0
visualization_index = int(random.random() * len(self.val_dataloader))
vis_img, vis_tgt, vis_out = None, None, None
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
image, target = image.cuda(), target.cuda()
with torch.no_grad():
output = self.model(image)
if i == visualization_index:
vis_img, vis_tgt, vis_out = image, target, output
loss = self.criterion(output, target)
test_loss += loss.item()
tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
pred = torch.argmax(output, dim=1).data.cpu().numpy()
target = target.cpu().numpy()
self.evaluator.add_batch(target, pred)
Acc = self.evaluator.Pixel_Accuracy()
Acc_class = self.evaluator.Pixel_Accuracy_Class()
mIoU = self.evaluator.Mean_Intersection_over_Union()
mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20()
FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
if not test:
self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch)
self.val_writer.add_scalar('mIoU', mIoU, epoch)
self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch)
self.val_writer.add_scalar('Acc', Acc, epoch)
self.val_writer.add_scalar('Acc_class', Acc_class, epoch)
self.val_writer.add_scalar('fwIoU', FWIoU, epoch)
if constants.VISUALIZATION:
self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch)
print("Test: " if test else "Validation:")
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
print("Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU))
print('Loss: %.3f' % test_loss)
if not test:
new_pred = mIoU
if new_pred > self.best_pred:
self.best_pred = new_pred
self.saver.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
})
return test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list
def load_best_checkpoint(self):
checkpoint = self.saver.load_checkpoint()
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
print(f'=> loaded checkpoint - epoch {checkpoint["epoch"]})')
return checkpoint["epoch"]
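
# Illustrative usage sketch (the surrounding objects are assumptions):
#   trainer = Trainer(args, model, train_set, val_set, test_set, class_weights, saver)
#   for epoch in range(args.epochs):
#       trainer.training(epoch)
#       trainer.validation(epoch)
#   trainer.load_best_checkpoint()
#   trainer.validation(epoch, test=True)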
| 2.015625 | 2 |
queries/variables/schedule_vars.py | CaladBlogBaal/Victorique | 0 | 12798718 | <gh_stars>0
def get_schedule(page: str):
if not page.isdecimal():
return False
variables = {"page": page}
return variables
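
# Illustrative behaviour, assuming string input from a paginated query:
#   get_schedule("2")   -> {"page": "2"}
#   get_schedule("two") -> False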
| 2.1875 | 2 |
ComplementaryScripts/Step_02_DraftModels/Branch_carveme_draft.py | HaoLuoChalmers/Lactobacillus_reuteri_MM41A_GEM | 0 | 12798719 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by lhao at 2019-05-17
'''
input: L.reuteri protein sequence
output: draft model
'''
import os
import cobra
import My_def
import pandas as pd
os.chdir('../../ComplementaryData/Step_02_DraftModels/')
case = 'other' #'first' or 'other'
# %% <build>
if case == 'first':
#Gram positive
os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml');
#all
os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml');
# %% <standardization>
def CarveMe_processing(covermemodel):
#change gene id 'G_id'
for gen in covermemodel.genes:
gen.id = gen.id.replace('G_','')
# combine met according report
My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel)
return covermemodel
Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml')
Lreu_ca_gp.description = 'GEM of L reuteri by CarveMe'
Lreu_ca_gp.id = 'Lreu_ca_gp'
Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml')
Lreu_ca.description = 'GEM of L reuteri by CarveMe'
Lreu_ca.id = 'Lreu_ca'
bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\t')
bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\t')
Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_')
Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df,
compartment='_')
# %% <Manual change according to the report>
Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized)
Lreu_ca_standardlized = CarveMe_processing(Lreu_ca_standardlized)
cobra.io.save_json_model(Lreu_ca_standardlized, 'CarveMe/Lreu_ca.json')
cobra.io.save_json_model(Lreu_ca_gp_standardlized, 'CarveMe/Lreu_ca_gp.json')
#My_def.io_outtxt(Lreu_ca,'CarveMe/Lreu_ca.txt',True)
#My_def.io_outtxt(Lreu_ca_gp,'CarveMe/Lreu_ca_gp.txt',True)
| 2.03125 | 2 |
tools/cal_effect_field_tool.py | yuanliangxie/YOLOv3_simple_baseline | 1 | 12798720 | import torch.nn as nn
import torch
import numpy as np
import cv2 as cv
def calculate_EPR(model):  # TODO: try computing the effective receptive field using pretrained weights
for module in model.modules():
try:
nn.init.constant_(module.weight, 0.05)
nn.init.zeros_(module.bias)
nn.init.zeros_(module.running_mean)
nn.init.ones_(module.running_var)
except Exception as e:
pass
if type(module) is nn.BatchNorm2d:
module.eval()
input = torch.ones(1, 3, 640, 640, requires_grad= True)
model.zero_grad()
features = model(input)
for i in range(len(features)):
# if i != len(features)-1:
# continue
x = features[i]
#g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]])
g_x = torch.zeros_like(x)
h, w = g_x.shape[2]//2, g_x.shape[3]//2
g_x[:, :, h, w] = 1
x.backward(g_x, retain_graph = True)
# x = torch.mean(x, 1, keepdim=True)
# fake_fp = x * g_x[0, 0, ...]
# fake_loss = torch.mean(fake_fp)
# fake_loss.backward(retain_graph=True)
show(input, i)
model.zero_grad()
input.grad.data.zero_()
cv.waitKey(2000)
cv.waitKey(0)
def cal_rf_wh(grad_input):
binary_map: np.ndarray = (grad_input[:, :] > 0.0)
x_cs: np.ndarray = binary_map.sum(-1) >= 1
y_cs: np.ndarray = binary_map.sum(0) >= 1
width = x_cs.sum()
height = y_cs.sum()
return (width, height)
def show(input, i):
grad_input = np.abs(input.grad.data.numpy())
grad_input = grad_input / np.max(grad_input)
grad_input = grad_input.mean(0).mean(0)
# 有效感受野 0.75 - 0.85
#grad_input = np.where(grad_input > 0.85,1,0)
#grad_input_ = np.where(grad_input > 0.75, 1, grad_input)
# effient_values = grad_input > 0.0
# samll_effient_values = grad_input <= 0.2
# grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1
#grad_input = grad_input * 100
width, height = cal_rf_wh(grad_input)
print("width:", width, "height:", height)
grad_input_ERF = np.where(grad_input>0.01, 1, 0)
width, height = cal_rf_wh(grad_input_ERF)
print("ERF_width:", width, "ERF_height:", height)
np.expand_dims(grad_input, axis=2).repeat(3, axis=2)
grad_input = (grad_input * 255).astype(np.uint8)
cv.imshow("receip_field"+str(i), grad_input)
#cv.imwrite("./receip_field"+str(i)+".png", grad_input)
| 2.59375 | 3 |
Projects/Foreign_Exchange/helper_python/help_display.py | pedwards95/Springboard_Class | 0 | 12798721 | from flask import Flask, flash
def display_error(error, preface="", postface=""):
flash(f"{preface} {error} {postface}") | 2.296875 | 2 |
typing_protocol_intersection/mypy_plugin.py | klausweiss/typing-protocol-intersection | 0 | 12798722 | import sys
import typing
from collections import deque
from typing import Callable, Optional
import mypy.errorcodes
import mypy.errors
import mypy.nodes
import mypy.options
import mypy.plugin
import mypy.types
if sys.version_info >= (3, 10): # pragma: no cover
from typing import TypeGuard
else: # pragma: no cover
from typing_extensions import TypeGuard
SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext]
class ProtocolIntersectionPlugin(mypy.plugin.Plugin):
# pylint: disable=unused-argument
def get_type_analyze_hook(
self, fullname: str
) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]:
if fullname == "typing_protocol_intersection.types.ProtocolIntersection":
return type_analyze_hook(fullname)
return None
def get_method_signature_hook(
self, fullname: str
) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]:
return intersection_function_signature_hook
def get_function_signature_hook(
self, fullname: str
) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]:
return intersection_function_signature_hook
class TypeInfoWrapper(typing.NamedTuple):
type_info: mypy.nodes.TypeInfo
base_classes: typing.List[mypy.nodes.TypeInfo]
class IncomparableTypeName(str):
"""A string that never returns True when compared (equality) with another instance of this type."""
def __eq__(self, x: object) -> bool:
if isinstance(x, IncomparableTypeName):
return False
return super().__eq__(x)
def __hash__(self) -> int: # pylint: disable=useless-super-delegation
return super().__hash__()
def mk_protocol_intersection_typeinfo(
name: str,
*,
# For ProtocolIntersections to not be treated as the same type, but just as protocols,
    # their fullnames need to differ - which is why it is an IncomparableTypeName.
fullname: IncomparableTypeName,
symbol_table: Optional[mypy.nodes.SymbolTable] = None,
) -> mypy.nodes.TypeInfo:
defn = mypy.nodes.ClassDef(
name=name,
defs=mypy.nodes.Block([]),
base_type_exprs=[
mypy.nodes.NameExpr("typing.Protocol"),
# mypy expects object to be here at the last index ('we skip "object" since everyone implements it')
mypy.nodes.NameExpr("builtins.object"),
],
type_vars=[],
)
defn.fullname = IncomparableTypeName(fullname)
defn.info.is_protocol = True
type_info = mypy.nodes.TypeInfo(
names=symbol_table if symbol_table is not None else mypy.nodes.SymbolTable(),
defn=defn,
module_name="typing_protocol_intersection",
)
type_info.mro = [type_info]
type_info.is_protocol = True
return type_info
class ProtocolIntersectionResolver:
def fold_intersection_and_its_args(self, type_: mypy.types.Type) -> mypy.types.Type:
folded_type = self.fold_intersection(type_)
if isinstance(folded_type, mypy.types.Instance):
folded_type.args = tuple(self.fold_intersection(t) for t in folded_type.args)
return folded_type
def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type:
if not self._is_intersection(type_):
return type_
type_info = mk_protocol_intersection_typeinfo(
"ProtocolIntersection",
fullname=IncomparableTypeName("typing_protocol_intersection.types.ProtocolIntersection"),
)
type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info, []))
args = [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes]
return mypy.types.Instance(type_info_wrapper.type_info, args=args)
def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper:
intersections_to_process = deque([type_])
while intersections_to_process:
intersection = intersections_to_process.popleft()
for arg in intersection.args:
if self._is_intersection(arg):
intersections_to_process.append(arg)
continue
if isinstance(arg, mypy.types.Instance):
self._add_type_to_intersection(intersection_type_info_wrapper, arg)
return intersection_type_info_wrapper
@staticmethod
def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None:
name_expr = mypy.nodes.NameExpr(typ.type.name)
name_expr.node = typ.type
intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr)
intersection_type_info_wrapper.type_info.mro.insert(0, typ.type)
intersection_type_info_wrapper.base_classes.insert(0, typ.type)
@staticmethod
def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]:
return isinstance(typ, mypy.types.Instance) and typ.type.fullname == (
"typing_protocol_intersection.types.ProtocolIntersection"
)
def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike:
resolver = ProtocolIntersectionResolver()
signature = context.default_signature
signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type)
signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types]
return signature
def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]:
def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type:
args = tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args)
symbol_table = mypy.nodes.SymbolTable()
for arg in args:
if isinstance(arg, mypy.types.Instance):
if not arg.type.is_protocol:
context.api.fail(
"Only Protocols can be used in ProtocolIntersection.", arg, code=mypy.errorcodes.VALID_TYPE
)
symbol_table.update(arg.type.names)
type_info = mk_protocol_intersection_typeinfo(
context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table
)
return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column)
return _type_analyze_hook
def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]:
# ignore version argument if the plugin works with all mypy versions.
return ProtocolIntersectionPlugin
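
# Illustrative user-side sketch (assumed user code; with the plugin enabled in mypy's
# configuration, an intersection of protocols can be written roughly like this):
#
#   from typing import Protocol
#   from typing_protocol_intersection.types import ProtocolIntersection
#
#   class HasX(Protocol):
#       x: int
#
#   class HasY(Protocol):
#       y: str
#
#   def use(obj: ProtocolIntersection[HasX, HasY]) -> None:
#       print(obj.x, obj.y)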
| 1.867188 | 2 |
epuap_watchdog/institutions/migrations/0014_auto_20170718_0443.py | ad-m/epuap-watchdog | 2 | 12798723 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-18 04:43
from __future__ import unicode_literals
from django.db import migrations, models
def update_names(apps, schema_editor):
for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator():
x.name = x.data.get('nazwa', '')
x.save()
for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator():
x.name = x.data.get('name', '')
x.save()
class Migration(migrations.Migration):
dependencies = [
('institutions', '0013_auto_20170718_0256'),
]
operations = [
migrations.AddField(
model_name='regon',
name='name',
field=models.CharField(default='', max_length=200, verbose_name='Name'),
preserve_default=False,
),
migrations.AddField(
model_name='resp',
name='name',
field=models.CharField(default='', max_length=200, verbose_name='Name'),
preserve_default=False,
),
migrations.RunPython(update_names)
]
| 1.960938 | 2 |
main/auth/github.py | chdb/DhammaMap1 | 0 | 12798724 | # coding: utf-8
# pylint: disable=missing-docstring, invalid-name
import flask
import auth
import config
from main import app
import model.user as user #import User#, UserVdr
github_config = dict(
access_token_method='POST',
access_token_url='https://github.com/login/oauth/access_token',
authorize_url='https://github.com/login/oauth/authorize',
base_url='https://api.github.com/',
# consumer_key=config.CONFIG_DB.auth_github_id,
# consumer_secret=config.CONFIG_DB.auth_github_secret,
request_token_params={'scope': 'user:email'},
)
github = auth.create_oauth_app(github_config, 'github')
@app.route('/_s/callback/github/oauth-authorized/')
def github_authorized():
response = github.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(flask.url_for('index'))
flask.session['oauth_token'] = (response['access_token'], '')
me = github.get('user')
usr = retrieve_user_from_github(me.data)
return auth.signin_via_social(usr)
@github.tokengetter
def get_github_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/github/')
def signin_github():
return auth.signin_oauth(github)
def retrieve_user_from_github(response):
auth_id = 'github_%s' % str(response['id'])
    usr = user.User.get_by('authIDs_', auth_id)
bio = response['bio'][:user.bio_span[1]] if response['bio'] else ''
location = response['location'][:user.location_span[1]] if response['location'] else ''
return usr or auth.create_or_get_user_db(
auth_id,
response.get('name', ''),
response.get('login'),
response.get('email', ''),
location=location,
bio=bio,
github=response.get('login')
)
# Todo replace opaque and repeated code such as
# bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else ''
# with
# bio = getField(response, 'bio')
def getField(response, name):
field = response[name]
if field:
span = name + '_span' # depend on validators following this naming convention
max = getattr(user, span)[1]
return field [:max]
return ''
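
# With the helper above, the two assignments in retrieve_user_from_github would reduce to
# (a sketch of the TODO, not applied here):
#   bio = getField(response, 'bio')
#   location = getField(response, 'location')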
| 2.46875 | 2 |
src/amtrakconn/__init__.py | scienceopen/amtrak-connections | 0 | 12798725 | <reponame>scienceopen/amtrak-connections
from pathlib import Path
from urllib.request import urlopen
from numpy import nan, in1d, atleast_1d, logical_and
from datetime import timedelta
from zipfile import ZipFile
from bs4 import BeautifulSoup
from re import compile
from io import StringIO
from time import sleep
import pandas as pd
from matplotlib.pyplot import figure, show
"""
This function should be used politely and sparingly
"""
# from http.client import HTTPConnection
def plottrains(delays, actual, days, trains, dates, doplot):
"""
http://stackoverflow.com/questions/11697709/comparing-two-lists-in-python
can connection be made?
"""
# set_trace()
if (
len(trains) == 2
and len(actual[trains[0]]) > 0
and len(actual[trains[1]]) > 0
and len(dates) > int(days[trains[0]])
):
stations = []
for t in trains:
stations.append(delays[t].index.values.tolist())
overlapind = in1d(stations[0], stations[1])
overlapstation = atleast_1d(stations[0][overlapind])
if overlapstation.size == 1:
overlapstation = overlapstation[0]
otherind = in1d(stations[1], overlapstation)
if otherind > overlapind:
daydiff = int(days[trains[1]]) - 1
arrival = actual[trains[1]].ix[overlapstation, :-daydiff]
depart = actual[trains[0]].ix[overlapstation, daydiff:]
else:
daydiff = int(days[trains[0]]) - 1
arrival = actual[trains[0]].ix[overlapstation, :-daydiff]
depart = actual[trains[1]].ix[overlapstation, daydiff:]
# set_trace()
goodtimes = logical_and(depart.notnull(), arrival.notnull()).values
timelefthours = (
(depart[goodtimes].values - arrival[goodtimes].values).astype(float) / 1e9 / 3600
)
timelefthours = pd.DataFrame(
timelefthours, index=depart[goodtimes].index, columns=["hoursleft"]
)
missedind = (timelefthours < 0).values
missedhours = timelefthours[missedind]
if missedind.sum() > 0:
print(missedhours)
else:
print("no missed connections detected for " + str(trains))
if goodtimes.size < 6 and in1d(["conn", "all"], doplot).any():
ax = timelefthours.plot(ax=figure().gca(), marker=".", legend=False)
ax.set_xlabel("date")
elif in1d(["conn", "all"], doplot).any():
ax = timelefthours.boxplot(
return_type="axes", rot=90, whis=[10, 90], ax=figure().gca()
)
ax.set_title(str(trains) + " made connection at " + overlapstation)
ax.set_ylabel("Hours left to connect")
show()
# print(goodtimes)
# print(depart[goodtimes].index)
# print((depart.values-arrival.values))
# print((depart.values-arrival.values).astype(float))
# print(arrival.values)
# print(depart.values)
elif overlapstation.size == 0:
print("no connecting station found")
else:
print("more than 1 connection found, this case isnt handled yet")
else:
print("skipped connection analysis due to missing train info or too few dates")
def plottrain(delay, train, dates, stop, doplot):
if stop is None:
stop = -1
laststop = delay.index[-1]
else:
laststop = stop
if doplot and delay.shape[1] > 0 and in1d(["delay", "all"], doplot).any():
if delay.shape[1] < 6:
ax = delay.plot(ax=figure().gca())
ax.legend(loc="best", fontsize=8)
else:
ax = delay.T.boxplot(return_type="axes", rot=90, whis=[10, 90], ax=figure().gca())
ax.set_xlabel("Station")
ax.set_ylabel("hours delay")
ax.set_title(
"Train #"
+ train
+ " "
+ dates[0].strftime("%Y/%m/%d")
+ " to "
+ dates[-1].strftime("%Y/%m/%d")
)
if delay.shape[1] > 1:
# late vs. date end of route
ax = delay.ix[stop].plot(
ax=figure().gca(), linestyle="", marker="*"
) # plots last station
ax.set_title("Hours late to " + laststop)
ax.set_ylabel("Hours Late")
ax.set_xlabel("date")
# histogram
ax = delay.ix[stop].hist(ax=figure().gca(), normed=1, bins=12)
ax.set_title("Histogram: Hours late to " + laststop)
ax.set_xlabel("Hours Late")
ax.set_ylabel("p(late)")
show()
else:
print("* skipped plotting due to no data")
def tohdf5(fn, data, date):
from pandas import HDFStore
h5 = HDFStore(fn)
h5[date.strftime("d%Y%m%d")] = data
h5.close()
def tozip(zipfn, txt, date, train):
from zipfile import ZIP_DEFLATED
# store as text file like website
# ziptop = 'test' + buildziptop(train,date)
with ZipFile(zipfn, "a") as z:
zippath = buildzippath(train, date)
z.writestr(zippath, txt, compress_type=ZIP_DEFLATED)
# %%
def getday(datafn, date, train, zipfn, doscrape):
try:
txt = filehandler(datafn, train, date)
except FileNotFoundError:
if doscrape:
print("* WARNING: beginning web scrape--be polite, they ban for overuse!")
url = buildurl(train, date)
# mass download, throttle to be polite
sleep(2)
html = gethtml(url)
txt = gettxt(html)
else:
exit(
"you dont seem to have the needed data file for Train # "
+ train
+ " on "
+ date.strftime("%Y-%m-%d")
)
if zipfn is not None:
print("writing " + date.strftime("%Y-%m-%d") + " to " + zipfn)
tozip(zipfn, txt, date, train)
try:
data = getdata(txt, date)
except StopIteration:
data = None
print("failed to process " + date.strftime("%Y-%m-%d"))
return data
# %%
def gettxt(html):
soup = BeautifulSoup(html)
txt = soup.get_text()
return txt
def getdata(txt, datereq):
# %% first the departures
data, datestr = getdept(txt, datereq)
data["sked"] = str2datetime(data["sked"], data["day"], datestr)
data["act"] = str2datetime(data["act"], data["day"], datestr)
# %% have to skip ahead a day when delay rolls past midnight!
# train wouldn't be more than 4 hours early!
dayflip = (data["act"] - data["sked"]).astype("timedelta64[h]") < -4 # hours
data.ix[dayflip, "act"] += timedelta(days=1)
data["delayhours"] = (data["act"] - data["sked"]).astype(
"timedelta64[m]"
) / 60 # .values.astype(float)/1e9/3600
data["diffdelay"] = data["delayhours"].diff()
# we don't expect the delay to jump more than 12 hours between stations
if (data["diffdelay"].abs() > 12).any():
print("** WARNING: excessive time difference detected, possible parsing error!")
print(txt)
print(data)
data = None
return data
def getdept(txt, datereq):
firstheadpat = compile(r"\d{2}/\d{2}/\d{4}") # not for zip files!
# trainpat = compile('(?<=\* Train )\d+')
lastheadpat = compile(r"^\* V")
datestr = None
with StringIO(txt) as inpt:
for line in inpt:
tmp = firstheadpat.findall(line)
if len(tmp) > 0:
datestr = tmp[0]
if len(lastheadpat.findall(line)) > 0:
if datestr is None:
                    # must be a zip file where no dates are given
datestr = datereq.strftime("%m/%d/%Y")
break
# data = read_fwf(inpt,colspecs=[(2,5),(10,15),(16,17),(19,24),(25,30),(31,36)],skiprows=0)
# data.columns = ['city','skedarv','skeddep','actarv','actdep']
data = pd.read_fwf(
inpt,
colspecs=[(2, 5), (16, 17), (19, 24), (31, 36)],
index_col=0,
header=None,
skiprows=0,
)
# %% append last arrival (destination)
arv = getarv(txt)
# %% drop blank rows before appending arrival
data = data.dropna(axis=0, how="all") # needed for trailing blank lines
data = data.replace("*", nan) # now that blank lines are gone, we swap for nan
data.ix[-1] = arv.ix[0] # we know arrival is one line, the last line of the file
data.columns = ["day", "sked", "act"]
return data, datestr
def getarv(txt):
llrgx = compile("(?<=\n).+(?=\r*\n+$)") # no \r in lookbehind
lastline = llrgx.findall(txt)[0]
with StringIO(lastline) as inpt:
arv = pd.read_fwf(
inpt,
colspecs=[(2, 5), (7, 8), (10, 15), (25, 30)],
index_col=0,
header=None,
skiprows=0,
converters={1: str},
)
return arv
def str2datetime(data, day, datestr):
dstr = data.str.extract(r"(\d+)")
ampm = data.str.extract("([AP])") + "M"
dint = dstr.astype(float) # int can't use nan
# ZERO PAD HOURS
for i, sd in enumerate(dint):
if sd != "NaN":
dstr[i] = "{:04d}".format(sd.astype(int))
dstr = datestr + "T" + dstr + ampm # add date to front
# finally put to datetime
datadt = pd.to_datetime(
dstr, format="%m/%d/%YT%I%M%p", utc=True
) # seems to put time-zone aware to Eastern time..
# multi-day trips
datadt[day == "2"] += timedelta(days=1) # NOT relativedelta(days=1)
datadt[day == "3"] += timedelta(days=2)
return datadt
def buildurl(trainnum, date):
url = "http://dixielandsoftware.net/cgi-bin/gettrain.pl?seltrain="
url += str(trainnum)
url += "&selyear=" + date.strftime("%Y")
url += "&selmonth=" + date.strftime("%m")
url += "&selday=" + date.strftime("%d")
return url
def buildziptop(train, date):
return Path(date.strftime("%Y")) / (str(train) + ".zip")
def buildzippath(train, date):
return Path(train) / "".join([train, "_", date.strftime("%Y%m%d"), ".txt"])
def filehandler(fn, train, date):
fn = Path(fn).expanduser()
if fn.suffix in "html": # single train
with open(fn, "r") as f:
html = f.read()
txt = [gettxt(html)]
elif fn.suffix == "txt": # single train
with open(fn, "r") as f:
txt = [f.read()]
elif fn.suffix == "": # single or multiple trains
try:
ziptop = buildziptop(train, date)
with ZipFile(ziptop, "r") as z:
zippath = buildzippath(train, date)
with z.open(zippath, "r") as f:
txt = f.read().decode("utf-8")
except KeyError:
print("I dont find", zippath)
txt = None
else:
raise ValueError("I dont know how to parse", fn)
return txt
def gethtml(url):
response = urlopen(url)
html = response.read().decode("utf-8")
# session.request("GET", url)
# response = session.getresponse()
# if response.status == 200:
# html = response.read().decode('utf-8')
# elif response.status == 301:
# print('** 301 moved to ' + str(response.getheader('Location')))
# else:
# print('** error ' + str(response.status) + ' could not read ' + url)
# html = '** could not read ' + url
return html
| 3.203125 | 3 |
joplin/pages/service_page/factories.py | cityofaustin/joplin | 15 | 12798726 | from pages.service_page.models import ServicePage
from pages.topic_page.factories import JanisBasePageWithTopicsFactory
from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify
class ServicePageFactory(JanisBasePageWithTopicsFactory):
@classmethod
def create(cls, *args, **kwargs):
if 'dynamic_content' in kwargs:
kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content'])
step_keywords = ['steps', 'steps_es']
for step_keyword in step_keywords:
if step_keyword in kwargs:
kwargs[step_keyword] = streamfieldify(kwargs[step_keyword])
return super(ServicePageFactory, cls).create(*args, **kwargs)
class Meta:
model = ServicePage
| 2 | 2 |
src/pyfonycore/bootstrap/config/Config.py | pyfony/core | 0 | 12798727 | <filename>src/pyfonycore/bootstrap/config/Config.py
class Config:
def __init__(
self,
container_init_function: callable,
kernel_class: type,
root_module_name: str,
allowed_environments: list,
):
self.__container_init_function = container_init_function
self.__kernel_class = kernel_class
self.__root_module_name = root_module_name
self.__allowed_environments = allowed_environments
@property
def container_init_function(self):
return self.__container_init_function
@property
def kernel_class(self):
return self.__kernel_class
@property
def root_module_name(self):
return self.__root_module_name
@property
def allowed_environments(self):
return self.__allowed_environments
| 2.1875 | 2 |
leetcode/majority-element.py | zhangao0086/Python-Algorithm | 3 | 12798728 | <reponame>zhangao0086/Python-Algorithm
#!/usr/bin/python3
# -*-coding:utf-8-*-
__author__ = "Bannings"
from typing import List
class Solution:
def majorityElement(self, nums: List[int]) -> int:
count, candidate = 0, 0
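        # Boyer-Moore majority vote: keep one candidate and a counter; a matching element
        # increments the counter, a different one decrements it, and when the counter hits
        # zero the next element becomes the new candidate. The majority element (guaranteed
        # to exist by the problem statement) always survives this process.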
for num in nums:
if count == 0:
candidate = num
count += (1 if num == candidate else -1)
return candidate
if __name__ == '__main__':
assert Solution().majorityElement([3,2,3]) == 3
assert Solution().majorityElement([2,2,1,1,1,2,2]) == 2 | 3.703125 | 4 |
python/dlbs/utils.py | tfindlay-au/dlcookbook-dlbs | 0 | 12798729 | # (c) Copyright [2017] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Two classes are define here :py:class:`dlbs.IOUtils` and :py:class:`dlbs.DictUtils`.
"""
import os
import copy
import json
import gzip
import re
import logging
import subprocess
import importlib
from multiprocessing import Process
from multiprocessing import Queue
from glob import glob
from dlbs.exceptions import ConfigurationError
class OpenFile(object):
"""Class that can work with gzipped and regular textual files."""
def __init__(self, fname, mode='r'):
self.__fname = fname
self.__flags = ['rb', 'r'] if mode == 'r' else ['wb', 'w']
def __enter__(self):
if self.__fname.endswith('.gz'):
self.__fobj = gzip.open(self.__fname, self.__flags[0])
else:
self.__fobj = open(self.__fname, self.__flags[1])
return self.__fobj
def __exit__(self, type, value, traceback):
self.__fobj.close()
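
# A minimal usage sketch, assuming the named files exist; OpenFile picks gzip or plain-text
# handling from the extension, so both reads below go through the same code path.
def _openfile_usage_example():
    for name in ('results.json', 'results.json.gz'):
        with OpenFile(name, 'r') as fobj:
            print(fobj.read())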
class IOUtils(object):
"""Container for input/output helpers"""
@staticmethod
def mkdirf(file_name):
"""Makes sure that parent folder of this file exists.
The file itself may not exist. A typical usage is to ensure that we can
write to this file. If path to parent folder does not exist, it will be
created.
See documentation for :py:func:`os.makedirs` for more details.
:param str file_name: A name of the file for which we want to make sure\
its parent directory exists.
"""
dir_name = os.path.dirname(file_name)
if dir_name != '' and not os.path.isdir(dir_name):
os.makedirs(dir_name)
@staticmethod
def find_files(directory, file_name_pattern, recursively=False):
"""Find files in a directory, possibly, recursively.
        Find files whose names match the *file_name_pattern* pattern in the folder
        *directory*. If *recursively* is True, subfolders are scanned as well.
:param str directory: A directory to search files in.
        :param str file_name_pattern: A file name pattern to search. For instance,
                                      it can be '*.log'
:param bool recursively: If True, search in subdirectories.
:return: List of file names satisfying *file_name_pattern* pattern.
"""
if not recursively:
files = [f for f in glob(os.path.join(directory, file_name_pattern))]
else:
files = [f for p in os.walk(directory) for f in glob(os.path.join(p[0], file_name_pattern))]
return files
@staticmethod
def gather_files(path_specs, file_name_pattern, recursively=False):
"""Find/get files specified by an `inputs` parameter.
:param list path_specs: A list of file names / directories.
:param str file_name_pattern: A file name pattern to search. Only
used for entries in path_specs that
are directories.
:param bool recursively: If True, search in subdirectories. Only used
for entries in path_specs that are directories.
:return: List of file names satisfying *file_name_pattern* pattern.
"""
files = []
for path_spec in path_specs:
if os.path.isdir(path_spec):
files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively))
elif os.path.isfile(path_spec):
files.append(path_spec)
return files
@staticmethod
def get_non_existing_file(file_name, max_attempts = 1000):
"""Return file name that does not exist.
:param str file_name: Input file name.
:rtype: str
:return: The 'file_name' if this file does not exist else find first
file name that file does not exist.
"""
if not os.path.exists(file_name):
return file_name
attempt = 0
while True:
candidate_file_name = "%s.%d" % (file_name, attempt)
if not os.path.exists(candidate_file_name):
return candidate_file_name
attempt += 1
if attempt >= max_attempts:
msg = "Cannot find non existing file from pattern %s"
raise ValueError(msg % file_name)
@staticmethod
def check_file_extensions(fname, extensions):
"""Checks that fname has one of the provided extensions.
:param str fname: The file name to check.
:param tuple extensions: A tuple of extensions to use.
        Raises an exception if fname does not end with one of the extensions.
"""
if fname is None:
return
assert isinstance(extensions, tuple), "The 'extensions' must be a tuple."
if not fname.endswith(extensions):
raise ValueError("Invalid file extension (%s). Must be one of %s" % extensions)
@staticmethod
def read_json(fname, check_extension=False):
"""Reads JSON object from file 'fname'.
:param str fname: File name.
:param boolean check_extension: If True, raises exception if fname does not end
with '.json' or '.json.gz'.
:rtype: None or JSON object
        :return: None if fname is None, else the JSON object loaded from the file.
"""
if fname is None:
return None
if check_extension:
IOUtils.check_file_extensions(fname, ('.json', '.json.gz'))
with OpenFile(fname, 'r') as fobj:
return json.load(fobj)
@staticmethod
def write_json(fname, data, check_extension=False):
""" Dumps *dictionary* as a json object to a file with *file_name* name.
:param dict dictionary: Dictionary to serialize.
:param any data: A data to dump into a JSON file.
:param str file_name: Name of a file to serialie dictionary in.
"""
if fname is None:
raise ValueError("File name is None")
if check_extension:
IOUtils.check_file_extensions(fname, ('.json', '.json.gz'))
IOUtils.mkdirf(fname)
with OpenFile(fname, 'w') as fobj:
json.dump(data, fobj, indent=4)
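# Illustrative usage sketch (file name is made up): write_json/read_json round-trip,
# with gzip selected transparently by the '.gz' suffix via OpenFile.
#     IOUtils.write_json('summary.json.gz', {'status': 'ok'})
#     summary = IOUtils.read_json('summary.json.gz', check_extension=True)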
class DictUtils(object):
"""Container for dictionary helpers."""
@staticmethod
def subdict(dictionary, keys):
"""Return subdictionary containing only keys from 'keys'.
:param dict dictionary: Input dictionary.
:param list_or_val keys: Keys to extract
:rtype: dict
:return: Dictionary that contains key/value pairs for key in keys.
"""
if keys is None:
return dictionary
return dict((k, dictionary[k]) for k in keys if k in dictionary)
@staticmethod
def contains(dictionary, keys):
"""Checkes if dictionary contains all keys in 'keys'
:param dict dictionary: Input dictionary.
:param list_or_val keys: Keys to find in dictionary
:rtype: boolean
:return: True if all keys are in dictionary or keys is None
"""
if keys is None:
return True
keys = keys if isinstance(keys, list) else [keys]
for key in keys:
if key not in dictionary:
return False
return True
@staticmethod
def ensure_exists(dictionary, key, default_value=None):
""" Ensures that the dictionary *dictionary* contains key *key*
If key does not exist, it adds a new item with value *default_value*.
The dictionary is modified in-place.
:param dict dictionary: Dictionary to check.
:param str key: A key that must exist.
:param obj default_value: Default value for key if it does not exist.
"""
if key not in dictionary:
dictionary[key] = copy.deepcopy(default_value)
@staticmethod
def lists_to_strings(dictionary, separator=' '):
""" Converts every value in dictionary that is list to strings.
For every item in *dictionary*, if type of a value is 'list', converts
this list into a string using separator *separator*.
        The dictionary is modified in-place.
:param dict dictionary: Dictionary to modify.
:param str separator: An item separator.
"""
for key in dictionary:
if isinstance(dictionary[key], list):
dictionary[key] = separator.join(str(elem) for elem in dictionary[key])
@staticmethod
def filter_by_key_prefix(dictionary, prefix, remove_prefix=True):
"""Creates new dictionary with items which keys start with *prefix*.
Creates new dictionary with items from *dictionary* which keys
names starts with *prefix*. If *remove_prefix* is True, keys in new
dictionary will not contain this prefix.
The dictionary *dictionary* is not modified.
:param dict dictionary: Dictionary to search keys in.
:param str prefix: Prefix of keys to be extracted.
:param bool remove_prefix: If True, remove prefix in returned dictionary.
        :return: New dictionary with items whose keys start with *prefix*.
"""
return_dictionary = {}
for key in dictionary:
if key.startswith(prefix):
return_key = key[len(prefix):] if remove_prefix else key
return_dictionary[return_key] = copy.deepcopy(dictionary[key])
return return_dictionary
@staticmethod
def dump_json_to_file(dictionary, file_name):
""" Dumps *dictionary* as a json object to a file with *file_name* name.
:param dict dictionary: Dictionary to serialize.
        :param str file_name: Name of a file to serialize dictionary in.
"""
if file_name is not None:
IOUtils.mkdirf(file_name)
with open(file_name, 'w') as file_obj:
json.dump(dictionary, file_obj, indent=4)
@staticmethod
def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False):
""" Updates *dictionary* with items from *iterable* object.
This method modifies/updates *dictionary* with items from *iterable*
object. This object must support ``for something in iterable`` (list,
opened file etc). Only those items in *iterable* are considered, that match
        *pattern* (it's a regexp expression). If a particular item does not match,
and *must_match* is True, *ConfigurationError* exception is thrown.
Regexp pattern must return two groups (1 and 2). First group is considered
as a key, and second group is considered to be value. Values must be a
json-parseable strings.
If *add_only_keys* is not None, only those items are added to *dictionary*,
that are in this list.
Existing items in *dictionary* are overwritten with new ones if key already
exists.
One use case to use this method is to populate a dictionary with key-values
from log files.
:param dict dictionary: Dictionary to update in-place.
:param obj iterable: Iterable object (list, opened file name etc).
        :param str pattern: A regexp pattern for matching items in ``iterable``.
:param bool must_match: Specifies if every element in *iterable* must match\
*pattern*. If True and not match, raises exception.
:param list add_only_keys: If not None, specifies keys that are added into\
*dictionary*. Others are ignored.
        :param boolean ignore_errors: If True, ignore JSON parse errors instead of raising.
:raises ConfigurationError: If *must_match* is True and not match or if value\
is not a json-parseable string.
"""
matcher = re.compile(pattern)
for line in iterable:
match = matcher.match(line)
if not match:
if must_match:
raise ConfigurationError("Cannot match key-value from '%s' with pattern '%s'. Must match is set to true" % (line, pattern))
else:
continue
key = match.group(1).strip()
try:
value = match.group(2).strip()
value = json.loads(value) if len(value) > 0 else None
if add_only_keys is None or key in add_only_keys:
dictionary[key] = value
logging.debug("Key-value item (%s=%s) has been parsed and added to dictionary", key, str(value))
except ValueError as err:
if not ignore_errors:
raise ConfigurationError("Cannot parse JSON string '%s' with key '%s' (key-value definition: '%s'). Error is %s" % (value, key, line, str(err)))
@staticmethod
def match(dictionary, query, policy='relaxed', matches=None):
""" Match *query* against *dictionary*.
The *query* and *dictionary* are actually dictionaries. If policy is 'strict',
every key in query must exist in dictionary with the same value to match.
If policy is 'relaxed', dictionary may not contain all keys from query
to be matched. In this case, the intersection of keys in dictionary and query
is used for matching.
        It's assumed we match primitive types such as numbers and strings, not
        lists or dictionaries. If values in query are lists, then the OR condition applies.
For instance:
match(dictionary, query = { "framework": "tensorflow" }, policy='strict')
Match dictionary only if it contains key 'framework' with value "tensorflow".
match(dictionary, query = { "framework": "tensorflow" }, policy='relaxed')
Match dictionary if it does not contain key 'framework' OR contains\
key 'framework' with value "tensorflow".
match(dictionary, query = { "framework": ["tensorflow", "caffe2"] }, policy='strict')
Match dictionary only if it contains key 'framework' with value "tensorflow" OR\
"caffe2".
match(dictionary, query = { "framework": ["tensorflow", "caffe2"], "batch": [16, 32] }, policy='strict')
Match dictionary only if it (a) contains key 'framework' with value "tensorflow" OR "caffe2"\
and (b) it contains key 'batch' with value 16 OR 32.
:param dict dictionary: Dictionary to match.
:param dict query: Query to use.
:param ['relaxed', 'strict'] policy: Policy to match.
:param dict matches: Dictionary where matches will be stored if match has been identified.
:return: True if match or query is None
:rtype: bool
"""
if query is None:
return True
        assert policy in ['relaxed', 'strict'], "Invalid match policy (%s). Must be 'relaxed' or 'strict'." % policy
for field, value in query.iteritems():
if field not in dictionary:
if policy == 'relaxed':
continue
else:
return False
if isinstance(value, list) or not isinstance(value, basestring):
values = value if isinstance(value, list) else [value]
if dictionary[field] not in values:
return False
if matches is not None:
matches['%s_0' % (field)] = dictionary[field]
else:
if value == '':
# Take special care if value is an empty string
if value != dictionary[field]:
return False
elif matches is not None:
matches['%s_0' % (field)] = dictionary[field]
continue
else:
match = re.compile(value).match(dictionary[field])
if not match:
return False
else:
if matches is not None:
matches['%s_0' % (field)] = dictionary[field]
for index, group in enumerate(match.groups()):
matches['%s_%d' % (field, index+1)] = group
continue
return True
class ConfigurationLoader(object):
"""Loads experimenter configuration from multiple files."""
@staticmethod
def load(path, files=None):
"""Loads configurations (normally in `conigs`) folder.
:param str path: Path to load configurations from
:param list files: List of file names to load. If None, all files with
JSON extension in **path** are loaded.
:return: A tuple consisting of a list of config files, configuration
object (dictionary) and dictionary of parameters info
This method loads configuration files located in 'path'. If `files` is
empty, all json files are loaded from that folder.
This method fails if one parameter is defined in multiple files. This
is intended behaviour for now (this also applies for update_param_info method).
"""
if path is None:
raise ValueError("Configuration load error. The 'path' parameter cannot be None.")
if not os.path.isdir(path):
raise ValueError("Configuration load error. The 'path' parameter (%s) must point to an existing directory." % path)
if files is not None:
config_files = [os.path.join(path, f) for f in files]
else:
config_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.json')]
config = {} # Configuration with params/vars/extensions
param_info = {} # Information on params such as type and help messages
for config_file in config_files:
if not os.path.isfile(config_file):
raise ValueError("Configuration load error. Configuration data cannot be loaded for not a file (%s)" % config_file)
with open(config_file) as file_obj:
try:
# A part of global configuration from this particular file
config_section = json.load(file_obj)
# Update parameters info.
ConfigurationLoader.update_param_info(param_info, config_section, is_user_config=False)
                    # Join configuration from this single file.
ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section))
except ValueError:
logging.error("Configuration load error. Invalid JSON configuration in file %s", config_file)
raise
return (config_files, config, param_info)
@staticmethod
def update_param_info(param_info, config, is_user_config=False):
"""Update parameter info dictionary based on configurationi in **config**
:param dict param_info: A parameter info dictionary that maps parameter
name to its description dictionary that contains
                                such fields as value, help message, type, constraints
etc.
:param dict config: A dictionary with configuration section that may contain
parameters, variables and extensions. The **config** is
a result of parsing a JSON configuration file.
:param bool is_user_config: If True, the config object represents user-provided
configuration. If False, this is a system configuration.
Based on this flag, we deal with parameters in config
that redefine parameters in existing param_info
differently. See comments below.
We are interested here only in parameters section where parameter information
is defined. There are two scenarios this method is used:
1. Load standard configuration. In this case, parameter redefinition is
prohibited. If `parameters` section in `config` redefines existing
parameters in param_info (already loaded params), program terminates.
2. Load user-provided configuration. In this case, we still update parameter
info structure, but deal with it in slightly different way. If parameter in
`config` exists in param_info, it means user has provided their specific
value for this parameter.
Types of user defined parameters are defined either by user in a standard way as
we define types for standard parameters or induced automatically based on JSON
parse result.
"""
if 'parameters' not in config:
return
params = config['parameters']
for name in params:
val = params[name]
if not is_user_config:
# If this is not a user-provided configuration, we disallow parameter redefinition.
if name in param_info:
raise ConfigurationError(
"Parameter info update error."
" Parameter redefinition is not allowed for non-user configuration."
" This is a system configuration error that must not happen."
" Parameter %s=%s, new parameter definition (value) is %s" % (name, str(param_info[name]), val)
)
if isinstance(val, dict):
# This is a complete parameter definition with name, value and description.
if 'val' not in val:
raise ConfigurationError(
"Parameter info update error."
" Parameter that is defined by a dictionary must contain 'val' field that"
" defines its default value. Found this definition: %s=%s" % (name, val)
)
if name not in param_info:
param_info[name] = copy.deepcopy(val) # New parameter, set it info object.
# TODO what about parameter type and description?
else:
logging.warn(
" Parameter (%s) entirely redefines existing parameter (%s)."
" Normally, only value needs to be provided."
" We will proceed but you may want to fix this.",
json.dumps(val),
json.dumps(param_info[name])
)
param_info[name]['val'] = val['val'] # Existing parameter from user configuration, update its value
else:
# Just parameter value
val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__
if name not in param_info:
param_info[name] = {
'val': val,
'type': val_type,
'desc': "No description for this parameter provided (it was automatically converted from its value)."
}
else:
param_info[name]['val'] = val
# Do final validations
if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'):
raise ConfigurationError(
"Parameter info update error."
" Parameter has invalid type = '%s'."
" Parameter definition is %s = %s" % (param_info[name]['type'], name, param_info[name])
)
if 'type' not in param_info[name] or 'desc' not in param_info[name]:
logging.warn(
"Parameter definition does not contain type ('type') and/or description ('desc')."
" You should fix this. Parameter definition is"
" %s = %s", name, param_info[name]
)
@staticmethod
def remove_info(config):
"""In parameter section of a **config** the function removes parameter info
leaving only their values
:param dict config: A dictionary with configuration section that may contain
parameters, variables and extensions. The **config** is
a result of parsing a JSON configuration file.
:return: A copy of **config** with info removed
"""
clean_config = copy.deepcopy(config)
if 'parameters' in clean_config:
params = clean_config['parameters']
for name in params:
val = params[name]
if isinstance(val, dict):
# This should not generally happen since we deal with it in update_param_info, but just in case
if 'val' not in val:
raise ConfigurationError(
"Parameter info remove error."
" Parameter that is defined by a dictionary must contain 'val' field that"
" defines its default value. Found this definition: %s=%s" % (name, val)
)
params[name] = val['val']
return clean_config
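    # Illustrative sketch (the parameter name is made up): remove_info flattens full
    # parameter definitions down to their values, e.g.
    #     {'parameters': {'exp.num_warmup_batches': {'val': 1, 'type': 'int'}}}
    # becomes
    #     {'parameters': {'exp.num_warmup_batches': 1}}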
@staticmethod
def update(dest, source, is_root=True):
"""Merge **source** dictionary into **dest** dictionary assuming source
and dest are JSON configuration configs or their members.
:param dict dest: Merge data to this dictionary.
:param dict source: Merge data from this dictionary.
:param bool is_root: True if **dest** and *source** are root configuration
objects. False if these objects are members.
"""
def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type, valid_types):
raise ConfigurationError(
"Configuration update error - expecting value types to be same and one of %s but"
" Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__)
)
# Types and expected key names. Types must always match, else exception is thrown.
if is_root:
schema = {'types':(dict, list), 'dict':['parameters', 'variables'], 'list':['extensions']}
else:
schema = {'types':(list, basestring, int, float, long)}
for key in source:
# Firstly, check that type of value is expected.
val_type = type(source[key]).__name__
if not isinstance(source[key], schema['types']):
raise ConfigurationError(
"Configuration update error - unexpected type of key value: "
" is_root=%s, key=%s, value type=%s, expected type is one of %s" % \
(str(is_root), key, val_type, str(schema['types']))
)
# So, the type is expected. Warn if key value is suspicious - we can do it only for root.
if is_root and key not in schema[val_type]:
logging.warn("The name of a root key is '%s' but expected is one of '%s'", key, schema[val_type])
if key not in dest:
# The key in source dictionary is not in destination dictionary.
dest[key] = copy.deepcopy(source[key])
else:
# The key from source is in dest.
both_dicts = isinstance(dest[key], dict) and isinstance(source[key], dict)
both_lists = isinstance(dest[key], list) and isinstance(source[key], list)
both_primitive = type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring, int, float, long))
if is_root:
if not both_dicts and not both_lists:
_raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict, list]')
if both_dicts:
ConfigurationLoader.update(dest[key], source[key], is_root=False)
else:
dest[key].extend(source[key])
else:
if not both_lists and not both_primitive:
_raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list, basestring, int, float, long]')
dest[key] = copy.deepcopy(source[key]) if both_lists else source[key]
class ResourceMonitor(object):
"""The class is responsible for launching/shutting down/communicating with
external resource manager that monitors system resource consumption.
proc_pid date virt res shrd cpu mem power gpus_power
"""
def __init__(self, launcher, pid_folder, frequency, fields_specs):
"""Initializes resource monitor but does not create queue and process.
:param str launcher: A full path to resource monitor script.
:param str pid_folder: A full path to folder where pid file is created. The
file name is fixed and its value is `proc.pid`.
:param float frequency: A sampling frequency in seconds. Can be something like
            0.1 seconds.
        :param str fields_specs: Comma-separated field specifications in the form
            name:type:index[:count] describing the columns in the monitor output.
"""
self.launcher = launcher
self.pid_file = os.path.join(pid_folder, 'proc.pid')
self.frequency = frequency
self.queue = None
self.monitor_process = None
# Parse fields specs
# time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8:
self.fields = {}
raw_fields = fields_specs.split(',')
for raw_field in raw_fields:
fields_split = raw_field.split(':')
assert len(fields_split) in (3, 4),\
"Invalid format of field specification (%s). Must be name:type:index, name:type:index: or name:type:index:count" % raw_field
field_name = fields_split[0]
assert field_name not in self.fields,\
"Found duplicate timeseries field (%s)" % field_name
field_type = fields_split[1]
assert field_type in ('str', 'int', 'float', 'bool'),\
"Invalid field type (%s). Must be one of ('str', 'int', 'float', 'bool')" % field_type
index = int(fields_split[2])
if len(fields_split) == 3:
count = -1
elif fields_split[3] == '':
count = 0
else:
count = int(fields_split[3])
self.fields[field_name] = {
'type': field_type,
'index': index,
'count': count
}
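    # Illustrative sketch: a spec such as 'mem_virt:float:2' is parsed into
    # {'type': 'float', 'index': 2, 'count': -1} (a scalar column), while a trailing
    # colon as in 'gpus:float:8:' yields count 0, i.e. a variable-length list column.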
@staticmethod
def monitor_function(launcher, pid_file, frequency, queue):
"""A main monitor worker function.
:param str launcher: A full path to resource monitor script.
        :param str pid_file: A full path to the pid file used to communicate the
            monitored process id to the resource monitor script.
:param float frequency: A sampling frequency in seconds. Can be something like
0.1 seconds
:param multiprocessing.Queue queue: A queue to communicate measurements.
A resource monitor is launched as a subprocess. The thread is reading its
output and will put the data into a queue. A main thread will then dequeue all
        data once the experiment is completed.
"""
cmd = [
launcher,
pid_file,
'',
str(frequency)
]
process = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
# The 'output' is a string printed out by a resource monitor
# script. It's a whitespace separated string of numbers.
queue.put(output.strip())
@staticmethod
def str_to_type(str_val, val_type):
if val_type == 'str':
return str_val
elif val_type == 'int':
return int(str_val)
elif val_type == 'float':
return float(str_val)
elif val_type == 'bool':
v = str_val.lower()
assert v in ('true', 'false', '1', '0', 'on', 'off'),\
"Invalid boolean value in string (%s)" % str_val
            return v in ('true', '1', 'on')
else:
assert False, "Invalid value type %s" % val_type
def get_measurements(self):
"""Dequeue all data, put it into lists and return them.
time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8-
:return: Dictionary that maps metric field to a time series of its value.
"""
metrics = {}
for key in self.fields.keys():
metrics[key] = []
# What's in output:
# proc_pid date virt res shrd cpu mem power gpus_power
while not self.queue.empty():
data = self.queue.get().strip().split()
for field in self.fields:
tp = self.fields[field]['type']
idx = self.fields[field]['index']
count = self.fields[field]['count']
if count == -1:
metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp))
elif count == 0:
metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)])
else:
metrics[field].append([
ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count)
])
return metrics
def remove_pid_file(self):
"""Deletes pif file from disk."""
try:
os.remove(self.pid_file)
except OSError:
pass
def empty_pid_file(self):
"""Empty pid file."""
try:
with open(self.pid_file, 'w'):
pass
except IOError:
pass
def write_pid_file(self, pid):
"""Write the pid into pid file.
:param int pid: A pid to write.
This is a debugging function and most likely should not be used.
"""
with open(self.pid_file, 'w') as fhandle:
fhandle.write('%d' % pid)
def run(self):
"""Create queue and start resource monitor in background thread.
Due to possible execution of benchmarks in containers, we must not delete
file here, but create or empty it in host OS.
"""
self.empty_pid_file()
self.queue = Queue()
self.monitor_process = Process(
target=ResourceMonitor.monitor_function,
args=(self.launcher, self.pid_file, self.frequency, self.queue)
)
self.monitor_process.start()
def stop(self):
"""Closes queue and waits for resource monitor to finish."""
with open(self.pid_file, 'w') as fhandle:
fhandle.write('exit')
self.queue.close()
self.queue.join_thread()
self.monitor_process.join()
self.remove_pid_file()
class _ModuleImporter(object):
"""A private class that imports a particular models and return boolean
variable indicating if import has been succesfull or not. Used by a Modules
class to identify if optional python modules are available.
"""
@staticmethod
def try_import(module_name):
"""Tries to import module.
:param str module_name: A name of a module to try to import, something like
'numpy', 'pandas', 'matplotlib' etc.
:return: True if module has been imported, False otherwise.
"""
have_module = True
try:
importlib.import_module(module_name)
except ImportError:
logging.warn("Module '%s' cannot be imported, certain system information will not be available", module_name)
have_module = False
return have_module
class Modules(object):
"""A class that enumerates non-standard python modules this project depends on.
They are optional, so we can disable certain functionality if something is missing.
"""
HAVE_NUMPY = _ModuleImporter.try_import('numpy')
HAVE_PANDAS = _ModuleImporter.try_import('pandas')
HAVE_MATPLOTLIB = _ModuleImporter.try_import('matplotlib')
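    # Illustrative usage sketch: callers can guard optional functionality, e.g.
    #     if Modules.HAVE_PANDAS:
    #         import pandas as pd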
| 2.46875 | 2 |
sickbeard/lib/tidysub/cleaner.py | Branlala/docker-sickbeardfr | 0 | 12798730 | <reponame>Branlala/docker-sickbeardfr<filename>sickbeard/lib/tidysub/cleaner.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import re
from datetime import timedelta
from datetime import datetime
from regex import strings
from sickbeard import logger
#Definition of the TidySub class
class TidySub:
"""Load the subtitle, the file containing regex for removal
and perform the cleaning and formatting actions"""
def __init__(self, path_to_sub):
        #Boolean to store whether the file is loaded
self._is_file_loaded = False
#Path to the subtitles file
if re.match(r'^.+\.srt$', path_to_sub, re.UNICODE):
self._path_to_sub = path_to_sub
else:
logger.log("TidySub : TidySub only corrects .srt files", logger.DEBUG)
return
self._team_list = list()
self._sub_list = list()
#Load the subtitles file
self._sub_list = self._load_file(self._path_to_sub, True)
if self._sub_list is not None:
logger.log("TidySub : INFO: Subtitles file loaded", logger.DEBUG)
return
#Load a text file into a list in utf8
def _load_file(self, path_to_file, removeEOL=False):
try:
fileToRead = codecs.open(path_to_file, "r", "latin-1")
except IOError:
logger.log("TidySub : File does not exist or sub is in mkv", logger.DEBUG)
return
except:
try:
fileToRead = codecs.open(path_to_file, "r", "utf-8")
except:
logger.log("TidySub : File not encoded in UTF-8 neither in latin-1", logger.DEBUG)
return
return
tempList = list ()
self._is_file_loaded = True
#If the EOL must be removed
if removeEOL:
for i in fileToRead:
tempList.append(i.rstrip('\n\r'))
else:
for i in fileToRead:
tempList.append(i)
fileToRead.close()
return tempList
#Write a file
def _write_file(self, path_to_file, toWrite):
if not self._is_file_loaded:
logger.log("TidySub : No subtitles file was loaded", logger.DEBUG)
return
fileDest = codecs.open(path_to_file, "w", "latin-1")
for i in toWrite:
fileDest.write(i)
fileDest.close()
logger.log("TidySub : INFO: Subtitles file saved", logger.DEBUG)
#Try to detect subtitles language
def _detect_language(self, path_to_sub):
if not self._is_file_loaded:
logger.log("TidySub : No subtitles file was loaded", logger.DEBUG)
return
if re.match("^.+\.[a-z]{2}\.srt$", path_to_sub.lower(), re.UNICODE):
path_to_sub = re.sub(r'\.[a-z]+$', '', path_to_sub.lower())
return path_to_sub[len(path_to_sub)-2:len(path_to_sub)]
else:
return self._guess_language()
def _guess_language(self):
if not self._is_file_loaded:
logger.log("TidySub : No subtitles file was loaded", logger.DEBUG)
return
#combine words into one regex string
_french = "(^|[ ])" + "((" + ")|(".join(strings.get_guess_french(),True) + "))" + "([ ]|$)"
_english = "(^|[ ])" + "((" + ")|(".join(strings.get_guess_english(),True) + "))" + "([ ]|$)"
_count_french = 0
_count_english = 0
i = 0
# Count the number of occurences of the words for each language
while i < len(self._sub_list):
if re.search(_french, self._sub_list[i].lower(), re.UNICODE):
_count_french += 1
if re.search(_english, self._sub_list[i].lower(), re.UNICODE):
_count_english += 1
i += 1
#Return the language which has the highest count
if _count_french > _count_english:
logger.log("TidySub : INFO: Guessed language is French", logger.DEBUG)
return "fr"
elif _count_english > _count_french:
logger.log("TidySub : INFO: Guessed language is English", logger.DEBUG)
return "en"
else:
return "undefined"
#Test Regex for team words
def _clean_team(self):
#combine team names into one regex string
combined = "(" + ")|(".join(strings.get_teams()) + ")"
i = 0
while i < len(self._sub_list):
if re.search(combined, self._sub_list[i], re.UNICODE):
del self._sub_list[i]
continue
i += 1
#Clean Hi in the subtitles file with regex
def _clean_hi(self):
i = 0
while i < len(self._sub_list):
#remove parentheses and content
self._sub_list[i] = re.sub(r'\([^)]*\)', '', self._sub_list[i], re.UNICODE)
#remove parentheses split in two lines
if i < (len(self._sub_list) - 1) and re.match(r'^.*\(', self._sub_list[i], re.UNICODE) and not re.match(r'\)', self._sub_list[i], re.UNICODE) and re.match(r'^.*\)', self._sub_list[i+1], re.UNICODE):
self._sub_list[i] = re.sub(r'\(.*$', '', self._sub_list[i], re.UNICODE)
self._sub_list[i+1] = re.sub(r'^.*\)', '', self._sub_list[i+1], re.UNICODE)
#remove brackets and content
self._sub_list[i] = re.sub(r'\[[^)]*\]', '', self._sub_list[i], re.UNICODE)
#remove brackets split in two lines
if i < (len(self._sub_list) - 1) and re.match(r'^.*\[', self._sub_list[i], re.UNICODE) and not re.match(r'\]', self._sub_list[i], re.UNICODE) and re.match(r'^.*\]', self._sub_list[i+1], re.UNICODE):
self._sub_list[i] = re.sub(r'\[.*$', '', self._sub_list[i], re.UNICODE)
self._sub_list[i+1] = re.sub(r'^.*\]', '', self._sub_list[i+1], re.UNICODE)
#remove braces and content
self._sub_list[i] = re.sub(r'\{[^)]*\}', '', self._sub_list[i], re.UNICODE)
#remove braces split in two lines
if i < (len(self._sub_list) - 1) and re.match(r'^.*\{', self._sub_list[i], re.UNICODE) and not re.match(r'\}', self._sub_list[i], re.UNICODE) and re.match(r'^.*\}', self._sub_list[i+1], re.UNICODE):
self._sub_list[i] = re.sub(r'\{.*$', '', self._sub_list[i], re.UNICODE)
self._sub_list[i+1] = re.sub(r'^.*\}', '', self._sub_list[i+1], re.UNICODE)
#remove name of speaker in front of the line
self._sub_list[i] = re.sub(r'^[ \t]*[A-Z]+[ \t]*\:', '', self._sub_list[i], re.UNICODE)
#remove leading and trailing spaces
self._sub_list[i] = re.sub(r'^[ \t]+|[ \t]+$', '', self._sub_list[i], re.UNICODE)
#remove multiple whitespaces
self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE)
#Remove line with just a single hyphen
self._sub_list[i] = re.sub(r'^\-$', '', self._sub_list[i], re.UNICODE)
#delete empty balise
self._sub_list[i] = re.sub(r'\<[^ ]+\>\<\/[^ ]+\>', '', self._sub_list[i], re.UNICODE)
i += 1
#French: Try to correct punctuation in the subtitles file with regex
def _clean_punctuation_fr(self):
i = 0
while i < len(self._sub_list):
if not re.match(r'^[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]):
#remove leading and trailing spaces
self._sub_list[i] = re.sub(r'^[ \t]+|[ \t]+$', '', self._sub_list[i], re.UNICODE)
#remove multiple whitespaces
self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE)
#Correct comma
if re.match("^.+ \,",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r' \,', ',', self._sub_list[i], re.UNICODE)
if re.match("^.+\,[^ ]+",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\,(?!\")', ', ', self._sub_list[i], re.UNICODE)
#Correct semi-colon
if re.match("^.*[^ ]+\;",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\;', ' ;', self._sub_list[i], re.UNICODE)
if re.match("^.*\;[^ ]+",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\;', '; ', self._sub_list[i], re.UNICODE)
#Correct colon
if re.match("^.*[^ ]+\:",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\:', ' :', self._sub_list[i], re.UNICODE)
if re.match("^.*\:[^ ]+",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\:(?!\")(?![0-9]+)', ': ', self._sub_list[i], re.UNICODE)
#Correct dots
if re.match("^.+ \.",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r' \.', '.', self._sub_list[i], re.UNICODE)
#if re.match("^.+\.[^ ]+",self._sub_list[i], re.UNICODE):
# self._sub_list[i] = re.sub(r'(?<=[A-Z]\.)\.(?!\")(?![A-Z]\.)', '. ', self._sub_list[i], re.UNICODE)
#Correct question mark
if re.match("^.+[^ ]+\?",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\?', ' ?', self._sub_list[i], re.UNICODE)
if re.match("^.+\?[^ ]+",self._sub_list[i], re.UNICODE):
                    self._sub_list[i] = re.sub(r'\?(?!\")', '? ', self._sub_list[i], re.UNICODE)
#Correct exclamation mark
if re.match("^.+[^ ]+\!",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\!', ' !', self._sub_list[i], re.UNICODE)
if re.match("^.+\![^ ]+",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\!(?!\")', '! ', self._sub_list[i], re.UNICODE)
#Correct hyphen
if re.match("^\-[^ ]",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'^\-', '- ', self._sub_list[i], re.UNICODE)
#Correct not regular expressions
self._sub_list[i] = re.sub(r'\? \!', '?!', self._sub_list[i], re.UNICODE)
self._sub_list[i] = re.sub(r'\? \? \?', '???', self._sub_list[i], re.UNICODE)
self._sub_list[i] = re.sub(r'\. \. \.', '...', self._sub_list[i], re.UNICODE)
self._sub_list[i] = re.sub(r'\. \.', '..', self._sub_list[i], re.UNICODE)
#remove leading and trailing spaces
self._sub_list[i] = re.sub(r'^[ \t]+|[ \t]+$', '', self._sub_list[i], re.UNICODE)
#remove multiple whitespaces
self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE)
#remove space before closing balise
if re.search(r' \<\/[^ ]+\>',self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r' \<\/', '</', self._sub_list[i], re.UNICODE)
i += 1
#English: Try to correct punctuation in the subtitles file with regex
def _clean_punctuation_en(self):
i = 0
while i < len(self._sub_list):
if not re.match(r'^[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]):
#remove leading and trailing spaces
self._sub_list[i] = re.sub(r'^[ \t]+|[ \t]+$', '', self._sub_list[i], re.UNICODE)
#remove multiple whitespaces
self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE)
#Correct comma
if re.match("^.+ \,",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r' \,', ',', self._sub_list[i], re.UNICODE)
if re.match("^.+\,[^ ]+",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\,(?!\")', ', ', self._sub_list[i], re.UNICODE)
#Correct semi-colon
if re.match("^.* \;",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r' \;', ';', self._sub_list[i], re.UNICODE)
if re.match("^.*\;[^ ]+",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\;', '; ', self._sub_list[i], re.UNICODE)
#Correct colon
if re.match("^.* \:",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r' \:', ':', self._sub_list[i], re.UNICODE)
if re.match("^.*\:[^ ]+",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\:(?!\")(?![0-9]+)', ': ', self._sub_list[i], re.UNICODE)
#Correct dots
if re.match("^.+ \.",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r' \.', '.', self._sub_list[i], re.UNICODE)
if re.match("^.+\.[^ ]+",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'(?<=[A-Z]\.)\.(?!\")(?![A-Z]\.)', '. ', self._sub_list[i], re.UNICODE)
#Correct question mark
if re.match("^.+ \?",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r' \?', '?', self._sub_list[i], re.UNICODE)
if re.match("^.+\?[^ ]+",self._sub_list[i], re.UNICODE):
                    self._sub_list[i] = re.sub(r'\?(?!\")', '? ', self._sub_list[i], re.UNICODE)
#Correct exclamation mark
if re.match("^.+ \!",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r' \!', '!', self._sub_list[i], re.UNICODE)
if re.match("^.+\![^ ]+",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'\!(?!\")', '! ', self._sub_list[i], re.UNICODE)
#Correct hyphen
if re.match("^\-[^ ]",self._sub_list[i], re.UNICODE):
self._sub_list[i] = re.sub(r'^\-', '- ', self._sub_list[i], re.UNICODE)
#Correct not regular expressions
self._sub_list[i] = re.sub(r'\? \!', '?!', self._sub_list[i], re.UNICODE)
self._sub_list[i] = re.sub(r'\? \? \?', '???', self._sub_list[i], re.UNICODE)
self._sub_list[i] = re.sub(r'\. \. \.', '...', self._sub_list[i], re.UNICODE)
self._sub_list[i] = re.sub(r'\. \.', '..', self._sub_list[i], re.UNICODE)
#remove leading and trailing spaces
self._sub_list[i] = re.sub(r'^[ \t]+|[ \t]+$', '', self._sub_list[i], re.UNICODE)
#remove multiple whitespaces
self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE)
i += 1
#Remove music from line
def _clean_music(self):
i = 0
while i < len(self._sub_list):
if re.search(u'\u266a', self._sub_list[i], re.UNICODE):
del self._sub_list[i]
continue
i += 1
#Clean formatting
#Remove blank lines
#Test numbers
#Formatting of time
def _clean_formatting(self):
#Remove unwanted blank lines
self._clean_blank_lines()
#Remove BOM character
self._sub_list[0] = re.sub(u'\ufeff', '', self._sub_list[0], re.UNICODE)
#Delete unnecessary lines
i = 0
count = 1
while i < len(self._sub_list):
j = 1
#If the line is a number
if re.match('^[0-9]+$', self._sub_list[i]):
#First line must always be 1
if i == 0:
self._sub_list[i] = str('1')
count = 1
else:
self._sub_list[i] = str(count)
#Exception if last line
if i == len(self._sub_list)-1:
del self._sub_list[len(self._sub_list)-1]
if self._sub_list[len(self._sub_list)-1] == "":
del self._sub_list[len(self._sub_list)-1]
break
#Check the second line
#Check if it's a time range
if re.match(r'^[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+', self._sub_list[i+1]):
self._clean_time_range(i+1)
j += 1
#Exception if last line
if (i+1) == len(self._sub_list)-1:
del self._sub_list[i+1]
continue
elif (i+2) == len(self._sub_list)-1:
break
elif (i+3) == len(self._sub_list)-1:
break
#If the third line is empty and 4th is a number again
if self._sub_list[i+2] == "" and re.match('^[0-9]+$', self._sub_list[i+3]):
del self._sub_list[i]
del self._sub_list[i]
del self._sub_list[i]
continue
elif self._sub_list[i+2] == "" and not re.match('^[0-9]+$', self._sub_list[i+3]):
del self._sub_list[i+2]
continue
#if 3rd line is not empty
elif self._sub_list[i+3] == "" and not re.match('^[0-9]+$', self._sub_list[i+4]):
del self._sub_list[i+3]
continue
elif self._sub_list[i+3] == "" and re.match('^[0-9]+$', self._sub_list[i+4]):
j += 2
                    elif self._sub_list[i+3] != "" and self._sub_list[i+4] == "" and not re.match('^[0-9]+$', self._sub_list[i+5]):
del self._sub_list[i+4]
continue
                    elif self._sub_list[i+3] != "" and self._sub_list[i+4] != "" and re.match('^[0-9]+$', self._sub_list[i+5]):
j += 3
                    elif self._sub_list[i+3] != "" and self._sub_list[i+4] != "" and self._sub_list[i+5] != "" and re.match('^[0-9]+$', self._sub_list[i+6]):
j += 4
count += 1
else:
logger.log("TidySub : Formatting error : timerange", logger.DEBUG)
else:
logger.log("TidySub : Formatting error : number line", logger.DEBUG)
i += j
#Re add the EOL character
i = 0
while i < len(self._sub_list)-1:
self._sub_list[i] += '\r\n'
i += 1
#Remove unwanted blank lines in the subtitles file
def _clean_blank_lines(self):
#Remove a blank line if it is not before a number
i = 0
while i < len(self._sub_list)-1:
if self._sub_list[i] == "" and not re.match('^[0-9]+$', self._sub_list[i+1]):
del self._sub_list[i]
continue
i += 1
#Delete 1st line if blank
if self._sub_list[0] == "":
del self._sub_list[0]
#Delete last line if blank
if self._sub_list[len(self._sub_list)-1] == "":
del self._sub_list[len(self._sub_list)-1]
def _clean_time_format(self, string):
if re.match(r'^[0-9]{2}\:[0-9]{2}\:[0-9]{2}\,[0-9]{3}$', string):
return string
else:
#correct hours
if re.match(r'^[0-9]{1}\:', string):
string = re.sub(r'^', '0', string, re.UNICODE)
#correct minutes
if re.match(r'^[0-9]{2}\:[0-9]{1}\:', string):
string = string[0:3] + "0" + string[3:len(string)]
#correct seconds
if re.match(r'^[0-9]{2}\:[0-9]{2}\:[0-9]{1}\,', string):
string = string[0:6] + "0" + string[6:len(string)]
#correct ms
if re.match(r'^[0-9]{2}\:[0-9]{2}\:[0-9]{2}\,[0-9]{1}$', string):
string = string[0:9] + "00" + string[9:len(string)]
if re.match(r'^[0-9]{2}\:[0-9]{2}\:[0-9]{2}\,[0-9]{2}$', string):
string = string[0:9] + "0" + string[9:len(string)]
return string
#Try to correct the format of the time
def _clean_time_range(self, i):
if re.match(r'^[0-9]{2}\:[0-9]{2}\:[0-9]{2}\,[0-9]{3} \-\-\> [0-9]{2}\:[0-9]{2}\:[0-9]{2}\,[0-9]{3}$', self._sub_list[i]):
return
if re.match(r'^[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+\s\-\-\>\s[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+', self._sub_list[i]):
_start = re.sub("\s\-\-\>\s[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+$",'', self._sub_list[i], re.UNICODE)
_end = re.sub(r'\r\n','', self._sub_list[i], re.UNICODE)
_end = re.sub("^[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+\s\-\-\>\s",'', _end, re.UNICODE)
self._sub_list[i] = self._clean_time_format(_start) + " --> " + self._clean_time_format(_end)
#Main function to clean subtitles
def Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language = ""):
if not self._is_file_loaded:
logger.log("TidySub : No subtitles file was loaded", logger.DEBUG)
return
#Try to determine the language of the file
if not force_language:
_language = self._detect_language(self._path_to_sub)
else:
_language = force_language
#If the team strings must be removed
if removeTeam:
logger.log("TidySub : INFO: Removing teams names", logger.DEBUG)
#Call the function
self._clean_team()
#If music strings must be removed
if removeMusic:
logger.log("TidySub : INFO: Removing lyrics", logger.DEBUG)
self._clean_music()
#If Hi must be removed
if removeHi:
logger.log("TidySub : INFO: Removing HI", logger.DEBUG)
self._clean_hi()
#If punctuation must be corrected
if correct_punctuation:
if _language == "fr":
logger.log("TidySub : INFO: Correcting punctuation (French)", logger.DEBUG)
self._clean_punctuation_fr()
elif _language == "en":
logger.log("TidySub : INFO: Correcting punctuation (English)", logger.DEBUG)
self._clean_punctuation_en()
#Clean the formatting before saving the subtitles
self._clean_formatting()
#Write file
self._write_file(self._path_to_sub, self._sub_list)
def Offset(self, _sign, _hour=0, _minute=0, _second=0, _ms=0):
if not self._is_file_loaded:
logger.log("TidySub : No subtitles file was loaded", logger.DEBUG)
return
_correct = True
# Check consistency of the parameters
if _sign is not "+" and _sign is not "-":
logger.log("TidySub : Bad sign for offset", logger.DEBUG)
_correct = False
if (not isinstance(_hour, int)) or _hour < 0 or _hour > 5:
logger.log("TidySub : Hour is not correct for offset", logger.DEBUG)
_correct = False
if (not isinstance(_minute, int)) or _minute < 0 or _minute >= 60:
logger.log("TidySub : Minute is not correct for offset", logger.DEBUG)
_correct = False
if (not isinstance(_second, int)) or _second < 0 or _second >= 60:
logger.log("TidySub : Second is not correct for offset", logger.DEBUG)
_correct = False
if (not isinstance(_ms, int)) or _ms < 0 or _ms >= 1000:
logger.log("TidySub : Milisecond is not correct for offset", logger.DEBUG)
_correct = False
if not _correct:
return False
#Save time to offset into a timedelta
_time_offset = timedelta(hours=_hour, minutes=_minute, seconds=_second, microseconds=(_ms*1000))
i = 0
while i < len(self._sub_list):
if re.match(r'^[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+\s\-\-\>\s[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+', self._sub_list[i]):
#remove EOL
self._sub_list[i] = re.sub(r'\r\n$', '', self._sub_list[i], re.UNICODE)
#Extract start time and save in timedelta
_time_start = datetime.strptime('01/01/10 ' + re.sub(r' \-\-\> [0-9]+\:[0-9]+\:[0-9]+\,[0-9]+$', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f')
#Extract end time and save in timedelta
_time_end = datetime.strptime('01/01/10 ' + re.sub(r'^[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+\s\-\-\>\s', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f')
#Calculate the new time
if _sign == "+":
_time_start += _time_offset
_time_end += _time_offset
elif _sign == "-":
_time_start -= _time_offset
_time_end -= _time_offset
#create the new time range line
self._sub_list[i] = str(_time_start.hour) + ":" + str(_time_start.minute) + ":" + str(_time_start.second) + "," + str(_time_start.microsecond/1000) + " --> " + \
str(_time_end.hour) + ":" + str(_time_end.minute) + ":" + str(_time_end.second) + "," + str(_time_end.microsecond/1000)
#correct the time range line format
self._clean_time_range(i)
#re add EOL
self._sub_list[i] += '\r\n'
i += 1
#Write the new SRT file
self._write_file(self._path_to_sub, self._sub_list)
| 2.34375 | 2 |
pystratis/api/wallet/requestmodels/pubkeyrequest.py | TjadenFroyda/pyStratis | 8 | 12798731 | <reponame>TjadenFroyda/pyStratis
from pydantic import Field
from pystratis.api import Model
from pystratis.core.types import Address
# noinspection PyUnresolvedReferences
class PubKeyRequest(Model):
"""A request model used for /wallet/pubkey endpoint.
Args:
wallet_name (str): The name of the wallet to search for pubkey in.
        external_address (Address): The external address whose pubkey is wanted.
"""
wallet_name: str = Field(alias='walletName')
external_address: Address = Field(alias='externalAddress')
| 2.546875 | 3 |
aivle-worker/apis.py | edu-ai/aivle-worker | 0 | 12798732 | import json
import os
import requests
from client import Submission
from settings import API_BASE_URL, ACCESS_TOKEN
def get_task_url(task_id: int):
return API_BASE_URL + f"/tasks/{task_id}/download_grader/"
def get_agent_url(submission_id: int):
return API_BASE_URL + f"/submissions/{submission_id}/download/"
def start_job(job_id, task_id) -> Submission:
worker_name = "unknown_worker"
if os.getenv("WORKER_NAME") is not None:
worker_name = os.getenv("WORKER_NAME")
resp = requests.get(API_BASE_URL + f"/jobs/{job_id}/start_job/",
headers={"Authorization": f"Token {ACCESS_TOKEN}"},
data={
"worker_name": worker_name,
"task_id": task_id
})
if resp.status_code != 200:
raise Exception(resp.content)
obj = json.loads(resp.content)
return Submission(sid=obj["submission"], task_url=get_task_url(obj["task"]),
agent_url=get_agent_url(obj["submission"]))
def submit_job(job_id, task_id, result):
resp = requests.get(API_BASE_URL + f"/jobs/{job_id}/submit_job/",
headers={"Authorization": f"Token {ACCESS_TOKEN}"},
data={
"result": result,
"task_id": task_id
})
return resp
| 2.546875 | 3 |
utils/GraphThread.py | vonNiklasson/graph-client | 0 | 12798733 | import time
import logging
from extended_networkx_tools import Analytics, AnalyticsGraph
from timeit import default_timer as timer
from utils import Solvers
from utils.GraphUtils import GraphUtils
from utils.ServerUtil import ServerUtil
from datetime import datetime
class GraphThread:
@staticmethod
def start_thread(base_url, client_name, thread_id, color=None, recalc=False):
current_sleep = 10
gt = GraphThread(base_url, client_name, thread_id, color)
while True:
try:
gt.run(recalc)
current_sleep = 10
except Exception as e:
logging.exception("Failed when running thread")
gt.print('Crashed, restarting in %d seconds' % current_sleep, Styles.FAIL)
time.sleep(current_sleep)
current_sleep += 10
client_name: str
server: ServerUtil
thread_id: int
color: None
def __init__(self, base_url, client_name, thread_id, color):
self.client_name = client_name
self.thread_id = thread_id
self.server = ServerUtil(base_url)
self.color = color
def run(self, recalc=False):
# Get a new task from the server
task = self.get_task(recalc)
self.print("(%d) Received graph (%d nodes), type %s" % (task['Id'], task['NodeCount'], task['SolveType']))
# Solve it and get a graph
start = timer()
analytics_graph, custom_data = self.solve_task(task=task)
end = timer()
# Calculate deltatime
delta_time = end - start
time_minutes = round((delta_time / 60)-0.49)
time_seconds = round(delta_time % 60)
self.print("(%d) Solved graph (%d nodes) in %sm %ss" %
(task['Id'], task['NodeCount'], time_minutes, time_seconds))
# Get the results
results = GraphUtils.get_results(analytics_graph=analytics_graph, task=task, custom_data=custom_data)
# Upload the results to the server
self.upload_results(results=results, analytics_graph=analytics_graph)
self.print("(%d) Uploaded results (%d nodes)" % (task['Id'], task['NodeCount']))
def get_task(self, recalc=False):
if recalc:
task = self.server.get_recalc_task()
else:
task = self.server.get_task(self.client_name)
return task
@staticmethod
def solve_task(task) -> (AnalyticsGraph, object):
solve_type = task['SolveType']
if solve_type == 'diff':
return Solvers.Diff.solve(task)
elif solve_type == 'spec':
return Solvers.Spec.solve(task)
elif solve_type == 'random':
return Solvers.Random.solve(task)
elif solve_type == 'field' or solve_type == 'dfield' or solve_type == 'sfield' or solve_type == 'sfield_fr':
return Solvers.Field.solve(task)
else:
return Solvers.Random.solve(task)
def upload_results(self, results, analytics_graph: AnalyticsGraph):
worker_id = results['Id']
self.server.upload_results(worker_id, results)
self.server.upload_results(worker_id, {'Nodes': Analytics.get_node_dict(analytics_graph.graph())})
self.server.upload_results(worker_id, {'Edges': Analytics.get_edge_dict(analytics_graph.graph())})
    def print(self, msg, style=None):
        # Use the explicitly requested style if given, otherwise the thread's own color.
        start_color = style if style is not None else self.color
        ts = datetime.now().strftime('%H:%M:%S')
        print("%s%s%s %s P%d: %s%s" %
              (Styles.BOLD, ts, Styles.ENDC, start_color, self.thread_id, msg, Styles.ENDC))
class Styles:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m' | 2.546875 | 3 |
src/entities/__init__.py | alliance-genome/agr_neo4j_qc | 2 | 12798734 | <filename>src/entities/__init__.py
from .generic import GenericEntities | 1.078125 | 1 |
zusha/registrations/forms.py | samsonmuoki/zusha_web_app | 1 | 12798735 | from django import forms
SACCO_DRIVER_STATUS_OPTIONS = [
('Approved', ('Approved to operate')),
('Suspended', ('Suspended for the time being')),
('Blacklisted', ('Blacklisted from operating'))
]
class VehicleForm(forms.Form):
# sacco = forms.CharField(label="Sacco", max_length=100)
regno = forms.CharField(label="Registration Number", max_length=7)
# def get_sacco(self):
# """Return the name of the sacco."""
# return self.sacco
def get_regno(self):
"""Return the regno of the vehicle."""
return self.regno
class DriverForm(forms.Form):
"""Viewset for add driver."""
national_id = forms.CharField(max_length=8)
# first_name = forms.CharField(max_length=10)
# last_name = forms.CharField(max_length=10)
# # sacco = forms.CharField(max_length=10)
# email = forms.CharField(max_length=15)
# phone_number = forms.CharField(max_length=12)
class UpdateSaccoDriverStatusForm(forms.Form):
"""."""
status = forms.CharField(
widget=forms.Select(choices=SACCO_DRIVER_STATUS_OPTIONS)
)
description = forms.CharField(widget=forms.Textarea)
class SearchDriverIdForm(forms.Form):
"""Search for a driver."""
national_id = forms.CharField(max_length=10, help_text="Enter driver id")
| 2.28125 | 2 |
terminal/test zips/outlab_3_boilerplate_files/q4.py | Phantom-Troupe-CS251/RedPlag | 0 | 12798736 | <gh_stars>0
class Node(object):
"""
Node contains two objects - a left and a right child, both may be a Node or both None,
latter representing a leaf
"""
def __init__(self, left=None, right=None):
super(Node, self).__init__()
self.left = left
self.right = right
def __str__(self):
"""
Default inorder print
"""
if self.left is None and self.right is None:
return "( )"
else:
return "( " + str(self.left) + " " + str(self.right) + " )"
def __eq__(self, other):
if self.left is None and self.right is None:
return other.left is None and other.right is None
elif other.left is None and other.right is None:
return False
else:
return self.left == other.left and self.right == other.right
def mirrorTree(node):
    """
    Returns the mirror image of the tree rooted at node
    """
    if node.left is None and node.right is None:
        return Node()
    return Node(mirrorTree(node.right), mirrorTree(node.left))
def allTrees(n):
    """
    Returns a list of all unique trees with n internal nodes
    """
    if n == 0:
        return [Node()]
    return [Node(left, right)
            for i in range(n)
            for left in allTrees(i)
            for right in allTrees(n - 1 - i)]
def allSymTrees(n):
    """
    Returns a list of all unique symmetrical trees with n internal nodes
    """
    return [tree for tree in allTrees(n) if tree == mirrorTree(tree)]
if __name__ == '__main__':
for x in allSymTrees(int(input())):
print(x)
node = Node(Node(Node(), Node()), Node())
print(node) | 3.90625 | 4 |
emacs/formatting.py | jcaw/talon_conf | 9 | 12798737 | from typing import Optional
from talon import Context
from user.emacs.utils.voicemacs import rpc_call
from user.utils.formatting import SurroundingText
context = Context()
context.matches = r"""
tag: user.emacs
"""
@context.action_class("self")
class UserActions:
def surrounding_text() -> Optional[SurroundingText]:
# TODO: If the voicemacs server is inactive, return nothing.
raw_info = rpc_call(
"voicemacs-surrounding-text",
[":chars-before", 30000, ":chars-after", 30000],
# Use a very long timeout
timeout=10,
)
return SurroundingText(
text_before=raw_info["text-before"], text_after=raw_info["text-after"]
)
| 2.6875 | 3 |
tests/python-reference/scope/unboundlocal-augassign.py | jpolitz/lambda-py-paper | 25 | 12798738 | <gh_stars>10-100
# the variable in f() is lifted as a local and assigned
# unbound, because it is the left side of an assignment.
global_x = 1
def f():
global_x += 1
___assertRaises(UnboundLocalError, f)
| 3.078125 | 3 |
archive/hackerrank/nearby_attraction.py | tanchao/algo | 2 | 12798739 | <filename>archive/hackerrank/nearby_attraction.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'tanchao'
import sys
import math
PI = 3.14159265359
EARTH_RADIUS = 6371 # in km
TRANSPORTS = {
'metro': 20,
'bike': 15,
'foot': 5
}
'''
{'15': (52.357895, 4.892835), '14': (52.368832, 4.892744), '35': (52.342497, 4.855094),
'1': (52.378281, 4.90007), '3': (52.375737, 4.896547), '2': (52.373634, 4.890289),
'5': (52.376237, 4.90286), '4': (52.372995, 4.893096), '7': (52.366537, 4.911348),
'6': (52.367066, 4.893381)}
'''
def nearby_attraction(addresses, reqs):
for req in reqs:
req_point = req['location']
req_trans = req['transport']
req_time = req['time']
req_addresses = {}
for addr in addresses:
req_addresses[addr['id']] = addr['location']
res = ''
while True:
nearest = 10000000 # INT MAX
near_id = ''
for addr_id in req_addresses:
addr_point = req_addresses[addr_id]
distance = round(distance_between(req_point, addr_point), 2) # round to 0.2f
if nearest > distance or (nearest == distance and int(near_id) > int(addr_id)):
nearest = distance
near_id = addr_id
time = float(nearest / TRANSPORTS[req_trans] * 60)
req_time -= time
req_point = req_addresses.pop(near_id) # remove point travelled
print req_addresses, '----->', near_id, req_point, nearest, req_time, time
if req_time >= 0:
res += str(near_id) + ' '
else:
break
if not req_addresses:
break
print res.strip()
def nearby_attractions(addrs, reqs):
for req in reqs:
distances = []
for addr in addrs:
addr['distance'] = round(distance_between(req['location'], addr['location']), 2) # round to 0.2f
distances.append(addr) # addrs with distances
distances.append({'id': 0, 'location': req['location'], 'distance': 0.00})
distances.sort(key=lambda distance: (distance['distance'], distance['id'])) # sort by id
# print distances
total_time, res = req['time'], ''
# for d in distances:
# time = d['distance'] / TRANSPORTS[req['transport']] * 60 # minutes
# total_time -= time
# if total_time >= 0:
# res += str(d['id']) + ' '
# else:
# break
for i in xrange(len(distances) - 1):
j = i + 1
d = round(distance_between(distances[j]['location'], distances[i]['location']), 2)
time = d / TRANSPORTS[req['transport']] * 60 # minutes
total_time -= time
print distances[i], distances[j], d, time, total_time
if total_time >= 0:
res += str(distances[j]['id']) + ' '
else:
break
print res.strip()
def distance_between(point1, point2):
point1_lat_in_radians = degree2radians(point1[0])
point1_long_in_radians = degree2radians(point1[1])
point2_lat_in_radians = degree2radians(point2[0])
point2_long_in_radians = degree2radians(point2[1])
return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) +
math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) *
math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS)
def degree2radians(degree):
return float(degree * 2 * PI / 360)
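# Note: distance_between() implements the spherical law of cosines, so for two
# (lat, long) pairs in degrees it returns the great-circle distance in km, e.g.
#     d_km = distance_between((52.378281, 4.90007), (52.373634, 4.890289))
# (coordinates taken from the sample data comment above; the result depends on EARTH_RADIUS).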
if __name__ == '__main__':
arg = sys.argv[-1] # get input file name
addresses, reqs = [], []
with open(arg, 'r') as input_file:
n = int(input_file.readline())
for i in xrange(n):
line = input_file.readline()
lines = line.split()
address = {'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))}
addresses.append(address)
m = int(input_file.readline())
for i in xrange(m):
line = input_file.readline()
lines = line.split()
req = {'location': (float(lines[0]), float(lines[1])), 'transport': lines[2], 'time': int(lines[3])}
reqs.append(req)
nearby_attraction(addresses, reqs) | 3.140625 | 3 |
config/settings/base.py | hussu010/Backend-1 | 1 | 12798740 | from decouple import config
import os
from datetime import timedelta
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'rest_framework',
'corsheaders',
'djoser',
'v1.users',
'v1.shop',
'v1.item',
'v1.category',
'v1.partner',
'v1.dashboard',
]
SITE_ID = 1
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
],
}
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
AUTH_USER_MODEL = 'users.User'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.AllowAllUsersModelBackend',
]
# Djoser Endpoints Config
DJOSER = {
'USER_CREATE_PASSWORD_RETYPE': True,
'ACTIVATION_URL': '#/activate/{uid}/{token}',
'SEND_ACTIVATION_EMAIL': False,
'SET_PASSWORD_RETYPE': True,
'PASSWORD_RESET_CONFIRM_RETYPE': True,
'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}',
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120),
'REFRESH_TOKEN_LIFETIME': timedelta(days=30),
'UPDATE_LAST_LOGIN': True,
'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule'
}
CORS_ORIGIN_ALLOW_ALL = True
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
| 1.601563 | 2 |
my_lambdata/my_test.py | CurdtMillion/Lambdata-DSPT6 | 0 | 12798741 | <filename>my_lambdata/my_test.py
import unittest
from my_mod import enlarge, decimate
class TestMathFunctions(unittest.TestCase):
def test_enlarge(self):
self.assertEqual(enlarge(10), 1000)
def test_decimate(self):
self.assertEqual(decimate(100), 90)
if __name__ == '__main__':
unittest.main()
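# For context, a my_mod consistent with the assertions above might look like the
# sketch below; this is inferred from the expected values only, and the real
# my_lambdata module may differ:
#   def enlarge(n): return n * 100       # 10 -> 1000
#   def decimate(n): return n - n // 10  # 100 -> 90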
| 2.765625 | 3 |
kbc.py | JayPrakash916/KBC-CLI-Game | 1 | 12798742 | <gh_stars>1-10
# question_list=["1.who wrote the mahabharat?","2.what is the capital of India?","3.what is apples colour?","4.what is tree colour?","5.how many months there are in a year?","6.who is the computer invetor?","7.What was the of the first computer?","8.When was the search for a modern computer first?","9.when did the great revalution in the field of computer?","10.what is hindi name of computer?","11.computer literacy day is celebrated?","12.what is the full name of CPU","13.which of these is the search engine?","14.which of the input units is?","15.how many bytes of 1 KB are equal to?"]
# first_options=["1.vedavyas","1.Delhi","1.red","1.purple","1.15","1. wannumen","1.ATARIS","1.1949","1.1977","1.garna karnewaala","1.5 Disember","1.Central processing Unit","1.Google","1.mouse","1.1024 byte"]
# second_options=["2.valmiki","2.bhopal","2.blue","2.Green","2.6","2.GS kilvi","2.ENIC","2.1951","2.2000","2.sangndak","2.14.Disember","2.Central problem Unit","2.Yahoo","2.key_board","2.1024 Gega byte"]
# third_options=["3.tulsidas","3.jaipur","3.yello","3.white","3.13","3.charls baibej","3.TANDY","3.1946","3.1955","3.hisab karnewaala","3.22 Disember","3.Central processing Union","3.Baidu","3.scanner","3.1024 mega byte"]
# fourth_options=["4.non of the above","4.chandigarh","4.black","4.pink","4.12","4.non of the above","4.NOVELLA","4.1947","4.1960","4.parigadak","4.2 Disember","4.non of the above","4.Wolfrom Alpha","4.non of the above","4.non of the above"]
# win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000]
# ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1]
# for i in range(len(question_list)):
# print question_list[i], len(question_list[i])
# print first_options[i]
# print second_options[i]
# print third_options[i]
# print fourth_options[i]
# user = int(raw_input("Enter the correct option "))
# if user == ans_key[i]:
# print "App jeet gaaye,"
# print "win_Rs",win_Rs[i+1]
# else:
# print "App haar gaaye "
# print "total_Rs",win_Rs[i]
# break
# if i==4:
# print "congrats! Aapka padaav pura ho gaya hai"
# print " "
# elif i==9:
# print "congrats! Aapka padaav pura ho gaya hai"
# print " "
# print " "
# print "Congratulation Aap",win_Rs[i],"Aap etane rupees jeet chuke hai"
####kbc2###
# question_list = ["1.How many continents are there?", "2.What is the capital of India?", "3.NG mei kaun se course padhaya jaata hai?"]
# options_list = [["Four", "Nine", "Seven", "Eight"],["Chandigarh", "Bhopal", "Chennai", "Delhi"],["Software Engineering", "Counseling", "Tourism", "Agriculture"]]
# solution_list = [3, 4, 1]
# lifeline = 0
# for i in range(len(question_list)):
# print(question_list[i]),len(question_list[i])
# print(1,options_list[i][0])
# print(2,options_list[i][1])
# print(3,options_list[i][2])
# print(4,options_list[i][3])
# user = int(input("Enter the correct option "))
# if user == solution_list[i]:
# print("congrats! Aapka answer sahi hai")
# print()
# elif user == 5050:
# if lifeline == 0:
# lifeline+=1
# a = solution_list[i]-1
# print(question_list[i])
# print(1,options_list[i][a])
# print(2,options_list[i][i])
# user_input = int(input("Enter the correct option "))
# if user_input == 1:
# print("Congratulation Aapka answer sahi hai")
# print()
# else:
# print("Aap lifelife use kr chuke hai")
# print()
# else:
# print("sadly! Aapka jawab galat hai")
| 2.6875 | 3 |
clumpy/__init__.py | bkimmig/clump | 0 | 12798743 | <filename>clumpy/__init__.py
__version__ = 1.0
from .em import *
from .functions_em import *
from .py3 import *
from .rotation import * | 1.179688 | 1 |
ztfin2p3/calibration/flat.py | MickaelRigault/ztfin2p3 | 0 | 12798744 | <filename>ztfin2p3/calibration/flat.py
""" library to build the ztfin2p3 pipeline screen flats """
import os
import numpy as np
import dask
import dask.array as da
import dask.dataframe  # so dask.dataframe.core.Series is resolvable in build_header()
import warnings
from astropy.io import fits
from ztfimg.base import _Image_, FocalPlane
LED_FILTER = {"zg":[2,3,4,5],
"zr":[7,8,9,10],
"zi":[11,12,13],
}
def ledid_to_filtername(ledid):
""" """
for f_,v_ in LED_FILTER.items():
if int(ledid) in v_:
return f_
raise ValueError(f"Unknown led with ID {ledid}")
def get_build_datapath(date, ccdid=None, ledid=None, groupby="day"):
""" """
# IRSA metadata
from ..metadata import get_rawmeta
from ..io import get_filepath
meta = get_rawmeta("flat", date, ccdid=ccdid, ledid=ledid, getwhat="filepath", in_meta=True)
# Parsing out what to do:
if groupby == "day":
meta[groupby] = meta.filefracday.astype("str").str[:8]
elif groupby == "month":
meta[groupby] = meta.filefracday.astype("str").str[:6]
else:
raise ValueError(f"Only groupby day or month implemented: {groupby} given")
datapath = meta.groupby([groupby,"ccdid","ledid"])["filepath"].apply(list).reset_index()
datapath["filtername"] = datapath["ledid"].apply(ledid_to_filtername)
datapath["fileout"] = [get_filepath("flat", str(s_[groupby]),
ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername)
for id_, s_ in datapath.iterrows()]
return datapath
def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs):
""" """
if not assume_exist:
from ztfquery import io
outs = []
for i_, s_ in build_dataframe.iterrows():
#
fileout = s_.fileout
os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed
files = s_["filepath"]
if not assume_exist:
files = io.bulk_get_file(files)
#
bflat = FlatBuilder.from_rawfiles(files, persist=False)
data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs)
output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite)
outs.append(output)
return outs
class Flat( _Image_ ):
SHAPE = 6160, 6144
QUADRANT_SHAPE = 3080, 3072
def __init__(self, data, header=None, use_dask=True):
""" """
_ = super().__init__(use_dask=use_dask)
self.set_data(data)
if header is not None:
self.set_header(header)
# ============== #
# I/O #
# ============== #
@classmethod
def from_filename(cls, filename, use_dask=True, assume_exist=True):
""" loads the object given the input file.
Parameters
----------
assume_exist: [bool]
Shall this run ztfquery.io.get_file() ?
"""
from ztfquery import io
basename = os.path.basename(filename)
if not basename.startswith("ztfin2p3"):
filename = io.get_file(filename)
if ".fits" in basename:
return cls.read_fits(filename, use_dask=use_dask)
else:
raise NotImplementedError(f"Only fits file loader implemented (read_fits) ; {filename} given")
@classmethod
def from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs):
""" """
from ..io import get_filepath
filename = get_filepath("flat", date, ccdid=ccdid, ledid=ledid)
return cls.from_filename(filename, use_dask=use_dask, **kwargs)
@classmethod
def read_fits(cls, fitsfile, use_dask=True):
""" """
if use_dask:
data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile),
shape=cls.SHAPE, dtype="float")
header= dask.delayed(fits.getheader)(fitsfile)
else:
data = fits.getdata(fitsfile)
header= fits.getheader(fitsfile)
this = cls(data=data, header=header, use_dask=use_dask)
this._filename = fitsfile
return this
@classmethod
def build_from_rawfiles(cls, rawfiles, **kwargs):
""" """
bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False)
data, header = bflat.build(set_it=False, **kwargs)
return cls(data, header=None, use_dask=True)
# ============== #
# Method #
# ============== #
def get_quadrant_data(self, qid, **kwargs):
""" **kwargs goes to get_data() this then split the data.
Parameters
----------
qid: [int or None/'*']
which quadrant you want ?
- int: 1,2,3 or 4
            - None or '*'/'all': all quadrants returned as a list [1,2,3,4]
**kwargs goes to get_data()
Returns
-------
ndarray (numpy or dask)
"""
if qid in ["*","all"]:
qid = None
if qid is not None:
qid = int(qid)
dataccd = self.get_data(**kwargs)
# this accounts for all rotation and rebin did before
qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype="int")
if qid == 1:
data_ = dataccd[qshape[0]:, qshape[1]:]
elif qid == 2:
data_ = dataccd[qshape[0]:, :qshape[1]]
elif qid == 3:
data_ = dataccd[:qshape[0], :qshape[1]]
elif qid == 4:
data_ = dataccd[:qshape[0], qshape[1]:]
elif qid is None or qid in ["*","all"]:
data_ = [dataccd[qshape[0]:, qshape[1]:],
dataccd[qshape[0]:, :qshape[1]],
dataccd[:qshape[0], :qshape[1]],
dataccd[:qshape[0], qshape[1]:]
]
else:
raise ValueError(f"qid must be 1->4 {qid} given")
return data_
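# Minimal usage sketch for the Flat class above (the date, ledid and ccdid values
# are placeholders, not real calibration data):
#   flat = Flat.from_date("20200601", ledid=2, ccdid=1, use_dask=True)
#   q1 = flat.get_quadrant_data(1)        # single quadrant array
#   qs = flat.get_quadrant_data("all")    # list of the four quadrant arrays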
class FlatFocalPlane( FocalPlane ):
@classmethod
def from_filenames(cls, flatfilenames, use_dask=True, **kwargs):
""" """
this = cls(use_dask=use_dask)
for file_ in flatfilenames:
ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs)
ccdid = int(file_.split("_")[-3].replace("c",""))
this.set_ccd(ccd_, ccdid=ccdid)
this._filenames = flatfilenames
return this
@classmethod
def from_date(cls, date, ledid, use_dask=True, **kwargs):
""" """
from ..io import get_filepath
ccdids = np.arange(1,17)
filenames = [get_filepath("flat", date, ccdid=ccdid_, ledid=ledid)
for ccdid_ in ccdids]
return cls.from_filenames(filenames, use_dask=use_dask, **kwargs)
# ============= #
# Methods #
# ============= #
def get_quadrant_data(self, rcid, **kwargs):
""" """
ccdid, qid = self.rcid_to_ccdid_qid(rcid)
return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs)
def get_quadrant(self, *args, **kwargs):
""" """
        raise NotImplementedError("get_quadrant() is not usable as flats are CCD-based. See get_quadrant_data().")
# ==================== #
# #
# Flat Builder #
# #
# ==================== #
from .builder import CalibrationBuilder
class FlatBuilder( CalibrationBuilder ):
# -------- #
# BUILDER #
# -------- #
def build(self, corr_nl=True, corr_overscan=True, clipping=True,
set_it=False, inclheader=True, **kwargs):
""" """
return super().build(corr_nl=corr_nl,
corr_overscan=corr_overscan,
clipping=clipping,
set_it=set_it, inclheader=inclheader,
**kwargs)
def build_header(self, keys=None, refid=0, inclinput=False):
""" """
from astropy.io import fits
if keys is None:
keys = ["ORIGIN","OBSERVER","INSTRUME","IMGTYPE","EXPTIME",
"CCDSUM","CCD_ID","CCDNAME","PIXSCALE","PIXSCALX","PIXSCALY",
"FRAMENUM","ILUM_LED", "ILUMWAVE", "PROGRMID","FILTERID",
"FILTER","FILTPOS","RA","DEC", "OBSERVAT"]
header = self.imgcollection.get_singleheader(refid, as_serie=True)
if type(header) == dask.dataframe.core.Series:
header = header.compute()
header = header.loc[keys]
newheader = fits.Header(header.loc[keys].to_dict())
newheader.set(f"NINPUTS",self.imgcollection.nimages, "num. input images")
if inclinput:
basenames = self.imgcollection.filenames
for i, basename_ in enumerate(basenames):
newheader.set(f"INPUT{i:02d}",basename_, "input image")
return newheader
| 2.5625 | 3 |
python/2020/day4/part1.py | CalebRoberts65101/AdventOfCode | 0 | 12798745 | <reponame>CalebRoberts65101/AdventOfCode
with open('python\\2020\\day4\\data.txt') as f:
all_text = f.read()
chuncks = all_text.split('\n\n')
# print(chuncks)
valid_count = 0
# for each chunck split and check if valid
for text in chuncks:
parts = text.split()
print(text)
print(parts)
parts_found = 0
has_cid = False
invalid = False
for part in parts:
parts = part.split(':')
left = parts[0]
right = parts[1]
print(left, right)
if left == 'ecl':
print( 'ecl', parts_found)
valid_eye_color = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if right in valid_eye_color:
parts_found +=1
else:
print('invalid eye')
invalid = True
elif left == 'pid':
print( 'pid', parts_found)
if len(right) == 9:
parts_found +=1
else:
print('invalid pid')
invalid = True
elif left == 'eyr':
print( 'eyr', parts_found)
if len(right) == 4 and (int(right) >= 2020 and int(right) <= 2030):
parts_found +=1
else:
print('invalid eyr')
invalid = True
elif left == 'hcl':
print( 'hcl', parts_found)
valid = True
valid_chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
valid = valid and right[0] == '#'
valid = valid and right[1] in valid_chars
valid = valid and right[2] in valid_chars
valid = valid and right[3] in valid_chars
valid = valid and right[4] in valid_chars
valid = valid and right[5] in valid_chars
valid = valid and right[6] in valid_chars
if valid:
parts_found += 1
else:
print('invalid hcl')
invalid = True
elif left == 'byr':
print( 'byr', parts_found)
if len(right) == 4 and (int(right) >= 1920 and int(right) <= 2002):
parts_found +=1
else:
print('invalid byr')
invalid = True
elif left == 'iyr':
print( 'iyr', parts_found)
if len(right) == 4 and (int(right) >= 2010 and int(right) <= 2020):
parts_found +=1
else:
print('invalid iyr')
invalid = True
elif left == 'cid':
has_cid = True
elif left == 'hgt':
print('hgt', parts_found)
num = right[:-2]
unit = right[-2:]
if unit == 'cm':
if int(num) >= 150 and int(num) <=193:
parts_found +=1
else:
print('invalid cm')
invalid = True
elif unit == 'in':
if int(num) >= 59 and int(num) <= 76:
parts_found +=1
else:
                    print('invalid in')
invalid = True
else:
print('invalid unit')
invalid = True
print(parts_found, invalid)
if parts_found == 7 and not invalid:
valid_count+=1
print("valid passports:", valid_count)
# end part 1
| 3.53125 | 4 |
scripts/make_nvz.py | NON906/NVC_train | 0 | 12798746 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import zipfile
def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None):
if pitch_file is not None:
files = [nvm_file, target_file, pitch_file]
arc_names = ['target.nvm', 'target.pb', 'pitch.pb']
else:
files = [nvm_file, target_file]
arc_names = ['target.nvm', 'target.pb']
with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
for file_loop in range(len(arc_names)):
new_zip.write(files[file_loop], arcname=arc_names[file_loop])
if __name__ == '__main__':
output_file = 'outputs/target.nvz'
if len(sys.argv) > 1:
output_file = sys.argv[1]
nvm_file = 'outputs/target.nvm'
target_file = 'outputs/target.pb'
pitch_file = 'outputs/pitch.pb'
if len(sys.argv) == 5:
nvm_file = sys.argv[2]
target_file = sys.argv[3]
pitch_file = sys.argv[4]
make_nvz_main(output_file, nvm_file, target_file, pitch_file)
| 2.84375 | 3 |
mutacc/cli/remove_command.py | northwestwitch/mutacc | 1 | 12798747 | import click
from mutacc.mutaccDB.remove_case import remove_case_from_db
@click.command('remove')
@click.argument('case_id')
@click.pass_context
def remove_command(context, case_id):
"""
Deletes case from mutacc DB
"""
adapter = context.obj['adapter']
remove_case_from_db(adapter, case_id)
| 2.09375 | 2 |
src/masonite/authentication/guards/__init__.py | cercos/masonite | 1,816 | 12798748 | <reponame>cercos/masonite
from .WebGuard import WebGuard
| 1.085938 | 1 |
Reversing/FullColor.py | LeanVel/Tools | 130 | 12798749 | # encoding: utf-8
# http://www.hexblog.com/?p=120
# Default IDA Pro Paths:
# MAC /Applications/IDA\ Pro\ X/idaq.app/Contents/MacOS/plugins/
# Windows C:\Program Files (x86)\IDA X\plugins
# to make it autoexec on openfile
# add this to plugins.cfg
# ; Other plugins
#FullColor FullColor.py 0 0 SILENT
# thanks @JR0driguezB for help :)
from __future__ import print_function
from idautils import Heads
from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM
import idaapi
#idaapi.auto_wait()
PLUGIN_TEST = 1
class FullColor_t(idaapi.plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Set colors :)"
help = "No help needed"
wanted_name = "FullColor"
wanted_hotkey = ""
def init(self):
#idaapi.msg("init() called!\n")
#self.run(0)
return idaapi.PLUGIN_OK
def run(self, arg=0):
print("hell2")
idaapi.msg("run() called with %d!\n" % arg)
heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea()))
funcCalls = []
xor = []
antiVM = []
for i in heads:
# Color the Calls off-white
if print_insn_mnem(i) == "call":
funcCalls.append(i)
# Color Anti-VM instructions Red and print their location
elif print_insn_mnem(i) in ("sidt", "sgdt", "sldt", "smsw", "str", "in", "cpuid"):
antiVM.append(i)
# Color non-zeroing out xor instructions Orange
elif print_insn_mnem(i) == "xor" and (print_operand(i,0) != print_operand(i,1)):
xor.append(i)
print("Number of calls: %d" % (len(funcCalls)))
for i in funcCalls:
set_color(i, CIC_ITEM, 0xc7fdff)
print("Number of potential Anti-VM instructions: %d" % (len(antiVM)))
for i in antiVM:
print("Anti-VM potential at %x" % i)
set_color(i, CIC_ITEM, 0x0000ff)
print("Number of xor: %d" % (len(xor)))
for i in xor:
set_color(i, CIC_ITEM, 0x00a5ff)
def term(self):
idaapi.msg("term() called!\n")
def PLUGIN_ENTRY():
return FullColor_t()
if PLUGIN_TEST:
# Create form
f = PLUGIN_ENTRY()
f.init()
f.run()
f.term()
| 2.234375 | 2 |
random_sample.py | zimolzak/glacier-tools | 1 | 12798750 | import os
import random
import subprocess
MYPATH = './out-of-dropbox-2020-08to12-'
FILES = os.listdir(MYPATH)
INP = ''
while INP != 'q':
INP = input('q to quit, enter anything else to continue')
file_choice = random.choice(FILES)
pathname_choice = MYPATH + '/' + file_choice
subprocess.run(["open", pathname_choice])
| 2.6875 | 3 |