hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
406a21613d9b1dbc55f543cfe42bc9ef9b68a79c | 1,749 | py | Python | tests/bugs/core_2678_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
]
| 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_2678_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
]
| 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_2678_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
]
| 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_2678
# title: Full outer join cannot use available indices (very slow execution)
# decription:
# tracker_id: CORE-2678
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
create table td_data1 (
c1 varchar(20) character set win1251 not null collate win1251,
c2 integer not null,
c3 date not null,
d1 float not null
);
create index idx_td_data1 on td_data1(c1,c2,c3);
commit;
create table td_data2 (
c1 varchar(20) character set win1251 not null collate win1251,
c2 integer not null,
c3 date not null,
d2 float not null
);
create index idx_td_data2 on td_data2(c1,c2,c3);
commit;
set planonly;
select
d1.c1, d2.c1,
d1.c2, d2.c2,
d1.c3, d2.c3,
coalesce(sum(d1.d1), 0) t1,
coalesce(sum(d2.d2), 0) t2
from td_data1 d1
full join td_data2 d2
on
d2.c1 = d1.c1
and d2.c2 = d1.c2
and d2.c3 = d1.c3
group by
d1.c1, d2.c1,
d1.c2, d2.c2,
d1.c3, d2.c3;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PLAN SORT (JOIN (JOIN (D2 NATURAL, D1 INDEX (IDX_TD_DATA1)), JOIN (D1 NATURAL, D2 INDEX (IDX_TD_DATA2))))
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| 23.958904 | 109 | 0.619211 | 0 | 0 | 0 | 0 | 183 | 0.104631 | 0 | 0 | 1,290 | 0.737564 |
406ae0237fa650007fb4d1f31a942b053762212b | 962 | py | Python | application/model/entity/category.py | UniversidadeDeVassouras/labproghiper-2020.1-MatheusTelles-p1 | d0d81fc82d031f7add9e38add765aad0c404ee35 | [
"Apache-2.0"
]
| 1 | 2020-07-15T14:23:30.000Z | 2020-07-15T14:23:30.000Z | application/model/entity/category.py | UniversidadeDeVassouras/labproghiper-2020.1-MatheusTelles-p1 | d0d81fc82d031f7add9e38add765aad0c404ee35 | [
"Apache-2.0"
]
| null | null | null | application/model/entity/category.py | UniversidadeDeVassouras/labproghiper-2020.1-MatheusTelles-p1 | d0d81fc82d031f7add9e38add765aad0c404ee35 | [
"Apache-2.0"
]
| null | null | null | from flask import current_app
class Category:
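    """Simple data holder for a video category (id, name, description, thumb)."""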
    def __init__(self, id, name, description, thumb):
        self._id = id
        self._name = name
        self._description = description
        self._thumb = thumb
def setId(self, id):
self._id = id
def getId(self):
return self._id
def setName(self, name):
self._name = name
def getName(self):
return self._name
def setDescription(self, description):
self._description = description
def getDescription(self):
return self._description
def setThumb(self, thumb):
self._thumb = thumb
def getThumb(self):
return self._thumb
def get_video_category_id (self):
videos = current_app.config ['videos']
videos_category = []
for i, video in enumerate (videos.get_video_list()):
if video.getCategory_id () == self.getId():
videos_category.append (video)
return videos_category | 24.666667 | 60 | 0.613306 | 931 | 0.967775 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.008316 |
406b1ddec2cc13a47e3515d6e9a2b41aa445fd1b | 76 | py | Python | cookietemple/create/templates/cli/cli_python/{{ cookiecutter.project_slug_no_hyphen }}/tests/__init__.py | e2jk/cookietemple | 86af5622cdabe9ae446048536571898716939f29 | [
"Apache-2.0"
]
| 117 | 2020-11-23T02:07:23.000Z | 2022-03-21T16:14:53.000Z | cookietemple/create/templates/cli/cli_python/{{ cookiecutter.project_slug_no_hyphen }}/tests/__init__.py | e2jk/cookietemple | 86af5622cdabe9ae446048536571898716939f29 | [
"Apache-2.0"
]
| 226 | 2020-10-19T19:58:13.000Z | 2022-03-27T18:54:30.000Z | cookietemple/create/templates/cli/cli_python/{{ cookiecutter.project_slug_no_hyphen }}/tests/__init__.py | e2jk/cookietemple | 86af5622cdabe9ae446048536571898716939f29 | [
"Apache-2.0"
]
| 9 | 2020-11-24T12:45:10.000Z | 2022-03-13T15:58:23.000Z | """Test suite for the {{ cookiecutter.project_slug_no_hyphen }} package."""
| 38 | 75 | 0.736842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.986842 |
406bcb88801f876f8613c7d8e41183ccf8efc7dd | 153 | py | Python | bricks/ev3dev/modules/pybricks/robotics.py | ZPhilo/pybricks-micropython | bf3072b6f7dd87b60e50d7c2130ca3c393a5709f | [
"MIT"
]
| 115 | 2020-06-15T16:43:14.000Z | 2022-03-21T21:11:57.000Z | bricks/ev3dev/modules/pybricks/robotics.py | ZPhilo/pybricks-micropython | bf3072b6f7dd87b60e50d7c2130ca3c393a5709f | [
"MIT"
]
| 83 | 2020-06-17T17:19:29.000Z | 2022-03-08T18:50:35.000Z | bricks/ev3dev/modules/pybricks/robotics.py | BertLindeman/pybricks-micropython | 8f22a99551100e66ddf08d014d9f442f22b33b4d | [
"MIT"
]
| 40 | 2020-06-15T18:36:39.000Z | 2022-03-28T13:22:43.000Z | # SPDX-License-Identifier: MIT
# Copyright (c) 2018-2020 The Pybricks Authors
"""Pybricks robotics module."""
from _pybricks.robotics import DriveBase
| 21.857143 | 46 | 0.771242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.699346 |
406bff6901669314a484753b5d5e8d18397cb7b2 | 3,693 | py | Python | flask-app/web_app/storage_manager/storage_manager.py | PetrMokrov/back_end_project | 4dd58d61e637d10872fe58a154dc89f6d0829d94 | [
"MIT"
]
| null | null | null | flask-app/web_app/storage_manager/storage_manager.py | PetrMokrov/back_end_project | 4dd58d61e637d10872fe58a154dc89f6d0829d94 | [
"MIT"
]
| null | null | null | flask-app/web_app/storage_manager/storage_manager.py | PetrMokrov/back_end_project | 4dd58d61e637d10872fe58a154dc89f6d0829d94 | [
"MIT"
]
| 1 | 2019-04-02T12:30:13.000Z | 2019-04-02T12:30:13.000Z | #!/usr/bin/env python
import psycopg2
import time
from ..models import User
class StorageManager:
def __init__(self):
self.conn = None
self._connect()
self._create_table()
def _connect(self):
while True:
try:
self.conn = psycopg2.connect(
host='storage',
database='app_storage',
user='admin',
password='admin'
)
except psycopg2.Error:
print('Cannot connect to database, sleeping 3 seconds')
time.sleep(3)
else:
break
def _create_table(self):
while True:
try:
cursor = self.conn.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS users \
(id SERIAL PRIMARY KEY, login VARCHAR(128), \
email VARCHAR(128), hash_password VARCHAR(132), \
confirmed BOOLEAN)')
except psycopg2.Error:
print('Database error, reconnecting')
self._connect()
else:
break
def insert(self, user):
'''
If insert is success, the function returns true,
Else, it returns false
'''
while True:
try:
if self.select(user.login, category='login') is not None:
return False
cursor = self.conn.cursor()
cursor.execute('INSERT INTO users(login, email, hash_password, confirmed) \
VALUES (%s, %s, %s, %s)', (user.login, user.email, user.hash_password, user.confirmed))
self.conn.commit()
return True
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
def select(self, value, category='login'):
'''
The function returns None, if there is no user with very value of
category, else it returns User instance
'''
while True:
try:
cursor = self.conn.cursor()
cursor.execute('SELECT * FROM users WHERE %s = %%s' % category, (value,))
self.conn.commit()
fetch = cursor.fetchall()
if len(fetch) == 0:
return None
user = User(fetch[0][1], fetch[0][2])
user.id = fetch[0][0]
user.hash_password = fetch[0][3]
user.confirmed = fetch[0][4]
return user
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
def confirm(self, value, category='login'):
'''
The function sets \'confirmed\' parameter of the user with very value
of category as True\n
If such user not found, returns False, else returns True
'''
while True:
try:
if self.select(value, category=category) is not None:
cursor = self.conn.cursor()
cursor.execute('UPDATE users SET confirmed = TRUE WHERE %s = %%s' % category, (value,))
self.conn.commit()
return True
else:
return False
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
| 33.572727 | 107 | 0.479285 | 3,614 | 0.978608 | 0 | 0 | 0 | 0 | 0 | 0 | 1,072 | 0.290279 |
406c19e470ed1397c6d2535e8a38599b7798d3a3 | 2,906 | py | Python | custom/ahex.py | piyush1104/ColorHelper | 7321cc2642f82c701e3c9c1ff1ebdad3a8ff19dc | [
"MIT"
]
| null | null | null | custom/ahex.py | piyush1104/ColorHelper | 7321cc2642f82c701e3c9c1ff1ebdad3a8ff19dc | [
"MIT"
]
| null | null | null | custom/ahex.py | piyush1104/ColorHelper | 7321cc2642f82c701e3c9c1ff1ebdad3a8ff19dc | [
"MIT"
]
| null | null | null | """Custon color that looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""
from coloraide.css.colors import Color, SRGB
from coloraide.colors import _parse as parse
from coloraide import util
import copy
import re
class ASRGB(SRGB):
"""SRGB that looks for alpha first in hex format."""
MATCH = re.compile(r"(?i)\#(?:{hex}{{8}}|{hex}{{6}})\b".format(**parse.COLOR_PARTS))
@classmethod
def match(cls, string, start=0, fullmatch=True):
"""Match a CSS color string."""
m = cls.MATCH.match(string, start)
if m is not None and (not fullmatch or m.end(0) == len(string)):
return cls.split_channels(m.group(0)), m.end(0)
return None, None
@classmethod
def translate_channel(cls, channel, value):
"""Translate channel string."""
if -1 <= channel <= 2:
return parse.norm_hex_channel(value)
else:
raise ValueError("Unexpected channel index of '{}'".format(channel))
@classmethod
def split_channels(cls, color):
"""Split channels."""
if len(color) == 9:
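            # Nine characters means "#AARRGGBB": the alpha pair comes first,
            # followed by the red, green and blue pairs.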
return (
cls.translate_channel(0, "#" + color[3:5]),
cls.translate_channel(1, "#" + color[5:7]),
cls.translate_channel(2, "#" + color[7:]),
cls.translate_channel(-1, "#" + color[1:3]),
)
else:
return (
cls.translate_channel(0, "#" + color[1:3]),
cls.translate_channel(1, "#" + color[3:5]),
cls.translate_channel(2, "#" + color[5:]),
1.0
)
def to_string(
self, *, options=None, alpha=None, precision=util.DEF_PREC, fit=True, **kwargs
):
"""Convert to Hex format."""
if options is None:
options = {}
show_alpha = alpha is not False and (alpha is True or self.alpha < 1.0)
template = "#{:02x}{:02x}{:02x}{:02x}" if show_alpha else "#{:02x}{:02x}{:02x}"
if options.get("hex_upper"):
template = template.upper()
# Always fit hex
coords = self.fit_coords()
if show_alpha:
value = template.format(
int(util.round_half_up(self.alpha * 255.0)),
int(util.round_half_up(coords[0] * 255.0)),
int(util.round_half_up(coords[1] * 255.0)),
int(util.round_half_up(coords[2] * 255.0))
)
else:
value = template.format(
int(util.round_half_up(coords[0] * 255.0)),
int(util.round_half_up(coords[1] * 255.0)),
int(util.round_half_up(coords[2] * 255.0))
)
return value
class ColorAlphaHex(Color):
"""Color object whose sRGB color space looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""
CS_MAP = copy.copy(Color.CS_MAP)
CS_MAP["srgb"] = ASRGB
| 33.022727 | 100 | 0.547144 | 2,682 | 0.922918 | 0 | 0 | 1,214 | 0.417756 | 0 | 0 | 509 | 0.175155 |
406c1c0028a84aba8bcd01a2421dbf11b583f400 | 2,115 | py | Python | source_code/terrain.py | Wiolarz/Console_PY_dungeon | cbf3b9a68251b9ce620aac1f4ca36361160186ea | [
"Apache-2.0"
]
| null | null | null | source_code/terrain.py | Wiolarz/Console_PY_dungeon | cbf3b9a68251b9ce620aac1f4ca36361160186ea | [
"Apache-2.0"
]
| 2 | 2021-11-29T16:26:03.000Z | 2021-11-29T16:27:14.000Z | source_code/terrain.py | Wiolarz/Console_PY_dungeon | cbf3b9a68251b9ce620aac1f4ca36361160186ea | [
"Apache-2.0"
]
| null | null | null | import random
import jobs
import balance
from economy import roman_numbers
class Earth:
def __init__(self):
self.current_day = 1
self.main_quest = None
self.amount_location = 7 # max 8
self.locations = []
#
self.location_names = []
def new_quest(self):
self.main_quest = jobs.Quest()
def generate_location(self):
x = 0
for place in range(self.amount_location):
self.locations.append(Location(place + 1, self.amount_location, x)) # level, overall location number
x += 1
def name_generator():
prefix = ["", "Green", "Dark", "Toxic", "Inferno", "Orc", "Goblin", "Dragon"]
core = ["Forest", "Cave", "Dungeon", "Town", "Village", "Mountains", "Graveyard"]
# suffix = ["", ""]
new_unique = False
new_name = ""
cheking_wrong_balance = 0
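    # Keep sampling prefix + core combinations until an unused name turns up;
    # give up if uniqueness looks impossible for the configured number of locations.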
while not new_unique:
cheking_wrong_balance += 1
if cheking_wrong_balance > balance.world.amount_location * 5:
print("Error: cannot create random new location name")
exit(343)
new_name = prefix[random.randint(0, len(prefix)-1)] + " " + core[random.randint(0, len(core)-1)]
if new_name in balance.world.location_names:
new_unique = False
else:
new_unique = True
balance.world.location_names.append(new_name)
return new_name
class Location:
def __init__(self, location_level, amount, id_x):
self.id = id_x
self.name = name_generator()
self.level = location_level
self.quest_level = location_level + 2
if self.quest_level > balance.max_power:
self.quest_level = balance.max_power
self.chest_gold = location_level * balance.medium
self.density = 5 # number of events in location
self.chest_chance = 3 # %(10) chest chance
self.quest_enemy = 5 # %(10) chance of quest related enemy
self.location_names = []
self.amount_location = amount
def short_print(self):
return self.name + " " + roman_numbers(self.level)
| 27.467532 | 113 | 0.613239 | 1,214 | 0.573995 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.147991 |
406d45c5b1e3edd5a8eec1e610e28a22eb3881b2 | 2,190 | py | Python | entrepreneurial_property/models/scientificpark.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
]
| 1 | 2019-05-26T22:24:01.000Z | 2019-05-26T22:24:01.000Z | entrepreneurial_property/models/scientificpark.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
]
| 6 | 2019-01-22T14:53:43.000Z | 2020-09-22T16:20:28.000Z | entrepreneurial_property/models/scientificpark.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
]
| null | null | null | from django.db import models
from .media import Water
from .media import Electricity
from .media import Gas
from .media import WasteWater
from .media import Telecommunication
from .generic import Attachment
from .generic import Photo
from .generic import Location as EstateLocation
from cigeo.models import GenericNote as EstateNote
class ScientificParkTelecommunication(Telecommunication):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkWasteWater(WasteWater):
diameter = capacity = None
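    # Assigning None to inherited fields removes them from this model
    # (Django permits this for fields declared on abstract parent models).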
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkAttachment(Attachment):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkPhoto(Photo):
green_field = models.ForeignKey(
"ScientificPark",
on_delete=models.CASCADE
)
pass
class ScientificParkTechnologicalWater(Water):
distance = None
diameter = None
capacity = None
well = None
well_capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkElectricity(Electricity):
distance = None
capacity = None
current = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkDrinkWater(Water):
distance = None
diameter = None
capacity = None
well = None
well_capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkGas(Gas):
diameter = pressure = capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkLocation(EstateLocation):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkGenericNote(EstateNote):
green_field = models.ForeignKey(
"ScientificPark",
on_delete=models.CASCADE
)
| 20.660377 | 57 | 0.696347 | 1,826 | 0.83379 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.073059 |
406e0a83e413ef1e4bba7c5add21f6292e7188e7 | 2,328 | py | Python | pusion/input_output/file_input_output.py | IPVS-AS/pusion | 58ef24b602f611192430f6005ecf5305f878f412 | [
"MIT"
]
| 5 | 2021-07-24T16:05:12.000Z | 2022-01-21T15:06:03.000Z | pusion/input_output/file_input_output.py | IPVS-AS/pusion | 58ef24b602f611192430f6005ecf5305f878f412 | [
"MIT"
]
| null | null | null | pusion/input_output/file_input_output.py | IPVS-AS/pusion | 58ef24b602f611192430f6005ecf5305f878f412 | [
"MIT"
]
| 2 | 2021-07-24T16:05:14.000Z | 2022-03-25T21:24:40.000Z | import json
import ntpath
import shutil
from pathlib import Path
import pickle5
def load_pickle_files_as_data(file_paths):
"""
Load pickle files containing decision outputs as an data array.
:param file_paths: A List of file paths to the individual pickle files.
:return: A data array.
"""
data = []
for file_path in file_paths:
with (open(file_path, "rb")) as handle:
data.append(pickle5.load(handle))
return data
def dump_pusion_data(data, file_path):
"""
Dump classification output data to the given file using pickle.
:param data: A data dictionary.
:param file_path: Location of the output pickle file.
"""
with open(file_path, "wb") as handle:
pickle5.dump(data, handle, protocol=pickle5.HIGHEST_PROTOCOL)
def dump_data_as_txt(data, name, identifier):
"""
Dump a data dictionary to the JSON file for a given evaluation unit.
:param data: A data dictionary.
:param name: The file name.
:param identifier: The identifier of the current evaluation unit (e.g. date/time).
"""
directory = "res/eval_" + identifier
Path(directory).mkdir(parents=True, exist_ok=True)
with open(directory + "/" + name + ".txt", 'w') as file:
file.write(json.dumps(data, indent=4))
def save(plot_instance, name, identifier):
"""
Save the plot instance for a given evaluation unit to the SVG and the PDF file, respectively.
:param plot_instance: `matplotlib.pyplot`-instance.
:param name: The file name.
:param identifier: The identifier of the current evaluation unit (e.g. date/time).
"""
directory = "res/eval_" + identifier
Path(directory).mkdir(parents=True, exist_ok=True)
plot_instance.savefig(directory + "/" + name + ".svg", bbox_inches="tight")
plot_instance.savefig(directory + "/" + name + ".pdf", bbox_inches="tight")
def save_evaluator(file, identifier):
"""
Save the evaluation script for a given evaluation unit.
:param file: The Python file. (E.g. referenced by __file__).
:param identifier: The identifier of the current evaluation unit (e.g. date/time).
"""
directory = "res/eval_" + identifier
Path(directory).mkdir(parents=True, exist_ok=True)
shutil.copy(file, directory + "/" + ntpath.basename(file) + ".txt")
| 32.333333 | 97 | 0.681701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,200 | 0.515464 |
407208de4a5ad6967ea27d59e0496b7b2dfa6fe5 | 747 | py | Python | meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | aGrass0825/meiduo_project | 78c560c1e9a3205d4958ddbe798cd0ab2be41830 | [
"MIT"
]
| null | null | null | meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | aGrass0825/meiduo_project | 78c560c1e9a3205d4958ddbe798cd0ab2be41830 | [
"MIT"
]
| null | null | null | meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | aGrass0825/meiduo_project | 78c560c1e9a3205d4958ddbe798cd0ab2be41830 | [
"MIT"
]
| null | null | null | from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAdminUser
from goods.models import SPU, SPUSpecification
from meiduo_admin.serializers.spus import SPUSimpleSerializer, SPUSpecSerializer
class SPUSimpleView(ListAPIView):
permission_classes = [IsAdminUser]
queryset = SPU.objects.all()
serializer_class = SPUSimpleSerializer
# GET/meiduo_admin/goods/(?P<pk>\d+)/specs/
class SPUSpecView(ListAPIView):
"""获取SPU商品的规格选项数据"""
permission_classes = [IsAdminUser]
# 指定视图类所使用的查询集
def get_queryset(self):
pk = self.kwargs['pk']
specs = SPUSpecification.objects.filter(spu_id=pk)
return specs
# 指定视图类所使用的序列化器类
serializer_class = SPUSpecSerializer
| 24.096774 | 80 | 0.749665 | 540 | 0.657734 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.208283 |
4072139f6fa73549f4c92cc0b2aa6d9bd1e96911 | 1,172 | py | Python | Scientific Computing with Python/Probability Calculator/prob_calculator.py | Fradxyz/FCCProjects | f337ebdfb86605107e0b85d9e83e88ec7ed60778 | [
"MIT"
]
| null | null | null | Scientific Computing with Python/Probability Calculator/prob_calculator.py | Fradxyz/FCCProjects | f337ebdfb86605107e0b85d9e83e88ec7ed60778 | [
"MIT"
]
| null | null | null | Scientific Computing with Python/Probability Calculator/prob_calculator.py | Fradxyz/FCCProjects | f337ebdfb86605107e0b85d9e83e88ec7ed60778 | [
"MIT"
]
| null | null | null | # Hacked by Ry2uko :D
import copy
import random
# Consider using the modules imported above.
class Hat:
def __init__(self, **balls):
self.contents = []
for color in balls:
for n in range(0,balls[color]):
self.contents.append(color)
def draw(self, num):
drawn = []
if num >= len(self.contents):
return self.contents
for n in range(0, num):
if len(self.contents) == 0:
break
randindex = random.randint(0, len(self.contents)-1)
drawn.append(self.contents.pop(randindex))
return drawn
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
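    # Monte Carlo estimate: count the experiments whose draw contains at least
    # the expected number of balls of every requested color, then divide by N.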
m = 0
for count in range(0, num_experiments):
hat_copy = copy.deepcopy(hat)
drawn = hat_copy.draw(num_balls_drawn)
valid = True
for color in expected_balls:
if expected_balls[color] > drawn.count(color):
valid = False
break
if valid:
m += 1
return m / num_experiments
if __name__ == '__main__':
# Test here
pass | 26.044444 | 70 | 0.551195 | 564 | 0.481229 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.073379 |
40738ad4ddc2dca3384f1a7a4b98ec684eed9a5c | 1,611 | py | Python | src/frames/add_quantity_frame.py | GolovPavel/ValueConverter | 8492f100667af49fe4bf06eaf0de660513424252 | [
"MIT"
]
| 1 | 2020-09-22T17:10:21.000Z | 2020-09-22T17:10:21.000Z | src/frames/add_quantity_frame.py | GolovPavel/ValueConverter | 8492f100667af49fe4bf06eaf0de660513424252 | [
"MIT"
]
| 1 | 2020-03-06T21:18:10.000Z | 2020-03-06T21:18:10.000Z | src/frames/add_quantity_frame.py | GolovPavel/ValueConverter | 8492f100667af49fe4bf06eaf0de660513424252 | [
"MIT"
]
| null | null | null | import tkinter as tk
from tkinter.messagebox import showerror
from constants.frames import MAIN_FRAME_NAME
from util import add_new_quantity
class AddQuantityFrame(tk.Frame):
def __init__(self, root, controller):
tk.Frame.__init__(self, root)
self.controller = controller
self.main_label = tk.Label(self, text="Добавление новой величины", font="Helvetica 30 bold")
self.main_label.pack(pady=50)
self.info_label = tk.Label(self, text="Введите название величины", font="Helvetica 20")
self.info_label.pack(pady=40)
self.quantity_name_entry = tk.Entry(self, width=24)
self.quantity_name_entry.pack()
self.add_button = tk.Button(self, text="Добавить величину", width=20, height=3, command=self.__add_quantity)
self.add_button.pack(pady=40)
self.back_button = tk.Button(self, text="Назад", width=20, height=3,
command=lambda: self.controller.show_frame(MAIN_FRAME_NAME))
self.back_button.pack()
def __add_quantity(self):
quantity_name = self.quantity_name_entry.get()
if quantity_name == "":
showerror("Название величины", "Введите название величины")
return
if len(quantity_name) > 30:
showerror("Длинное название", "Название величины может содержать не более 30 символов")
return
add_new_quantity(quantity_name)
self.controller.show_frame(MAIN_FRAME_NAME)
def render(self):
self.clear()
def clear(self):
self.quantity_name_entry.delete(0, tk.END)
| 33.5625 | 116 | 0.666667 | 1,632 | 0.918402 | 0 | 0 | 0 | 0 | 0 | 0 | 401 | 0.225661 |
4073fadf1987f151aaa0076f63a670fabd02b58e | 226 | py | Python | setup.py | vwxyzjn/pysc2gym | 7c43e55a8f48be77f53332b73fda7635e6063589 | [
"MIT"
]
| 6 | 2020-09-23T21:31:48.000Z | 2022-03-14T23:59:35.000Z | setup.py | vwxyzjn/pysc2gym | 7c43e55a8f48be77f53332b73fda7635e6063589 | [
"MIT"
]
| 2 | 2022-01-13T03:48:47.000Z | 2022-03-12T00:58:26.000Z | setup.py | vwxyzjn/pysc2gym | 7c43e55a8f48be77f53332b73fda7635e6063589 | [
"MIT"
]
| 1 | 2021-06-28T14:17:11.000Z | 2021-06-28T14:17:11.000Z | from setuptools import setup
import versioneer
setup(name='gym_pysc2',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
install_requires=['gym'] # And any other dependencies foo needs
) | 28.25 | 70 | 0.738938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.238938 |
40747f1fe0cf14a0bae5770661298c543ddc7ac6 | 1,395 | py | Python | Compressed downloads/server.py | Aldair47x/aa | ac49239ff94ec6735b316606482dc366ae52bfe8 | [
"MIT"
]
| null | null | null | Compressed downloads/server.py | Aldair47x/aa | ac49239ff94ec6735b316606482dc366ae52bfe8 | [
"MIT"
]
| null | null | null | Compressed downloads/server.py | Aldair47x/aa | ac49239ff94ec6735b316606482dc366ae52bfe8 | [
"MIT"
]
| null | null | null | import zmq
import sys
import os
import math
def loadFiles(path):
files = {}
dataDir = os.fsencode(path)
for file in os.listdir(dataDir):
filename = os.fsdecode(file)
print("Loading {}".format(filename))
files[filename] = file
return files
def main():
if len(sys.argv) != 3:
print("Error")
exit()
directory = sys.argv[2]
port = sys.argv[1]
context = zmq.Context()
s = context.socket(zmq.REP)
s.bind("tcp://*:{}".format(port))
files = loadFiles(directory)
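    # Request/reply protocol handled below:
    #   {"op": "list"}                           -> {"files": [...]}
    #   {"op": "download", "file": f}            -> {"parts": <number of 32 MiB chunks>}
    #   {"op": "download", "file": f, "part": n} -> raw bytes of chunk n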
while True:
msg = s.recv_json()
if msg["op"] == "list":
s.send_json({"files": list(files.keys())})
elif msg["op"] == "download":
size = 1024*1024*32
filename = msg["file"]
if filename in files:
if not "part" in msg:
file = os.stat(directory + "/" +filename)
s.send_json({"parts": math.ceil(file[6]/size)})
else:
with open(directory + "/" +filename, "rb") as input:
input.seek(size * int(msg["part"]))
data = input.read(size)
s.send(data)
else:
s.send_string("Song does not exits! Marranito")
else:
print("Unsupported action!")
if __name__ == '__main__':
main()
| 26.320755 | 72 | 0.496774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.114695 |
40757d236a917305a24dbe63896ecb49966f293c | 1,618 | py | Python | metric_learn/nca.py | ogrisel/metric-learn | fb6733c190911d2c408bd7f0b8c9b54ff005fa8d | [
"MIT"
]
| null | null | null | metric_learn/nca.py | ogrisel/metric-learn | fb6733c190911d2c408bd7f0b8c9b54ff005fa8d | [
"MIT"
]
| null | null | null | metric_learn/nca.py | ogrisel/metric-learn | fb6733c190911d2c408bd7f0b8c9b54ff005fa8d | [
"MIT"
]
| 2 | 2017-08-02T08:57:50.000Z | 2020-03-20T13:32:54.000Z | """
Neighborhood Components Analysis (NCA)
Ported to Python from https://github.com/vomjom/nca
"""
from __future__ import absolute_import
import numpy as np
from six.moves import xrange
from sklearn.utils.validation import check_X_y
from .base_metric import BaseMetricLearner
EPS = np.finfo(float).eps
class NCA(BaseMetricLearner):
def __init__(self, num_dims=None, max_iter=100, learning_rate=0.01):
self.num_dims = num_dims
self.max_iter = max_iter
self.learning_rate = learning_rate
def transformer(self):
return self.A_
def fit(self, X, y):
"""
X: data matrix, (n x d)
y: scalar labels, (n)
"""
X, labels = check_X_y(X, y)
n, d = X.shape
num_dims = self.num_dims
if num_dims is None:
num_dims = d
# Initialize A to a scaling matrix
A = np.zeros((num_dims, d))
np.fill_diagonal(A, 1./(np.maximum(X.max(axis=0)-X.min(axis=0), EPS)))
# Run NCA
dX = X[:,None] - X[None] # shape (n, n, d)
tmp = np.einsum('...i,...j->...ij', dX, dX) # shape (n, n, d, d)
masks = labels[:,None] == labels[None]
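    # Gradient ascent on the NCA objective: a softmax over negative squared
    # distances in the projected space gives stochastic-neighbour probabilities,
    # and same-label pairs (masks) are pulled together.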
for it in xrange(self.max_iter):
for i, label in enumerate(labels):
mask = masks[i]
Ax = A.dot(X.T).T # shape (n, num_dims)
softmax = np.exp(-((Ax[i] - Ax)**2).sum(axis=1)) # shape (n)
softmax[i] = 0
softmax /= softmax.sum()
t = softmax[:, None, None] * tmp[i] # shape (n, d, d)
d = softmax[mask].sum() * t.sum(axis=0) - t[mask].sum(axis=0)
A += self.learning_rate * A.dot(d)
self.X_ = X
self.A_ = A
self.n_iter_ = it
return self
| 26.966667 | 74 | 0.600742 | 1,310 | 0.809642 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.191595 |
4075a5272343f25994c7b713935ff6736a8b4fb7 | 2,923 | py | Python | rl_repr/batch_rl/evaluation.py | xxdreck/google-research | dac724bc2b9362d65c26747a8754504fe4c615f8 | [
"Apache-2.0"
]
| 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | rl_repr/batch_rl/evaluation.py | xxdreck/google-research | dac724bc2b9362d65c26747a8754504fe4c615f8 | [
"Apache-2.0"
]
| 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | rl_repr/batch_rl/evaluation.py | admariner/google-research | 7cee4b22b925581d912e8d993625c180da2a5a4f | [
"Apache-2.0"
]
| 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy evaluation."""
import typing
import tensorflow.compat.v2 as tf
def evaluate(
env,
policy,
num_episodes = 10,
ctx_length = None,
embed_training_window = None,
state_mask_fn = None, # pylint: disable=g-bare-generic
):
"""Evaluates the policy.
Args:
env: Environment to evaluate the policy on.
policy: Policy to evaluate.
num_episodes: A number of episodes to average the policy on.
ctx_length: number of previous steps to compute context from.
embed_training_window: window size used during embed training.
state_mask_fn: state masking function for partially obs envs.
Returns:
Averaged reward and a total number of steps.
"""
total_timesteps = 0
total_returns = 0.0
def apply_mask(observation):
if state_mask_fn:
return tf.convert_to_tensor(state_mask_fn(observation.numpy()))
return observation
for _ in range(num_episodes):
timestep = env.reset()
if ctx_length:
states = [apply_mask(timestep.observation) for _ in range(ctx_length)]
actions = [
tf.zeros(policy.action_spec.shape)[None, :] for _ in range(ctx_length)
]
rewards = [[0.] for _ in range(ctx_length)]
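      # Pre-fill a fixed-length context of (state, action, reward) entries so a
      # context-conditioned policy always sees ctx_length past steps.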
latent_action = None
i = 0
while not timestep.is_last():
if embed_training_window and (i % embed_training_window == 0 or
embed_training_window <= 2):
latent_action = None
if ctx_length:
states.append(apply_mask(timestep.observation))
if len(states) > ctx_length:
states.pop(0)
actions.pop(0)
rewards.pop(0)
action = policy.act(
tf.stack(states, axis=1),
actions=tf.stack(actions, axis=1),
rewards=tf.stack(rewards, axis=1))
actions.append(action)
else:
if embed_training_window:
action, latent_action = policy.act(
apply_mask(timestep.observation), latent_action=latent_action)
else:
action = policy.act(apply_mask(timestep.observation))
timestep = env.step(action)
if ctx_length:
rewards.append(timestep.reward)
total_returns += timestep.reward[0]
total_timesteps += 1
i += 1
return total_returns / num_episodes, total_timesteps / num_episodes
| 31.771739 | 80 | 0.671912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,093 | 0.373931 |
4075b24c28e51db8658934eede3f2eedb744d8c0 | 4,721 | py | Python | src/nwb_conversion_tools/datainterfaces/ecephys/intan/intandatainterface.py | ben-dichter-consulting/nwbn-conversion-tools | f5641317d2697a3916eeb54f74ce171ed65469ed | [
"BSD-3-Clause"
]
| null | null | null | src/nwb_conversion_tools/datainterfaces/ecephys/intan/intandatainterface.py | ben-dichter-consulting/nwbn-conversion-tools | f5641317d2697a3916eeb54f74ce171ed65469ed | [
"BSD-3-Clause"
]
| 6 | 2020-01-31T13:29:40.000Z | 2020-03-27T13:09:32.000Z | src/nwb_conversion_tools/datainterfaces/ecephys/intan/intandatainterface.py | ben-dichter-consulting/nwb-conversion-tools | f5641317d2697a3916eeb54f74ce171ed65469ed | [
"BSD-3-Clause"
]
| 1 | 2019-11-24T05:08:06.000Z | 2019-11-24T05:08:06.000Z | """Authors: Cody Baker and Ben Dichter."""
from pathlib import Path
import spikeextractors as se
from pynwb.ecephys import ElectricalSeries
from ..baserecordingextractorinterface import BaseRecordingExtractorInterface
from ....utils import get_schema_from_hdmf_class, FilePathType
try:
from pyintan.intan import read_rhd, read_rhs
HAVE_PYINTAN = True
except ImportError:
HAVE_PYINTAN = False
INSTALL_MESSAGE = "Please install pyintan to use this extractor!"
class IntanRecordingInterface(BaseRecordingExtractorInterface):
"""Primary data interface class for converting a IntanRecordingExtractor."""
RX = se.IntanRecordingExtractor
def __init__(self, file_path: FilePathType, verbose: bool = True):
assert HAVE_PYINTAN, INSTALL_MESSAGE
super().__init__(file_path=file_path, verbose=verbose)
if ".rhd" in Path(self.source_data["file_path"]).suffixes:
intan_file_metadata = read_rhd(self.source_data["file_path"])[1]
else:
intan_file_metadata = read_rhs(self.source_data["file_path"])[1]
exclude_chan_types = ["AUX", "ADC", "VDD"]
valid_channels = [
x for x in intan_file_metadata if not any([y in x["native_channel_name"] for y in exclude_chan_types])
]
group_names = [channel["native_channel_name"].split("-")[0] for channel in valid_channels]
unique_group_names = set(group_names)
group_electrode_numbers = [channel["native_order"] for channel in valid_channels]
channel_ids = self.recording_extractor.get_channel_ids()
for channel_id, channel_group in zip(channel_ids, group_names):
self.recording_extractor.set_channel_property(
channel_id=channel_id, property_name="group_name", value=f"Group{channel_group}"
)
if len(unique_group_names) > 1:
for channel_id, group_electrode_number in zip(channel_ids, group_electrode_numbers):
self.recording_extractor.set_channel_property(
channel_id=channel_id, property_name="group_electrode_number", value=group_electrode_number
)
custom_names = [channel["custom_channel_name"] for channel in valid_channels]
if any(custom_names):
for channel_id, custom_name in zip(channel_ids, custom_names):
self.recording_extractor.set_channel_property(
channel_id=channel_id, property_name="custom_channel_name", value=custom_name
)
def get_metadata_schema(self):
metadata_schema = super().get_metadata_schema()
metadata_schema["properties"]["Ecephys"]["properties"].update(
ElectricalSeries_raw=get_schema_from_hdmf_class(ElectricalSeries)
)
return metadata_schema
def get_metadata(self):
channel_ids = self.recording_extractor.get_channel_ids()
property_names = self.recording_extractor.get_shared_channel_property_names()
ecephys_metadata = dict(
Ecephys=dict(
Device=[
dict(
name="Intan",
description="Intan recording",
manufacturer="Intan",
),
],
ElectrodeGroup=[
dict(
name=group_name,
description=f"Group {group_name} electrodes.",
device="Intan",
location="",
)
for group_name in set(
[
self.recording_extractor.get_channel_property(
channel_id=channel_id, property_name="group_name"
)
for channel_id in channel_ids
]
)
],
Electrodes=[
dict(name="group_name", description="The name of the ElectrodeGroup this electrode is a part of.")
],
ElectricalSeries_raw=dict(name="ElectricalSeries_raw", description="Raw acquisition traces."),
)
)
if "group_electrode_number" in property_names:
ecephys_metadata["Ecephys"]["Electrodes"].append(
dict(name="group_electrode_number", description="0-indexed channel within a group.")
)
if "custom_channel_name" in property_names:
ecephys_metadata["Ecephys"]["Electrodes"].append(
dict(name="custom_channel_name", description="Custom channel name assigned in Intan.")
)
return ecephys_metadata
| 44.121495 | 118 | 0.615336 | 4,244 | 0.898962 | 0 | 0 | 0 | 0 | 0 | 0 | 824 | 0.174539 |
40771f48cc35e55bf1ed0377d840f200b12f6982 | 739 | py | Python | Use.py | XtremeCoder1384/SongDownloader | 7bb06d7961ec699af8517cbd7cb4a1ec83d4fd02 | [
"MIT"
]
| 1 | 2019-03-04T02:26:41.000Z | 2019-03-04T02:26:41.000Z | Use.py | XtremeCoder1384/SongDownloader | 7bb06d7961ec699af8517cbd7cb4a1ec83d4fd02 | [
"MIT"
]
| 1 | 2018-12-20T02:32:35.000Z | 2019-03-11T12:51:15.000Z | Use.py | IngeniousCoder/SongDownloader | 7bb06d7961ec699af8517cbd7cb4a1ec83d4fd02 | [
"MIT"
]
| null | null | null | import os
import youtube_dl
os.system("setup.bat")
playlist = input("Paste the Youtube Playlist URL Here.")
track = 1
print("""THIS TOOL WILL ATTEMPT TO DOWNLOAD THE FIRST 1000 SONGS IN THE QUEUE.\n
PLEASE DO NOT INTERRUPT THE TOOL.
YOU MAY CLOSE THE TOOL WHEN IT DISPLAYS "DONE!".
ALL DOWNLOADED SONGS WILL BE IN THE SAME DIRECTORY THIS FILE IS IN.
TO EXTRACT THEM, FILTER BY MP3.""")
for x in range(1000):
file = open("Downloader.bat","w")
file.write("youtube-dl -x --playlist-start {} --audio-format mp3 --playlist-end {} {}".format(str(track),str(track),playlist))
file.close
os.system("Downloader.bat")
track = track + 1
print("DONE! You may now close this window.")
| 36.95 | 129 | 0.663058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.67253 |
40776dbc5b7aba40a9cfd205d779833d8dd62541 | 1,903 | py | Python | site/manage.py | oaoouo/railgun | b09d276723976740841d8b8adf9cbf87a05cd970 | [
"MIT"
]
| null | null | null | site/manage.py | oaoouo/railgun | b09d276723976740841d8b8adf9cbf87a05cd970 | [
"MIT"
]
| null | null | null | site/manage.py | oaoouo/railgun | b09d276723976740841d8b8adf9cbf87a05cd970 | [
"MIT"
]
| null | null | null | # coding: utf-8
"""
manage.py
~~~~~~~~~
"""
import os
import sys
import shutil
import platform
from app import app
from gen import Gen
from flask_script import Manager
"""编码设置"""
if (platform.python_version().split('.')[0] == '2'):
# reload(sys) is evil :)
reload(sys)
sys.setdefaultencoding('utf-8')
"""Git配置"""
git_url = app.config['GIT_URL']
git_branch = app.config['BRANCH']
manager = Manager(app)
def first_upload():
if not git_url:
raise
else:
harbor_folder = os.path.join(os.getcwd(), '.harbor')
os.chdir(harbor_folder)
os.popen('git checkout -b %s' % git_branch)
os.popen('git pull %s %s' % (git_url, git_branch))
os.popen('git add .')
os.popen('git commit -m "railgun site update...✅ "')
os.popen('git push -u %s %s' % (git_url, git_branch))
def other_upload():
if not git_url:
raise
else:
harbor_folder = os.path.join(os.getcwd(), '.harbor')
os.chdir(harbor_folder)
os.popen('git checkout %s' % git_branch)
os.popen('git add .')
os.popen('git commit -m "railgun site update...✅ "')
os.popen('git push -u %s %s' % (git_url, git_branch))
def update_static_res():
static_folder = os.path.join(os.getcwd(), 'app/static')
static_build_folder = os.path.join(os.getcwd(), 'app/build/static')
if os.path.isdir(static_build_folder):
shutil.rmtree(static_build_folder)
shutil.copytree(static_folder, static_build_folder)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
_gen = Gen(app)
_gen.gen()
# update static resources
update_static_res()
elif len(sys.argv) > 1 and sys.argv[1] == 'first_upload':
first_upload()
elif len(sys.argv) > 1 and sys.argv[1] == 'other_upload':
other_upload()
else:
manager.run()
| 25.039474 | 71 | 0.60536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.237624 |
40780e501d35b1715806673a2e143e24f1727e1c | 3,152 | py | Python | tests/test_segmenters.py | edoarn/cv-models | 5fa7e50fd69f76b54611bb323b15610eeb1bb5cf | [
"MIT"
]
| null | null | null | tests/test_segmenters.py | edoarn/cv-models | 5fa7e50fd69f76b54611bb323b15610eeb1bb5cf | [
"MIT"
]
| 4 | 2021-04-23T12:05:45.000Z | 2021-04-25T11:38:01.000Z | tests/test_segmenters.py | edoarn/cv-models | 5fa7e50fd69f76b54611bb323b15610eeb1bb5cf | [
"MIT"
]
| null | null | null | from typing import Any
import torch
import torch.nn as nn
from cvmodels.segmentation import unet, deeplab as dl
def output(model: nn.Module, input_batch: torch.Tensor) -> Any:
model.eval()
with torch.no_grad():
return model(input_batch)
def numel(m: torch.nn.Module, only_trainable: bool = True) -> int:
parameters = m.parameters()
if only_trainable:
parameters = list(p for p in parameters if p.requires_grad)
unique = dict((p.data_ptr(), p) for p in parameters).values()
return sum(p.numel() for p in unique)
def test_unet_out_transpose(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
model = unet.UNet(bilinear=False, outputs=1)
assert numel(model) > 31_000_000
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_unet_out_bilinear(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
model = unet.UNet(bilinear=True, outputs=1)
assert numel(model) < 30_000_000
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_out(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(variant=variant)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_pretrain_backbone(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(variant=variant, pretrained=True)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_custom():
batch = torch.rand((2, 4, 480, 480))
batches, _, height, width = batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(in_channels=4, out_channels=2, in_dimension=480, variant=variant, pretrained=True)
out = output(model, batch)
assert out.shape == (batches, 2, height, width)
def test_deeplabv3plus_out(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(variant=variant)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3plus_pretrain_backbone(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(variant=variant, pretrained=True)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3plus_custom():
batch = torch.rand((2, 4, 480, 480))
batches, _, height, width = batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(in_channels=4, out_channels=2, in_dimension=480, variant=variant, pretrained=True)
out = output(model, batch)
assert out.shape == (batches, 2, height, width)
| 36.651163 | 115 | 0.703046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
40788e305d7f2fee1abfae85125753bcd3fa071f | 10,981 | py | Python | bagua/torch_api/contrib/sync_batchnorm.py | mmathys/bagua | e17978690452318b65b317b283259f09c24d59bb | [
"MIT"
]
| 635 | 2021-06-11T03:03:11.000Z | 2022-03-31T14:52:57.000Z | bagua/torch_api/contrib/sync_batchnorm.py | mmathys/bagua | e17978690452318b65b317b283259f09c24d59bb | [
"MIT"
]
| 181 | 2021-06-10T12:27:19.000Z | 2022-03-31T04:08:19.000Z | bagua/torch_api/contrib/sync_batchnorm.py | shjwudp/bagua | 7e1b438e27e3119b23e472f5b9217a9862932bef | [
"MIT"
]
| 71 | 2021-06-10T13:16:53.000Z | 2022-03-22T09:26:22.000Z | # Copyright (c) Uber Technologies, Inc. and its affiliates.
# Copyright (c) 2021 Kuaishou AI Platform & DS3 Lab.
#
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from distutils.version import LooseVersion
import torch
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
import bagua.torch_api as bagua
from bagua.torch_api.communication import allgather, allreduce
# Backward compat for old PyTorch
if not hasattr(torch.jit, "unused"):
torch.jit.unused = lambda x: x
_SYNC_BN_V2 = LooseVersion(torch.__version__) >= LooseVersion("1.5.0") and LooseVersion(
torch.__version__
) <= LooseVersion("1.6.0")
_SYNC_BN_V3 = LooseVersion(torch.__version__) >= LooseVersion("1.6.0")
_SYNC_BN_V4 = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
class SyncBatchNorm(_BatchNorm):
r"""Applies synchronous BatchNorm for distributed module with N-dimensional BatchNorm layer(s).
See `BatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html?highlight=batchnorm#torch.nn.BatchNorm2d>`_ for more details.
Arguments:
num_features: Number of channels :math:`C` from the shape :math:`(N, C, ...)`.
eps: A value added to the denominator for numerical stability. Default: 1e-5.
momentum: The value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1.
affine: A boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``.
track_running_stats: A boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``.
.. note:: Only GPU input tensors are supported in the training mode.
"""
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
track_running_stats=True,
):
super().__init__(num_features, eps, momentum, affine, track_running_stats)
def _check_input_dim(self, input):
if input.dim() < 2:
raise ValueError(
"expected at least 2D input (got {}D input)".format(input.dim())
)
def _run_bn(self, input):
return F.batch_norm(
input,
self.running_mean,
self.running_var,
self.weight,
self.bias,
self.training or not self.track_running_stats,
self.momentum,
self.eps,
)
@torch.jit.unused
def _maybe_run_sync_bn(self, input):
if bagua.get_world_size() == 1:
return self._run_bn(input)
return _SyncBatchNorm.apply(
input,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.eps,
self.momentum,
)
def forward(self, input):
# currently only GPU input is supported by underlying kernel from PyTorch
if not input.is_cuda:
raise ValueError("SyncBatchNorm expected input tensor to be on GPU")
self._check_input_dim(input)
if self.training and self.track_running_stats:
assert self.num_batches_tracked is not None
self.num_batches_tracked = self.num_batches_tracked + 1
if not self.training and self.track_running_stats:
return self._run_bn(input)
else:
return self._maybe_run_sync_bn(input)
@classmethod
def convert_sync_batchnorm(cls, module):
r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
`torch.nn.SyncBatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.SyncBatchNorm.html?highlight=syncbatchnorm#torch.nn.SyncBatchNorm>`_ layers.
Arguments:
module (nn.Module): Module containing one or more :attr:`BatchNorm*D` layers
Returns:
The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
instead.
.. note:: This function must be called before :meth:`~bagua.torch_api.distributed.BaguaModule.with_bagua` method.
Example::
>>> # Network with nn.BatchNorm layer
>>> model = torch.nn.Sequential(
... torch.nn.Linear(D_in, H),
... torch.nn.ReLU(),
... torch.nn.Linear(H, D_out),
... )
>>> optimizer = torch.optim.SGD(
... model.parameters(),
... lr=0.01,
... momentum=0.9
... )
>>> sync_bn_model = bagua.torch_api.contrib.sync_batchnorm.SyncBatchNorm.convert_sync_batchnorm(model)
>>> bagua_model = sync_bn_model.with_bagua([optimizer], GradientAllReduce())
"""
module_output = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module_output = SyncBatchNorm(
module.num_features,
module.eps,
module.momentum,
module.affine,
module.track_running_stats,
)
if module.affine:
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
if hasattr(module, "qconfig"):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(name, cls.convert_sync_batchnorm(child))
del module
return module_output
class _SyncBatchNorm(Function):
@staticmethod
def forward(self, input, weight, bias, running_mean, running_var, eps, momentum):
input = input.contiguous()
size = input.numel() // input.size(1)
count = torch.tensor([size])
# calculate mean/invstd for input.
mean, invstd = torch.batch_norm_stats(input, eps)
count, mean, invstd = count.cuda(), mean.cuda(), invstd.cuda()
nums_ranks = bagua.get_world_size()
count_all = torch.tensor(
[torch.empty_like(count).cpu().detach().numpy() for _ in range(nums_ranks)]
).cuda()
mean_all = torch.tensor(
[torch.empty_like(mean).cpu().detach().numpy() for _ in range(nums_ranks)]
).cuda()
invstd_all = torch.tensor(
[torch.empty_like(invstd).cpu().detach().numpy() for _ in range(nums_ranks)]
).cuda()
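        # Gather per-rank counts, means and inverse stds so that every rank can
        # compute the same global batch statistics locally.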
allgather(count.unsqueeze(0), count_all)
allgather(mean.unsqueeze(0), mean_all)
allgather(invstd.unsqueeze(0), invstd_all)
if _SYNC_BN_V3:
counts_for_bngswc = count_all.view(-1).float().to(input.device)
else:
# backwards compatibility
counts_for_bngswc = count_all.view(-1).tolist()
# calculate global mean & invstd
mean, invstd = torch.batch_norm_gather_stats_with_counts(
input,
mean_all,
invstd_all,
running_mean,
running_var,
momentum,
eps,
counts_for_bngswc,
)
self.save_for_backward(input, weight, mean, invstd, count_all)
# apply element-wise normalization
return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
@staticmethod
def backward(self, grad_output):
grad_output = grad_output.contiguous()
saved_input, weight, mean, invstd, count_all = self.saved_tensors
need_input_grad, need_weight_grad, need_bias_grad = self.needs_input_grad[0:3]
# calculate local stats as well as grad_weight / grad_bias
sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
grad_output,
saved_input,
mean,
invstd,
weight,
need_input_grad,
need_weight_grad,
need_bias_grad,
)
if need_input_grad:
# synchronizing stats used to calculate input gradient.
allreduce(sum_dy, sum_dy)
allreduce(sum_dy_xmu, sum_dy_xmu)
if _SYNC_BN_V4:
# from 1.9.0 on we need a count tensor on all devices
# count_all is calculated as total count across all ranks in forward function
count_all = count_all.to(dtype=torch.int, device=grad_output.device)
elif _SYNC_BN_V2 or _SYNC_BN_V3:
# before 1.9.0 we need the count as an integer to compute means values
count = count_all.sum()
else:
# before 1.5.0, sum_dy was sum of means from every worker, so we just
# need to divide it by number of workers
count = bagua.get_world_size()
# backward pass for gradient calculation
# we are calling into a non-public undocumented function which broke moving to 1.9.0
# https://github.com/pytorch/pytorch/issues/57900
if _SYNC_BN_V4:
# from 1.9.0 on, sums and count parameters expected
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_all,
)
else:
# before 1.9.0, mean parameters expected, not sums and count
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy / count,
sum_dy_xmu / count,
)
else:
grad_input = None
# synchronizing of grad_weight / grad_bias is not needed as distributed
# training would handle all reduce.
if weight is None or not need_weight_grad:
grad_weight = None
if weight is None or not need_bias_grad:
grad_bias = None
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
| 38.128472 | 160 | 0.601858 | 10,035 | 0.913851 | 0 | 0 | 7,439 | 0.677443 | 0 | 0 | 4,024 | 0.366451 |
407a65f9c4b9f958fde5ab42bad4bdd15788bb31 | 4,046 | py | Python | tests/test_classification_metric.py | DaveFClarke/ml_bias_checking | 90f67ebc602b6107042e6cbff3268051bb3b1c95 | [
"Apache-2.0"
]
| 2 | 2021-07-31T20:52:37.000Z | 2022-02-15T21:05:17.000Z | tests/test_classification_metric.py | DaveFClarke/ml_bias_checking | 90f67ebc602b6107042e6cbff3268051bb3b1c95 | [
"Apache-2.0"
]
| 2 | 2021-08-25T16:16:43.000Z | 2022-02-10T05:26:14.000Z | tests/test_classification_metric.py | DaveFClarke/ml_bias_checking | 90f67ebc602b6107042e6cbff3268051bb3b1c95 | [
"Apache-2.0"
]
| 1 | 2019-05-21T15:31:24.000Z | 2019-05-21T15:31:24.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import ClassificationMetric
def test_generalized_entropy_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.2
pred = data.copy()
pred[:, -1] = np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.3
def test_theil_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.theil_index() == 4*np.log(2)/10
def test_between_all_groups():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
b = np.array([1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75])
assert cm.between_all_groups_generalized_entropy_index() == 1/20*np.sum(b**2 - 1)
def test_between_group():
data = np.array([[0, 0, 1],
[0, 1, 0],
[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 0, 0]])
pred = data.copy()
pred[[0, 3], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'feat2', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'feat2', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
cm = ClassificationMetric(bld, bld2, unprivileged_groups=[{'feat': 0}],
privileged_groups=[{'feat': 1}])
b = np.array([0.5, 0.5, 1.25, 1.25, 1.25, 1.25])
assert cm.between_group_generalized_entropy_index() == 1/12*np.sum(b**2 - 1)
| 34.87931 | 85 | 0.505685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.067721 |
407b22ddf13dab3659fb801ada3a7cb31608cf9a | 200 | py | Python | PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py | EMbeDS-education/StatsAndComputing20212022 | 971e418882b206a1b5606d15d222cef1a5a04834 | [
"MIT"
]
| 2 | 2022-02-24T09:35:15.000Z | 2022-03-14T20:34:33.000Z | PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py | GeorgiosArg/StatsAndComputing20212022 | 798d39af6aa5ef5eef49d5d6f43191351e8a49f3 | [
"MIT"
]
| null | null | null | PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py | GeorgiosArg/StatsAndComputing20212022 | 798d39af6aa5ef5eef49d5d6f43191351e8a49f3 | [
"MIT"
]
| 2 | 2022-03-15T21:40:35.000Z | 2022-03-26T14:51:31.000Z | city_country = {}
for _ in range(int(input())):
country, *cities = input().split()
for city in cities:
city_country[city] = country
for _ in range(int(input())):
print(city_country[input()]) | 28.571429 | 36 | 0.665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
407cd39412220721420002d2204aeef22618cb4c | 1,562 | py | Python | config.py | oyasr/mudawen | 6f0161ab783536d7c5d695225ef28ce4947a46e3 | [
"MIT"
]
| null | null | null | config.py | oyasr/mudawen | 6f0161ab783536d7c5d695225ef28ce4947a46e3 | [
"MIT"
]
| null | null | null | config.py | oyasr/mudawen | 6f0161ab783536d7c5d695225ef28ce4947a46e3 | [
"MIT"
]
| null | null | null | import os
from dotenv import load_dotenv
load_dotenv()
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.getenv('SECRET_KEY') or os.urandom(32)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
MAIL_SERVER = os.getenv('MAIL_SERVER') or 'smtp.googlemail.com'
MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
['true', 'on', '1']
MAIL_USERNAME = os.getenv('MAIL_USERNAME')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD')
MUDAWEN_MAIL_SUBJECT_PREFIX = '[Mudawen]'
MUDAWEN_MAIL_SENDER = 'Mudawen Admin <[email protected]>'
MUDAWEN_ADMIN = os.getenv('MUDAWEN_ADMIN')
MUDAWEN_POSTS_PER_PAGE = 20
MUDAWEN_FOLLOWERS_PER_PAGE = 50
MUDAWEN_COMMENTS_PER_PAGE = 30
MUDAWEN_QUERY_TIME_LIMIT = 0.5
@staticmethod
def init_app(app):
pass
class DevConfig(Config):
ENV = 'development'
SQLALCHEMY_DATABASE_URI = os.getenv('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestConfig(Config):
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = os.getenv('TEST_DATABASE_URL') or \
'sqlite://'
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevConfig,
'testing': TestConfig,
'production': ProductionConfig,
'default': DevConfig
}
| 28.4 | 70 | 0.691421 | 1,308 | 0.837388 | 0 | 0 | 49 | 0.03137 | 0 | 0 | 361 | 0.231114 |
407ce0ad1e21c01e8414bc4b63e17958aa42df9e | 998 | py | Python | experiments/async_tests/async_3.py | 10ks/py_utils | 54ce06dbd567b097deda1c7ef2d0a2265e5b243e | [
"MIT"
]
| null | null | null | experiments/async_tests/async_3.py | 10ks/py_utils | 54ce06dbd567b097deda1c7ef2d0a2265e5b243e | [
"MIT"
]
| null | null | null | experiments/async_tests/async_3.py | 10ks/py_utils | 54ce06dbd567b097deda1c7ef2d0a2265e5b243e | [
"MIT"
]
| null | null | null | import asyncio
async def wait_sec(l):
print("Before wait")
await asyncio.sleep(1)
print("After wait")
l[0] = False
async def main():
# await asyncio.gather(wait_sec([True]), wait_sec([True]), wait_sec([True]))
run = [True]
asyncio.create_task(wait_sec(run))
await asyncio.sleep(0)
print("continuing main")
while run[0]:
print(".")
await asyncio.sleep(0.1)
# for i in range(10):
# print(i)
# # time.sleep(0.2)
# # await asyncio.sleep(0)
# await asyncio.sleep(0.2)
if __name__ == "__main__":
import time
s = time.perf_counter()
asyncio.run(main())
# Completing unfinished tasks (throws a warning)
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
# pending = asyncio.Task.all_tasks()
# loop.run_until_complete(asyncio.gather(*pending))
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
| 23.209302 | 80 | 0.607214 | 0 | 0 | 0 | 0 | 0 | 0 | 549 | 0.5501 | 499 | 0.5 |
407d150b5548e5dc5c3decda923610fd51eb2141 | 1,438 | py | Python | vk_bot/mods/util/calculator.py | triangle1984/GLaDOS | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | [
"BSD-3-Clause"
]
| 3 | 2019-12-12T05:48:34.000Z | 2020-12-07T19:23:41.000Z | vk_bot/mods/util/calculator.py | anar66/vk-bot | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | [
"BSD-3-Clause"
]
| 1 | 2019-11-15T14:28:49.000Z | 2019-11-15T14:28:49.000Z | vk_bot/mods/util/calculator.py | triangle1984/vk-bot | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | [
"BSD-3-Clause"
]
| 5 | 2019-11-20T14:20:30.000Z | 2022-02-05T10:37:01.000Z | # from vk_bot.core.modules.basicplug import BasicPlug
# import math
# class Calculator(BasicPlug):
# doc = "Калькулятор"
# command = ("калькулятор",)
# def main(self):
# try:
# x = self.text[1]; x = int(x)
# encalc = self.text[2]; encalc = encalc.lower()
# y = self.text[3]; y = int(y)
# except:
# self.sendmsg("""Пример команды: /калькулятор 2 + 2
# Использовать можно только 2 числа, и только через пробел""")
# return
# if encalc == "+" or encalc == "сложение":
# result = x + y
# elif encalc == "-" or encalc == "вычитание":
# result = x - y
# elif encalc == "*" or encalc == "умножение":
# result = x * y
# elif encalc == "**" or encalc == "степень" or encalc == "^":
# if x > 999 or y > 999:
# return
# result = x ** y
# elif encalc == "":
# try:
# x / y
# except ZeroDivisionError:
# result = "взорвать планету хочешь?"
# elif encalc == "корень":
# result = math.sqrt(x), math.sqrt(y)
# elif encalc == "синус":
# result = math.sin(x), math.sin(y)
# elif encalc == "косинус":
# result = math.cos(x), math.cos(y)
# else:
# return
# self.sendmsg(f"Ваш результат: {result}")
| 36.871795 | 74 | 0.462448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,576 | 0.976456 |
407dc792a754cf8c4cf33cd4fb3c31fe49507ba3 | 9,201 | py | Python | sample-input/homogeneous/geometry.py | AI-Pranto/OpenMOC | 7f6ce4797aec20ddd916981a56a4ba54ffda9a06 | [
"MIT"
]
| 97 | 2015-01-02T02:13:45.000Z | 2022-03-09T14:12:45.000Z | sample-input/homogeneous/geometry.py | AI-Pranto/OpenMOC | 7f6ce4797aec20ddd916981a56a4ba54ffda9a06 | [
"MIT"
]
| 325 | 2015-01-07T17:43:14.000Z | 2022-02-21T17:22:00.000Z | sample-input/homogeneous/geometry.py | AI-Pranto/OpenMOC | 7f6ce4797aec20ddd916981a56a4ba54ffda9a06 | [
"MIT"
]
| 73 | 2015-01-17T19:11:58.000Z | 2022-03-24T16:31:37.000Z | import openmoc
import openmoc.log as log
import openmoc.plotter as plotter
import openmoc.materialize as materialize
log.set_log_level('NORMAL')
###############################################################################
########################### Creating Materials ############################
###############################################################################
log.py_printf('NORMAL', 'Importing materials data from HDF5...')
materials = openmoc.materialize.load_from_hdf5('c5g7-mgxs.h5', '../')
###############################################################################
########################### Creating Surfaces #############################
###############################################################################
log.py_printf('NORMAL', 'Creating surfaces...')
xmin = openmoc.XPlane(x=-5.0, name='xmin')
xmax = openmoc.XPlane(x= 5.0, name='xmax')
ymin = openmoc.YPlane(y=-5.0, name='ymin')
ymax = openmoc.YPlane(y= 5.0, name='ymax')
zmin = openmoc.ZPlane(z=-5.0, name='zmin')
zmax = openmoc.ZPlane(z= 5.0, name='zmax')
xmin.setBoundaryType(openmoc.REFLECTIVE)
xmax.setBoundaryType(openmoc.REFLECTIVE)
ymin.setBoundaryType(openmoc.REFLECTIVE)
ymax.setBoundaryType(openmoc.REFLECTIVE)
zmin.setBoundaryType(openmoc.REFLECTIVE)
zmax.setBoundaryType(openmoc.REFLECTIVE)
###############################################################################
############################# Creating Cells ##############################
###############################################################################
log.py_printf('NORMAL', 'Creating cells...')
fuel = openmoc.Cell(name='fuel')
fuel.setFill(materials['UO2'])
moderator = openmoc.Cell(name='moderator')
moderator.setFill(materials['UO2'])
root_cell = openmoc.Cell(name='root cell')
root_cell.addSurface(halfspace=+1, surface=xmin)
root_cell.addSurface(halfspace=-1, surface=xmax)
root_cell.addSurface(halfspace=+1, surface=ymin)
root_cell.addSurface(halfspace=-1, surface=ymax)
root_cell.addSurface(halfspace=+1, surface=zmin)
root_cell.addSurface(halfspace=-1, surface=zmax)
###############################################################################
########################### Creating Universes ############################
###############################################################################
log.py_printf('NORMAL', 'Creating universes...')
fue_univ = openmoc.Universe(name='homogeneous fue cell')
fue_univ.addCell(fuel)
mod_univ = openmoc.Universe(name='homogeneous mod cell')
mod_univ.addCell(moderator)
root_universe = openmoc.Universe(name='root universe')
root_universe.addCell(root_cell)
###############################################################################
########################### Creating Lattices #############################
###############################################################################
log.py_printf('NORMAL', 'Creating simple 10 x 10 lattice...')
f = fue_univ
lattice = openmoc.Lattice(name='10x10 lattice')
lattice.setWidth(width_x=1.0, width_y=1.0, width_z=1.0)
lattice.setUniverses([[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]]])
root_cell.setFill(lattice)
###############################################################################
########################## Creating the Geometry ##########################
###############################################################################
log.py_printf('NORMAL', 'Creating geometry...')
geometry = openmoc.Geometry()
geometry.setRootUniverse(root_universe)
geometry.initializeFlatSourceRegions()
| 46.705584 | 79 | 0.29236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,806 | 0.196283 |
407e66ad31400c201f52210276cc27484a563068 | 22,314 | py | Python | google/ads/google_ads/v5/__init__.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
]
| 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v5/__init__.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
]
| null | null | null | google/ads/google_ads/v5/__init__.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
from google.ads.google_ads import util
if sys.version_info < (3, 6):
raise ImportError("This module requires Python 3.6 or later.")
_lazy_name_to_package_map = {
"account_budget_proposal_service_client": "google.ads.google_ads.v5.services",
"account_budget_service_client": "google.ads.google_ads.v5.services",
"account_link_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_asset_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_service_client": "google.ads.google_ads.v5.services",
"ad_group_audience_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_group_extension_setting_service_client": "google.ads.google_ads.v5.services",
"ad_group_feed_service_client": "google.ads.google_ads.v5.services",
"ad_group_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_service_client": "google.ads.google_ads.v5.services",
"ad_group_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_parameter_service_client": "google.ads.google_ads.v5.services",
"ad_schedule_view_service_client": "google.ads.google_ads.v5.services",
"ad_service_client": "google.ads.google_ads.v5.services",
"age_range_view_service_client": "google.ads.google_ads.v5.services",
"asset_service_client": "google.ads.google_ads.v5.services",
"batch_job_service_client": "google.ads.google_ads.v5.services",
"bidding_strategy_service_client": "google.ads.google_ads.v5.services",
"billing_setup_service_client": "google.ads.google_ads.v5.services",
"campaign_asset_service_client": "google.ads.google_ads.v5.services",
"campaign_audience_view_service_client": "google.ads.google_ads.v5.services",
"campaign_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"campaign_budget_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"campaign_draft_service_client": "google.ads.google_ads.v5.services",
"campaign_experiment_service_client": "google.ads.google_ads.v5.services",
"campaign_extension_setting_service_client": "google.ads.google_ads.v5.services",
"campaign_feed_service_client": "google.ads.google_ads.v5.services",
"campaign_label_service_client": "google.ads.google_ads.v5.services",
"campaign_service_client": "google.ads.google_ads.v5.services",
"campaign_shared_set_service_client": "google.ads.google_ads.v5.services",
"carrier_constant_service_client": "google.ads.google_ads.v5.services",
"change_status_service_client": "google.ads.google_ads.v5.services",
"click_view_service_client": "google.ads.google_ads.v5.services",
"conversion_action_service_client": "google.ads.google_ads.v5.services",
"conversion_adjustment_upload_service_client": "google.ads.google_ads.v5.services",
"conversion_upload_service_client": "google.ads.google_ads.v5.services",
"currency_constant_service_client": "google.ads.google_ads.v5.services",
"custom_interest_service_client": "google.ads.google_ads.v5.services",
"customer_client_link_service_client": "google.ads.google_ads.v5.services",
"customer_client_service_client": "google.ads.google_ads.v5.services",
"customer_extension_setting_service_client": "google.ads.google_ads.v5.services",
"customer_feed_service_client": "google.ads.google_ads.v5.services",
"customer_label_service_client": "google.ads.google_ads.v5.services",
"customer_manager_link_service_client": "google.ads.google_ads.v5.services",
"customer_negative_criterion_service_client": "google.ads.google_ads.v5.services",
"customer_service_client": "google.ads.google_ads.v5.services",
"detail_placement_view_service_client": "google.ads.google_ads.v5.services",
"display_keyword_view_service_client": "google.ads.google_ads.v5.services",
"distance_view_service_client": "google.ads.google_ads.v5.services",
"domain_category_service_client": "google.ads.google_ads.v5.services",
"dynamic_search_ads_search_term_view_service_client": "google.ads.google_ads.v5.services",
"expanded_landing_page_view_service_client": "google.ads.google_ads.v5.services",
"extension_feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_target_service_client": "google.ads.google_ads.v5.services",
"feed_mapping_service_client": "google.ads.google_ads.v5.services",
"feed_placeholder_view_service_client": "google.ads.google_ads.v5.services",
"feed_service_client": "google.ads.google_ads.v5.services",
"gender_view_service_client": "google.ads.google_ads.v5.services",
"geo_target_constant_service_client": "google.ads.google_ads.v5.services",
"geographic_view_service_client": "google.ads.google_ads.v5.services",
"google_ads_field_service_client": "google.ads.google_ads.v5.services",
"google_ads_service_client": "google.ads.google_ads.v5.services",
"group_placement_view_service_client": "google.ads.google_ads.v5.services",
"hotel_group_view_service_client": "google.ads.google_ads.v5.services",
"hotel_performance_view_service_client": "google.ads.google_ads.v5.services",
"income_range_view_service_client": "google.ads.google_ads.v5.services",
"invoice_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_idea_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_service_client": "google.ads.google_ads.v5.services",
"keyword_view_service_client": "google.ads.google_ads.v5.services",
"label_service_client": "google.ads.google_ads.v5.services",
"landing_page_view_service_client": "google.ads.google_ads.v5.services",
"language_constant_service_client": "google.ads.google_ads.v5.services",
"location_view_service_client": "google.ads.google_ads.v5.services",
"managed_placement_view_service_client": "google.ads.google_ads.v5.services",
"media_file_service_client": "google.ads.google_ads.v5.services",
"merchant_center_link_service_client": "google.ads.google_ads.v5.services",
"mobile_app_category_constant_service_client": "google.ads.google_ads.v5.services",
"mobile_device_constant_service_client": "google.ads.google_ads.v5.services",
"offline_user_data_job_service_client": "google.ads.google_ads.v5.services",
"operating_system_version_constant_service_client": "google.ads.google_ads.v5.services",
"paid_organic_search_term_view_service_client": "google.ads.google_ads.v5.services",
"parental_status_view_service_client": "google.ads.google_ads.v5.services",
"payments_account_service_client": "google.ads.google_ads.v5.services",
"product_bidding_category_constant_service_client": "google.ads.google_ads.v5.services",
"product_group_view_service_client": "google.ads.google_ads.v5.services",
"reach_plan_service_client": "google.ads.google_ads.v5.services",
"recommendation_service_client": "google.ads.google_ads.v5.services",
"remarketing_action_service_client": "google.ads.google_ads.v5.services",
"search_term_view_service_client": "google.ads.google_ads.v5.services",
"shared_criterion_service_client": "google.ads.google_ads.v5.services",
"shared_set_service_client": "google.ads.google_ads.v5.services",
"shopping_performance_view_service_client": "google.ads.google_ads.v5.services",
"third_party_app_analytics_link_service_client": "google.ads.google_ads.v5.services",
"topic_constant_service_client": "google.ads.google_ads.v5.services",
"topic_view_service_client": "google.ads.google_ads.v5.services",
"user_data_service_client": "google.ads.google_ads.v5.services",
"user_interest_service_client": "google.ads.google_ads.v5.services",
"user_list_service_client": "google.ads.google_ads.v5.services",
"user_location_view_service_client": "google.ads.google_ads.v5.services",
"video_service_client": "google.ads.google_ads.v5.services",
"account_budget_proposal_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_asset_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_parameter_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_schedule_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"age_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"batch_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"bidding_strategy_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"billing_setup_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_draft_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_experiment_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"carrier_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"change_status_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"click_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_adjustment_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"currency_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"custom_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_manager_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_negative_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"detail_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"display_keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"distance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"domain_category_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"dynamic_search_ads_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"expanded_landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"extension_feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_target_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_mapping_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_placeholder_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"gender_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geo_target_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geographic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_field_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"group_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"income_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"invoice_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_idea_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"language_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"managed_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"media_file_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"merchant_center_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_app_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_device_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"offline_user_data_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"operating_system_version_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"paid_organic_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"parental_status_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"payments_account_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_bidding_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"reach_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"recommendation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"remarketing_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shopping_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"third_party_app_analytics_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_data_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_list_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"video_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
}
# Background on how this behaves: https://www.python.org/dev/peps/pep-0562/
def __getattr__(name): # Requires Python >= 3.7
"""Lazily perform imports and class definitions on first demand."""
if name == "__all__":
converted = (
util.convert_snake_case_to_upper_case(key)
for key in _lazy_name_to_package_map
)
all_names = sorted(converted)
globals()["__all__"] = all_names
return all_names
elif name.endswith("Transport"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
sub_mod_class = getattr(module, name)
klass = type(name, (sub_mod_class,), {"__doc__": sub_mod_class.__doc__})
globals()[name] = klass
return klass
elif name.endswith("ServiceClient"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
enums = __getattr__("enums")
sub_mod_class = getattr(module, name)
klass = type(
name,
(sub_mod_class,),
{"__doc__": sub_mod_class.__doc__, "enums": enums},
)
globals()[name] = klass
return klass
elif name == "enums":
path = "google.ads.google_ads.v5.services.enums"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name == "types":
path = "google.ads.google_ads.v5.types"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name in _lazy_name_to_package_map:
module = importlib.import_module(
f"{_lazy_name_to_package_map[name]}.{name}"
)
globals()[name] = module
return module
else:
raise AttributeError(f"unknown sub-module {name!r}.")
def __dir__():
return globals().get("__all__") or __getattr__("__all__")
if not sys.version_info >= (3, 7):
from pep562 import Pep562
Pep562(__name__)
| 71.519231 | 113 | 0.791118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18,795 | 0.842296 |
407f96b82e23f251ebe7b0d09ba3c8416a7e9d98 | 5,279 | py | Python | PNN/model.py | jingxiufenghua/rec-model | 23204f70fc1bf384d3cdd0cc85e43117d3394074 | [
"MIT"
]
| 1,323 | 2020-08-24T02:34:25.000Z | 2022-03-31T06:03:28.000Z | PNN/model.py | yiLinMaster/Recommender-System-with-TF2.0 | cfc7b3fbd4ba2d9157a78938e6bdaeba7df82822 | [
"MIT"
]
| 65 | 2020-08-25T06:07:41.000Z | 2022-03-18T20:10:53.000Z | PNN/model.py | yiLinMaster/Recommender-System-with-TF2.0 | cfc7b3fbd4ba2d9157a78938e6bdaeba7df82822 | [
"MIT"
]
| 395 | 2020-08-24T00:57:08.000Z | 2022-03-31T12:41:13.000Z | """
Created on July 20, 2020
Updated on May 19, 2021
model: Product-based Neural Networks for User Response Prediction
@author: Ziyao Geng([email protected])
"""
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Embedding, Dense, Layer, Dropout, Input
from modules import DNN
class PNN(Model):
def __init__(self, feature_columns, hidden_units, mode='in', dnn_dropout=0.,
activation='relu', embed_reg=1e-6, w_z_reg=1e-6, w_p_reg=1e-6, l_b_reg=1e-6):
"""
Product-based Neural Networks
:param feature_columns: A list. sparse column feature information.
:param hidden_units: A list. Neural network hidden units.
:param mode: A string. 'in' IPNN or 'out'OPNN.
:param activation: A string. Activation function of dnn.
:param dnn_dropout: A scalar. Dropout of dnn.
:param embed_reg: A scalar. The regularizer of embedding.
:param w_z_reg: A scalar. The regularizer of w_z_ in product layer
:param w_p_reg: A scalar. The regularizer of w_p in product layer
:param l_b_reg: A scalar. The regularizer of l_b in product layer
"""
super(PNN, self).__init__()
# inner product or outer product
self.mode = mode
self.sparse_feature_columns = feature_columns
# the number of feature fields
self.field_num = len(self.sparse_feature_columns)
self.embed_dim = self.sparse_feature_columns[0]['embed_dim']
# The embedding dimension of each feature field must be the same
self.embed_layers = {
'embed_' + str(i): Embedding(input_dim=feat['feat_num'],
input_length=1,
output_dim=feat['embed_dim'],
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
for i, feat in enumerate(self.sparse_feature_columns)
}
# parameters
self.w_z = self.add_weight(name='w_z',
shape=(self.field_num, self.embed_dim, hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_z_reg),
trainable=True
)
if mode == 'in':
self.w_p = self.add_weight(name='w_p',
shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
hidden_units[0]),
initializer='random_uniform',
reguarizer=l2(w_p_reg),
trainable=True)
# out
else:
self.w_p = self.add_weight(name='w_p',
shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
self.embed_dim, hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_p_reg),
trainable=True)
self.l_b = self.add_weight(name='l_b', shape=(hidden_units[0], ),
initializer='random_uniform',
regularizer=l2(l_b_reg),
trainable=True)
# dnn
self.dnn_network = DNN(hidden_units[1:], activation, dnn_dropout)
self.dense_final = Dense(1)
def call(self, inputs):
sparse_inputs = inputs
sparse_embed = [self.embed_layers['embed_{}'.format(i)](sparse_inputs[:, i])
for i in range(sparse_inputs.shape[1])]
sparse_embed = tf.transpose(tf.convert_to_tensor(sparse_embed), [1, 0, 2]) # (None, field_num, embed_dim)
# product layer
row = []
col = []
for i in range(len(self.sparse_feature_columns) - 1):
for j in range(i + 1, len(self.sparse_feature_columns)):
row.append(i)
col.append(j)
p = tf.gather(sparse_embed, row, axis=1)
q = tf.gather(sparse_embed, col, axis=1)
if self.mode == 'in':
l_p = tf.tensordot(p*q, self.w_p, axes=2) # (None, hidden[0])
else: # out
u = tf.expand_dims(q, 2) # (None, field_num(field_num-1)/2, 1, emb_dim)
v = tf.expand_dims(p, 2) # (None, field_num(field_num-1)/2, 1, emb_dim)
l_p = tf.tensordot(tf.matmul(tf.transpose(u, [0, 1, 3, 2]), v), self.w_p, axes=3) # (None, hidden[0])
l_z = tf.tensordot(sparse_embed, self.w_z, axes=2) # (None, hidden[0])
l_1 = tf.nn.relu(tf.concat([l_z + l_p + self.l_b], axis=-1))
# dnn layer
dnn_x = self.dnn_network(l_1)
outputs = tf.nn.sigmoid(self.dense_final(dnn_x))
return outputs
def summary(self):
sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32)
Model(inputs=sparse_inputs, outputs=self.call(sparse_inputs)).summary()
| 47.990909 | 114 | 0.544232 | 4,906 | 0.929343 | 0 | 0 | 0 | 0 | 0 | 0 | 1,346 | 0.254973 |
407fa3643267388dca73bf3b3496b61a0c5f9491 | 314 | py | Python | exercicio3.py | DrokaGit/-infosatc-lp-avaliativo-02 | 6bb78ce84ac325c866201ff538f426d6e7a72ab5 | [
"MIT"
]
| null | null | null | exercicio3.py | DrokaGit/-infosatc-lp-avaliativo-02 | 6bb78ce84ac325c866201ff538f426d6e7a72ab5 | [
"MIT"
]
| null | null | null | exercicio3.py | DrokaGit/-infosatc-lp-avaliativo-02 | 6bb78ce84ac325c866201ff538f426d6e7a72ab5 | [
"MIT"
]
| null | null | null | nume1 = int(input("Digite um numero"))
nume2 = int(input("Digite um numero"))
nume3 = int(input("Digite um numero"))
nume4 = int(input("Digite um numero"))
nume5 = int(input("Digite um numero"))
table = [nume1,nume2,nume3,nume4,nume5]
tableM = (float((nume1 + nume2 + nume3 + nume4 + nume5)))
print(float(tableM)) | 34.888889 | 57 | 0.691083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.286624 |
40804fd1f1dd57a07519de8f44b10f0b6f6d1a54 | 274 | py | Python | platonic/platonic/box/implementation.py | anatoly-scherbakov/platonic | b2d239e19f3ebf5a562b6aabcd4b82492bb03564 | [
"MIT"
]
| 1 | 2019-11-01T09:08:50.000Z | 2019-11-01T09:08:50.000Z | platonic/platonic/box/implementation.py | anatoly-scherbakov/platonic | b2d239e19f3ebf5a562b6aabcd4b82492bb03564 | [
"MIT"
]
| null | null | null | platonic/platonic/box/implementation.py | anatoly-scherbakov/platonic | b2d239e19f3ebf5a562b6aabcd4b82492bb03564 | [
"MIT"
]
| null | null | null | from typing import TypeVar
from .abstract import AbstractBox
T = TypeVar('T')
class ValueBox(AbstractBox[T]):
_value: T
@property
def value(self) -> T:
return self._value
@value.setter
def value(self, value: T):
self._value = value
| 15.222222 | 33 | 0.635036 | 191 | 0.69708 | 0 | 0 | 134 | 0.489051 | 0 | 0 | 3 | 0.010949 |
408169e338ef415cc1c1cefeaa3179019885ca4e | 79 | py | Python | Schemas/Subject.py | esot0/jmsa-tutoring-backend | f35000c73fbbb31f9b4dcca36e40854dc2e06d23 | [
"MIT"
]
| null | null | null | Schemas/Subject.py | esot0/jmsa-tutoring-backend | f35000c73fbbb31f9b4dcca36e40854dc2e06d23 | [
"MIT"
]
| null | null | null | Schemas/Subject.py | esot0/jmsa-tutoring-backend | f35000c73fbbb31f9b4dcca36e40854dc2e06d23 | [
"MIT"
]
| null | null | null | from mongoengine import *
class Subject(Document):
subject = StringField() | 19.75 | 27 | 0.746835 | 52 | 0.658228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
40826ce560682ad3ad560f8fecc12e0ab6658bc0 | 767 | py | Python | 39. Combination Sum.py | MapleLove2014/leetcode | 135c79ebe98815d0e38280edfadaba90e677aff5 | [
"Apache-2.0"
]
| 1 | 2020-12-04T07:38:16.000Z | 2020-12-04T07:38:16.000Z | 39. Combination Sum.py | MapleLove2014/leetcode | 135c79ebe98815d0e38280edfadaba90e677aff5 | [
"Apache-2.0"
]
| null | null | null | 39. Combination Sum.py | MapleLove2014/leetcode | 135c79ebe98815d0e38280edfadaba90e677aff5 | [
"Apache-2.0"
]
| null | null | null | class Solution:
def combinationSum(self, candidates, target):
def lookup(candidates, index, target, combine, result):
if target == 0:
result.append(combine)
return
if index >= len(candidates) and target > 0:
return
if target >= candidates[index]:
lookup(candidates, index, target - candidates[index], list(combine) + [candidates[index]], result)
lookup(candidates, index + 1, target, list(combine), result)
sorted(candidates)
result = []
lookup(candidates, 0, target, [], result)
return result
s = Solution()
print(s.combinationSum([2,3,6,7], 7))
print(s.combinationSum([2,3,5], 8))
| 34.863636 | 114 | 0.555411 | 676 | 0.881356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4082bcb5f99112c93d2d504f08622c615955a33b | 1,204 | py | Python | crawl_comments.py | tosh1ki/NicoCrawler | 236029f103e01de9e61a042759dc9bf2cb7d3d55 | [
"MIT"
]
| 1 | 2015-03-04T14:06:33.000Z | 2015-03-04T14:06:33.000Z | crawl_comments.py | tosh1ki/NicoCrawler | 236029f103e01de9e61a042759dc9bf2cb7d3d55 | [
"MIT"
]
| 2 | 2015-03-04T02:48:18.000Z | 2015-03-04T14:18:32.000Z | crawl_comments.py | tosh1ki/NicoCrawler | 236029f103e01de9e61a042759dc9bf2cb7d3d55 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = '''
Crawl comment from nicovideo.jp
Usage:
crawl_comments.py --url <url> --mail <mail> --pass <pass> [--sqlite <sqlite>] [--csv <csv>]
Options:
--url <url>
--mail <mail>
--pass <pass>
--sqlite <sqlite> (optional) path of comment DB [default: comments.sqlite3]
--csv <csv> (optional) path of csv file contains urls of videos [default: crawled.csv]
'''
from docopt import docopt
from nicocrawler.nicocrawler import NicoCrawler
if __name__ == '__main__':
# コマンドライン引数の取得
args = docopt(__doc__)
url_channel_toppage = args['--url']
login_mail = args['--mail']
login_pass = args['--pass']
path_sqlite = args['--sqlite']
path_csv = args['--csv']
ncrawler = NicoCrawler(login_mail, login_pass)
ncrawler.connect_sqlite(path_sqlite)
df = ncrawler.get_all_video_url_of_season(url_channel_toppage)
ncrawler.initialize_csv_from_db(path_csv)
# # デイリーランキング1~300位の動画を取得する
# url = 'http://www.nicovideo.jp/ranking/fav/daily/all'
# ncrawler.initialize_csv_from_url(url, path_csv, max_page=3)
# ncrawler.get_all_comments_of_csv(path_csv, max_n_iter=1)
| 26.173913 | 102 | 0.671096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 763 | 0.603639 |
40830eea2a3d7f03b3b7dae05b19fdc253a0e60b | 2,095 | py | Python | sif/greedy_sim_max.py | longland-m/wikigen | 459ba7bf9d3ca9584de65388cc9b9a15fa16a69f | [
"MIT"
]
| null | null | null | sif/greedy_sim_max.py | longland-m/wikigen | 459ba7bf9d3ca9584de65388cc9b9a15fa16a69f | [
"MIT"
]
| 2 | 2021-08-25T16:04:29.000Z | 2022-02-10T01:50:44.000Z | sif/greedy_sim_max.py | longland-m/wikigen | 459ba7bf9d3ca9584de65388cc9b9a15fa16a69f | [
"MIT"
]
| null | null | null | # Functions to do the greedy similarity maximisation for article:node assignments
# All code is original
import random
def computeSimSum(G, similarityMatrix, asgn):
""" Compute the total similarity sum for the current node:article assignment """
S = sum([similarityMatrix[asgn[j], asgn[i]]
for j in range(len(G)) for i in list(G[j])])
return S
def greedySimilarityMax(G, similarityMatrix, nrounds=5):
pairList = [(a,b) for a in range(len(G)) for b in range(a)]
maxSimSums = []
asgns = []
for i in range(nrounds):
# get random indices for initial node:article assignment
init_ids = list(range(len(G)))
random.shuffle(init_ids)
# assign articles to nodes and compute initial similarity sum
curAsgn = dict((key, init_ids[key]) for key in range(len(G)))
curSimSum = computeSimSum(G, similarityMatrix, curAsgn)
# maximisation loop - repeats until S can't increase
while True:
# for each node pair, swap the nodes recompute similarity sum
simSums = []
for edge in pairList:
tempAsgn = dict(curAsgn)
tempAsgn[edge[0]] = curAsgn[edge[1]]
tempAsgn[edge[1]] = curAsgn[edge[0]]
# Recompute similarity sum
tempSimSum = computeSimSum(G, similarityMatrix, tempAsgn)
simSums.append(tempSimSum)
# find the max possible new similarity score
# then update curAsgn if the new max score > old score
maxNewSimSum = max(simSums)
if maxNewSimSum > curSimSum:
nodesToSwap = pairList[simSums.index(maxNewSimSum)]
oldAsgn = dict(curAsgn)
curAsgn[nodesToSwap[0]] = oldAsgn[nodesToSwap[1]]
curAsgn[nodesToSwap[1]] = oldAsgn[nodesToSwap[0]]
curSimSum = maxNewSimSum # no need to recompute, know the value already
else:
break
maxSimSums.append(curSimSum)
asgns.append(curAsgn)
bestRound = maxSimSums.index(max(maxSimSums))
bestAsgn = asgns[bestRound]
print('Best S = ' + str(maxSimSums[bestRound]))
return bestAsgn
| 32.230769 | 82 | 0.651074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 598 | 0.285442 |
40836d6113e4a1359c6e3078275ec9078aa642e4 | 23,463 | py | Python | plab/photon_counters/Idq801.py | joamatab/photonic-coupling-drivers | c12581d8e2158a292e1c585e45c0207c8129c0f1 | [
"MIT"
]
| null | null | null | plab/photon_counters/Idq801.py | joamatab/photonic-coupling-drivers | c12581d8e2158a292e1c585e45c0207c8129c0f1 | [
"MIT"
]
| null | null | null | plab/photon_counters/Idq801.py | joamatab/photonic-coupling-drivers | c12581d8e2158a292e1c585e45c0207c8129c0f1 | [
"MIT"
]
| null | null | null | import sys
import numpy as np
import shutil
import time
import itertools as it
import collections
import ctypes as ct
import os
import copy
sys.path.append(os.path.dirname(__file__))
from ThreadStoppable import ThreadStoppable
class Idq801(object):
def __init__(
self,
deviceId=-1,
timestamp_buffer_size=int(1e6),
integration_time_ms=0.5 * 1e3,
coincidence_window_bins=1000,
max_retry=3,
delay_retry_sec=0.01,
clean_data_directory=False,
data_directory="Idq801Data",
processing="external",
):
self._max_retry = max_retry
self._set_check_delay = delay_retry_sec # Delay in seconds between setting and
# checking that a parameter was set.
self._data_directory = data_directory
self._wait_for_settings = 1
self._processing_dict = {"i": "internal", "e": "external"}
processing = processing.lower()
        assert processing in self._processing_dict.values()
self._processing = processing
if not os.path.isdir(data_directory):
os.mkdir(data_directory)
if clean_data_directory:
self.clean_data_directory()
module_path = os.path.dirname(__file__) + "/"
if sys.platform == "linux":
self.idq801Lib = ct.CDLL(module_path + "libtdcbase.so")
elif sys.platform == "win32":
self.idq801Lib = ct.CDLL(module_path + "./tdcbase.dll")
else:
raise OSError("Invalid operating system")
if self.idq801Lib.TDC_init(deviceId):
raise RuntimeError("Could not connect to the ID801 counter.")
# Initial parameters.
self.unset_channel(-1)
self.set_timestamp_buffer_size(timestamp_buffer_size)
self.integration_time_ms = integration_time_ms
if self._processing == self._processing_dict["i"]:
self.set_integration_time(integration_time_ms)
else:
self.set_integration_time(1.0e-3) # 1us integration time.
        self.set_coincidence_window_bins(coincidence_window_bins)
self._time_last_get_timestamps = time.time()
self.channel_delays = {
"1": 0,
"2": 0,
"3": 0,
"4": 0,
"5": 0,
"6": 0,
"7": 0,
"8": 0,
}
self.set_channel_delays_ns(self.channel_delays)
self.accidental_delay = 0
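    # A minimal usage sketch (hedged): the channel tuple, integration time and
    # delay below are illustrative values, not defaults, and a physical ID801
    # plus the tdcbase shared library are assumed to be present.
    #
    #   idq = Idq801(integration_time_ms=1000, processing="external")
    #   idq.set_channel((1, 2))
    #   coins, accs, stamps = idq.get_coin_counts((1, 2), accidentals_delay_ns=100)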
def __del__(self):
self.idq801Lib.TDC_deInit()
def _set_value(self, set_value, setter, getter):
"""Sets a value and makes sure it was set."""
attempt = 0
is_set = False
while not is_set and attempt < self._max_retry:
attempt += 1
setter(set_value)
time.sleep(self._set_check_delay)
try:
if list(set_value) == list(getter()):
is_set = True
except TypeError:
if set_value == getter():
is_set = True
if not is_set:
raise RuntimeError(
"Unable to set the value using %s to %s after %i attempts."
% (setter.__name__, str(set_value), self._max_retry)
)
def _get_device_params(self):
cm = ct.c_int32()
cw = ct.c_int32()
ew = ct.c_int32()
self.idq801Lib.TDC_getDeviceParams(ct.byref(cm), ct.byref(cw), ct.byref(ew))
return (cm, cw, ew)
def _set_processing(self, processing):
processing = processing.lower()
assert processing in self._processing_dict.values()
self._processing = processing
if processing == self._processing_dict["i"]:
self.set_integration_time(self.integration_time_ms)
return self._processing
def set_processing_internal(self):
return self._set_processing("internal")
def set_processing_external(self):
return self._set_processing("external")
def clean_data_directory(self):
"""
Deletes all data in the `Idq801Data` directory.
"""
shutil.rmtree(self._data_directory, ignore_errors=True)
os.mkdir(self._data_directory)
def get_timebase(self):
self.idq801Lib.TDC_getTimebase.restype = ct.c_double
tb = self.idq801Lib.TDC_getTimebase()
return tb
def get_mask_channels(self):
cm, _, _ = self._get_device_params()
return cm.value
def get_status_channels(self):
cm, cw, ew = self._get_device_params()
channels_enabled = [bool(int(c)) for c in bin(cm.value)[2:]][::-1]
padLength = 8 - len(channels_enabled)
channels_enabled.extend([False] * padLength)
return tuple(channels_enabled)
def get_enabled_channels(self):
channels_status = self.get_status_channels()
channels_enabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == True
)
return channels_enabled
def get_disabled_channels(self):
channels_status = self.get_status_channels()
channels_disabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == False
)
return channels_disabled
def is_channel_enabled(self, channel):
assert 1 <= channel <= 8, "Invalid choice channel range."
channel -= 1
channel_status = self.get_status_channels()[channel]
return channel_status
def _get_channel_mask(self, channel, set_unset):
        def channel_mask_from_channel_list(channels_enabled):
            # Set bit (b - 1) for every enabled channel b, so channel 1 maps
            # to bit 0 and channel 8 to bit 7.
            channel_mask = 0
            for b in channels_enabled:
                channel_mask |= 1 << (b - 1)
            return channel_mask
set_unset = set_unset.lower()
assert set_unset in ("set", "unset"), (
"Invalid `set_unset` choice %s." % set_unset
)
if isinstance(channel, str):
channel = channel.lower()
if channel == "all" or channel == -1:
channel_mask = 0xFF
elif channel in range(1, 9):
            channel_mask = 1 << (channel - 1)
elif isinstance(channel, collections.Iterable):
channel_mask = channel_mask_from_channel_list(channel)
else:
raise TypeError("Invalid `channel` choice.")
if set_unset == "unset":
channel_mask ^= 0xFF
return channel_mask
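    # Mask layout sketch: bit (n - 1) of the 8-bit mask corresponds to channel
    # n, so channels (1, 4, 5) give 0b00011001 = 25 and 'all' gives 0xFF; the
    # 'unset' variant simply inverts that mask with mask ^ 0xFF.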
def _set_unset_channel(self, channel, set_unset):
self._channel_mask = self._get_channel_mask(channel, set_unset)
self._set_value(
self._channel_mask,
self.idq801Lib.TDC_enableChannels,
self.get_mask_channels,
)
return self._channel_mask
def set_channel(self, channel):
"""Choose which channels to enable.
Options include:
* -1 or 'all' for (all channels).
* A single number for channel to be enabled.
* An iterable containing the channels
                to be enabled, e.g. (1, 4, 5).
* Default is no channels are enabled.
"""
return self._set_unset_channel(channel, "set")
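    # Usage sketch (assumes a connected device):
    #   idq.set_channel('all')      # enable channels 1-8
    #   idq.set_channel((1, 4, 5))  # enable a subset
    #   idq.set_channel(3)          # enable a single channel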
def unset_channel(self, channel):
"""Choose which channels to disable.
Options include:
* -1 or 'all' for (all channels).
* A single number for channel to be disabled.
* An iterable containing the channels
                to be disabled, e.g. (1, 4, 5).
* Default is no channels are disabled.
"""
return self._set_unset_channel(channel, "unset")
def get_coincidence_window_bins(self):
cm, cw, ew = self._get_device_params()
return cw.value
def get_coincidence_window_ns(self):
bin = self.get_timebase()
return bin * self.get_coincidence_window_bins() * 1e9
def set_coincidence_window_bins(self, coincidence_window_bins):
coincidence_window_bins = int(coincidence_window_bins)
if not 0 < coincidence_window_bins <= 65535:
raise ValueError(
"The chosen number of coincidence \
window bins is not in the range (0,65535]."
)
self._set_value(
coincidence_window_bins,
self.idq801Lib.TDC_setCoincidenceWindow,
self.get_coincidence_window_bins,
)
def set_coincidence_window_ns(self, coincidence_window_ns):
bin = self.get_timebase()
coincidence_window_bins = int(coincidence_window_ns * 1e-9 / bin)
return self.set_coincidence_window_bins(coincidence_window_bins)
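    # Conversion sketch: with `tb` seconds per bin from get_timebase() (around
    # 81 ps for this hardware family -- an assumption, check your unit), a
    # 10 ns window maps to int(10e-9 / tb) coincidence-window bins.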
def get_integration_time(self):
cm, cw, ew = self._get_device_params()
return ew.value
def freeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(True)
def unfreeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(False)
def set_integration_time(self, window_time_ms):
window_time_ms = round(window_time_ms)
if self._processing == self._processing_dict["i"]:
if not 0 < window_time_ms <= 65535:
raise ValueError(
"The chosen exposure window is not \
in the range (0,65535]. Can't do more than 65.5s \
integration time internally."
)
self._set_value(
                window_time_ms,
self.idq801Lib.TDC_setExposureTime,
self.get_integration_time,
)
def get_data_lost_status(self):
"""Returns true if data is being lost, and false
if data is not being lost.
"""
# Get the status of the lost latch.
lost = ct.c_int32()
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
latch = lost.value
# Calls the function again to clear the lost latch.
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
return latch
def get_timestamp_buffer_size(self):
size = ct.c_int32()
self.idq801Lib.TDC_getTimestampBufferSize(ct.byref(size))
return size.value
def set_timestamp_buffer_size(self, size):
"""`size` is the amount of timestamps that the
        counter will store. Range is 1 -> 1000000.
"""
self._set_value(
size,
self.idq801Lib.TDC_setTimestampBufferSize,
self.get_timestamp_buffer_size,
)
def get_timestamps(self, clear_retrieved_timestamps=True, trim_time_s=None):
"""
Gets all the time stamps in the buffer and returns
a dictionary corresponding to the timestamps in each
channel.
args:
clear_retrieved_timestamps(bool): Clears the timestamp
buffer of the IDQ801 after reading.
            trim_time_s(float, None): The span of time, in seconds,
                of timestamps to keep, measured from the first timestamp.
If `None`, all timestamps are returned. Multiple
channels are all trimmed starting from the lowest
timestamps of all the channels combined.
returns:
dict: A dictionary containing numpy arrays with the
timestamps of each channel. The time from the
last calling of this function is also returned
in the dictionary.
"""
if self.get_timestamp_buffer_size() == 0:
raise RuntimeError(
"The timestamp buffer size is 0. \
Can't get timestamps. Need to set the timestamp \
buffer."
)
r = ct.c_int32(clear_retrieved_timestamps)
ts = (ct.c_int64 * self.get_timestamp_buffer_size())()
c = (ct.c_int8 * self.get_timestamp_buffer_size())()
v = ct.c_int32()
self.idq801Lib.TDC_getLastTimestamps(r, ts, c, ct.byref(v))
time_read = time.time()
time_diff = time_read - self._time_last_get_timestamps
self._time_last_get_timestamps = time_read
channel = np.frombuffer(c, dtype=np.int8)
channel_masks = [
channel == i for i in range(4) if self._channel_mask & (1 << i)
]
timestamps = np.frombuffer(ts, dtype=np.int64)
timestamps_masked = {
str(c + 1): timestamps[c_m] for c, c_m in enumerate(channel_masks)
}
timestamps_masked.update((k, v[v > 0]) for k, v in timestamps_masked.items())
last_counts = []
if trim_time_s:
for timestamps in timestamps_masked.values():
if timestamps.size:
first_count = timestamps[0]
last_counts.append(
first_count + int(trim_time_s / self.get_timebase() + 0.5)
)
if len(last_counts):
last_count = np.min(last_counts)
for channel, timestamps in timestamps_masked.items():
if timestamps.size:
last_idx = np.searchsorted(timestamps, last_count, "right")
timestamps_masked[channel] = timestamps[: last_idx - 1]
timestamps_masked["time_diff"] = time_diff
return timestamps_masked
def _get_coins(self, timestamps_1, timestamps_2, method="2"):
t2 = np.array(timestamps_2, dtype=np.int64)
assert method in ("1", "2"), "Invalid method chosen."
if method == "1":
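            # Method "1": pad t1 with sentinel values, then compare each t2 timestamp against its nearest
            # t1 neighbours on both sides of its insertion point, testing both against the coincidence window.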
t1 = np.empty(len(timestamps_1) + 2, dtype=np.int64)
t1[0] = 0
t1[-1] = np.iinfo(np.int64).max
t1[1:-1] = timestamps_1
t2_pos = np.searchsorted(t1, t2)
t1_pos_forw = t2_pos
t1_pos_back = t2_pos - 1
t1_pos_back[t1_pos_back == -1] = 0
dt_forw = np.abs(t1[t1_pos_forw] - t2) <= self.get_coincidence_window_bins()
dt_back = np.abs(t1[t1_pos_back] - t2) <= self.get_coincidence_window_bins()
coin_forw_args = dt_forw.nonzero()[0]
coin_back_args = dt_back.nonzero()[0]
coins_forw = np.c_[t1_pos_forw[coin_forw_args] - 1, coin_forw_args]
coins_back = np.c_[t1_pos_back[coin_back_args] - 1, coin_back_args]
coins = np.vstack((coins_back, coins_forw))
elif method == "2":
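            # Method "2": flag each t2 timestamp that has at least one t1 entry within +/- half the
            # coincidence window, using two searchsorted calls.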
t1 = np.array(timestamps_1, dtype=np.int64)
l = np.searchsorted(t1, t2 - self.get_coincidence_window_bins() / 2)
r = np.searchsorted(t1, t2 + self.get_coincidence_window_bins() / 2)
args = np.where(l != r)[0]
coins = np.c_[r[args], args]
return coins
def get_coin_counts(
self, coin_channels, accidentals_delay_ns=None, trim_time_s=None
):
bin = self.get_timebase()
timestamps = self.get_timestamps(
clear_retrieved_timestamps=True, trim_time_s=trim_time_s
)
time_diff = timestamps["time_diff"]
timestamps.pop("time_diff", None)
coin_counts = {}
acc_counts = {}
# Get singles counts
for c in coin_channels:
if str(c) in timestamps:
coin_counts[str(c)] = len(timestamps[str(c)])
else:
coin_counts[str(c)] = 0
coin_combinations = list(it.combinations(coin_channels, 2))
for c in coin_combinations:
# Get coincidence counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
coin_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(timestamps[str(c[0])], timestamps[str(c[1])])
)
else:
coin_counts[str(c[0]) + "/" + str(c[1])] = 0
        if accidentals_delay_ns is not None:
accidentals_delay_bin = int(accidentals_delay_ns * 1e-9 / bin)
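            # Accidental (background) coincidences are estimated by shifting one channel by the delay, which suppresses true coincidences.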
for c in coin_combinations:
# Get accidental counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
acc_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(
timestamps[str(c[0])],
timestamps[str(c[1])] + accidentals_delay_bin,
)
)
else:
acc_counts[str(c[0]) + "/" + str(c[1])] = 0
return coin_counts, acc_counts, timestamps
def scan_channel_delay(
self, coin_channels, scan_channel, scan_range_ns, integration_time=1.0
):
"""
Scans channel delay electronically - integrates once then applies delays to the timestamps to find coins
Args:
            coin_channels: channels between which to count coincidences
            scan_channel: channel whose delay is scanned
            scan_range_ns: +/- range of delay in ns
            integration_time: initial integration time in seconds
Returns: max coin reading, delay in ns of the max, all coin counts, delay range
"""
current_delays_bins = self.get_channel_delays_bins()
self.set_channel_delays_ns({str(coin_channels[0]): 0, str(coin_channels[1]): 0})
bin = self.get_timebase()
self.get_timestamps()
time.sleep(integration_time)
original_timestamps = self.get_timestamps()
delay_range = range(-scan_range_ns, scan_range_ns + 1)
coin_counts = np.zeros(len(delay_range))
timestamps = copy.deepcopy(original_timestamps)
for idd, d in enumerate(delay_range):
timestamps[str(scan_channel)] = copy.deepcopy(
original_timestamps[str(scan_channel)]
) + int(d * 1e-9 / bin)
coin_counts[idd] = len(
self._get_coins(
timestamps[str(coin_channels[0])], timestamps[str(coin_channels[1])]
)
)
print(
"delay channel = %s, delay = %s ns, coin counts = %s"
% (scan_channel, d, int(coin_counts[idd]))
)
max_coin = np.max(coin_counts)
max_coin_delay = delay_range[np.argmax(coin_counts)]
self.set_channel_delays_bins(current_delays_bins)
return max_coin, max_coin_delay, coin_counts, delay_range
    def get_timestamps_continuous(self, seconds=-1):
        """Runs `get_timestamps` continuously in a separate
thread for `seconds` amount of seconds in a loop.
If seconds == -1, it doesn't timeout. Returns a
thread object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
clear_retrieved_timestamps = True
t = ThreadStoppable(
self.get_timestamps, seconds, True, args=(clear_retrieved_timestamps,)
)
return t
def write_timestamps_to_file(self):
"""Writes the timestamps in the buffer to a
file.
"""
timestamp_dir = "Timestamps"
if not os.path.isdir(self._data_directory + "/" + timestamp_dir):
os.mkdir(self._data_directory + "/" + timestamp_dir)
filename_prefix = (
self._data_directory + "/" + timestamp_dir + "/" + "timestamp_channel_"
)
filenames = [filename_prefix + str(i) + ".dat" for i in range(1, 9)]
for fn in filenames:
if not os.path.exists(fn):
open(fn, "w").close()
ts = self.get_timestamps(clear_retrieved_timestamps=True)
for i, fn in enumerate(filenames):
with open(fn, "a") as fs:
try:
for t in ts[str(i + 1)]:
fs.write(str(t) + "\n")
except KeyError:
pass
def write_timestamps_to_file_continuous(self, seconds=-1):
"""Runs `write_timestamps_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread object
that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.write_timestamps_to_file, seconds)
return t
    def get_counters(self):
        """Returns a list of the most recent values of
        the counters.
"""
counters = (ct.c_int32 * 19)()
self.idq801Lib.TDC_getCoincCounters(counters, None)
return list(counters)
def get_counters_continuous(self, seconds=-1):
"""Runs `get_counters` continuously in a separate thread for
`seconds` amount of seconds in a loop. If seconds == -1,
it doesn't timeout. Returns a thread object that can be
stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.get_counters, seconds, True)
return t
def write_counters_to_file(self, filename="counters.dat"):
"""Writes the most recent values of the internal
counters and coincidence counters to a file
named `filename`.
"""
fn = self._data_directory + "/" + filename
if not os.path.exists(fn):
with open(fn, "w") as fs:
header = (
"1,2,3,4,5,6,7,8,1/2,1/3,1/4,2/3,2/4,3/4,"
"1/2/3,1/2/4,1/3/4,2/3/4,1/2/3/4"
)
fs.write("#" + header + "\n")
counters = self.get_counters()
counters_str = ",".join([str(c) for c in counters])
with open(fn, "a") as fs:
fs.write(counters_str + "\n")
def write_counters_to_file_continuous(self, seconds=-1, filename="counters.dat"):
"""Runs `write_counters_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread
object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(
self.write_counters_to_file, seconds, False, args=(filename,)
)
return t
def _get_channel_delays(self):
channels = range(8)
channels = (ct.c_int32 * len(channels))(*channels)
self.idq801Lib.TDC_getChannelDelays(channels)
return channels
def get_channel_delays_bins(self):
return list(self._get_channel_delays())
def get_channel_delays_ns(self):
bin = self.get_timebase()
delays_bins = list(self._get_channel_delays())
return [d * 1e9 * bin for d in delays_bins]
def set_channel_delays_bins(self, delays_bins):
delays = (ct.c_int * len(delays_bins))(*delays_bins)
return self._set_value(
delays, self.idq801Lib.TDC_setChannelDelays, self._get_channel_delays
)
def set_channel_delays_ns(self, delays_ns_dict):
"""
Set channel delays in ns. The delays are in a dictionary.
Args:
            delays_ns_dict: dict mapping channel number (as a string) to the
                desired delay in ns.
        Returns:
            The result of set_channel_delays_bins with the updated delays applied.
"""
delays_ns = self.get_channel_delays_ns()
for channel in delays_ns_dict.keys():
self.channel_delays[str(channel)] = delays_ns[int(channel) - 1]
delays_ns[int(channel) - 1] = delays_ns_dict[str(channel)]
bin = self.get_timebase()
delays_bins = [int(d * 1e-9 / bin) for d in delays_ns]
return self.set_channel_delays_bins(delays_bins)
def main():
idq801 = Idq801()
idq801.clean_data_directory()
idq801.set_channel((1, 2))
# t1 = idq801.write_counters_to_file_continuous(2)
# t2 = idq801.write_timestamps_to_file_continuous(2)
#
if __name__ == "__main__":
main()
| 35.931087 | 112 | 0.592507 | 22,977 | 0.979287 | 0 | 0 | 0 | 0 | 0 | 0 | 5,143 | 0.219196 |
408378ae2d1cd6ca599deacc2843f436a637b9b1 | 7,472 | py | Python | IRIS/IRIS_formatting.py | Xinglab/IRIS | dc3c172eae9083daf57ce0e71c4fe322ab5cc928 | [
"BSD-2-Clause-FreeBSD"
]
| 7 | 2019-11-21T08:42:37.000Z | 2021-08-13T15:49:18.000Z | IRIS/IRIS_formatting.py | Xinglab/IRIS | dc3c172eae9083daf57ce0e71c4fe322ab5cc928 | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | IRIS/IRIS_formatting.py | Xinglab/IRIS | dc3c172eae9083daf57ce0e71c4fe322ab5cc928 | [
"BSD-2-Clause-FreeBSD"
]
| 2 | 2021-05-08T08:22:38.000Z | 2022-01-20T23:43:03.000Z | import sys, numpy, argparse, os
def loadSamplelist(fin_samples, sample_fin_list, sample_header, sample_name_field, sample_size):
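    # Each line of fin_samples names a sample-order file; sample names are parsed from its comma-separated
    # bam paths and the number of samples per run is recorded in sample_size.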
for l in open(fin_samples):
ls=l.strip()
sample_fin_list.append(ls)
for r in open(ls):
rs=map(lambda x:x.split('/')[-sample_name_field].split('.bam')[0],r.strip().strip(',').split(','))
#rs=map(lambda x:x.split('/')[-2],r.strip().strip(',').split(','))
if sample_name_field==2:
sn_list=r.strip().strip(',').split(',')
for e,sn in enumerate(rs):
if len(sn)==0:
rs[e]=sn_list[e].split('/')[-1].split('.')[0]
sample_header+=rs
sample_size[ls]=len(r.split(','))
return sample_fin_list, sample_header, sample_size
def mergeEvents(events_fin_list):
total_event_dict={}
for events_fin in events_fin_list:
for index,event_l in enumerate(open(events_fin)):
if index==0:
continue
event_ls=event_l.strip().split('\t')
events_cord=event_ls[1].strip('"')+'\t'+event_ls[2].strip('"')+'\t'+'\t'.join(event_ls[3:7]+event_ls[8:10])
if events_cord in total_event_dict:
continue
total_event_dict[events_cord]=''
return total_event_dict
def writeMergedEvents(events_fin_list, splicing_event_type, cov_cutoff, data_name, fout_path):
total_event_dict=mergeEvents(events_fin_list)
print len(total_event_dict)
total_event_list=sorted(total_event_dict.keys())
fout=open(fout_path+'/prefilter_events.splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt','w')
for e in total_event_list:
fout.write(e.strip()+'\n')
fout.close()
return total_event_list
def mergeMatrixInBatch(fin_list, events_fin_list, sample_fin_list, cov_cutoff, data_name, splicing_event_type, sample_header, sample_size, total_event_list, file_batch_list, batch, fout_path):
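    # Merges per-run PSI values into one matrix, processing the merged event list in batches of `batch` events
    # to limit memory use; each batch is written to its own temporary file, and events whose mean coverage is
    # below cov_cutoff are dropped.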
for b in range(0,len(total_event_list),batch):
Intercep_Matrix={}
print '[INFO] Merging in progress. Working on batch ',b
batch_event_list= total_event_list[b:min(b+batch,len(total_event_list))]
batch_event_dict= dict.fromkeys(batch_event_list, 0)
for n,fin in enumerate(fin_list):
eventID={}
for index,event_l in enumerate(open(events_fin_list[n])):
if index==0:
continue
event_ls=event_l.strip().split('\t')
event_cord=event_ls[1].strip('"')+'\t'+event_ls[2].strip('"')+'\t'+'\t'.join(event_ls[3:7]+event_ls[8:10])
if event_cord in batch_event_dict:
eventID[event_ls[0]]=event_cord
print '[INFO] Merging file: ', fin, len(eventID)
for index,r in enumerate(open(fin)):
if index==0:
continue
rs=r.strip().split('\t')
if rs[0] not in eventID:
continue
Incl=map(float,rs[1].split(','))
Skip=map(float,rs[2].split(','))
Cov=[num+Skip[o] for o,num in enumerate(Incl)]
psi_values=[]
for i,I in enumerate(Incl):
if int(I)+int(Skip[i])==0:
psi_values.append('NaN')
else:
psi_values.append(str(round(I/int(rs[5])/(I/int(rs[5])+Skip[i]/int(rs[6])),4)))
if eventID[rs[0]] not in Intercep_Matrix:
Intercep_Matrix[eventID[rs[0]]]={}
if sample_fin_list[n] not in Intercep_Matrix[eventID[rs[0]]]:
Intercep_Matrix[eventID[rs[0]]][sample_fin_list[n]]=(psi_values,Cov)
if len(psi_values)!=sample_size[sample_fin_list[n]]:
exit('[Abort] Sample number does not match observations in JC file.')
file_batch_list.append(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_'+str(b)+'.txt')
fout=open(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_'+str(b)+'.txt','w')
fout.write('AC\tGeneName\tchr\tstrand\texonStart\texonEnd\tupstreamEE\tdownstreamES\t'+'\t'.join(sample_header)+'\n')
for k in sorted(Intercep_Matrix.keys()):
psi_value_all=[]
cov_all=[]
for sample in sample_fin_list:
if sample in Intercep_Matrix[k]:
psi_value_all+=Intercep_Matrix[k][sample][0]
cov_all+=Intercep_Matrix[k][sample][1]
else:
psi_value_all+=['NaN']*sample_size[sample]
mean=numpy.mean(cov_all)
if mean>=cov_cutoff:
fout.write(k+'\t'+'\t'.join(psi_value_all)+'\n')
fout.close()
return file_batch_list
def mergeMatrixInOne(file_batch_list, cov_cutoff, data_name, splicing_event_type, fout_path):
fout_merge=open(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt','w')
header=0
for file_batch in file_batch_list:
for j,l in enumerate(open(file_batch)):
if j==0:
if header==0:
header+=1
fout_merge.write(l)
continue
fout_merge.write(l)
fout_merge.close()
os.system('rm '+fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_*.txt')
return 'splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt'
def index_PsiMatrix(fn,outdir,delim):
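    # Writes a companion .idx file mapping each splicing event ID to its byte offset in the matrix file,
    # so individual rows can later be read by random access.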
out_fp = outdir+'/'+fn.split('/')[-1]+'.idx'
line_formatter = "{id}\t{offset}\n"
offset = 0
with open(fn, 'r') as fin:
with open(out_fp, 'w') as fout:
offset += len(fin.readline())
for line in fin:
ele = line.strip().split(delim)
eid = ':'.join([ele[0].split('_')[0].split('.')[0]]+ele[1:8])
fout.write( line_formatter.format(id=eid, offset=offset) )
offset += len(line)
return
def main(args):
cov_cutoff=args.cov_cutoff
data_name=args.data_name
sample_name_field=args.sample_name_field
splicing_event_type=args.splicing_event_type
if sample_name_field==1:
print '[INFO] Sample name parsed from bam file. (alternatively can be parsed from up level folder)'
if sample_name_field==2:
print '[INFO] Sample name parsed from folder name above the bam file. (alternatively can be parsed from bam file)'
db_dir=args.iris_db_path.rstrip('/')
#prepare files/folders in IRIS db directory
os.system('mkdir -p '+db_dir+'/'+data_name+' '+db_dir+'/'+data_name+'/splicing_matrix')
fout_path=db_dir+'/'+data_name
print '[INFO] output path: '+fout_path
fin_list=[]
sample_fin_list=[]
events_fin_list=[]
sample_size={}
sample_header=[]
file_batch_list=[]
#PARSING INPUT FILE LISTS
fin_list=[l.strip().rstrip('/')+'/JC.raw.input.'+splicing_event_type+'.txt' for l in open(args.rmats_mat_path_manifest)]
events_fin_list=[l.strip().rstrip('/')+'/fromGTF.'+splicing_event_type+'.txt' for l in open(args.rmats_mat_path_manifest)]
sample_fin_list, sample_header, sample_size= loadSamplelist(args.rmats_sample_order,sample_fin_list, sample_header,sample_name_field, sample_size)
#MAKING MERGED EVENTS LIST
total_event_list= writeMergedEvents(events_fin_list, splicing_event_type, cov_cutoff, data_name, fout_path)
if args.merge_events_only:
exit('[INFO] Done merging events only.')
print '[INFO] Done loading file dir', len(total_event_list)
#START MERGING MATRICES IN BATCH MODE FOLLOWING EVENTS LIST GENERATED.
batch=20000
file_batch_list=mergeMatrixInBatch(fin_list, events_fin_list, sample_fin_list, cov_cutoff, data_name, splicing_event_type, sample_header, sample_size, total_event_list, file_batch_list, batch, fout_path)
print '[INFO] Done merging matrices by batch.'
merged_file_name=mergeMatrixInOne(file_batch_list, cov_cutoff, data_name, splicing_event_type, fout_path)
print '[INFO] Done merging matrices: '+merged_file_name
#create index in IRIS db directory
index_PsiMatrix(fout_path+'/splicing_matrix/'+merged_file_name,fout_path+'/splicing_matrix','\t')
print '[INFO] Finished. Created matrix: '+fout_path
if __name__ == '__main__':
main()
| 42.454545 | 204 | 0.722564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,509 | 0.201954 |
408407cd45d1d31df97defaffbefa6540d0ab484 | 7,444 | py | Python | quests/dataflow_python/streaming_event_generator.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
]
| 2 | 2022-01-06T11:52:57.000Z | 2022-01-09T01:53:56.000Z | quests/dataflow_python/streaming_event_generator.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
]
| null | null | null | quests/dataflow_python/streaming_event_generator.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
]
| null | null | null | # This program reads a file representing web server logs in common log format and streams them into a PubSub topic
# with lag characteristics as determined by command-line arguments
import argparse
from google.cloud import pubsub_v1
import time
from datetime import datetime, timezone
import random
from anytree.importer import DictImporter
import json
from multiprocessing import Process
parser = argparse.ArgumentParser(__file__, description="event_generator")
parser.add_argument("--taxonomy", "-x", dest="taxonomy_fp",
help="A .json file representing a taxonomy of web resources",
default="taxonomy.json")
parser.add_argument("--users_fp", "-u", dest="users_fp",
help="A .csv file of users",
default="users.csv")
parser.add_argument("--off_to_on", "-off", dest="off_to_on_prob", type=float,
help="A float representing the probability that a user who is offline will come online",
default=.25)
parser.add_argument("--on_to_off", "-on", dest="on_to_off_prob", type=float,
help="A float representing the probability that a user who is online will go offline",
default=.1)
parser.add_argument("--max_lag_millis", '-l', dest="max_lag_millis", type=int,
help="An integer representing the maximum amount of lag in millisecond", default=250)
parser.add_argument("--project_id", "-p", type=str, dest="project_id", help="A GCP Project ID", required=True)
parser.add_argument("--topic_name", "-t", dest="topic_name", type=str,
help="The name of the topic where the messages to be published", required=True)
avg_secs_between_events = 5
args = parser.parse_args()
taxonomy_fp = args.taxonomy_fp
users_fp = args.users_fp
online_to_offline_probability = args.on_to_off_prob
offline_to_online_probability = args.off_to_on_prob
max_lag_millis = args.max_lag_millis
project_id = args.project_id
topic_name = args.topic_name
min_file_size_bytes = 100
max_file_size_bytes = 500
verbs = ["GET"]
responses = [200]
log_fields = ["ip", "user_id", "lat", "lng", "timestamp", "http_request",
"http_response", "num_bytes", "user_agent"]
def extract_resources(taxonomy_filepath):
"""
Reads a .json representing a taxonomy and returns
a data structure representing their hierarchical relationship
:param taxonomy_file: a string representing a path to a .json file
:return: Node representing root of taxonomic tree
"""
try:
with open(taxonomy_filepath, 'r') as fp:
json_str = fp.read()
json_data = json.loads(json_str)
root = DictImporter().import_(json_data)
finally:
fp.close()
return root
def read_users(users_fp):
"""
Reads a .csv from @user_fp representing users into a list of dictionaries,
each elt of which represents a user
:param user_fp: a .csv file where each line represents a user
:return: a list of dictionaries
"""
users = []
with open(users_fp, 'r') as fp:
fields = fp.readline().rstrip().split(",")
for line in fp:
user = dict(zip(fields, line.rstrip().split(",")))
users.append(user)
return users
def sleep_then_publish_burst(burst, publisher, topic_path):
"""
:param burst: a list of dictionaries, each representing an event
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return:
"""
sleep_secs = random.uniform(0, max_lag_millis/1000)
time.sleep(sleep_secs)
publish_burst(burst, publisher, topic_path)
def publish_burst(burst, publisher, topic_path):
"""
Publishes and prints each event
:param burst: a list of dictionaries, each representing an event
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return:
"""
for event_dict in burst:
json_str = json.dumps(event_dict)
data = json_str.encode('utf-8')
publisher.publish(topic_path, data=data, timestamp=event_dict['timestamp'])
def create_user_process(user, root):
"""
Code for continuously-running process representing a user publishing
events to pubsub
:param user: a dictionary representing characteristics of the user
:param root: an instance of AnyNode representing the home page of a website
:return:
"""
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_name)
user['page'] = root
user['is_online'] = True
user['offline_events'] = []
while True:
time_between_events = random.uniform(0, avg_secs_between_events * 2)
time.sleep(time_between_events)
prob = random.random()
event = generate_event(user)
if user['is_online']:
if prob < online_to_offline_probability:
user['is_online'] = False
user['offline_events'] = [event]
else:
sleep_then_publish_burst([event], publisher, topic_path)
else:
user['offline_events'].append(event)
if prob < offline_to_online_probability:
user['is_online'] = True
sleep_then_publish_burst(user['offline_events'], publisher, topic_path)
user['offline_events'] = []
def generate_event(user):
"""
Returns a dictionary representing an event
:param user:
:return:
"""
user['page'] = get_next_page(user)
uri = str(user['page'].name)
event_time = datetime.now(tz=timezone.utc)
current_time_str = event_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
file_size_bytes = random.choice(range(min_file_size_bytes, max_file_size_bytes))
http_request = "\"{} {} HTTP/1.0\"".format(random.choice(verbs), uri)
http_response = random.choice(responses)
event_values = [user['ip'], user['id'], float(user['lat']), float(user['lng']), current_time_str, http_request,
http_response, file_size_bytes, user['user_agent']]
return dict(zip(log_fields, event_values))
def get_next_page(user):
"""
Consults the user's representation of the web site taxonomy to determine the next page that they visit
:param user:
:return:
"""
possible_next_pages = [user['page']]
if not user['page'].is_leaf:
possible_next_pages += list(user['page'].children)
if (user['page'].parent != None):
possible_next_pages += [user['page'].parent]
next_page = random.choice(possible_next_pages)
return next_page
if __name__ == '__main__':
users = read_users(users_fp)
root = extract_resources(taxonomy_fp)
processes = [Process(target=create_user_process, args=(user, root))
for user in users]
[process.start() for process in processes]
while True:
time.sleep(1) | 39.595745 | 116 | 0.657174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,991 | 0.4018 |
4084a9455c8745ebe8cdb17a3177996a15d02016 | 210 | py | Python | src/models/configs/database.py | Nardri/rbac-service | c5cf6baf60e95a7790156c85e37c76c697efd585 | [
"MIT"
]
| null | null | null | src/models/configs/database.py | Nardri/rbac-service | c5cf6baf60e95a7790156c85e37c76c697efd585 | [
"MIT"
]
| null | null | null | src/models/configs/database.py | Nardri/rbac-service | c5cf6baf60e95a7790156c85e37c76c697efd585 | [
"MIT"
]
| null | null | null | """Database setup"""
# Third party library
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# initialization of the database and migration
database = SQLAlchemy()
migrate = Migrate()
| 21 | 46 | 0.795238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.414286 |
4085bccb38fa4dfee0e895626450b9f141da766f | 4,111 | py | Python | postreise/plot/plot_heatmap.py | lanesmith/PostREISE | 69d47968cf353bca57aa8b587cc035d127fa424f | [
"MIT"
]
| 1 | 2022-01-31T16:53:40.000Z | 2022-01-31T16:53:40.000Z | postreise/plot/plot_heatmap.py | lanesmith/PostREISE | 69d47968cf353bca57aa8b587cc035d127fa424f | [
"MIT"
]
| 71 | 2021-01-22T20:09:47.000Z | 2022-03-30T16:53:18.000Z | postreise/plot/plot_heatmap.py | lanesmith/PostREISE | 69d47968cf353bca57aa8b587cc035d127fa424f | [
"MIT"
]
| 7 | 2021-04-02T14:45:21.000Z | 2022-01-17T22:23:38.000Z | import datetime as dt
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas as pd
from powersimdata.input.check import _check_time_series
from postreise.analyze.time import change_time_zone
def plot_heatmap(
series,
time_zone=None,
time_zone_label=None,
title=None,
cmap="PiYG",
scale=None,
save_filename=None,
origin="upper",
vmin=None,
vmax=None,
cbar_format=None,
cbar_tick_values=None,
cbar_label=None,
cbar_tick_labels=None,
contour_levels=None,
figsize=(16, 8),
):
"""Show time-series values via an imshow where each column is one color-coded day.
:param pandas.Series series: a time-series of values to be color-coded.
:param str time_zone: a time zone to be passed as `tz` kwarg to
:func:`postreise.analyze.time.change_time_zone`.
:param str time_zone_label: a time zone label to be added to the y axis label.
:param str title: a title to be added to the figure.
:param str/matplotlib.colors.Colormap cmap: colormap specification to be passed
as `cmap` kwarg to :func:`matplotlib.pyplot.imshow`.
:param int/float scale: a scaling factor to be applied to the series values.
:param str save_filename: a path to save the figure to.
:param str origin: the vertical location of the origin, either "upper" or "lower".
:param int/float vmin: Minimum value for coloring, to be passed as `vmin` kwarg to
:func:`matplotlib.pyplot.imshow`.
:param int/float vmax: Maximum value for coloring, to be passed as `vmax` kwarg to
:func:`matplotlib.pyplot.imshow`.
:param str/matplotlib.ticker.Formatter cbar_format: a formatter for colorbar labels,
to be passed as `format` kwarg to :func:`matplotlib.pyplot.colorbar`.
:param iterable cbar_tick_values: colorbar tick locations, to be passed as
`ticks` kwarg to :func:`matplotlib.pyplot.colorbar`.
:param str cbar_label: axis label for colorbar.
:param iterable cbar_tick_labels: colorbar tick labels.
:param iterable contour_levels: values at which to draw contours, passed as `levels`
kwarg to :func:`matplotlib.pyplot.contour`.
:param tuple(int/float, int/float) figsize: size of figure.
"""
_check_time_series(series, "series")
df = series.to_frame(name="values").asfreq("H")
year = df.index[0].year
if time_zone is not None:
df = change_time_zone(df, time_zone)
df["date"] = df.index.date
df["hour"] = df.index.hour
df_reshaped = pd.pivot(
df,
index="date",
columns="hour",
values="values",
)
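    # Rows of df_reshaped are dates and columns are hours 0-23; its transpose is drawn so that each imshow column is one day.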
xlims = mdates.date2num([df_reshaped.index[0], df_reshaped.index[-1]])
ylims = mdates.date2num([dt.datetime(year, 1, 1, 0), dt.datetime(year, 1, 1, 23)])
if scale is not None:
df_reshaped *= scale
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
# if necessary, flip ylims so labels follow data from top to bottom
extent = [*xlims, *ylims] if origin == "lower" else [*xlims, ylims[1], ylims[0]]
im = plt.imshow(
df_reshaped.T,
cmap=cmap,
aspect="auto",
extent=extent,
origin=origin,
vmin=vmin,
vmax=vmax,
)
if contour_levels is not None:
ax.contour(df_reshaped.T, extent=extent, levels=contour_levels, origin=origin)
date_format = mdates.DateFormatter("%m/%d")
ax.xaxis_date()
ax.xaxis.set_major_formatter(date_format)
ax.set_xlabel("Date")
time_format = mdates.DateFormatter("%H:%M")
ax.yaxis_date()
ax.yaxis.set_major_formatter(time_format)
y_axis_label = "Time" if time_zone_label is None else f"Time {time_zone_label}"
ax.set_ylabel(y_axis_label)
cbar = fig.colorbar(im, format=cbar_format, ticks=cbar_tick_values)
if cbar_label is not None:
cbar.set_label(cbar_label)
if title is not None:
plt.title(title)
if cbar_tick_labels is not None:
cbar.ax.set_yticklabels(cbar_tick_labels)
if save_filename is not None:
plt.savefig(save_filename, bbox_inches="tight")
| 37.036036 | 88 | 0.683289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,889 | 0.459499 |
4086e4dd21e9a774c97734bcd63cd0233cf32c3d | 4,000 | py | Python | tensorflow_federated/python/simulation/file_per_user_client_data.py | houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | [
"Apache-2.0"
]
| null | null | null | tensorflow_federated/python/simulation/file_per_user_client_data.py | houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | [
"Apache-2.0"
]
| null | null | null | tensorflow_federated/python/simulation/file_per_user_client_data.py | houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of the ClientData abstract base class."""
import collections
import os.path
from typing import Callable, Mapping
import tensorflow as tf
from tensorflow_federated.python import core as tff
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.simulation import client_data
from tensorflow_federated.python.tensorflow_libs import tensor_utils
class FilePerUserClientData(client_data.ClientData):
"""A `tf.simulation.ClientData` that maps a set of files to a dataset.
This mapping is restricted to one file per user.
"""
def __init__(self, client_ids_to_files: Mapping[str, str],
dataset_fn: Callable[[str], tf.data.Dataset]):
"""Constructs a `tf.simulation.ClientData` object.
Args:
client_ids_to_files: A mapping from string client IDs to filepaths
containing the user's data.
dataset_fn: A factory function that takes a filepath (must accept
both strings and tensors) and returns a `tf.data.Dataset` corresponding
to this path.
"""
py_typecheck.check_type(client_ids_to_files, collections.abc.Mapping)
if not client_ids_to_files:
raise ValueError('`client_ids` must have at least one client ID')
py_typecheck.check_callable(dataset_fn)
self._client_ids = sorted(client_ids_to_files.keys())
def create_dataset_for_filename_fn(client_id):
return dataset_fn(client_ids_to_files[client_id])
@tff.tf_computation(tf.string)
def dataset_computation(client_id):
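      # A static lookup table maps client IDs to file paths so the per-client dataset can be built inside the TFF computation graph.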
client_ids_to_path = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
list(client_ids_to_files.keys()),
list(client_ids_to_files.values())), '')
client_path = client_ids_to_path.lookup(client_id)
return dataset_fn(client_path)
self._create_tf_dataset_fn = create_dataset_for_filename_fn
self._dataset_computation = dataset_computation
g = tf.Graph()
with g.as_default():
tf_dataset = self._create_tf_dataset_fn(self._client_ids[0])
self._element_type_structure = tf_dataset.element_spec
@property
def client_ids(self):
return self._client_ids
def create_tf_dataset_for_client(self, client_id):
tf_dataset = self._create_tf_dataset_fn(client_id)
tensor_utils.check_nested_equal(tf_dataset.element_spec,
self._element_type_structure)
return tf_dataset
@property
def element_type_structure(self):
return self._element_type_structure
@classmethod
def create_from_dir(cls, path, create_tf_dataset_fn=tf.data.TFRecordDataset):
"""Builds a `tff.simulation.FilePerUserClientData`.
Iterates over all files in `path`, using the filename as the client ID. Does
not recursively search `path`.
Args:
path: A directory path to search for per-client files.
      create_tf_dataset_fn: A callable that creates a `tf.data.Dataset` object
for a given file in the directory specified in `path`.
Returns:
A `tff.simulation.FilePerUserClientData` object.
"""
client_ids_to_paths_dict = {
filename: os.path.join(path, filename)
for filename in tf.io.gfile.listdir(path)
}
return FilePerUserClientData(client_ids_to_paths_dict, create_tf_dataset_fn)
@property
def dataset_computation(self):
return self._dataset_computation
| 36.363636 | 80 | 0.74325 | 2,990 | 0.7475 | 0 | 0 | 1,368 | 0.342 | 0 | 0 | 1,635 | 0.40875 |
4086e6c92cd0f6bf0670ff63d76bbec71943f194 | 162 | py | Python | 20-Blog_Clone_Project/blog_project_Practice/blog/admin.py | andy2167565/Django-Bootcamp-Practice | f08d2866382db96060450d4dbd1ffaca7243f623 | [
"MIT"
]
| null | null | null | 20-Blog_Clone_Project/blog_project_Practice/blog/admin.py | andy2167565/Django-Bootcamp-Practice | f08d2866382db96060450d4dbd1ffaca7243f623 | [
"MIT"
]
| null | null | null | 20-Blog_Clone_Project/blog_project_Practice/blog/admin.py | andy2167565/Django-Bootcamp-Practice | f08d2866382db96060450d4dbd1ffaca7243f623 | [
"MIT"
]
| null | null | null | from django.contrib import admin
from blog.models import Post, Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
| 23.142857 | 38 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.179012 |
408710371dd0d37abadd9978ea2c4a4f85a8ec3b | 6,459 | py | Python | tests/compilation/request/test_request_compiler.py | ymoch/preacher | ae68170d14c72791884e91b20054bd13a79b52d0 | [
"MIT"
]
| 3 | 2019-08-01T03:14:49.000Z | 2020-01-31T08:55:22.000Z | tests/compilation/request/test_request_compiler.py | ymoch/preacher | ae68170d14c72791884e91b20054bd13a79b52d0 | [
"MIT"
]
| 353 | 2019-04-14T14:53:28.000Z | 2022-03-11T03:26:08.000Z | tests/compilation/request/test_request_compiler.py | ymoch/preacher | ae68170d14c72791884e91b20054bd13a79b52d0 | [
"MIT"
]
| 1 | 2020-08-01T06:23:08.000Z | 2020-08-01T06:23:08.000Z | from unittest.mock import NonCallableMock, sentinel
from pytest import mark, raises, fixture
from preacher.compilation.argument import Argument
from preacher.compilation.error import CompilationError, NamedNode, IndexedNode
from preacher.compilation.request.request import RequestCompiler, RequestCompiled
from preacher.compilation.request.request_body import RequestBodyCompiler
from preacher.core.request import Method
PKG = "preacher.compilation.request.request"
@fixture
def body():
body = NonCallableMock(RequestBodyCompiler)
body.of_default.return_value = sentinel.new_body_compiler
return body
@fixture
def default() -> RequestCompiled:
return RequestCompiled(
method=sentinel.default_method,
path=sentinel.default_path,
headers=sentinel.default_headers,
params=sentinel.default_params,
body=sentinel.default_body,
)
@fixture
def compiler(body, default: RequestCompiled) -> RequestCompiler:
return RequestCompiler(body=body, default=default)
@mark.parametrize(
("obj", "expected_path"),
(
([], []),
({"method": 1}, [NamedNode("method")]),
({"method": "invalid"}, [NamedNode("method")]),
({"path": {"key": "value"}}, [NamedNode("path")]),
({"headers": ""}, [NamedNode("headers")]),
({"headers": {"int": 1}}, [NamedNode("headers")]),
({"headers": {1: "not-a-string-key"}}, [NamedNode("headers")]),
),
)
def test_given_an_invalid_obj(compiler: RequestCompiler, obj, expected_path):
with raises(CompilationError) as error_info:
compiler.compile(obj)
assert error_info.value.path == expected_path
def test_given_an_empty_mapping(compiler: RequestCompiler):
compiled = compiler.compile({})
assert compiled.method is sentinel.default_method
assert compiled.path is sentinel.default_path
assert compiled.headers is sentinel.default_headers
assert compiled.params is sentinel.default_params
assert compiled.body is sentinel.default_body
@mark.parametrize(
("method_obj", "expected"),
(
("get", Method.GET),
("POST", Method.POST),
("Put", Method.PUT),
("Delete", Method.DELETE),
),
)
def test_given_a_valid_method(compiler: RequestCompiler, method_obj, expected):
obj = {"method": Argument("method")}
arguments = {"method": method_obj}
compiled = compiler.compile(obj, arguments)
assert compiled.method is expected
@mark.parametrize(
("headers_obj", "expected"),
(
({}, {}),
({"null": None, "empty": ""}, {"empty": ""}),
({"n1": "v1", "n2": "v2"}, {"n1": "v1", "n2": "v2"}),
),
)
def test_given_valid_headers(compiler: RequestCompiler, headers_obj, expected):
obj = {"headers": Argument("headers")}
arguments = {"headers": headers_obj}
compiled = compiler.compile(obj, arguments)
assert compiled.headers == expected
def test_given_an_invalid_params(compiler: RequestCompiler, mocker):
compile_params = mocker.patch(f"{PKG}.compile_url_params")
compile_params.side_effect = CompilationError("msg", node=NamedNode("x"))
with raises(CompilationError) as error_info:
compiler.compile({"params": sentinel.params})
assert error_info.value.path == [NamedNode("params"), NamedNode("x")]
compile_params.assert_called_once_with(sentinel.params, None)
def test_given_valid_params(compiler: RequestCompiler, mocker):
compile_params = mocker.patch(f"{PKG}.compile_url_params")
compile_params.return_value = sentinel.compiled_params
compiled = compiler.compile({"params": sentinel.params}, sentinel.args)
assert compiled.params == sentinel.compiled_params
compile_params.assert_called_once_with(sentinel.params, sentinel.args)
def test_given_invalid_body(compiler: RequestCompiler, body):
body.compile.side_effect = CompilationError("x", node=IndexedNode(1))
with raises(CompilationError) as error_info:
compiler.compile({"body": sentinel.body_obj})
assert error_info.value.path == [NamedNode("body"), IndexedNode(1)]
body.compile.assert_called_once_with(sentinel.body_obj, None)
def test_given_valid_body(compiler: RequestCompiler, body):
body.compile.return_value = sentinel.body
compiled = compiler.compile({"body": sentinel.body_obj}, sentinel.args)
assert compiled.body is sentinel.body
body.compile.assert_called_once_with(sentinel.body_obj, sentinel.args)
def test_given_a_string(compiler: RequestCompiler):
compiled = compiler.compile(Argument("path"), {"path": "/path"})
assert compiled.method is sentinel.default_method
assert compiled.path == "/path"
assert compiled.headers is sentinel.default_headers
assert compiled.params is sentinel.default_params
assert compiled.body is sentinel.default_body
def test_of_default_no_body(compiler: RequestCompiler, body, mocker):
ctor = mocker.patch(f"{PKG}.RequestCompiler")
ctor.return_value = sentinel.compiler_of_default
new_default = RequestCompiled(
method=sentinel.new_default_method,
path=sentinel.new_default_path,
headers=sentinel.new_default_headers,
params=sentinel.new_default_params,
)
new_compiler = compiler.of_default(new_default)
assert new_compiler is sentinel.compiler_of_default
ctor.assert_called_once_with(
body=body,
default=RequestCompiled(
method=sentinel.new_default_method,
path=sentinel.new_default_path,
headers=sentinel.new_default_headers,
params=sentinel.new_default_params,
body=sentinel.default_body,
),
)
body.of_default.assert_not_called()
def test_of_default_body(compiler: RequestCompiler, body, mocker):
ctor = mocker.patch(f"{PKG}.RequestCompiler")
ctor.return_value = sentinel.compiler_of_default
new_default = RequestCompiled(body=sentinel.new_default_body)
new_compiler = compiler.of_default(new_default)
assert new_compiler is sentinel.compiler_of_default
ctor.assert_called_once_with(
body=sentinel.new_body_compiler,
default=RequestCompiled(
method=sentinel.default_method,
path=sentinel.default_path,
headers=sentinel.default_headers,
params=sentinel.default_params,
body=sentinel.new_default_body,
),
)
body.of_default.assert_called_once_with(sentinel.new_default_body)
| 34.174603 | 81 | 0.71203 | 0 | 0 | 0 | 0 | 2,065 | 0.319709 | 0 | 0 | 562 | 0.08701 |
408784a24cae84367d1864aa02a8ff6e4a8e197a | 1,109 | py | Python | bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 5b4ef6e207a5017f7b4274d8238550b4988d0a6e | [
"MIT"
]
| null | null | null | bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 5b4ef6e207a5017f7b4274d8238550b4988d0a6e | [
"MIT"
]
| null | null | null | bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 5b4ef6e207a5017f7b4274d8238550b4988d0a6e | [
"MIT"
]
| null | null | null | # imports
from telegram.ext import (
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
)
from handler_functions.start import start
from handler_functions.bio import bio
from handler_functions.gender import gender
from handler_functions.photo import photo, skip_photo
from handler_functions.location import location, skip_location
from handler_functions.cancel import cancel
from conversation_handlers.stage_constants import *
# Adds conversation handler with the states GENDER, PHOTO, LOCATION and BIO for stage 1 of the sign up
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
GENDER: [MessageHandler(Filters.regex('^(Gentleman|Lady|I am a unicorn.)$'), gender)],
PHOTO: [MessageHandler(Filters.photo, photo), CommandHandler('skip', skip_photo)],
LOCATION: [
MessageHandler(Filters.location, location),
CommandHandler('skip', skip_location),
],
BIO: [MessageHandler(Filters.text & ~Filters.command, bio)],
},
fallbacks=[CommandHandler('cancel', cancel)],
) | 38.241379 | 102 | 0.734896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.156898 |
4087ac882a0e642cb2645d67bfb2e7473130d2e9 | 265 | py | Python | python100days/day03/conversion.py | lanSeFangZhou/pythonbase | f4daa373573b2fc0a59a5eb919d02eddf5914e18 | [
"Apache-2.0"
]
| null | null | null | python100days/day03/conversion.py | lanSeFangZhou/pythonbase | f4daa373573b2fc0a59a5eb919d02eddf5914e18 | [
"Apache-2.0"
]
| 1 | 2021-06-02T00:58:26.000Z | 2021-06-02T00:58:26.000Z | python100days/day03/conversion.py | lanSeFangZhou/pythonbase | f4daa373573b2fc0a59a5eb919d02eddf5914e18 | [
"Apache-2.0"
]
| null | null | null | # Convert between imperial inches and metric centimeters
value = float(input('请输入长度:'))
unit = input('请输入单位:')
if unit == 'in' or unit == '英寸':
print('%f英寸 = %f厘米' % (value, value * 2.54))
elif unit == '厘米' or unit == 'cm':
print('%f 厘米 = %f英寸' % (value, value / 2.54))
else:
print('请输入有效的单位') | 26.5 | 49 | 0.558491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.501393 |
4088dc579c34d53321481174879bd2850ab8f43e | 485 | py | Python | tests/models/test_dtfactory.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
]
| 4 | 2021-07-05T20:21:41.000Z | 2021-09-02T14:13:26.000Z | tests/models/test_dtfactory.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
]
| null | null | null | tests/models/test_dtfactory.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
]
| 1 | 2021-03-25T15:04:12.000Z | 2021-03-25T15:04:12.000Z | from ocean_lib.models.data_token import DataToken
from ocean_lib.models.dtfactory import DTFactory
from ocean_lib.ocean.util import to_base_18
def test1(network, alice_wallet, dtfactory_address):
dtfactory = DTFactory(dtfactory_address)
dt_address = dtfactory.createToken('foo_blob', 'DT1', 'DT1', to_base_18(1000), from_wallet=alice_wallet)
dt = DataToken(dtfactory.get_token_address(dt_address))
assert isinstance(dt, DataToken)
assert dt.blob() == 'foo_blob'
| 37.307692 | 108 | 0.781443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.061856 |
408a713d6a5b30cf98528302f34eefe2000e2530 | 4,223 | py | Python | methods/unilm_based/unilm/src/pytorch_pretrained_bert/optimization_fp16.py | Guaguago/CommonGen | 0a81b4edb8cd111571eba817eb994420f1070c48 | [
"MIT"
]
| 100 | 2020-01-30T08:14:25.000Z | 2022-03-30T08:59:33.000Z | methods/unilm_based/unilm/src/pytorch_pretrained_bert/optimization_fp16.py | Guaguago/CommonGen | 0a81b4edb8cd111571eba817eb994420f1070c48 | [
"MIT"
]
| 4 | 2021-06-08T22:34:33.000Z | 2022-03-12T00:50:13.000Z | methods/unilm_based/unilm/src/pytorch_pretrained_bert/optimization_fp16.py | Guaguago/CommonGen | 0a81b4edb8cd111571eba817eb994420f1070c48 | [
"MIT"
]
| 15 | 2020-04-13T22:56:27.000Z | 2022-03-10T02:44:26.000Z | # coding=utf-8
"""PyTorch optimization for BERT model."""
from apex.contrib.optimizers import FP16_Optimizer
class FP16_Optimizer_State(FP16_Optimizer):
def __init__(self,
init_optimizer,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True):
super(FP16_Optimizer_State, self).__init__(init_optimizer,
static_loss_scale, dynamic_loss_scale, dynamic_loss_args, verbose)
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_groups_flat'] = self.fp32_groups_flat
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.cur_scale = state_dict['cur_scale']
self.cur_iter = state_dict['cur_iter']
if state_dict['dynamic_loss_scale']:
self.last_overflow_iter = state_dict['last_overflow_iter']
self.scale_factor = state_dict['scale_factor']
self.scale_window = state_dict['scale_window']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']):
current.data.copy_(saved.data)
| 52.135802 | 117 | 0.656642 | 4,110 | 0.973242 | 0 | 0 | 0 | 0 | 0 | 0 | 2,612 | 0.618518 |
408ac0aced4fa7689e5bb64bd87a616424377650 | 46 | py | Python | ermaket/api/generation/__init__.py | SqrtMinusOne/ERMaket_Experiment | c4a7b61651edd15a619d9b690e2aaeaab4de282d | [
"Apache-2.0"
]
| null | null | null | ermaket/api/generation/__init__.py | SqrtMinusOne/ERMaket_Experiment | c4a7b61651edd15a619d9b690e2aaeaab4de282d | [
"Apache-2.0"
]
| null | null | null | ermaket/api/generation/__init__.py | SqrtMinusOne/ERMaket_Experiment | c4a7b61651edd15a619d9b690e2aaeaab4de282d | [
"Apache-2.0"
]
| null | null | null | from .generator import *
from .types import *
| 15.333333 | 24 | 0.73913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
408c88fb92a834b62165870e3156152b98dd330c | 956 | py | Python | Source/stack0verf10w.py | IRIDIUM-SUB/Software-Security-Course-Design | 596664a728d73133e44a4566027561170c5d2ae8 | [
"MIT"
]
| null | null | null | Source/stack0verf10w.py | IRIDIUM-SUB/Software-Security-Course-Design | 596664a728d73133e44a4566027561170c5d2ae8 | [
"MIT"
]
| null | null | null | Source/stack0verf10w.py | IRIDIUM-SUB/Software-Security-Course-Design | 596664a728d73133e44a4566027561170c5d2ae8 | [
"MIT"
]
| null | null | null | import Bugdetectionuniversalframe
import os
import re
class overflowdetection(Bugdetectionuniversalframe.uniframe):
def __init__(self):
Bugdetectionuniversalframe.uniframe.__init__(self)
def deploy(self):#Re-write deploy method
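        # Runs splint over the selected source file and scans its report for out-of-bounds / buffer-overflow warnings.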
        flag = 0
        final = ""  # ensure `final` is defined even when no file path has been selected
self.filesort()
if self.path != "":
command=" splint +weak +bounds -hints -varuse +posixlib "+self.path
os.system(command)
r= os.popen(command)
textlist=r.readlines()
final=""
for text in textlist:
                #print(text)  # print the cmd output for debugging
final=final+text
if re.search(r"out-of-bounds|buffer overflow",text):
flag=1
if flag:
final=final+"\n Looks like there is a stack overflow vulnerability."
else:
final="Seems no overflow vulnerability."
self.toolbox.textupdate(self.tokentext,final)
| 30.83871 | 84 | 0.582636 | 912 | 0.942149 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.242769 |
408e5eee21b5e0ed193fbd1da82ee85348eb987d | 7,517 | py | Python | ndbc/station.py | ppokhrel1/ndbc | e8ed73ae35a49c967384e2c80c1a2bf838eeb0c2 | [
"MIT"
]
| null | null | null | ndbc/station.py | ppokhrel1/ndbc | e8ed73ae35a49c967384e2c80c1a2bf838eeb0c2 | [
"MIT"
]
| null | null | null | ndbc/station.py | ppokhrel1/ndbc | e8ed73ae35a49c967384e2c80c1a2bf838eeb0c2 | [
"MIT"
]
| null | null | null | """
station.py
"""
from datetime import datetime, timedelta
import gzip
import numpy as np
import requests
import urllib
_BASEURL = 'http://www.ndbc.noaa.gov/data'
_SENSOR_URL = _BASEURL+'/stations/buoyht.txt'
_REALTIME_URL = _BASEURL+'/realtime2/'
_RECENT_URL = _BASEURL+'/stdmet/'
_HISTORICAL_URL = _BASEURL+'/historical/stdmet/'
_STATION_URL = _BASEURL+'/stations/station_table.txt'
class Station(object):
"""NDBC Station class."""
def __init__(self, station_id, starttime=None, endtime=None):
self.id = str(station_id)
self.time = []; self.wspd = []; self.wdir = []; self.gst = []
self.wvht = []; self.dpd = []; self.apd = []; self.mwd = []
self.pres = []; self.atmp = []; self.wtmp = []; self.dewp = []
self.vis = []; self.ptdy = []; self.tide = []
self._get_info()
if starttime and endtime:
self.get_stdmet(starttime, endtime)
def _get_info(self):
"""Collects station metadata."""
r = requests.get(_STATION_URL)
if not r.status_code == 200:
raise RuntimeError('Received response status '
+str(r.status_code)+' from '+_STATION_URL)
lines = r.text.split('\n')
try:
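            # station_table.txt is pipe-delimited; the five-character station ID is the first field of each line.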
data = [line for line in lines if self.id == line[:5]].pop()
except IndexError:
raise ValueError('Station '+self.id+' not found in '+_STATION_URL)
station_id, self.owner, self.ttype, self.hull, self.name, self.payload,\
self.location, self.timezone, self.forecast, self.note = data.split('|')
loc = self.location.split()
self.lat, self.lon = float(loc[0]), float(loc[2])
if loc[1] == 'S':
self.lat = -self.lat
if loc[3] == 'W':
self.lon = -self.lon
def get_stdmet(self, starttime, endtime):
"""Gets the standard meteorological data given start and end times."""
# re-initialize if we are to overwrite data
#if self.time != [] and self.time != None :
self.__init__(self.id)
if starttime.year < datetime.utcnow().year:
datatype = 'historical'
elif starttime > datetime.utcnow() - timedelta(days=45):
self._get_stdmet_realtime()
return
elif starttime.year == datetime.utcnow().year:
datatype = 'recent'
else:
raise ValueError('starttime cannot be in the future')
time = starttime
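        # Historical data lives in one archive file per year, recent data in one file per month; step through them until endtime is covered.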
while True:
if datatype == 'historical':
filename = self.id+'h'+str(time.year)+'.txt.gz'
fileurl = _HISTORICAL_URL+filename
elif datatype == 'recent':
filename = self.id+str(time.month)+str(time.year)+'.txt.gz'
fileurl = _RECENT_URL+time.strftime('%b')+'/'+filename
f = gzip.open(urllib.request.urlopen(fileurl))
if time.year >= 2007:
datastart = 2
else:
datastart = 1
lines = [line.decode().strip() for line in f.readlines()]
for line in lines[datastart:]:
line = line.split()
try:
self.time.append(datetime.strptime(''.join(line[:5]), '%Y%m%d%H%M'))
nn = 5
except ValueError:
self.time.append(datetime.strptime(''.join(line[:4]), '%Y%m%d%H'))
nn = 4
self.wdir.append(np.nan if line[nn] == '999' else float(line[nn]))
self.wspd.append(np.nan if line[nn+1] == '99.0' else float(line[nn+1]))
self.gst.append(np.nan if line[nn+2] == '99.0' else float(line[nn+2]))
self.wvht.append(np.nan if line[nn+3] == '99.0' else float(line[nn+3]))
self.dpd.append(np.nan if line[nn+4] == '99.0' else float(line[nn+4]))
self.apd.append(np.nan if line[nn+5] == '99.0' else float(line[nn+5]))
self.mwd.append(np.nan if line[nn+6] == '999' else float(line[nn+6]))
self.pres.append(np.nan if line[nn+7] == '9999.0' else float(line[nn+7]))
self.atmp.append(np.nan if line[nn+8] == '99.0' else float(line[nn+8]))
self.wtmp.append(np.nan if line[nn+9] == '99.0' else float(line[nn+9]))
self.dewp.append(np.nan if line[nn+10] == '99.0' else float(line[nn+10]))
if self.time[-1] > endtime:
break
year = time.year
month = time.month
if datatype == 'historical':
year += 1
time = datetime(year, month, 1)
continue
elif datatype == 'recent':
month += 1
if month > 12:
break
else:
continue
self.time = np.array(self.time)
self.wdir = np.array(self.wdir)
self.wspd = np.array(self.wspd)
self.gst = np.array(self.gst)
self.wvht = np.array(self.wvht)
self.dpd = np.array(self.dpd)
self.apd = np.array(self.apd)
self.mwd = np.array(self.mwd)
self.pres = np.array(self.pres)
self.atmp = np.array(self.atmp)
self.wtmp = np.array(self.wtmp)
self.dewp = np.array(self.dewp)
def _get_stdmet_realtime(self):
"""
Reads the full realtime data feed (last 45 days) from the NDBC server.
"""
fileurl = _REALTIME_URL+self.id+'.txt'
r = requests.get(fileurl)
if not r.status_code == 200:
raise RuntimeError('Received response status '
+str(r.status_code)+' from '+fileurl)
lines = r.text.split('\n')
for line in lines[-2:1:-1]:
line = line.split()
self.time.append(datetime.strptime(''.join(line[:5]), '%Y%m%d%H%M'))
self.wdir.append(np.nan if line[5] == 'MM' else float(line[5]))
self.wspd.append(np.nan if line[6] == 'MM' else float(line[6]))
self.gst.append(np.nan if line[7] == 'MM' else float(line[7]))
self.wvht.append(np.nan if line[8] == 'MM' else float(line[8]))
self.dpd.append(np.nan if line[9] == 'MM' else float(line[9]))
self.apd.append(np.nan if line[10] == 'MM' else float(line[10]))
self.mwd.append(np.nan if line[11] == 'MM' else float(line[11]))
self.pres.append(np.nan if line[12] == 'MM' else float(line[12]))
self.atmp.append(np.nan if line[13] == 'MM' else float(line[13]))
self.wtmp.append(np.nan if line[14] == 'MM' else float(line[14]))
self.dewp.append(np.nan if line[15] == 'MM' else float(line[15]))
self.vis.append(np.nan if line[16] == 'MM' else float(line[16]))
self.ptdy.append(np.nan if line[17] == 'MM' else float(line[17]))
self.tide.append(np.nan if line[18] == 'MM' else float(line[18]))
self.time = np.array(self.time)
self.wdir = np.array(self.wdir)
self.wspd = np.array(self.wspd)
self.gst = np.array(self.gst)
self.wvht = np.array(self.wvht)
self.dpd = np.array(self.dpd)
self.apd = np.array(self.apd)
self.mwd = np.array(self.mwd)
self.pres = np.array(self.pres)
self.atmp = np.array(self.atmp)
self.wtmp = np.array(self.wtmp)
self.dewp = np.array(self.dewp)
self.vis = np.array(self.vis)
self.ptdy = np.array(self.ptdy)
self.tide = np.array(self.tide)
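# Minimal usage sketch (the station ID '41048' and the date range below are only illustrative assumptions,
# and network access to the NDBC server is required):
#   from datetime import datetime
#   station = Station('41048', datetime(2020, 1, 1), datetime(2020, 1, 15))
#   print(station.name, station.wvht.max())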
| 41.994413 | 89 | 0.543036 | 7,128 | 0.948251 | 0 | 0 | 0 | 0 | 0 | 0 | 855 | 0.113742 |
408f68f533f8c5055f6e751095cb737571178a12 | 765 | py | Python | main.py | kajuna0amendez/Cython_Machine_Learning_Models | 8b7d502bae07487ae0fdbced796e0fa50082e681 | [
"Apache-2.0"
]
| null | null | null | main.py | kajuna0amendez/Cython_Machine_Learning_Models | 8b7d502bae07487ae0fdbced796e0fa50082e681 | [
"Apache-2.0"
]
| 2 | 2021-02-02T23:02:12.000Z | 2021-08-23T20:51:22.000Z | main.py | kajuna0amendez/Machine_Learning_Models | 8b7d502bae07487ae0fdbced796e0fa50082e681 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
__author__ = "Andres Mendez-Vazquez"
__copyright__ = "Copyright 2018"
__credits__ = ["Andres Mendez-Vazquez"]
__license__ = "Apache"
__version__ = "v1.0.0"
__maintainer__ = "Andres Mendez-Vazquez"
__email = "[email protected]"
__status__ = "Development"
from data_model.load_data import create_connection, select_all_tasks
from tools.data_frames import dframe_t_db
def main():
database = "/Cython_Code/database/heart.db"
# create a database connection
conn = create_connection(database)
with conn:
print("2. Query all tasks")
rows, name = select_all_tasks(conn, 'heart_table')
return dframe_t_db(rows, name)
if __name__ == '__main__':
df = main()
print(df)
| 23.181818 | 68 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.376471 |
408f7d16d7791c4eaced84288001a4eccaab5dae | 54 | py | Python | graw/__init__.py | iamsajjad/graw | 84289b9bd2e298bad72ade402ab8a87e7c37688d | [
"MIT"
]
| null | null | null | graw/__init__.py | iamsajjad/graw | 84289b9bd2e298bad72ade402ab8a87e7c37688d | [
"MIT"
]
| null | null | null | graw/__init__.py | iamsajjad/graw | 84289b9bd2e298bad72ade402ab8a87e7c37688d | [
"MIT"
]
| null | null | null |
# version of the graw package
__version__ = "0.1.0"
| 10.8 | 29 | 0.685185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.666667 |
408fa80f7b62ab2142b5ebe87fafa4317281b530 | 6,779 | py | Python | sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
]
@pulumi.output_type
class GetCertificateResult:
"""
A collection of values returned by getCertificate.
"""
def __init__(__self__, arn=None, domain=None, id=None, key_types=None, most_recent=None, statuses=None, tags=None, types=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if domain and not isinstance(domain, str):
raise TypeError("Expected argument 'domain' to be a str")
pulumi.set(__self__, "domain", domain)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if key_types and not isinstance(key_types, list):
raise TypeError("Expected argument 'key_types' to be a list")
pulumi.set(__self__, "key_types", key_types)
if most_recent and not isinstance(most_recent, bool):
raise TypeError("Expected argument 'most_recent' to be a bool")
pulumi.set(__self__, "most_recent", most_recent)
if statuses and not isinstance(statuses, list):
raise TypeError("Expected argument 'statuses' to be a list")
pulumi.set(__self__, "statuses", statuses)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if types and not isinstance(types, list):
raise TypeError("Expected argument 'types' to be a list")
pulumi.set(__self__, "types", types)
@property
@pulumi.getter
def arn(self) -> str:
"""
Set to the ARN of the found certificate, suitable for referencing in other resources that support ACM certificates.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def domain(self) -> str:
return pulumi.get(self, "domain")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="keyTypes")
def key_types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "key_types")
@property
@pulumi.getter(name="mostRecent")
def most_recent(self) -> Optional[bool]:
return pulumi.get(self, "most_recent")
@property
@pulumi.getter
def statuses(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "statuses")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags for the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "types")
class AwaitableGetCertificateResult(GetCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateResult(
arn=self.arn,
domain=self.domain,
id=self.id,
key_types=self.key_types,
most_recent=self.most_recent,
statuses=self.statuses,
tags=self.tags,
types=self.types)
def get_certificate(domain: Optional[str] = None,
key_types: Optional[Sequence[str]] = None,
most_recent: Optional[bool] = None,
statuses: Optional[Sequence[str]] = None,
tags: Optional[Mapping[str, str]] = None,
types: Optional[Sequence[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
"""
Use this data source to get the ARN of a certificate in AWS Certificate
Manager (ACM), you can reference
it by domain without having to hard code the ARNs as input.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
issued = aws.acm.get_certificate(domain="tf.example.com",
statuses=["ISSUED"])
amazon_issued = aws.acm.get_certificate(domain="tf.example.com",
most_recent=True,
types=["AMAZON_ISSUED"])
rsa4096 = aws.acm.get_certificate(domain="tf.example.com",
key_types=["RSA_4096"])
```
:param str domain: The domain of the certificate to look up. If no certificate is found with this name, an error will be returned.
:param Sequence[str] key_types: A list of key algorithms to filter certificates. By default, ACM does not return all certificate types when searching. Valid values are `RSA_1024`, `RSA_2048`, `RSA_4096`, `EC_prime256v1`, `EC_secp384r1`, and `EC_secp521r1`.
:param bool most_recent: If set to true, it sorts the certificates matched by previous criteria by the NotBefore field, returning only the most recent one. If set to false, it returns an error if more than one certificate is found. Defaults to false.
:param Sequence[str] statuses: A list of statuses on which to filter the returned list. Valid values are `PENDING_VALIDATION`, `ISSUED`,
`INACTIVE`, `EXPIRED`, `VALIDATION_TIMED_OUT`, `REVOKED` and `FAILED`. If no value is specified, only certificates in the `ISSUED` state
are returned.
:param Mapping[str, str] tags: A mapping of tags for the resource.
:param Sequence[str] types: A list of types on which to filter the returned list. Valid values are `AMAZON_ISSUED` and `IMPORTED`.
"""
__args__ = dict()
__args__['domain'] = domain
__args__['keyTypes'] = key_types
__args__['mostRecent'] = most_recent
__args__['statuses'] = statuses
__args__['tags'] = tags
__args__['types'] = types
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:acm/getCertificate:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
return AwaitableGetCertificateResult(
arn=__ret__.arn,
domain=__ret__.domain,
id=__ret__.id,
key_types=__ret__.key_types,
most_recent=__ret__.most_recent,
statuses=__ret__.statuses,
tags=__ret__.tags,
types=__ret__.types)
| 39.184971 | 260 | 0.651424 | 3,311 | 0.48842 | 354 | 0.05222 | 2,872 | 0.423661 | 0 | 0 | 2,969 | 0.43797 |
40902a024648160483f25a5dd670916ae7cc2c01 | 2,688 | py | Python | Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 543b4bc82cfed5d5675b9ecc1cc97c2286a5562a | [
"MIT"
]
| 20 | 2016-08-08T12:16:51.000Z | 2022-03-26T19:56:09.000Z | Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 543b4bc82cfed5d5675b9ecc1cc97c2286a5562a | [
"MIT"
]
| null | null | null | Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 543b4bc82cfed5d5675b9ecc1cc97c2286a5562a | [
"MIT"
]
| 8 | 2016-08-08T14:22:13.000Z | 2020-05-30T07:05:36.000Z | import numpy as np
import matplotlib.pyplot as plt
TOTAL = 200
STEP = 0.25
EPS = 0.1
INITIAL_THETA = [9, 14]
def func(x):
return 0.2 * x + 3
def generate_sample(total=TOTAL):
x = 0
while x < total * STEP:
yield func(x) + np.random.uniform(-1, 1) * np.random.uniform(2, 8)
x += STEP
def cost_function(A, Y, theta):
return (Y - A@theta).T@(Y - A@theta)
def batch_descent(A, Y, speed=0.001):
theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
theta.reshape((len(theta), 1))
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
derivatives = [0] * len(theta)
# ---------------------------------------------
for j in range(len(theta)):
summ = 0
for i in range(len(Y)):
summ += (Y[i] - A[i]@theta) * A[i][j]
derivatives[j] = summ
        # Fulfil the requirement of simultaneous parameter updates
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# ---------------------------------------------
current_cost = cost_function(A, Y, theta)
print("Batch cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
def stochastic_descent(A, Y, speed=0.1):
theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
# --------------------------------------
# for i in range(len(Y)):
i = np.random.randint(0, len(Y))
derivatives = [0] * len(theta)
for j in range(len(theta)):
derivatives[j] = (Y[i] - A[i]@theta) * A[i][j]
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# --------------------------------------
current_cost = cost_function(A, Y, theta)
print("Stochastic cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
X = np.arange(0, TOTAL * STEP, STEP)
Y = np.array([y for y in generate_sample(TOTAL)])
# Normalization hacked in so that the paraboloid looks nice
X = (X - X.min()) / (X.max() - X.min())
A = np.empty((TOTAL, 2))
A[:, 0] = 1
A[:, 1] = X
theta = np.linalg.pinv(A).dot(Y)
print(theta, cost_function(A, Y, theta))
import time
start = time.clock()
theta_stochastic = stochastic_descent(A, Y, 0.1)
print("St:", time.clock() - start, theta_stochastic)
start = time.clock()
theta_batch = batch_descent(A, Y, 0.001)
print("Btch:", time.clock() - start, theta_batch)
| 29.866667 | 74 | 0.553943 | 0 | 0 | 164 | 0.059227 | 0 | 0 | 0 | 0 | 424 | 0.153124 |
4090bb4b6d1ad59682a210fa58e3049a7296547f | 4,103 | py | Python | castle.py | codyd51/castle | 93e7f8c18a0dacd5437b7503b7f3420d6ebc6256 | [
"MIT"
]
| 2 | 2018-08-07T16:18:58.000Z | 2018-08-09T16:59:48.000Z | castle.py | codyd51/castle | 93e7f8c18a0dacd5437b7503b7f3420d6ebc6256 | [
"MIT"
]
| null | null | null | castle.py | codyd51/castle | 93e7f8c18a0dacd5437b7503b7f3420d6ebc6256 | [
"MIT"
]
| null | null | null | import castle
from typing import Tuple
def select_player_types() -> Tuple[castle.PlayerType, castle.PlayerType]:
player1, player2 = None, None
while True:
print(f'1) Play a person')
print(f'2) Play the computer')
print(f'3) Play the computer against itself')
choice_str = input(f'Select an option: ')
try:
choice = int(choice_str)
if choice not in [1, 2, 3]:
raise ValueError
except ValueError:
print('Invalid option.\n')
continue
if choice == 1:
player1 = castle.PlayerType.HUMAN
player2 = castle.PlayerType.HUMAN
elif choice == 2:
player1 = castle.PlayerType.HUMAN
player2 = castle.PlayerType.COMPUTER
elif choice == 3:
player1 = castle.PlayerType.COMPUTER
player2 = castle.PlayerType.COMPUTER
break
return player1, player2
def play_constructed_game(g: castle.Game):
g.board.pretty_print()
while not g.finished:
print(f'white short {g.can_castle(castle.Color.WHITE, True)}')
print(f'white long {g.can_castle(castle.Color.WHITE, False)}')
print(f'black short {g.can_castle(castle.Color.BLACK, True)}')
print(f'black long {g.can_castle(castle.Color.BLACK, False)}')
g.play_turn()
winning_prefix = f'Game over by '
if g.winner == castle.Winner.DRAW:
winning_prefix += 'stalemate'
else:
winning_prefix += 'checkmate'
winning_text = f'{winning_prefix}. Winner: {g.winner.name.title()}'
print(winning_text)
def play_game():
player1, player2 = select_player_types()
g = castle.Game(player1, player2)
play_constructed_game(g)
def test_perft():
g = castle.Game(castle.PlayerType.HUMAN, castle.PlayerType.HUMAN)
g.board.pretty_print()
for i in range(10):
print(f'perft({i}) = {g.perft(i)}')
def test_perft2():
g = castle.Game(castle.PlayerType.HUMAN, castle.PlayerType.HUMAN)
g.board.clear()
# https://sites.google.com/site/numptychess/perft/position-3
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.BLACK), 'a8')
g.board.place_piece(castle.Piece(castle.PieceType.KING, castle.Color.BLACK), 'e8')
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.BLACK), 'h8')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'a7')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'h7')
g.board.place_piece(castle.Piece(castle.PieceType.BISHOP, castle.Color.WHITE), 'a5')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'b4')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'c4')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'e4')
g.board.place_piece(castle.Piece(castle.PieceType.BISHOP, castle.Color.BLACK), 'd3')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.WHITE), 'a2')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.WHITE), 'h2')
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.WHITE), 'a1')
g.board.place_piece(castle.Piece(castle.PieceType.KING, castle.Color.WHITE), 'e1')
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.WHITE), 'h1')
g.board.pretty_print()
for i in range(2):
print(f'perft({i}) = {g.perft(i)}')
def fen():
# f = castle.FenGameConstructor('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1')
game = 'rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1'
game = 'r3k2r/p6p/8/B7/1pp1p3/3b4/P6P/R3K2R w KQkq - 0 1'
game = '8/5p2/8/2k3P1/p3K3/8/1P6/8 b - - 0 1'
f = castle.FenGameConstructor(game)
return f.game
def main():
    print(f'Welcome to castle, a little chess engine.\n')
# test_perft()
g = fen()
print('returned')
g.print_perft(5)
play_constructed_game(g)
# play_game()
if __name__ == '__main__':
main()
| 37.3 | 95 | 0.663417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 938 | 0.228613 |
40914f27511088ce3ade62cbe86245a30a969a5b | 2,603 | py | Python | pyfos/utils/configure/switch_configuration_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
]
| 44 | 2017-11-17T12:03:11.000Z | 2022-02-03T20:57:56.000Z | pyfos/utils/configure/switch_configuration_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
]
| 13 | 2018-10-09T15:34:15.000Z | 2022-02-24T20:03:17.000Z | pyfos/utils/configure/switch_configuration_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
]
| 23 | 2017-12-14T18:08:33.000Z | 2022-02-03T15:33:40.000Z | #!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`switch_configuration_show` - PyFOS util for configuring switch operation
********************************************************************************
The :mod:`switch_configuration_show` util provides for configuring switch \
operation.
This module is a stand-alone script that can be used to display switch
attributes.
* Input:
* -L=<login>: The login ID. If not provided, an interactive
prompt will request one.
* -P=<password>: The password. If not provided, an interactive
prompt will request one.
* -i=<IP address>: The IP address.
* -f=<VFID>: The VFID or -1 if VF is disabled. If unspecified,
a VFID of 128 is assumed.
* Output:
* The switch attributes in JSON format.
.. function:: show_switch_conf(session)
Example Usage of the Method::
ret = switch_configuration_show.show_switch_conf(session)
print (ret)
Details::
switch_conf_obj = switch_configuration()
result = switch_conf_obj.get(session)
return result
* Input:
:param session: The session returned by login.
* Output:
:rtype: A dictionary of return status matching the REST response.
*Use Cases*
1. Retrieve the configuration parameters of the switch.
"""
import sys
from pyfos import pyfos_auth
import pyfos.pyfos_brocade_fibrechannel_configuration as py_fc
from pyfos import pyfos_util
from pyfos.utils import brcd_util
switch = py_fc.switch_configuration
def show_switch_conf(session):
switch_conf_obj = switch()
result = switch_conf_obj.get(session)
return result
def main(argv):
filters = []
inputs = brcd_util.parse(argv, switch, filters)
session = brcd_util.getsession(inputs)
result = show_switch_conf(inputs['session'])
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
| 28.293478 | 80 | 0.683826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,975 | 0.75874 |
409191dc8e0d6b6abf220bc3d47860d3cfbeff3a | 31,078 | py | Python | vehicle/views.py | BernardAli/vehicle-service-mgt | 242d9797f9138a23d1c649d63660c2ee0b6cc462 | [
"MIT"
]
| 105 | 2020-09-27T16:10:44.000Z | 2022-03-31T18:08:36.000Z | vehicle/views.py | BernardAli/vehicle-service-mgt | 242d9797f9138a23d1c649d63660c2ee0b6cc462 | [
"MIT"
]
| 1 | 2021-07-15T21:36:09.000Z | 2021-07-15T21:36:09.000Z | vehicle/views.py | BernardAli/vehicle-service-mgt | 242d9797f9138a23d1c649d63660c2ee0b6cc462 | [
"MIT"
]
| 87 | 2020-10-02T11:45:42.000Z | 2022-03-25T16:43:22.000Z | from django.shortcuts import render,redirect,reverse
from . import forms,models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required,user_passes_test
from django.conf import settings
from django.core.mail import send_mail
from django.db.models import Q
def home_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'vehicle/index.html')
#for showing signup/login button for customer
def customerclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'vehicle/customerclick.html')
#for showing signup/login button for mechanics
def mechanicsclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'vehicle/mechanicsclick.html')
#for showing signup/login button for ADMIN(by sumit)
def adminclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return HttpResponseRedirect('adminlogin')
def customer_signup_view(request):
userForm=forms.CustomerUserForm()
customerForm=forms.CustomerForm()
mydict={'userForm':userForm,'customerForm':customerForm}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST)
customerForm=forms.CustomerForm(request.POST,request.FILES)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
customer=customerForm.save(commit=False)
customer.user=user
customer.save()
my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
my_customer_group[0].user_set.add(user)
return HttpResponseRedirect('customerlogin')
return render(request,'vehicle/customersignup.html',context=mydict)
def mechanic_signup_view(request):
userForm=forms.MechanicUserForm()
mechanicForm=forms.MechanicForm()
mydict={'userForm':userForm,'mechanicForm':mechanicForm}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST)
mechanicForm=forms.MechanicForm(request.POST,request.FILES)
if userForm.is_valid() and mechanicForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
mechanic=mechanicForm.save(commit=False)
mechanic.user=user
mechanic.save()
my_mechanic_group = Group.objects.get_or_create(name='MECHANIC')
my_mechanic_group[0].user_set.add(user)
return HttpResponseRedirect('mechaniclogin')
return render(request,'vehicle/mechanicsignup.html',context=mydict)
#for checking user customer, mechanic or admin(by sumit)
def is_customer(user):
return user.groups.filter(name='CUSTOMER').exists()
def is_mechanic(user):
return user.groups.filter(name='MECHANIC').exists()
def afterlogin_view(request):
if is_customer(request.user):
return redirect('customer-dashboard')
elif is_mechanic(request.user):
accountapproval=models.Mechanic.objects.all().filter(user_id=request.user.id,status=True)
if accountapproval:
return redirect('mechanic-dashboard')
else:
return render(request,'vehicle/mechanic_wait_for_approval.html')
else:
return redirect('admin-dashboard')
#============================================================================================
# ADMIN RELATED views start
#============================================================================================
@login_required(login_url='adminlogin')
def admin_dashboard_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
dict={
'total_customer':models.Customer.objects.all().count(),
'total_mechanic':models.Mechanic.objects.all().count(),
'total_request':models.Request.objects.all().count(),
'total_feedback':models.Feedback.objects.all().count(),
'data':zip(customers,enquiry),
}
return render(request,'vehicle/admin_dashboard.html',context=dict)
@login_required(login_url='adminlogin')
def admin_customer_view(request):
return render(request,'vehicle/admin_customer.html')
@login_required(login_url='adminlogin')
def admin_view_customer_view(request):
customers=models.Customer.objects.all()
return render(request,'vehicle/admin_view_customer.html',{'customers':customers})
@login_required(login_url='adminlogin')
def delete_customer_view(request,pk):
customer=models.Customer.objects.get(id=pk)
user=models.User.objects.get(id=customer.user_id)
user.delete()
customer.delete()
return redirect('admin-view-customer')
@login_required(login_url='adminlogin')
def update_customer_view(request,pk):
customer=models.Customer.objects.get(id=pk)
user=models.User.objects.get(id=customer.user_id)
userForm=forms.CustomerUserForm(instance=user)
customerForm=forms.CustomerForm(request.FILES,instance=customer)
mydict={'userForm':userForm,'customerForm':customerForm}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST,instance=user)
customerForm=forms.CustomerForm(request.POST,request.FILES,instance=customer)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
customerForm.save()
return redirect('admin-view-customer')
return render(request,'vehicle/update_customer.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_add_customer_view(request):
userForm=forms.CustomerUserForm()
customerForm=forms.CustomerForm()
mydict={'userForm':userForm,'customerForm':customerForm}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST)
customerForm=forms.CustomerForm(request.POST,request.FILES)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
customer=customerForm.save(commit=False)
customer.user=user
customer.save()
my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
my_customer_group[0].user_set.add(user)
return HttpResponseRedirect('/admin-view-customer')
return render(request,'vehicle/admin_add_customer.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_customer_enquiry_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
return render(request,'vehicle/admin_view_customer_enquiry.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def admin_view_customer_invoice_view(request):
enquiry=models.Request.objects.values('customer_id').annotate(Sum('cost'))
print(enquiry)
customers=[]
for enq in enquiry:
print(enq)
customer=models.Customer.objects.get(id=enq['customer_id'])
customers.append(customer)
return render(request,'vehicle/admin_view_customer_invoice.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def admin_mechanic_view(request):
return render(request,'vehicle/admin_mechanic.html')
@login_required(login_url='adminlogin')
def admin_approve_mechanic_view(request):
mechanics=models.Mechanic.objects.all().filter(status=False)
return render(request,'vehicle/admin_approve_mechanic.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def approve_mechanic_view(request,pk):
mechanicSalary=forms.MechanicSalaryForm()
if request.method=='POST':
mechanicSalary=forms.MechanicSalaryForm(request.POST)
if mechanicSalary.is_valid():
mechanic=models.Mechanic.objects.get(id=pk)
mechanic.salary=mechanicSalary.cleaned_data['salary']
mechanic.status=True
mechanic.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-approve-mechanic')
return render(request,'vehicle/admin_approve_mechanic_details.html',{'mechanicSalary':mechanicSalary})
@login_required(login_url='adminlogin')
def delete_mechanic_view(request,pk):
mechanic=models.Mechanic.objects.get(id=pk)
user=models.User.objects.get(id=mechanic.user_id)
user.delete()
mechanic.delete()
return redirect('admin-approve-mechanic')
@login_required(login_url='adminlogin')
def admin_add_mechanic_view(request):
userForm=forms.MechanicUserForm()
mechanicForm=forms.MechanicForm()
mechanicSalary=forms.MechanicSalaryForm()
mydict={'userForm':userForm,'mechanicForm':mechanicForm,'mechanicSalary':mechanicSalary}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST)
mechanicForm=forms.MechanicForm(request.POST,request.FILES)
mechanicSalary=forms.MechanicSalaryForm(request.POST)
if userForm.is_valid() and mechanicForm.is_valid() and mechanicSalary.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
mechanic=mechanicForm.save(commit=False)
mechanic.user=user
mechanic.status=True
mechanic.salary=mechanicSalary.cleaned_data['salary']
mechanic.save()
my_mechanic_group = Group.objects.get_or_create(name='MECHANIC')
my_mechanic_group[0].user_set.add(user)
return HttpResponseRedirect('admin-view-mechanic')
else:
print('problem in form')
return render(request,'vehicle/admin_add_mechanic.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_mechanic_view(request):
mechanics=models.Mechanic.objects.all()
return render(request,'vehicle/admin_view_mechanic.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def delete_mechanic_view(request,pk):
mechanic=models.Mechanic.objects.get(id=pk)
user=models.User.objects.get(id=mechanic.user_id)
user.delete()
mechanic.delete()
return redirect('admin-view-mechanic')
@login_required(login_url='adminlogin')
def update_mechanic_view(request,pk):
mechanic=models.Mechanic.objects.get(id=pk)
user=models.User.objects.get(id=mechanic.user_id)
userForm=forms.MechanicUserForm(instance=user)
mechanicForm=forms.MechanicForm(request.FILES,instance=mechanic)
mydict={'userForm':userForm,'mechanicForm':mechanicForm}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST,instance=user)
mechanicForm=forms.MechanicForm(request.POST,request.FILES,instance=mechanic)
if userForm.is_valid() and mechanicForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
mechanicForm.save()
return redirect('admin-view-mechanic')
return render(request,'vehicle/update_mechanic.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_mechanic_salary_view(request):
mechanics=models.Mechanic.objects.all()
return render(request,'vehicle/admin_view_mechanic_salary.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def update_salary_view(request,pk):
mechanicSalary=forms.MechanicSalaryForm()
if request.method=='POST':
mechanicSalary=forms.MechanicSalaryForm(request.POST)
if mechanicSalary.is_valid():
mechanic=models.Mechanic.objects.get(id=pk)
mechanic.salary=mechanicSalary.cleaned_data['salary']
mechanic.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-view-mechanic-salary')
return render(request,'vehicle/admin_approve_mechanic_details.html',{'mechanicSalary':mechanicSalary})
@login_required(login_url='adminlogin')
def admin_request_view(request):
return render(request,'vehicle/admin_request.html')
@login_required(login_url='adminlogin')
def admin_view_request_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
return render(request,'vehicle/admin_view_request.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def change_status_view(request,pk):
adminenquiry=forms.AdminApproveRequestForm()
if request.method=='POST':
adminenquiry=forms.AdminApproveRequestForm(request.POST)
if adminenquiry.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
enquiry_x.cost=adminenquiry.cleaned_data['cost']
enquiry_x.status=adminenquiry.cleaned_data['status']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-view-request')
return render(request,'vehicle/admin_approve_request_details.html',{'adminenquiry':adminenquiry})
@login_required(login_url='adminlogin')
def admin_delete_request_view(request,pk):
requests=models.Request.objects.get(id=pk)
requests.delete()
return redirect('admin-view-request')
@login_required(login_url='adminlogin')
def admin_add_request_view(request):
enquiry=forms.RequestForm()
adminenquiry=forms.AdminRequestForm()
mydict={'enquiry':enquiry,'adminenquiry':adminenquiry}
if request.method=='POST':
enquiry=forms.RequestForm(request.POST)
adminenquiry=forms.AdminRequestForm(request.POST)
if enquiry.is_valid() and adminenquiry.is_valid():
enquiry_x=enquiry.save(commit=False)
enquiry_x.customer=adminenquiry.cleaned_data['customer']
enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
enquiry_x.cost=adminenquiry.cleaned_data['cost']
enquiry_x.status='Approved'
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('admin-view-request')
return render(request,'vehicle/admin_add_request.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_approve_request_view(request):
enquiry=models.Request.objects.all().filter(status='Pending')
return render(request,'vehicle/admin_approve_request.html',{'enquiry':enquiry})
@login_required(login_url='adminlogin')
def approve_request_view(request,pk):
adminenquiry=forms.AdminApproveRequestForm()
if request.method=='POST':
adminenquiry=forms.AdminApproveRequestForm(request.POST)
if adminenquiry.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
enquiry_x.cost=adminenquiry.cleaned_data['cost']
enquiry_x.status=adminenquiry.cleaned_data['status']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-approve-request')
return render(request,'vehicle/admin_approve_request_details.html',{'adminenquiry':adminenquiry})
@login_required(login_url='adminlogin')
def admin_view_service_cost_view(request):
enquiry=models.Request.objects.all().order_by('-id')
customers=[]
for enq in enquiry:
customer=models.Customer.objects.get(id=enq.customer_id)
customers.append(customer)
print(customers)
return render(request,'vehicle/admin_view_service_cost.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def update_cost_view(request,pk):
updateCostForm=forms.UpdateCostForm()
if request.method=='POST':
updateCostForm=forms.UpdateCostForm(request.POST)
if updateCostForm.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.cost=updateCostForm.cleaned_data['cost']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/admin-view-service-cost')
return render(request,'vehicle/update_cost.html',{'updateCostForm':updateCostForm})
@login_required(login_url='adminlogin')
def admin_mechanic_attendance_view(request):
return render(request,'vehicle/admin_mechanic_attendance.html')
@login_required(login_url='adminlogin')
def admin_take_attendance_view(request):
mechanics=models.Mechanic.objects.all().filter(status=True)
aform=forms.AttendanceForm()
if request.method=='POST':
form=forms.AttendanceForm(request.POST)
if form.is_valid():
Attendances=request.POST.getlist('present_status')
date=form.cleaned_data['date']
for i in range(len(Attendances)):
AttendanceModel=models.Attendance()
AttendanceModel.date=date
AttendanceModel.present_status=Attendances[i]
print(mechanics[i].id)
print(int(mechanics[i].id))
mechanic=models.Mechanic.objects.get(id=int(mechanics[i].id))
AttendanceModel.mechanic=mechanic
AttendanceModel.save()
return redirect('admin-view-attendance')
else:
print('form invalid')
return render(request,'vehicle/admin_take_attendance.html',{'mechanics':mechanics,'aform':aform})
@login_required(login_url='adminlogin')
def admin_view_attendance_view(request):
form=forms.AskDateForm()
if request.method=='POST':
form=forms.AskDateForm(request.POST)
if form.is_valid():
date=form.cleaned_data['date']
attendancedata=models.Attendance.objects.all().filter(date=date)
mechanicdata=models.Mechanic.objects.all().filter(status=True)
mylist=zip(attendancedata,mechanicdata)
return render(request,'vehicle/admin_view_attendance_page.html',{'mylist':mylist,'date':date})
else:
print('form invalid')
return render(request,'vehicle/admin_view_attendance_ask_date.html',{'form':form})
@login_required(login_url='adminlogin')
def admin_report_view(request):
reports=models.Request.objects.all().filter(Q(status="Repairing Done") | Q(status="Released"))
dict={
'reports':reports,
}
return render(request,'vehicle/admin_report.html',context=dict)
@login_required(login_url='adminlogin')
def admin_feedback_view(request):
feedback=models.Feedback.objects.all().order_by('-id')
return render(request,'vehicle/admin_feedback.html',{'feedback':feedback})
#============================================================================================
# ADMIN RELATED views END
#============================================================================================
#============================================================================================
# CUSTOMER RELATED views start
#============================================================================================
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_dashboard_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
work_in_progress=models.Request.objects.all().filter(customer_id=customer.id,status='Repairing').count()
work_completed=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Repairing Done") | Q(status="Released")).count()
new_request_made=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Pending") | Q(status="Approved")).count()
bill=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Repairing Done") | Q(status="Released")).aggregate(Sum('cost'))
print(bill)
dict={
'work_in_progress':work_in_progress,
'work_completed':work_completed,
'new_request_made':new_request_made,
'bill':bill['cost__sum'],
'customer':customer,
}
return render(request,'vehicle/customer_dashboard.html',context=dict)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
return render(request,'vehicle/customer_request.html',{'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id , status="Pending")
return render(request,'vehicle/customer_view_request.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_delete_request_view(request,pk):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiry=models.Request.objects.get(id=pk)
enquiry.delete()
return redirect('customer-view-request')
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_approved_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
return render(request,'vehicle/customer_view_approved_request.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_approved_request_invoice_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
return render(request,'vehicle/customer_view_approved_request_invoice.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_add_request_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiry=forms.RequestForm()
if request.method=='POST':
enquiry=forms.RequestForm(request.POST)
if enquiry.is_valid():
customer=models.Customer.objects.get(user_id=request.user.id)
enquiry_x=enquiry.save(commit=False)
enquiry_x.customer=customer
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('customer-dashboard')
return render(request,'vehicle/customer_add_request.html',{'enquiry':enquiry,'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_profile_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
return render(request,'vehicle/customer_profile.html',{'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def edit_customer_profile_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
user=models.User.objects.get(id=customer.user_id)
userForm=forms.CustomerUserForm(instance=user)
customerForm=forms.CustomerForm(request.FILES,instance=customer)
mydict={'userForm':userForm,'customerForm':customerForm,'customer':customer}
if request.method=='POST':
userForm=forms.CustomerUserForm(request.POST,instance=user)
customerForm=forms.CustomerForm(request.POST,instance=customer)
if userForm.is_valid() and customerForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
customerForm.save()
return HttpResponseRedirect('customer-profile')
return render(request,'vehicle/edit_customer_profile.html',context=mydict)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_invoice_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
return render(request,'vehicle/customer_invoice.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_feedback_view(request):
customer=models.Customer.objects.get(user_id=request.user.id)
feedback=forms.FeedbackForm()
if request.method=='POST':
feedback=forms.FeedbackForm(request.POST)
if feedback.is_valid():
feedback.save()
else:
print("form is invalid")
return render(request,'vehicle/feedback_sent_by_customer.html',{'customer':customer})
return render(request,'vehicle/customer_feedback.html',{'feedback':feedback,'customer':customer})
#============================================================================================
# CUSTOMER RELATED views END
#============================================================================================
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_dashboard_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
work_in_progress=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Repairing').count()
work_completed=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Repairing Done').count()
new_work_assigned=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Approved').count()
dict={
'work_in_progress':work_in_progress,
'work_completed':work_completed,
'new_work_assigned':new_work_assigned,
'salary':mechanic.salary,
'mechanic':mechanic,
}
return render(request,'vehicle/mechanic_dashboard.html',context=dict)
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_work_assigned_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
works=models.Request.objects.all().filter(mechanic_id=mechanic.id)
return render(request,'vehicle/mechanic_work_assigned.html',{'works':works,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_update_status_view(request,pk):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
updateStatus=forms.MechanicUpdateStatusForm()
if request.method=='POST':
updateStatus=forms.MechanicUpdateStatusForm(request.POST)
if updateStatus.is_valid():
enquiry_x=models.Request.objects.get(id=pk)
enquiry_x.status=updateStatus.cleaned_data['status']
enquiry_x.save()
else:
print("form is invalid")
return HttpResponseRedirect('/mechanic-work-assigned')
return render(request,'vehicle/mechanic_update_status.html',{'updateStatus':updateStatus,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_attendance_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
attendaces=models.Attendance.objects.all().filter(mechanic=mechanic)
return render(request,'vehicle/mechanic_view_attendance.html',{'attendaces':attendaces,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_feedback_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
feedback=forms.FeedbackForm()
if request.method=='POST':
feedback=forms.FeedbackForm(request.POST)
if feedback.is_valid():
feedback.save()
else:
print("form is invalid")
return render(request,'vehicle/feedback_sent.html',{'mechanic':mechanic})
return render(request,'vehicle/mechanic_feedback.html',{'feedback':feedback,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_salary_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
workdone=models.Request.objects.all().filter(mechanic_id=mechanic.id).filter(Q(status="Repairing Done") | Q(status="Released"))
return render(request,'vehicle/mechanic_salary.html',{'workdone':workdone,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_profile_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
return render(request,'vehicle/mechanic_profile.html',{'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def edit_mechanic_profile_view(request):
mechanic=models.Mechanic.objects.get(user_id=request.user.id)
user=models.User.objects.get(id=mechanic.user_id)
userForm=forms.MechanicUserForm(instance=user)
mechanicForm=forms.MechanicForm(request.FILES,instance=mechanic)
mydict={'userForm':userForm,'mechanicForm':mechanicForm,'mechanic':mechanic}
if request.method=='POST':
userForm=forms.MechanicUserForm(request.POST,instance=user)
mechanicForm=forms.MechanicForm(request.POST,request.FILES,instance=mechanic)
if userForm.is_valid() and mechanicForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
mechanicForm.save()
return redirect('mechanic-profile')
return render(request,'vehicle/edit_mechanic_profile.html',context=mydict)
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
# for aboutus and contact
def aboutus_view(request):
return render(request,'vehicle/aboutus.html')
def contactus_view(request):
sub = forms.ContactusForm()
if request.method == 'POST':
sub = forms.ContactusForm(request.POST)
if sub.is_valid():
email = sub.cleaned_data['Email']
name=sub.cleaned_data['Name']
message = sub.cleaned_data['Message']
send_mail(str(name)+' || '+str(email),message,settings.EMAIL_HOST_USER, settings.EMAIL_RECEIVING_USER, fail_silently = False)
return render(request, 'vehicle/contactussuccess.html')
return render(request, 'vehicle/contactus.html', {'form':sub})
| 41.327128 | 150 | 0.702491 | 0 | 0 | 0 | 0 | 25,357 | 0.815915 | 0 | 0 | 6,581 | 0.211758 |
409204c88e09d10160109d7dfc196e9a1647012b | 28,322 | py | Python | deep_disfluency/utils/tools.py | treena908/deep_disfluency | 4e18bc17e74c356cd3a9c26fc80bf1c4a5487d59 | [
"MIT"
]
| null | null | null | deep_disfluency/utils/tools.py | treena908/deep_disfluency | 4e18bc17e74c356cd3a9c26fc80bf1c4a5487d59 | [
"MIT"
]
| null | null | null | deep_disfluency/utils/tools.py | treena908/deep_disfluency | 4e18bc17e74c356cd3a9c26fc80bf1c4a5487d59 | [
"MIT"
]
| null | null | null | import random
import numpy as np
import itertools
import re
from collections import defaultdict
import os
def get_tags(s, open_delim='<', close_delim='/>'):
"""Iterator to spit out the xml style disfluency tags in a given string.
Keyword arguments:
s -- input string
"""
while True:
# Search for the next two delimiters in the source text
start = s.find(open_delim)
end = s.find(close_delim)
# We found a non-empty match
if -1 < start < end:
# Skip the length of the open delimiter
start += len(open_delim)
# Spit out the tag
yield open_delim + s[start:end].strip() + close_delim
# Truncate string to start from last match
s = s[end+len(close_delim):]
else:
return
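# Illustrative sketch of get_tags (hypothetical tag string, not from the corpus):
#   >>> list(get_tags('<rms id="3"/><i id="3"/>'))
#   ['<rms id="3"/>', '<i id="3"/>']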
def remove_uttseg_tag(tag):
tags = get_tags(tag)
final_tag = ""
for t in tags:
m = re.search(r'<[ct]*/>', t)
if m:
continue
final_tag += t
return final_tag
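# Illustrative sketch of remove_uttseg_tag (hypothetical combined tag): the
# utterance-segmentation part (<cc/>) is stripped and the rest is kept:
#   >>> remove_uttseg_tag('<f/><cc/>')
#   '<f/>'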
def convert_to_simple_label(tag, rep="disf1_uttseg"):
"""Takes the complex tag set and gives back the simple,
smaller version with ten tags:
"""
disftag = "<f/>"
if "<rm-" in tag:
disftag = "<rm-0/>"
elif "<e" in tag:
disftag = "<e/>"
if "uttseg" in rep: # if combined task with TTO
m = re.search(r'<[ct]*/>', tag)
if m:
return disftag + m.group(0)
else:
print("WARNING NO TAG", +tag)
return ""
return disftag # if not TT0
def convert_to_simple_idx(tag, rep='1_trp'):
tag = convert_to_simple_label(tag, rep)
simple_tags = """<e/><cc/>
<e/><ct/>
<e/><tc/>
<e/><tt/>
<f/><cc/>
<f/><ct/>
<f/><tc/>
<f/><tt/>
<rm-0/><cc/>
<rm-0/><ct/>""".split("\n")
simple_tag_dict = {}
for s in range(0, len(simple_tags)):
simple_tag_dict[simple_tags[s].strip()] = s
return simple_tag_dict[tag]
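# Illustrative sketch of the simple label/index mapping (hypothetical tag,
# assuming an utterance-segmentation representation):
#   >>> convert_to_simple_label('<rm-2/><ct/>', 'disf1_uttseg')
#   '<rm-0/><ct/>'
#   >>> convert_to_simple_idx('<rm-2/><ct/>', 'disf1_uttseg')
#   9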
def convert_from_full_tag_set_to_idx(tag, rep, idx_to_label):
"""Maps from the full tag set of trp repairs to the new dictionary"""
if "simple" in rep:
tag = convert_to_simple_label(tag)
for k, v in idx_to_label.items():
if v in tag: # a substring relation
return k
def add_word_continuation_tags(tags):
"""In place, add a continutation tag to each word:
<cc/> -word continues current dialogue act and the next word will also
continue it
<ct/> -word continues current dialogue act and is the last word of it
<tc/> -word starts this dialogue act tag and the next word continues it
<tt/> -word starts and ends dialogue act (single word dialogue act)
"""
tags = list(tags)
for i in range(0, len(tags)):
if i == 0:
tags[i] = tags[i] + "<t"
else:
tags[i] = tags[i] + "<c"
if i == len(tags)-1:
tags[i] = tags[i] + "t/>"
else:
tags[i] = tags[i] + "c/>"
return tags
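# Illustrative sketch of add_word_continuation_tags on a three-word utterance
# (hypothetical fluent tags):
#   >>> add_word_continuation_tags(['<f/>', '<f/>', '<f/>'])
#   ['<f/><tc/>', '<f/><cc/>', '<f/><ct/>']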
def verify_disfluency_tags(tags, normalize_ID=False):
"""Check that the repair tags sequence is valid.
Keyword arguments:
normalize_ID -- boolean, whether to convert the repair ID
numbers to be derivable from their unique RPS position in the utterance.
"""
id_map = dict() # map between old ID and new ID
# in first pass get old and new IDs
for i in range(0, len(tags)):
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[i])
if rps:
id_map[rps[0][rps[0].find("=")+2:-3]] = str(i)
# key: old repair ID, value, list [reparandum,interregnum,repair]
# all True when repair is all there
repairs = defaultdict(list)
for r in id_map.keys():
repairs[r] = [None, None, None] # three valued None<False<True
# print(repairs)
# second pass verify the validity of the tags
# and (optionally) modify the IDs
for i in range(0, len(tags)): # iterate over all tag strings
new_tags = []
if tags[i] == "":
assert(all([repairs[ID][2] or
repairs[ID] == [None, None, None]
for ID in repairs.keys()])),\
"Unresolved repairs at fluent tag\n\t" + str(repairs)
for tag in get_tags(tags[i]): # iterate over all tags
# print(i)
# print(tag)
if tag == "<e/>":
new_tags.append(tag)
continue
ID = tag[tag.find("=")+2:-3]
if "<rms" in tag:
                assert repairs[ID][0] == None,\
                    "reparandum start parsed more than once " + ID
assert repairs[ID][1] == None,\
"reparandum start again during interregnum phase " + ID
assert repairs[ID][2] == None,\
"reparandum start again during repair phase " + ID
repairs[ID][0] = False # set in progress
elif "<rm " in tag:
assert repairs[ID][0] != None,\
"mid reparandum tag before reparandum start " + ID
                assert repairs[ID][1] == None,\
                    "mid reparandum tag in an interregnum phase or beyond " + ID
                assert repairs[ID][2] == None,\
                    "mid reparandum tag in a repair phase or beyond " + ID
elif "<i" in tag:
assert repairs[ID][0] != None,\
"interregnum start before reparandum start " + ID
assert repairs[ID][2] == None,\
"interregnum in a repair phase " + ID
if repairs[ID][1] == None: # interregnum not reached yet
repairs[ID][0] = True # reparandum completed
repairs[ID][1] = False # interregnum in progress
elif "<rps" in tag:
assert repairs[ID][0] != None,\
"repair start before reparandum start " + ID
assert repairs[ID][1] != True,\
"interregnum over before repair start " + ID
assert repairs[ID][2] == None,\
"repair start parsed twice " + ID
                repairs[ID][0] = True  # reparandum complete
repairs[ID][1] = True # interregnum complete
repairs[ID][2] = False # repair in progress
elif "<rp " in tag:
assert repairs[ID][0] == True,\
"mid repair word start before reparandum end " + ID
assert repairs[ID][1] == True,\
"mid repair word start before interregnum end " + ID
assert repairs[ID][2] == False,\
"mid repair tag before repair start tag " + ID
elif "<rpn" in tag:
# make sure the rps is order in tag string is before
assert repairs[ID][0] == True,\
"repair end before reparandum end " + ID
assert repairs[ID][1] == True,\
"repair end before interregnum end " + ID
assert repairs[ID][2] == False,\
"repair end before repair start " + ID
repairs[ID][2] = True
# do the replacement of the tag's ID after checking
new_tags.append(tag.replace(ID, id_map[ID]))
if normalize_ID:
tags[i] = "".join(new_tags)
assert all([repairs[ID][2] for ID in repairs.keys()]),\
"Unresolved repairs:\n\t" + str(repairs)
def shuffle(lol, seed):
"""Shuffle inplace each list in the same order.
lol :: list of list as input
seed :: seed the shuffling
"""
for l in lol:
random.seed(seed)
random.shuffle(l)
def minibatch(l, bs):
"""Returns a list of minibatches of indexes
which size is equal to bs
border cases are treated as follow:
eg: [0,1,2,3] and bs = 3
will output:
[[0],[0,1],[0,1,2],[1,2,3]]
l :: list of word idxs
"""
out = [l[:i] for i in xrange(1, min(bs, len(l)+1))]
out += [l[i-bs:i] for i in xrange(bs, len(l)+1)]
assert len(l) == len(out)
return out
def indices_from_length(sentence_length, bs, start_index=0):
"""Return a list of indexes pairs (start/stop) for each word
max difference between start and stop equal to bs
border cases are treated as follow:
eg: sentenceLength=4 and bs = 3
will output:
[[0,0],[0,1],[0,2],[1,3]]
"""
l = map(lambda x: start_index+x, xrange(sentence_length))
out = []
for i in xrange(0, min(bs, len(l))):
out.append([l[0], l[i]])
for i in xrange(bs+1, len(l)+1):
out.append([l[i-bs], l[i-1]])
assert len(l) == sentence_length
return out
def context_win(l, win):
"""Return a list of list of indexes corresponding
to context windows surrounding each word in the sentence
given a list of indexes composing a sentence.
win :: int corresponding to the size of the window
"""
assert (win % 2) == 1
assert win >= 1
l = list(l)
lpadded = win/2 * [-1] + l + win/2 * [-1]
out = [lpadded[i:i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
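# Illustrative sketch of context_win (hypothetical word indexes; assumes
# Python 2 integer division for the win/2 padding):
#   >>> context_win([3, 4], 3)
#   [[-1, 3, 4], [3, 4, -1]]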
def context_win_backwards(l, win):
'''Same as contextwin except only backwards context
(i.e. like an n-gram model)
'''
assert win >= 1
l = list(l)
lpadded = (win-1) * [-1] + l
out = [lpadded[i: i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
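# Illustrative sketch of context_win_backwards (hypothetical word indexes):
#   >>> context_win_backwards([0, 1, 2], 2)
#   [[-1, 0], [0, 1], [1, 2]]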
def corpus_to_indexed_matrix(my_array_list, win, bs, sentence=False):
"""Returns a matrix of contextwins for a list of utterances of
dimensions win * n_words_in_corpus
(i.e. total length of all arrays in my_array_list)
and corresponding matrix of indexes (of just start/stop for each one)
so 2 * n_words_in_corpus
of where to access these, using bs (backprop distance)
as the limiting history size
"""
sentences = [] # a list (of arrays, or lists?), returned as matrix
indices = [] # a list of index pairs (arrays?), returned as matrix
totalSize = 0
if sentence:
for sent in my_array_list:
mysent = np.asarray([-1] * (bs-1) + list(sent)) # padding with eos
# get list of context windows
mywords = context_win_backwards(mysent, win)
# just one per utterance for now..
cindices = [[totalSize, totalSize+len(mywords)-1]]
cwords = []
for i in range(bs, len(mywords)+1):
words = list(itertools.chain(*mywords[(i-bs):i]))
cwords.append(words) # always (bs * n) words long
# print cwords
sentences.extend(cwords)
indices.extend(cindices)
totalSize += len(cwords)
else:
for sentence in my_array_list:
# get list of context windows
cwords = context_win_backwards(sentence, win)
cindices = indices_from_length(len(cwords), bs, totalSize)
indices.extend(cindices)
sentences.extend(cwords)
totalSize += len(cwords)
for s in sentences:
if any([x is None for x in s]):
print(s)
return np.matrix(sentences, dtype='int32'), indices
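# Illustrative sketch of corpus_to_indexed_matrix on a one-utterance corpus
# (hypothetical word indexes; assumes Python 2, as xrange/map are used above):
#   >>> m, idx = corpus_to_indexed_matrix([[5, 6, 7]], 2, 3)
#   >>> m.tolist(), idx
#   ([[-1, 5], [5, 6], [6, 7]], [[0, 0], [0, 1], [0, 2]])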
def convert_from_eval_tags_to_inc_disfluency_tags(tags, words,
representation="disf1",
limit=8):
"""Conversion from disfluency tagged corpus with xml-style tags
as from STIR (https://bitbucket.org/julianhough/stir)
to the strictly left-to-right schemas as
    described in the Hough and Schlangen 2015 Interspeech paper,
which are used by RNN architectures at runtime.
Keyword arguments:
tags -- the STIR eval style disfluency tags
words -- the words in the utterance
representation -- the number corresponding to the type of tagging system
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
limit -- the limit on the distance back from the repair start
"""
repair_dict = defaultdict(list)
new_tags = []
# print("tags")
# print(tags)
# print('words')
# print(words)
for t in range(0, len(tags)):
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
tags[t] = tags[t].replace(TTO_tag, "")
if "dact" in representation:
m = re.search(r'<diact type="[^\s]*"/>', tags[t])
if m:
dact_tag = m.group(0)
tags[t] = tags[t].replace(dact_tag, "")
if "laugh" in representation:
m = re.search(r'<speechLaugh/>|<laughter/>', tags[t])
if m:
laughter_tag = m.group(0)
else:
laughter_tag = "<nolaughter/>"
tags[t] = tags[t].replace(laughter_tag, "")
current_tag = ""
if "<e/>" in tags[t] or "<i" in tags[t]:
current_tag = "<e/>" # TODO may make this an interregnum
if "<rms" in tags[t]:
rms = re.findall("<rms id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rms:
repairID = r[r.find("=")+2:-3]
repair_dict[repairID] = [t, 0]
if "<rps" in tags[t]:
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rps:
repairID = r[r.find("=")+2:-3]
# print('repairID')
# print(repairID)
# print(repair_dict.get(repairID))
# print(str(repairID)+str(tags)+str(words))
assert repair_dict.get(repairID), str(repairID)+str(tags)+str(words)
repair_dict[repairID][1] = t
dist = min(t-repair_dict[repairID][0], limit)
# adjust in case the reparandum is shortened due to the limit
repair_dict[repairID][0] = t-dist
current_tag += "<rm-{}/>".format(dist) + "<rpMid/>"
if "<rpn" in tags[t]:
rpns = re.findall("<rpnrep id\=\"[0-9]+\"\/>", tags[t], re.S) +\
re.findall("<rpnsub id\=\"[0-9]+\"\/>", tags[t], re.S)
rpns_del = re.findall("<rpndel id\=\"[0-9]+\"\/>", tags[t], re.S)
# slight simplifying assumption is to take the repair with
# the longest reparandum as the end category
repair_type = ""
longestlength = 0
for r in rpns:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Sub"
for r in rpns_del:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Del"
if repair_type == "":
raise Exception("Repair not passed \
correctly."+str(words)+str(tags))
current_tag += "<rpEnd"+repair_type+"/>"
current_tag = current_tag.replace("<rpMid/>", "")
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
if "dact" in representation:
current_tag += dact_tag
if "laugh" in representation:
current_tag += laughter_tag
new_tags.append(current_tag)
return new_tags
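# Illustrative example (hand-derived; matches the demo in __main__ below): the
# eval-style tags for "i like uh love to uh love alot" convert, with
# representation="disf1", to ['<f/>', '<f/>', '<e/>', '<rm-2/><rpEndSub/>', '<f/>',
# '<e/>', '<f/>', '<f/>'] -- the repair onset carries a backwards <rm-N/> pointer
# instead of the xml-style rms/rps/rpn span markers.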
def convert_from_inc_disfluency_tags_to_eval_tags(
tags, words,
start=0,
representation="disf1_uttseg"):
"""Converts the incremental style output tags of the RNN to the standard
STIR eval output tags.
    The exact inverse of convert_from_eval_tags_to_inc_disfluency_tags.
Keyword arguments:
tags -- the RNN style disfluency tags
words -- the words in the utterance
start -- position from where to begin changing the tags from
representation -- the number corresponding to the type of tagging system,
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
"""
# maps from the repair ID to a list of
# [reparandumStart,repairStart,repairOver]
repair_dict = defaultdict(list)
new_tags = []
if start > 0:
# assuming the tags up to this point are already converted
new_tags = tags[:start]
if "mid" not in representation:
rps_s = re.findall("<rps id\=\"[0-9]+\"\/>", tags[start-1])
rpmid = re.findall("<rp id\=\"[0-9]+\"\/>", tags[start-1])
if rps_s:
for r in rps_s:
repairID = r[r.find("=")+2:-3]
resolved_repair = re.findall(
"<rpn[repsubdl]+ id\=\"{}\"\/>"
.format(repairID), tags[start-1])
if not resolved_repair:
if not rpmid:
rpmid = []
rpmid.append(r.replace("rps ", "rp "))
if rpmid:
newstart = start-1
for rp in rpmid:
rps = rp.replace("rp ", "rps ")
repairID = rp[rp.find("=")+2:-3]
# go back and find the repair
for b in range(newstart, -1, -1):
if rps in tags[b]:
repair_dict[repairID] = [b, b, False]
break
for t in range(start, len(tags)):
current_tag = ""
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
if "<e/>" in tags[t] or "<i/>" in tags[t]:
current_tag = "<e/>"
if "<rm-" in tags[t]:
rps = re.findall("<rm-[0-9]+\/>", tags[t], re.S)
for r in rps: # should only be one
current_tag += '<rps id="{}"/>'.format(t)
# print t-dist
if "simple" in representation:
# simply tagging the rps
pass
else:
dist = int(r[r.find("-")+1:-2])
repair_dict[str(t)] = [max([0, t-dist]), t, False]
# backwards looking search if full set
# print new_tags, t, dist, t-dist, max([0, t-dist])
# print tags[:t+1]
rms_start_idx = max([0, t-dist])
new_tags[rms_start_idx] = '<rms id="{}"/>'\
.format(t) + new_tags[rms_start_idx]\
.replace("<f/>", "")
reparandum = False # interregnum if edit term
for b in range(t-1, max([0, t-dist]), -1):
if "<e" not in new_tags[b]:
reparandum = True
new_tags[b] = '<rm id="{}"/>'.format(t) +\
new_tags[b].replace("<f/>", "")
if reparandum is False and "<e" in new_tags[b]:
new_tags[b] = '<i id="{}"/>'.\
format(t) + new_tags[b]
# repair ends
if "<rpEnd" in tags[t]:
rpns = re.findall("<rpEndSub/>", tags[t], re.S)
rpns_del = re.findall("<rpEndDel/>", tags[t], re.S)
rpnAll = rpns + rpns_del
if rpnAll:
for k, v in repair_dict.items():
if t >= int(k) and v[2] is False:
repair_dict[k][2] = True
# classify the repair
if rpns_del: # a delete
current_tag += '<rpndel id="{}"/>'.format(k)
rpns_del.pop(0)
continue
reparandum = [words[i] for i in range(0, len(new_tags))
if '<rms id="{}"/>'.
format(k) in new_tags[i] or
'<rm id="{}"/>'.
format(k) in new_tags[i]]
repair = [words[i] for i in range(0, len(new_tags))
if '<rps id="{}"/>'.format(k)
in new_tags[i] or '<rp id="{}"/>'.format(k)
in new_tags[i]] + [words[t]]
if reparandum == repair:
current_tag += '<rpnrep id="{}"/>'.format(k)
else:
current_tag += '<rpnsub id="{}"/>'.format(k)
# mid repair phases still in progress
for k, v in repair_dict.items():
if t > int(k) and v[2] is False:
current_tag += '<rp id="{}"/>'.format(k)
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
new_tags.append(current_tag)
return new_tags
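# Note: the __main__ block at the bottom of this file round-trips
# convert_from_eval_tags_to_inc_disfluency_tags and this function and asserts that
# the original eval-style tags are recovered.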
def verify_dialogue_data_matrix(dialogue_data_matrix, word_dict=None,
pos_dict=None, tag_dict=None, n_lm=0,
n_acoustic=0):
"""Boolean check of whether dialogue data consistent
with args. Checks all idxs are valid and number of features is correct.
Standard form of each row of the matrix should be:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
"""
l = 3 + n_acoustic + n_lm + 1 # row length
try:
for i, row in enumerate(dialogue_data_matrix):
assert len(row) == l,\
"row {} wrong length {}, should be {}".format(i, len(row), l)
assert word_dict[row[1]] is not None,\
"row[1][{}] {} not in word dict".format(i, row[1])
assert pos_dict[row[2]] is not None,\
"row[2][{}] {} not in POS dict".format(i, row[2])
assert tag_dict[row[-1]] is not None,\
"row[-1][{}] {} not in tag dict".format(i, row[-1])
except AssertionError as a:
print(a)
return False
return True
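# Illustrative example: with n_acoustic=2 and n_lm=0 every row must have
# 3 + 2 + 0 + 1 = 6 entries (utterance/word/POS indices, then the extra features,
# then the label), and each index must resolve in the supplied word/POS/tag dicts.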
def verify_dialogue_data_matrices_from_folder(matrices_folder_filepath,
word_dict=None,
pos_dict=None,
tag_dict=None,
n_lm=0,
n_acoustic=0):
"""A boolean check that the dialogue matrices make sense for the
particular configuration in args and tag2idx dicts.
"""
for dialogue_file in os.listdir(matrices_folder_filepath):
        v = np.load(matrices_folder_filepath + "/" + dialogue_file, allow_pickle=True)
if not verify_dialogue_data_matrix(v,
word_dict=word_dict,
pos_dict=pos_dict,
tag_dict=tag_dict,
n_lm=n_lm,
n_acoustic=n_acoustic):
# print"{} failed test".format(dialogue_file)
return False
return True
def dialogue_data_and_indices_from_matrix(d_matrix,
n_extra,
pre_seg=False,
window_size=2,
bs=9,
tag_rep="disf1_uttseg",
tag_to_idx_map=None,
in_utterances=False):
"""Transforming from input format of row:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
to 5-tuple of:
word_idx, pos_idx, extra, labels, indices
where :word_idx: and :pos_idx: have the correct window context
according to @window_size
and :indices: is the start and stop points for consumption by the
net in training for each label in :labels:. :extra: is the matrix
of extra features.
"""
    if len(d_matrix) == 0:
return
utt_indices = d_matrix[:, 0]
words = d_matrix[:, 1]
pos = d_matrix[:, 2]
extra = None if n_extra == 0 else d_matrix[:, 3: -1]
labels = d_matrix[:, -1]
word_idx = []
pos_idx = []
current = []
indices = []
previous_idx = -1
for i, a_tuple in enumerate(zip(utt_indices, words, pos, labels)):
utt_idx, w, p, l = a_tuple
# print(w)
current.append((w, p, l))
if pre_seg:
if previous_idx != utt_idx or i == len(labels)-1:
if in_utterances:
start = 0 if indices == [] else indices[-1][1]+1
indices.append([start, start + (len(current)-1)])
else:
indices.extend(indices_from_length(len(current), bs,
start_index=len(indices)))
word_idx.extend(context_win_backwards([x[0] for x in current],
window_size))
pos_idx.extend(context_win_backwards([x[1] for x in current],
window_size))
current = []
# print('final')
# print(w)
# print(word_idx)
elif i == len(labels)-1:
# indices = indices_from_length(len(current), bs)
# currently a simple window of same size
indices = [[j, j + bs] for j in range(0, len(current))]
padding = [[-1, -1]] * (bs - window_size)
word_idx = padding + context_win_backwards([x[0] for x in current],
window_size)
pos_idx = padding + context_win_backwards([x[1] for x in current],
window_size)
previous_idx = utt_idx
# print(pos_idx)
# print(word_idx)
# print(extra)
# print(labels)
# print(indices)
# return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
# dtype=np.int32),\
# labels,\
# np.asarray(indices, dtype=np.int32)
return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
dtype=np.int32),\
extra,\
labels,\
np.asarray(indices, dtype=np.int32)
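# Illustrative sketch: given rows of [utt_index, word_idx, pos_idx, label]
# (n_extra=0), this returns the windowed word/POS index arrays, extra=None, the
# label column and the per-label [start, stop] consumption indices for the RNN.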
if __name__ == '__main__':
tags = '<f/>,<rms id="3"/>,<i id="3"/><e/>,<rps id="3"/>' +\
'<rpnsub id="3"/>,<f/>,<e/>,<f/>,' + \
'<f/>'
tags = tags.split(",")
words = "i,like,uh,love,to,uh,love,alot".split(",")
# print(tags)
# print(len(tags))
# print(len(words))
new_tags = convert_from_eval_tags_to_inc_disfluency_tags(
tags,
words,
representation="disf1")
# print(new_tags)
old_tags = convert_from_inc_disfluency_tags_to_eval_tags(
new_tags,
words,
representation="disf1")
assert old_tags == tags, "\n " + str(old_tags) + "\n" + str(tags)
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# print(context_win_backwards(x, 2))
# print "indices", indices_from_length(11, 9)
| 40.634146 | 85 | 0.49541 | 0 | 0 | 711 | 0.025104 | 0 | 0 | 0 | 0 | 9,263 | 0.32706 |
4093599c05b72acbbffdbf768053f07822e78e65 | 124 | py | Python | library/favourite/api/pagination.py | furkan-34/library-DRF-django-api | 3634133b7c543d6d05845dd8fa1f206386c1badb | [
"MIT"
]
| null | null | null | library/favourite/api/pagination.py | furkan-34/library-DRF-django-api | 3634133b7c543d6d05845dd8fa1f206386c1badb | [
"MIT"
]
| null | null | null | library/favourite/api/pagination.py | furkan-34/library-DRF-django-api | 3634133b7c543d6d05845dd8fa1f206386c1badb | [
"MIT"
]
| null | null | null | from rest_framework.pagination import PageNumberPagination
class FavouritePagination(PageNumberPagination):
page_size=4 | 31 | 58 | 0.870968 | 64 | 0.516129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
40946ed59b952cc97c649459f7de1a75d4265832 | 564 | py | Python | Python-Math/Python-Math/check_prime.py | rgabeflores/Scripts | c8138cb4543e576924de2107abb5a65f0b84264c | [
"MIT"
]
| 2 | 2018-05-12T10:58:51.000Z | 2021-11-16T11:52:27.000Z | src/Python-Math/check_prime.py | learn-py/Posts | da394236db0a52c93ca1c0374ad121b263555272 | [
"MIT"
]
| null | null | null | src/Python-Math/check_prime.py | learn-py/Posts | da394236db0a52c93ca1c0374ad121b263555272 | [
"MIT"
]
| null | null | null | '''
@author Gabriel Flores
Checks the primality of an integer.
'''
def is_prime(x):
'''
Checks the primality of an integer.
'''
    if x < 2:
        return False
    sqrt = int(x ** 0.5)
    for i in range(2, sqrt + 1):
        if x % i == 0:
            return False
    return True
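# With the guard and inclusive bound above: is_prime(2) and is_prime(7) are True,
# while is_prime(1) and is_prime(9) are False.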
def main():
try:
print("\n\n")
a = int(input(" Enter an integer to check if it is prime: "))
if is_prime(a):
print("\n ",a,"is a prime number.\n")
else:
print("\n ",a,"is not a prime number.\n")
except ValueError as e:
print("\n\n Please enter a valid choice.\n")
if __name__ == "__main__":
main() | 18.8 | 66 | 0.592199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 280 | 0.496454 |
4095239ac8155507cd8501376f1d1a88028e9392 | 1,580 | py | Python | src/contrib/cortex-strings/scripts/plot-top.py | lastweek/source-freebsd | 0821950b0c40cbc891a27964b342e0202a3859ec | [
"Naumen",
"Condor-1.1",
"MS-PL"
]
| null | null | null | src/contrib/cortex-strings/scripts/plot-top.py | lastweek/source-freebsd | 0821950b0c40cbc891a27964b342e0202a3859ec | [
"Naumen",
"Condor-1.1",
"MS-PL"
]
| null | null | null | src/contrib/cortex-strings/scripts/plot-top.py | lastweek/source-freebsd | 0821950b0c40cbc891a27964b342e0202a3859ec | [
"Naumen",
"Condor-1.1",
"MS-PL"
]
| null | null | null | #!/usr/bin/env python
"""Plot the performance of different variants of the string routines
for one size.
"""
import libplot
import pylab
def plot(records, bytes):
records = [x for x in records if x.bytes==bytes]
variants = libplot.unique(records, 'variant', prefer='this')
functions = libplot.unique(records, 'function')
X = pylab.arange(len(functions))
width = 1.0/(len(variants)+1)
colours = libplot.make_colours()
pylab.figure(1).set_size_inches((16, 12))
pylab.clf()
for i, variant in enumerate(variants):
heights = []
for function in functions:
matches = [x for x in records if x.variant==variant and x.function==function and x.src_alignment==8]
if matches:
vals = [match.bytes*match.loops/match.elapsed/(1024*1024) for
match in matches]
mean = sum(vals)/len(vals)
heights.append(mean)
else:
heights.append(0)
        pylab.bar(X+i*width, heights, width, color=next(colours), label=variant)
axes = pylab.axes()
axes.set_xticklabels(functions)
axes.set_xticks(X + 0.5)
pylab.title('Performance of different variants for %d byte blocks' % bytes)
pylab.ylabel('Rate (MB/s)')
pylab.legend(loc='upper left', ncol=3)
pylab.grid()
pylab.savefig('top-%06d.png' % bytes, dpi=72)
def main():
records = libplot.parse()
for bytes in libplot.unique(records, 'bytes'):
plot(records, bytes)
pylab.show()
if __name__ == '__main__':
main()
| 25.483871 | 112 | 0.61519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.153165 |
40958b5deb96439390eb8a34bb5ed7d5f2983d33 | 3,292 | py | Python | part1.py | aspiringguru/python_sqlite_demo | 01422c69493b7301f66dee5a0c99e358aec9746b | [
"MIT"
]
| null | null | null | part1.py | aspiringguru/python_sqlite_demo | 01422c69493b7301f66dee5a0c99e358aec9746b | [
"MIT"
]
| null | null | null | part1.py | aspiringguru/python_sqlite_demo | 01422c69493b7301f66dee5a0c99e358aec9746b | [
"MIT"
]
| null | null | null | import sqlite3
import time, datetime, random
import matplotlib
matplotlib.use("Agg")
#added due to error, possibly due to install configuration
import matplotlib.pyplot as plt
print(matplotlib.get_backend())
import matplotlib.dates as mdates
from matplotlib import style
style.use('fivethirtyeight')
conn = sqlite3.connect("part1.db")
c = conn.cursor()
def create_table():
c.execute('CREATE TABLE IF NOT EXISTS stufftoplot(unix REAL, datestamp TEXT, keyword TEXT, value REAL)')
def data_entry():
c.execute("INSERT into stufftoplot VALUES(123456, '2016-01-01', 'some keywords', 5)")
conn.commit()
def data_insert(unix, date, keyword, value):
c.execute("INSERT into stufftoplot (unix, datestamp, keyword, value) VALUES(?, ?, ?, ?) ", (unix, date, keyword, value))
conn.commit()
def select_all_tasks(c):
"""
    Query all rows in the stufftoplot table.
    :param c: the database cursor
:return:
"""
c.execute("SELECT * FROM stufftoplot")
rows = c.fetchall()
for row in rows:
print(row)
def dynamic_data_entry():
unix = time.time()
value = random.randrange(0,10)
print ("unix:", type(unix), unix, "value:", value)
date = str(datetime.datetime.fromtimestamp(unix).strftime('%Y-%m-%d %H:%M:%S'))
keyword = 'Python'
c.execute("INSERT into stufftoplot (unix, datestamp, keyword, value) VALUES (?, ?, ?, ?)", (unix, date, keyword, value))
conn.commit()
def read_from_db():
#c.execute('SELECT * FROM stufftoplot')
#c.execute("SELECT * FROM stufftoplot WHERE value = '5' AND keyword='python' COLLATE NOCASE")
#c.execute("SELECT * FROM stufftoplot WHERE value = 3 AND keyword='Python'")
c.execute("SELECT * FROM stufftoplot WHERE unix > 1529020514")
data = c.fetchall()
print (type(data))
print(data)
for row in data:
print (row)
def graph_data():
c.execute('SELECT unix, value FROM stufftoplot')
data = c.fetchall()
print (type(data))
dates = []
values = []
for row in data:
print (row[0])
print (datetime.datetime.fromtimestamp(row[0]))
dates.append(datetime.datetime.fromtimestamp(row[0]))
values.append(row[1])
plt.plot_date(dates, values, '-')
#plt.show()
plt.savefig("charts/output_chart.png")
print("chart plotted to file")
def del_and_update():
c.execute("SELECT * FROM stufftoplot")
temp = c.fetchall()
[print (row) for row in temp]
before = len(temp)
c.execute("SELECT * FROM stufftoplot WHERE value>5")
temp = c.fetchall()
num_matches = len(temp)
c.execute("UPDATE stufftoplot SET value=99 WHERE value=8")
conn.commit()
c.execute("SELECT * FROM stufftoplot")
temp = c.fetchall()
[print (row) for row in temp]
after = len(temp)
print ("before:", before)
print ("after:", after)
print ("num_matches:", num_matches)
def create_n_rows(n):
for i in range(n):
dynamic_data_entry()
time.sleep(1)
create_table()
#data_entry()
#data_insert(1111, "2016-01-02", "more keywords", 1)
#data_insert(2222, "2016-01-03", "less keywords", 2)
#dynamic_data_entry()
# time.sleep(1)
#select_all_tasks(c)
#read_from_db()
#graph_data()
create_n_rows(10)
del_and_update()
c.close()
conn.close()
| 25.92126 | 124 | 0.65401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,321 | 0.401276 |
40964229b92108c25937fb12522f648ac39e3e91 | 42,098 | py | Python | tests/test_oic_consumer.py | infohash/pyoidc | 62c7318e68c22b7933100d1c06ecc0c78f77f0d9 | [
"Apache-2.0"
]
| null | null | null | tests/test_oic_consumer.py | infohash/pyoidc | 62c7318e68c22b7933100d1c06ecc0c78f77f0d9 | [
"Apache-2.0"
]
| null | null | null | tests/test_oic_consumer.py | infohash/pyoidc | 62c7318e68c22b7933100d1c06ecc0c78f77f0d9 | [
"Apache-2.0"
]
| null | null | null | import json
import os
from urllib.parse import parse_qs
from urllib.parse import urlparse
import pytest
import responses
from freezegun import freeze_time
from jwkest import BadSignature
from jwkest.jwk import SYMKey
from oic.oauth2.message import MissingSigningKey
from oic.oauth2.message import WrongSigningAlgorithm
from oic.oic import DEF_SIGN_ALG
from oic.oic import Server
from oic.oic import response_types_to_grant_types
from oic.oic.consumer import IGNORE
from oic.oic.consumer import Consumer
from oic.oic.consumer import clean_response
from oic.oic.message import AccessTokenRequest
from oic.oic.message import AccessTokenResponse
from oic.oic.message import AuthorizationResponse
from oic.oic.message import IdToken
from oic.oic.message import OpenIDSchema
from oic.oic.message import ProviderConfigurationResponse
from oic.oic.message import RegistrationResponse
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyBundle
from oic.utils.keyio import KeyJar
from oic.utils.keyio import keybundle_from_local_file
from oic.utils.sdb import DictSessionBackend
from oic.utils.sdb import session_get
from oic.utils.time_util import utc_time_sans_frac
__author__ = "rohe0002"
KC_SYM_VS = KeyBundle({"kty": "oct", "key": "abcdefghijklmnop", "use": "ver"})
KC_SYM_S = KeyBundle({"kty": "oct", "key": "abcdefghijklmnop", "use": "sig"})
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/keys"))
KC_RSA = keybundle_from_local_file(
os.path.join(BASE_PATH, "rsa.key"), "rsa", ["ver", "sig"]
)
SRVKEYS = KeyJar()
SRVKEYS[""] = [KC_RSA]
SRVKEYS["client_1"] = [KC_SYM_VS, KC_RSA]
CLIKEYS = KeyJar()
CLIKEYS["http://localhost:8088"] = [KC_RSA]
CLIKEYS[""] = [KC_RSA, KC_SYM_VS]
CLIKEYS["https://example.com"] = [KC_RSA]
SERVER_INFO = {
"version": "3.0",
"issuer": "https://localhost:8088",
"authorization_endpoint": "http://localhost:8088/authorization",
"token_endpoint": "http://localhost:8088/token",
"userinfo_endpoint": "http://localhost:8088/userinfo",
"flows_supported": ["code", "token"],
}
CONFIG = {
"authz_page": "authz",
"scope": ["openid"],
"response_type": "code",
"request_method": "parameter",
"password": "hemligt",
"max_age": 3600,
"user_info": {"name": None},
}
def _eq(l1, l2):
return set(l1) == set(l2)
def test_response_types_to_grant_types():
req_args = ["code"]
assert set(response_types_to_grant_types(req_args)) == {"authorization_code"}
req_args = ["code", "code id_token"]
assert set(response_types_to_grant_types(req_args)) == {
"authorization_code",
"implicit",
}
req_args = ["code", "id_token code", "code token id_token"]
assert set(response_types_to_grant_types(req_args)) == {
"authorization_code",
"implicit",
}
req_args = ["code", "id_token code", "code token id_token"]
kwargs = {"grant_types": ["refresh_token", "authorization_code"]}
assert set(response_types_to_grant_types(req_args, **kwargs)) == {
"authorization_code",
"implicit",
"refresh_token",
}
with pytest.raises(ValueError):
response_types_to_grant_types(["foobar openid"])
def test_clean_response():
atr = AccessTokenResponse(
access_token="access_token",
token_type="bearer",
expires_in=600,
refresh_token="refresh",
steps=39,
stalls="yes",
)
catr = clean_response(atr)
atr_keys = atr.keys()
catr_keys = catr.keys()
assert _eq(
atr_keys,
[
"token_type",
"access_token",
"expires_in",
"refresh_token",
"steps",
"stalls",
],
)
assert _eq(catr_keys, ["token_type", "access_token", "expires_in", "refresh_token"])
class TestOICConsumer:
@pytest.fixture(autouse=True)
def setup_consumer(self, session_db_factory):
client_id = "client_1"
client_config = {
"client_id": client_id,
"client_authn_method": CLIENT_AUTHN_METHOD,
}
self.consumer = Consumer(
DictSessionBackend(), CONFIG, client_config, SERVER_INFO
)
self.consumer.behaviour = {
"request_object_signing_alg": DEF_SIGN_ALG["openid_request_object"]
}
self.consumer.keyjar = CLIKEYS
self.consumer.redirect_uris = ["https://example.com/cb"]
self.consumer.authorization_endpoint = "https://example.com/authorization"
self.consumer.token_endpoint = "https://example.com/token"
self.consumer.userinfo_endpoint = "https://example.com/userinfo" # type: ignore
self.consumer.client_secret = "hemlig"
self.consumer.secret_type = "basic"
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
def test_backup_keys(self):
keys = self.consumer.__dict__.keys()
_dict = self.consumer.dictionary()
dkeys = [key for key in keys if key not in _dict.keys()]
assert _eq(dkeys, IGNORE)
def test_backup_restore(self):
authz_org_url = "http://example.org/authorization"
_dict = sorted(list(self.consumer.__dict__.items()))
self.consumer._backup("sid")
self.consumer.restore("sid")
assert sorted(_dict) == sorted(list(self.consumer.__dict__.items()))
self.consumer.authorization_endpoint = authz_org_url
assert _dict != sorted(list(self.consumer.__dict__.items()))
self.consumer.restore("sid")
assert _dict == sorted(list(self.consumer.__dict__.items()))
def test_backup_restore_update(self):
authz_org_url = "http://example.org/authorization"
self.consumer._backup("sid")
self.consumer.authorization_endpoint = authz_org_url
self.consumer.token_endpoint = "https://example.org/token"
self.consumer.userinfo_endpoint = "" # type: ignore
assert self.consumer.authorization_endpoint == authz_org_url
assert self.consumer.token_endpoint == "https://example.org/token"
assert self.consumer.userinfo_endpoint == "" # type: ignore
self.consumer.update("sid")
assert self.consumer.authorization_endpoint == authz_org_url
assert self.consumer.token_endpoint == "https://example.org/token"
assert (
self.consumer.userinfo_endpoint # type: ignore
== "https://example.com/userinfo"
)
def test_begin(self):
srv = Server()
srv.keyjar = SRVKEYS
sid, location = self.consumer.begin("openid", "code")
authreq = srv.parse_authorization_request(url=location)
assert _eq(
list(authreq.keys()),
[
"state",
"max_age",
"claims",
"response_type",
"client_id",
"scope",
"redirect_uri",
],
)
assert authreq["state"] == sid
assert authreq["scope"] == self.consumer.consumer_config["scope"]
assert authreq["client_id"] == self.consumer.client_id
def test_begin_file(self, tmpdir):
path = tmpdir.strpath
external_path = "/exported"
self.consumer.consumer_config["request_method"] = "file"
self.consumer.consumer_config["temp_dir"] = path
self.consumer.consumer_config["temp_path"] = external_path
self.consumer.consumer_config["authz_page"] = "/authz"
srv = Server()
srv.keyjar = SRVKEYS
sid, location = self.consumer.begin(
"openid", "code", path="http://localhost:8087"
)
with responses.RequestsMock() as rsps:
p = urlparse(self.consumer.request_uri)
assert p.netloc == "localhost:8087"
# Map the URL path to the local path
relative_path = os.path.relpath(p.path, external_path)
file_path = os.path.join(path, relative_path)
with open(file_path) as f:
rsps.add(
rsps.GET,
self.consumer.request_uri,
body=f.read(),
status=200,
content_type="application/urlencoded",
)
authreq = srv.parse_authorization_request(url=location)
assert _eq(
list(authreq.keys()),
[
"max_age",
"state",
"redirect_uri",
"response_type",
"client_id",
"scope",
"claims",
],
)
assert authreq["state"] == sid
assert authreq["scope"] == self.consumer.consumer_config["scope"]
assert authreq["client_id"] == self.consumer.client_id
assert authreq["redirect_uri"].startswith("http://localhost:8087/authz")
def test_complete(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
resp = self.consumer.complete(_state)
assert isinstance(resp, AccessTokenResponse)
assert _eq(resp.keys(), ["token_type", "state", "access_token", "scope"])
assert resp["state"] == _state
def test_parse_authz(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
self.consumer._backup(_state)
part = self.consumer.parse_authz(query=result.headers["location"])
assert isinstance(part, tuple)
atr = part[0]
assert part[1] is None
assert part[2] is None
assert isinstance(atr, AuthorizationResponse)
assert atr["state"] == _state
assert "code" in atr
def test_parse_authz_implicit(self):
self.consumer.consumer_config["response_type"] = ["token"]
_state = "statxxx"
args = {
"client_id": self.consumer.client_id,
"response_type": "implicit",
"scope": ["openid"],
"redirect_uri": "https://example.com/cb",
}
location = (
"https://example.com/cb?access_token=token&token_type=bearer&state=statxxx"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
part = self.consumer.parse_authz(query=result.headers["location"])
assert isinstance(part, tuple)
assert part[0] is None
atr = part[1]
assert part[2] is None
assert isinstance(atr, AccessTokenResponse)
assert atr["state"] == _state
assert "access_token" in atr
def test_complete_secret_auth(self):
_state = "state0"
del self.consumer.consumer_config["password"]
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
resp = self.consumer.complete(_state)
assert isinstance(resp, AccessTokenResponse)
assert _eq(resp.keys(), ["token_type", "state", "access_token", "scope"])
assert resp["state"] == _state
def test_complete_auth_token(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["code", "token"]
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
location = (
"https://example.com/cb?code=some_code&state=state0&access_token=token&token_type=bearer"
"&client_id=client_1&scope=openid"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
self.consumer._backup("state0")
parsed = urlparse(result.headers["location"])
part = self.consumer.parse_authz(query=parsed.query)
assert isinstance(part, tuple)
auth = part[0]
acc = part[1]
assert part[2] is None
assert isinstance(auth, AuthorizationResponse)
assert isinstance(acc, AccessTokenResponse)
assert _eq(
auth.keys(),
["code", "access_token", "token_type", "state", "client_id", "scope"],
)
assert _eq(acc.keys(), ["token_type", "state", "access_token", "scope"])
def test_complete_auth_token_idtoken(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token", "token"]
self.consumer.registration_response = RegistrationResponse(
id_token_signed_response_alg="HS256"
)
self.consumer.authz_req = {} # Store AuthzReq with state as key
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=[SYMKey(key="hemlig")], algorithm="HS256")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["id_token token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
part = self.consumer.parse_authz(query=parsed.query)
assert isinstance(part, tuple)
auth = part[0]
atr = part[1]
idt = part[2]
assert auth is None
assert isinstance(atr, AccessTokenResponse)
assert _eq(
atr.keys(),
[
"access_token",
"id_token",
"id_token_jwt",
"token_type",
"state",
"scope",
],
)
assert isinstance(idt, IdToken)
def test_complete_auth_token_idtoken_no_alg_config(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token", "token"]
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=[SYMKey(key="hemlig")], algorithm="HS256")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["id_token token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
part = self.consumer.parse_authz(query=parsed.query, algs={"sign": "HS256"})
assert isinstance(part, tuple)
auth = part[0]
atr = part[1]
idt = part[2]
assert auth is None
assert isinstance(atr, AccessTokenResponse)
assert _eq(
atr.keys(),
[
"access_token",
"id_token",
"id_token_jwt",
"token_type",
"state",
"scope",
],
)
assert isinstance(idt, IdToken)
def test_complete_auth_token_idtoken_none_cipher_code(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["code"]
self.consumer.registration_response = RegistrationResponse(
id_token_signed_response_alg="none"
)
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
self.consumer.sdb[_state] = {"redirect_uris": []}
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
at_hash="aaa",
)
# Downgrade the algorithm to `none`
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=KC_RSA.keys(), algorithm="none")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["code"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
part = self.consumer.parse_authz(query=parsed.query)
assert isinstance(part, tuple)
auth = part[0]
atr = part[1]
idt = part[2]
assert isinstance(auth, AuthorizationResponse)
assert isinstance(atr, AccessTokenResponse)
assert _eq(
atr.keys(), ["access_token", "id_token", "token_type", "state", "scope"]
)
assert isinstance(idt, IdToken)
def test_complete_auth_token_idtoken_none_cipher_token(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["token"]
self.consumer.registration_response = RegistrationResponse(
id_token_signed_response_alg="none"
)
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
self.consumer.sdb[_state] = {"redirect_uris": []}
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
# Downgrade the algorithm to `none`
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=KC_RSA.keys(), algorithm="none")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
with pytest.raises(WrongSigningAlgorithm):
self.consumer.parse_authz(query=parsed.query)
def test_complete_auth_token_idtoken_cipher_downgrade(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token", "token"]
self.consumer.provider_info = ProviderConfigurationResponse(
issuer="https://example.com"
) # abs min
self.consumer.authz_req = {} # Store AuthzReq with state as key
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
"nonce": "nonce",
}
token = IdToken(
iss="https://example.com",
aud="client_1",
sub="some_sub",
exp=1565348600,
iat=1565348300,
nonce="nonce",
)
# Downgrade the algorithm to `none`
location = (
"https://example.com/cb?state=state0&access_token=token&token_type=bearer&"
"scope=openid&id_token={}".format(
token.to_jwt(key=KC_RSA.keys(), algorithm="none")
)
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
query = parse_qs(urlparse(result.request.url).query)
assert query["client_id"] == ["client_1"]
assert query["scope"] == ["openid"]
assert query["response_type"] == ["id_token token"]
assert query["state"] == ["state0"]
assert query["nonce"] == ["nonce"]
assert query["redirect_uri"] == ["https://example.com/cb"]
parsed = urlparse(result.headers["location"])
with freeze_time("2019-08-09 11:00:00"):
with pytest.raises(WrongSigningAlgorithm):
self.consumer.parse_authz(query=parsed.query)
def test_userinfo(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
rsps.add(
responses.POST,
"https://example.com/userinfo",
content_type="application/json",
json={
"name": "Ilja",
"sub": "some_sub",
"email": "[email protected]",
"nickname": "Ilja",
"verified": True,
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
self.consumer.complete(_state)
result = self.consumer.get_user_info(_state)
assert isinstance(result, OpenIDSchema)
assert _eq(result.keys(), ["name", "email", "verified", "nickname", "sub"])
def test_sign_userinfo(self):
_state = "state0"
self.consumer.client_prefs = {"userinfo_signed_response_alg": "RS256"}
del self.consumer.consumer_config["request_method"]
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
rsps.add(
responses.POST,
"https://example.com/userinfo",
content_type="application/json",
json={
"name": "Ilja",
"sub": "some_sub",
"email": "[email protected]",
"nickname": "Ilja",
"verified": True,
},
)
self.consumer.begin("openid", "code")
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
self.consumer.complete(_state)
result = self.consumer.get_user_info(_state)
assert isinstance(result, OpenIDSchema)
assert _eq(result.keys(), ["name", "email", "verified", "nickname", "sub"])
def test_get_userinfo_claims(self):
_state = "state0"
args = {
"client_id": self.consumer.client_id,
"response_type": "code",
"scope": ["openid"],
}
location = "https://example.com/cb?code=code&state=state0"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
rsps.add(
responses.POST,
"https://example.com/token",
content_type="application/json",
json={
"access_token": "some_token",
"token_type": "bearer",
"state": "state0",
"scope": "openid",
},
)
rsps.add(
responses.POST,
"https://example.com/userinfo",
content_type="application/json",
json={
"name": "Ilja",
"sub": "some_sub",
"email": "[email protected]",
"nickname": "Ilja",
"verified": True,
},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
parsed = urlparse(result.headers["location"])
self.consumer.parse_response(
AuthorizationResponse, info=parsed.query, sformat="urlencoded"
)
response = self.consumer.complete(_state)
result = self.consumer.get_userinfo_claims(
response["access_token"],
self.consumer.userinfo_endpoint, # type: ignore
)
assert isinstance(result, OpenIDSchema)
assert _eq(result.keys(), ["name", "email", "verified", "nickname", "sub"])
def real_test_discover(self):
c = Consumer(None, None)
principal = "[email protected]"
res = c.discover(principal)
assert isinstance(res, ProviderConfigurationResponse)
assert _eq(
res.keys(),
[
"registration_endpoint",
"scopes_supported",
"identifiers_supported",
"token_endpoint",
"flows_supported",
"version",
"userinfo_endpoint",
"authorization_endpoint",
"x509_url",
"issuer",
],
)
assert res.version == "3.0" # type: ignore
assert _eq(
res.flows_supported, # type: ignore
[
"code",
"token",
"id_token",
"code token",
"code id_token",
"id_token token",
],
)
def test_discover(self):
c = Consumer(None, None)
webfinger = {
"subject": "acct:[email protected]",
"links": [
{
"rel": "http://openid.net/specs/connect/1.0/issuer",
"href": "https://localhost:8088/",
}
],
}
principal = "[email protected]"
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/.well-known/webfinger"
"?resource=acct%3Afoo%40example.com&rel=http%3A%2F%2Fopenid.net%2Fspecs%2Fconnect%2F1.0%2Fissuer",
json=webfinger,
)
res = c.discover(principal)
assert res == "https://localhost:8088/"
def test_client_register(self):
c = Consumer(None, None)
c.redirect_uris = ["https://example.com/authz"]
reg_resp = {
"client_id": "some_client",
"client_secret": "super_secret",
"client_secret_expires_at": 123456789,
"redirect_uris": ["https://example.com/authz"],
}
with responses.RequestsMock() as rsps:
rsps.add(responses.POST, "https://example.com/register/", json=reg_resp)
c.register("https://example.com/register/")
assert json.loads(str(rsps.calls[0].request.body)) == {
"application_type": "web",
"response_types": ["code"],
"redirect_uris": ["https://example.com/authz"],
"grant_types": ["authorization_code"],
}
assert c.client_id == "some_client"
assert c.client_secret == "super_secret"
assert c.registration_expires == 123456789
def test_client_register_token(self):
c = Consumer(None, None)
c.redirect_uris = ["https://example.com/authz"]
client_info = {
"client_id": "clientid",
"redirect_uris": ["https://example.com/authz"],
}
with responses.RequestsMock() as rsps:
rsps.add(
rsps.POST,
"https://provider.example.com/registration/",
json=client_info,
)
c.register(
"https://provider.example.com/registration/",
registration_token="initial_registration_token",
)
header = rsps.calls[0].request.headers["Authorization"]
assert header == "Bearer aW5pdGlhbF9yZWdpc3RyYXRpb25fdG9rZW4="
def test_client_register_token_b64(self):
c = Consumer(None, None)
c.redirect_uris = ["https://example.com/authz"]
client_info = {
"client_id": "clientid",
"redirect_uris": ["https://example.com/authz"],
}
registration_token = (
"eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6IC"
"JlYjc1N2M3Yy00MWRlLTRmZDYtOTkwNy1hNGFiMDY1ZjEzMmEifQ.eyJqdGkiOiI2ZWY0MDZi"
"MC02YzA3LTQ0NzctOWU1YS1hY2FiZjNiMWNiMjgiLCJleHAiOjAsIm5iZiI6MCwiaWF0Ijox"
"NTczNzMxNjg5LCJpc3MiOiJodHRwczovL29wZW5pZC1wcm92aWRlci5leGFtcGxlLmNvbS9h"
"dXRoL3JlYWxtcy9tYXN0ZXIiLCJhdWQiOiJodHRwczovL29wZW5pZC1wcm92aWRlci5leGFt"
"cGxlLmNvbS9hdXRoL3JlYWxtcy9tYXN0ZXIiLCJ0eXAiOiJJbml0aWFsQWNjZXNzVG9rZW4i"
"fQ.0XTlit_JcxPZeIy8A4BzrHn1NvegVP7ws8KI0ySFex8"
)
with responses.RequestsMock() as rsps:
rsps.add(
rsps.POST,
"https://provider.example.com/registration/",
json=client_info,
)
c.register(
"https://provider.example.com/registration/",
registration_token=registration_token,
)
header = rsps.calls[0].request.headers["Authorization"]
assert header == "Bearer " + registration_token
def _faulty_id_token(self):
idval = {
"nonce": "KUEYfRM2VzKDaaKD",
"sub": "EndUserSubject",
"iss": "https://alpha.cloud.nds.rub.de",
"exp": 1420823073,
"iat": 1420822473,
"aud": "TestClient",
}
idts = IdToken(**idval)
_signed_jwt = idts.to_jwt(key=[SYMKey(key="TestPassword")], algorithm="HS256")
# Mess with the signed id_token
p = _signed_jwt.split(".")
p[2] = "aaa"
return ".".join(p)
def test_faulty_id_token(self):
_faulty_signed_jwt = self._faulty_id_token()
with pytest.raises(BadSignature):
IdToken().from_jwt(_faulty_signed_jwt, key=[SYMKey(key="TestPassword")])
# What if no verification key is given ?
# Should also result in an exception
with pytest.raises(MissingSigningKey):
IdToken().from_jwt(_faulty_signed_jwt)
def test_faulty_id_token_in_access_token_response(self):
c = Consumer(None, None)
c.keyjar.add_symmetric("", "TestPassword", ["sig"])
_info = {
"access_token": "accessTok",
"id_token": self._faulty_id_token(),
"token_type": "Bearer",
}
_json = json.dumps(_info)
with pytest.raises(ValueError):
c.parse_response(AccessTokenResponse, _json, sformat="json")
def test_faulty_idtoken_from_accesstoken_endpoint(self):
_state = "state0"
self.consumer.consumer_config["response_type"] = ["id_token"]
args = {
"client_id": self.consumer.client_id,
"response_type": self.consumer.consumer_config["response_type"],
"scope": ["openid"],
}
location = (
"https://example.com/cb?state=state0&id_token=eyJhbGciOiJSUzI1NiJ9"
".eyJpc3MiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg4IiwgInN1YiI6ICJhNWRkMjRiMmYwOGE2ODZmZDM4NmMyMmM"
"zZmY4ZWUyODFlZjJmYmZmMWZkZTcwMDg2NjhjZGEzZGVjZmE0NjY5IiwgImF1ZCI6IFsiY2xpZW50XzEiXSwgImV"
"4cCI6IDE1NzIwOTk5NjAsICJhY3IiOiAiMiIsICJpYXQiOiAxNTcyMDEzNTYwLCAibm9uY2UiOiAibmdFTGZVdmN"
"PMWoyaXNWcXkwQWNwM0NOYlZnMGdFRDEifQ.aaa"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
"https://example.com/authorization",
status=302,
headers={"location": location},
)
result = self.consumer.do_authorization_request(
state=_state, request_args=args
)
self.consumer._backup("state0")
assert result.status_code == 302
query = urlparse(result.headers["location"]).query
with pytest.raises(BadSignature):
self.consumer.parse_authz(query=query)
def test_get_session_management_id(self):
now = utc_time_sans_frac()
smid = "session_management_id"
idval = {
"nonce": "KUEYfRM2VzKDaaKD",
"sub": "EndUserSubject",
"iss": "https://example.com",
"exp": now + 3600,
"iat": now,
"aud": self.consumer.client_id,
"sid": smid,
}
idts = IdToken(**idval)
_signed_jwt = idts.to_jwt(key=KC_RSA.keys(), algorithm="RS256")
_state = "state"
self.consumer.sdb[_state] = {"redirect_uris": ["https://example.org/cb"]}
resp = AuthorizationResponse(id_token=_signed_jwt, state=_state)
self.consumer.consumer_config["response_type"] = ["id_token"]
self.consumer.authz_req[_state] = AccessTokenRequest(nonce="KUEYfRM2VzKDaaKD")
self.consumer.parse_authz(resp.to_urlencoded())
assert self.consumer.sso_db["state"]["smid"] == smid
assert session_get(self.consumer.sso_db, "smid", smid) == [_state]
| 35.585799 | 114 | 0.551071 | 38,260 | 0.908832 | 0 | 0 | 1,043 | 0.024776 | 0 | 0 | 10,931 | 0.259656 |
409660d0cd505763586410c6b2b0e9f378f6b60a | 2,338 | py | Python | setup.py | CristianPachacama/cartoframes | 3dc4e10d175069a7d7b734db3d9526127aad9dec | [
"BSD-3-Clause"
]
| 1 | 2020-11-23T23:44:32.000Z | 2020-11-23T23:44:32.000Z | setup.py | CristianPachacama/cartoframes | 3dc4e10d175069a7d7b734db3d9526127aad9dec | [
"BSD-3-Clause"
]
| null | null | null | setup.py | CristianPachacama/cartoframes | 3dc4e10d175069a7d7b734db3d9526127aad9dec | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import io
from codecs import open
from setuptools import setup, find_packages
def walk_subpkg(name):
data_files = []
package_dir = 'cartoframes'
for parent, dirs, files in os.walk(os.path.join(package_dir, name)):
# Remove package_dir from the path.
sub_dir = os.sep.join(parent.split(os.sep)[1:])
for f in files:
data_files.append(os.path.join(sub_dir, f))
return data_files
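# Illustrative example: walk_subpkg('assets') yields paths like 'assets/some_template.j2'
# relative to the 'cartoframes' package directory (the file name here is hypothetical).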
REQUIRES = [
'appdirs>=1.4.3,<2.0',
'carto>=1.6.0,<2.0',
'jinja2>=2.10.1,<3.0',
    'pandas>=0.24.2,<1.0',
'shapely>=1.6.4,<2.0',
'tqdm>=4.32.1,<5.0',
'unidecode>=1.1.0,<2.0',
'webcolors>=1.9.1,<2.0'
]
PACKAGE_DATA = {
'': [
'LICENSE',
'CONTRIBUTORS',
],
'cartoframes': [
'assets/*',
'assets/*.j2'
] + walk_subpkg('assets'),
}
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
about = {}
with open(os.path.join(here, 'cartoframes', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=long_description,
url=about['__url__'],
author=about['__author__'],
author_email=about['__email__'],
license=about['__license__'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
keywords='carto data science maps spatial pandas',
packages=find_packages(),
install_requires=REQUIRES,
python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
include_package_data=True,
package_dir={'cartoframes': 'cartoframes'},
package_data=PACKAGE_DATA,
)
| 28.168675 | 82 | 0.597092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,030 | 0.440547 |
40967bb7a1cadc5b1fb7c5cf4f834747f6a13132 | 1,026 | py | Python | Tests/test_BioSQL_mysql_connector_online.py | bioinf-mcb/biopython | 1a1f4a7ee4e0efba517d3d607c56c27e72e399cc | [
"BSD-3-Clause"
]
| 2 | 2019-10-25T18:20:34.000Z | 2019-10-28T15:26:40.000Z | Tests/test_BioSQL_mysql_connector_online.py | cosign070128/biopython | 2f02e34ba76306e9c27eec9e051809bec2cece9b | [
"BSD-3-Clause"
]
| 9 | 2020-05-05T00:54:23.000Z | 2020-06-09T17:10:45.000Z | Tests/test_BioSQL_mysql_connector_online.py | cosign070128/biopython | 2f02e34ba76306e9c27eec9e051809bec2cece9b | [
"BSD-3-Clause"
]
| 3 | 2020-06-29T13:07:46.000Z | 2021-06-14T20:11:55.000Z | #!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Run BioSQL tests using MySQL."""
import unittest
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
from common_BioSQL_online import * # noqa: F403
# Import these explicitly to avoid flake8 F405 below:
from common_BioSQL import load_biosql_ini, check_config
from common_BioSQL_online import share_config
import requires_internet
requires_internet.check()
DBDRIVER = "mysql.connector"
DBTYPE = "mysql"
DBHOST, DBUSER, DBPASSWD, TESTDB = load_biosql_ini(DBTYPE)
# This will abort if driver not installed etc:
check_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
share_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
if __name__ == "__main__":
# Run the test cases
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 30.176471 | 70 | 0.770955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 453 | 0.44152 |
4096eff81f78a7602d75dd243df5e2e64ac51f0d | 429 | py | Python | kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 7a46be7e75bad0500914e5a7c44662c6740ebaa2 | [
"MIT"
]
| null | null | null | kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 7a46be7e75bad0500914e5a7c44662c6740ebaa2 | [
"MIT"
]
| 3 | 2021-03-19T01:19:04.000Z | 2021-06-04T22:44:35.000Z | kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 7a46be7e75bad0500914e5a7c44662c6740ebaa2 | [
"MIT"
]
| null | null | null | # Generated by Django 2.2.6 on 2020-04-05 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('saraswati', '0003_auto_20200402_1918'),
]
operations = [
migrations.AddField(
model_name='ritual',
name='people_name',
field=models.TextField(blank=True, default=None, max_length=108, null=True),
),
]
| 22.578947 | 88 | 0.617716 | 336 | 0.783217 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.242424 |
40997199af5c3427ea68e5bd37b9d827653408fe | 14,709 | py | Python | src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 3a81f1114ec7f347e6e7bfd861073d897a9188ec | [
"Apache-2.0"
]
| null | null | null | src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 3a81f1114ec7f347e6e7bfd861073d897a9188ec | [
"Apache-2.0"
]
| null | null | null | src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 3a81f1114ec7f347e6e7bfd861073d897a9188ec | [
"Apache-2.0"
]
| null | null | null | # Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import re
try:
import cPickle
except ImportError:
import pickle as cPickle
class NoSuchJobException( Exception ):
def __init__( self, jobStoreID ):
super( NoSuchJobException, self ).__init__( "The job '%s' does not exist" % jobStoreID )
class ConcurrentFileModificationException( Exception ):
def __init__( self, jobStoreFileID ):
super( ConcurrentFileModificationException, self ).__init__(
'Concurrent update to file %s detected.' % jobStoreFileID )
class NoSuchFileException( Exception ):
def __init__( self, fileJobStoreID ):
super( NoSuchFileException, self ).__init__( "The file '%s' does not exist" % fileJobStoreID )
class JobStoreCreationException( Exception ):
def __init__( self, message ):
super( JobStoreCreationException, self ).__init__( message )
class AbstractJobStore( object ):
"""
Represents the physical storage for the jobs and associated files in a toil.
"""
__metaclass__ = ABCMeta
def __init__( self, config=None ):
"""
        :param config: If config is not None, the given configuration object is written to the
        shared file "config.pickle", overwriting any existing copy, so that it can later be
        retrieved with readSharedFileStream (see writeConfigToStore). If config is None, the
        shared file "config.pickle" is assumed to exist already and is read back instead.
"""
#Now get on with reading or writing the config
if config is None:
with self.readSharedFileStream( "config.pickle", isProtected=False ) as fileHandle:
self.__config = cPickle.load(fileHandle)
else:
self.__config = config
self.writeConfigToStore()
def writeConfigToStore(self):
"""
Re-writes the config attribute to the jobStore, so that its values can be retrieved
if the jobStore is reloaded.
"""
with self.writeSharedFileStream( "config.pickle", isProtected=False ) as fileHandle:
cPickle.dump(self.__config, fileHandle, cPickle.HIGHEST_PROTOCOL)
@property
def config( self ):
return self.__config
@staticmethod
def _checkJobStoreCreation(create, exists, jobStoreString):
"""
Consistency checks which will result in exceptions if we attempt to overwrite an existing
jobStore.
:type create: boolean
:type exists: boolean
:raise JobStoreCreationException: Thrown if create=True and exists=True or create=False
and exists=False
"""
if create and exists:
raise JobStoreCreationException("The job store '%s' already exists. "
"Use --restart or 'toil restart' to resume this jobStore, "
"else remove it to start from scratch" % jobStoreString)
if not create and not exists:
raise JobStoreCreationException("The job store '%s' does not exist, so there "
"is nothing to restart." % jobStoreString)
@abstractmethod
def deleteJobStore( self ):
"""
Removes the jobStore from the disk/store. Careful!
"""
raise NotImplementedError( )
##Cleanup functions
def clean(self):
"""
Function to cleanup the state of a jobStore after a restart.
Fixes jobs that might have been partially updated.
Resets the try counts.
"""
#Collate any jobs that were in the process of being created/deleted
jobsToDelete = set()
for job in self.jobs():
for updateID in job.jobsToDelete:
jobsToDelete.add(updateID)
#Delete the jobs that should be deleted
if len(jobsToDelete) > 0:
for job in self.jobs():
if job.updateID in jobsToDelete:
self.delete(job.jobStoreID)
#Cleanup the state of each job
for job in self.jobs():
changed = False #Flag to indicate if we need to update the job
#on disk
if len(job.jobsToDelete) != 0:
job.jobsToDelete = set()
changed = True
#While jobs at the end of the stack are already deleted remove
#those jobs from the stack (this cleans up the case that the job
#had successors to run, but had not been updated to reflect this)
while len(job.stack) > 0:
jobs = [ command for command in job.stack[-1] if self.exists(command[0]) ]
if len(jobs) < len(job.stack[-1]):
changed = True
if len(jobs) > 0:
job.stack[-1] = jobs
break
else:
job.stack.pop()
else:
break
#Reset the retry count of the job
if job.remainingRetryCount < self._defaultTryCount():
job.remainingRetryCount = self._defaultTryCount()
changed = True
#This cleans the old log file which may
#have been left if the job is being retried after a job failure.
if job.logJobStoreFileID != None:
job.clearLogFile(self)
changed = True
if changed: #Update, but only if a change has occurred
self.update(job)
#Remove any crufty stats/logging files from the previous run
self.readStatsAndLogging(lambda x : None)
##########################################
#The following methods deal with creating/loading/updating/writing/checking for the
#existence of jobs
##########################################
@abstractmethod
def create( self, command, memory, cores, disk, updateID=None,
predecessorNumber=0 ):
"""
Creates a job, adding it to the store.
Command, memory, cores, updateID, predecessorNumber
are all arguments to the job's constructor.
:rtype : toil.jobWrapper.JobWrapper
"""
raise NotImplementedError( )
@abstractmethod
def exists( self, jobStoreID ):
"""
Returns true if the job is in the store, else false.
:rtype : bool
"""
raise NotImplementedError( )
@abstractmethod
def getPublicUrl( self, FileName):
"""
Returns a publicly accessible URL to the given file in the job store.
The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after it has been returned.
        Throws an exception if the file does not exist.
        :param FileName:
:return:
"""
raise NotImplementedError()
@abstractmethod
def getSharedPublicUrl( self, jobStoreFileID):
"""
Returns a publicly accessible URL to the given file in the job store.
The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after it has been returned.
        Throws an exception if the file does not exist.
:param jobStoreFileID:
:return:
"""
raise NotImplementedError()
@abstractmethod
def load( self, jobStoreID ):
"""
Loads a job for the given jobStoreID and returns it.
:rtype: toil.jobWrapper.JobWrapper
:raises: NoSuchJobException if there is no job with the given jobStoreID
"""
raise NotImplementedError( )
@abstractmethod
def update( self, job ):
"""
Persists the job in this store atomically.
"""
raise NotImplementedError( )
@abstractmethod
def delete( self, jobStoreID ):
"""
Removes from store atomically, can not then subsequently call load(), write(), update(),
etc. with the job.
This operation is idempotent, i.e. deleting a job twice or deleting a non-existent job
will succeed silently.
"""
raise NotImplementedError( )
def jobs(self):
"""
Returns iterator on the jobs in the store.
:rtype : iterator
"""
raise NotImplementedError( )
##########################################
    #The following provide a way of creating/reading/writing/updating files
#associated with a given job.
##########################################
@abstractmethod
def writeFile( self, localFilePath, jobStoreID=None ):
"""
Takes a file (as a path) and places it in this job store. Returns an ID that can be used
to retrieve the file at a later time.
jobStoreID is the id of a job, or None. If specified, when delete(job)
is called all files written with the given job.jobStoreID will be
removed from the jobStore.
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def writeFileStream( self, jobStoreID=None ):
"""
Similar to writeFile, but returns a context manager yielding a tuple of
1) a file handle which can be written to and 2) the ID of the resulting
file in the job store. The yielded file handle does not need to and
should not be closed explicitly.
"""
raise NotImplementedError( )
@abstractmethod
def getEmptyFileStoreID( self, jobStoreID=None ):
"""
:rtype : string, the ID of a new, empty file.
jobStoreID is the id of a job, or None. If specified, when delete(job)
is called all files written with the given job.jobStoreID will be
removed from the jobStore.
Call to fileExists(getEmptyFileStoreID(jobStoreID)) will return True.
"""
raise NotImplementedError( )
@abstractmethod
def readFile( self, jobStoreFileID, localFilePath ):
"""
Copies the file referenced by jobStoreFileID to the given local file path. The version
will be consistent with the last copy of the file written/updated.
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def readFileStream( self, jobStoreFileID ):
"""
Similar to readFile, but returns a context manager yielding a file handle which can be
read from. The yielded file handle does not need to and should not be closed explicitly.
"""
raise NotImplementedError( )
@abstractmethod
def deleteFile( self, jobStoreFileID ):
"""
Deletes the file with the given ID from this job store.
This operation is idempotent, i.e. deleting a file twice or deleting a non-existent file
will succeed silently.
"""
raise NotImplementedError( )
@abstractmethod
def fileExists(self, jobStoreFileID ):
"""
:rtype : True if the jobStoreFileID exists in the jobStore, else False
"""
raise NotImplementedError()
@abstractmethod
def updateFile( self, jobStoreFileID, localFilePath ):
"""
Replaces the existing version of a file in the jobStore. Throws an exception if the file
does not exist.
:raises ConcurrentFileModificationException: if the file was modified concurrently during
an invocation of this method
"""
raise NotImplementedError( )
##########################################
#The following methods deal with shared files, i.e. files not associated
#with specific jobs.
##########################################
sharedFileNameRegex = re.compile( r'^[a-zA-Z0-9._-]+$' )
# FIXME: Rename to updateSharedFileStream
@abstractmethod
@contextmanager
def writeSharedFileStream( self, sharedFileName, isProtected=True ):
"""
Returns a context manager yielding a writable file handle to the global file referenced
by the given name.
:param sharedFileName: A file name matching AbstractJobStore.fileNameRegex, unique within
the physical storage represented by this job store
:raises ConcurrentFileModificationException: if the file was modified concurrently during
an invocation of this method
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def readSharedFileStream( self, sharedFileName, isProtected=True ):
"""
Returns a context manager yielding a readable file handle to the global file referenced
by the given name.
"""
raise NotImplementedError( )
@abstractmethod
def writeStatsAndLogging( self, statsAndLoggingString ):
"""
Adds the given statistics/logging string to the store of statistics info.
"""
raise NotImplementedError( )
@abstractmethod
def readStatsAndLogging( self, statsAndLoggingCallBackFn):
"""
Reads stats/logging strings accumulated by "writeStatsAndLogging" function.
For each stats/logging file calls the statsAndLoggingCallBackFn with
an open, readable file-handle that can be used to parse the stats.
Returns the number of stat/logging strings processed.
Stats/logging files are only read once and are removed from the
file store after being written to the given file handle.
"""
raise NotImplementedError( )
## Helper methods for subclasses
def _defaultTryCount( self ):
return int( self.config.retryCount+1 )
@classmethod
def _validateSharedFileName( cls, sharedFileName ):
return bool( cls.sharedFileNameRegex.match( sharedFileName ) )
| 37.143939 | 102 | 0.615066 | 13,899 | 0.944932 | 0 | 0 | 8,189 | 0.556734 | 0 | 0 | 8,258 | 0.561425 |
409a342355b661973139a052737ed840078d30d8 | 9,819 | py | Python | dashboard.py | TheCrypticMusic/COVID-19 | b813d6abeb8031f1165ad2981f14bfd75853e083 | [
"MIT"
]
| null | null | null | dashboard.py | TheCrypticMusic/COVID-19 | b813d6abeb8031f1165ad2981f14bfd75853e083 | [
"MIT"
]
| null | null | null | dashboard.py | TheCrypticMusic/COVID-19 | b813d6abeb8031f1165ad2981f14bfd75853e083 | [
"MIT"
]
| null | null | null | from datetime import date
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.express as px
from dash.dependencies import Input, Output
test_data = pd.read_csv("data/world_data.csv")
today = date.today()
external_stylesheets = [dbc.themes.BOOTSTRAP]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.title = "COVID Dashboard - UK Edition"
app.layout = html.Div([
html.Nav(className="navbar navbar-dark fixed-top bg-dark flex-md-nowrap p-0 shadow", children=[
html.A(className="navbar-brand col-sm-3 col-md-2 mr-0", children="COVID-19"),
# dcc.DatePickerRange(className="date-and-location",
# id="month-picker",
# min_date_allowed=date(2020, 1, 30),
# max_date_allowed=date(today.year, today.month, today.day),
# start_date=date(2020, 3, 1),
# end_date=date(today.year, today.month, today.day),
# style={"height": "50%"}
# ),
]),
html.Div(className="container-fluid", children=[
html.Div(className="row", children=[
html.Nav(className="col-md-2 d-none d-md-block bg-light sidebar", children=[
html.Div(className="sidebar-sticky", children=[
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Custom Search"),
]),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("User Search", href="/home"),
])]),
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Preset Search"),
]),
dcc.Location(id="url", refresh=False),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("Africa", href="/africa"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Asia", href="/asia"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Europe", href="/europe"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("North America", href="/northamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("South America", href="/southamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Oceania", href="/oceania"),
html.Span(className="sr-only"),
]),
]),
html.Div(id='page-content'),
html.Ul(className="nav flex-column mb-2")
]),
]),
html.Main(role="main", className="col-md-9 ml-sm-auto col-lg-10 px-4", children=[
html.Div(className="chartjs-size-monitor", style={"position": "absolute", "left": "0px", "top": "0px", "right": "0px", "bottom": "0px", "overflow": "hidden", "pointer-events": "none", "visibility": "hidden", "z-index": "-1"}),
html.Div(className="box-shadow", children=[
]),
dbc.Row(
[
dbc.Col(children=[
html.H1(children="Deaths"),
html.Hr(className="lead"),
html.Div(id="death-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Cases"),
html.Hr(className="lead"),
html.Div(id="cases-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Vaccines"),
html.Hr(className="lead"),
html.Div(id="vaccines-stats", children="######"),
]),
]
),
html.Div(className="graphs", children=[
dcc.Graph(
id="cases-graph"
),
dcc.Graph(
id="deaths-graph",
),
]),
])])])])
def dropdown(location, user_enabled, display):
return dcc.Dropdown(
id="location",
options=[
{"label": location, "value": location} for location in test_data["location"].unique()
],
value=location,
searchable=False,
disabled=user_enabled,
style={"display": display}
),
@app.callback(dash.dependencies.Output('page-content', 'children'),
[dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/africa':
return dropdown("Africa", True, "none")
elif pathname == '/asia':
return dropdown("Asia", True, "none")
elif pathname == '/europe':
return dropdown("Europe", True, "none")
elif pathname == '/northamerica':
return dropdown("North America", True, "none")
elif pathname == '/southamerica':
return dropdown("South America", True, "none")
elif pathname == '/oceania':
return dropdown("Oceania", True, "none")
else:
return dropdown("United Kingdom", False, "block")
@app.callback(
[
Output("cases-graph", "figure"), Output("deaths-graph", "figure"),
Output("death-stats", "children"), Output("cases-stats", "children"),
Output("vaccines-stats", "children")
],
[
# Input('month-picker', "start_date"),
# Input("month-picker", "end_date"),
Input("location", "value"),
],
)
def update_personal_output(value):
# start_date, end_date, ):
filtered_data_cases = test_data.loc[(test_data["location"] == value)]
# //& (test_data["date"] >= start_date) & (test_data["date"] <= end_date)]
fig_deaths = px.bar(filtered_data_cases, x="date", y=["new_deaths_smoothed"], color_discrete_sequence=["mediumaquamarine"], title=f"COVID Deaths - {value}", labels={"value": "Number of Deaths", "date": "Date", "variable": "Legend"})
fig_deaths.update_layout(title_x=0.5, legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
fig_deaths.add_scatter(x=filtered_data_cases["date"], y=filtered_data_cases["new_deaths_smoothed"].rolling(window=7, min_periods=7, center=True).mean().round(), name="Rolling Average")
fig_cases = px.bar(filtered_data_cases, x="date", y=["new_cases_smoothed"], color_discrete_sequence=["mediumaquamarine"], title=f"COVID Cases - {value}", labels={"value": "Number of Cases", "date": "Date", "variable": "Legend"})
fig_cases.update_layout(title_x=0.5, legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
fig_cases.add_scatter(x=filtered_data_cases["date"], y=filtered_data_cases["new_cases_smoothed"].rolling(window=7, min_periods=7, center=True).mean().round(), name="Rolling Average")
latest_deaths = f'{filtered_data_cases["new_deaths"].iloc[-1]:.0f} today'
latest_cases = f'{filtered_data_cases["new_cases"].iloc[-1]:.0f} today'
latest_vaccines = f'{filtered_data_cases["new_vaccinations"].iloc[-2]:.0f} today'
return fig_deaths, fig_cases, latest_deaths, latest_cases, latest_vaccines
if __name__ == "__main__":
app.run_server(debug=True, dev_tools_ui=False)
| 51.952381 | 250 | 0.440778 | 0 | 0 | 0 | 0 | 2,695 | 0.274468 | 0 | 0 | 2,624 | 0.267237 |
409ac3a28f63c2603ac7a86d7009827a8fa89371 | 979 | py | Python | dataset/load_data_queue.py | hezhujun/autofocus-rnn | dd21ec5cfce07990172048b74e5fc8e3d5b55229 | [
"MIT"
]
| 7 | 2020-08-19T01:32:34.000Z | 2021-12-06T07:31:32.000Z | dataset/load_data_queue.py | hezhujun/autofocus-rnn | dd21ec5cfce07990172048b74e5fc8e3d5b55229 | [
"MIT"
]
| 2 | 2021-01-28T07:35:45.000Z | 2021-06-20T14:19:01.000Z | dataset/load_data_queue.py | hezhujun/autofocus-rnn | dd21ec5cfce07990172048b74e5fc8e3d5b55229 | [
"MIT"
]
| null | null | null | from collections import OrderedDict
import skimage.io as io
from config import get_config
config = get_config()
class LRUCache:
def __init__(self, capacity: int):
self._ordered_dict = OrderedDict()
self._capacity = capacity
def get(self, key):
self._move_to_end_if_exist(key)
return self._ordered_dict.get(key)
def put(self, key, value):
self._move_to_end_if_exist(key)
self._ordered_dict[key] = value
if len(self._ordered_dict) > self._capacity:
key, value = self._ordered_dict.popitem(last=False)
del key
del value
def _move_to_end_if_exist(self, key):
if key in self._ordered_dict:
self._ordered_dict.move_to_end(key)
_cache = LRUCache(config["data_queue_len"])
def get_image(path):
# image = _cache.get(path)
image = None
if image is None:
image = io.imread(path)
# _cache.put(path, image)
return image
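# Minimal usage sketch of the LRU cache above; the file names and the capacity are
# made-up example values (get_image() itself needs a real image path on disk).
if __name__ == "__main__":
    demo_cache = LRUCache(capacity=2)
    demo_cache.put("a.png", "image-a")
    demo_cache.put("b.png", "image-b")
    demo_cache.get("a.png")             # touch "a.png" so it becomes most recently used
    demo_cache.put("c.png", "image-c")  # exceeds capacity, evicts the least recently used "b.png"
    print(demo_cache.get("b.png"))      # -> None, "b.png" was dropped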
| 23.309524 | 63 | 0.6476 | 639 | 0.652707 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.068437 |
409ad3c2aaa2132563a0928975965afc50081365 | 1,852 | py | Python | algs/astar.py | jakedolan443/search-algorithm-visualizer | 331c22886ef8017add16bc63a8e75df9643f4fe9 | [
"MIT"
]
| null | null | null | algs/astar.py | jakedolan443/search-algorithm-visualizer | 331c22886ef8017add16bc63a8e75df9643f4fe9 | [
"MIT"
]
| null | null | null | algs/astar.py | jakedolan443/search-algorithm-visualizer | 331c22886ef8017add16bc63a8e75df9643f4fe9 | [
"MIT"
]
| null | null | null | import numpy
from heapq import *
import time
def heuristic(a, b):
return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2
def astar(canvas, array, start, goal):
neighbours = [(0, 1), (0, -1), (1, 0), (-1, 0)]
close_set = set()
came_from = {}
gscore = {start: 0}
fscore = {start: heuristic(start, goal)}
heap_lst = []
heappush(heap_lst, (fscore[start], start))
canvas.in_search = True
while heap_lst:
current = heappop(heap_lst)[1]
if current == goal:
path = []
while current in came_from:
path.append(current)
current = came_from[current]
canvas.finish_search(path)
canvas.in_search = False
return path
close_set.add(current)
for w,h in neighbours:
neighbour = current[0] + w, current[1] + h
temp_g_score = gscore[current] + heuristic(current, neighbour)
if 0 <= neighbour[0] < array.shape[0]:
if 0 <= neighbour[1] < array.shape[1]:
if array[neighbour[0]][neighbour[1]] == 1:
continue
else:
continue
else:
continue
if neighbour in close_set and temp_g_score >= gscore.get(neighbour, 0):
continue
if temp_g_score < gscore.get(neighbour, 0) or neighbour not in [i[1] for i in heap_lst]:
canvas.highlight(neighbour)
time.sleep(canvas.get_root().options['speed']/1000)
came_from[neighbour] = current
gscore[neighbour] = temp_g_score
fscore[neighbour] = temp_g_score + heuristic(neighbour, goal)
heappush(heap_lst, (fscore[neighbour], neighbour))
canvas.in_search = False
return False
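# Minimal usage sketch (an assumption, not part of the visualizer): the stub below only
# provides the canvas attributes astar() touches (in_search, highlight, finish_search,
# get_root), so the search can run on a plain numpy grid where 1 marks a wall.
class _StubCanvas:
    def __init__(self):
        self.in_search = False
        self.options = {'speed': 0}
    def get_root(self):
        return self
    def highlight(self, cell):
        pass
    def finish_search(self, path):
        pass
if __name__ == "__main__":
    grid = numpy.zeros((5, 5))
    grid[2][1:4] = 1  # a short wall the path has to route around
    print(astar(_StubCanvas(), grid, (0, 0), (4, 4)))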
| 30.360656 | 100 | 0.532937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.00378 |
409bc944bcc8474410d41d3c5ed935bde146869f | 1,119 | py | Python | examples/serial_client.py | marcinbor85/qupy | 219563523c975d1d5ae2aa47bbd02862c906ab43 | [
"MIT"
]
| null | null | null | examples/serial_client.py | marcinbor85/qupy | 219563523c975d1d5ae2aa47bbd02862c906ab43 | [
"MIT"
]
| null | null | null | examples/serial_client.py | marcinbor85/qupy | 219563523c975d1d5ae2aa47bbd02862c906ab43 | [
"MIT"
]
| null | null | null | import logging
import time
from qupy.framing.slip import Slip
from qupy.interface.serial import SerialPort
from qupy.interface.errors import InterfaceTimeoutError, InterfaceIOError, InterfaceError
from qupy.comm.client import CommClient
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
s = SerialPort()
f = Slip()
c = CommClient(s, f)
connect = True
while True:
if connect:
try:
s.open()
except InterfaceIOError as e:
time.sleep(1.0)
continue
c.start()
connect = False
try:
print('ask...')
data = input()
d = c.ask(data.encode('utf-8'))
print('data:',d)
if len(d) > 0 and d[0] == ord('p'):
break
except InterfaceIOError as e:
print('ask io error', str(e))
c.stop()
s.close()
connect = True
except InterfaceTimeoutError as e:
print('timeout')
c.stop()
s.close()
| 22.836735 | 89 | 0.513852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.051832 |
409c909153fb2318680014346b00ba060e9d1ace | 699 | py | Python | summary/abs_summarization.py | solarpark7346/sukjulyo | 52caaa7f49294898b055062d7c0b2cb5c5393c24 | [
"MIT"
]
| null | null | null | summary/abs_summarization.py | solarpark7346/sukjulyo | 52caaa7f49294898b055062d7c0b2cb5c5393c24 | [
"MIT"
]
| null | null | null | summary/abs_summarization.py | solarpark7346/sukjulyo | 52caaa7f49294898b055062d7c0b2cb5c5393c24 | [
"MIT"
]
| 3 | 2021-10-31T08:23:44.000Z | 2022-01-13T03:59:22.000Z | import torch
from transformers import PreTrainedTokenizerFast
from transformers import BartForConditionalGeneration
class AbsSummarization():
def __init__(self):
self.tokenizer = PreTrainedTokenizerFast.from_pretrained('gogamza/kobart-summarization')
self.model = BartForConditionalGeneration.from_pretrained('gogamza/kobart-summarization')
def predict(self, text):
raw_input_ids = self.tokenizer.encode(text)
input_ids = [self.tokenizer.bos_token_id] + raw_input_ids + [self.tokenizer.eos_token_id]
summary_ids = self.model.generate(torch.tensor([input_ids]))
return self.tokenizer.decode(summary_ids.squeeze().tolist(), skip_special_tokens=True)
abs_summary = AbsSummarization() | 38.833333 | 91 | 0.815451 | 548 | 0.783977 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.085837 |
409d329c8dc7ebfbbdbfdb66ef4f8976ba9ec528 | 12,413 | py | Python | dp_tornado/helper/io/image/__init__.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
]
| 18 | 2015-04-07T14:28:39.000Z | 2020-02-08T14:03:38.000Z | dp_tornado/helper/io/image/__init__.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
]
| 7 | 2016-10-05T05:14:06.000Z | 2021-05-20T02:07:22.000Z | dp_tornado/helper/io/image/__init__.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
]
| 11 | 2015-12-15T09:49:39.000Z | 2021-09-06T18:38:21.000Z | # -*- coding: utf-8 -*-
import tempfile
from dp_tornado.engine.helper import Helper as dpHelper
class ImageHelper(dpHelper):
def compare(self, i1, i2, error=0):
i1 = self.load(i1)
i2 = self.load(i2)
if not i1 or not i2:
return None
s1 = i1.size
s2 = i2.size
if s1[0] != s2[0] or s2[1] != s2[1]:
print('size ne,', s1, s2)
return False
i1 = i1.load()
i2 = i2.load()
for i in range(s1[0]):
for j in range(s1[1]):
if i1[i, j] != i2[i, j]:
if error:
for k in range(len(i1[i, j])):
if abs(i1[i, j][k] - i2[i, j][k]) > error:
print('pixel ne,', i1[i, j], i2[i, j], abs(i1[i, j][k] - i2[i, j][k]), error)
return False
else:
return False
return True
def _driver(self, options=None, **kwargs):
if not options and kwargs:
options = kwargs
if options and 'driver' in options and options['driver'] == 'wand':
return self.helper.io.image.driver.wand
return self.helper.io.image.driver.pillow
def load(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
tmp = None
drivers = []
pillow_image = self.helper.io.image.driver.pillow.Image
wand_image = self.helper.io.image.driver.wand.Image
if pillow_image:
drivers.append(pillow_image)
if wand_image:
drivers.append(wand_image)
try:
if isinstance(src, tuple(drivers)):
return src
elif self.helper.web.url.validate(src):
code, res = self.helper.web.http.get.raw(src)
if code != 200:
raise Exception('The specified image url is invalid.')
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(res)
tmp.close()
tmp = tmp.name
else:
tmp = None
if not tmp and not src:
raise Exception('The specified image is invalid.')
img = self._driver(options=options).load(tmp if tmp else src)
if not img:
raise Exception('The specified image is invalid.')
return img
except Exception as e:
self.logging.exception(e)
return False
finally:
if tmp:
self.helper.io.file.remove(tmp)
def execute(self, src, fn, options=None, **kwargs):
if not options and kwargs:
options = kwargs
img = self.load(src, options=options)
if not img:
return False
try:
return fn(img, options)
except Exception as e:
self.logging.exception(e)
return False
def size(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
if not img:
return -1, -1
return img.width, img.height
return self.execute(src, fn, options=options)
def crop(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
crop = kwargs['crop'] if 'crop' in kwargs else None
if not crop:
return img
e_top = 0
e_left = 0
e_right = 0
e_bottom = 0
if self.helper.misc.type.check.string(crop):
crop = crop.split(',')
crop = [int(e.strip()) for e in crop]
if self.helper.misc.type.check.numeric(crop):
e_top = e_left = e_right = e_bottom = crop
elif isinstance(crop, (tuple, list)):
if len(crop) == 1:
e_top = e_left = e_right = e_bottom = crop[0]
elif len(crop) == 2:
e_top = e_bottom = crop[0]
e_left = e_right = crop[1]
elif len(crop) == 4:
e_top = crop[0]
e_right = crop[1]
e_bottom = crop[2]
e_left = crop[3]
img = self._driver(options=kwargs).crop(img, e_left, e_top, img.size[0] - e_right, img.size[1] - e_bottom)
return img
return self.execute(src, fn, options=options)
def border(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not border:
return img
if '_org' in kwargs and 'radius' in kwargs and kwargs['radius']:
return img
img = self._driver(options=kwargs).border(img, border, border_color)
return img
return self.execute(src, fn, options=options)
def radius(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
radius = int(kwargs['radius'] or 0) if 'radius' in kwargs else None
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not radius:
return img
elif '__radius_processed__' in img.__dict__:
return img
img = self._driver(options=kwargs).radius(img, radius, border, border_color)
img.__dict__['__radius_processed__'] = True
return img
return self.execute(src, fn, options=options)
def colorize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
colorize = kwargs['colorize'] if 'colorize' in kwargs else None
if not colorize:
return img
img = self._driver(options=kwargs).colorize(img, colorize)
return img
return self.execute(src, fn, options=options)
def resize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
size = kwargs['size'] if 'size' in kwargs else None
mode = kwargs['mode'] if 'mode' in kwargs else None
scale = int(kwargs['scale']) if 'scale' in kwargs else 1
limit = True if 'limit' in kwargs and kwargs['limit'] else False
border = int(kwargs['border']) if 'border' in kwargs else 0
if not size:
return img
width_new, height_new = size
width_origin, height_origin = img.size
if scale > 1:
if limit:
scale_max_width = float(width_origin) / float(width_new)
scale_max_height = float(height_origin) / float(height_new)
scale_max = min(scale, scale_max_width, scale_max_height)
else:
scale_max = scale
if scale_max > 1:
width_new = int(width_new * scale_max)
height_new = int(height_new * scale_max)
if not width_new:
width_new = width_origin * height_new / height_origin
mode = self.helper.io.image.mode.resize
if not height_new:
height_new = height_origin * width_new / width_origin
mode = self.helper.io.image.mode.resize
if border:
width_new -= border * 2
height_new -= border * 2
if not mode:
mode = self.helper.io.image.mode.resize
if mode not in self.helper.io.image.mode.modes:
raise Exception('The specified mode is not supported.')
seqs = []
for i, im in self._driver(options=kwargs).iter_seqs(img, kwargs):
# Image Resizing
if mode == self.helper.io.image.mode.center:
im = self._driver(options=kwargs).resize(im, width_new, height_new, kwargs)
elif mode == self.helper.io.image.mode.fill:
ratio_origin = float(width_origin) / float(height_origin)
ratio_new = float(width_new) / float(height_new)
if ratio_origin > ratio_new:
tw = int(round(height_new * ratio_origin))
im = self._driver(options=kwargs).resize(im, tw, height_new)
left = int(round((tw - width_new) / 2.0))
im = self._driver(options=kwargs).crop(im, left, 0, left + width_new, height_new)
elif ratio_origin < ratio_new:
th = int(round(width_new / ratio_origin))
im = self._driver(options=kwargs).resize(im, width_new, th)
top = int(round((th - height_new) / 2.0))
im = self._driver(options=kwargs).crop(im, 0, top, width_new, top + height_new)
else:
im = self._driver(options=kwargs).resize(im, width_new, height_new)
elif mode == self.helper.io.image.mode.resize:
if width_new > width_origin or height_new > height_origin:
width_new = width_origin
height_new = height_origin
im = self._driver(options=kwargs).resize(im, width_new, height_new)
seqs.append(im)
img = seqs[0]
seqs.remove(img)
img.__dict__['__frames__'] = seqs
return img
return self.execute(src, fn, options=options)
def save(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
ext = kwargs['format'] if 'format' in kwargs else None
dest = kwargs['dest'] if 'dest' in kwargs else None
if not dest:
return None
if not ext and self.helper.misc.type.check.string(dest):
ext = self.helper.io.path.ext(dest, dot='').lower()
if not ext and self.helper.misc.type.check.string(src):
ext = self.helper.io.path.ext(src, dot='').lower()
if not ext and '_org' in kwargs and kwargs['_org'] and self.helper.misc.type.check.string(kwargs['_org']):
ext = self.helper.io.path.ext(kwargs['_org'], dot='').lower()
if dest == 's3':
# TODO
return False
if not self._driver(options=kwargs).save(img, ext, dest, kwargs):
return False
return True
return self.execute(src, fn, options=options)
def manipulate(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
options['_org'] = src
try:
img = self.load(src, options=options)
# Crop
img = self.crop(img, options=options)
if not img:
return False
# Resize
img = self.resize(img, options=options)
if not img:
return False
# Radius
img = self.radius(img, options=options)
if not img:
return False
# Border
img = self.border(img, options=options)
if not img:
return False
# Colorize
img = self.colorize(img, options=options)
if not img:
return False
# Save
saved = self.save(img, options=options)
if saved is None:
return img
elif saved is False:
return False
return True
except Exception as e:
self.logging.exception(e)
return False
| 30.573892 | 118 | 0.512688 | 12,311 | 0.991783 | 0 | 0 | 0 | 0 | 0 | 0 | 646 | 0.052042 |
409e06685c9ecbd99f82a4b27833a85d0c5a9b1e | 4,385 | py | Python | script.py | triethyl/wbut-results-parsed | 9ca8f5dd6afab1eb2b0436093b3a20e6e07f923d | [
"MIT"
]
| 1 | 2019-02-28T05:32:51.000Z | 2019-02-28T05:32:51.000Z | script.py | triethyl/wbut-results-parsed | 9ca8f5dd6afab1eb2b0436093b3a20e6e07f923d | [
"MIT"
]
| null | null | null | script.py | triethyl/wbut-results-parsed | 9ca8f5dd6afab1eb2b0436093b3a20e6e07f923d | [
"MIT"
]
| 2 | 2019-03-15T19:40:17.000Z | 2019-05-24T17:15:59.000Z | import requests
from bs4 import BeautifulSoup
import json
import re
# Range of Roll Number - User Input
start_roll = int(input("Starting Roll Number: "))
end_roll = int(input("Ending Roll Number: "))
# Semester - User Input
sem = int(input("Which Semester[1-8]: "))
# Verbosity
verbose = int(input("Verbosity Level (1 for just data, 2 for detailed data): "))
# Roll Number Tuple
roll_tuple = tuple(range(start_roll, end_roll+1))
# Getting the Websites
result_url = 'https://makaut.ucanapply.com/smartexam/public/result-details'
get_result_details = 'https://makaut.ucanapply.com/smartexam/public//get-result-details'
# Semester Codes
semcode = ('SM01', 'SM02', 'SM03', 'SM04', 'SM05', 'SM06', 'SM07', 'SM08')
def get_marks_of(rollNo, semester):
# Handle session cookies appropriately
s = requests.Session()
with s.get(result_url) as r:
while r.status_code != 200:
r = s.get(result_url)
# Parse CSRF-Token
soup = BeautifulSoup(r.text, 'html.parser')
csrf_token = soup.find("meta", {"name":"csrf-token"})['content']
# Create dict for post request
form_data = {'_token': csrf_token, 'p1':'', 'ROLLNO':str(rollNo), 'SEMCODE':semcode[semester-1], 'examtype':'result-details', 'all':''}
# Get Result Data
with s.post(get_result_details, data=form_data) as r:
while r.status_code != 200:
r = s.post(get_result_details, data=form_data)
result_data = json.loads(r.text)['html']
soup = BeautifulSoup(result_data, 'html.parser')
result_data = soup.find("div", {"id":"page-wrap"})
try:
result_data = result_data.get_text()
except AttributeError:
# This result has not yet been published
return
# Basic Data
name = re.findall("Name[^a-zA-Z]*([a-zA-Z ]+)", result_data)[0]
stream = re.findall("B.Tech[^A-Z]*([A-Z]+)", result_data)[0]
roll_num = re.findall("Roll[^0-9]*([0-9]+)", result_data)[0]
reg_num, batch = re.findall("Registration[^0-9]*([0-9]+) OF ([0-9-]+)", result_data)[0]
# Subject Data
def get_subject_data(result_data):
re_mp_fl = [ i for i in filter(lambda x: x!='', [i for i in map(lambda x: x.strip(), re.findall("([^\n]+)", result_data))])]
for i in range(re_mp_fl.index("Subject Code")+6, re_mp_fl.index("Total"),6):
yield(tuple([re_mp_fl[j] for j in range(i, i+6)]))
subject_data = get_subject_data(result_data)
# SGPA YGPA MAR - Prone to errors for odd and even sem
sgpa_odd, odd_year, sgpa_even, even_year, ygpa, cgpa = -1, -1, -1, -1, -1, -1
try:
sgpa_odd = re.findall("ODD\.*\s*\(.*\)[^0-9.]*([0-9.]+)", result_data)[0]
odd_year = re.findall("ODD[^0-9]*([0-9])", result_data)[0]
sgpa_even = re.findall("EVEN\s*\(.*\)[^0-9.]*([0-9.]+)", result_data)[0]
even_year = re.findall("EVEN[^0-9]*([0-9])", result_data)[0]
ygpa = re.findall("YGPA[^0-9]*([0-9.]+)", result_data)[0]
cgpa = re.findall("DGPA[^EVEN]*EVEN\s*\(.*\)[^0-9.]*[0-9.]+\s*([0-9.]+)[^YGPA]*YGPA", result_data)[0]
except IndexError:
pass
return {
'name': name,
'stream': stream,
'roll': roll_num,
'reg_num': reg_num,
'batch': batch,
'marks_per_subject': subject_data,
'sgpa_odd': sgpa_odd,
'odd_year': odd_year,
'sgpa_even': None if sgpa_even == -1 else sgpa_even,
'even_year': None if even_year == -1 else even_year,
'ygpa': None if ygpa == -1 else ygpa,
'cgpa': None if cgpa == -1 else cgpa
}
def print_marks_properly(roll, sem):
data = get_marks_of(roll, sem)
    if data is not None:  # get_marks_of returns None when the result has not been published
for key, value in data.items():
if key == 'marks_per_subject':
print(key,"->")
for x in value:
print(x)
else:
print(key, "->", value)
if verbose == 1:
    # Display most recent
for roll in roll_tuple:
data = get_marks_of(roll, sem)
try:
print(f"({data['name']}, {data['sgpa_odd' if sem%2!=0 else 'sgpa_even']})")
except:
pass
elif verbose == 2:
for roll in roll_tuple:
print_marks_properly(roll, sem)
else:
print("[!] Verbosity Level Wrong!")
| 35.650407 | 140 | 0.575143 | 0 | 0 | 2,909 | 0.663398 | 0 | 0 | 0 | 0 | 1,416 | 0.322919 |
409f7a2dc9434e9656e7bedb75a00b02b076a630 | 1,411 | py | Python | cartoonify.py | adl1995/image-processing-filters | 850e4a6e23ef0f3843cc306cf1e42569f705f07e | [
"MIT"
]
| null | null | null | cartoonify.py | adl1995/image-processing-filters | 850e4a6e23ef0f3843cc306cf1e42569f705f07e | [
"MIT"
]
| null | null | null | cartoonify.py | adl1995/image-processing-filters | 850e4a6e23ef0f3843cc306cf1e42569f705f07e | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
__author__ = "Adeel Ahmad"
__email__ = "[email protected]"
__status__ = "Production"
import matplotlib.pyplot as plt
import numpy as np
import skimage as ski
import Image
def cartoonify(im, display=False):
"""
    The function receives an image, computes its gradient magnitude, and adds it
    back to the original image to return a semi-cartoon image.
    Note: You will have to scale the gradient-magnitude image
    before adding it back to the input image.
Input:
im: input image to cartoonify
display: whether to display image or not...
NOTE: This function expects a gaussian filtered image
"""
kernel, kern_size = np.array([[-1,-1,-1] ,[0,0,0] ,[1,1,1]]), 3
gx, gy = np.zeros_like(im, dtype=float), np.zeros_like(im, dtype=float)
for i in range(im.shape[0] - (kern_size-1)):
for j in range(im.shape[1] - (kern_size-1)):
window = im[i:i + kern_size, j:j + kern_size]
gx[i,j], gy[i,j] = np.sum(window * kernel.T), np.sum(window * kernel)
magnitude = np.sqrt(gx**2 + gy**2)
magnitude = magnitude.astype(np.int64, copy=False)
cartoon = im + (im + magnitude)
if display == 1:
plt.imshow(cartoon, cmap='gray')
plt.suptitle('Cartoon')
plt.show()
return cartoon
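# Minimal usage sketch, assuming scikit-image is available for a sample picture;
# cartoonify() expects a Gaussian-filtered image, so the input is blurred first.
if __name__ == "__main__":
    from skimage import data, filters
    blurred = filters.gaussian(data.camera(), sigma=2) * 255
    cartoonify(blurred, display=True)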
| 31.355556 | 255 | 0.59674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.345145 |
40a00c80a3cc741480575d8150f065c48c9b4231 | 4,341 | py | Python | keymapper/__init__.py | rburns629/KeyMapper | ba1f463bdfa7710f3b9487974874db9424632d85 | [
"MIT"
]
| null | null | null | keymapper/__init__.py | rburns629/KeyMapper | ba1f463bdfa7710f3b9487974874db9424632d85 | [
"MIT"
]
| null | null | null | keymapper/__init__.py | rburns629/KeyMapper | ba1f463bdfa7710f3b9487974874db9424632d85 | [
"MIT"
]
| null | null | null | from dataclasses import dataclass
import json
import re
@dataclass
class KeyMapper(dict):
"""
Example:
        km = KeyMapper({'messages': {'message1': 'Hello World!'}})
print(km['messages.message1'])
Variables:
__delimiter__ is set to dot-notation by default, unless specified otherwise.
"""
__delimiter__ = "." # Default
__schema__ = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if kwargs:
if 'delimiter' in kwargs:
self.__delimiter__ = kwargs['delimiter']
elif 'schema' in kwargs:
self.__schema__ = kwargs['schema']
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
if self.__schema__:
if self.__schema__[k] == type(v):
self.__dict__.update({k: v})
else:
raise ValueError(
f'TypeMismatchError: value {type(v)} does not match type {type(self.__schema__[k])} defined in schema')
else:
self.__dict__.update({k: v})
def __repr__(self):
return '{}(dict={})'.format(self.__class__, self.__dict__)
def __str__(self):
return '{}'.format(self.__dict__)
def __getattr__(self, attr):
try:
return self.get(attr)
except Exception as e:
raise e
def __setattr__(self, key, value):
try:
self.__setitem__(key, value)
except Exception as e:
raise e
def __delattr__(self, item):
try:
self.__delitem__(item)
except Exception as e:
raise e
def __getitem__(self, key):
try:
if self.__delimiter__ in key:
return self.__mapper__(self.__dict__, key.split(self.__delimiter__), self.__getitem__.__name__)
else:
return self.get(key)
except Exception as e:
raise e
def __setitem__(self, key, value):
try:
if self.__delimiter__ in key:
self.__mapper__(self.__dict__, key.split(
self.__delimiter__), self.__setitem__.__name__, value)
else:
super().__setitem__(key, value)
self.__dict__.update({key: value})
except Exception as e:
raise e
def __delitem__(self, key):
try:
if self.__delimiter__ in key:
self.__mapper__(self.__dict__, key.split(
self.__delimiter__), self.__delitem__.__name__)
else:
super().__delitem__(key)
del self.__dict__[key]
except Exception as e:
raise e
def pprint(self, *args):
try:
if len(args) > 0:
return json.dumps(args[0], indent=4, ensure_ascii=False)
return json.dumps(self, indent=4, ensure_ascii=False)
except Exception as e:
raise e
@classmethod
def __mapper__(cls, d, m, callback, *args, **kwargs):
for i, k in enumerate(m):
key = k if not re.search(r'^[0-9]+$', k) else int(k)
try:
if str(key) in d or type(key) == int and d[key]:
if str(key) != m[-1] or i != len(m) - 1:
return cls.__mapper__(d[key], m[1:], callback, *args, **kwargs)
elif str(key) == m[-1] and i == len(m) - 1:
if callback == '__setitem__':
d[key] = args[0]
return None
elif callback == '__delitem__':
del d[key]
return None
else:
return d[key]
except Exception as e:
raise e
else:
if i == len(m) - 1:
if callback == '__setitem__':
d[m[-1]] = args[0]
return None
else:
raise KeyError('{}'.format(m[i]))
else:
if callback == '__getitem__':
return d
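# Minimal usage sketch of the dot-notation access; the keys below are example values.
if __name__ == "__main__":
    km = KeyMapper({'messages': {'message1': 'Hello World!'}})
    print(km['messages.message1'])        # nested read through the "." delimiter
    km['messages.message2'] = 'Goodbye'   # nested write creates the missing key
    print(km.pprint())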
| 33.651163 | 135 | 0.476618 | 4,272 | 0.984105 | 0 | 0 | 4,283 | 0.986639 | 0 | 0 | 467 | 0.107579 |
40a02814845a829728726e29b79dfead7feb2132 | 3,401 | py | Python | PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | [
"MIT"
]
| null | null | null | PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | [
"MIT"
]
| null | null | null | PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Demo37_PythonforDataScience.ipynb
# PYTHON FOR DATA SCIENCE
We will take our Python programming skills a step further and use them to process large data. Python is an excellent language for deployment. Hence we will be using open source data during the learning process!!
This will make sure we understand the challenges a Data Scientist can face and how to deal with them. In my experience, Data Preprocessing takes 70% of the time in any project. Hence it is crucial for any Data Scientist to know what it is and how it is done.
This may be the boring portion of the course but I assure you, you will feel accomplished by the end of this tutorial.
- Python Basics
- Object Oriented Python
- **Python for Data Science**
- NumPy
- Pandas
- Plotting
- Matplotlib
- Seaborn
Let's get coding !!
"""
#Variables can not start with a number
# 12var = 1  # SyntaxError: identifiers cannot start with a digit
_13var = 1
name = "Mahnoor"
surname = "Anjum"
age = 21
print("I'm {} {} and I am {} years old.".format(name, surname, age))
name = "Mahnoor"
surname = "Anjum"
age = 21
print("I'm {_1} {_2} and I am {_3} years old.".format(_1 = name, _2= surname, _3 = age))
"""### INDEXING AND SLICING
One of the most important Python concepts for data scientists is the slicing operator ':'
"""
str = "ONE TWO THREE FOUR FIVE"
print(str[0])
print(str[5])
print(str[len(str)-1])
str[:5]
str[5:]
# str[1] = "a"  # TypeError: Python strings are immutable, so item assignment fails
nested = [1,2,3,['_1','_2','_3',['__1']]]
nested[0]
nested[3][0]
len(nested)
len(nested[3])
nested[3][3]
nested[3][3][0]
dict = {'key1':'value1', \
'key2': 'value2', \
'key3':'value3'}
dict['key1']
T = True
F = False
var = 10
for i in range(var):
print(i)
for i in range(var):
bool = (i==2)
if bool:
break
print(i)
[1,2,3,1,1,2,3,4]
(1,2,3,1,1,2,3,4)
{1,2,3,1,1,2,3,4}
new_set = set([1,2,3,1,1,2,3,4])
new_set.add(5)
new_set
for item in new_set:
print(item)
list(range(4))
my_list = list(range(5,10))
output = []
for number in my_list:
output.append(number**3)
output
output = [num**3 for num in my_list]
output
"""### FUNCTIONS"""
def my_function(parameter):
print(parameter)
my_function("Jalebi (Hungry okay?)")
def my_function(parameter="Default"):
print(parameter)
my_function()
num = 4
def change(par):
par =5
return par
change(num)
num
num = 4
def change(par):
par =5
return par
change(num)
num
num = [4]
def change(par):
par.append(5)
del par[0]
return par
change(num)
num
my_list
"""### LAMBDA EXPRESSIONS"""
def square(x): return x*x
list(map(square, my_list))
list(map(lambda x:x*x, my_list))
"""### BUILT-IN FUNCTIONS"""
s = "We have a hulk !!!"
s.lower()
s.upper()
s.split()
dict = {'key1':1,'key2':2}
dict.keys()
dict.values()
dict.items()
my_list.pop()
my_list
"""### TUPLE UNPACKING"""
list_of_tuples =[(1,2),(3,4),(5,6)]
for (a,b) in list_of_tuples:
print (a)
print (b)
"""### WELCOME TO THE END OF THE TUTORIAL
You made it!! Hope you enjoyed taking this tutorial as much as I enjoyed coding it. From the next tutorial, we will be starting our first Data Science Library called NumPy. Until then, happy coding.
---------------------------------------------------------------------------------
Copyrights © 2018, All Rights Reserved.
- Author: Mahnoor Anjum.
- Course: The Complete Hands-On Machine Learning Course
- Date Created: 2018-06-27
- Date Modified: -
""" | 18.284946 | 259 | 0.643046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,852 | 0.544386 |
40a0b272172c34d79349dc117521f3988050cbff | 4,401 | py | Python | quantrocket/db.py | Jay-Jay-D/quantrocket-client | b70ac199382d22d56fad923ca2233ce027f3264a | [
"Apache-2.0"
]
| null | null | null | quantrocket/db.py | Jay-Jay-D/quantrocket-client | b70ac199382d22d56fad923ca2233ce027f3264a | [
"Apache-2.0"
]
| null | null | null | quantrocket/db.py | Jay-Jay-D/quantrocket-client | b70ac199382d22d56fad923ca2233ce027f3264a | [
"Apache-2.0"
]
| 1 | 2019-06-12T11:34:27.000Z | 2019-06-12T11:34:27.000Z | # Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
def list_databases(service=None):
"""
List databases.
Parameters
----------
service : str, optional
only list databases for this service
Returns
-------
list
list of databases
"""
params = {}
if service:
params["service"] = service
response = houston.get("/db/databases", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_list_databases(*args, **kwargs):
return json_to_cli(list_databases, *args, **kwargs)
def download_database(database, outfile):
"""
Download a database from the db service and write to a local file.
Parameters
----------
database : str, required
the filename of the database (as returned by the list_databases)
outfile: str, required
filename to write the database to
Returns
-------
None
"""
response = houston.get("/db/databases/{0}".format(database), stream=True)
houston.raise_for_status_with_json(response)
with open(outfile, "wb") as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
def _cli_download_database(*args, **kwargs):
return json_to_cli(download_database, *args, **kwargs)
def s3_push_databases(service, codes=None):
"""
Push database(s) to Amazon S3.
Parameters
----------
    service : str, required
only push databases for this service (specify 'all' to push all services)
codes: list of str, optional
only push databases identified by these codes (omit to push all databases for service)
Returns
-------
json
status message
"""
data = {}
if codes:
data["codes"] = codes
response = houston.put("/db/s3/{0}".format(service), data=data)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_s3_push_databases(*args, **kwargs):
return json_to_cli(s3_push_databases, *args, **kwargs)
def s3_pull_databases(service, codes=None, force=False):
"""
Pull database(s) from Amazon S3 to the db service.
Parameters
----------
    service : str, required
only pull databases for this service (specify 'all' to pull all services)
codes: list of str, optional
only pull databases identified by these codes (omit to pull all databases for service)
force: bool
overwrite existing database if one exists (default is to fail if one exists)
Returns
-------
json
status message
"""
params = {}
if codes:
params["codes"] = codes
if force:
params["force"] = force
response = houston.get("/db/s3/{0}".format(service), params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_s3_pull_databases(*args, **kwargs):
return json_to_cli(s3_pull_databases, *args, **kwargs)
def optimize_databases(service, codes=None):
"""
Optimize database file(s) to improve performance.
Parameters
----------
    service : str, required
only optimize databases for this service (specify 'all' to optimize all services)
codes: list of str, optional
only optimize databases identified by these codes (omit to optimize all databases for service)
Returns
-------
json
status message
"""
data = {}
if codes:
data["codes"] = codes
response = houston.post("/db/optimizations/{0}".format(service), data=data)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_optimize_databases(*args, **kwargs):
return json_to_cli(optimize_databases, *args, **kwargs)
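# Minimal usage sketch (requires a running QuantRocket/houston deployment); the
# service name and file paths below are example values, not part of this module.
if __name__ == "__main__":
    for db in list_databases(service="history"):
        print(db)
    download_database("quantrocket.history.example.sqlite", "/tmp/example.sqlite")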
| 29.536913 | 102 | 0.667803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,492 | 0.566235 |
40a0c02ad22b941af8159f65f284f536c99461a2 | 3,889 | py | Python | ink2canvas/GradientHelper.py | greipfrut/pdftohtml5canvas | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | [
"MIT"
]
| 4 | 2016-05-06T21:29:39.000Z | 2020-02-25T08:47:48.000Z | ink2canvas/GradientHelper.py | letw/pdftohtml5canvas | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | [
"MIT"
]
| null | null | null | ink2canvas/GradientHelper.py | letw/pdftohtml5canvas | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | [
"MIT"
]
| null | null | null | from ink2canvas.lib.simpletransform import parseTransform
class GradientHelper(object):
def __init__(self, abstractShape):
self.abstractShape = abstractShape
def hasGradient(self, key):
style = self.abstractShape.getStyle()
if key in style:
styleParamater = style[key]
if styleParamater.startswith("url(#linear"):
return "linear"
if styleParamater.startswith("url(#radial"):
return "radial"
return None
def getGradientHref(self, key):
style = self.abstractShape.getStyle()
if key in style:
return style[key][5:-1]
return
def setGradientFill(self):
gradType = self.hasGradient("fill")
if (gradType):
gradient = self.setComponentGradient("fill", gradType)
self.abstractShape.canvasContext.setFill("gradient=grad")
if(self.hasGradientTransform(gradient)):
self.abstractShape.canvasContext.fill();
self.abstractShape.canvasContext.restore()
return True
def setGradientStroke(self):
gradType = self.hasGradient("stroke")
if (gradType):
gradient = self.setComponentGradient("stroke", gradType)
self.abstractShape.canvasContext.setStroke("gradient=grad")
if(self.hasGradientTransform(gradient)):
self.abstractShape.canvasContext.stroke();
self.abstractShape.canvasContext.restore()
return True
def hasGradientTransform(self, gradient):
return bool(gradient.attr("gradientTransform"))
def setGradientTransform(self, gradient):
dataString = gradient.attr("gradientTransform")
dataMatrix = parseTransform(dataString)
m11, m21, dx = dataMatrix[0]
m12, m22, dy = dataMatrix[1]
self.abstractShape.canvasContext.transform(m11, m12, m21, m22, dx, dy)
def setComponentGradient(self, key, gradType):
gradientId = self.getGradientHref(key)
if(gradType == "linear"):
gradient = self.abstractShape.rootTree.getLinearGradient(gradientId)
if(gradType == "radial"):
gradient = self.abstractShape.rootTree.getRadialGradient(gradientId)
if(gradient.link != None):
gradient.colorStops = self.abstractShape.rootTree.getLinearGradient(gradient.link).colorStops
if(self.hasGradientTransform(gradient)):
self.abstractShape.canvasContext.save()
self.setGradientTransform(gradient)
if(gradType == "linear"):
x1, y1, x2, y2 = gradient.getData()
self.abstractShape.canvasContext.createLinearGradient("grad", x1, y1, x2, y2)
if(gradType == "radial"):
cx, cy, fx, fy, r = gradient.getData()
self.abstractShape.canvasContext.createRadialGradient("grad", cx, cy, 0, fx, fy, r)
for stopKey, stopValue in gradient.colorStops.iteritems():
offset = float(stopKey)
color = self.abstractShape.canvasContext.getColor(stopValue.split(";")[0].split(":")[1] , stopValue.split(";")[1].split(":")[1] )
self.abstractShape.canvasContext.addColorStop("grad", offset, color)
return gradient
def createLinearGradient(self):
x1, y1, x2, y2 = self.gradient.getData()
self.abstractShape.canvasContext.createLinearGradient("grad", x1, y1, x2, y2)
for stop in self.gradient.stops:
color = self.canvasContext.getColor(stop.split(";")[0].split(":")[1] , stop.split(";")[1].split(":")[1])
offset = float(stop.split(";")[2].split(":")[1])
self.abstractShape.canvasContext.addColorStop("grad", offset, color) | 43.696629 | 141 | 0.609154 | 3,826 | 0.9838 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.059141 |
40a15dfaa12f9d16539bfd378e8a390a22b70eb7 | 529 | py | Python | project/manage.py | yosukesuzuki/let-me-notify | 39f50214403822712329c1cd953167d6e9b315d6 | [
"MIT"
]
| null | null | null | project/manage.py | yosukesuzuki/let-me-notify | 39f50214403822712329c1cd953167d6e9b315d6 | [
"MIT"
]
| null | null | null | project/manage.py | yosukesuzuki/let-me-notify | 39f50214403822712329c1cd953167d6e9b315d6 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Kay management script.
:Copyright: (c) 2009 Accense Technology, Inc. All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import logging
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
import kay
kay.setup_env(manage_py_env=True)
from werkzeug import script
from kay.management import *
import appengine_config
if __name__ == '__main__':
if len(sys.argv) == 1:
sys.argv.append("--help")
script.run()
| 19.592593 | 66 | 0.701323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.387524 |
40a223cfd00d5ab5d2f6c9db56030a295e86ca65 | 1,624 | py | Python | examples/plotting/field_pole_figure.py | heprom/pymicro | 176bf3a829dbf67796a3d4471f18868a3da229a7 | [
"MIT"
]
| 30 | 2017-03-02T14:43:48.000Z | 2022-02-25T13:22:22.000Z | examples/plotting/field_pole_figure.py | heprom/pymicro | 176bf3a829dbf67796a3d4471f18868a3da229a7 | [
"MIT"
]
| 14 | 2019-12-29T12:41:29.000Z | 2021-12-01T21:13:20.000Z | examples/plotting/field_pole_figure.py | heprom/pymicro | 176bf3a829dbf67796a3d4471f18868a3da229a7 | [
"MIT"
]
| 18 | 2017-03-21T12:43:19.000Z | 2022-03-22T14:30:06.000Z | from pymicro.crystal.microstructure import *
from pymicro.crystal.texture import *
from pymicro.examples import PYMICRO_EXAMPLES_DATA_DIR
from matplotlib import pyplot as plt, colors, colorbar, cm
import pathlib as pl
'''This example demonstrates how a field can be used to color each symbol on
the pole figure with the :py:meth:`~pymicro.crystal.texture.set_map_field`
method.
'''
#orientations = Orientation.read_euler_txt('../data/orientation_set.inp')
#for i in range(600):
# micro.grains.append(Grain(i, orientations[i + 1]))
euler_list = np.genfromtxt(PYMICRO_EXAMPLES_DATA_DIR / 'orientation_set.inp').tolist()
micro = Microstructure(name='field', autodelete=True)
micro.add_grains(euler_list)
# load strain from dat files
strain_field = np.genfromtxt(PYMICRO_EXAMPLES_DATA_DIR / 'strain_avg_per_grain.dat')[19, ::2]
# build custom pole figures
pf = PoleFigure(microstructure=micro)
pf.mksize = 40
pf.set_map_field('strain', strain_field, field_min_level=0.015, field_max_level=0.025)
fig = plt.figure()
# direct PF
ax1 = fig.add_axes([0.05, 0.05, 0.8, 0.9], aspect='equal')
pf.plot_pf(ax=ax1)
plt.title('111 pole figure, cubic elasticity')
# to add the color bar
ax2 = fig.add_axes([0.8, 0.05, 0.05, 0.9])
norm = colors.Normalize(vmin=0.015, vmax=0.025)
cb = colorbar.ColorbarBase(ax2, cmap=cm.hot, norm=norm, orientation='vertical')
cb.set_label('Average strain (mm/mm)')
image_name = os.path.splitext(__file__)[0] + '.png'
print('writing %s' % image_name)
plt.savefig('%s' % image_name, format='png')
del pf
del micro
from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
| 34.553191 | 93 | 0.75431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 572 | 0.352217 |
40a57bcb86583811e90d8029258fdefae6a38a7d | 11,634 | py | Python | model/img2seq_torch.py | marcoleewow/LaTeX_OCR | 0980ea719f8d3175a6bbf6af18873dd72d04b8c7 | [
"Apache-2.0"
]
| 290 | 2019-04-04T01:52:32.000Z | 2022-03-30T08:07:53.000Z | model/img2seq_torch.py | w32zhong/LaTeX_OCR | 1e1f196468e678c93dfa2d8ab2ba02fbda38a3c0 | [
"Apache-2.0"
]
| 23 | 2019-06-11T05:07:58.000Z | 2022-03-11T23:44:17.000Z | model/img2seq_torch.py | w32zhong/LaTeX_OCR | 1e1f196468e678c93dfa2d8ab2ba02fbda38a3c0 | [
"Apache-2.0"
]
| 81 | 2019-04-06T11:40:34.000Z | 2022-02-28T15:08:05.000Z | import time
import sys
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
from model.base_torch import BaseModel
from model.utils.general import init_dir, get_logger
from model.utils.general import Progbar
from model.utils.general import Config
from model.utils.general import minibatches
from model.components.SimpleCNN import SimpleCNN
from model.components.ResNet import ResNet9
from model.components.DenseNet import DenseNet169
from model.components.seq2seq_torch import EncoderCNN, DecoderWithAttention, Img2Seq
from model.evaluation.text import score_files, truncate_end, write_answers
from model.utils.image import pad_batch_images_2
from model.utils.text import pad_batch_formulas
from torch.utils.data import Dataset
import h5py
import json
from model.utils.data_generator import DataGenerator
class ImgFormulaDataset(Dataset):
"""
A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches.
"""
def __init__(self, data_generator: DataGenerator, transform=None):
"""
        :param data_generator: DataGenerator instance that yields (image, formula) pairs
        :param transform: image transform pipeline
"""
self.data_generator = data_generator
# PyTorch transformation pipeline for the image (normalizing, etc.)
self.transform = transform
def __getitem__(self, i):
# Remember, the Nth caption corresponds to the (N // captions_per_image)th image
(img, formula) = self.data_generator.__getitem__(i)
img = pad_batch_images_2([img], [800, 800, 1])
# img = torch.tensor(img, dtype=torch.int8) # (N, W, H, C)
# img = img.squeeze(0)
# img = img.permute(2, 0, 1) # (C, W, H)
# if self.transform is not None:
# img = self.transform(img)
# formula = torch.tensor(formula, dtype=torch.int) # (C, W, H), (TOKEN)
return img, formula
def __len__(self):
return len(self.data_generator)
class Img2SeqModel(BaseModel):
def __init__(self, config, dir_output, vocab):
super(Img2SeqModel, self).__init__(config, dir_output)
self._vocab = vocab
def getModel(self, model_name="CNN"):
if model_name == "CNN":
return SimpleCNN()
elif model_name == "ResNet9":
return ResNet9()
elif model_name == "DenseNet169":
return DenseNet169(pretrained=True)
elif model_name == "Img2Seq":
self.encoder = EncoderCNN(self._config)
self.decoder = DecoderWithAttention(attention_dim=512,
embed_dim=512,
decoder_dim=512,
vocab_size=self._vocab.n_tok,
dropout=0.5)
return Img2Seq(self._config, self._vocab)
def getOptimizer(self, lr_method='adam', lr=0.001):
self.encoder_optimizer = torch.optim.Adam(params=self.encoder.parameters(), lr=lr)
self.decoder_optimizer = torch.optim.Adam(params=self.decoder.parameters(), lr=lr)
return super().getOptimizer(lr_method=lr_method, lr=lr)
def _run_train_epoch(self, config, train_set, val_set, epoch, lr_schedule):
"""Performs an epoch of training
Args:
config: Config instance
train_set: Dataset instance
val_set: Dataset instance
epoch: (int) id of the epoch, starting at 0
            lr_schedule: LRSchedule instance that manages the learning rate schedule
Returns:
score: (float) model will select weights that achieve the highest score
"""
# logging
batch_size = config.batch_size
nbatches = (len(train_set) + batch_size - 1) // batch_size
prog = Progbar(nbatches)
self.model.train()
self.encoder.train()
self.decoder.train()
train_loader = torch.utils.data.DataLoader(ImgFormulaDataset(train_set),
batch_size=batch_size,
shuffle=True, num_workers=3, pin_memory=True)
# for i, (img, formula) in enumerate(train_loader):
for i, (img, formula) in enumerate(minibatches(train_set, batch_size)):
img = pad_batch_images_2(img)
img = torch.FloatTensor(img) # (N, W, H, C)
formula, formula_length = pad_batch_formulas(formula, self._vocab.id_pad, self._vocab.id_end)
img = img.permute(0, 3, 1, 2) # (N, C, W, H)
formula = torch.LongTensor(formula) # (N,)
loss_eval = self.getLoss(img, formula=formula, lr=lr_schedule.lr, dropout=config.dropout, training=True)
prog.update(i + 1, [("loss", loss_eval), ("lr", lr_schedule.lr)])
# update learning rate
lr_schedule.update(batch_no=epoch*nbatches + i)
self.logger.info("- Training: {}".format(prog.info))
# evaluation
config_eval = Config({"dir_answers": self._dir_output + "formulas_val/", "batch_size": config.batch_size})
scores = self.evaluate(config_eval, val_set)
score = scores["perplexity"]
lr_schedule.update(score=score)
return score
def getLoss(self, img, formula, lr, dropout, training=True):
# Move to GPU, if available
img = img.to(self.device)
formula = formula.to(self.device)
# Forward prop.
imgs = self.encoder(img)
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(
imgs, formula, torch.LongTensor([[len(i)] for i in formula]))
# Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# Remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
# Calculate loss
loss = self.criterion(scores, targets)
alpha_c = 1.
# Add doubly stochastic attention regularization
loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
# Back prop.
self.decoder_optimizer.zero_grad()
if self.encoder_optimizer is not None:
self.encoder_optimizer.zero_grad()
loss.backward()
# Update weights
self.decoder_optimizer.step()
if self.encoder_optimizer is not None:
self.encoder_optimizer.step()
return -loss.item()
def _run_evaluate_epoch(self, config, test_set):
"""Performs an epoch of evaluation
Args:
test_set: Dataset instance
            config: Config instance with evaluation parameters
                (e.g. "dir_answers", "batch_size")
Returns:
scores: (dict) scores["acc"] = 0.85 for instance
"""
self.model.eval()
self.encoder.eval()
self.decoder.eval()
# initialize containers of references and predictions
if self._config.decoding == "greedy":
refs, hyps = [], [[]]
elif self._config.decoding == "beam_search":
refs, hyps = [], [[] for i in range(self._config.beam_size)]
references = list() # references (true captions) for calculating BLEU-4 score
hypotheses = list() # hypotheses (predictions)
with torch.no_grad():
nbatches = len(test_set)
prog = Progbar(nbatches)
test_loader = torch.utils.data.DataLoader(ImgFormulaDataset(test_set),
batch_size=nbatches,
shuffle=True, num_workers=3, pin_memory=True)
for i, (img, formula) in enumerate(minibatches(test_set, nbatches)):
# print(type(img), len(img), img[0].shape)
# print(type(formula), formula)
# Move to GPU, if available
img = pad_batch_images_2(img)
img = torch.FloatTensor(img) # (N, W, H, C)
formula, formula_length = pad_batch_formulas(formula, self._vocab.id_pad, self._vocab.id_end)
img = img.permute(0, 3, 1, 2) # (N, C, W, H)
formula = torch.LongTensor(formula) # (N,)
img = img.to(self.device)
formula = formula.to(self.device)
# Forward prop.
imgs = self.encoder(img)
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, formula, torch.LongTensor([[len(i)] for i in formula]))
# Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# Remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
# Calculate loss
loss = self.criterion(scores, targets)
print(scores.shape, targets.shape)
print(loss)
alpha_c = 1.
# Add doubly stochastic attention regularization
loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
loss_eval = loss.item()
prog.update(i + 1, [("loss", loss_eval), ("perplexity", np.exp(loss_eval))])
# Store references (true captions), and hypothesis (prediction) for each image
# If for n images, we have n hypotheses, and references a, b, c... for each image, we need -
# references = [[ref1a, ref1b, ref1c], [ref2a, ref2b], ...], hypotheses = [hyp1, hyp2, ...]
# print("---------------------------------------------------------------formula and prediction :")
for form, preds in zip(formula, scores):
refs.append(form)
# print(form, " ---------- ", preds[0])
for i, pred in enumerate(preds):
hyps[i].append(pred)
files = write_answers(refs, hyps, self._vocab.id_to_tok, config.dir_answers, self._vocab.id_end)
scores = score_files(files[0], files[1])
# perp = - np.exp(ce_words / float(n_words))
# scores["perplexity"] = perp
self.logger.info("- Evaluating: {}".format(prog.info))
return {
"perplexity": loss.item()
}
def predict_batch(self, images):
preds = []
images = images.to(self.device)
outputs = self.model(images)
_, predicted = torch.max(outputs.data, 1)
pr = outputs[:, 1].detach().cpu().numpy()
for i in pr:
preds.append(i)
return preds
def predict(self, img):
return self.predict_batch([img])
| 42.772059 | 147 | 0.577016 | 10,686 | 0.918515 | 0 | 0 | 0 | 0 | 0 | 0 | 3,171 | 0.272563 |
40a5a49f8963d40bc4247496570aa980197c909d | 719 | py | Python | src/third_party/dart/tools/dom/scripts/all_tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
]
| 21 | 2021-06-04T21:08:21.000Z | 2022-03-04T14:21:34.000Z | src/third_party/dart/tools/dom/scripts/all_tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
]
| 1 | 2021-01-21T14:45:59.000Z | 2021-01-21T14:45:59.000Z | src/third_party/dart/tools/dom/scripts/all_tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
]
| 9 | 2021-03-16T09:29:26.000Z | 2022-01-06T08:38:10.000Z | #!/usr/bin/python
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""This entry point runs all script tests."""
import logging.config
import unittest
if __name__ == '__main__':
logging.config.fileConfig('logging.conf')
suite = unittest.TestLoader().loadTestsFromNames([
'templateloader_test', 'pegparser_test', 'idlparser_test',
'idlnode_test', 'idlrenderer_test', 'database_test',
'databasebuilder_test', 'emitter_test', 'dartgenerator_test',
'multiemitter_test'
])
unittest.TextTestRunner().run(suite)
| 37.842105 | 76 | 0.719054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 471 | 0.655076 |
40a5c13d7bfe8ebdc535f6e928718db2cd73a81f | 623 | py | Python | src/11/11367.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
]
| 11 | 2020-09-20T15:17:11.000Z | 2022-03-17T12:43:33.000Z | src/11/11367.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
]
| 3 | 2021-10-30T07:51:36.000Z | 2022-03-09T05:19:23.000Z | src/11/11367.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
]
| 13 | 2021-01-21T03:19:08.000Z | 2022-03-28T10:44:58.000Z | """
11367. Report Card Time
Author: xCrypt0r
Language: Python 3
Memory used: 29,380 KB
Runtime: 64 ms
Solved on: September 18, 2020
"""
def main():
for _ in range(int(input())):
name, score = input().split()
score = int(score)
if score < 60: grade = 'F'
elif score < 67: grade = 'D'
elif score < 70: grade = 'D+'
elif score < 77: grade = 'C'
elif score < 80: grade = 'C+'
elif score < 87: grade = 'B'
elif score < 90: grade = 'B+'
elif score < 97: grade = 'A'
else: grade = 'A+'
print(name + ' ' + grade)
if __name__ == '__main__':
main()
| 20.096774 | 37 | 0.499197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.294737 |
40a5dc8510f8fdf8f4c9c7d29bd318a4e6deedc1 | 49,691 | py | Python | imgaug/augmentables/bbs.py | bill0714/imgaug | 5abdc4d9d7f512ba34c78955557b174a64ad22a6 | [
"MIT"
]
| 1 | 2019-10-25T17:43:20.000Z | 2019-10-25T17:43:20.000Z | imgaug/augmentables/bbs.py | RTANC/imgaug | 2a3161550a4a1895a227bb8856d525e69a7d503d | [
"MIT"
]
| null | null | null | imgaug/augmentables/bbs.py | RTANC/imgaug | 2a3161550a4a1895a227bb8856d525e69a7d503d | [
"MIT"
]
| null | null | null | from __future__ import print_function, division, absolute_import
import copy
import numpy as np
import skimage.draw
import skimage.measure
from .. import imgaug as ia
from .utils import normalize_shape, project_coords
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right
corners. Both are given as x and y-coordinates. The corners are intended
to lie inside the bounding box area. As a result, a bounding box that lies
completely inside the image but has maximum extensions would have
coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that
coordinates are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
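    Examples
    --------
    A small illustrative sketch; the coordinate values are arbitrary:
    >>> bb = BoundingBox(x1=10.5, y1=20.5, x2=50.5, y2=60.5, label="person")
    >>> w, h = bb.width, bb.height  # 40.0 and 40.0
    >>> c_x, c_y = bb.center_x, bb.center_y  # 30.5 and 40.5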
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
if y1 > y2:
y2, y1 = y1, y2
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def coords(self):
"""Get the top-left and bottom-right coordinates as one array.
Returns
-------
ndarray
A ``(N, 2)`` numpy array with ``N=2`` containing the top-left
and bottom-right coordinates.
"""
arr = np.empty((2, 2), dtype=np.float32)
arr[0, :] = (self.x1, self.y1)
arr[1, :] = (self.x2, self.y2)
return arr
@property
def x1_int(self):
"""Get the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x1))
@property
def y1_int(self):
"""Get the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y1))
@property
def x2_int(self):
"""Get the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
X-coordinate of the bottom left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x2))
@property
def y2_int(self):
"""Get the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
Y-coordinate of the bottom left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y2))
@property
def height(self):
"""Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. ``height * width``.
"""
return self.height * self.width
# TODO add test for tuple of number
def contains(self, other):
"""Estimate whether the bounding box contains a given point.
Parameters
----------
other : tuple of number or imgaug.augmentables.kps.Keypoint
Point to check for.
Returns
-------
bool
``True`` if the point is contained in the bounding box,
``False`` otherwise.
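        Examples
        --------
        A short illustrative sketch; the values are arbitrary:
        >>> bb = BoundingBox(x1=0.0, y1=0.0, x2=10.0, y2=5.0)
        >>> inside = bb.contains((4.0, 3.0))   # True
        >>> outside = bb.contains((4.0, 7.0))  # False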
"""
if isinstance(other, tuple):
x, y = other
else:
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
# TODO add tests for ndarray inputs
def project(self, from_shape, to_shape):
"""Project the bounding box onto a differently shaped image.
E.g. if the bounding box is on its original image at
``x1=(10 of 100 pixels)`` and ``y1=(20 of 100 pixels)`` and is
projected onto a new image with size ``(width=200, height=200)``,
its new position will be ``(x1=20, y1=40)``.
(Analogous for ``x2``/``y2``.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int or ndarray
Shape of the original image. (Before resize.)
to_shape : tuple of int or ndarray
Shape of the new image. (After resize.)
Returns
-------
imgaug.augmentables.bbs.BoundingBox
``BoundingBox`` instance with new coordinates.
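        Examples
        --------
        A short sketch mirroring the description above:
        >>> bb = BoundingBox(x1=10, y1=20, x2=30, y2=40)
        >>> bb_proj = bb.project((100, 100), (200, 200))
        >>> # bb_proj covers x1=20, y1=40, x2=60, y2=80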
"""
coords_proj = project_coords([(self.x1, self.y1), (self.x2, self.y2)],
from_shape, to_shape)
return self.copy(
x1=coords_proj[0][0],
y1=coords_proj[0][1],
x2=coords_proj[1][0],
y2=coords_proj[1][1],
label=self.label)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all
sides.
top : number, optional
Value by which to extend the bounding box size along its top
side.
right : number, optional
Value by which to extend the bounding box size along its right
side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom
side.
left : number, optional
Value by which to extend the bounding box size along its left
side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
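        Examples
        --------
        A short illustrative sketch; the values are arbitrary:
        >>> bb = BoundingBox(x1=10, y1=10, x2=20, y2=20)
        >>> bb_ext = bb.extend(all_sides=1, left=2)
        >>> # bb_ext covers x1=7, y1=9, x2=21, y2=21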
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""Compute the intersection BB between this BB and another BB.
Note that in extreme cases, the intersection can be a single point.
In that case the intersection bounding box exists and it will be
returned, but it will have a height and width of zero.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.augmentables.bbs.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is
an intersection.
If there is no intersection, the default value will be returned,
            which can be anything.
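        Examples
        --------
        A short illustrative sketch; the values are arbitrary:
        >>> bb_a = BoundingBox(x1=0, y1=0, x2=4, y2=4)
        >>> bb_b = BoundingBox(x1=2, y1=2, x2=6, y2=6)
        >>> inter = bb_a.intersection(bb_b)  # covers x1=2, y1=2, x2=4, y2=4
        >>> miss = bb_a.intersection(BoundingBox(5, 5, 6, 6))  # None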
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""Compute the union BB between this BB and another BB.
This is equivalent to drawing a bounding box around all corner points
of both bounding boxes.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""Compute the IoU between this bounding box and another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B))
/ (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
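        Examples
        --------
        A short illustrative sketch; the values are arbitrary:
        >>> bb_a = BoundingBox(x1=0.0, y1=0.0, x2=2.0, y2=2.0)
        >>> bb_b = BoundingBox(x1=1.0, y1=1.0, x2=3.0, y2=3.0)
        >>> iou = bb_a.iou(bb_b)  # intersection 1.0, union 7.0, i.e. about 0.143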
"""
inters = self.intersection(other)
if inters is None:
return 0.0
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
def is_fully_within_image(self, image):
"""Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is fully inside the image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
return (
self.x1 >= 0
and self.x2 < width
and self.y1 >= 0
and self.y2 < height)
def is_partly_within_image(self, image):
"""Estimate whether the BB is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is at least partially inside the
image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""Estimate whether the BB is partially/fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
fully : bool, optional
Whether to return ``True`` if the bounding box is fully outside
of the image area.
partly : bool, optional
Whether to return ``True`` if the bounding box is at least
partially outside fo the image area.
Returns
-------
bool
``True`` if the bounding box is partially/fully outside of the
image area, depending on defined parameters.
``False`` otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
return fully
@ia.deprecated(alt_func="BoundingBox.clip_out_of_image()",
comment="clip_out_of_image() has the exactly same "
"interface.")
def cut_out_of_image(self, *args, **kwargs):
return self.clip_out_of_image(*args, **kwargs)
def clip_out_of_image(self, image):
"""Clip off all parts of the BB box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
        assert height > 0, (
            "Expected image with height>0, got shape %s." % (shape,))
        assert width > 0, (
            "Expected image with width>0, got shape %s." % (shape,))
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
"""Move this bounding box along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift this object *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift this object *from* the
right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift this object *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift this object *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shifted bounding box.
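        Examples
        --------
        A short illustrative sketch; the values are arbitrary:
        >>> bb = BoundingBox(x1=10, y1=10, x2=20, y2=20)
        >>> bb_shifted = bb.shift(left=5, top=-2)
        >>> # bb_shifted covers x1=15, y1=8, x2=25, y2=18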
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray
The image onto which to draw the bounding box.
Currently expected to be ``uint8``.
color : iterable of int, optional
The color to use, corresponding to the channel layout of the
image. Usually RGB.
alpha : float, optional
The transparency of the drawn bounding box, where ``1.0`` denotes
no transparency and ``0.0`` is invisible.
size : int, optional
The thickness of the bounding box in pixels. If the value is
larger than ``1``, then additional pixels will be added around
the bounding box (i.e. extension towards the outside).
copy : bool, optional
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional
Whether to raise an error if the bounding box is fully outside of
the image. If set to ``False``, no error will be raised and only
the parts inside the image will be drawn.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,C) ndarray(uint8)
Image with bounding box drawn on it.
"""
if thickness is not None:
ia.warn_deprecated(
"Usage of argument 'thickness' in BoundingBox.draw_on_image() "
"is deprecated. The argument was renamed to 'size'.")
size = thickness
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception(
"Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f "
"on image with shape %s." % (
self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
if isinstance(color, (tuple, list)):
color = np.uint8(color)
for i in range(size):
y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case
# of drawing means that the border lies just barely outside of
# the image, making the border disappear, even though the BB is
# fully inside the image. Here we correct for that because of
# beauty reasons. Same is the case for x coordinates.
if self.is_fully_within_image(image):
y1 = np.clip(y1, 0, image.shape[0]-1)
y2 = np.clip(y2, 0, image.shape[0]-1)
x1 = np.clip(x1, 0, image.shape[1]-1)
x2 = np.clip(x2, 0, image.shape[1]-1)
y = [y1-i, y1-i, y2+i, y2+i]
x = [x1-i, x2+i, x2+i, x1-i]
rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, :] = color
else:
if ia.is_float_array(result):
# TODO use blend_alpha here
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255).astype(input_dtype)
return result
# TODO add tests for pad and pad_max
def extract_from_image(self, image, pad=True, pad_max=None,
prevent_zero_size=True):
"""Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is
partially/fully outside of the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
pad : bool, optional
Whether to zero-pad the image if the object is partially/fully
outside of it.
pad_max : None or int, optional
            The maximum number of pixels that may be zero-padded on any side,
i.e. if this has value ``N`` the total maximum of added pixels
is ``4*N``.
This option exists to prevent extremely large images as a result of
single points being moved very far away during augmentation.
prevent_zero_size : bool, optional
Whether to prevent the height or width of the extracted image from
becoming zero.
If this is set to ``True`` and the height or width of the bounding
box is below ``1``, the height/width will be increased to ``1``.
This can be useful to prevent problems, e.g. with image saving or
plotting.
If it is set to ``False``, images will be returned as ``(H', W')``
or ``(H', W', 3)`` with ``H`` or ``W`` potentially being 0.
Returns
-------
(H',W') ndarray or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box
is partially/fully outside of the image.
            If `prevent_zero_size` is activated, it is guaranteed that
``H'>0`` and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
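        Examples
        --------
        A short sketch using an all-zeros dummy image:
        >>> image = np.zeros((64, 64, 3), dtype=np.uint8)
        >>> bb = BoundingBox(x1=10, y1=10, x2=30, y2=20)
        >>> crop = bb.extract_from_image(image)  # shape (10, 20, 3)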
"""
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case of
# extraction leads to a black border, which is both ugly and
# unexpected after calling cut_out_of_image(). Here we correct for
# that because of beauty reasons. Same is the case for x coordinates.
fully_within = self.is_fully_within_image(image)
if fully_within:
y1, y2 = np.clip([y1, y2], 0, height-1)
x1, x2 = np.clip([x1, x2], 0, width-1)
# TODO add test
if prevent_zero_size:
if abs(x2 - x1) < 1:
x2 = x1 + 1
if abs(y2 - y1) < 1:
y2 = y1 + 1
if pad:
# if the bb is outside of the image area, the following pads the
# image first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that
# are natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + pad_left
width = width + pad_left
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + pad_top
height = height + pad_top
y1 = 0
if x2 >= width:
pad_right = x2 - width
if y2 >= height:
pad_bottom = y2 - height
paddings = [pad_top, pad_right, pad_bottom, pad_left]
any_padded = any([val > 0 for val in paddings])
if any_padded:
if pad_max is None:
pad_max = max(paddings)
image = ia.pad(
image,
top=min(pad_top, pad_max),
right=min(pad_right, pad_max),
bottom=min(pad_bottom, pad_max),
left=min(pad_left, pad_max)
)
return image[y1:y2, x1:x2]
else:
            within_image = (
                0 <= x1 < width
                and 0 <= y1 < height
                and 0 <= x2 < width
                and 0 <= y2 < height
            )
out_height, out_width = (y2 - y1), (x2 - x1)
nonzero_height = (out_height > 0)
nonzero_width = (out_width > 0)
if within_image and nonzero_height and nonzero_width:
return image[y1:y2, x1:x2]
if prevent_zero_size:
out_height = 1
out_width = 1
else:
out_height = 0
out_width = 0
if image.ndim == 2:
return np.zeros((out_height, out_width), dtype=image.dtype)
return np.zeros((out_height, out_width, image.shape[-1]),
dtype=image.dtype)
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""Convert the BB's corners to keypoints (clockwise, from top left).
Returns
-------
list of imgaug.augmentables.kps.Keypoint
Corners of the bounding box as keypoints.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def coords_almost_equals(self, other, max_distance=1e-4):
"""Estimate if this and another BB have almost identical coordinates.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other bounding box with which to compare this one.
If this is an ``iterable``, it is assumed to represent the top-left
and bottom-right coordinates of that bounding box, given as e.g.
an ``(2,2)`` ndarray or an ``(4,)`` ndarray or as a similar list.
max_distance : number, optional
The maximum euclidean distance between a corner on one bounding
box and the closest corner on the other bounding box. If the
distance is exceeded for any such pair, the two BBs are not
viewed as equal.
Returns
-------
bool
Whether the two bounding boxes have almost identical corner
coordinates.
"""
if ia.is_np_array(other):
# we use flat here in case other is (N,2) instead of (4,)
coords_b = other.flat
elif ia.is_iterable(other):
coords_b = list(ia.flatten(other))
else:
assert isinstance(other, BoundingBox), (
"Expected 'other' to be an iterable containing two "
"(x,y)-coordinate pairs or a BoundingBox. "
"Got type %s." % (type(other),))
coords_b = other.coords.flat
coords_a = self.coords
return np.allclose(coords_a.flat, coords_b, atol=max_distance, rtol=0)
def almost_equals(self, other, max_distance=1e-4):
"""Compare this and another BB's label and coordinates.
This is the same as
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals` but
additionally compares the labels.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other object to compare against. Expected to be a
``BoundingBox``.
max_distance : number, optional
See
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals`.
Returns
-------
bool
``True`` if the coordinates are almost equal and additionally
the labels are equal. Otherwise ``False``.
"""
if self.label != other.label:
return False
return self.coords_almost_equals(other, max_distance=max_distance)
@classmethod
def from_point_soup(cls, xy):
"""Convert a ``(2P,) or (P,2) ndarray`` to a BB instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xy : (2P,) ndarray or (P, 2) array or iterable of number or iterable of iterable of number
Array containing ``P`` points in xy-form denoting a soup of
points around which to place a bounding box.
The array should usually be of dtype ``float32``.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box around the points.
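        Examples
        --------
        A short illustrative sketch; the point values are arbitrary:
        >>> points = [(1.0, 4.0), (3.0, 2.0), (2.0, 6.0)]
        >>> bb = BoundingBox.from_point_soup(points)
        >>> # bb covers x1=1.0, y1=2.0, x2=3.0, y2=6.0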
"""
xy = np.array(xy, dtype=np.float32)
assert len(xy) > 0, (
"Expected to get at least one point to place a bounding box "
"around, got shape %s." % (xy.shape,))
assert xy.ndim == 1 or (xy.ndim == 2 and xy.shape[-1] == 2), (
"Expected input array of shape (P,) or (P, 2), "
"got shape %s." % (xy.shape,))
if xy.ndim == 1:
xy = xy.reshape((-1, 2))
x1, y1 = np.min(xy, axis=0)
x2, y2 = np.max(xy, axis=0)
return cls(x1=x1, y1=y1, x2=x2, y2=y2)
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""Create a shallow copy of this BoundingBox instance.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=copy.deepcopy(self.label) if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Deep copy.
"""
# TODO write specific copy routine with deepcopy for label and remove
# the deepcopy from copy()
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (
self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
"""Container for the list of all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of imgaug.augmentables.bbs.BoundingBox
List of bounding boxes on the image.
shape : tuple of int or ndarray
The shape of the image on which the objects are placed.
Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting
such an image shape.
Examples
--------
>>> import numpy as np
>>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
>>>
>>> image = np.zeros((100, 100))
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
self.bounding_boxes = bounding_boxes
self.shape = normalize_shape(shape)
@property
def items(self):
"""Get the bounding boxes in this container.
Returns
-------
list of BoundingBox
Bounding boxes within this container.
"""
return self.bounding_boxes
# TODO remove this? here it is image height, but in BoundingBox it is
# bounding box height
@property
def height(self):
"""Get the height of the image on which the bounding boxes fall.
Returns
-------
int
Image height.
"""
return self.shape[0]
# TODO remove this? here it is image width, but in BoundingBox it is
# bounding box width
@property
def width(self):
"""Get the width of the image on which the bounding boxes fall.
Returns
-------
int
Image width.
"""
return self.shape[1]
@property
def empty(self):
"""Determine whether this instance contains zero bounding boxes.
Returns
-------
bool
True if this object contains zero bounding boxes.
"""
return len(self.bounding_boxes) == 0
def on(self, image):
"""Project bounding boxes from one image (shape) to a another one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing the same bounding boxes after projection to
the new image shape.
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
bounding_boxes = [bb.project(self.shape, shape)
for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape)
@classmethod
def from_xyxy_array(cls, xyxy, shape):
"""Convert an ``(N, 4) or (N, 2, 2) ndarray`` to a BBsOI instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N, 4) ndarray or (N, 2, 2) array
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by its top-left and bottom-right
coordinates.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided corner coordinates.
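        Examples
        --------
        A short sketch with two arbitrary boxes on a ``100x100`` image:
        >>> xyxy = np.float32([[10, 20, 30, 40], [5, 5, 15, 15]])
        >>> bbsoi = BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(100, 100, 3))
        >>> # bbsoi.bounding_boxes contains two BoundingBox instances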
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 2)
if xyxy.shape[0] == 0:
return BoundingBoxesOnImage([], shape)
assert (
(xyxy.ndim == 2 and xyxy.shape[-1] == 4)
or (xyxy.ndim == 3 and xyxy.shape[1:3] == (2, 2))), (
"Expected input array of shape (N, 4) or (N, 2, 2), "
"got shape %s." % (xyxy.shape,))
xyxy = xyxy.reshape((-1, 2, 2))
boxes = [BoundingBox.from_point_soup(row) for row in xyxy]
return cls(boxes, shape)
@classmethod
def from_point_soups(cls, xy, shape):
"""Convert an ``(N, 2P) or (N, P, 2) ndarray`` to a BBsOI instance.
Parameters
----------
xy : (N, 2P) ndarray or (N, P, 2) array or iterable of iterable of number or iterable of iterable of iterable of number
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by a soup of ``P`` points.
If ``(N, P)`` then the second axis is expected to be in
xy-form (e.g. ``x1``, ``y1``, ``x2``, ``y2``, ...).
The final bounding box coordinates will be derived using ``min``
and ``max`` operations on the xy-values.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided point soups.
"""
xy = np.array(xy, dtype=np.float32)
# from_xy_array() already checks the ndim/shape, so we don't have to
# do it here
boxes = [BoundingBox.from_point_soup(row) for row in xy]
return cls(boxes, shape)
def to_xyxy_array(self, dtype=np.float32):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,4) ndarray``.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
Parameters
----------
dtype : numpy.dtype, optional
Desired output datatype of the ndarray.
Returns
-------
ndarray
``(N,4) ndarray``, where ``N`` denotes the number of bounding
boxes and ``4`` denotes the top-left and bottom-right bounding
box corner coordinates in form ``(x1, y1, x2, y2)``.
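        Examples
        --------
        A short sketch with a single arbitrary box:
        >>> bbsoi = BoundingBoxesOnImage(
        >>>     [BoundingBox(x1=10, y1=20, x2=30, y2=40)], shape=(100, 100, 3))
        >>> arr = bbsoi.to_xyxy_array()  # [[10., 20., 30., 40.]] as float32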
"""
xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
return xyxy_array.astype(dtype)
def to_xy_array(self):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,2) ndarray``.
Returns
-------
ndarray
``(2*B,2) ndarray`` of xy-coordinates, where ``B`` denotes the
number of bounding boxes.
"""
return self.to_xyxy_array().reshape((-1, 2))
def fill_from_xyxy_array_(self, xyxy):
"""Modify the BB coordinates of this instance in-place.
.. note ::
            This currently expects exactly one entry in `xyxy` per bounding
            box in this instance. (I.e. two corner coordinates per instance.)
Otherwise, an ``AssertionError`` will be raised.
.. note ::
This method will automatically flip x-coordinates if ``x1>x2``
for a bounding box. (Analogous for y-coordinates.)
Parameters
----------
xyxy : (N, 4) ndarray or iterable of iterable of number
Coordinates of ``N`` bounding boxes on an image, given as
a ``(N,4)`` array of two corner xy-coordinates per bounding box.
``N`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 4)
assert xyxy.shape[0] == 0 or (xyxy.ndim == 2 and xyxy.shape[-1] == 4), (
"Expected input array to have shape (N,4), "
"got shape %s." % (xyxy.shape,))
assert len(xyxy) == len(self.bounding_boxes), (
"Expected to receive an array with as many rows there are "
"bounding boxes in this instance. Got %d rows, expected %d." % (
len(xyxy), len(self.bounding_boxes)))
for bb, (x1, y1, x2, y2) in zip(self.bounding_boxes, xyxy):
bb.x1 = min([x1, x2])
bb.y1 = min([y1, y2])
bb.x2 = max([x1, x2])
bb.y2 = max([y1, y2])
return self
def fill_from_xy_array_(self, xy):
"""Modify the BB coordinates of this instance in-place.
See
:func:`imgaug.augmentables.bbs.BoundingBoxesOnImage.fill_from_xyxy_array_`.
Parameters
----------
xy : (2*B, 2) ndarray or iterable of iterable of number
Coordinates of ``B`` bounding boxes on an image, given as
a ``(2*B,2)`` array of two corner xy-coordinates per bounding box.
``B`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
"""
xy = np.array(xy, dtype=np.float32)
return self.fill_from_xyxy_array_(xy.reshape((-1, 4)))
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as set in
``BoundingBoxesOnImage.shape``.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes.
If a single ``int`` ``C``, then that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,3) ndarray
Image with drawn bounding boxes.
"""
image = np.copy(image) if copy else image
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
size=size,
copy=False,
raise_if_out_of_image=raise_if_out_of_image,
thickness=thickness
)
return image
def remove_out_of_image(self, fully=True, partly=False):
"""Remove all BBs that are fully/partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove bounding boxes that are fully outside of the
image.
partly : bool, optional
Whether to remove bounding boxes that are partially outside of
the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were
fully/partially outside of the image being removed.
"""
bbs_clean = [
bb
for bb
in self.bounding_boxes
if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
@ia.deprecated(alt_func="BoundingBoxesOnImage.clip_out_of_image()",
comment="clip_out_of_image() has the exactly same "
"interface.")
def cut_out_of_image(self):
return self.clip_out_of_image()
def clip_out_of_image(self):
"""Clip off all parts from all BBs that are outside of the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [
bb.clip_out_of_image(self.shape)
for bb
in self.bounding_boxes
if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
def shift(self, top=None, right=None, bottom=None, left=None):
"""Move all all BBs along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all objects *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift all objects *from* the
            right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift all objects *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift all objects *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shifted bounding boxes.
"""
bbs_new = [
bb.shift(top=top, right=right, bottom=bottom, left=left)
for bb
in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape)
def to_keypoints_on_image(self):
"""Convert the bounding boxes to one ``KeypointsOnImage`` instance.
Returns
-------
imgaug.augmentables.kps.KeypointsOnImage
A keypoints instance containing ``N*4`` coordinates for ``N``
bounding boxes. Order matches the order in ``bounding_boxes``.
"""
from .kps import KeypointsOnImage
# This currently uses 4 points instead of 2 points as the method
# is primarily used during augmentation and 4 points are overall
# the better choice there.
arr = np.zeros((len(self.bounding_boxes), 2*4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
arr[i] = [
box.x1, box.y1,
box.x2, box.y1,
box.x2, box.y2,
box.x1, box.y2
]
return KeypointsOnImage.from_xy_array(
arr.reshape((-1, 2)),
shape=self.shape
)
def invert_to_keypoints_on_image_(self, kpsoi):
"""Invert the output of ``to_keypoints_on_image()`` in-place.
This function writes in-place into this ``BoundingBoxesOnImage``
instance.
Parameters
----------
kpsoi : imgaug.augmentables.kps.KeypointsOnImages
Keypoints to convert back to bounding boxes, i.e. the outputs
of ``to_keypoints_on_image()``.
Returns
-------
BoundingBoxesOnImage
Bounding boxes container with updated coordinates.
Note that the instance is also updated in-place.
"""
assert len(kpsoi.keypoints) == len(self.bounding_boxes) * 4, (
"Expected %d coordinates, got %d." % (
                len(self.bounding_boxes) * 4, len(kpsoi.keypoints)))
for i, bb in enumerate(self.bounding_boxes):
xx = [kpsoi.keypoints[4*i+0].x, kpsoi.keypoints[4*i+1].x,
kpsoi.keypoints[4*i+2].x, kpsoi.keypoints[4*i+3].x]
yy = [kpsoi.keypoints[4*i+0].y, kpsoi.keypoints[4*i+1].y,
kpsoi.keypoints[4*i+2].y, kpsoi.keypoints[4*i+3].y]
bb.x1 = min(xx)
bb.y1 = min(yy)
bb.x2 = max(xx)
bb.y2 = max(yy)
self.shape = kpsoi.shape
return self
def copy(self):
"""Create a shallow copy of the ``BoundingBoxesOnImage`` instance.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""Create a deep copy of the ``BoundingBoxesOnImage`` object.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for BoundingBoxesOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return (
"BoundingBoxesOnImage(%s, shape=%s)"
% (str(self.bounding_boxes), self.shape))
| 33.327297 | 127 | 0.55666 | 49,400 | 0.994144 | 0 | 0 | 8,788 | 0.176853 | 0 | 0 | 31,539 | 0.634702 |
40a5de5724e10b92314c5b47739791ffcddafb72 | 2,891 | py | Python | scanner_relay/run.py | breakds/brokering | fa63d5ed8057a8018bcb11aaebce689c8d18e7ba | [
"MIT"
]
| null | null | null | scanner_relay/run.py | breakds/brokering | fa63d5ed8057a8018bcb11aaebce689c8d18e7ba | [
"MIT"
]
| null | null | null | scanner_relay/run.py | breakds/brokering | fa63d5ed8057a8018bcb11aaebce689c8d18e7ba | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
from twisted.internet import endpoints
from twisted.internet import protocol
from twisted.internet import defer
from twisted.mail import imap4
from scanner_relay.pipeline import Pipeline
from scanner_relay.authentication import PassStoreFetcher, PlainPasswordFetcher
import logging
# Global logging configuration. The level is set to INFO so that DEBUG
# messages do not reach stdout.
FORMAT = '[%(levelname)s] (%(name)s) %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger('run')
class ScannerRelayProtocol(imap4.IMAP4Client):
def __init__(self, username, password_fetcher, onFinish):
super().__init__()
self.pipeline = Pipeline(self, username, password_fetcher, onFinish)
def serverGreeting(self, unused_capabilities):
"""The entry point for the whole program.
It merely starts the long-running pipeline.
"""
        # NOTE: Although the official Twisted example suggests using the
        # capabilities returned here to decide which authentication methods to
        # register, that does not work in practice: the real capabilities are
        # only returned after authentication succeeds.
username = self.pipeline.username
self.registerAuthenticator(imap4.PLAINAuthenticator(username))
self.registerAuthenticator(imap4.LOGINAuthenticator(username))
self.registerAuthenticator(
imap4.CramMD5ClientAuthenticator(username))
self.pipeline.start()
class ScannerRelayProtocolFactory(protocol.ClientFactory):
def __init__(self, username, password_fetcher, onFinish):
super().__init__()
self.username = username
self.password_fetcher = password_fetcher
self.onFinish = onFinish
def buildProtocol(self, addr):
logger.info('Constructing client protocol to connect to %s:%d', addr.host, addr.port)
        proto = ScannerRelayProtocol(
            self.username, self.password_fetcher, self.onFinish)
        proto.factory = self
        return proto
def clientConnectionFailed(self, connector, reason):
print('Connection failed.')
# TODO(breakds): Add a more graceful (signal handling) way to terminate the program.
def clean_up(unused):
from twisted.internet import reactor
reactor.stop()
    print('All work done!')
if __name__ == '__main__':
# FIXME: Make these configurable
hostname = 'mail.breakds.org'
username = '[email protected]'.encode('ascii')
pass_store_entry = 'mail.breakds.org/bds'
port = 143
from twisted.internet import reactor
endpoint = endpoints.HostnameEndpoint(reactor, hostname, port)
factory = ScannerRelayProtocolFactory(
username, PassStoreFetcher(pass_store_entry), clean_up)
endpoint.connect(factory)
reactor.run()
| 34.416667 | 93 | 0.71982 | 1,636 | 0.565894 | 0 | 0 | 0 | 0 | 0 | 0 | 828 | 0.286406 |
40a624029504fa50d779ccdfdaa5ed5b7ed61a95 | 1,202 | py | Python | cubes_pilingup.py | akiselev1/hackerrank-solutions | 53c2a76c71c9b3553c077ccfde5178b27594ae72 | [
"MIT"
]
| null | null | null | cubes_pilingup.py | akiselev1/hackerrank-solutions | 53c2a76c71c9b3553c077ccfde5178b27594ae72 | [
"MIT"
]
| null | null | null | cubes_pilingup.py | akiselev1/hackerrank-solutions | 53c2a76c71c9b3553c077ccfde5178b27594ae72 | [
"MIT"
]
| null | null | null | """
Created by akiselev on 2019-06-14
There is a horizontal row of cubes. The length of each cube is given. You need to create a new vertical pile of cubes. The new pile should follow these directions: if cube i is on top of cube j, then sideLength[j] >= sideLength[i].
When stacking the cubes, you can only pick up either the leftmost or the rightmost cube each time. Print "Yes" if it is possible to stack the cubes. Otherwise, print "No". Do not print the quotation marks.
Input Format
The first line contains a single integer T, the number of test cases.
For each test case, there are 2 lines.
The first line of each test case contains n, the number of cubes.
The second line contains n space-separated integers, denoting the sideLengths of each cube in that order.
Constraints
Output Format
For each test case, output a single line containing either "Yes" or "No" without the quotes.
Sample Input
2
6
4 3 2 1 3 4
3
1 3 2
Sample Output
Yes
No
"""
for T in range(int(input())):
n = int(input())
cubes_h = list(map(int, input().split()))
i = 0
while i < n - 1 and cubes_h[i] >= cubes_h[i+1]:
i += 1
while i < n - 1 and cubes_h[i] <= cubes_h[i+1]:
i += 1
print("Yes" if i == n - 1 else "No")
| 23.115385 | 205 | 0.689684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 928 | 0.772047 |
40a999434640883a137547c775c396581c90f8a2 | 14,674 | py | Python | flask_web/bootstrap_web_core_py3.py | bopopescu/docker_images_a | 348d0982c5962f2ae34d10183ed9522b7a6fe286 | [
"MIT"
]
| null | null | null | flask_web/bootstrap_web_core_py3.py | bopopescu/docker_images_a | 348d0982c5962f2ae34d10183ed9522b7a6fe286 | [
"MIT"
]
| null | null | null | flask_web/bootstrap_web_core_py3.py | bopopescu/docker_images_a | 348d0982c5962f2ae34d10183ed9522b7a6fe286 | [
"MIT"
]
| null | null | null | #
#
# File: bootstrap_web_core_py3.py
#
#
#
import os
import json
import redis
import urllib
import flask
from flask import Flask
from flask import render_template,jsonify
from flask_httpauth import HTTPDigestAuth
from flask import request, session, url_for
from redis_support_py3.graph_query_support_py3 import Query_Support
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
from web_core.load_static_pages_py3 import Load_Static_Files
from web_core.load_redis_access_py3 import Load_Redis_Access
from redis_support_py3.construct_data_handlers_py3 import Redis_RPC_Client
from bootstrap_web_system_control_py3 import PI_Web_System_Control
from bootstrap_web_monitoring_py3 import PI_Web_Monitor_Server
from bootstrap_mqtt_client_py3 import PI_MQTT_Client_Monitor
from bootstrap_eto_py3 import ETO_Management
from file_server_library.file_server_lib_py3 import Construct_RPC_Library
from bootstrap_irrigation_scheduling_py3 import Irrigation_Scheduling
from irrigation_control.load_irrigation_control_py3 import Load_Irrigation_Control
class URL_Rule_Class(object):
def __init__(self,app,auth):
self.subsystems = {}
self.subsystem_order = []
self.app = app
self.auth = auth
def add_get_rules(self,subsystem_name,function_list,url_list):
slash_name = "/"+subsystem_name+"/"
assert(len(function_list)==len(url_list))
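        # Each url_list entry is assumed to be a 4-tuple of the form
        #   (page_name, route_suffix, menu_link_suffix, menu_description);
        # e.g. ("status", "/status", "", "Show subsystem status") is purely
        # illustrative.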
menu_list = []
menu_data = {}
for i in range(0,len(function_list)):
a1 = self.auth.login_required( function_list[i] )
self.app.add_url_rule(slash_name+url_list[i][0]+url_list[i][1],slash_name+url_list[i][0],a1)
menu_data[url_list[i][0]] =[a1,url_list[i][0]+url_list[i][2],url_list[i][3]]
menu_list.append(url_list[i][0])
self.subsystems[subsystem_name] = {"menu_list":menu_list,"menu_data":menu_data}
self.subsystem_order.append(subsystem_name)
def move_directories(self,path):
#print("move directory path",path)
path_test = path.split("/")
if len(path_test) != 1:
path_dest = path_test[1]
else:
path_dest = path
#print(path)
#print(path_dest)
os.system('mkdir flask_templates/'+path_dest)
os.system('mkdir flask_templates/js/'+path_dest)
#os.system("ls flask_templates")
#print("path",path,path_dest)
os.system('cp -r ' +path+'/templates/* flask_templates/'+path_dest)
os.system('cp -r ' +path+'/js/* flask_templates/js/'+path_dest)
return path_dest
class Load_App_Sys_Files(object):
def __init__( self, app, auth, request, file_server_library ):
self.app = app
self.auth = auth
self.request = request
self.file_server_library = file_server_library
a1 = auth.login_required( self.get_system_file )
app.add_url_rule("/ajax/get_system_file/<path:file_name>","get_system_file",a1)
a1 = auth.login_required( self.get_app_file )
app.add_url_rule("/ajax/get_app_file/<path:file_name>","get_app_file",a1)
a1 = auth.login_required( self.save_app_file )
app.add_url_rule("/ajax/save_app_file/<path:file_name>","save_app_file",a1,methods=["POST"])
a1 = auth.login_required( self.save_sys_file )
app.add_url_rule("/ajax/save_sys_file/<path:file_name>","save_sys_file",a1,methods=["POST"])
def get_system_file(self, file_name):
data = self.file_server_library.load_file( "application_files",file_name)
return json.dumps(data)
def get_app_file(self,file_name):
data = self.file_server_library.load_file( "system_files",file_name)
return json.dumps(data )
def save_app_file(self,file_name):
json_object = self.request.json
if type(json_object) != str:
json_object = json.dumps(json_object)
self.file_server_library.save_file("application_files",file_name, json_object );
return json.dumps('SUCCESS')
def save_sys_file(self,file_name):
json_object = self.request.json
if type(json_object) != str:
json_object = json.dumps(json_object)
self.file_server_library.save_file( "system_files",file_name, json_object );
return json.dumps('SUCCESS')
class PI_Web_Server_Core(object):
def __init__(self , name, site_data ):
redis_handle_pw = redis.StrictRedis(site_data["host"],
site_data["port"],
db=site_data["redis_password_db"],
decode_responses=True)
self.site_data = site_data
startup_dict = redis_handle_pw.hgetall("web")
self.qs = Query_Support( site_data)
self.file_server_library = Construct_RPC_Library(self.qs,self.site_data)
self.app = Flask(name)
self.auth = HTTPDigestAuth()
self.url_rule_class = URL_Rule_Class(self.app,self.auth)
self.auth.get_password( self.get_pw )
self.startup_dict = startup_dict
self.app.template_folder = 'flask_templates'
self.app.static_folder = 'static'
self.app.config['SECRET_KEY'] = startup_dict["SECRET_KEY"]
self.users = json.loads(startup_dict["users"])
Load_Static_Files(self.app,self.auth) #enable static files to be fetched
self.redis_access = Load_Redis_Access(self.app, self.auth, request ) #enable web access for redis operations
Load_App_Sys_Files( self.app, self.auth, request, self.file_server_library )
self.subsystems = []
self.modules = {}
self.load_specified_modules()
def load_specified_modules(self):
results=self.common_qs_search(["WEB_SERVER","WEB_SERVER"])
result = results[0]
modules = result["modules"]
for i in modules:
if i == "monitoring":
print(i)
PI_Web_Monitor_Server(self)
elif i == "system_control":
print(i)
PI_Web_System_Control(self)
elif i == "mqtt_client":
print(i)
PI_MQTT_Client_Monitor(self )
elif i == "eto":
print(i)
ETO_Management(self)
elif i == "irrigation_scheduling":
print(i)
Irrigation_Scheduling(self)
elif i == "irrigation_control":
print(i)
Load_Irrigation_Control(self)
elif i == "modbus_control":
print("do nothing right now")
else:
raise ValueError("bad web module")
self.result = result
if "status_function" in self.result:
print(self.result["status_function"])
else:
self.result["status_function"] = ""
print("status function not defined")
file_handle = open("flask_templates/js/status_definition.js","w")
file_handle.write('__status_option__ = "'+self.result["status_function"]+'"; \n')
file_handle.close()
def common_qs_search(self,search_list): # generalized graph search
query_list = []
query_list = self.qs.add_match_relationship( query_list,relationship="SITE",label=self.site_data["site"] )
for i in range(0,len(search_list)-1):
if type(search_list[i]) == list:
query_list = self.qs.add_match_relationship( query_list,relationship = search_list[i][0],label = search_list[i][1] )
else:
query_list = self.qs.add_match_relationship( query_list,relationship = search_list[i] )
if type(search_list[-1]) == list:
query_list = self.qs.add_match_terminal( query_list,relationship = search_list[-1][0],label = search_list[-1][1] )
else:
query_list = self.qs.add_match_terminal( query_list,relationship = search_list[-1] )
node_sets, node_sources = self.qs.match_list(query_list)
return node_sources
def get_pw( self,username):
if username in self.users:
return self.users[username]
return None
def generate_menu_page(self):
self.subsystems.sort()
self.generate_menu_template()
self.generate_modal_template()
def generate_default_index_page(self):
self.app.add_url_rule("/","home_page",self.links_a1)
def generate_index_page(self,module,element):
menu_data = self.url_rule_class.subsystems[module]["menu_data"]
menu_element = menu_data[element]
self.app.add_url_rule("/","home page",menu_element[0])
def generate_site_map(self):
self.links_a1 = self.auth.login_required( self.site_map_function )
self.app.add_url_rule("/link_page","/links_page",self.links_a1)
def site_map_function(self):
links = []
for rule in self.app.url_map.iter_rules():
# Filter out rules we can't navigate to in a browser
# and rules that require parameters
#url = url_for(rule.endpoint, **(rule.defaults or {}))
links.append((rule.endpoint))
links.sort()
return render_template("list_of_endpoints",endpoints = links)
def run_http( self):
self.app.run(threaded=True , use_reloader=True, host='0.0.0.0',port=self.port,debug =self.debug )
def run_https( self ):
startup_dict = self.startup_dict
self.app.run(threaded=True , use_reloader=True, host='0.0.0.0',debug =self.debug,
port=self.port ,ssl_context=("/data/cert.pem", "/data/key.pem"))
def generate_menu_template(self):
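        # Writes the Jinja 'menu' fragment: a Bootstrap navbar whose dropdown holds one
        # modal-trigger entry per registered subsystem, plus a status button and display.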
f = open( self.app.template_folder+'/menu', 'w')
output_string = '''
<nav class="navbar navbar-expand-sm bg-dark navbar-dark">
<!-- Links -->
<ul class="navbar-nav">
<!-- Dropdown -->
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="navbardrop" data-toggle="dropdown">Menu</a>
<div class="dropdown-menu">
'''
f.write(output_string)
self.url_rule_class.subsystems
for i in self.url_rule_class.subsystems:
temp = ' <a class="dropdown-item" href="#" data-toggle="modal" data-target="#'+i+'">'+i+"</a>\n"
f.write(temp)
output_string = '''
</div>
</li>
</ul>
<ul class="navbar-nav">
<button id="status_panel", class="btn " type="submit">Status</button>
</ul>
<nav class="navbar navbar-light bg-dark navbar-dark">
<span class="navbar-text" >
<h4 id ="status_display"> Status: </h4>
</span>
</nav>
</nav>
'''
f.write(output_string)
f.close()
def generate_modal_template(self):
f = open(self.app.template_folder+'/modals', 'w')
for i in self.url_rule_class.subsystem_order:
#print("generate_modal_template - i",i)
            output_string = '<!--'+i+' -->\n'
f.write(output_string)
output_string ='<div class="modal fade" id='+i+' tabindex="-1" role="dialog" aria-labelledby="accountModalLabel" aria-hidden="true">\n'
f.write(output_string)
output_string = '''
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
'''
f.write(output_string)
f.write(' <h5 class="modal-title" id="accountModalLabel">'+i+'</h5>\n')
output_string = '''
<button type="button" class="close" data-dismiss="modal" aria-label="close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<ul >
'''
f.write(output_string)
# <li><a href ='/control/display_past_system_alerts' target="_self">Current System State</a></li>
sub_system_data = self.url_rule_class.subsystems[i]
temp = sub_system_data["menu_data"]
#
for j in sub_system_data['menu_list']:
data = temp[j]
#print("data",data)
format_output = '<li><a href='+'"/'+i+'/'+data[1]+'" target="_self">'+data[2]+'</a></li>\n'
f.write(format_output)
output_string = '''
</ul>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
'''
f.write(output_string)
f.close()
if __name__ == "__main__":
file_handle = open("/data/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site_data = json.loads(data)
pi_web_server = PI_Web_Server_Core(__name__, redis_site_data )
pi_web_server.generate_menu_page()
pi_web_server.generate_site_map()
pi_web_server.generate_default_index_page()
port = pi_web_server.result["port"]
pi_web_server.port = port
debug = pi_web_server.result["debug"]
pi_web_server.debug = debug
https_flag = pi_web_server.result["https"]
    if https_flag == False:
        pi_web_server.run_http()
    else:
        pi_web_server.run_https()
| 34.855107 | 147 | 0.568284 | 12,806 | 0.872462 | 0 | 0 | 0 | 0 | 0 | 0 | 3,663 | 0.249557 |
40a99a0c9d4869b889926f6fe54b50b768c6cb98 | 4,160 | py | Python | git_talk/lib/changelog/main.py | cove9988/git-talk | 9f549d8565948a150834bcaa704b55ae15c094c1 | [
"MIT"
]
| 5 | 2020-04-06T11:00:27.000Z | 2020-09-30T15:16:56.000Z | git_talk/lib/changelog/main.py | ggdrg/git-talk | 89ed00caa6a426ea9d5fa84cbef588d07aebc1f0 | [
"MIT"
]
| 3 | 2020-09-26T02:53:30.000Z | 2020-10-09T01:46:37.000Z | git_talk/lib/changelog/main.py | ggdrg/git-talk | 89ed00caa6a426ea9d5fa84cbef588d07aebc1f0 | [
"MIT"
]
| 1 | 2020-09-25T23:41:54.000Z | 2020-09-25T23:41:54.000Z |
import os
import logging
from typing import Optional
import click
from git_talk.lib.changelog import generate_changelog
from git_talk.lib.changelog.presenter import MarkdownPresenter
from git_talk.lib.changelog.repository import GitRepository
# @click.command()
# @click.option(
# "-r",
# "--repo",
# type=click.Path(exists=True),
# default=".",
# help="Path to the repository's root directory [Default: .]",
# )
# @click.option("-t", "--title", default="Changelog", help="The changelog's title [Default: Changelog]")
# @click.option("-d", "--description", help="Your project's description")
# @click.option(
# "-o",
# "--output",
# type=click.File("w"),
# default="CHANGELOG.md",
# help="The place to save the generated changelog [Default: CHANGELOG.md]",
# )
# @click.option("-r", "--remote", default="origin", help="Specify git remote to use for links")
# @click.option("-v", "--latest-version", type=str, help="use specified version as latest release")
# @click.option("-u", "--unreleased", is_flag=True, default=False, help="Include section for unreleased changes")
# @click.option("--diff-url", default=None, help="override url for compares, use {current} and {previous} for tags")
# @click.option("--issue-url", default=None, help="Override url for issues, use {id} for issue id")
# @click.option(
# "--issue-pattern",
# default=r"(#([\w-]+))",
# help="Override regex pattern for issues in commit messages. Should contain two groups, original match and ID used "
# "by issue-url.",
# )
# @click.option(
# "--tag-pattern",
# default=None,
# help="override regex pattern for release tags. "
# "By default use semver tag names semantic. "
# "tag should be contain in one group named 'version'.",
# )
# @click.option("--tag-prefix", default="", help='prefix used in version tags, default: "" ')
# @click.option("--stdout", is_flag=True)
# @click.option("--tag-pattern", default=None, help="Override regex pattern for release tags")
# @click.option("--starting-commit", help="Starting commit to use for changelog generation", default="")
# @click.option("--stopping-commit", help="Stopping commit to use for changelog generation", default="HEAD")
# @click.option(
# "--debug", is_flag=True, help="set logging level to DEBUG",
# )
def main(
repo,
description,
latest_version,
title="Changelog",
output="CHANGELOG.md",
remote ="origin",
unreleased=False,
diff_url=None,
    issue_url=None,
    issue_pattern=r"(#([\w-]+))",
tag_prefix="",
stdout=True,
tag_pattern=None,
starting_commit="",
stopping_commit ="HEAD",
debug = False
):
if debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug("Logging level has been set to DEBUG")
# Convert the repository name to an absolute path
repo = os.path.abspath(repo)
repository = GitRepository(
repo,
latest_version=latest_version,
skip_unreleased=not unreleased,
tag_prefix=tag_prefix,
tag_pattern=tag_pattern,
)
presenter = MarkdownPresenter()
changelog = generate_changelog(
repository,
presenter,
title,
description,
remote=remote,
issue_pattern=issue_pattern,
issue_url=issue_url,
diff_url=diff_url,
starting_commit=starting_commit,
stopping_commit=stopping_commit,
)
# if stdout:
# print(changelog)
# else:
# output.write(changelog)
changelog_file = os.path.join(repo, "CHANGELOG.md")
write_changelog(changelog_file, changelog)
def write_changelog(changelog_file, changelog):
if os.path.exists(changelog_file):
with open(changelog_file, 'r') as f:
data = f.read()
with open(changelog_file, 'w') as f:
# f.write(changelog + '\n\n' + data)
f.write(changelog)
else:
with open(changelog_file, 'w') as f:
f.write(changelog)
if __name__ == "__main__":
main() | 33.821138 | 122 | 0.629567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,357 | 0.566587 |
40ab6e634c98f0a3601e54792ef5853e9f4bf06f | 6,429 | py | Python | SQED-Generator/Generators/constraint_generator.py | upscale-project/generic-sqed-demo | aa44a7563e6249c6f0641c13e8ca0b013c497df1 | [
"BSD-3-Clause"
]
| 6 | 2019-09-10T02:56:47.000Z | 2021-12-18T11:17:16.000Z | SQED-Generator/Generators/constraint_generator.py | upscale-project/generic-sqed-demo | aa44a7563e6249c6f0641c13e8ca0b013c497df1 | [
"BSD-3-Clause"
]
| 1 | 2019-07-19T17:05:20.000Z | 2019-07-19T17:05:20.000Z | SQED-Generator/Generators/constraint_generator.py | upscale-project/generic-sqed-demo | aa44a7563e6249c6f0641c13e8ca0b013c497df1 | [
"BSD-3-Clause"
]
| 2 | 2019-10-02T00:35:14.000Z | 2019-10-23T21:13:14.000Z | # Copyright (c) Stanford University
#
# This source code is patent protected and being made available under the
# terms explained in the ../LICENSE-Academic and ../LICENSE-GOV files.
# Author: Mario J Srouji
# Email: [email protected]
import copy
import sys
sys.path.append("../FormatParsers/")
sys.path.append("../Interface/")
import format_parser as P
import module_interface as I
def generate_constraints_file(MODULENAME, INPUTS, OUTPUTS, format_dicts):
# Get ISA information
isa_info = format_dicts["ISA"]
# Get register names
registers = format_dicts["REGISTERS"]
# Get memory fields needed for modification
memory = format_dicts["MEMORY"]
# Get constraints for qed module setup
qed_constraints = format_dicts["QEDCONSTRAINTS"]
# Get the instruction types
ins_types = format_dicts["INSTYPES"]
# Get the instruction fields for each type
ins_fields = format_dicts["INSFIELDS"]
# Get instruction types requirements
ins_reqs = format_dicts["INSREQS"]
# Get the bit fields
bit_fields = format_dicts["BITFIELDS"]
# Get all instruction types
instructions = {}
for ins in format_dicts["INSTYPES"].keys():
if ins != "CONSTRAINT":
instructions[ins] = format_dicts[ins]
# Verilog file
verilog = ""
# Adds module header definition
verilog += I.module_header(MODULENAME, INPUTS, OUTPUTS)
verilog += I.newline(2)
# Instantiate inputs
for inp in INPUTS:
verilog += I.signal_def(INPUTS[inp], "input", inp, num_spaces=2)
verilog += I.newline(1)
# Instantiate outputs
for out in OUTPUTS:
verilog += I.signal_def(OUTPUTS[out], "output", out, num_spaces=2)
verilog += I.newline(1)
# Instantiate bit fields
verilog += I.newline(1)
for bit_field in bit_fields:
if bit_field != "CONSTRAINT":
msb, lsb = bit_fields[bit_field].split()
bits = int(msb) - int(lsb) + 1
verilog += I.signal_def(bits, "wire", bit_field, num_spaces=2)
verilog += I.newline(1)
# Instantiate instructions
verilog += I.newline(1)
for ins_type in instructions:
if ins_type != "NOP":
verilog += I.signal_def(1, "wire", "FORMAT_"+ins_type, num_spaces=2)
verilog += I.newline(1)
verilog += I.signal_def(1, "wire", "ALLOWED_"+ins_type, num_spaces=2)
verilog += I.newline(1)
for ins in instructions[ins_type]:
if ins != "CONSTRAINT":
verilog += I.signal_def(1, "wire", ins, num_spaces=2)
verilog += I.newline(1)
verilog += I.newline(1)
# Assign bit fields
for bit_field in bit_fields:
if bit_field != "CONSTRAINT":
msb, lsb = bit_fields[bit_field].split()
verilog += I.assign_def(bit_field, I.signal_index("instruction", msb, lsb), num_spaces=2)
verilog += I.newline(1)
# Assign instruction types
verilog += I.newline(1)
for ins_type in instructions:
type_constraints = instructions[ins_type]["CONSTRAINT"]
constraints = type_constraints
if qed_constraints["half_registers"] == "1":
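            # Half-register (QED) mode: constrain register operands to the lower half
            # of the register file (indices < num_registers/2).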
fields = ins_fields[ins_type].split()
for field in fields:
if field in registers:
                    constraints.append(I._lt(field, str(int(isa_info["num_registers"]) // 2), parens=True))
if ins_type != "NOP" and len(constraints) > 0:
expression = constraints[0]
for i in range(1, len(constraints)):
expression = I._and(expression, constraints[i], parens=False)
verilog += I.assign_def("FORMAT_"+ins_type, expression, num_spaces=2)
verilog += I.newline(1)
allowed_expression = ""
for ins in instructions[ins_type]:
if ins != "CONSTRAINT":
fields = instructions[ins_type][ins]
reqs = fields["CONSTRAINT"]
for field in fields:
if field != "CONSTRAINT":
if type(fields[field]) == type([]):
first = fields[field][0]
req_expression = I._equals(field, I._constant(len(first), first), parens=True)
for req in fields[field][1:]:
equality = I._equals(field, I._constant(len(req), req), parens=True)
req_expression = I._or(req_expression, equality, parens=False)
req_expression = "(" + req_expression + ")"
reqs.append(req_expression)
else:
equality = I._equals(field, I._constant(len(fields[field]), fields[field]), parens=True)
reqs.append(equality)
if ins != "NOP":
reqs_expression = "FORMAT_" + ins_type
for i in range(len(reqs)):
reqs_expression = I._and(reqs_expression, reqs[i], parens=False)
else:
reqs_expression = reqs[0]
for i in range(1, len(reqs)):
reqs_expression = I._and(reqs_expression, reqs[i], parens=False)
verilog += I.assign_def(ins, reqs_expression, num_spaces=2)
verilog += I.newline(1)
if allowed_expression == "":
allowed_expression = ins
else:
allowed_expression = I._or(allowed_expression, ins, parens=False)
verilog += I.assign_def("ALLOWED_"+ins_type, allowed_expression, num_spaces=2)
verilog += I.newline(2)
# Property assertion
assertions = instructions.keys()
property_expression = ""
for ins_type in assertions:
if property_expression == "":
property_expression = "ALLOWED_" + ins_type
else:
property_expression = I._or(property_expression, "ALLOWED_"+ins_type, parens=False)
verilog += I.always_def("clk", num_spaces=2) + I.begin(num_spaces=1)
verilog += I.newline(1)
verilog += I.property_def(property_expression, num_spaces=4)
verilog += I.newline(1)
verilog += I.end(num_spaces=2)
verilog += I.newline(1)
# End module with footer
verilog += I.newline(1)
verilog += I.module_footer()
return verilog
| 36.737143 | 116 | 0.586872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,141 | 0.177477 |
40abfa616e38a32a5059cf71c64a0d859cac8dd9 | 3,624 | py | Python | modules/losses.py | Sapperdomonik/retinaface-tf2 | af06baffb7f5bd099e5ca6fec51c94ed298a3926 | [
"MIT"
]
| null | null | null | modules/losses.py | Sapperdomonik/retinaface-tf2 | af06baffb7f5bd099e5ca6fec51c94ed298a3926 | [
"MIT"
]
| null | null | null | modules/losses.py | Sapperdomonik/retinaface-tf2 | af06baffb7f5bd099e5ca6fec51c94ed298a3926 | [
"MIT"
]
| null | null | null | import tensorflow as tf
def _smooth_l1_loss(y_true, y_pred):
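    # Huber-style smooth L1: quadratic (0.5 * t^2) for |t| < 1, linear (|t| - 0.5) otherwise.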
t = tf.abs(y_pred - y_true)
return tf.where(t < 1, 0.5 * t ** 2, t - 0.5)
def MultiBoxLoss(num_class=2, neg_pos_ratio=3):
"""multi-box loss"""
def multi_box_loss(y_true, y_pred):
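        # y_true layout per prior: [0:4] box regression targets, [4:12] landmark targets,
        # [12] landmark-valid flag, [13] class label (1 pos, 0 neg, -1 ignore).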
num_batch = tf.shape(y_true)[0]
num_prior = tf.shape(y_true)[1]
loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4])
landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 8])
class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class])
loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4])
landm_true = tf.reshape(y_true[..., 4:12], [num_batch * num_prior, 8])
landm_valid = tf.reshape(y_true[..., 12], [num_batch * num_prior, 1])
class_true = tf.reshape(y_true[..., 13], [num_batch * num_prior, 1])
# define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
# landm_valid = 1 (w landm), 0 (w/o landm)
mask_pos = tf.equal(class_true, 1)
mask_neg = tf.equal(class_true, 0)
mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
# landm loss (smooth L1)
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
tf.boolean_mask(landm_pred, mask_landm_b))
loss_landm = tf.reduce_mean(loss_landm)
# localization loss (smooth L1)
mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true))
loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
tf.boolean_mask(loc_pred, mask_pos_b))
loss_loc = tf.reduce_mean(loss_loc)
# classification loss (crossentropy)
# 1. compute max conf across batch for hard negative mining
loss_class = tf.where(mask_neg,
1 - class_pred[:, 0][..., tf.newaxis], 0)
# 2. hard negative mining
loss_class = tf.reshape(loss_class, [num_batch, num_prior])
loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING')
loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
num_pos_per_batch = tf.reduce_sum(
tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
tf.cast(num_prior, tf.float32) - 1)
mask_hard_neg = tf.reshape(
tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
[num_batch * num_prior, 1])
# 3. classification loss including positive and negative examples
loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
loss_class_mask_b = tf.broadcast_to(loss_class_mask,
tf.shape(class_pred))
filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
loss_class_mask)
filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
loss_class = tf.keras.losses.sparse_categorical_crossentropy(
y_true=filter_class_true, y_pred=filter_class_pred)
loss_class = tf.reduce_mean(loss_class)
return loss_loc, loss_landm, loss_class
return multi_box_loss
| 48.32 | 79 | 0.625828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.109823 |
40ac4ec777b7bc387be14a996d46bdf5f0da5291 | 2,416 | py | Python | tests.py | ckelly/pybingmaps | 9214e3a4c2c9e756848fac7c0d76c46dcc64b65d | [
"MIT"
]
| null | null | null | tests.py | ckelly/pybingmaps | 9214e3a4c2c9e756848fac7c0d76c46dcc64b65d | [
"MIT"
]
| null | null | null | tests.py | ckelly/pybingmaps | 9214e3a4c2c9e756848fac7c0d76c46dcc64b65d | [
"MIT"
]
| null | null | null | import unittest
import random
from time import sleep
import os
from bingmaps import *
class BingMapsTestError(Exception):
"""Bing Maps test exception"""
def __init__(self, reason):
self.reason = unicode(reason)
def __str__(self):
return self.reason
# TODO: enter your key for testing
api_key = ''
class DirectionsTests(unittest.TestCase):
def setUp(self):
self.api = BingMapsAPI(api_key=api_key)
def testBasicNav(self):
# start - 717 Market St
# end - Ferry Plaza, San Francisco, CA
# we shrunk the precision to match return values for easier comparison
start_lat = "37.786861"
start_lon = "-122.403689"
end_lat = "37.795556"
end_lon = "-122.392124"
start = start_lat+","+start_lon
end = end_lat+","+end_lon
ret = self.api.routes(waypoints=[start, end])
# verify start and end points are reflected in response
self.assertNotEqual(ret, {})
estimated_total = ret['resourceSets'][0]['estimatedTotal']
self.assertEqual(estimated_total, 1)
routeLegs = ret['resourceSets'][0]['resources'][0]['routeLegs']
self.assertEqual(len(routeLegs), 1)
itinerary_items = routeLegs[0]['itineraryItems']
self.assertNotEqual(itinerary_items, [])
# skip the last step, as it doesn't have a transport Mode
for i in itinerary_items:
self.assertEqual(i['details'][0]['mode'], 'Driving')
def testPedestrianNav(self):
start_lat = "37.7868609332517"
start_lon = "-122.403689949149"
end_lat = "37.795556930015"
end_lon = "-122.392124051039"
start = start_lat+","+start_lon
end = end_lat+","+end_lon
ret = self.api.routes(waypoints=[start,end], travelMode='Walking')
self.assertNotEqual(ret, {})
legs = ret['resourceSets'][0]['resources'][0]['routeLegs']
self.assertNotEqual(legs, [])
legs = legs[0]
itinerary_items = legs['itineraryItems']
self.assertNotEqual(itinerary_items, [])
# skip the last step, as it doesn't have a transport Mode
for i in itinerary_items:
self.assertEqual(i['details'][0]['mode'], 'Walking')
if __name__ == '__main__':
unittest.main() | 29.82716 | 78 | 0.598096 | 2,221 | 0.919288 | 0 | 0 | 0 | 0 | 0 | 0 | 701 | 0.290149 |
40acba1acfb883bbd0db070af8041dc100486a53 | 1,153 | py | Python | fds/config.py | dvershinin/fds | 4c4c96deb3d2bbe4d0853f601c3dc2b87801ced4 | [
"BSD-2-Clause"
]
| 9 | 2020-04-29T14:25:06.000Z | 2021-12-30T03:28:05.000Z | fds/config.py | dvershinin/fds | 4c4c96deb3d2bbe4d0853f601c3dc2b87801ced4 | [
"BSD-2-Clause"
]
| 18 | 2020-01-28T22:07:07.000Z | 2022-03-20T16:06:12.000Z | fds/config.py | dvershinin/fds | 4c4c96deb3d2bbe4d0853f601c3dc2b87801ced4 | [
"BSD-2-Clause"
]
| null | null | null | from cds.CloudflareWrapper import suggest_set_up, cf_config_filename
from .FirewallWrapper import FirewallWrapper
import logging as log
def open_web_if_webserver_running():
fw = FirewallWrapper()
from .utils import is_process_running, query_yes_no
webserver_running = is_process_running('nginx')
if webserver_running:
zone = fw.fw.getDefaultZone()
zone_services = fw.fw.getServices(zone)
if 'http' not in zone_services or 'https' not in zone_services:
open_web = query_yes_no('Webserver is running. Open up HTTP/HTTPs ports?')
if open_web:
fw.add_service('http')
fw.add_service('https')
else:
log.info('Webserver is running and ports are already open.')
def action_config():
# if nginx runs, check/ask to ensure open web ports:
open_web_if_webserver_running()
# if cloudflare.cfg is missing, check/ask to ensure Cloudflare support:
from cds.CloudflareWrapper import CloudflareWrapper
cw = CloudflareWrapper()
if cw.use:
log.info('Cloudflare integration validated.')
else:
suggest_set_up()
| 36.03125 | 86 | 0.692975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.251518 |
40adb16a80ad4faf260352c08db6efc0124c7ac3 | 450 | py | Python | awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 269bfbe45a35338fab9c71fc7d8de48b61b1580b | [
"MIT"
]
| null | null | null | awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 269bfbe45a35338fab9c71fc7d8de48b61b1580b | [
"MIT"
]
| null | null | null | awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 269bfbe45a35338fab9c71fc7d8de48b61b1580b | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-24 16:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awardapp', '0003_auto_20191024_1606'),
]
operations = [
migrations.AlterField(
model_name='project',
name='link',
field=models.TextField(max_length=130),
),
]
| 21.428571 | 51 | 0.617778 | 294 | 0.653333 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.264444 |
40adc41d2f17560f722bc8589f813ea290672937 | 21,395 | py | Python | hikari/events/channel_events.py | Reliku/hikari | c6e62b750ce35885a5e4124ffe8df6445ab34acd | [
"MIT"
]
| null | null | null | hikari/events/channel_events.py | Reliku/hikari | c6e62b750ce35885a5e4124ffe8df6445ab34acd | [
"MIT"
]
| null | null | null | hikari/events/channel_events.py | Reliku/hikari | c6e62b750ce35885a5e4124ffe8df6445ab34acd | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Events that fire when channels are modified.
This does not include message events, nor reaction events.
"""
from __future__ import annotations
__all__: typing.List[str] = [
"ChannelEvent",
"GuildChannelEvent",
"DMChannelEvent",
"ChannelCreateEvent",
"GuildChannelCreateEvent",
"ChannelUpdateEvent",
"GuildChannelUpdateEvent",
"ChannelDeleteEvent",
"GuildChannelDeleteEvent",
"PinsUpdateEvent",
"GuildPinsUpdateEvent",
"DMPinsUpdateEvent",
"InviteCreateEvent",
"InviteDeleteEvent",
"WebhookUpdateEvent",
]
import abc
import typing
import attr
from hikari import channels
from hikari import intents
from hikari import traits
from hikari.events import base_events
from hikari.events import shard_events
from hikari.internal import attr_extensions
if typing.TYPE_CHECKING:
import datetime
from hikari import guilds
from hikari import invites
from hikari import messages
from hikari import snowflakes
from hikari import webhooks
from hikari.api import shard as gateway_shard
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelEvent(shard_events.ShardEvent, abc.ABC):
"""Event base for any channel-bound event in guilds or private messages."""
@property
@abc.abstractmethod
def channel_id(self) -> snowflakes.Snowflake:
"""ID of the channel the event relates to.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the channel this event relates to.
"""
@abc.abstractmethod
async def fetch_channel(self) -> channels.PartialChannel:
"""Perform an API call to fetch the details about this channel.
!!! note
For `ChannelDeleteEvent`-derived events, this will always raise
an exception, since the channel will have already been removed.
Returns
-------
hikari.channels.PartialChannel
A derivative of `hikari.channels.PartialChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
@base_events.requires_intents(intents.Intents.GUILDS)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelEvent(ChannelEvent, abc.ABC):
"""Event base for any channel-bound event in guilds."""
@property
@abc.abstractmethod
def guild_id(self) -> snowflakes.Snowflake:
"""ID of the guild that this event relates to.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the guild that relates to this event.
"""
@property
def guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild that this event relates to, if known.
If not, return `builtins.None`.
Returns
-------
typing.Optional[hikari.guilds.GatewayGuild]
The gateway guild this event relates to, if known. Otherwise
this will return `builtins.None`.
"""
return self.app.cache.get_available_guild(self.guild_id) or self.app.cache.get_unavailable_guild(self.guild_id)
async def fetch_guild(self) -> guilds.RESTGuild:
"""Perform an API call to fetch the guild that this event relates to.
Returns
-------
hikari.guilds.RESTGuild
The guild that this event occurred in.
"""
return await self.app.rest.fetch_guild(self.guild_id)
@property
def channel(self) -> typing.Optional[channels.GuildChannel]:
"""Get the cached channel that this event relates to, if known.
If not, return `builtins.None`.
Returns
-------
typing.Optional[hikari.channels.GuildChannel]
The cached channel this event relates to. If not known, this
will return `builtins.None` instead.
"""
return self.app.cache.get_guild_channel(self.channel_id)
async def fetch_channel(self) -> channels.GuildChannel:
"""Perform an API call to fetch the details about this channel.
!!! note
For `ChannelDeleteEvent`-derived events, this will always raise
an exception, since the channel will have already been removed.
Returns
-------
hikari.channels.GuildChannel
A derivative of `hikari.channels.GuildChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.GuildChannel)
return channel
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class DMChannelEvent(ChannelEvent, abc.ABC):
"""Event base for any channel-bound event in private messages."""
async def fetch_channel(self) -> channels.PrivateChannel:
"""Perform an API call to fetch the details about this channel.
!!! note
For `ChannelDeleteEvent`-derived events, this will always raise
an exception, since the channel will have already been removed.
Returns
-------
hikari.channels.PrivateChannel
A derivative of `hikari.channels.PrivateChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.PrivateChannel)
return channel
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelCreateEvent(ChannelEvent, abc.ABC):
"""Base event for any channel being created."""
@property
@abc.abstractmethod
def channel(self) -> channels.PartialChannel:
"""Channel this event represents.
Returns
-------
hikari.channels.PartialChannel
The channel that was created.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.channel.id
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelCreateEvent(GuildChannelEvent, ChannelCreateEvent):
"""Event fired when a guild channel is created."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel: channels.GuildChannel = attr.ib(repr=True)
"""Guild channel that this event represents.
Returns
-------
hikari.channels.GuildChannel
The guild channel that was created.
"""
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
return self.channel.guild_id
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelUpdateEvent(ChannelEvent, abc.ABC):
"""Base event for any channel being updated."""
@property
@abc.abstractmethod
def channel(self) -> channels.PartialChannel:
"""Channel this event represents.
Returns
-------
hikari.channels.PartialChannel
The channel that was updated.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.channel.id
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelUpdateEvent(GuildChannelEvent, ChannelUpdateEvent):
"""Event fired when a guild channel is edited."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
old_channel: channels.GuildChannel = attr.ib(repr=True)
"""Old guild channel object from cache.
Returns
-------
hikari.channels.GuildChannel
"""
channel: channels.GuildChannel = attr.ib(repr=True)
"""Guild channel that this event represents.
Returns
-------
hikari.channels.GuildChannel
The guild channel that was updated.
"""
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
return self.channel.guild_id
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelDeleteEvent(ChannelEvent, abc.ABC):
"""Base event for any channel being deleted."""
@property
@abc.abstractmethod
def channel(self) -> channels.PartialChannel:
"""Channel this event represents.
Returns
-------
hikari.channels.PartialChannel
The channel that was deleted.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.channel.id
if typing.TYPE_CHECKING:
# Channel will never be found.
async def fetch_channel(self) -> typing.NoReturn:
...
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelDeleteEvent(GuildChannelEvent, ChannelDeleteEvent):
"""Event fired when a guild channel is deleted."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel: channels.GuildChannel = attr.ib(repr=True)
"""Guild channel that this event represents.
Returns
-------
hikari.channels.GuildChannel
The guild channel that was deleted.
"""
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
return self.channel.guild_id
if typing.TYPE_CHECKING:
# Channel will never be found.
async def fetch_channel(self) -> typing.NoReturn:
...
# TODO: find out what private message intents are needed.
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class PinsUpdateEvent(ChannelEvent, abc.ABC):
"""Base event fired when a message is pinned/unpinned in a channel."""
@property
@abc.abstractmethod
def last_pin_timestamp(self) -> typing.Optional[datetime.datetime]:
"""Datetime of when the most recent message was pinned in the channel.
Will be `builtins.None` if nothing is pinned or the information is
unavailable.
Returns
-------
typing.Optional[datetime.datetime]
The datetime of the most recent pinned message in the channel,
or `builtins.None` if no pins are available.
"""
@abc.abstractmethod
async def fetch_channel(self) -> channels.TextChannel:
"""Perform an API call to fetch the details about this channel.
Returns
-------
hikari.channels.TextChannel
A derivative of `hikari.channels.TextChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
async def fetch_pins(self) -> typing.Sequence[messages.Message]:
"""Perform an API call to fetch the pinned messages in this channel.
Returns
-------
typing.Sequence[hikari.messages.Message]
The pinned messages in this channel.
"""
return await self.app.rest.fetch_pins(self.channel_id)
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildPinsUpdateEvent(PinsUpdateEvent, GuildChannelEvent):
"""Event fired when a message is pinned/unpinned in a guild channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
guild_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from GuildChannelEvent>>.
last_pin_timestamp: typing.Optional[datetime.datetime] = attr.ib(repr=True)
# <<inherited docstring from ChannelPinsUpdateEvent>>.
@property
def channel(self) -> typing.Optional[channels.GuildTextChannel]:
"""Get the cached channel that this event relates to, if known.
If not, return `builtins.None`.
Returns
-------
typing.Optional[hikari.channels.GuildTextChannel]
The cached channel this event relates to. If not known, this
will return `builtins.None` instead.
"""
channel = self.app.cache.get_guild_channel(self.channel_id)
assert isinstance(channel, channels.GuildTextChannel)
return channel
async def fetch_channel(self) -> channels.GuildTextChannel:
"""Perform an API call to fetch the details about this channel.
Returns
-------
hikari.channels.GuildTextChannel
A derivative of `hikari.channels.GuildTextChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.GuildTextChannel)
return channel
# TODO: This is not documented as having an intent, is this right? The guild version requires GUILDS intent.
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class DMPinsUpdateEvent(PinsUpdateEvent, DMChannelEvent):
"""Event fired when a message is pinned/unpinned in a private channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
last_pin_timestamp: typing.Optional[datetime.datetime] = attr.ib(repr=True)
# <<inherited docstring from ChannelPinsUpdateEvent>>.
async def fetch_channel(self) -> channels.DMChannel:
"""Perform an API call to fetch the details about this channel.
Returns
-------
hikari.channels.DMChannel
A derivative of `hikari.channels.DMChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.DMChannel)
return channel
@base_events.requires_intents(intents.Intents.GUILD_INVITES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class InviteEvent(GuildChannelEvent, abc.ABC):
"""Base event type for guild invite updates."""
@property
@abc.abstractmethod
def code(self) -> str:
"""Code that is used in the URL for the invite.
Returns
-------
builtins.str
The invite code.
"""
async def fetch_invite(self) -> invites.Invite:
"""Perform an API call to retrieve an up-to-date image of this invite.
Returns
-------
hikari.invites.Invite
The invite object.
"""
return await self.app.rest.fetch_invite(self.code)
@base_events.requires_intents(intents.Intents.GUILD_INVITES)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class InviteCreateEvent(InviteEvent):
"""Event fired when an invite is created in a channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
invite: invites.InviteWithMetadata = attr.ib()
"""Invite that was created.
Returns
-------
hikari.invites.InviteWithMetadata
The created invite object.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.invite.channel_id
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
# This will always be non-None for guild channel invites.
assert self.invite.guild_id is not None
return self.invite.guild_id
@property
def code(self) -> str:
# <<inherited docstring from InviteEvent>>.
return self.invite.code
@base_events.requires_intents(intents.Intents.GUILD_INVITES)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class InviteDeleteEvent(InviteEvent):
"""Event fired when an invite is deleted from a channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
guild_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from GuildChannelEvent>>.
code: str = attr.ib()
# <<inherited docstring from InviteEvent>>.
if typing.TYPE_CHECKING:
# Invite will never be found.
async def fetch_invite(self) -> typing.NoReturn:
...
@base_events.requires_intents(intents.Intents.GUILD_WEBHOOKS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class WebhookUpdateEvent(GuildChannelEvent):
"""Event fired when a webhook is created/updated/deleted in a channel.
Unfortunately, Discord does not provide any information on what webhook
actually changed, nor specifically whether it was created/updated/deleted,
so this event is pretty useless unless you keep track of the webhooks in
the channel manually beforehand.
"""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
guild_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from GuildChannelEvent>>.
async def fetch_channel_webhooks(self) -> typing.Sequence[webhooks.Webhook]:
"""Perform an API call to fetch the webhooks for this channel.
Returns
-------
typing.Sequence[hikari.webhooks.Webhook]
The webhooks in this channel.
"""
return await self.app.rest.fetch_channel_webhooks(self.channel_id)
async def fetch_guild_webhooks(self) -> typing.Sequence[webhooks.Webhook]:
"""Perform an API call to fetch the webhooks for this guild.
Returns
-------
typing.Sequence[hikari.webhooks.Webhook]
The webhooks in this guild.
"""
return await self.app.rest.fetch_guild_webhooks(self.guild_id)
| 33.906498 | 119 | 0.686282 | 17,046 | 0.796728 | 0 | 0 | 18,973 | 0.886796 | 5,217 | 0.243842 | 10,967 | 0.512596 |
40af0b1139a38fce3114910895fc7959fcc89bca | 8,649 | py | Python | tests/unit/test_coordinator.py | sopel39/presto-admin | 6e7aee3427bdbea6da2deb41b7f090ef6fdcadd9 | [
"Apache-2.0"
]
| 34 | 2016-01-08T21:02:13.000Z | 2017-03-10T02:01:03.000Z | tests/unit/test_coordinator.py | sopel39/presto-admin | 6e7aee3427bdbea6da2deb41b7f090ef6fdcadd9 | [
"Apache-2.0"
]
| 3 | 2016-01-27T19:11:14.000Z | 2016-12-02T21:29:53.000Z | tests/unit/test_coordinator.py | sopel39/presto-admin | 6e7aee3427bdbea6da2deb41b7f090ef6fdcadd9 | [
"Apache-2.0"
]
| 5 | 2016-04-29T05:27:43.000Z | 2018-01-12T07:50:25.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the coordinator module
"""
from fabric.api import env
from mock import patch
from prestoadmin import coordinator
from prestoadmin.util.exception import ConfigurationError
from tests.base_test_case import BaseTestCase
class TestCoordinator(BaseTestCase):
def test_build_all_defaults(self):
env.roledefs['coordinator'] = 'a'
env.roledefs['workers'] = ['b', 'c']
actual_default = coordinator.Coordinator().build_all_defaults()
expected = {'node.properties':
{'node.environment': 'presto',
'node.data-dir': '/var/lib/presto/data',
'node.launcher-log-file': '/var/log/presto/launcher.log',
'node.server-log-file': '/var/log/presto/server.log',
'catalog.config-dir': '/etc/presto/catalog',
'plugin.dir': '/usr/lib/presto/lib/plugin'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://a:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'false',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(actual_default, expected)
def test_defaults_coord_is_worker(self):
env.roledefs['coordinator'] = ['a']
env.roledefs['worker'] = ['a', 'b', 'c']
actual_default = coordinator.Coordinator().build_all_defaults()
expected = {'node.properties': {
'node.environment': 'presto',
'node.data-dir': '/var/lib/presto/data',
'node.launcher-log-file': '/var/log/presto/launcher.log',
'node.server-log-file': '/var/log/presto/server.log',
'catalog.config-dir': '/etc/presto/catalog',
'plugin.dir': '/usr/lib/presto/lib/plugin'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://a:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'true',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(actual_default, expected)
def test_validate_valid(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'coordinator': 'true',
'discovery.uri': 'http://uri'}}
self.assertEqual(conf, coordinator.Coordinator.validate(conf))
def test_validate_default(self):
env.roledefs['coordinator'] = 'localhost'
env.roledefs['workers'] = ['localhost']
conf = coordinator.Coordinator().build_all_defaults()
self.assertEqual(conf, coordinator.Coordinator.validate(conf))
def test_invalid_conf(self):
conf = {'node.propoerties': {}}
self.assertRaisesRegexp(ConfigurationError,
'Missing configuration for required file: ',
coordinator.Coordinator.validate, conf)
def test_invalid_conf_missing_coordinator(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'discovery.uri': 'http://uri'}
}
self.assertRaisesRegexp(ConfigurationError,
'Must specify coordinator=true in '
'coordinator\'s config.properties',
coordinator.Coordinator.validate, conf)
def test_invalid_conf_coordinator(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'coordinator': 'false',
'discovery.uri': 'http://uri'}
}
self.assertRaisesRegexp(ConfigurationError,
'Coordinator cannot be false in the '
'coordinator\'s config.properties',
coordinator.Coordinator.validate, conf)
@patch('prestoadmin.node.config.write_conf_to_file')
@patch('prestoadmin.node.get_presto_conf')
def test_get_conf_empty_is_default(self, get_conf_from_file_mock,
write_mock):
env.roledefs['coordinator'] = 'j'
env.roledefs['workers'] = ['K', 'L']
get_conf_from_file_mock.return_value = {}
self.assertEqual(coordinator.Coordinator().get_conf(),
coordinator.Coordinator().build_all_defaults())
@patch('prestoadmin.node.config.write_conf_to_file')
@patch('prestoadmin.node.get_presto_conf')
def test_get_conf(self, get_conf_from_file_mock, write_mock):
env.roledefs['coordinator'] = 'j'
env.roledefs['workers'] = ['K', 'L']
file_conf = {'node.properties': {'my-property': 'value',
'node.environment': 'test'}}
get_conf_from_file_mock.return_value = file_conf
expected = {'node.properties':
{'my-property': 'value',
'node.environment': 'test'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://j:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'false',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(coordinator.Coordinator().get_conf(), expected)
| 47.78453 | 78 | 0.492889 | 7,851 | 0.907735 | 0 | 0 | 2,283 | 0.263961 | 0 | 0 | 3,528 | 0.407908 |
40af4a6cd5eabd173caad42e65be14761273bd46 | 3,911 | py | Python | other/string chains/strings4.py | saulc/myth-math | 5e278eb8fbaf16a01f5f021aca2142e5ce3131ec | [
"MIT"
]
| null | null | null | other/string chains/strings4.py | saulc/myth-math | 5e278eb8fbaf16a01f5f021aca2142e5ce3131ec | [
"MIT"
]
| null | null | null | other/string chains/strings4.py | saulc/myth-math | 5e278eb8fbaf16a01f5f021aca2142e5ce3131ec | [
"MIT"
]
| null | null | null | # Saul Castro
# Hiralben Hirpara
# config file format
import random
def openFile():
file = open("test.txt", 'r')
return file
def printFile(f):
print(f.read())
def readInput():
testout = "CarpenteRatcheThread"
file = open("test.txt", 'r')
s = str(file.read())
words = s.split(" ");
# print("Expected Output: " + testout)
for w in words:
if w == '\n': words.remove(w)
print(words)
testWords(words, False)
# makeChains(words)
def reduceRepeats(inputstr, showChecks=False):
print(" testing repeat filter start.___ ")
ret = []
# words, i = showlist(inputstr, False)
w = inputstr.split(" ");
print(w)
i = 0
for s in w:
i +=1
if showChecks: print(i, ": ", s)
        if s not in ret: ret.append(s)
print(i, " elements checked, ", len(ret) , " unique words found. ? ")
# showlist(ret, False)
for w in ret:
print(" . . ", w)
print(" testing repeat filter end.___ ")
return ret
def testWords(words, showChecks=False):
if showChecks: print(" testing chatbot ")
for w in words:
print( w )
sen = []
for i in range(0, len(words) ):
        sen.append(words[i])
        for j in range(0, len(words)):
            sen.append(words[j] + " " + words[i])
            if j % 3 == 0: sen.append(" ")
# showlist(w, True)
# print(i, " : ", words[i])
# print(" ", words[j], words[i])
if showChecks:
print(sen ) #raw data
st = listtoString(sen, " ") #data only
print(st) #show it
ft = reduceRepeats(sen[0], False)
print(ft)
if showChecks:
print(len(words), " words found")
print(len(sen), " phrases found")
# list to string with sep 'char'
def listtoString( lst, sep):
s = ""
for c in lst: s += str(c) + sep
return s
# string to list, count
def showlist( li, showChecks=False):
w = li.split(" ");
print(w)
i = 0
for s in li:
i +=1
if showChecks: print(i, ": ", s)
return w, i
def makeChains(words, showChecks=False):
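    # Pair words whose ends chain together (last letter of one word == first letter
    # of another); the wordcheck flags record whether each word has already been
    # used as a chain start or a chain end.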
chain = []
wordcheck = []
for i in range(0, len(words) ):
# print(i )
se = [False, False] #used as start word, end word
wordcheck.append(se)
for i in range(0, len(words) ):
if showChecks: print(str(i) + " - " + words[i] )
aword = words[i]
for j in range(i+1, len(words) ):
#check the rest of the words for matching links
bword = words[j]
if showChecks: print(" " + str(j) + " ~ " + bword )
if wordcheck[j][0] == False and wordcheck[j][1] == False:
temp = checkLinks(aword, bword)
if showChecks: print("Check state: " + str(temp) )
if temp == 1: #word have not been swapped
wordcheck[i][0] = True
wordcheck[j][1] = True
chain.append(aword)
chain.append(bword)
elif temp == 2: #words have been swapped, swap flag indexes to match.
wordcheck[j][0] = True
wordcheck[i][1] = True
chain.append(bword)
chain.append(aword)
print(chain)
# k = 0
# for i in wordcheck:
# print("word check: " + str(i) + " = "+ words[k] )
#
# k+= 1
# compare words, return 0 for no match,
# 1 if end of a == start of b
# 2 if end of b == start of a
def checkLinks(a, b):
print(" " + a + " " + b)
s , e = getEnds(a)
ss , ee = getEnds(b)
if e == ss :
return 1
elif s == ee:
return 2
return 0
# st = "start: " + s + " end:" + e
# print("end" + st)
def getEnds(word):
st = word[0]
ed = word[len(word)-1]
return st, ed
if __name__ == '__main__':
readInput()
| 25.730263 | 85 | 0.504475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,054 | 0.269496 |
40b182cffd8ba6689e9b3d11caa57c733d863c65 | 2,646 | py | Python | supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | d5351abe45c571a075c24bd04d328e76293f9230 | [
"MIT"
]
| null | null | null | supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | d5351abe45c571a075c24bd04d328e76293f9230 | [
"MIT"
]
| 2 | 2021-04-10T18:12:44.000Z | 2022-02-09T23:36:43.000Z | supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | d5351abe45c571a075c24bd04d328e76293f9230 | [
"MIT"
]
| null | null | null | from global_utils import *
# target word
TARGET_WORD = 'right'
def display_lowpass_normal(wav, lowpass_signal, fs, label=''):
fig, (axs_raw, axs_low) = plt.subplots(2)
fig.tight_layout(pad=3.0)
fig.set_figheight(FIG_HEIGHT)
fig.set_figwidth(FIG_WIDTH)
# display the plot
axs_raw.plot(wav)
# label the axes
axs_raw.set_ylabel("Amplitude", fontsize=FONT_SIZE)
axs_raw.set_xlabel("Time", fontsize=FONT_SIZE)
# set the title
axs_raw.set_title("Audio sample : {}".format(label), fontsize=FONT_SIZE)
axs_low.plot(lowpass_signal)
# label the axes
axs_low.set_ylabel("Amplitude", fontsize=FONT_SIZE)
axs_low.set_xlabel("Time", fontsize=FONT_SIZE)
# set the title
axs_low.set_title("Audio sample with low pass filter", fontsize=FONT_SIZE)
f_raw, periodogram_raw = signal.periodogram(wav, fs)
f_raw, periodogram_low = signal.periodogram(lowpass_signal, fs)
fig, (axs_periodogram_raw, axs_periodogram_low) = plt.subplots(2)
fig.tight_layout(pad=3.0)
fig.set_figheight(FIG_HEIGHT)
fig.set_figwidth(FIG_WIDTH)
axs_periodogram_raw.semilogy(f_raw, periodogram_raw)
axs_periodogram_raw.set_xlabel('frequency [Hz]', fontsize=FONT_SIZE)
axs_periodogram_raw.set_ylabel('PSD [V**2/Hz]', fontsize=FONT_SIZE)
axs_periodogram_raw.set_title("Periodogram raw signal", fontsize=FONT_SIZE)
axs_periodogram_low.semilogy(f_raw, periodogram_low)
axs_periodogram_low.set_xlabel('frequency [Hz]', fontsize=FONT_SIZE)
axs_periodogram_low.set_ylabel('PSD [V**2/Hz]', fontsize=FONT_SIZE)
axs_periodogram_low.set_title("Periodogram low pass filtered signal", fontsize=FONT_SIZE)
def main(args):
if args.wavfile:
fs, wav = wavfile.read(args.wavfile, "wb")
lowpass_signal = low_pass_filter(wav, sample_rate=fs, cutoff_frequency=1000)
display_lowpass_normal(wav, lowpass_signal, fs)
plt.show()
elif args.indir:
data_dict = get_data(args.indir)
word_samples = data_dict[TARGET_WORD]
mean_lowpass_array, normal_array = mean_low_pass_filter(word_samples, SAMPLE_RATE, CUTOFF_FREQ)
display_lowpass_normal(normal_array, mean_lowpass_array, SAMPLE_RATE, TARGET_WORD)
plt.show()
return 0
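# Example invocations (file and directory names are placeholders):
#   python analysis.py --wavfile some_recording.wav
#   python analysis.py --indir path/to/speech_commands/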
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--wavfile',
help='Path to the .wav files',
required=False
)
parser.add_argument(
'--indir',
help='Absolute path to data directory containing .wav files',
required=False
)
args = parser.parse_args()
main(args)
| 29.730337 | 103 | 0.708239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.161376 |
40b1a05b02e671eeb4b12cc51ccc3740e6e21280 | 2,010 | py | Python | qnarre/base/proof.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
]
| null | null | null | qnarre/base/proof.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
]
| null | null | null | qnarre/base/proof.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
]
| null | null | null | # Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .claim import Claim
from .narrative import Node
from .author import Authority
class Proof(Node):
sign = '!p'
authority = None
def __init__(self,
text=None,
author=None,
agent=None,
authority=None,
factor=2,
**kw):
super().__init__(factor=factor, **kw)
if text:
for k in ('factor', 'bias', 'weight'):
kw.pop(k, None)
self.claim = Claim(text=text, **kw)
if not authority:
if agent:
authority = 'agent'
elif author:
authority = 'self'
if authority:
self.authority = Authority.create(name=authority)
@property
def weight(self):
p = self.partial(self.authority.weight, self.claim.weight)
return p + self.bias
@property
def credibility(self):
return self.weight
@property
def value(self):
a = self.authority.agency
return '{} {}: {}'.format(super().value, a, self.claim.value)
@property
def fields(self):
fs = self.authority.fields
fs.update(self.claim.fields)
fs.update(super().fields)
fs['Credibility'] = self.credibility
return fs
| 30.454545 | 79 | 0.570149 | 1,240 | 0.616915 | 0 | 0 | 524 | 0.260697 | 0 | 0 | 732 | 0.364179 |
40b2a0543dc5434ad9bc96313563cef627bd5d42 | 6,636 | py | Python | learningPygame/Dave/06-SpaceInvaders/space_invaders.py | Rosebotics/catapult2019 | 4f125632f4d144b97ee3ecaf00a517780d510a70 | [
"MIT"
]
| null | null | null | learningPygame/Dave/06-SpaceInvaders/space_invaders.py | Rosebotics/catapult2019 | 4f125632f4d144b97ee3ecaf00a517780d510a70 | [
"MIT"
]
| null | null | null | learningPygame/Dave/06-SpaceInvaders/space_invaders.py | Rosebotics/catapult2019 | 4f125632f4d144b97ee3ecaf00a517780d510a70 | [
"MIT"
]
| null | null | null | import pygame, sys, random, time
from pygame.locals import *
class Missile:
def __init__(self, screen, x):
# Store the data. Initialize: y to 591 and exploded to False.
self.screen = screen
self.x = x
self.y = 591
self.exploded = False
def move(self):
# Make self.y 5 smaller than it was (which will cause the Missile to move UP).
self.y = self.y - 5
def draw(self):
# Draw a vertical, 4 pixels thick, 8 pixels long, red (or green) line on the screen,
# where the line starts at the current position of this Missile.
pygame.draw.line(self.screen, (0, 255, 0), (self.x, self.y), (self.x, self.y - 8), 4)
class Fighter:
def __init__(self, screen, x, y):
# Store the data.
# Set self.missiles to the empty list.
# Load the file "fighter.png" as the image
# Set the colorkey to white (it has a white background that needs removed)
self.screen = screen
self.x = x
self.y = y
self.missiles = []
self.image = pygame.image.load("fighter.png")
self.image.set_colorkey(pygame.Color("White"))
def draw(self):
# Draw this Fighter, using its image at its current (x, y) position.
self.screen.blit(self.image, (self.x, self.y))
def fire(self):
# Construct a new Missile 50 pixels to the right of this Fighter.
# Append that Missile to this Fighter's list of Missile objects.
new_missile = Missile(self.screen, self.x + 50)
self.missiles.append(new_missile)
def remove_exploded_missiles(self):
# Already complete
for k in range(len(self.missiles) - 1, -1, -1):
if self.missiles[k].exploded or self.missiles[k].y < 0:
del self.missiles[k]
class Badguy:
def __init__(self, screen, x, y):
# Store the data.
# Set dead to False and original_x to x and move_right to True.
# Load the file "badguy.png" as the image. and set its colorkey to black.
self.screen = screen
self.x = x
self.y = y
self.dead = False
self.original_x = x
self.move_right = True
self.image = pygame.image.load("badguy.png")
self.image.set_colorkey(pygame.Color("Black"))
def move(self):
        # Move 8 pixels in the current horizontal direction.
        # Switch direction (and drop down 15 pixels) once this Badguy is more than 100 pixels from its original position.
if self.move_right:
self.x = self.x + 8
if self.x > self.original_x + 100:
self.move_right = False
self.y = self.y + 15
else:
self.x = self.x - 8
if self.x < self.original_x - 100:
self.move_right = True
self.y = self.y + 15
def draw(self):
# Draw this Badguy, using its image at its current (x, y) position.
self.screen.blit(self.image, (self.x, self.y))
def hit_by(self, missile):
# Return True if a 70x45 rectangle at this Badguy's current position
# collides with the xy point of the given missile.
# Return False otherwise.
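        # collidepoint treats the missile's (x, y) as a single point, so a hit is registered
        # whenever that point lies inside this Badguy's 70-by-45 sprite rectangle.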
return pygame.Rect(self.x, self.y, 70, 45).collidepoint(missile.x, missile.y)
class EnemyFleet:
def __init__(self, screen, enemy_rows):
# Already done. Prepares the list of Badguys.
self.badguys = []
for j in range(enemy_rows):
for k in range(8):
self.badguys.append(Badguy(screen, 80 * k, 50 * j + 20))
@property
def is_defeated(self):
# Return True if the number of badguys in this Enemy Fleet is 0,
# otherwise return False.
return len(self.badguys) == 0
def move(self):
# Make each badguy in this EnemyFleet move.
for badguy in self.badguys:
badguy.move()
def draw(self):
# Make each badguy in this EnemyFleet draw itself.
for badguy in self.badguys:
badguy.draw()
def remove_dead_badguys(self):
for k in range(len(self.badguys) - 1, -1, -1):
if self.badguys[k].dead:
del self.badguys[k]
# Create a Scoreboard class (from scratch)
# Instance variables: screen, x, y, score, and font (size 30)
# Methods: draw (and __init__)
# Create a scoreboard at location 5, 5
# Draw the scoreboard in the game loop
class Scoreboard:
def __init__(self, screen):
self.screen = screen
self.score = 0
self.font = pygame.font.Font(None, 30)
def draw(self):
text_as_image = self.font.render("Score: " + str(self.score), True, (255, 255, 255))
self.screen.blit(text_as_image, (5, 5))
def main():
pygame.init()
clock = pygame.time.Clock()
pygame.display.set_caption("SPACE INVADERS!")
screen = pygame.display.set_mode((640, 650))
enemy_rows = 3
enemy = EnemyFleet(screen, enemy_rows)
fighter = Fighter(screen, 320, 590)
scoreboard = Scoreboard(screen)
gameover_image = pygame.image.load("gameover.png")
is_game_over = False
while True:
clock.tick(60)
for event in pygame.event.get():
pressed_keys = pygame.key.get_pressed()
if event.type == KEYDOWN and pressed_keys[K_SPACE]:
fighter.fire()
if event.type == QUIT:
sys.exit()
screen.fill((0, 0, 0))
pressed_keys = pygame.key.get_pressed()
if pressed_keys[K_LEFT] and fighter.x > -50:
fighter.x = fighter.x - 5
if pressed_keys[K_RIGHT] and fighter.x < 590:
fighter.x = fighter.x + 5
fighter.draw()
enemy.move()
enemy.draw()
for missile in fighter.missiles:
missile.move()
missile.draw()
for badguy in enemy.badguys:
for missile in fighter.missiles:
if badguy.hit_by(missile):
scoreboard.score = scoreboard.score + 100
badguy.dead = True
missile.exploded = True
fighter.remove_exploded_missiles()
enemy.remove_dead_badguys()
if enemy.is_defeated:
enemy_rows = enemy_rows + 1
enemy = EnemyFleet(screen, enemy_rows)
scoreboard.draw()
if not is_game_over:
pygame.display.update()
for badguy in enemy.badguys:
if badguy.y > 545:
screen.blit(gameover_image, (170, 200))
pygame.display.update()
is_game_over = True
main()
| 32.851485 | 104 | 0.583484 | 4,449 | 0.670434 | 0 | 0 | 181 | 0.027275 | 0 | 0 | 1,717 | 0.25874 |