Dataset schema (one row per source file; nullable columns marked ⌀):

| column | type | range / values |
|---|---|---|
| hexsha | string | lengths 40..40 |
| size | int64 | 5..2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3..248 |
| max_stars_repo_name | string | lengths 5..125 |
| max_stars_repo_head_hexsha | string | lengths 40..78 |
| max_stars_repo_licenses | list | lengths 1..10 |
| max_stars_count | int64 ⌀ | 1..191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24..24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24..24 |
| max_issues_repo_path | string | lengths 3..248 |
| max_issues_repo_name | string | lengths 5..125 |
| max_issues_repo_head_hexsha | string | lengths 40..78 |
| max_issues_repo_licenses | list | lengths 1..10 |
| max_issues_count | int64 ⌀ | 1..67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24..24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24..24 |
| max_forks_repo_path | string | lengths 3..248 |
| max_forks_repo_name | string | lengths 5..125 |
| max_forks_repo_head_hexsha | string | lengths 40..78 |
| max_forks_repo_licenses | list | lengths 1..10 |
| max_forks_count | int64 ⌀ | 1..105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24..24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24..24 |
| content | string | lengths 5..2.06M |
| avg_line_length | float64 | 1..1.02M |
| max_line_length | int64 | 3..1.03M |
| alphanum_fraction | float64 | 0..1 |
| count_classes | int64 | 0..1.6M |
| score_classes | float64 | 0..1 |
| count_generators | int64 | 0..651k |
| score_generators | float64 | 0..1 |
| count_decorators | int64 | 0..990k |
| score_decorators | float64 | 0..1 |
| count_async_functions | int64 | 0..235k |
| score_async_functions | float64 | 0..1 |
| count_documentation | int64 | 0..1.04M |
| score_documentation | float64 | 0..1 |
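
A minimal sketch of working with a dump that follows this schema, assuming a Hugging Face-style `datasets` export (the file name and threshold below are placeholders, not taken from this dump):

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical file name; any JSON-lines export with the columns above works.
ds = load_dataset("json", data_files="python_files.jsonl", split="train")

# Keep well-documented files, using the score_documentation column from the schema.
documented = ds.filter(lambda row: row["score_documentation"] > 0.15)

for row in documented.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```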
hexsha: 086cc04c9a62e2ff1bedaaac23c04ca27ca1b7b4 | size: 2,264 | ext: py | lang: Python
path: schedule/tests/scheduler_latest_test.py | repo: conzty01/RA_Scheduler @ 6bf4931871aef4058d93917e62ceb31766e06b3a | licenses: ["MIT"]
stars: 1 (2021-03-31T05:26:17.000Z to 2021-03-31T05:26:17.000Z) | issues: 83 (2018-03-19T18:32:34.000Z to 2022-02-01T02:15:01.000Z) | forks: 2 (2021-01-15T22:16:00.000Z to 2021-02-10T01:03:32.000Z)

from schedule.scheduler4_0 import schedule
from schedule.ra_sched import Schedule, RA
from unittest.mock import MagicMock, patch
from datetime import date
import unittest
import random
class TestScheduler(unittest.TestCase):
def setUp(self):
        # -- Create patchers for the logging calls --
self.patcher_loggingDEBUG = patch("logging.debug", autospec=True)
self.patcher_loggingINFO = patch("logging.info", autospec=True)
self.patcher_loggingWARNING = patch("logging.warning", autospec=True)
self.patcher_loggingCRITICAL = patch("logging.critical", autospec=True)
self.patcher_loggingERROR = patch("logging.error", autospec=True)
        # Start the patchers - each one returns its mock
self.mocked_loggingDEBUG = self.patcher_loggingDEBUG.start()
self.mocked_loggingINFO = self.patcher_loggingINFO.start()
self.mocked_loggingWARNING = self.patcher_loggingWARNING.start()
self.mocked_loggingCRITICAL = self.patcher_loggingCRITICAL.start()
self.mocked_loggingERROR = self.patcher_loggingERROR.start()
def tearDown(self):
self.patcher_loggingDEBUG.stop()
self.patcher_loggingINFO.stop()
self.patcher_loggingWARNING.stop()
self.patcher_loggingCRITICAL.stop()
self.patcher_loggingERROR.stop()
def test_scheduler_whenUnableToGenerateSchedule_returnsEmptyList(self):
# -- Arrange --
# -- Act --
# -- Assert --
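        # A possible shape for this test (sketch only; the real signature of
        # scheduler4_0.schedule is not shown in this file, so these arguments
        # are hypothetical):
        #   result = schedule(raList=[], year=2021, month=8)
        #   self.assertEqual(result, [])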
pass
def test_scheduler_whenAbleToGenerateSchedule_returnsScheduleObject(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_scheduler_returnsExpectedSchedule(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_createDateDict_buildsExpectedDateDictionary(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_createPreviousDuties_returnsLastDateAssignedDictionary(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_createPreviousDuties_returnsNumDoubleDaysDictionary(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
if __name__ == "__main__":
unittest.main()
stats: avg_line_length=31.444444, max_line_length=79, alphanum_fraction=0.651502, classes=2,027 (score 0.895318), generators=0 (0), decorators=0 (0), async_functions=0 (0), documentation=405 (score 0.178887)
hexsha: 086ccdd01316fbb3c32c9928ed64ba2001cd4f5d | size: 2,583 | ext: py | lang: Python
path: main.py | repo: brpaz/ulauncher-dockerhub @ 22e646bda40328373a4d90fa0aece2cac0187a42 | licenses: ["MIT"]
stars: 3 (2020-09-04T07:56:47.000Z to 2022-01-05T13:19:25.000Z) | issues: null | forks: null

""" Main Module """
import logging
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.DoNothingAction import DoNothingAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
from ulauncher.api.shared.action.OpenUrlAction import OpenUrlAction
from dockerhub.client import Client
logger = logging.getLogger(__name__)
class DockerHubExtension(Extension):
""" Main Extension Class """
def __init__(self):
""" Initializes the extension """
super(DockerHubExtension, self).__init__()
self.dockerhub = Client()
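        # Route keyword-query events to the listener class defined below.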
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
def search_repositories(self, query):
""" Shows the a list of DockerHub repositories """
if len(query) < 3:
return RenderResultListAction([
ExtensionResultItem(
icon='images/icon.png',
name='Keep typing to search on Docker Hub ...',
highlightable=False,
on_enter=DoNothingAction())
])
repos = self.dockerhub.search_repos(query)
items = []
if not repos:
return RenderResultListAction([
ExtensionResultItem(
icon="images/icon.png",
name="No results found matching your criteria",
highlightable=False,
on_enter=HideWindowAction())
])
for repo in repos[:8]:
items.append(
ExtensionResultItem(icon='images/icon.png',
name="%s 🟊 %s" %
(repo["name"], repo["stars"]),
description=repo["description"],
on_enter=OpenUrlAction(repo["url"])))
return RenderResultListAction(items)
class KeywordQueryEventListener(EventListener):
""" Listener that handles the user input """
# pylint: disable=unused-argument,no-self-use
def on_event(self, event, extension):
""" Handles the event """
query = event.get_argument() or ""
return extension.search_repositories(query)
if __name__ == '__main__':
DockerHubExtension().run()
stats: avg_line_length=34.905405, max_line_length=85, alphanum_fraction=0.622145, classes=1,859 (score 0.718871), generators=0 (0), decorators=0 (0), async_functions=0 (0), documentation=433 (score 0.16744)
hexsha: 086fc0967062337eeb0ecb19108dc1ab0e1d65e3 | size: 957 | ext: py | lang: Python
path: geeklist_examples.py | repo: juliengrenier/python-geeklist @ 52528b099e94e539c3451bfd2e741e563f0924e9 | licenses: ["MIT"]
stars: 1 (2015-02-28T10:22:02.000Z to 2015-02-28T10:22:02.000Z) | issues: null | forks: null

# -*- coding: utf-8 -*-
from geeklist.api import BaseGeeklistApi, GeekListOauthApi, GeekListUserApi
from access import consumer_info  # requires an access.py defining consumer_info = {'key': YOUR_KEY, 'secret': YOUR_SECRET}
BaseGeeklistApi.BASE_URL ='http://sandbox-api.geekli.st/v1'
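# Three-legged OAuth 1.0a flow below: obtain a request token, have the user
# authorize it in the browser, then exchange token + verifier for an access token.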
oauth_api = GeekListOauthApi(consumer_info=consumer_info)
request_token = oauth_api.request_token(type='oob')
import webbrowser
webbrowser.open('http://sandbox.geekli.st/oauth/authorize?oauth_token=%s' % request_token['oauth_token'])
# read the verifier code shown in the browser after authorizing
verifier = raw_input('Please enter verifier code>')
oauth_access_token = oauth_api.access_token(request_token=request_token, verifier=verifier)
access_token = {
'key':oauth_access_token['oauth_token'],
'secret':oauth_access_token['oauth_token_secret']
}
user_api = GeekListUserApi(consumer_info, access_token)
print user_api.user_info()
user_api.create_card(headline='First card created with the python wrapper API')
stats: avg_line_length=41.608696, max_line_length=118, alphanum_fraction=0.797283, classes=0 (0), generators=0 (0), decorators=0 (0), async_functions=0 (0), documentation=353 (score 0.368861)
hexsha: 0871266d4d435da659b3d90a1e0729b53c28c39c | size: 2,448 | ext: py | lang: Python
path: game/gamesrc/objects/character.py | repo: ranka47/battle-of-hogwarts @ e7b2265ebe5661249dd28e472c49b74c1bbcdf23 | licenses: ["BSD-3-Clause"]
stars: 2 (2019-02-24T00:20:47.000Z to 2020-04-24T15:50:31.000Z) | issues: null | forks: 1 (2019-01-05T15:51:37.000Z to 2019-01-05T15:51:37.000Z)

"""
Template for Characters
Copy this module up one level and name it as you like, then
use it as a template to create your own Character class.
To make new logins default to creating characters
of your new type, change settings.BASE_CHARACTER_TYPECLASS to point to
your new class, e.g.
settings.BASE_CHARACTER_TYPECLASS = "game.gamesrc.objects.mychar.MyChar"
Note that objects already created in the database will not notice
this change, you have to convert them manually e.g. with the
@typeclass command.
"""
from ev import Character as DefaultCharacter
from ev import Script
import random
class Character(DefaultCharacter):
"""
The Character is like any normal Object (see example/object.py for
a list of properties and methods), except it actually implements
some of its hook methods to do some work:
at_basetype_setup - always assigns the default_cmdset to this object type
(important!)sets locks so character cannot be picked up
and its commands only be called by itself, not anyone else.
(to change things, use at_object_creation() instead)
at_after_move - launches the "look" command
at_post_puppet(player) - when Player disconnects from the Character, we
store the current location, so the "unconnected" character
object does not need to stay on grid but can be given a
None-location while offline.
at_pre_puppet - just before Player re-connects, retrieves the character's
old location and puts it back on the grid with a "charname
has connected" message echoed to the room
"""
def at_object_creation(self):
self.db.score = 0
self.db.health_max = 100
self.db.health = self.db.health_max
self.db.will = 100
self.db.respawns = 0
        houses = ["Gryffindor", "Hufflepuff", "Slytherin", "Ravenclaw"]
        self.db.house = random.choice(houses)
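        # Encounter counters, one per creature or hazard type the game tracks.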
self.db.dementors = 0
self.db.spiders = 0
self.db.willow = 0
self.db.rodents = 0
self.db.boggart = 0
self.db.parallax = 0
self.db.dragon = 0
def respawn(self):
self.msg("You lost a life and respawn with all your default powers")
self.db.health = self.db.health_max
self.db.score -= 50
self.db.will = 100
        self.db.respawns += 1

stats: avg_line_length=38.25, max_line_length=79, alphanum_fraction=0.663807, classes=1,849 (score 0.75531), generators=0 (0), decorators=0 (0), async_functions=0 (0), documentation=1,674 (score 0.683824)
hexsha: 0873053669c5a9be614101baec79eda2eb276cb9 | size: 3,170 | ext: py | lang: Python
path: lesson5/lesson5_task4.py | repo: nekdfl/GB-python-developer @ ca3f34bac2a92a930779f89357941bfa9634b3d4 | licenses: ["MIT"]
stars: null | issues: null | forks: null

"""
Create (by hand, not in code) a text file with the following contents:
One — 1
Two — 2
Three — 3
Four — 4
Write a program that opens the file for reading and reads its data line by line.
While doing so, the English numerals must be replaced with Russian ones.
The new block of lines must be written to a new text file.
"""
def readfile(filepath):
res = ""
with open(filepath, 'r') as f:
res = f.read()
return res
def make_dict(task2_data, delimiter=" - "):
# print(task2_data)
res_dict = {}
for lnum, line in enumerate(task2_data.split("\n")):
        lnum += 1  # enumerate counts from 0, humans count lines from 1
if line != "":
try:
strelemcnt = len(line.split(delimiter))
if strelemcnt == 2:
# print(f"Обработка строки {lnum} ok")
word, nn = line.split(delimiter)
res_dict[nn] = word
else:
                    raise RuntimeError(f"Input data error: wrong number of fields on line {lnum}.")
except ValueError as e:
                raise ValueError(f"Bad number format on line {lnum}. Error: {e}")
return res_dict
def translate(en_dict, ru_dict):
resdict = {}
for key in en_dict.keys():
resdict[key] = ru_dict[key]
return resdict
def write_dict(filepath, data_dict, delimiter):
    lines = []
    for key in data_dict.keys():
        line = data_dict[key] + delimiter + key
lines.append(line)
with open(filepath, 'w+') as f:
f.writelines("\n".join(lines))
f.seek(0)
print(f"содержимое выходного файла {filepath}\n{f.read()}")
def full_variant():
infile_name = "task4_data_in.txt"
outfile_name = "task4_data_out.txt"
ru_dict = {'1': 'Один', '2': 'Два', '3': 'Три', '4': 'Четыре'}
try:
task2_data = readfile(infile_name)
    except IOError as e:
        print(f"File access error: {e}")
        exit(1)
try:
file_data_dict = make_dict(task2_data)
except ValueError as e:
print(f"{e}")
exit(1)
except RuntimeError as e:
print(f"{e}")
exit(2)
try:
resdict = translate(file_data_dict, ru_dict)
# print(resdict)
except KeyError as e:
print(f"В словаре переводчика нет значения для {e}")
exit(3)
write_dict(outfile_name, resdict, " - ")
print("Программа завершена")
def short_variant():
infile_name = "task4_data_in.txt"
outfile_name = "task4_data_out.txt"
ru_dict = {'1': 'Один', '2': 'Два', '3': 'Три', '4': 'Четыре'}
en_dict = {'1': 'one', '2': 'Two', '3': 'Three', '4': 'Four'}
    delimiter = " - "
res_lines = []
with open(infile_name, "r") as ifile:
for line in ifile:
for kword in en_dict.keys():
if line.count(kword):
                    res_lines.append(ru_dict[kword] + delimiter + kword)
with open(outfile_name, "w+") as ofile:
ofile.writelines("\n".join(res_lines))
ofile.seek(0)
print(f"содержимое выходного файла {outfile_name}\n{ofile.read()}")
if __name__ == "__main__":
# main()
short_variant()
stats: avg_line_length=26.864407, max_line_length=111, alphanum_fraction=0.582334, classes=0 (0), generators=0 (0), decorators=0 (0), async_functions=0 (0), documentation=1,528 (score 0.414767)
hexsha: 087388739eebaad50c966ec02ed0312b37726c72 | size: 1,203 | ext: py | lang: Python
path: w_cutscenes_test.py | repo: wholetonegames/panda3d-ness-rpg @ e7b2265ebe5661249dd28e472c49b74c1bbcdf23 | licenses: ["MIT"]
stars: 1 (2021-02-01T03:09:28.000Z to 2021-02-01T03:09:28.000Z) | issues: null | forks: null

from w_i_stage import IStage
from direct.interval.IntervalGlobal import Sequence, Func, Wait
class CutsceneTest(IStage):
def __init__(self):
IStage.__init__(self)
def setup(self):
self.previousMap = base.gameData.currentMap
base.gameData.currentMap = 'city'
self.previousPos = base.gameData.heroPos
base.gameData.heroPos = 'startPos'
self.initStage()
self.initHero()
taskMgr.add(self.moveHero, "moveTask")
self.start()
self.animate()
def animate(self):
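        # Timeline: walk north for 2 s, stop, idle for 1 s, then fade to the RPG field.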
seq = Sequence(
Func(self.heroNorth),
Wait(2.0),
Func(self.heroStop),
Wait(1.0),
Func(base.requestWithFade, 'RPGField')
)
seq.start()
def heroNorth(self):
base.directionMap["up"] = True
def heroStop(self):
base.directionMap["up"] = False
def quit(self):
render.clearLight()
taskMgr.remove("moveTask")
self.stage.removeNode()
base.gameData.currentMap = self.previousMap
base.gameData.heroPos = self.previousPos
def cancelCommand(self):
pass
def intoEvent(self, entry):
pass
stats: avg_line_length=25.0625, max_line_length=63, alphanum_fraction=0.596841, classes=1,107 (score 0.9202), generators=0 (0), decorators=0 (0), async_functions=0 (0), documentation=54 (score 0.044888)
hexsha: 0874abf4b1ea7884b6edfbac6a754d49e5cc5678 | size: 154 | ext: py | lang: Python
path: lichee/utils/__init__.py | repo: Tencent/Lichee (stars) / zhaijunyu/Lichee (issues, forks) @ 7653becd6fbf8b0715f788af3c0507c012be08b4 | licenses: ["Apache-2.0"]
stars: 91 (2021-10-30T02:25:05.000Z to 2022-03-28T06:51:52.000Z) | issues: 1 (2021-12-17T09:30:25.000Z to 2022-03-05T12:30:13.000Z) | forks: 17 (2021-11-04T07:50:23.000Z to 2022-03-24T14:24:11.000Z)

# -*- coding: utf-8 -*-
"""
Utility toolkit.
"""
from . import convertor
from . import model_loader
from . import storage
from . import parallel
from . import logging
stats: avg_line_length=15.4, max_line_length=26, alphanum_fraction=0.688312, classes=0 (0), generators=0 (0), decorators=0 (0), async_functions=0 (0), documentation=40 (score 0.25)
hexsha: 08757365d19fb16259355c3f4a0bc7a45ccc8fde | size: 1,808 | ext: py | lang: Python
path: networkunit/models/backends/network_model.py | repo: russelljjarvis/NetworkUnit @ 32179371d3a0ba354e6637cf4f97ba70522d4054 | licenses: ["BSD-3-Clause"]
stars: null | issues: 1 (2019-11-15T22:56:20.000Z to 2019-11-15T22:56:20.000Z) | forks: null

"""NeuronUnit model class for reduced neuron models"""
import numpy as np
from neo.core import AnalogSignal
import quantities as pq
import neuronunit.capabilities as cap
import neuronunit.models as mod
import neuronunit.capabilities.spike_functions as sf
from neuronunit.models import backends
import generic_network
from generic_network import net_sim_runner, get_dummy_synapses
class NetworkModel(cap.ReceivesCurrent,
cap.ProducesMultiMembranePotentials,
cap.ProducesSpikeRasters,
):
"""Base class for network models
todo replace receives current with receives patterned input."""
    def __init__(self, name=None, backend=None, synapses=None):  # backend: e.g. a pyNN simulator module
"""Instantiate a network model.
name: Optional model name.
"""
self.run_number = 0
self.backend = backend
self.tstop = None
self.data = None
self.vms = None
self.binary_trains = None
self.t_spike_axis = None
self.synapses = get_dummy_synapses()
try:
self.sim = generic_network.sim
        except AttributeError:
pass
def get_membrane_potentials(self):
return self.vms
def getSpikeRasters(self, **run_params):
        return self.binary_trains
def inject_noise_current(self, stim_current, syn_weights):
import pyNN.neuron as sim
noisee = sim.NoisyCurrentSource(mean=0.74/1000.0, stdev=4.00/1000.0, start=0.0, stop=2000.0, dt=1.0)
noisei = sim.NoisyCurrentSource(mean=1.440/1000.0, stdev=4.00/1000.0, start=0.0, stop=2000.0, dt=1.0)
stim_noise_currents = [noisee,noisei]
        self.data, self.vms, self.binary_trains, self.t_spike_axis = net_sim_runner(
            syn_weights, sim, self.synapses, stim_noise_currents)
        return (self.vms, self.binary_trains, self.data)
stats: avg_line_length=35.45098, max_line_length=131, alphanum_fraction=0.68031, classes=1,446 (score 0.799779), generators=0 (0), decorators=0 (0), async_functions=0 (0), documentation=232 (score 0.128319)
hexsha: 0876136eb46ef1d30f09dbd0eff572dd1e4a0144 | size: 28,812 | ext: py | lang: Python
path: generator.py | repo: jimstorch/DGGen @ cdecbc4bfa491a634aac370de05b21bb6f6cf8e1 | licenses: ["Apache-2.0"]
stars: 19 (2016-12-04T12:43:43.000Z to 2022-01-25T01:00:24.000Z) | issues: 9 (2017-01-04T16:33:00.000Z to 2021-11-16T06:02:16.000Z) | forks: 7 (2016-12-04T12:43:47.000Z to 2022-02-04T13:10:58.000Z)

#!/usr/bin/env python3
import argparse
import csv
import datetime
import json
import logging
import os
import sys
import warnings
from collections import defaultdict
from copy import copy
from dataclasses import dataclass
from itertools import islice, cycle, chain
from random import randint, shuffle, choice, sample
from textwrap import shorten, wrap
from typing import List, Any, Dict, Tuple
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen import canvas
script_name = os.path.basename(sys.argv[0])
description = """
Generate characters for the Delta Green pen-and-paper roleplaying game from Arc Dream Publishing.
"""
__version__ = "1.4"
logger = logging.getLogger(script_name)
TEXT_COLOR = (0, 0.1, 0.5)
DEFAULT_FONT = "Special Elite"
MONTHS = ("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC")
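# Percent chance that each profession-suggested bonus skill is applied before random picks.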
SUGGESTED_BONUS_CHANCE = 75
def main():
options = get_options()
init_logger(options.verbosity)
logger.debug(options)
data = load_data(options)
pages_per_sheet = 2 if options.equip else 1
professions = [data.professions[options.type]] if options.type else data.professions.values()
p = Need2KnowPDF(options.output, professions, pages_per_sheet=pages_per_sheet)
for profession in professions:
label = generate_label(profession)
p.bookmark(label)
for sex in islice(
cycle(["female", "male"]), options.count or profession["number_to_generate"]
):
c = Need2KnowCharacter(
data=data,
sex=sex,
profession=profession,
label_override=options.label,
employer_override=options.employer,
)
if options.equip:
c.equip(profession.get("equipment-kit", None))
c.print_footnotes()
p.add_page(c.d)
if pages_per_sheet >= 2:
p.add_page_2(c.e)
p.save_pdf()
logger.info("Wrote %s", options.output)
class Need2KnowCharacter(object):
statpools = [
[13, 13, 12, 12, 11, 11],
[15, 14, 12, 11, 10, 10],
[17, 14, 13, 10, 10, 8],
]
DEFAULT_SKILLS = {
"accounting": 10,
"alertness": 20,
"athletics": 30,
"bureaucracy": 10,
"criminology": 10,
"disguise": 10,
"dodge": 30,
"drive": 20,
"firearms": 20,
"first aid": 10,
"heavy machinery": 10,
"history": 10,
"humint": 10,
"melee weapons": 30,
"navigate": 10,
"occult": 10,
"persuade": 20,
"psychotherapy": 10,
"ride": 10,
"search": 20,
"stealth": 10,
"survival": 10,
"swim": 20,
"unarmed combat": 40,
}
BONUS = [
"accounting",
"alertness",
"anthropology",
"archeology",
"art1",
"artillery",
"athletics",
"bureaucracy",
"computer science",
"craft1value",
"criminology",
"demolitions",
"disguise",
"dodge",
"drive",
"firearms",
"first aid",
"forensics",
"heavy machinery",
"heavy weapons",
"history",
"humint",
"law",
"medicine",
"melee weapons",
"militaryscience1value",
"navigate",
"occult",
"persuade",
"pharmacy",
"pilot1value",
"psychotherapy",
"ride",
"science1value",
"search",
"sigint",
"stealth",
"surgery",
"survival",
"swim",
"unarmed combat",
"language1",
]
def __init__(self, data, sex, profession, label_override=None, employer_override=None):
self.data = data
self.profession = profession
self.sex = sex
# Hold all dictionaries
self.d = {}
self.e = {}
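        # Maps note text -> footnote marker; each new note lazily takes the next symbol.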
self.footnotes = defaultdict(
iter(
["*", "†", "‡", "§", "¶", "**", "††", "‡‡", "§§", "¶¶", "***", "†††", "‡‡‡", "§§§"]
).__next__
)
self.generate_demographics(label_override, employer_override)
self.generate_stats()
self.generate_derived_attributes()
self.generate_skills()
def generate_demographics(self, label_override, employer_override):
if self.sex == "male":
self.d["male"] = "X"
self.d["name"] = (
choice(self.data.family_names).upper() + ", " + choice(self.data.male_given_names)
)
else:
self.d["female"] = "X"
self.d["name"] = (
choice(self.data.family_names).upper() + ", " + choice(self.data.female_given_names)
)
self.d["profession"] = label_override or self.profession["label"]
self.d["employer"] = employer_override or ", ".join(
e
for e in [self.profession.get("employer", ""), self.profession.get("division", "")]
if e
)
self.d["nationality"] = "(U.S.A.) " + choice(self.data.towns)
self.d["age"] = "%d (%s %d)" % (randint(24, 55), choice(MONTHS), (randint(1, 28)))
def generate_stats(self):
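        # One extra candidate pool: six scores rolled as 4d6-drop-lowest.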
rolled = [[sum(sorted([randint(1, 6) for _ in range(4)])[1:]) for _ in range(6)]]
pool = choice(self.statpools + rolled)
shuffle(pool)
for score, stat in zip(
pool, ["strength", "constitution", "dexterity", "intelligence", "power", "charisma"]
):
self.d[stat] = score
self.d[f"{stat}_x5"] = score * 5
self.d[f"{stat}_distinguishing"] = self.distinguishing(stat, score)
def generate_derived_attributes(self):
self.d["hitpoints"] = int(round((self.d["strength"] + self.d["constitution"]) / 2.0))
self.d["willpower"] = self.d["power"]
self.d["sanity"] = self.d["power"] * 5
self.d["breaking point"] = self.d["power"] * 4
self.damage_bonus = ((self.d["strength"] - 1) >> 2) - 2
self.d["damage bonus"] = "DB=%d" % self.damage_bonus
def generate_skills(self):
# Default skills
self.d.update(self.DEFAULT_SKILLS)
# Professional skills
self.d.update(self.profession["skills"]["fixed"])
for skill, score in sample(
self.profession["skills"].get("possible", {}).items(),
self.profession["skills"].get("possible-count", 0),
):
self.d[skill] = score
for i in range(self.profession["bonds"]):
self.d[f"bond{i}"] = self.d["charisma"]
# Bonus skills
self.generate_bonus_skills(self.profession)
def generate_bonus_skills(self, profession):
bonus_skills = [
s
for s in profession["skills"].get("bonus", [])
if randint(1, 100) <= SUGGESTED_BONUS_CHANCE
] + sample(self.BONUS, len(self.BONUS))
bonuses_applied = 0
while bonuses_applied < 8:
skill = bonus_skills.pop(0)
boosted = self.d.get(skill, 0) + 20
if boosted <= 80:
self.d[skill] = boosted
bonuses_applied += 1
logger.debug("%s, boosted %s to %s", self, skill, boosted)
else:
logger.info(
"%s, Skipped boost - %s already at %s", self, skill, self.d.get(skill, 0)
)
def __str__(self):
return ", ".join(
[
self.d.get(i)
for i in ("name", "profession", "employer", "department")
if self.d.get(i)
]
)
def distinguishing(self, field, value):
return choice(self.data.distinguishing.get((field, value), [""]))
def equip(self, kit_name=None):
weapons = [self.data.weapons["unarmed"]]
if kit_name:
kit = self.data.kits[kit_name]
weapons += self.build_weapon_list(kit["weapons"])
gear = []
for item in kit["armour"] + kit["gear"]:
notes = (
(" ".join(self.store_footnote(n) for n in item["notes"]) + " ")
if "notes" in item
else ""
)
text = notes + (self.data.armour[item["type"]] if "type" in item else item["text"])
gear.append(text)
wrapped_gear = list(chain(*[wrap(item, 55, subsequent_indent=" ") for item in gear]))
if len(wrapped_gear) > 22:
logger.warning("Too much gear - truncated.")
for i, line in enumerate(wrapped_gear):
self.e[f"gear{i}"] = line
if len(weapons) > 7:
logger.warning("Too many weapons %s - truncated.", weapons)
for i, weapon in enumerate(weapons[:7]):
self.equip_weapon(i, weapon)
def build_weapon_list(self, weapons_to_add):
result = []
for weapon_to_add in weapons_to_add:
if "type" in weapon_to_add:
weapon = copy(self.data.weapons.get(weapon_to_add["type"], None))
if weapon:
if "notes" in weapon_to_add:
weapon["notes"] = weapon_to_add["notes"]
result += (
[weapon]
if "chance" not in weapon_to_add
or weapon_to_add["chance"] >= randint(1, 100)
else []
)
else:
logger.error("Unknown weapon type %s", weapon_to_add["type"])
elif "one-of" in weapon_to_add:
result += self.build_weapon_list([choice(weapon_to_add["one-of"])])
elif "both" in weapon_to_add:
result += self.build_weapon_list(w for w in weapon_to_add["both"])
else:
logger.error("Don't understand weapon %r", weapon_to_add)
return result
def equip_weapon(self, slot, weapon):
self.e[f"weapon{slot}"] = shorten(weapon["name"], 15, placeholder="…")
roll = int(self.d.get(weapon["skill"], 0) + (weapon["bonus"] if "bonus" in weapon else 0))
self.e[f"weapon{slot}_roll"] = f"{roll}%"
if "base-range" in weapon:
self.e[f"weapon{slot}_range"] = weapon["base-range"]
if "ap" in weapon:
self.e[f"weapon{slot}_ap"] = f"{weapon['ap']}"
if "lethality" in weapon:
lethality = weapon["lethality"]
lethality_note_indicator = (
self.store_footnote(lethality["special"]) if "special" in lethality else None
)
self.e[f"weapon{slot}_lethality"] = (
f"{lethality['rating']}%" if lethality["rating"] else ""
) + (f" {lethality_note_indicator}" if lethality_note_indicator else "")
if "ammo" in weapon:
self.e[f"weapon{slot}_ammo"] = f"{weapon['ammo']}"
if "kill-radius" in weapon:
self.e[f"weapon{slot}_kill_radius"] = f"{weapon['kill-radius']}"
if "notes" in weapon:
self.e[f"weapon{slot}_note"] = " ".join(self.store_footnote(n) for n in weapon["notes"])
if "damage" in weapon:
damage = weapon["damage"]
damage_note_indicator = (
self.store_footnote(damage["special"]) if "special" in damage else None
)
if "dice" in damage:
damage_modifier = (damage["modifier"] if "modifier" in damage else 0) + (
self.damage_bonus if "db-applies" in damage and damage["db-applies"] else 0
)
damage_roll = f"{damage['dice']}D{damage['die-type']}" + (
f"{damage_modifier:+d}" if damage_modifier else ""
)
else:
damage_roll = ""
self.e[f"weapon{slot}_damage"] = damage_roll + (
f" {damage_note_indicator}" if damage_note_indicator else ""
)
def print_footnotes(self):
notes = list(
chain(
*[
wrap(f"{pointer} {note}", 40, subsequent_indent=" ")
for (note, pointer) in list(self.footnotes.items())
]
)
)
if len(notes) > 12:
logger.warning("Too many footnotes - truncated.")
for i, note in enumerate(notes[:12]):
self.e[f"note{i}"] = note
def store_footnote(self, note):
"""Returns indicator character"""
return self.footnotes[note] if note else None
class Need2KnowPDF(object):
# Location of form fields in Points (1/72 inch) - 0,0 is bottom-left - and font size
field_xys = {
# Personal Data
"name": (75, 693, 11),
"profession": (343, 693, 11),
"employer": (75, 665, 11),
"nationality": (343, 665, 11),
"age": (185, 640, 11),
"birthday": (200, 640, 11),
"male": (98, 639, 11),
"female": (76, 639, 11),
# Statistical Data
"strength": (136, 604, 11),
"constitution": (136, 586, 11),
"dexterity": (136, 568, 11),
"intelligence": (136, 550, 11),
"power": (136, 532, 11),
"charisma": (136, 514, 11),
"strength_x5": (172, 604, 11),
"constitution_x5": (172, 586, 11),
"dexterity_x5": (172, 568, 11),
"intelligence_x5": (172, 550, 11),
"power_x5": (172, 532, 11),
"charisma_x5": (172, 514, 11),
"strength_distinguishing": (208, 604, 11),
"constitution_distinguishing": (208, 586, 11),
"dexterity_distinguishing": (208, 568, 11),
"intelligence_distinguishing": (208, 550, 11),
"power_distinguishing": (208, 532, 11),
"charisma_distinguishing": (208, 514, 11),
"damage bonus": (555, 200, 11),
"hitpoints": (195, 482, 11),
"willpower": (195, 464, 11),
"sanity": (195, 446, 11),
"breaking point": (195, 428, 11),
"bond0": (512, 604, 11),
"bond1": (512, 586, 11),
"bond2": (512, 568, 11),
"bond3": (512, 550, 11),
# Applicable Skill Sets
"accounting": (200, 361, 11),
"alertness": (200, 343, 11),
"anthropology": (200, 325, 11),
"archeology": (200, 307, 11),
"art1": (200, 289, 11),
"art2": (200, 281, 11),
"artillery": (200, 253, 11),
"athletics": (200, 235, 11),
"bureaucracy": (200, 217, 11),
"computer science": (200, 200, 11),
"craft1label": (90, 185, 9),
"craft1value": (200, 185, 9),
"craft2label": (90, 177, 9),
"craft2value": (200, 177, 9),
"craft3label": (90, 169, 9),
"craft3value": (200, 169, 9),
"craft4label": (90, 161, 9),
"craft4value": (200, 161, 9),
"criminology": (200, 145, 11),
"demolitions": (200, 127, 11),
"disguise": (200, 109, 11),
"dodge": (200, 91, 11),
"drive": (200, 73, 11),
"firearms": (200, 54, 11),
"first aid": (361, 361, 11),
"forensics": (361, 343, 11),
"heavy machinery": (361, 325, 11),
"heavy weapons": (361, 307, 11),
"history": (361, 289, 11),
"humint": (361, 270, 11),
"law": (361, 253, 11),
"medicine": (361, 235, 11),
"melee weapons": (361, 217, 11),
"militaryscience1value": (361, 199, 11),
"militaryscience1label": (327, 199, 11),
"militaryscience2value": (361, 186, 11),
"militaryscience2label": (327, 186, 11),
"navigate": (361, 163, 11),
"occult": (361, 145, 11),
"persuade": (361, 127, 11),
"pharmacy": (361, 109, 11),
"pilot1value": (361, 91, 9),
"pilot1label": (290, 91, 9),
"pilot2value": (361, 83, 9),
"pilot2label": (290, 83, 9),
"psychotherapy": (361, 54, 11),
"ride": (521, 361, 11),
"science1label": (442, 347, 9),
"science1value": (521, 347, 9),
"science2label": (442, 340, 9),
"science2value": (521, 340, 9),
"science3label": (442, 333, 9),
"science3value": (521, 333, 9),
"science4label": (442, 326, 9),
"science4value": (521, 326, 9),
"search": (521, 307, 11),
"sigint": (521, 289, 11),
"stealth": (521, 270, 11),
"surgery": (521, 253, 11),
"survival": (521, 235, 11),
"swim": (521, 217, 11),
"unarmed combat": (521, 200, 11),
"unnatural": (521, 181, 11),
"language1": (521, 145, 11),
"language2": (521, 127, 11),
"language3": (521, 109, 11),
"skill1": (521, 91, 11),
"skill2": (521, 73, 11),
"skill3": (521, 54, 11),
# 2nd page
"weapon0": (85, 480, 11),
"weapon0_roll": (175, 480, 11),
"weapon0_range": (215, 480, 11),
"weapon0_damage": (270, 480, 11),
"weapon0_ap": (345, 480, 11),
"weapon0_lethality": (410, 480, 11),
"weapon0_kill_radius": (462, 480, 11),
"weapon0_ammo": (525, 480, 11),
"weapon0_note": (560, 480, 11),
"weapon1": (85, 461, 11),
"weapon1_roll": (175, 461, 11),
"weapon1_range": (215, 461, 11),
"weapon1_damage": (270, 461, 11),
"weapon1_ap": (345, 461, 11),
"weapon1_lethality": (410, 461, 11),
"weapon1_kill_radius": (462, 461, 11),
"weapon1_ammo": (525, 461, 11),
"weapon1_note": (560, 461, 11),
"weapon2": (85, 442, 11),
"weapon2_roll": (175, 442, 11),
"weapon2_range": (215, 442, 11),
"weapon2_damage": (270, 442, 11),
"weapon2_ap": (345, 442, 11),
"weapon2_lethality": (410, 442, 11),
"weapon2_kill_radius": (462, 442, 11),
"weapon2_ammo": (525, 442, 11),
"weapon2_note": (560, 442, 11),
"weapon3": (85, 423, 11),
"weapon3_roll": (175, 423, 11),
"weapon3_range": (215, 423, 11),
"weapon3_damage": (270, 423, 11),
"weapon3_ap": (345, 423, 11),
"weapon3_lethality": (410, 423, 11),
"weapon3_kill_radius": (462, 423, 11),
"weapon3_ammo": (525, 423, 11),
"weapon3_note": (560, 423, 11),
"weapon4": (85, 404, 11),
"weapon4_roll": (175, 404, 11),
"weapon4_range": (215, 404, 11),
"weapon4_damage": (270, 404, 11),
"weapon4_ap": (345, 404, 11),
"weapon4_lethality": (410, 404, 11),
"weapon4_kill_radius": (462, 404, 11),
"weapon4_ammo": (525, 404, 11),
"weapon4_note": (560, 404, 11),
"weapon5": (85, 385, 11),
"weapon5_roll": (175, 385, 11),
"weapon5_range": (215, 385, 11),
"weapon5_damage": (270, 385, 11),
"weapon5_ap": (345, 385, 11),
"weapon5_lethality": (410, 385, 11),
"weapon5_kill_radius": (462, 385, 11),
"weapon5_ammo": (525, 385, 11),
"weapon5_note": (560, 385, 11),
"weapon6": (85, 366, 11),
"weapon6_roll": (175, 366, 11),
"weapon6_range": (215, 366, 11),
"weapon6_damage": (270, 366, 11),
"weapon6_ap": (345, 366, 11),
"weapon6_lethality": (410, 366, 11),
"weapon6_kill_radius": (465, 366, 11),
"weapon6_ammo": (525, 366, 11),
"weapon6_note": (560, 366, 11),
"gear0": (75, 628, 8),
"gear1": (75, 618, 8),
"gear2": (75, 608, 8),
"gear3": (75, 598, 8),
"gear4": (75, 588, 8),
"gear5": (75, 578, 8),
"gear6": (75, 568, 8),
"gear7": (75, 558, 8),
"gear8": (75, 548, 8),
"gear9": (75, 538, 8),
"gear10": (75, 528, 8),
"gear11": (323, 628, 8),
"gear12": (323, 618, 8),
"gear13": (323, 608, 8),
"gear14": (323, 598, 8),
"gear15": (323, 588, 8),
"gear16": (323, 578, 8),
"gear17": (323, 568, 8),
"gear18": (323, 558, 8),
"gear19": (323, 548, 8),
"gear20": (323, 538, 8),
"gear21": (323, 528, 8),
"note0": (50, 40, 8),
"note1": (50, 30, 8),
"note2": (50, 20, 8),
"note3": (50, 10, 8),
"note4": (240, 40, 8),
"note5": (240, 30, 8),
"note6": (240, 20, 8),
"note7": (240, 10, 8),
"note8": (410, 40, 8),
"note9": (410, 30, 8),
"note10": (410, 20, 8),
"note11": (410, 10, 8),
}
# Fields that also get a multiplier
x5_stats = ["strength", "constitution", "dexterity", "intelligence", "power", "charisma"]
def __init__(self, filename, professions, pages_per_sheet=1):
self.filename = filename
self.pages_per_sheet = pages_per_sheet
self.c = canvas.Canvas(self.filename)
# Set US Letter in points
self.c.setPageSize((612, 792))
self.c.setAuthor("https://github.com/jimstorch/DGGen")
self.c.setTitle("Delta Green Agent Roster")
self.c.setSubject("Pre-generated characters for the Delta Green RPG")
# Register Custom Fonts
pdfmetrics.registerFont(TTFont("Special Elite", "data/SpecialElite.ttf"))
pdfmetrics.registerFont(TTFont("OCRA", "data/OCRA.ttf"))
if len(professions) > 1:
self.generate_toc(professions, pages_per_sheet)
def generate_toc(self, professions, pages_per_sheet):
"""Build a clickable Table of Contents on page 1"""
self.bookmark("Table of Contents")
self.c.setFillColorRGB(0, 0, 0)
self.c.setFont("OCRA", 10)
now = datetime.datetime.utcnow().isoformat() + "Z"
self.c.drawString(150, 712, "DGGEN DTG " + now)
self.c.drawString(150, 700, "CLASSIFIED/DG/NTK//")
self.c.drawString(150, 688, "SUBJ ROSTER/ACTIVE/NOCELL/CONUS//")
top = 650
pagenum = 2
for count, profession in enumerate(professions):
label = generate_label(profession)
chapter = "{:.<40}".format(shorten(label, 37, placeholder="")) + "{:.>4}".format(
pagenum
)
self.c.drawString(150, top - self.line_drop(count), chapter)
self.c.linkAbsolute(
label,
label,
(145, (top - 6) - self.line_drop(count), 470, (top + 18) - self.line_drop(count)),
)
pagenum += profession["number_to_generate"] * pages_per_sheet
if pages_per_sheet == 1:
chapter = "{:.<40}".format("Blank Character Sheet Second Page") + "{:.>4}".format(
pagenum + profession["number_to_generate"]
)
self.c.drawString(150, top - self.line_drop(pagenum), chapter)
self.c.linkAbsolute(
"Back Page",
"Back Page",
(
145,
(top - 6) - self.line_drop(pagenum),
470,
(top + 18) - self.line_drop(pagenum),
),
)
self.c.showPage()
@staticmethod
def line_drop(count, linesize=22):
return count * linesize
def bookmark(self, text):
self.c.bookmarkPage(text)
self.c.addOutlineEntry(text, text)
def draw_string(self, x, y, size, text):
self.c.setFont(DEFAULT_FONT, size)
self.c.setFillColorRGB(*TEXT_COLOR)
self.c.drawString(x, y, str(text))
def fill_field(self, field, value):
try:
x, y, s = self.field_xys[field]
self.draw_string(x, y, s, str(value))
except KeyError:
logger.error("Unknown field %s", field)
def add_page(self, d):
# Add background. ReportLab will cache it for repeat
self.c.drawImage("data/Character Sheet NO BACKGROUND FRONT.jpg", 0, 0, 612, 792)
for key in d:
self.fill_field(key, d[key])
# Tell ReportLab we're done with current page
self.c.showPage()
def add_page_2(self, e):
# Add background. ReportLab will cache it for repeat
self.c.drawImage("data/Character Sheet NO BACKGROUND BACK.jpg", 0, 0, 612, 792)
for key in e:
self.fill_field(key, e[key])
# Tell ReportLab we're done with current page
self.c.showPage()
def save_pdf(self):
if self.pages_per_sheet == 1:
self.bookmark("Back Page")
self.c.drawImage("data/Character Sheet NO BACKGROUND BACK.jpg", 0, 0, 612, 792)
self.c.showPage()
self.c.save()
def generate_label(profession):
return ", ".join(
e
for e in [
profession.get("label", ""),
profession.get("employer", ""),
profession.get("division", ""),
]
if e
)
def get_options():
"""Get options and arguments from argv string."""
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-v",
"--verbosity",
action="count",
default=0,
help="specify up to three times to increase verbosity, "
"i.e. -v to see warnings, -vv for information messages, or -vvv for debug messages.",
)
parser.add_argument("-V", "--version", action="version", version=__version__)
parser.add_argument(
"-o",
"--output",
action="store",
default=f"DeltaGreenPregen-{datetime.datetime.now() :%Y-%m-%d-%H-%M}.pdf",
help="Output PDF file. Defaults to %(default)s.",
)
parser.add_argument(
"-t", "--type", action="store", help=f"Select single profession to generate."
)
parser.add_argument("-l", "--label", action="store", help="Override profession label.")
parser.add_argument(
"-c",
"--count",
type=int,
action="store",
help="Generate this many characters of each profession.",
)
parser.add_argument(
"-e", "--employer", action="store", help="Set employer for all generated characters."
)
parser.add_argument(
"-u",
"--unequipped",
action="store_false",
dest="equip",
help="Don't generate equipment.",
default=True,
)
data = parser.add_argument_group(title="Data", description="Data file locations")
data.add_argument(
"--professions",
action="store",
default="data/professions.json",
help="Data file for professions - defaults to %(default)s",
)
return parser.parse_args()
@dataclass
class Data:
male_given_names: List[str]
female_given_names: List[str]
family_names: List[str]
towns: List[str]
professions: Dict[str, Any]
kits: Dict[str, Any]
weapons: Dict[str, Any]
armour: Dict[str, Any]
distinguishing: Dict[Tuple[str, int], List[str]]
def load_data(options):
with open("data/boys1986.txt") as f:
male_given_names = f.read().splitlines()
with open("data/girls1986.txt") as f:
female_given_names = f.read().splitlines()
with open("data/surnames.txt") as f:
family_names = f.read().splitlines()
with open("data/towns.txt") as f:
towns = f.read().splitlines()
with open(options.professions) as f:
professions = json.load(f)
with open("data/equipment.json") as f:
equipment = json.load(f)
kits = equipment["kits"]
weapons = equipment["weapons"]
armour = equipment["armour"]
distinguishing = {}
with open("data/distinguishing-features.csv") as f:
for row in csv.DictReader(f):
for value in range(int(row["from"]), int(row["to"]) + 1):
distinguishing.setdefault((row["statistic"], value), []).append(
row["distinguishing"]
)
data = Data(
male_given_names=male_given_names,
female_given_names=female_given_names,
family_names=family_names,
towns=towns,
professions=professions,
kits=kits,
weapons=weapons,
armour=armour,
distinguishing=distinguishing,
)
return data
def init_logger(verbosity, stream=sys.stdout):
"""Initialize logger and warnings according to verbosity argument.
Verbosity levels of 0-3 supported."""
is_not_debug = verbosity <= 2
level = (
[logging.ERROR, logging.WARNING, logging.INFO][verbosity] if is_not_debug else logging.DEBUG
)
log_format = (
"%(message)s"
if is_not_debug
else "%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s"
)
logging.basicConfig(level=level, format=log_format, stream=stream)
if is_not_debug:
warnings.filterwarnings("ignore")
if __name__ == "__main__":
sys.exit(main())
stats: avg_line_length=34.797101, max_line_length=103, alphanum_fraction=0.530508, classes=22,858 (score 0.792387), generators=0 (0), decorators=386 (score 0.013381), async_functions=0 (0), documentation=7,603 (score 0.263563)
hexsha: 087670710e46b9499b04f22d8a01fa0767bf4b47 | size: 9,093 | ext: py | lang: Python
path: tests/test_remote.py | repo: bcyran/philipstv @ 6037724d5fab0b72265c2de2c0441a64f6e00c00 | licenses: ["MIT"]
stars: null | issues: null | forks: null

from typing import Union
from unittest.mock import Mock, create_autospec
import pytest
from pytest import MonkeyPatch
from philipstv import PhilipsTVAPI, PhilipsTVPairer, PhilipsTVRemote, PhilipsTVRemoteError
from philipstv.model import (
AllChannels,
AmbilightColor,
AmbilightColors,
AmbilightLayer,
AmbilightPower,
AmbilightPowerValue,
AmbilightTopology,
Application,
ApplicationComponent,
ApplicationIntent,
Applications,
Channel,
ChannelID,
ChannelList,
ChannelShort,
CurrentChannel,
CurrentVolume,
DeviceInfo,
InputKey,
InputKeyValue,
PowerState,
PowerStateValue,
SetChannel,
Volume,
)
CHANNELS = AllChannels(
version=1,
id="all",
list_type="MixedSources",
medium="mixed",
operator="OPER",
install_country="Poland",
channel=[
Channel(
ccid=35,
preset="1",
name="Polsat HD",
onid=1537,
tsid=24,
sid=2403,
service_type="audio_video",
type="DVB_C",
logo_version=33,
),
Channel(
ccid=40,
preset="3",
name="TVN HD",
onid=666,
tsid=24,
sid=2403,
service_type="audio_video",
type="DVB_C",
logo_version=33,
),
],
)
APPLICATION_SPOTIFY = Application(
intent=ApplicationIntent(
component=ApplicationComponent(
package_name="com.spotify.tv.android",
class_name="com.spotify.tv.android.SpotifyTVActivity",
),
action="android.intent.action.MAIN",
),
label="Spotify",
order=0,
id="com.spotify.tv.android.SpotifyTVActivity-com.spotify.tv.android",
type="app",
)
APPLICATION_NETFLIX = Application(
intent=ApplicationIntent(
component=ApplicationComponent(
package_name="com.netflix.ninja",
class_name="com.netflix.ninja.MainActivity",
),
action="android.intent.action.MAIN",
),
label="Netflix",
order=0,
id="com.netflix.ninja.MainActivity-com.netflix.ninja",
type="app",
)
APPLICATIONS = Applications(
version=0,
applications=[APPLICATION_SPOTIFY, APPLICATION_NETFLIX],
)
@pytest.fixture
def api_mock() -> Mock:
return create_autospec(PhilipsTVAPI, spec_set=True, instance=True) # type: ignore
def test_host(api_mock: Mock) -> None:
expected_host = "192.168.0.66"
api_mock.host = expected_host
result = PhilipsTVRemote(api_mock).host
assert result == expected_host
def test_auth(api_mock: PhilipsTVAPI) -> None:
expected_credentials = ("<key>", "<secret>")
remote = PhilipsTVRemote(api_mock)
remote.auth = expected_credentials
assert remote.auth == expected_credentials
assert api_mock.auth == expected_credentials
def test_pair(api_mock: Mock, monkeypatch: MonkeyPatch) -> None:
given_id = "<id>"
pairer_mock = create_autospec(PhilipsTVPairer)
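    # Make instantiation return the mock itself, so assertions target a single object.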
pairer_mock.return_value = pairer_mock
monkeypatch.setattr("philipstv.remote.PhilipsTVPairer", pairer_mock)
def fake_callback() -> str:
return "str"
PhilipsTVRemote(api_mock).pair(fake_callback, given_id)
pairer_mock.pair.assert_called_once_with(fake_callback)
device_info = pairer_mock.call_args.args[1]
assert isinstance(device_info, DeviceInfo)
assert device_info.id == given_id
def test_pair_no_id(api_mock: Mock, monkeypatch: MonkeyPatch) -> None:
pairer_mock = create_autospec(PhilipsTVPairer)
pairer_mock.return_value = pairer_mock
monkeypatch.setattr("philipstv.remote.PhilipsTVPairer", pairer_mock)
PhilipsTVRemote(api_mock).pair(lambda: "str")
device_info = pairer_mock.call_args.args[1]
assert isinstance(device_info, DeviceInfo)
assert device_info.id.isalnum()
assert len(device_info.id) == 16
def test_get_power(api_mock: Mock) -> None:
api_mock.get_powerstate.return_value = PowerState(powerstate=PowerStateValue.STANDBY)
result = PhilipsTVRemote(api_mock).get_power()
assert result is False
def test_set_power(api_mock: Mock) -> None:
PhilipsTVRemote(api_mock).set_power(True)
api_mock.set_powerstate.assert_called_once_with(PowerState(powerstate=PowerStateValue.ON))
def test_get_volume(api_mock: Mock) -> None:
api_mock.get_volume.return_value = CurrentVolume(muted=False, current=15, min=0, max=60)
result = PhilipsTVRemote(api_mock).get_volume()
assert result == 15
def test_set_volume(api_mock: Mock) -> None:
PhilipsTVRemote(api_mock).set_volume(20)
api_mock.set_volume.assert_called_once_with(Volume(current=20, muted=False))
def test_get_current_channel(api_mock: Mock) -> None:
api_mock.get_current_channel.return_value = CurrentChannel(
channel=ChannelShort(ccid=5, preset="10", name="TVN HD"),
channel_list=ChannelList(id="allcab", version="1"),
)
result = PhilipsTVRemote(api_mock).get_current_channel()
assert result == "TVN HD"
@pytest.mark.parametrize(
"input, expected",
[
(1, SetChannel(channel=ChannelID(ccid=35))),
("Polsat HD", SetChannel(channel=ChannelID(ccid=35))),
(3, SetChannel(channel=ChannelID(ccid=40))),
("TVN HD", SetChannel(channel=ChannelID(ccid=40))),
],
)
def test_set_channel(api_mock: Mock, input: Union[int, str], expected: SetChannel) -> None:
api_mock.get_all_channels.return_value = CHANNELS
remote = PhilipsTVRemote(api_mock)
remote.set_channel(input)
api_mock.set_channel.assert_called_once_with(expected)
remote.set_channel(input)
api_mock.get_all_channels.assert_called_once()
def test_set_channel_error(api_mock: Mock) -> None:
api_mock.get_current_channel.return_value = CHANNELS
with pytest.raises(PhilipsTVRemoteError):
PhilipsTVRemote(api_mock).set_channel("random channel")
def test_get_all_channels(api_mock: Mock) -> None:
api_mock.get_all_channels.return_value = CHANNELS
result = PhilipsTVRemote(api_mock).get_all_channels()
assert result == {1: "Polsat HD", 3: "TVN HD"}
def test_input_key(api_mock: Mock) -> None:
PhilipsTVRemote(api_mock).input_key(InputKeyValue.STANDBY)
api_mock.input_key.assert_called_once_with(InputKey(key=InputKeyValue.STANDBY))
def test_get_ambilight_power(api_mock: Mock) -> None:
api_mock.get_ambilight_power.return_value = AmbilightPower(power=AmbilightPowerValue.OFF)
result = PhilipsTVRemote(api_mock).get_ambilight_power()
assert result is False
def test_set_ambilight_power(api_mock: Mock) -> None:
PhilipsTVRemote(api_mock).set_ambilight_power(True)
api_mock.set_ambilight_power.assert_called_once_with(
AmbilightPower(power=AmbilightPowerValue.ON)
)
def test_set_ambilight_color(api_mock: Mock) -> None:
PhilipsTVRemote(api_mock).set_ambilight_color(AmbilightColor(r=0, g=69, b=255))
api_mock.set_ambilight_cached.assert_called_once_with(AmbilightColor(r=0, g=69, b=255))
def test_set_ambilight_color_sides(api_mock: Mock) -> None:
left_color = AmbilightColor(r=255, g=0, b=0)
top_color = AmbilightColor(r=0, g=255, b=0)
right_color = AmbilightColor(r=0, g=0, b=255)
bottom_color = AmbilightColor(r=125, g=0, b=125)
topology = AmbilightTopology(layers=1, left=2, top=3, right=2, bottom=3)
api_mock.get_ambilight_topology.return_value = topology
PhilipsTVRemote(api_mock).set_ambilight_color(
left=left_color, top=top_color, right=right_color, bottom=bottom_color
)
api_mock.set_ambilight_cached.assert_called_once_with(
AmbilightColors(
__root__={
"layer1": AmbilightLayer(
left={str(point): left_color for point in range(topology.left)},
top={str(point): top_color for point in range(topology.top)},
right={str(point): right_color for point in range(topology.right)},
bottom={str(point): bottom_color for point in range(topology.bottom)},
)
}
)
)
def test_get_applications(api_mock: Mock) -> None:
api_mock.get_applications.return_value = APPLICATIONS
result = PhilipsTVRemote(api_mock).get_applications()
assert result == ["Spotify", "Netflix"]
@pytest.mark.parametrize(
"app, expected",
[
("Spotify", APPLICATION_SPOTIFY),
("Netflix", APPLICATION_NETFLIX),
],
)
def test_launch_application(api_mock: Mock, app: str, expected: ApplicationIntent) -> None:
api_mock.get_applications.return_value = APPLICATIONS
remote = PhilipsTVRemote(api_mock)
remote.launch_application(app)
api_mock.launch_application.assert_called_once_with(expected)
remote.launch_application(app)
api_mock.get_applications.assert_called_once()
def test_launch_application_error(api_mock: Mock) -> None:
api_mock.get_applications.return_value = APPLICATIONS
with pytest.raises(PhilipsTVRemoteError):
PhilipsTVRemote(api_mock).launch_application("whatever")
stats: avg_line_length=29.144231, max_line_length=94, alphanum_fraction=0.698119, classes=0 (0), generators=0 (0), decorators=1,298 (score 0.142747), async_functions=0 (0), documentation=721 (score 0.079292)
hexsha: 0879ba08e89fa5f242f50ddb01acf847e7896d29 | size: 9,612 | ext: py | lang: Python
path: a2t/src/test_runner.py | repo: syeda-khurrath/fabric8-analytics-common @ cdecbc4bfa491a634aac370de05b21bb6f6cf8e1 (stars, issues) / codeready-analytics/fabric8-analytics-common @ a763c5534d601f2f40a0f02c02914c49ea23669d (forks) | licenses: ["Apache-2.0"]
stars: null | issues: 4 (2019-05-20T08:27:47.000Z to 2019-05-20T08:29:57.000Z) | forks: 1 (2020-10-05T21:12:44.000Z to 2020-10-05T21:12:44.000Z)

"""Implementation of benchmarks.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from random import randint
from fastlog import log
from time import time
from queue import Queue
from threading import Thread
from report_generator import generate_csv_report
from component_generator import ComponentGenerator
from setup import parse_tags
# directory containing test results
RESULT_DIRECTORY = "test_results"
def check_number_of_results(queue_size, component_analysis_count, stack_analysis_count):
"""Check if we really got the same number of results as expected.
When the server respond by any HTTP error code (4xx, 5xx), the results
are NOT stored in the queue. This means that number of results stored
in the queue might be less than number of threads set up by user via
CLI parameters in certain situations. This function check this situation.
"""
log.info("queue size: {size}".format(size=queue_size))
expected = component_analysis_count + 2 * stack_analysis_count
if queue_size != expected:
log.warning("Warning: {expected} results expected, but only {got} is presented".format(
expected=expected, got=queue_size))
log.warning("This means that {n} analysis ends with error or exception".format(
n=expected - queue_size))
def prepare_component_generators(python_payload, maven_payload, npm_payload):
"""Prepare all required component generators for selected payload types."""
component_generator = ComponentGenerator()
g_python = component_generator.generator_for_ecosystem("pypi")
g_maven = component_generator.generator_for_ecosystem("maven")
g_npm = component_generator.generator_for_ecosystem("npm")
generators = []
if python_payload:
generators.append(g_python)
if maven_payload:
generators.append(g_maven)
if npm_payload:
generators.append(g_npm)
return generators
def initialize_generators(generators):
"""Initialize the generators randomly so we don't start from the 1st item."""
for i in range(randint(10, 100)):
for g in generators:
next(g)
def component_analysis_benchmark(queue, threads, component_analysis, thread_count,
python_payload, maven_payload, npm_payload):
"""Component analysis benchmark."""
generators = prepare_component_generators(python_payload, maven_payload, npm_payload)
initialize_generators(generators)
for t in range(thread_count):
g = generators[randint(0, len(generators) - 1)]
ecosystem, component, version = next(g)
with log.indent():
log.info("Component analysis for E/P/V {} {} {}".format(ecosystem, component, version))
t = Thread(target=component_analysis.start,
args=(t, ecosystem, component, version, queue))
t.start()
threads.append(t)
# skip some items
for i in range(randint(5, 25)):
next(g)
def stack_analysis_benchmark(queue, threads, stack_analysis, thread_count,
python_payload, maven_payload, npm_payload):
"""Stack analysis benchmark."""
# TODO: read automagically from the filelist
manifests = (
("maven", "clojure_1_6_0.xml"),
("maven", "clojure_1_7_0.xml"),
("maven", "clojure_1_8_0.xml"),
("maven", "clojure_junit.xml"),
("pypi", "click_6_star.txt"),
("pypi", "array_split.txt"),
("pypi", "fastlog_urllib_requests.txt"),
("pypi", "requests_latest.txt"),
("pypi", "numpy_latest.txt"),
("pypi", "flask_latest.txt"),
("pypi", "scipy_latest.txt"),
("pypi", "pygame_latest.txt"),
("pypi", "pyglet_latest.txt"),
("pypi", "dash_latest.txt"),
("pypi", "pudb_latest.txt"),
("pypi", "pytest_latest.txt"),
("pypi", "numpy_1_11_0.txt"),
("pypi", "numpy_1_12_0.txt"),
("pypi", "numpy_1_16_2.txt"),
("pypi", "numpy_1_16_3.txt"),
("pypi", "numpy_scipy.txt"),
("pypi", "pytest_2_0_0.txt"),
("pypi", "pytest_2_0_1.txt"),
("pypi", "pytest_3_2_2.txt"),
("pypi", "requests_2_20_0.txt"),
("pypi", "requests_2_20_1.txt"),
("pypi", "requests_2_21_0.txt"),
("pypi", "scipy_1_1_0.txt"),
("pypi", "scipy_1_2_0.txt"),
("pypi", "scipy_1_2_1.txt"),
("npm", "array.json"),
("npm", "dependency_array.json"),
("npm", "dependency_emitter_component.json"),
("npm", "dependency_jquery.json"),
("npm", "dependency_jquery_react.json"),
("npm", "dependency_lodash.json"),
("npm", "dependency_lodash_react_jquery.json"),
("npm", "dependency_react.json"),
("npm", "dependency_to_function.json"),
("npm", "dependency_to_function_vue_array.json"),
("npm", "dependency_underscore.json"),
("npm", "dependency_underscore_react_jquery.json"),
("npm", "dependency_vue.json"),
("npm", "dependency_vue_to_function.json"),
("npm", "empty.json"),
("npm", "jquery.json"),
("npm", "lodash.json"),
("npm", "mocha.json"),
("npm", "no_requirements.json"),
("npm", "underscore.json"),
("npm", "wisp.json"),
)
for t in range(thread_count):
manifest_idx = randint(0, len(manifests) - 1)
manifest = manifests[manifest_idx]
with log.indent():
log.info("Stack analysis")
ecosystem = manifest[0]
manifest_file = manifest[1]
t = Thread(target=stack_analysis.start,
args=(t, ecosystem, manifest_file, queue))
t.start()
threads.append(t)
def wait_for_all_threads(threads):
"""Wait for all threads to finish."""
log.info("Waiting for all threads to finish")
for t in threads:
t.join()
log.success("Done")
def run_test(cfg, test, i, component_analysis, stack_analysis):
"""Run one selected test."""
test_name = test["Name"]
log.info("Starting test #{n} with name '{desc}'".format(n=i, desc=test_name))
with log.indent():
start = time()
threads = []
queue = Queue()
with log.indent():
component_analysis_count = int(test["Component analysis"])
stack_analysis_count = int(test["Stack analysis"])
python_payload = test["Python payload"] in ("Yes", "yes")
maven_payload = test["Maven payload"] in ("Yes", "yes")
npm_payload = test["NPM payload"] in ("Yes", "yes")
component_analysis_benchmark(queue, threads, component_analysis,
component_analysis_count,
python_payload, maven_payload, npm_payload)
stack_analysis_benchmark(queue, threads, stack_analysis,
stack_analysis_count,
python_payload, maven_payload, npm_payload)
wait_for_all_threads(threads)
queue_size = queue.qsize()
check_number_of_results(queue_size, component_analysis_count, stack_analysis_count)
end = time()
# TODO: use better approach to join paths
filename = RESULT_DIRECTORY + "/" + test_name.replace(" ", "_") + ".csv"
log.info("Generating test report into file '{filename}'".format(filename=filename))
generate_csv_report(queue, test, start, end, end - start, filename)
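# Illustrative shape of one `test` record read from the CSV (an assumption
# inferred from the keys accessed above; the real column set may differ):
#     {"Name": "smoke test", "Component analysis": "10", "Stack analysis": "2",
#      "Python payload": "Yes", "Maven payload": "No", "NPM payload": "No",
#      "Tags": "smoke"}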
def run_all_loaded_tests(cfg, tests, component_analysis, stack_analysis):
"""Run all tests read from CSV file."""
i = 1
for test in tests:
run_test(cfg, test, i, component_analysis, stack_analysis)
i += 1
def run_tests_with_tags(cfg, tests, tags, component_analysis, stack_analysis):
"""Run tests read from CSV file that are marged by any of tags provided in tags parameter."""
i = 1
for test in tests:
test_tags = parse_tags(test["Tags"])
test_name = test["Name"]
if tags <= test_tags:
run_test(cfg, test, i, component_analysis, stack_analysis)
i += 1
else:
log.info("Skipping test #{n} with name '{desc}'".format(n=i, desc=test_name))
def no_tests(tests):
"""Predicate for number of tests."""
return not tests or len(tests) == 0
def start_tests(cfg, tests, tags, component_analysis, stack_analysis):
"""Start all tests using the already loaded configuration."""
log.info("Run tests")
with log.indent():
if no_tests(tests):
log.error("No tests loaded!")
sys.exit(-1)
if len(tests) == 1:
log.success("Loaded 1 test")
else:
log.success("Loaded {n} tests".format(n=len(tests)))
if not tags:
run_all_loaded_tests(cfg, tests, component_analysis, stack_analysis)
else:
run_tests_with_tags(cfg, tests, tags, component_analysis, stack_analysis)
| 37.546875 | 99 | 0.634415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,660 | 0.380774 |
087dd6bb53dc8a5a49168182e90d10a64dea2f64 | 159 | py | Python | bus_system/apps/bus/admin.py | pygabo/bus_system | ffb76d3414e058286799f3df1cb551b26286e7c3 | [
"MIT"
]
| null | null | null | bus_system/apps/bus/admin.py | pygabo/bus_system | ffb76d3414e058286799f3df1cb551b26286e7c3 | [
"MIT"
]
| null | null | null | bus_system/apps/bus/admin.py | pygabo/bus_system | ffb76d3414e058286799f3df1cb551b26286e7c3 | [
"MIT"
]
| null | null | null | # Core Django imports
from django.contrib import admin
# Imports from my apps
from bus_system.apps.bus.models import BusModel
admin.site.register(BusModel)
| 19.875 | 47 | 0.805031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.27673 |
087e3e81767ebb79be98cf41ccb71262d3691e12 | 2,454 | py | Python | jocular/calcs.py | MartinCooke/jocular | 635816d4ef6aa6ea75187137e25386dad2d551e9 | [
"MIT"
]
| 6 | 2021-03-21T16:46:44.000Z | 2021-11-27T14:07:06.000Z | jocular/calcs.py | MartinCooke/jocular | 635816d4ef6aa6ea75187137e25386dad2d551e9 | [
"MIT"
]
| null | null | null | jocular/calcs.py | MartinCooke/jocular | 635816d4ef6aa6ea75187137e25386dad2d551e9 | [
"MIT"
]
| null | null | null | ''' Various astro calcs mainly based on Meeus.
'''
import numpy as np
import math
import time
from datetime import datetime
def julian_date(when):
    # from Meeus p 61; 'when' is a datetime object
y = when.year
m = when.month
d = when.day + when.hour/24 + when.minute/(24*60) + when.second/(24*3600)
if m < 3:
y -= 1
m += 12
    if y >= 1582 and m >= 10:
        # Gregorian
        a = int(y / 100)
b = 2 - a + int(a / 4)
else:
# Julian
b = 0
jd = int(365.25 * (y + 4716)) + int(30.6001 * (m + 1)) + d + b - 1524.5
return jd
def to_range(x, d):
# reduce x to range 0-d by adding or subtracting multiples of d
if x < 0:
return x - int((x / d) - 1) * d
else:
return x - int((x / d)) * d
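# Added worked examples (checked against the formula above):
#     to_range(370, 360) -> 10, to_range(-10, 360) -> 350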
def local_sidereal_time(when, longitude):
    # direct method of Meeus p87
# when must be in UT
jd = julian_date(when)
t = (jd - 2451545.0) / 36525.0
mst = 280.46061837 + 360.98564736629 * (jd - 2451545.0) + .000387933 * t**2 - t**3 / 38710000
# convert to 0-360
mst = to_range(mst, 360)
# convert from Greenwich to local
lst = mst + longitude
return lst
def sun_altitude(when, latitude, longitude):
    # Meeus p163+
jd = julian_date(when)
rads = math.pi / 180.
t = (jd - 2451545.0) / 36525.0
L0 = 280.46646 + 36000.76983 * t + 0.0003032 * t * t
L0 = to_range(L0, 360)
M = 357.52911 + 35999.05029 * t - 0.0001537 * t * t
#e = 0.016708634 - 0.000042037 * t - 0.0000001267 * t * t
C = (1.914602 - 0.004817 * t - 0.000014 * t * t) * np.sin(M * rads) + \
(0.019993 - 0.000101 * t) * np.sin(2 * M * rads) + \
0.000289 * np.sin(3 * M * rads)
long_sun = L0 + C
#v = M + C
# R = (1.000001018 * (1 - e * e)) / (1 + e * np.cos(v * rads))
sigma = 125.04 - 1934.136 * t
lam = long_sun - 0.00569 - 0.00478 * np.sin(sigma * rads)
ep = 23 + (26/60) + (21.448/3600) - (46.815*t + 0.00059 * t**2 - 0.001813*t**3) / 3600
ep_corr = ep + 0.00256 * np.cos(sigma * rads)
ra = np.arctan2(np.cos(ep_corr * rads) * np.sin(lam * rads), np.cos(lam * rads)) / rads
ra = to_range(ra, 360)
dec = np.arcsin(np.sin(ep_corr * rads) * np.sin(lam * rads)) / rads
# now convert to locale
ts = time.time()
utc_offset = (datetime.fromtimestamp(ts) - datetime.utcfromtimestamp(ts)).total_seconds() / 3600.0
lst = local_sidereal_time(when, longitude)
lat = latitude * rads
H = (-utc_offset*15 + lst - ra) * rads
alt = np.arcsin(np.sin(lat) * np.sin(dec * rads) + np.cos(lat) * np.cos(dec * rads) * np.cos(H)) / rads
return alt
| 26.106383 | 104 | 0.600652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 447 | 0.181855 |
087e86827c6cc73f03d6554fcf8f36b2777a11b4 | 1,221 | py | Python | win/python/CAO/calcClient.py | kioto/ORiN2Sample | a7a9007b696fdd3ab29f1ec5cededc59b232fae2 | [
"MIT"
]
| null | null | null | win/python/CAO/calcClient.py | kioto/ORiN2Sample | a7a9007b696fdd3ab29f1ec5cededc59b232fae2 | [
"MIT"
]
| null | null | null | win/python/CAO/calcClient.py | kioto/ORiN2Sample | a7a9007b696fdd3ab29f1ec5cededc59b232fae2 | [
"MIT"
]
| null | null | null | import win32com.client
import time
class CalcClient(object):
def __init__(self):
        # Create the CAO engine
self._eng = win32com.client.Dispatch('CAO.CaoEngine')
self._ws = self._eng.Workspaces(0)
self._ctrl = self._ws.AddController('bb1', 'CaoProv.Blackboard')
        # Add the variables
self._var_cmd = self._ctrl.AddVariable('cmd')
self._var_val1 = self._ctrl.AddVariable('val1')
self._var_val2 = self._ctrl.AddVariable('val2')
self._var_res = self._ctrl.AddVariable('res')
self._var_ack = self._ctrl.AddVariable('ack')
def calc(self, cmd_str, val1, val2):
print(f'calc({cmd_str}, {val1}, {val2})')
self._var_val1.Value = val1
self._var_val2.Value = val2
        self._var_cmd.Value = cmd_str # the calculation is triggered here
        # wait for the calculation to finish
while True:
if self._var_ack.Value is True:
break
time.sleep(0.1)
res = self._var_res.Value
print(' = ', res)
time.sleep(1)
if __name__ == '__main__':
cc = CalcClient()
cc.calc('ADD', 123, 567)
cc.calc('SUB', 123, 567)
cc.calc('MUL', 123, 567)
cc.calc('DIV', 123, 567) | 29.780488 | 73 | 0.564292 | 1,061 | 0.832157 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.181961 |
08804de9d3324b167c6447b69cc226552d4b7bbe | 282 | py | Python | Mundo-1/exercicio-05.py | FRafaelPA/Praticando-Python | d8a46beceeae2ac20acf4c63f86a32cba537c896 | [
"MIT"
]
| null | null | null | Mundo-1/exercicio-05.py | FRafaelPA/Praticando-Python | d8a46beceeae2ac20acf4c63f86a32cba537c896 | [
"MIT"
]
| null | null | null | Mundo-1/exercicio-05.py | FRafaelPA/Praticando-Python | d8a46beceeae2ac20acf4c63f86a32cba537c896 | [
"MIT"
]
| null | null | null | '''
Write a program that reads an integer and shows on the screen its successor and its predecessor.
'''
n = int(input('Entre com um valor: '))
antecessor = n - 1
sucessor = n + 1
msg = 'o antecessor do número {} é {} e seu sucessor é {}'.format(n, antecessor, sucessor)
print(msg) | 23.5 | 93 | 0.673759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.630662 |
0880a4f7dffdc5894d94be459ed45b4d22287a7c | 3,505 | py | Python | tests/sql_parser/ast/test_insert_statement_is_parsed.py | vladbalmos/mitzasql | 06c2a96eb4494095b2b72bc1454199a4940b0700 | [
"MIT"
]
| 69 | 2019-05-16T06:40:18.000Z | 2022-03-24T06:23:49.000Z | tests/sql_parser/ast/test_insert_statement_is_parsed.py | vladbalmos/mitzasql | 06c2a96eb4494095b2b72bc1454199a4940b0700 | [
"MIT"
]
| 36 | 2019-05-15T19:55:24.000Z | 2021-07-22T07:07:14.000Z | tests/sql_parser/ast/test_insert_statement_is_parsed.py | vladbalmos/mitzasql | 06c2a96eb4494095b2b72bc1454199a4940b0700 | [
"MIT"
]
| 8 | 2019-05-16T06:56:28.000Z | 2022-02-11T02:24:12.000Z | import pytest
from mitzasql.sql_parser.parser import parse
from mitzasql.utils import dfs
def test_simple_insert_is_parsed():
raw_sql = '''
INSERT DELAYED INTO table (col1, col2, col3) VALUES (100, 200, 300)
'''
ast = parse(raw_sql)
assert len(ast) > 0
ast = ast[0]
assert ast.type == 'insert'
assert len(ast.children) == 4
modifier = ast.get_child('modifier')
assert modifier is not None
assert len(modifier.children) == 1
into = ast.get_child('into')
assert into is not None
assert len(into.children) == 1
assert into.children[0].children[0].value == 'table'
columns = ast.get_child('columns')
assert columns is not None
assert len(columns.children) == 1
assert len(columns.children[0].children) == 3
values = ast.get_child('values')
assert values is not None
assert len(values.children) == 1
assert len(values.children[0].children) == 3
def test_insert_without_columns_is_parsed():
raw_sql = '''
INSERT INTO table VALUES (100, 200, 300)
'''
ast = parse(raw_sql)
assert len(ast) > 0
ast = ast[0]
assert ast.type == 'insert'
assert len(ast.children) == 2
into = ast.get_child('into')
assert into is not None
assert len(into.children) == 1
assert into.children[0].children[0].value == 'table'
values = ast.get_child('values')
assert values is not None
assert len(values.children) == 1
assert len(values.children[0].children) == 3
def test_insert_with_select_is_parsed():
raw_sql = '''
INSERT INTO table SELECT col1, col2 FROM tbl2 WHERE col1 > 1 ON DUPLICATE
KEY UPDATE id = 1
'''
ast = parse(raw_sql)
assert len(ast) > 0
ast = ast[0]
assert ast.type == 'insert'
assert len(ast.children) == 3
into = ast.get_child('into')
assert into is not None
assert len(into.children) == 1
assert into.children[0].children[0].value == 'table'
select = ast.get_child('select')
assert select is not None
on = ast.get_child('on')
assert on is not None
assert len(on.children) == 1
duplicate = ast.get_child('duplicate')
assert duplicate is not None
assert len(duplicate.children) == 1
key = ast.get_child('key')
assert key is not None
assert len(key.children) == 1
update = ast.get_child('update')
assert update is not None
assert len(update.children) == 1
def test_insert_with_assignment_list_is_parsed():
raw_sql = '''
INSERT INTO table SET col1 = 2, col2 = 3
'''
ast = parse(raw_sql)
assert len(ast) > 0
ast = ast[0]
assert ast.type == 'insert'
assert len(ast.children) == 2
into = ast.get_child('into')
assert into is not None
assert len(into.children) == 1
assert into.children[0].children[0].value == 'table'
assignment_list = ast.get_child('assignment_list')
assert assignment_list is not None
assert len(assignment_list.children) == 2
assignment = assignment_list.children[0]
assert assignment.type == 'operator'
assert assignment.value == '='
assert len(assignment.children) == 2
assert assignment.children[0].value == 'col1'
assert assignment.children[1].value == '2'
assignment = assignment_list.children[1]
assert assignment.type == 'operator'
assert assignment.value == '='
assert len(assignment.children) == 2
assert assignment.children[0].value == 'col2'
assert assignment.children[1].value == '3'
| 26.353383 | 77 | 0.650499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 522 | 0.14893 |
088195a1ba4520a7f098f9cb7902e66481e7e187 | 21 | py | Python | checkov/version.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
]
| null | null | null | checkov/version.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
]
| null | null | null | checkov/version.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
]
| null | null | null | version = '2.0.1048'
| 10.5 | 20 | 0.619048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.47619 |
0881a63860af8a2c6c4f14401c7170b38015ba3a | 775 | py | Python | ArraysAndSorting/MakeItAnagram.py | tejasnikumbh/Algorithms | 2a2983a522be295ce95bd970a0ee8a617866992f | [
"BSD-2-Clause"
]
| 8 | 2015-04-16T03:43:49.000Z | 2018-08-14T22:47:03.000Z | ArraysAndSorting/MakeItAnagram.py | tejasnikumbh/Algorithms | 2a2983a522be295ce95bd970a0ee8a617866992f | [
"BSD-2-Clause"
]
| null | null | null | ArraysAndSorting/MakeItAnagram.py | tejasnikumbh/Algorithms | 2a2983a522be295ce95bd970a0ee8a617866992f | [
"BSD-2-Clause"
]
| 7 | 2016-03-22T20:29:27.000Z | 2018-09-29T18:55:47.000Z | # Importing standard libraries
import sys
'''
Main function for the program. The logic is as follows:
make a frequency table for each of the two strings, then
sum the absolute differences of the counts (the non-overlapping regions).
'''
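# Added worked example: for s1 = "abc" and s2 = "cde" the per-letter count
# differences are a:1, b:1, c:0, d:1, e:1, so the program prints 4 (the
# number of deletions needed to make the strings anagrams).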
if __name__ == "__main__":
# Parsing in the input
s1 = list(sys.stdin.readline().rstrip())
s2 = list(sys.stdin.readline().rstrip())
# Initialize the character array as a hashtable
charFreqs1 = [0]*26
charFreqs2 = [0]*26
anagram = [0]*26
# Record frequencies of characters in s1 and s2
for i in s1:
charFreqs1[ord(i) - ord('a')] += 1
for i in s2:
charFreqs2[ord(i) - ord('a')] += 1
for i in range(26):
anagram[i] = abs(charFreqs1[i] - charFreqs2[i])
    print(sum(anagram))
| 31 | 81 | 0.636129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 352 | 0.454194 |
08826307649d95fd1f1fa357479507c8385245c7 | 1,181 | py | Python | tests/test_get_schedule.py | j-muller/pypuregym | 396862047f8b5c0b1138b5c562ddb6958aaa6817 | [
"MIT"
]
| 1 | 2020-12-31T01:42:14.000Z | 2020-12-31T01:42:14.000Z | tests/test_get_schedule.py | j-muller/pypuregym | 396862047f8b5c0b1138b5c562ddb6958aaa6817 | [
"MIT"
]
| null | null | null | tests/test_get_schedule.py | j-muller/pypuregym | 396862047f8b5c0b1138b5c562ddb6958aaa6817 | [
"MIT"
]
| null | null | null | from .utilities import Response
SCHEDULE_RESPONSE = b"""
{"error":{"code":200,"message":"Success"},"data":{"classes":[{
"id":113209,"sector":"F","class_type_id":48,"start_date":"2020-06-07",
"end_date":"2020-06-07","start_time":"09:00:00","end_time":"09:45:00",
"duration":"2700000","teacher_id":782,"location_id":10,"level_id":9,
"pillar_id":6,"button_status":0,"booking_id":0,
"start_datetime":"2020-06-07T09:00:00+08:00","is_free":false,
"color_code":"","is_filmed":false,"is_online":0,"is_cycling":false,
"free_class_type":0,"special_flag":null,"duration_min":45,
"class_type":{"id":48,"name":"TRX Blast",
"description":"","is_fuze":false,"pillar":{"name":"Strength",
"color":"#ed1c24","code":"strength_and_conditioning"},"level":"All Levels"},
"teacher":{"id":782,"name":"","full_name":"","image_link":"",
"type":"teacher"}}]}}
"""
def test_get_schedule(pure_api, monkeypatch):
monkeypatch.setattr(
'requests.sessions.Session.get',
lambda *args, **kwargs: Response(SCHEDULE_RESPONSE),
)
classes = pure_api.get_schedule(
start_date='2020-06-07',
last_date='2020-06-07',
location_id=10,
)
assert len(classes) == 1
| 36.90625 | 76 | 0.663844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 839 | 0.710415 |
0883af2fe80ecab9fbfc1b7be524e037979d920a | 518 | py | Python | testing/examples/talib-macd.py | pchaos/quanttesting | 98331670547e8a45ba93b49f3e9c660495645114 | [
"MIT"
]
| 5 | 2020-04-08T14:14:05.000Z | 2021-06-29T03:42:01.000Z | testing/examples/talib-macd.py | pchaos/quanttesting | 98331670547e8a45ba93b49f3e9c660495645114 | [
"MIT"
]
| null | null | null | testing/examples/talib-macd.py | pchaos/quanttesting | 98331670547e8a45ba93b49f3e9c660495645114 | [
"MIT"
]
| 7 | 2020-04-15T15:07:39.000Z | 2022-03-23T05:44:02.000Z | '''
Compute MACD with Ta-lib
'''
import pandas as pd
import numpy as np
import talib as ta
import tushare as ts
from matplotlib import rc
import matplotlib.pyplot as plt
import seaborn as sns
rc('mathtext', default='regular')
sns.set_style('white')
# %matplotlib
plt.rcParams["figure.figsize"] = (20, 10)
dw = ts.get_k_data("600600")
close = dw.close.values
dw['macd'], dw['macdsignal'], dw['macdhist'] = ta.MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)
dw[['close','macd','macdsignal','macdhist']].plot()
plt.show() | 24.666667 | 107 | 0.722008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.287356 |
0883de75e3222b2bd0245697a6613a014446c4c7 | 252 | py | Python | packages/regression_model/regression_model/__init__.py | abdurrehman11/deploying-machine-learning-models | 93872e4c197df2543e492af3df718bdad1817752 | [
"BSD-3-Clause"
]
| null | null | null | packages/regression_model/regression_model/__init__.py | abdurrehman11/deploying-machine-learning-models | 93872e4c197df2543e492af3df718bdad1817752 | [
"BSD-3-Clause"
]
| null | null | null | packages/regression_model/regression_model/__init__.py | abdurrehman11/deploying-machine-learning-models | 93872e4c197df2543e492af3df718bdad1817752 | [
"BSD-3-Clause"
]
| null | null | null | import logging
from regression_model.config import config
from regression_model.config import logging_config
VERSION_PATH = config.PACKAGE_ROOT / 'VERSION'
with open(VERSION_PATH, 'r') as version_file:
__version__ = version_file.read().strip() | 25.2 | 50 | 0.797619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.047619 |
0886c3adb37d4bb2d284b34954bef308daf23bd3 | 508 | py | Python | 001-Python-basico/008-desafio-pratico.py | clebertonf/Python-course | a57f405cbd27f96e0cb61128df31e9249c79a962 | [
"MIT"
]
| null | null | null | 001-Python-basico/008-desafio-pratico.py | clebertonf/Python-course | a57f405cbd27f96e0cb61128df31e9249c79a962 | [
"MIT"
]
| null | null | null | 001-Python-basico/008-desafio-pratico.py | clebertonf/Python-course | a57f405cbd27f96e0cb61128df31e9249c79a962 | [
"MIT"
]
| null | null | null | from datetime import date
year_current_date = date.today().year
def get_info(name, age, height, weight):
year_birth = year_current_date - age
imc = round(weight / (height ** 2), 2)
print(f"{name} tem {age} anos, {height} de altura e pesa {weight} KG.")
print(f"O IMC do {name} é: {imc}")
print(f"{name} nasceu em {year_birth}")
get_info("Cleberton", 28, 1.69, 75)
# The function receives some information as parameters and prints the year of birth and the BMI
# in a few customized phrases
| 28.222222 | 83 | 0.687008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.496094 |
0887199a887a1fbf59285a7c42522a561d36fdf6 | 160 | py | Python | jsons.py | tebeka/py2go-cheatsheet | 14c83850876ef80c36af326ab4fc6f56344781c7 | [
"BSD-3-Clause"
]
| 13 | 2017-09-09T08:32:34.000Z | 2022-02-28T04:32:43.000Z | jsons.py | tebeka/py2go-cheatsheet | 14c83850876ef80c36af326ab4fc6f56344781c7 | [
"BSD-3-Clause"
]
| 3 | 2017-11-25T18:48:11.000Z | 2017-12-30T13:00:04.000Z | jsons.py | tebeka/py2go-cheatsheet | 14c83850876ef80c36af326ab4fc6f56344781c7 | [
"BSD-3-Clause"
]
| 2 | 2019-11-03T19:58:17.000Z | 2020-04-28T01:14:17.000Z | import json
from sys import stdout
# START
data = '''{
"name": "bugs",
"age": 76
}'''
obj = json.loads(data)
json.dump(obj, stdout)
# END
print(obj)
| 10.666667 | 22 | 0.59375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.34375 |
08873554c1a8d8174ca6425485bfe2a0d0880e6a | 2,306 | py | Python | tests/components/speedtestdotnet/test_init.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
]
| 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | tests/components/speedtestdotnet/test_init.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
]
| 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | tests/components/speedtestdotnet/test_init.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | [
"Apache-2.0"
]
| null | null | null | """Tests for SpeedTest integration."""
from unittest.mock import patch
import speedtest
from openpeerpower import config_entries
from openpeerpower.components import speedtestdotnet
from openpeerpower.setup import async_setup_component
from tests.common import MockConfigEntry
async def test_setup_with_config(opp):
"""Test that we import the config and setup the integration."""
config = {
speedtestdotnet.DOMAIN: {
speedtestdotnet.CONF_SERVER_ID: "1",
speedtestdotnet.CONF_MANUAL: True,
speedtestdotnet.CONF_SCAN_INTERVAL: "00:01:00",
}
}
with patch("speedtest.Speedtest"):
assert await async_setup_component(opp, speedtestdotnet.DOMAIN, config)
async def test_successful_config_entry(opp):
"""Test that SpeedTestDotNet is configured successfully."""
entry = MockConfigEntry(
domain=speedtestdotnet.DOMAIN,
data={},
)
entry.add_to_opp(opp)
with patch("speedtest.Speedtest"), patch(
"openpeerpower.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
) as forward_entry_setup:
await opp.config_entries.async_setup(entry.entry_id)
assert entry.state is config_entries.ConfigEntryState.LOADED
assert forward_entry_setup.mock_calls[0][1] == (
entry,
"sensor",
)
async def test_setup_failed(opp):
"""Test SpeedTestDotNet failed due to an error."""
entry = MockConfigEntry(
domain=speedtestdotnet.DOMAIN,
data={},
)
entry.add_to_opp(opp)
with patch("speedtest.Speedtest", side_effect=speedtest.ConfigRetrievalError):
await opp.config_entries.async_setup(entry.entry_id)
assert entry.state is config_entries.ConfigEntryState.SETUP_RETRY
async def test_unload_entry(opp):
"""Test removing SpeedTestDotNet."""
entry = MockConfigEntry(
domain=speedtestdotnet.DOMAIN,
data={},
)
entry.add_to_opp(opp)
with patch("speedtest.Speedtest"):
await opp.config_entries.async_setup(entry.entry_id)
assert await opp.config_entries.async_unload(entry.entry_id)
await opp.async_block_till_done()
assert entry.state is config_entries.ConfigEntryState.NOT_LOADED
assert speedtestdotnet.DOMAIN not in opp.data
| 28.825 | 82 | 0.717259 | 0 | 0 | 0 | 0 | 0 | 0 | 2,014 | 0.873374 | 421 | 0.182567 |
088834b65e8fc3335e7c944aeb1e307017ece6c9 | 1,258 | py | Python | opetuskoodi/2021_10_18/2_kerta_kertaus.py | mikkokotola/pythonkoodaus | 5415b3d87dfcb65b72edb916967824304d155d9a | [
"CC-BY-4.0",
"MIT"
]
| null | null | null | opetuskoodi/2021_10_18/2_kerta_kertaus.py | mikkokotola/pythonkoodaus | 5415b3d87dfcb65b72edb916967824304d155d9a | [
"CC-BY-4.0",
"MIT"
]
| null | null | null | opetuskoodi/2021_10_18/2_kerta_kertaus.py | mikkokotola/pythonkoodaus | 5415b3d87dfcb65b72edb916967824304d155d9a | [
"CC-BY-4.0",
"MIT"
]
| null | null | null | # Review, session 3
# Variables and reading input from the user
nimi = input("Anna nimesi: ")
kengännumero = input("Mikä on kengännumerosi: ")
print("Moi vaan, " + nimi + "! Kengännumerosi on " + kengännumero + ".")
# F-string
print(f"Moi vaan, {nimi}! Kengännumerosi on {kengännumero}.")
# Numbers
# Age calculator
syntymävuosi = input("Mikä on syntymävuotesi? ")
syntymävuosi = int(syntymävuosi) # Convert the string to an integer so that we can calculate with it
ikä = 2021 - syntymävuosi
print(f"Ikäsi vuoden 2021 lopussa on {ikä}")
# A calculator that can multiply numbers
luku1 = int(input("Anna luku: "))
luku2 = int(input("Anna toinen luku: "))
tulos = luku1 * luku2
print(f"{luku1} * {luku2} = {tulos}")
# A calculator that computes the sum of three numbers
summa = 0
luku = int(input("Ensimmäinen luku: "))
summa = summa + luku
luku = int(input("Toinen luku: "))
summa = summa + luku
luku = int(input("kolmas luku: "))
summa = summa + luku
print(f"Lukujen summa: {summa}")
# What kinds of calculations can be done
print(5+2)
print(5-2)
print(5*2)
print(5/2)
print(5//2)
print(5%2)
print(2 + 2 * 3)
print((2 + 2) * 3)
# Floating-point numbers = decimal numbers
luku1 = 4.0
luku2 = 1.5
tulos = luku1 - luku2
print(f"Tulos on {tulos}")
print(f"{luku1} - {luku2} = {tulos}")
| 21.689655 | 99 | 0.683625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 699 | 0.544817 |
0888b580bb9eb1968da656fe5efb329d6602a748 | 616 | py | Python | case/xpath.py | xierensong/learnPython | 33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455 | [
"MIT"
]
| null | null | null | case/xpath.py | xierensong/learnPython | 33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455 | [
"MIT"
]
| null | null | null | case/xpath.py | xierensong/learnPython | 33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455 | [
"MIT"
]
| 1 | 2018-10-11T08:20:44.000Z | 2018-10-11T08:20:44.000Z | import requests
from lxml import etree
if __name__ == '__main__':
headers = {"User-Agent":'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
url = 'https://www.apache.org/dist/ant/'
    sourceHTML = requests.get(url, headers=headers)
selector = etree.HTML(sourceHTML.text)
folder_list = selector.xpath('//pre[position()=1]/a[@href]')
for elmt in folder_list:
        # extract the href attribute of each anchor element
href_TT = elmt.get('href')
print('href_TT ', href_TT)
        if href_TT.endswith('/'):  # a trailing slash marks a sub-folder
print('folder_list', elmt.attrib) | 41.066667 | 144 | 0.63474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.37987 |
088c4b3ec59271d9af6031e07d4cb3e300f061c4 | 62 | py | Python | hermes1d/__init__.py | certik/hermes1d-llnl | 8e3b76fd3022af90e5c4c3923337a422d79604d5 | [
"BSD-3-Clause"
]
| 1 | 2016-08-18T23:21:55.000Z | 2016-08-18T23:21:55.000Z | hermes1d/__init__.py | certik/hermes1d-llnl | 8e3b76fd3022af90e5c4c3923337a422d79604d5 | [
"BSD-3-Clause"
]
| null | null | null | hermes1d/__init__.py | certik/hermes1d-llnl | 8e3b76fd3022af90e5c4c3923337a422d79604d5 | [
"BSD-3-Clause"
]
| null | null | null | from h1d_wrapper.h1d_wrapper import Element, Mesh, Linearizer
| 31 | 61 | 0.854839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
088ddea79b72540b919336ee600c90b0505ded86 | 5,132 | py | Python | jelm/tests/unit/test_jelm_class.py | endremborza/jelm | 6916bbd4ceb909ad3350c56d3a149bdb97671489 | [
"MIT"
]
| null | null | null | jelm/tests/unit/test_jelm_class.py | endremborza/jelm | 6916bbd4ceb909ad3350c56d3a149bdb97671489 | [
"MIT"
]
| null | null | null | jelm/tests/unit/test_jelm_class.py | endremborza/jelm | 6916bbd4ceb909ad3350c56d3a149bdb97671489 | [
"MIT"
]
| null | null | null | import pytest
from jelm import Jelm, Node, Edge
from jelm.tests.network_case_set_class import NetwokCaseTemplate
def test_eq(jelm_pair_case: NetwokCaseTemplate):
jelm_pair_case.evaluate_fun(non_altering_function=lambda x: x)
assert not (10 == Jelm())
assert not ("fing" == Jelm())
def test_jelm_repr(jelm_pair_case: NetwokCaseTemplate):
def repr_check(el: Jelm):
repr_string = el.__repr__()
assert "jelm" in repr_string
assert str(len(el.nodes.keys())) in repr_string
return el
jelm_pair_case.evaluate_fun(non_altering_function=repr_check)
def test_neighbors(jelm_pair_case: NetwokCaseTemplate):
def neighbor_check(el: Jelm):
for nid, n in el.nodes.items():
for nid2 in n.neighbors.keys():
assert nid in el.get_node(nid2).neighbors.keys()
for nid3 in n.target_neighbors.keys():
assert nid in el.get_node(nid3).source_neighbors.keys()
return el
jelm_pair_case.evaluate_fun(non_altering_function=neighbor_check)
def test_add_node_as_object_w_cases(jelm_pair_case: NetwokCaseTemplate):
def add_node_as_obj(el: Jelm):
el.add_object({"type": "node", "id": "n10"})
return el
def assert_node_as_obj_added(el: Jelm):
assert isinstance(el.get_node("n10"), Node)
def catch_node_as_obj_add(el: Jelm, e):
assert isinstance(e, ValueError)
assert isinstance(el.get_node("n10"), Node)
jelm_pair_case.evaluate_fun(
altering_function=add_node_as_obj,
assert_alteration=assert_node_as_obj_added,
catch_alteration_exception=catch_node_as_obj_add,
)
def test_add_edge_as_object_w_cases(jelm_pair_case: NetwokCaseTemplate):
def add_edge_as_obj(el: Jelm):
el.add_object({"type": "edge", "source": "n1", "target": "n2"})
return el
def assert_edge_as_obj_added(el: Jelm):
n = el.get_node("n1")
assert "n2" in n.neighbors.keys()
assert "n1" in el.get_node("n2").neighbors
assert "n2" in n.target_neighbors.keys()
def catch_edge_as_obj_add(el: Jelm, e):
assert isinstance(e, KeyError)
assert ("n1" not in el.nodes.keys()) or ("n2" not in el.nodes.keys())
jelm_pair_case.evaluate_fun(
altering_function=add_edge_as_obj,
assert_alteration=assert_edge_as_obj_added,
catch_alteration_exception=catch_edge_as_obj_add,
)
def test_add_edge_jelm_object_w_cases(jelm_pair_case: NetwokCaseTemplate):
def add_edge_jelm_obj(el: Jelm):
el.add_object(Edge(source="n1", target="n2", id="fing"))
return el
def assert_edge_jelm_obj_added(el: Jelm):
n = el.get_node("n1")
assert "n2" in n.neighbors.keys()
assert "n1" in el.get_node("n2").neighbors
assert "n2" in n.target_neighbors.keys()
edge_ids = [e.id for e in n.neighbors["n2"]]
assert "fing" in edge_ids
def catch_edge_jelm_obj_add(el: Jelm, e):
assert isinstance(e, KeyError)
assert ("n1" not in el.nodes.keys()) or ("n2" not in el.nodes.keys())
jelm_pair_case.evaluate_fun(
altering_function=add_edge_jelm_obj,
assert_alteration=assert_edge_jelm_obj_added,
catch_alteration_exception=catch_edge_jelm_obj_add,
)
def test_init():
el = Jelm(metadata={"author": "John Doe"}, objects=[])
assert isinstance(el.objects, list)
assert isinstance(el.metadata, dict)
el2 = Jelm(metadata={"author": "John Doe"}, nodes={})
assert el == el2
el3 = Jelm()
assert not (el == el3)
el4_1 = Jelm(nodes={"id1": Node(id="n1")})
el4_2 = Jelm(objects=[{"type": "node", "id": "n1"}])
assert el4_1 == el4_2
def test_init_w_cases(jelm_pair_case: NetwokCaseTemplate):
def transform_init(el):
el_from_nodes = Jelm(metadata=el.metadata, nodes=el.nodes)
assert el_from_nodes == el
return el_from_nodes
jelm_pair_case.evaluate_fun(non_altering_function=transform_init)
def test_add_object():
el = Jelm()
el.add_object({"type": "node", "id": "n1"})
el.add_object(Node(id="n2"))
el.add_object({"type": "edge", "source": "n1", "target": "n2"})
el.add_object(Node(id="n3", attributes={"priority": "low"}))
with pytest.raises(ValueError):
el.add_object({"no": "type"})
with pytest.raises(ValueError):
el.add_object({"type": "wrong"})
with pytest.raises(ValueError):
el.add_object(10)
el.add_edge("n3", "n2")
el.add_node("n4", {"order": "latest"})
assert len(set([type(o) for o in el.objects])) > 1
assert isinstance(el.objects[0], Node)
assert isinstance(el.objects[2], Edge)
def test_iter():
el = Jelm(
metadata={"author": "John Doe"},
objects=[
{"type": "node", "id": "n1"},
{"type": "node", "id": "n2"},
{"type": "edge", "source": "n1", "target": "n2"},
],
)
for idx, o in enumerate(el):
if idx < 2:
assert isinstance(o, Node)
else:
assert isinstance(o, Edge)
| 25.034146 | 77 | 0.639517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 447 | 0.087101 |
088f0e150b58a95dbcc3bacf169c6bdc57e4eedc | 6,582 | py | Python | trajectory_prediction/evaluation.py | libai2019/dataset-api | 2f793821864f32bd210c17060a70682488bb74e0 | [
"Apache-2.0"
]
| 385 | 2018-07-02T22:21:25.000Z | 2022-03-28T13:12:47.000Z | trajectory_prediction/evaluation.py | libai2019/dataset-api | 2f793821864f32bd210c17060a70682488bb74e0 | [
"Apache-2.0"
]
| 102 | 2018-08-01T10:40:40.000Z | 2022-03-16T10:32:44.000Z | trajectory_prediction/evaluation.py | libai2019/dataset-api | 2f793821864f32bd210c17060a70682488bb74e0 | [
"Apache-2.0"
]
| 98 | 2018-07-12T18:36:42.000Z | 2022-03-20T04:38:03.000Z | '''
Evaluation code for trajectory prediction.
We record the objects in the last frame of every sequence in the test dataset as considered objects; these are stored in considered_objects.txt.
We compare the error between your predicted locations over the next 3 s (six positions) and the ground truth for these considered objects.
To run this script, make sure that your results are in the required format.
'''
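# Added note: with the weights defined inside evaluation() below, the
# reported metrics reduce to
#     WSADE = 0.2 * ADEv + 0.58 * ADEp + 0.22 * ADEb
#     WSFDE = 0.2 * FDEv + 0.58 * FDEp + 0.22 * FDEb
# where v/p/b denote vehicles, pedestrians and bicyclists.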
import os
import argparse
import numpy as np
def evaluation(frame_data_result, frame_data_gt, consider_peds):
# defined length of predicted trajectory
predict_len = 6
# the counter for testing sequences
sequence_count = 0
# weighted coefficient for vehicles, pedestrians, bicyclists respectively
vehicle_coe = 0.2
pedestrian_coe = 0.58
bicycle_coe = 0.22
# error for missing considered objects
miss_error = 100
# record displacement error for three types of objects
vehicle_error = []
pedestrian_error = []
bicycle_error = []
# record final displacement error for three types of objects
vehicle_final_error = []
pedestrian_final_error = []
bicycle_final_error = []
for i in range(0, len(frame_data_result) - predict_len + 1, predict_len):
current_consider_ped = consider_peds[sequence_count]
sequence_count = sequence_count + 1
for j in range(i, i + predict_len):
for ped_gt in frame_data_gt[j]:
if current_consider_ped.count(int(ped_gt[0])):
# ignore unknown objects
if ped_gt[1] == 5:
continue
# error will be large if missing considered objects
error = miss_error
for ped_res in frame_data_result[j]:
if int(ped_res[0]) == int(ped_gt[0]):
error = distance([ped_gt[2], ped_gt[3]], [ped_res[2], ped_res[3]])
break
# distribute the error to different types of objects
if ped_gt[1] == 1 or ped_gt[1] == 2:
vehicle_error.append(error)
if j == i + predict_len - 1:
vehicle_final_error.append(error)
elif ped_gt[1] == 3:
pedestrian_error.append(error)
if j == i + predict_len - 1:
pedestrian_final_error.append(error)
elif ped_gt[1] == 4:
bicycle_error.append(error)
if j == i + predict_len - 1:
bicycle_final_error.append(error)
# the mean error for objects
vehicle_mean_error = sum(vehicle_error) / len(vehicle_error)
pedestrian_mean_error = sum(pedestrian_error) / len(pedestrian_error)
bicycle_mean_error = sum(bicycle_error) / len(bicycle_error)
# the final error for objects
vehicle_final_error = sum(vehicle_final_error) / len(vehicle_final_error)
pedestrian_final_error = sum(pedestrian_final_error) / len(pedestrian_final_error)
bicycle_final_error = sum(bicycle_final_error) / len(bicycle_final_error)
# weighted sum of mean error
WSADE = vehicle_mean_error * vehicle_coe + pedestrian_mean_error * pedestrian_coe + bicycle_mean_error * bicycle_coe
# weighted sum of final error
WSFDE = vehicle_final_error * vehicle_coe + pedestrian_final_error * pedestrian_coe + bicycle_final_error * bicycle_coe
print('WSADE:', WSADE)
print('ADEv, ADEp, ADEb:', vehicle_mean_error, pedestrian_mean_error, bicycle_mean_error)
print('WSFDE:', WSFDE)
    print('FDEv, FDEp, FDEb:', vehicle_final_error, pedestrian_final_error, bicycle_final_error)
return (WSADE, vehicle_mean_error, pedestrian_mean_error, bicycle_mean_error,
WSFDE, vehicle_final_error, pedestrian_final_error, bicycle_final_error)
def readConsiderObjects(filename):
print('Load file: ', filename)
# load considered objects of each sequence
consider_peds = []
with open(filename, 'r') as file_to_read:
while True:
lines = file_to_read.readline()
if not lines:
break
curLine = lines.strip().split(" ")
            intLine = list(map(int, curLine))  # list() so that .count() also works on Python 3
consider_peds.append(intLine)
return consider_peds
def readTrajectory(filename):
print('Load file: ',filename)
raw_data = []
# load all the data in the file
with open(filename, 'r') as file_to_read:
while True:
lines = file_to_read.readline()
if not lines:
break
timestamp, id, type, x, y = [float(i) for i in lines.split()]
raw_data.append((timestamp, id, type, x, y))
# get frame list
frameList = []
for i in range(len(raw_data)):
if frameList.count(raw_data[i][0]) == 0:
frameList.append(raw_data[i][0])
counter = 0
frame_data = []
for ind, frame in enumerate(frameList):
pedsInFrame = []
# Extract all pedestrians in current frame
for r in range(counter, len(raw_data)):
row = raw_data[r]
if raw_data[r][0] == frame:
pedsInFrame.append([row[1], row[2], row[3], row[4]])
counter += 1
else:
break
frame_data.append(pedsInFrame)
return frame_data
def distance(pos1, pos2):
# Euclidean distance
return np.sqrt(pow(pos1[0]-pos2[0], 2) + pow(pos1[1]-pos2[1], 2))
def main():
parser = argparse.ArgumentParser(
        description='Evaluation of trajectory prediction.')
parser.add_argument('--gt_dir', default='./test_eval_data/prediction_gt.txt',
help='the dir of ground truth')
parser.add_argument('--object_file', default='./test_eval_data/considered_objects.txt',
help='the dir of considered objects')
parser.add_argument('--res_file', default='./test_eval_data/prediction_result.txt',
help='the dir of results')
args = parser.parse_args()
# load results
file_result = args.res_file
frame_data_result = readTrajectory(file_result)
# load ground truth
file_gt = args.gt_dir
frame_data_gt = readTrajectory(file_gt)
# load considered objects
file_consider_objects = args.object_file
consider_peds = readConsiderObjects(file_consider_objects)
# Do evaluation
evaluation(frame_data_result, frame_data_gt, consider_peds)
if __name__ == '__main__':
main()
| 36.364641 | 139 | 0.628836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,524 | 0.231541 |
0891695cf058c07ea805662895cf40325fd7ce37 | 2,561 | py | Python | shellfoundry/commands/install_command.py | p-sherratt/shellfoundry | d1f35a31123b9e701c801345fb633b6fda5420b7 | [
"Apache-2.0"
]
| null | null | null | shellfoundry/commands/install_command.py | p-sherratt/shellfoundry | d1f35a31123b9e701c801345fb633b6fda5420b7 | [
"Apache-2.0"
]
| 1 | 2021-03-25T23:21:02.000Z | 2021-03-25T23:21:02.000Z | shellfoundry/commands/install_command.py | p-sherratt/shellfoundry | d1f35a31123b9e701c801345fb633b6fda5420b7 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import click
import os
try:
# Python 2.x version
from urllib2 import HTTPError, URLError
except ImportError:
# Python 3.x version
from urllib.error import HTTPError, URLError
from shellfoundry.exceptions import FatalError
from shellfoundry.utilities.config_reader import Configuration, CloudShellConfigReader
from shellfoundry.utilities.installer import ShellInstaller
from shellfoundry.utilities.shell_config_reader import ShellConfigReader
from shellfoundry.utilities.shell_package import ShellPackage
from shellfoundry.utilities.shell_package_installer import ShellPackageInstaller
class InstallCommandExecutor(object):
def __init__(self, cloudshell_config_reader=None, installer=None, shell_config_reader=None,
shell_package_installer=None):
self.cloudshell_config_reader = cloudshell_config_reader or Configuration(CloudShellConfigReader())
self.installer = installer or ShellInstaller()
self.shell_config_reader = shell_config_reader or ShellConfigReader()
self.shell_package_installer = shell_package_installer or ShellPackageInstaller()
def install(self):
current_path = os.getcwd()
shell_package = ShellPackage(current_path)
if shell_package.is_layer_one():
click.secho("Installing a L1 shell directly via shellfoundry is not supported. "
"Please follow the L1 shell import procedure described in help.quali.com.", fg="yellow")
else:
if shell_package.is_tosca():
self.shell_package_installer.install(current_path)
else:
self._install_old_school_shell()
click.secho('Successfully installed shell', fg='green')
def _install_old_school_shell(self):
error = None
try:
cloudshell_config = self.cloudshell_config_reader.read()
shell_config = self.shell_config_reader.read()
self.installer.install(shell_config.name, cloudshell_config)
except HTTPError as e:
if e.code == 401:
raise FatalError('Login to CloudShell failed. Please verify the credentials in the config')
error = str(e)
except URLError:
raise FatalError('Connection to CloudShell Server failed. Please make sure it is up and running properly.')
except Exception as e:
error = str(e)
if error:
raise FatalError("Failed to install shell. CloudShell responded with: '{}'".format(error))
| 43.40678 | 119 | 0.705193 | 1,924 | 0.751269 | 0 | 0 | 0 | 0 | 0 | 0 | 488 | 0.190551 |
0893025cc2e6d02ad0cc2a38ee4b17db36c8a68d | 9,064 | py | Python | sam_actions/scripts/gps_fix_server.py | Jollerprutt/sam_common | dd8b43b3c69eee76fe0c35a98db9dfb67f2b79f2 | [
"BSD-3-Clause"
]
| 1 | 2020-06-09T18:23:53.000Z | 2020-06-09T18:23:53.000Z | sam_actions/scripts/gps_fix_server.py | Jollerprutt/sam_common | dd8b43b3c69eee76fe0c35a98db9dfb67f2b79f2 | [
"BSD-3-Clause"
]
| 3 | 2020-10-06T09:46:03.000Z | 2021-03-10T13:40:44.000Z | sam_actions/scripts/gps_fix_server.py | Jollerprutt/sam_common | dd8b43b3c69eee76fe0c35a98db9dfb67f2b79f2 | [
"BSD-3-Clause"
]
| 5 | 2020-01-20T18:33:55.000Z | 2020-12-29T12:34:22.000Z | #!/usr/bin/python
import rospy
from rospy import ROSException
from std_msgs.msg import Header, Bool
from std_srvs.srv import SetBool
from geometry_msgs.msg import PoseWithCovarianceStamped, Point, Quaternion
from sensor_msgs.msg import NavSatFix, NavSatStatus
from sam_msgs.msg import GetGPSFixAction, GetGPSFixFeedback, GetGPSFixResult
from sam_msgs.msg import PercentStamped
import actionlib
import tf_conversions
import tf
from tf.transformations import quaternion_from_euler, quaternion_multiply
from geodesy import utm
import math
import numpy as np
class GPSFixServer(object):
_feedback = GetGPSFixFeedback()
_result = GetGPSFixResult()
def __init__(self, name):
self.last_gps_pos = None
self.last_dr_pos = None
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name, GetGPSFixAction, execute_cb=self.execute_cb, auto_start=False)
self.pose_pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size=10)
self.lcg_disable_pub = rospy.Publisher('/sam/ctrl/lcg/pid_enable', Bool, queue_size=10)
self.vbs_disable_pub = rospy.Publisher('/sam/ctrl/vbs/pid_enable', Bool, queue_size=10)
self.lcg_pub = rospy.Publisher('/sam/core/lcg_cmd', PercentStamped, queue_size=10)
self.vbs_pub = rospy.Publisher('/sam/core/vbs_cmd', PercentStamped, queue_size=10)
self.listener = tf.TransformListener()
self._as.start()
def start_stop_dvl(self, value, value_string):
try:
rospy.wait_for_service('/sam/core/start_stop_dvl', timeout=3.)
start_stop_dvl = rospy.ServiceProxy('/sam/core/start_stop_dvl', SetBool)
resp = start_stop_dvl(value)
if not resp.success:
self._feedback.status = "Service call returned false, failed to %s dvl" % value_string
rospy.loginfo("Service call returned false, failed to %s dvl", value_string)
        except (rospy.ServiceException, ROSException) as e:
self._feedback.status = "Service call failed, failed to %s dvl" % value_string
rospy.loginfo("Service call failed: %s, failed to %s dvl", e, value_string)
#finally:
# self._feedback.status = "Did %s dvl" % (value_string)
self._as.publish_feedback(self._feedback)
def estimate_position(self, fixes, covars):
try:
now = rospy.Time(0)
(world_trans, world_rot) = self.listener.lookupTransform("world_utm", "world_local", now)
except (tf.LookupException, tf.ConnectivityException):
self._feedback.status = "Could not get transform between %s and %s" % ("world_utm", "world_local")
rospy.loginfo("Could not get transform between %s and %s" % ("world_utm", "world_local"))
self._as.publish_feedback(self._feedback)
# easting, northing is in world_utm coordinate system,
# we need to transform it to world or world_local
pos = np.zeros((len(fixes), 3))
for i, fix in enumerate(fixes):
utm_point = utm.fromLatLong(fix[0], fix[1])
easting = utm_point.easting
northing = utm_point.northing
utm_zone = utm_point.zone
pos[i, :] = np.array([easting-world_trans[0], northing-world_trans[1], 0.])
# use the cov to weight the means in the future
estimate = np.mean(pos, axis=0)
return estimate
def execute_cb(self, goal):
rospy.loginfo("Got action callback...")
self._feedback.status = "Shutting down controllers and DVL"
self._as.publish_feedback(self._feedback)
header = Header()
timeout = goal.timeout
required_gps_msgs = goal.required_gps_msgs
self.start_stop_dvl(False, "stop")
# Disable controllers
self.vbs_disable_pub.publish(False)
self.lcg_disable_pub.publish(False)
# Sleep to make sure controllers are down
rospy.sleep(0.1)
# Set VBS to 0
self.vbs_pub.publish(0., header)
# Set LCG to 0
self.lcg_pub.publish(0., header)
good_fixes = []
good_vars = [] # NOTE: covariances are in m^2
# Get GPS fixes until we are in a good place
gps_topic = "/sam/core/gps"
start_time = rospy.get_time()
while rospy.get_time() - start_time < timeout and len(good_fixes) < required_gps_msgs:
try:
gps_msg = rospy.wait_for_message(gps_topic, NavSatFix, 3.)
except rospy.ROSException:
rospy.loginfo("Could not get gps message on %s, aborting...", gps_topic)
self._feedback.status = "Could not get gps message on %s..." % gps_topic
self._as.publish_feedback(self._feedback)
continue
if gps_msg.status.status != NavSatStatus.STATUS_NO_FIX:
self._feedback.status = "Good fix, now has %d msgs" % len(good_fixes)
good_fixes.append(np.array([gps_msg.latitude, gps_msg.longitude]))
good_vars.append(np.array([gps_msg.position_covariance[:2], gps_msg.position_covariance[3:5]]))
else:
self._feedback.status = "No fix, now has %d msgs" % len(good_fixes)
self._as.publish_feedback(self._feedback)
if len(good_fixes) < required_gps_msgs:
self._result.status = "Timeout, not enough msgs"
self._as.set_aborted(self._result)
return
else:
self._feedback.status = "Done listening, got %d msgs" % len(good_fixes)
self._as.publish_feedback(self._feedback)
self.start_stop_dvl(True, "start")
gps_pos = self.estimate_position(good_fixes, good_vars)
corrected_rot = [0., 0., 0., 1.] # Start with 0 yaw
if self.last_dr_pos is not None and self.last_gps_pos is not None:
self._feedback.status = "Found previous positions, doing heading estimation"
self._as.publish_feedback(self._feedback)
try:
now = rospy.Time(0)
(dr_trans, dr_rot) = self.listener.lookupTransform("world_local", "sam/base_link", now)
except (tf.LookupException, tf.ConnectivityException):
self._feedback.status = "Could not get transform between %s and %s" % ("world_local", "sam/base_link")
rospy.loginfo("Could not get transform between %s and %s" % ("world_local", "sam/base_link"))
self._as.publish_feedback(self._feedback)
rospy.sleep(0.3)
gps_diff = gps_pos - self.last_gps_pos
#gps_diff = 1./np.linalg.norm(gps_diff)*gps_diff
gps_trajectory_yaw = math.atan2(gps_diff[1], gps_diff[0])
dr_diff = np.array((dr_trans[0] - self.last_dr_pos[0], dr_trans[1] - self.last_dr_pos[1]))
#dr_diff = 1./np.linalg.norm(dr_diff)*dr_diff
dr_trajectory_yaw = math.atan2(dr_diff[1], dr_diff[0])
yaw_correction = gps_trajectory_yaw - dr_trajectory_yaw
            # to get the actual yaw, we need to look at
            # the difference in odom between last time and this time
# note that we need to get the new estimated yaw
# after publishing this to get the corrected one
self._feedback.status = "Estimated GPS yaw: %f, DR yaw: %f, Yaw corr: %f" % (gps_trajectory_yaw, dr_trajectory_yaw, yaw_correction)
self._as.publish_feedback(self._feedback)
rospy.sleep(0.3)
corrected_rot = quaternion_multiply(quaternion_from_euler(0., 0., yaw_correction), dr_rot)
self._feedback.status = "Waiting for filter to update"
self._as.publish_feedback(self._feedback)
pose_msg = PoseWithCovarianceStamped()
pose_msg.header = header
pose_msg.header.frame_id = "world_local"
pose_msg.pose.pose.position = Point(*gps_pos.tolist())
pose_msg.pose.pose.orientation = Quaternion(*corrected_rot)
self.pose_pub.publish(pose_msg)
rospy.sleep(.5)
self._feedback.status = "Getting updated pose"
self._as.publish_feedback(self._feedback)
try:
now = rospy.Time(0)
(trans, rot) = self.listener.lookupTransform("world_local", "sam/base_link", now)
self.last_dr_pos = trans
except (tf.LookupException, tf.ConnectivityException):
self._feedback.status = "Could not get transform between %s and %s" % ("world_local", "sam/base_link")
rospy.loginfo("Could not get transform between %s and %s" % ("world_local", "sam/base_link"))
self._as.publish_feedback(self._feedback)
rospy.sleep(0.3)
self.last_gps_pos = gps_pos
self._result.status = "Finished setting position"
self._as.set_succeeded(self._result)
if __name__ == "__main__":
    rospy.init_node('gps_fix_server', anonymous=False)  # previously anonymous=True
check_server = GPSFixServer(rospy.get_name())
rospy.spin()
| 42.553991 | 143 | 0.645521 | 8,345 | 0.920675 | 0 | 0 | 0 | 0 | 0 | 0 | 2,039 | 0.224956 |
08967bfbf25d6987de9933fc65d4f932dbcd6e60 | 1,307 | py | Python | src/model/RoleProxy.py | JulienGrv/puremvc-python-demo-PySide-employeeadmin | b076493ac34254e665b485259b0a7122fa9cfde4 | [
"BSD-3-Clause"
]
| 4 | 2017-08-26T10:18:10.000Z | 2020-07-28T19:50:54.000Z | src/model/RoleProxy.py | JulienGrv/puremvc-python-demo-PySide-employeeadmin | b076493ac34254e665b485259b0a7122fa9cfde4 | [
"BSD-3-Clause"
]
| null | null | null | src/model/RoleProxy.py | JulienGrv/puremvc-python-demo-PySide-employeeadmin | b076493ac34254e665b485259b0a7122fa9cfde4 | [
"BSD-3-Clause"
]
| 3 | 2020-09-22T12:17:14.000Z | 2021-07-16T12:28:18.000Z | # -*- coding: utf-8 -*-
from puremvc.patterns.proxy import Proxy
from .. import ApplicationFacade
class RoleProxy(Proxy):
NAME = 'RoleProxy'
def __init__(self, proxyName=None, data=[]):
super(RoleProxy, self).__init__(proxyName, data)
self.data = data
def addItem(self, role):
self.data.append(role)
def deleteItem(self, user):
for role in self.data:
if role.username == user.username:
self.data.remove(role)
break
def doesUserHaveRole(self, user, role):
return role in self.getUserRoles(user.username)
def addRoleToUser(self, user, role):
result = False
if not self.doesUserHaveRole(user, role):
userRoles = self.getUserRoles(user.username)
userRoles.append(role)
result = True
self.sendNotification(ApplicationFacade.ADD_ROLE_RESULT, result)
def removeRoleFromUser(self, user, role):
if self.doesUserHaveRole(user, role):
userRoles = self.getUserRoles(user.username)
userRoles.remove(role)
def getUserRoles(self, username):
userRoles = None
for userRoles in self.data:
if userRoles.username == username:
break
return userRoles.roles
| 27.808511 | 72 | 0.61974 | 1,204 | 0.921194 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.026014 |
0896a00f400830a8eb41593559f65d607a6a09c6 | 1,358 | py | Python | flappy-brird/utils/radio.py | victorathanasio/Personal-projects | 94c870179cec32aa733a612a6faeb047df16d977 | [
"MIT"
]
| null | null | null | flappy-brird/utils/radio.py | victorathanasio/Personal-projects | 94c870179cec32aa733a612a6faeb047df16d977 | [
"MIT"
]
| null | null | null | flappy-brird/utils/radio.py | victorathanasio/Personal-projects | 94c870179cec32aa733a612a6faeb047df16d977 | [
"MIT"
]
| null | null | null | import pygame
import os
class Radio:
def __init__(self, settings):
"""
        Method that initializes the Radio object for game sounds
        Input = (dict)
"""
pygame.mixer.init()
self.file_die_sound = pygame.mixer.Sound('Assets/Sounds/die.mp3')
self.file_hit_sound = pygame.mixer.Sound('Assets/Sounds/hit.mp3')
self.file_wing_sound = pygame.mixer.Sound('Assets/Sounds/wing.mp3')
self.file_score_sound = pygame.mixer.Sound('Assets/Sounds/point.mp3')
self.volume = settings['Sound Volume']
        self.file_score_sound.set_volume(self.volume * 0.3)  # score sound is kept quieter
        self.file_die_sound.set_volume(self.volume)
        self.file_hit_sound.set_volume(self.volume)
        self.file_wing_sound.set_volume(self.volume)
def die_sound(self):
"""
        Method that plays the death sound
"""
self.file_die_sound.play()
def score_sound(self):
"""
        Method that plays the score sound
"""
self.file_score_sound.play()
def hit_sound(self):
"""
        Method that plays the hit sound
"""
self.file_hit_sound.play()
def wing_sound(self):
"""
        Method that plays the wing beat sound
"""
self.file_wing_sound.play()
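# Minimal usage sketch (added; assumes the Assets/Sounds files exist and that
# the settings dict carries the 'Sound Volume' key used above):
#     radio = Radio({'Sound Volume': 0.5})
#     radio.wing_sound()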
| 25.148148 | 77 | 0.611193 | 1,331 | 0.980118 | 0 | 0 | 0 | 0 | 0 | 0 | 443 | 0.326215 |
0896e29401ea1989cb26ef01107f5729035c11a7 | 4,405 | py | Python | app/__main__.py | pablohawz/tfg-Scan-Paint-clone | 056cd50d9e4274620cf085a41ed9d326e16dd47b | [
"MIT"
]
| null | null | null | app/__main__.py | pablohawz/tfg-Scan-Paint-clone | 056cd50d9e4274620cf085a41ed9d326e16dd47b | [
"MIT"
]
| null | null | null | app/__main__.py | pablohawz/tfg-Scan-Paint-clone | 056cd50d9e4274620cf085a41ed9d326e16dd47b | [
"MIT"
]
| null | null | null | # This Python file uses the following encoding: utf-8
from app.package.views.Calibrate_view import CalibrateView
from app.package.controllers.Calibrate_controller import CalibrateController
from app.package.models.Calibrate_model import CalibrateModel
import sys
import matplotlib
from PySide2.QtWidgets import QApplication
from PySide2 import QtCore
from .package.models.NewProjectModel import NewProjectModel
from .package.models.DataAcquisitionModel import DataAcquisitionModel
from .package.models.DisplayResultsModel import DisplayResultsModel
from .package.controllers.Navigator import Navigator
from .package.controllers.NewProjectController import NewProjectController
from .package.controllers.DataAcquisitionController import (
DataAcquisitionController)
from .package.controllers.DisplayResultsController import (
DisplayResultsController)
from .package.views.MainWindow import MainWindow
from .package.views.NewProjectView import NewProjectView
from .package.views.DataAcquisitionView import DataAcquisitionView
from .package.views.DisplayResultsView import DisplayResultsView
class App(QApplication):
    # Dictionary that maps names to Views
views = {}
@staticmethod
def log(msg: str) -> None:
print(f'[App] {msg}')
def __init__(self, args):
super(App, self).__init__(args)
self.navigator = Navigator()
self.navigator.navigator.connect(self.change_view)
# MODELS
self.new_project_model = NewProjectModel()
self.data_acquisition_model = DataAcquisitionModel()
self.display_results_model = DisplayResultsModel()
self.calibrate_model = CalibrateModel()
# CONTROLLERS
self.new_project_controller = NewProjectController(
self.new_project_model, self.navigator)
self.data_acquisition_controller = DataAcquisitionController(
self.data_acquisition_model, self.navigator)
self.display_results_controller = DisplayResultsController(
self.display_results_model, self.navigator)
self.calibrate_controller = CalibrateController(
self.calibrate_model, self.navigator)
# VIEWS
self.main_view = MainWindow(None, self.navigator)
self.new_project_view = NewProjectView(
self.new_project_model, self.new_project_controller)
self.data_acquisition_view = DataAcquisitionView(
self.data_acquisition_model, self.data_acquisition_controller)
self.display_results_view = DisplayResultsView(
self.display_results_model, self.display_results_controller)
self.calibrate_view = CalibrateView(
self.calibrate_model, self.calibrate_controller)
self.views['main_view'] = self.main_view
self.views['new_project'] = self.new_project_view
self.views['data_acquisition'] = self.data_acquisition_view
self.views['display_results'] = self.display_results_view
self.views['calibrate'] = self.calibrate_view
self.change_view('new_project')
@QtCore.Slot(str)
def change_view(self, name_view, closeOthers=True):
self.log(f'Navigating to {name_view}')
_view = self.views.get(name_view)
if _view is None:
raise Exception(f'{name_view} is not part of Views dictionary.')
if closeOthers:
self.log('closing other views...')
for view in self.views:
if view != name_view:
self.views.get(view).close()
_view.open()
sys._excepthook = sys.excepthook
def exception_hook(exctype, value, traceback):
print(exctype, value, traceback)
sys._excepthook(exctype, value, traceback)
sys.exit(1)
sys.excepthook = exception_hook
def main():
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
app = App([])
sys.exit(app.exec_())
matplotlib.use('tkagg')
if __name__ == "__main__":
main()
# if __name__ == "__main__":
# import cProfile
# cProfile.run('main()', 'output.dat')
# import pstats
# from pstats import SortKey
# with open("output_time.dat", "w") as f:
# p = pstats.Stats("output.dat", stream=f)
# p.sort_stats("time").print_stats()
# with open("output_calls.dat", "w") as f:
# p = pstats.Stats("output.dat", stream=f)
# p.sort_stats("calls").print_stats()
| 33.120301 | 76 | 0.711691 | 2,447 | 0.555505 | 0 | 0 | 557 | 0.126447 | 0 | 0 | 758 | 0.172077 |
0897118d6f2834e2b8c74ba12247412406dbd2c7 | 557 | py | Python | Trakttv.bundle/Contents/Libraries/Shared/playhouse/berkeleydb.py | disrupted/Trakttv.bundle | 24712216c71f3b22fd58cb5dd89dad5bb798ed60 | [
"RSA-MD"
]
| 1,346 | 2015-01-01T14:52:24.000Z | 2022-03-28T12:50:48.000Z | Trakttv.bundle/Contents/Libraries/Shared/playhouse/berkeleydb.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
]
| 474 | 2015-01-01T10:27:46.000Z | 2022-03-21T12:26:16.000Z | Trakttv.bundle/Contents/Libraries/Shared/playhouse/berkeleydb.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
]
| 191 | 2015-01-02T18:27:22.000Z | 2022-03-29T10:49:48.000Z | import datetime
import decimal
from playhouse.sqlite_ext import *
# Peewee assumes that the `pysqlite2` module was compiled against the
# BerkeleyDB SQLite libraries.
from pysqlite2 import dbapi2 as berkeleydb
berkeleydb.register_adapter(decimal.Decimal, str)
berkeleydb.register_adapter(datetime.date, str)
berkeleydb.register_adapter(datetime.time, str)
class BerkeleyDatabase(SqliteExtDatabase):
def _connect(self, database, **kwargs):
conn = berkeleydb.connect(database, **kwargs)
self._add_conn_hooks(conn)
return conn
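# Minimal usage sketch (added; assumes the usual filename-based peewee API):
#     db = BerkeleyDatabase('app.bdb')
#     db.connect()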
| 27.85 | 69 | 0.779174 | 195 | 0.35009 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.177738 |
089a04fda175104b7a74e5689381760d2e0c8310 | 1,513 | py | Python | PyEEA/analysis/SimulationAnalysisEngine.py | ThomasJFR/PyEEA | 7927ee5ff1de8d3cf9e9654899ea4c2c0284519c | [
"MIT"
]
| 1 | 2020-06-15T03:16:06.000Z | 2020-06-15T03:16:06.000Z | PyEEA/analysis/SimulationAnalysisEngine.py | ThomasJFR/PyEEA | 7927ee5ff1de8d3cf9e9654899ea4c2c0284519c | [
"MIT"
]
| 1 | 2020-06-19T04:56:21.000Z | 2020-06-19T04:56:21.000Z | PyEEA/analysis/SimulationAnalysisEngine.py | ThomasJFR/PyEEA | 7927ee5ff1de8d3cf9e9654899ea4c2c0284519c | [
"MIT"
]
| null | null | null | from numpy.random import standard_normal
from numbers import Number
def simulation_analysis(project, sim_dict, iterations=250, valuator=None):
"""
Purpose:
Analyses the effects of uncertainty of a system by performing a Monte Carlo simulation.
Args:
project: An instance of Project to perform the simulation on
sim_dict: A dict where the key is the name of the cashflow to simulate and the value
is either a number defining the standard deviation for the cashflow as a percentage, or a
function defining some way to modify the cashflow by an amount
"""
# Make every sim_fun value a callable, converting numbers to stdev functions
for key in sim_dict:
if isinstance(sim_dict[key], Number):
            stdev = sim_dict[key]
            # Bind the current stdev via a default argument; a plain closure would
            # late-bind and leave every cashflow using the last key's stdev.
            def std_dist(amt, stdev=stdev):
                return amt * stdev * standard_normal()
            sim_dict[key] = std_dist
valuator = valuator or project.npw
    if not callable(valuator):
        raise TypeError("Valuator must be a callable construct!")
# Perform the simulation
valuations = []
for _ in range(iterations):
with project as p:
for key in sim_dict:
sim_fun = sim_dict[key]
n_cashflows = len(p[key])
for n in range(n_cashflows):
cf = p[key][n]
cf.amount += sim_fun(cf.amount)
valuations.append(valuator())
return valuations
| 35.186047 | 110 | 0.61996 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 632 | 0.417713 |
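A quick self-contained check of the perturbation model used in simulation_analysis above, and of why the stdev must be bound per iteration (the keys and numbers here are invented):

from numpy.random import standard_normal

sim_dict = {"revenue": 0.10, "maintenance": 0.25}  # stdev per cashflow, as a fraction

fns = {}
for key, stdev in sim_dict.items():
    # Without the stdev=stdev default, both closures would late-bind to 0.25.
    def std_dist(amt, stdev=stdev):
        return amt * stdev * standard_normal()
    fns[key] = std_dist

# Each call yields a zero-mean perturbation scaled to the cashflow amount.
print(fns["revenue"](1000.0), fns["maintenance"](1000.0))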
089bce5935bf86ce3e21aaf4f16ec72196bcf521 | 433 | py | Python | bot/handlers/common.py | slawiko/remindmelater_bot | 76e46f7a42ee2eb02d0b06eea1eb2b8c6f76cb9e | ["Apache-2.0"] | null | null | null | bot/handlers/common.py | slawiko/remindmelater_bot | 76e46f7a42ee2eb02d0b06eea1eb2b8c6f76cb9e | ["Apache-2.0"] | 2 | 2017-06-11T11:07:30.000Z | 2017-06-24T05:37:34.000Z | bot/handlers/common.py | slawiko/remindmelater_bot | 76e46f7a42ee2eb02d0b06eea1eb2b8c6f76cb9e | ["Apache-2.0"] | null | null | null |
import logging
from telegram.ext import CommandHandler
logger = logging.getLogger(__name__)
def handle_dispatcher(dispatcher):
dispatcher.add_handler(ping())
dispatcher.add_error_handler(error)
def error(a, b, c):
logger.error('Error %s %s "%s"' % a, b, c)
def ping():
def handle(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="pong")
return CommandHandler('ping', handle)
| 18.041667 | 69 | 0.699769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.069284 |
089c7e0aea70b01bb6426d16f39e585a80906239 | 307 | py | Python | src/papierstat/datasets/data_helper.py | sdpython/papierstat | f69de884c59ada30b58224dca39f2a44d92122c1 | ["MIT"] | 7 | 2019-03-21T09:52:31.000Z | 2021-01-17T16:56:27.000Z | src/papierstat/datasets/data_helper.py | sdpython/papierstat | f69de884c59ada30b58224dca39f2a44d92122c1 | ["MIT"] | 33 | 2018-02-08T23:56:57.000Z | 2021-02-10T23:55:43.000Z | src/papierstat/datasets/data_helper.py | sdpython/papierstat | f69de884c59ada30b58224dca39f2a44d92122c1 | ["MIT"] | 1 | 2021-02-11T09:16:33.000Z | 2021-02-11T09:16:33.000Z |
# -*- coding: utf-8 -*-
"""
@file
@brief Functions that return data sets.
"""
import os
def get_data_folder():
"""
    Returns the data directory included in this module.
"""
this = os.path.dirname(__file__)
data = os.path.join(this, "data")
return os.path.abspath(data)
| 19.1875 | 60 | 0.641694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.535484 |
089f58dcbc654f5aa5c0245f1cc3a918e10ba168 | 464 | py | Python | ollivanders/offer/models.py | paradox-particle/diagon-alley | 184a58da34046319c96b6d5535646497d80c4970 | ["MIT"] | null | null | null | ollivanders/offer/models.py | paradox-particle/diagon-alley | 184a58da34046319c96b6d5535646497d80c4970 | ["MIT"] | null | null | null | ollivanders/offer/models.py | paradox-particle/diagon-alley | 184a58da34046319c96b6d5535646497d80c4970 | ["MIT"] | null | null | null |
from django.db import models
class Coupon(models.Model):
coupon = models.CharField(max_length=20)
discount = models.IntegerField()
valid_from = models.DateTimeField()
valid_to = models.DateTimeField()
active = models.BooleanField(default=True)
def __str__(self):
return self.coupon
class Meta:
db_table = 'coupon'
verbose_name = 'Coupon'
verbose_name_plural = 'Coupons'
ordering = ['coupon']
| 24.421053 | 46 | 0.661638 | 432 | 0.931034 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.071121 |
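For context, a typical query against this Coupon model might look like the following sketch (the project's views are not shown here, so the helper name is invented):

from django.utils import timezone

from offer.models import Coupon

def get_valid_coupon(code):
    """Return the active coupon matching `code` right now, or None."""
    now = timezone.now()
    return Coupon.objects.filter(
        coupon__iexact=code,
        valid_from__lte=now,
        valid_to__gte=now,
        active=True,
    ).first()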
089fb8ac7ea2e682cd5404a50c0fbd9b93f873d7 | 6,470 | py | Python | qa/advancedloggingplugin/advancedlogging.py | robertstarmer/aurora | ae98329c8dc466dea2c8040203624f0bfc1c7cfe | ["Apache-2.0"] | 23 | 2015-01-22T22:23:35.000Z | 2021-10-21T23:08:29.000Z | qa/advancedloggingplugin/advancedlogging.py | sysbot/aurora | 9e319d4eb8c760cf84cb80ed2959cd52976af11c | ["Apache-2.0"] | 2 | 2015-10-13T00:47:45.000Z | 2019-05-06T14:52:33.000Z | qa/advancedloggingplugin/advancedlogging.py | sysbot/aurora | 9e319d4eb8c760cf84cb80ed2959cd52976af11c | ["Apache-2.0"] | 27 | 2015-03-18T19:39:30.000Z | 2022-03-11T00:58:09.000Z |
import os
from os.path import join
import traceback
from bs4 import BeautifulSoup
from nose.plugins import Plugin
class AdvancedLogging(Plugin):
name = "advanced-logging"
enabled = False
capture_screen = True
score = 1
_log_path = join(os.getcwd(), 'test_output')
_script_path = None
def __init__(self):
super(AdvancedLogging, self).__init__()
html_template = """
<html>
<head>
<title></title>
<style type="text/css">
.header {
font-weight: bold;
}
span.fail {
color: red;
}
span.error {
color: orange;
}
span.pass {
color: green;
}
</style>
</head>
<body><body>
</html>
"""
self.soup = BeautifulSoup(html_template)
self.html = self.soup.body
title = self.soup.title
title.string = 'Advanced log'
self.fieldset = None
def options(self, parser, env=os.environ):
parser.add_option(
"--advanced-logging", action="store_true",
dest="advancedlogging",
default=False,
help="Optional: This will enable advanced logging.")
parser.add_option(
"--disable-capture-screen", action="store_false",
dest="disablecapturescreen",
default=True,
help="Optional: This will disable capture screen on failure.")
parser.add_option(
"--advanced-log-filename", action="store",
default='AdvancedLog.html',
dest="advancedlogfilename",
help="Optional: Advanced log filename, e.g. Result.html"
"default is AdvancedLog.html")
def configure(self, options, conf):
if not options.advancedlogging:
return
self.enabled = True
self.capture_screen = options.disablecapturescreen
self.html_filename = options.advancedlogfilename
super(AdvancedLogging, self).configure(options, conf)
def addFailure(self, test, err):
err = self.formatErr(err)
span = self.soup.new_tag('span')
span.string = 'FAIL'
span['class'] = 'header fail'
self.testdiv.append(span)
hr = self.soup.new_tag('hr')
self.testdiv.append(hr)
try:
if self.capture_screen:
filename = '%s.png' % test.address()[2]
full_filename = join(self._log_path, filename)
driver = test.context.uidriver.webdriver
driver.get_screenshot_as_file(full_filename)
print 'Screenshot was captured %s' % full_filename
a = self.soup.new_tag('a')
a['href'] = filename
a['target'] = '_blank'
img = self.soup.new_tag('img')
img['src'] = filename
img['alt'] = filename
img['title'] = filename
img['width'] = '800px'
img['border'] = '1'
a.append(img)
self.testdiv.append(a)
except:
pass
pre = self.soup.new_tag('pre')
pre.string = err
self.testdiv.append(pre)
def addSuccess(self, test):
span = self.soup.new_tag('span')
span.string = 'OK'
span['class'] = 'header pass'
self.testdiv.append(span)
hr = self.soup.new_tag('hr')
self.testdiv.append(hr)
def addError(self, test, err):
try:
err = self.formatErr(err)
span = self.soup.new_tag('span')
span.string = 'ERROR'
span['class'] = 'header error'
self.testdiv.append(span)
hr = self.soup.new_tag('hr')
self.testdiv.append(hr)
pre = self.soup.new_tag('pre')
pre.string = err
self.testdiv.append(pre)
except:
pass
def finalize(self, result):
br = self.soup.new_tag('br')
self.html.append(br)
div1 = self.soup.new_tag('div')
div2 = self.soup.new_tag('div')
self.html.append(div1)
div1.string = "Ran %d test%s" % \
(result.testsRun, result.testsRun != 1 and 's' or '')
self.html.append(div2)
span = self.soup.new_tag('span')
div2.append(span)
if not result.wasSuccessful():
span2 = self.soup.new_tag('span')
span.string = 'FAILED'
span['class'] = 'header fail'
span2.string = '(failures=%d errors=%d)' %\
(len(result.failures), len(result.errors))
div2.append(span2)
else:
span.string = 'OK'
span['class'] = 'header pass'
full_html_filename = join(self._log_path, self.html_filename)
with open(full_html_filename, 'w') as html_file:
str_html = self.soup.prettify()
html_file.write(str_html)
def formatErr(self, err):
exctype, value, tb = err
return ''.join(traceback.format_exception(exctype, value, tb))
def startContext(self, ctx):
if hasattr(ctx, '__file__'):
self._script_path = ctx.__file__.replace('.pyc', '.py')
return
try:
n = ctx.__name__
except AttributeError:
n = str(ctx).replace('<', '').replace('>', '')
self.fieldset = self.soup.new_tag('fieldset')
legend = self.soup.new_tag('legend')
span1 = self.soup.new_tag('span')
span1.string = n
span1['class'] = 'header'
legend.append(span1)
if self._script_path:
span2 = self.soup.new_tag('span')
span2.string = '(%s)' % self._script_path
legend.append(span2)
self.fieldset.append(legend)
self.html.append(self.fieldset)
def stopContext(self, ctx):
self.fieldset = None
def startTest(self, test):
self.testdiv = self.soup.new_tag('div')
hr = self.soup.new_tag('hr')
self.testdiv.append(hr)
span = self.soup.new_tag('span')
span.string = test.shortDescription() or str(test)
span['class'] = 'header'
self.testdiv.append(span)
self.fieldset.append(self.testdiv)
| 30.956938 | 75 | 0.52813 | 6,352 | 0.981762 | 0 | 0 | 0 | 0 | 0 | 0 | 1,452 | 0.22442 |
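Usage sketch for the plugin above: once registered (normally through a setuptools entry point, which this file does not show), it is driven by the --advanced-logging options it defines. It can also be attached programmatically, assuming the module imports as advancedlogging:

import nose
from advancedlogging import AdvancedLogging

nose.main(
    addplugins=[AdvancedLogging()],
    argv=["nosetests", "--advanced-logging",
          "--advanced-log-filename", "Result.html", "tests/"],
)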
08a0d32c04f17aef2a43162c106a80b10c85518c | 999 | py | Python | setup.py | sunghyunzz/aiohttp-toolbox | 1948a1962b3bd4071f234719b6683b55cd03d6f0 | ["MIT"] | 6 | 2016-11-21T08:38:34.000Z | 2019-02-20T12:56:16.000Z | setup.py | sunghyunzz/aiohttp-toolbox | 1948a1962b3bd4071f234719b6683b55cd03d6f0 | ["MIT"] | 1 | 2017-07-20T02:20:03.000Z | 2017-07-20T02:20:03.000Z | setup.py | sunghyunzz/aiohttp-toolbox | 1948a1962b3bd4071f234719b6683b55cd03d6f0 | ["MIT"] | 2 | 2017-07-20T02:20:44.000Z | 2019-02-21T13:37:37.000Z |
"""
aiohttp-ultrajson
-----------------
Integrates UltraJSON with your aiohttp application.
"""
from setuptools import setup
setup(
name='aiohttp-ultrajson',
version='0.1.0',
url='https://github.com/sunghyunzz/aiohttp-ultrajson',
license='MIT',
author='sunghyunzz',
author_email='[email protected]',
description='Integrates UltraJSON with your aiohttp application.',
long_description=__doc__,
py_modules=['aiohttp_ultrajson'],
zip_safe=False,
platforms='any',
install_requires=[
'aiohttp>2',
'ujson>=1.34'
],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Framework :: AsyncIO'
]
)
| 27 | 70 | 0.608609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 613 | 0.613614 |
08a23fb05230d43cddbfc2a0b66eda446175157f | 322 | py | Python | PTA/PAT_B/Python3/B1061_AC.py | StrayDragon/OJ-Solutions | b31b11c01507544aded2302923da080b39cf2ba8 | ["MIT"] | 1 | 2019-05-13T10:09:55.000Z | 2019-05-13T10:09:55.000Z | PTA/PAT_B/Python3/B1061_AC.py | StrayDragon/OJ-Solutions | b31b11c01507544aded2302923da080b39cf2ba8 | ["MIT"] | null | null | null | PTA/PAT_B/Python3/B1061_AC.py | StrayDragon/OJ-Solutions | b31b11c01507544aded2302923da080b39cf2ba8 | ["MIT"] | null | null | null |
n, m = map(int, input().split())
scores = list(map(int, input().split()))
answers = list(map(int, input().split()))
for i in range(n):
actuals = list(map(int, input().split()))
result = 0
for i, score in enumerate(scores):
if actuals[i] == answers[i]:
result += score
print(result)
| 21.466667 | 45 | 0.568323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
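A worked example of the grading loop above; each question is weighted by its score and points are earned only on exact matches:

# stdin:
#   2 3        <- n students, m questions
#   1 2 3      <- score of each question
#   1 1 2      <- correct answers
#   1 1 2      <- student 1: all three correct      -> 1+2+3 = 6
#   1 2 2      <- student 2: questions 1 and 3 only -> 1+3   = 4
# stdout:
#   6
#   4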
08a37f1f4c2faa26bde495db95f37f4816d7caf0 | 12,652 | py | Python | dh/network/__init__.py | dhaase-de/dh-python-dh | 40b04407e5f67ec261f559263718ec2b2588dabb | ["MIT"] | null | null | null | dh/network/__init__.py | dhaase-de/dh-python-dh | 40b04407e5f67ec261f559263718ec2b2588dabb | ["MIT"] | null | null | null | dh/network/__init__.py | dhaase-de/dh-python-dh | 40b04407e5f67ec261f559263718ec2b2588dabb | ["MIT"] | null | null | null |
"""
Tools for network communication.
"""
import abc
import io
import json
import socket
import struct
import sys
import time
import zlib
import dh.ejson
import dh.utils
# NumPy is only needed for some parts and is optional
try:
import numpy as np
except ImportError as e:
_NUMPY_ERROR = e
else:
_NUMPY_ERROR = None
###
#%% socket message types
###
class SocketMessageType(abc.ABC):
"""
Base class providing `send()` and `recv()` methods for sending and
receiving (higher-level) messages via the socket `socket`.
"""
@abc.abstractmethod
def send(self, socket, x):
pass
@abc.abstractmethod
def recv(self, socket):
pass
class ByteSocketMessageType(SocketMessageType):
"""
Class providing methods for sending and receiving byte *messages* of up to
4 GiB in size via a given socket.
Each message has a fixed-length (four byte) header, specifying the length
of the message content. Thus, calls to `send()` and `recv()` always
ensure that the entire message is being sent/received.
If `compress` is `True`, messages are compressed before sending and
decompressed after receiving. This reduces the network load but costs more
time. The value for `compress` must be the same for both the server and the
client.
"""
def __init__(self, compress=False):
self._compress = compress
def _recvn(self, socket, byteCount):
"""
Receive and return a fixed number of `byteCount` bytes from the socket.
"""
b = io.BytesIO()
while True:
currentByteCount = b.getbuffer().nbytes
if currentByteCount >= byteCount:
break
packet = socket.recv(byteCount - currentByteCount)
if len(packet) == 0:
return None
b.write(packet)
return b.getvalue()
def send(self, socket, b):
if self._compress:
b = zlib.compress(b)
header = struct.pack(">I", int(len(b)))
socket.sendall(header + b)
def recv(self, socket):
header = self._recvn(socket, 4)
if header is None:
return None
length = struct.unpack(">I", header)[0]
b = self._recvn(socket, length)
if self._compress:
b = zlib.decompress(b)
return b
class NumpySocketMessageType(ByteSocketMessageType):
"""
Class providing `send()` and `recv()` methods for sending and receiving
NumPy ndarray objects via the given socket.
"""
def __init__(self, *args, **kwargs):
if _NUMPY_ERROR is not None:
raise _NUMPY_ERROR
super().__init__(*args, **kwargs)
def send(self, socket, x):
b = io.BytesIO()
np.save(file=b, arr=x, allow_pickle=False, fix_imports=False)
super().send(socket, b.getvalue())
def recv(self, socket):
b = io.BytesIO(super().recv(socket))
return np.load(file=b, allow_pickle=False, fix_imports=False)
class JsonSocketMessageType(ByteSocketMessageType):
"""
Class providing `send()` and `recv()` methods for sending and receiving
JSON-serializable objects via the given socket.
"""
def send(self, socket, x):
j = json.dumps(x, ensure_ascii=True)
b = bytes(j, "ascii")
super().send(socket, b)
def recv(self, socket):
b = super().recv(socket)
j = b.decode("ascii")
x = json.loads(j)
return x
class ExtendedJsonSocketMessageType(ByteSocketMessageType):
"""
Class providing `send()` and `recv()` methods for sending and receiving
JSON-serializable (with extended range of supported types, see
`dh.ejson`) objects via the given socket.
.. seealso:: `dh.ejson`.
"""
def send(self, socket, x):
j = dh.ejson.dumps(x)
b = bytes(j, "ascii")
super().send(socket, b)
def recv(self, socket):
b = super().recv(socket)
j = b.decode("ascii")
x = dh.ejson.loads(j)
return x
###
#%% extended socket with support for multiple message types
###
class MessageSocket():
"""
This is a wrapper class for `socket.socket` which supports the methods
`msend()` and `mrecv()`, which send/receive entire (higher-level) messages.
For both methods, the `messageType` argument must be an instance of the
class `SocketMessageType`.
Note: in this context, 'message' means a high-level, user-defined object,
not the 'message' used in the context of `socket.socket.recvmsg` and
`socket.socket.sendmsg`.
"""
def __init__(self, socket):
self._socket = socket
def msend(self, messageType, x):
messageType.send(self._socket, x)
def mrecv(self, messageType):
return messageType.recv(self._socket)
###
#%% socket servers/clients
###
class SocketServer(abc.ABC):
"""
Simple socket server which accepts connections on the specified `host`
and `port` and communicates with the client as specified in
`communicate()`.
See http://stackoverflow.com/a/19742674/1913780 for an explanation of
`nodelay`.
"""
def __init__(self, host="", port=7214, backlog=5, nodelay=True):
print("Creating socket...")
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if nodelay:
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
print("Binding socket to {}:{}...".format(host if len(host) > 0 else "*", port))
self._socket.bind((host, port))
self._backlog = backlog
self._nodelay = nodelay
def _print(self, text):
print("[{}] {}".format(dh.utils.dtstr(compact=False), text))
def run(self):
self._socket.listen(self._backlog)
while True:
self._print("Waiting for connection...")
sys.stdout.flush()
(connectionSocket, connectionAddress) = self._socket.accept()
self._print("Accepted connection from {}:{}".format(connectionAddress[0], connectionAddress[1]))
t0 = time.time()
try:
self.communicate(MessageSocket(connectionSocket))
connectionSocket.close()
except Exception as e:
self._print("** {}: {}".format(type(e).__name__, e))
self._print("Finished request from {}:{} after {} ms".format(connectionAddress[0], connectionAddress[1], dh.utils.around((time.time() - t0) * 1000.0)))
@abc.abstractmethod
def communicate(self, socket):
"""
Implements the entire communication happening for one connection with a
client via high-level socket messages (see `SocketMessageType`).
Counterpart of `SocketClient.communicate`. See specific client/server
implementations for examples.
"""
pass
class SocketClient(abc.ABC):
"""
Simple socket client which connects to the server on the specified `host`
and `port` each time `query()` is called. The communication with the server
is specified in `communicate()`.
See http://stackoverflow.com/a/19742674/1913780 for an explanation of
`nodelay`.
"""
def __init__(self, host, port=7214, nodelay=True):
self._host = host
self._port = port
self._nodelay = nodelay
def query(self, *args, **kwargs):
# establish connection with the server
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self._nodelay:
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self._socket.connect((self._host, self._port))
# actual communication, keep result
result = self.communicate(MessageSocket(self._socket), *args, **kwargs)
# close connection
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
return result
@abc.abstractmethod
def communicate(self, socket, *args, **kwargs):
"""
Implements the entire communication happening for one connection with a
server via high-level socket messages (see `SocketMessageType`).
Counterpart of `SocketServer.communicate`. See specific client/server
implementations for examples.
"""
pass
class ImageProcessingServer(SocketServer):
"""
Special case of `SocketServer` which accepts a NumPy array and JSON-encoded
parameters and returns a NumPy array. The counterpart is the
`ImageProcessingClient` class.
To specify the processing behavior, sub-class this class and implement
the static method `process(data, params)`.
"""
def communicate(self, socket):
# receive input image and parameters
data = socket.mrecv(NumpySocketMessageType())
params = socket.mrecv(JsonSocketMessageType())
# process
try:
result = self.process(data=data, params=params)
except Exception as e:
self._print("** {}: {}".format(type(e).__name__, e))
result = np.zeros(shape=(0, 0), dtype="uint8")
# send result image
socket.msend(NumpySocketMessageType(), result)
@staticmethod
@abc.abstractmethod
def process(data, params):
"""
This function specifies the processing behavior of this server and must
        be implemented by the user.
"""
pass
class ImageProcessingClient(SocketClient):
"""
Special case of `SocketClient` which sends a NumPy array and JSON-encoded
parameters and receives a NumPy array. The counterpart is the
`ImageProcessingServer` class.
The processing behavior is specified by sub-classing
`ImageProcessingServer` and implementing the static method
`process(data, params)`.
"""
def communicate(self, socket, data, params):
# send input image and parameters
socket.msend(NumpySocketMessageType(), data)
socket.msend(JsonSocketMessageType(), params)
# receive result image
return socket.mrecv(NumpySocketMessageType())
def process(self, data, params):
"""
Just another name for the `query` method (to better show the connection
to the server's `process` method).
"""
return self.query(data=data, params=params)
class ImageProcessingServer2(SocketServer):
"""
Special case of `SocketServer` which accepts a NumPy array and JSON-encoded
parameters and returns a NumPy array plus a JSON-encodable object. The
counterpart is the `ImageProcessingClient2` class.
To specify the processing behavior, sub-class this class and implement
the static method `process(data, params)`.
"""
def communicate(self, socket):
# receive input image and parameters
data = socket.mrecv(NumpySocketMessageType())
params = socket.mrecv(JsonSocketMessageType())
# process
try:
(result, info) = self.process(data=data, params=params)
except Exception as e:
self._print("** {}: {}".format(type(e).__name__, e))
result = np.zeros(shape=(0, 0), dtype="uint8")
info = None
# send result image and info
socket.msend(NumpySocketMessageType(), result)
socket.msend(JsonSocketMessageType(), info)
@staticmethod
@abc.abstractmethod
def process(data, params):
"""
This function specifies the processing behavior of this server and must
        be implemented by the user.
"""
pass
class ImageProcessingClient2(SocketClient):
"""
Special case of `SocketClient` which sends a NumPy array and JSON-encoded
parameters and receives a NumPy array and a JSON-encoded object. The
counterpart is the `ImageProcessingServer2` class.
The processing behavior is specified by sub-classing
`ImageProcessingServer` and implementing the static method
`process(data, params)`.
"""
def communicate(self, socket, data, params):
# send input image and parameters
socket.msend(NumpySocketMessageType(), data)
socket.msend(JsonSocketMessageType(), params)
# receive result image
result = socket.mrecv(NumpySocketMessageType())
info = socket.mrecv(JsonSocketMessageType())
return (result, info)
def process(self, data, params):
"""
Just another name for the `query` method (to better show the connection
to the server's `process` method).
"""
return self.query(data=data, params=params)
| 30.858537 | 163 | 0.641084 | 12,144 | 0.959848 | 0 | 0 | 1,302 | 0.102909 | 0 | 0 | 5,613 | 0.443645 |
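A minimal end-to-end sketch of the building blocks above: a JSON echo service made by subclassing SocketServer and SocketClient. It assumes the module is importable as dh.network and that server and client run in separate processes:

from dh.network import SocketServer, SocketClient, JsonSocketMessageType

class EchoServer(SocketServer):
    def communicate(self, socket):
        # One request/response round trip per accepted connection.
        payload = socket.mrecv(JsonSocketMessageType())
        socket.msend(JsonSocketMessageType(), {"echo": payload})

class EchoClient(SocketClient):
    def communicate(self, socket, payload):
        socket.msend(JsonSocketMessageType(), payload)
        return socket.mrecv(JsonSocketMessageType())

# process A:  EchoServer(port=7214).run()
# process B:  print(EchoClient("localhost", port=7214).query({"x": 1}))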
08a4afc96f7c56b3ec32526d5ad975c5272d1d27 | 1,225 | py | Python | setup.py | danizen/apache-replay | 5e5cc8d0df693f2367d188d71099041c6a65317f | ["MIT"] | null | null | null | setup.py | danizen/apache-replay | 5e5cc8d0df693f2367d188d71099041c6a65317f | ["MIT"] | null | null | null | setup.py | danizen/apache-replay | 5e5cc8d0df693f2367d188d71099041c6a65317f | ["MIT"] | null | null | null |
from setuptools import setup
def get_readme():
with open('README.md') as f:
return f.read()
setup(
name = 'apache-replay',
version = '0.0.3',
url = 'https://github.com/danizen/apache-replay.git',
author = 'Daniel Davis',
author_email = '[email protected]',
description = 'Facilitates replaying of Apache files in Common Log and Combined Log format',
long_description = get_readme(),
long_description_content_type='text/markdown; charset=UTF-8; variant=CommonMark',
packages = ['apache_replay'],
entry_points={
'console_scripts': [
'apache-replay=apache_replay.script:main',
]
},
install_requires = ['attrs', 'requests'],
tests_require = ['attrs', 'requests', 'pytest', 'pytest-pythonpath', 'pytest-cov', 'tox'],
classifiers = [
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Testing :: Traffic Generation',
]
)
| 33.108108 | 96 | 0.625306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 699 | 0.570612 |
08a6713846bc912e38363c64df0ddb98d1d40470 | 464 | py | Python | setup.py | duytintruong/do_more | 3a306da78ca302d2963cc7bae5f17e668168b595 | ["MIT"] | null | null | null | setup.py | duytintruong/do_more | 3a306da78ca302d2963cc7bae5f17e668168b595 | ["MIT"] | null | null | null | setup.py | duytintruong/do_more | 3a306da78ca302d2963cc7bae5f17e668168b595 | ["MIT"] | null | null | null |
from distutils.core import setup
setup(
name='do_more',
packages=['do_more'],
version='0.1.0',
description='A library enhancing pydoit features.',
author='Duy Tin Truong',
author_email='',
url='https://github.com/duytintruong/do_more',
download_url='https://github.com/duytintruong/do_more/archive/0.1.0.tar.gz',
keywords=['pipeline', 'data', 'doit'],
classifiers=[],
install_requires=[
'doit>=0.31.1',
],
)
| 27.294118 | 80 | 0.637931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.474138 |
08a7afeb8a1abc10ec91968f8b8eddea6a7e071a | 16,361 | py | Python | qtable/engine.py | ihgazni2/qtable | 269bb1052d7c7aeeae4d0b1024746fae38870c40 | ["MIT"] | null | null | null | qtable/engine.py | ihgazni2/qtable | 269bb1052d7c7aeeae4d0b1024746fae38870c40 | ["MIT"] | null | null | null | qtable/engine.py | ihgazni2/qtable | 269bb1052d7c7aeeae4d0b1024746fae38870c40 | ["MIT"] | null | null | null |
import pandas as pd
import numpy as np
import elist.elist as elel
import edict.edict as eded
import tlist.tlist as tltl
import copy
__all__ = [
'_append_col',
'_append_cols',
'_append_row',
'_append_rows',
'_cn2clocs',
'_col',
'_cols',
'_columns_map',
'_crop',
'_get_clocs',
'_get_rlocs',
'_getitem',
'_index_map',
'_insert_col',
'_insert_cols',
'_insert_row',
'_insert_rows',
'_ltd_index_first',
'_ltd_index_last',
'_name2ilocs',
'_prepend_col',
'_prepend_cols',
'_prepend_row',
'_prepend_rows',
'_reindex_cols',
'_reindex_rows',
'_rename_cols',
'_rename_rows',
'_repl_col',
'_repl_cols',
'_repl_row',
'_repl_rows',
'_rmcol',
'_rmcols',
'_rmrow',
'_rmrows',
'_rn2rlocs',
'_row',
'_rows',
'_setitem',
'_subtb',
'_swapcol',
'_swaprow',
'_transpose',
'_fliplr',
'_flipud'
]
#all operations will generate a new Qtable(copy.deepcopy), and will not change the original Qtable
#columns col-names-list no-duplicate-names-permitted
#index rowname-names-list no-duplicate-names-permitted
#df pd.DataFrame
def _index_map(df):
d = elel.ivdict(list(df.index))
return(d)
def _columns_map(df):
d = elel.ivdict(list(df.columns))
return(d)
def _name2ilocs(rowname,colname,**kwargs):
if('index_map' in kwargs):
index_map = kwargs['index_map']
else:
df = kwargs['DF']
index_map = _index_map(df)
if('columns_map' in kwargs):
columns_map = kwargs['columns_map']
else:
df = kwargs['DF']
columns_map = _columns_map(df)
kl,vl = eded.d2kvlist(index_map)
rlocs = elel.indexes_all(vl,rowname)
kl,vl = eded.d2kvlist(columns_map)
clocs = elel.indexes_all(vl,colname)
return((rlocs,clocs))
# index_map = _index_map(df)
# columns_map = _columns_map(df)
# _getitem(df,rowname,colname,rloc=0,cloc=0)
# rloc relative-row-position
# cloc relative-col-position
def _getitem(df,rowname,colname,*args,**kwargs):
rlocs,clocs = _name2ilocs(rowname,colname,index_map=kwargs['index_map'],columns_map=kwargs['columns_map'])
rslt = df.iloc[rlocs,clocs]
args = list(args)
if(args.__len__()==0):
pass
else:
rloc = args[0]
cloc = args[1]
rslt = rslt.iloc[rloc,cloc]
return(rslt)
def _setitem(df,rowname,colname,value,*args,**kwargs):
rlocs,clocs = _name2ilocs(rowname,colname,index_map=kwargs['index_map'],columns_map=kwargs['columns_map'])
rslt = df.iloc[rlocs,clocs]
args = list(args)
if(args.__len__()==0):
rslt = value
else:
rloc = args[0]
cloc = args[1]
rslt.iloc[rloc,cloc] = value
df.iloc[rlocs,clocs] = rslt
#rn ---------------------rowname
def _rn2rlocs(rowname,**kwargs):
if('index_map' in kwargs):
index_map = kwargs['index_map']
else:
df = kwargs['DF']
index_map = _index_map(df)
kl,vl = eded.d2kvlist(index_map)
rlocs = elel.indexes_all(vl,rowname)
rlocs.sort()
return(rlocs)
def _row(df,rowname,*args,**kwargs):
rlocs = _rn2rlocs(rowname,**kwargs)
args = list(args)
if(args.__len__()==0):
pass
else:
rlocs = elel.select_seqs(rlocs,args)
return(df.iloc[rlocs])
#cn ---------------------colname
def _cn2clocs(colname,**kwargs):
if('columns_map' in kwargs):
columns_map = kwargs['columns_map']
else:
df = kwargs['DF']
columns_map = _columns_map(df)
kl,vl = eded.d2kvlist(columns_map)
clocs = elel.indexes_all(vl,colname)
clocs.sort()
return(clocs)
def _col(df,colname,*args,**kwargs):
clocs = _cn2clocs(colname,**kwargs)
args = list(args)
if(args.__len__()==0):
pass
else:
clocs = elel.select_seqs(clocs,args)
return(df.iloc[:,clocs])
def _get_rlocs(rownames,**kwargs):
rlocs = []
for i in range(rownames.__len__()):
rowname = rownames[i]
tmp = _rn2rlocs(rowname,**kwargs)
rlocs = elel.concat(rlocs,tmp)
rlocs.sort()
return(rlocs)
def _get_clocs(colnames,**kwargs):
clocs = []
for i in range(colnames.__len__()):
colname = colnames[i]
tmp = _cn2clocs(colname,**kwargs)
clocs = elel.concat(clocs,tmp)
clocs.sort()
return(clocs)
def _rows(df,*rownames,**kwargs):
rownames = list(rownames)
if(isinstance(rownames[0],list)):
rownames = rownames[0]
else:
pass
rlocs = _get_rlocs(rownames,**kwargs)
return(df.iloc[rlocs])
def _cols(df,*colnames,**kwargs):
colnames = list(colnames)
if(isinstance(colnames[0],list)):
colnames = colnames[0]
else:
pass
clocs = _get_clocs(colnames,**kwargs)
return(df.iloc[:,clocs])
def _subtb(df,rownames,colnames,**kwargs):
rownames = elel.uniqualize(rownames)
colnames = elel.uniqualize(colnames)
rlocs = _get_rlocs(rownames,**kwargs)
clocs = _get_clocs(colnames,**kwargs)
return(df.iloc[rlocs,clocs])
def _ltd_index_first(ltd,value):
for i in range(ltd.__len__()):
if(ltd[i] == value):
return(i)
else:
pass
raise ValueError("value not exist")
def _ltd_index_last(ltd,value):
for i in range(ltd.__len__()-1,-1,-1):
if(ltd[i] == value):
return(i)
else:
pass
raise ValueError("value not exist")
def _crop(df,top,left,bot,right,**kwargs):
imd = kwargs['index_map']
top = _ltd_index_first(imd,top)
bot = _ltd_index_last(imd,bot)
cmd = kwargs['columns_map']
left = _ltd_index_first(cmd,left)
right = _ltd_index_last(cmd,right)
rownames = list(df.index[top:bot+1])
colnames = list(df.columns[left:right+1])
return(_subtb(df,rownames,colnames,**kwargs))
def _swapcol(df,colname1,colname2,*args,**kwargs):
df = copy.deepcopy(df)
clocs1 = _cn2clocs(colname1,**kwargs)
clocs2 = _cn2clocs(colname2,**kwargs)
args = list(args)
if(args.__len__()==0):
which1 = 0
which2 = 0
elif(args.__len__()==1):
which1 = args[0]
which2 = 0
else:
which1 = args[0]
which2 = args[1]
cloc1 = clocs1[which1]
cloc2 = clocs2[which2]
clocs = elel.init_range(0,df.columns.__len__(),1)
clocs = elel.iswap(clocs,cloc1,cloc2)
return(df.iloc[:,clocs])
def _reindex_cols(df,*columns,**kwargs):
df = copy.deepcopy(df)
columns = list(columns)
if(isinstance(columns[0],list)):
columns = columns[0]
else:
pass
clocs_array = []
for i in range(columns.__len__()):
clocs = _cn2clocs(columns[i],**kwargs)
clocs_array.append(clocs)
if("whiches" in kwargs):
whiches = kwargs['whiches']
else:
whiches = elel.init(clocs_array.__len__(),0)
clocs = elel.batexec(lambda clocs,which:clocs[which],clocs_array,whiches)
return(df.iloc[:,clocs])
def _swaprow(df,rowname1,rowname2,*args,**kwargs):
df = copy.deepcopy(df)
rlocs1 = _rn2rlocs(rowname1,**kwargs)
rlocs2 = _rn2rlocs(rowname2,**kwargs)
args = list(args)
if(args.__len__()==0):
which1 = 0
which2 = 0
elif(args.__len__()==1):
which1 = args[0]
which2 = 0
else:
which1 = args[0]
which2 = args[1]
rloc1 = rlocs1[which1]
rloc2 = rlocs2[which2]
    rlocs = elel.init_range(0,df.index.__len__(),1)  # rows: use the index length, not the column count
rlocs = elel.iswap(rlocs,rloc1,rloc2)
return(df.iloc[rlocs])
def _reindex_rows(df,*index,**kwargs):
df = copy.deepcopy(df)
index = list(index)
if(isinstance(index[0],list)):
index = index[0]
else:
pass
rlocs_array = []
for i in range(index.__len__()):
rlocs = _rn2rlocs(index[i],**kwargs)
rlocs_array.append(rlocs)
if("whiches" in kwargs):
whiches = kwargs['whiches']
else:
whiches = elel.init(rlocs_array.__len__(),0)
rlocs = elel.batexec(lambda rlocs,which:rlocs[which],rlocs_array,whiches)
return(df.iloc[rlocs])
def _rmcol(df,colname,*args,**kwargs):
df = copy.deepcopy(df)
clocs = _cn2clocs(colname,**kwargs)
if(args.__len__()==0):
whiches = elel.init_range(0,clocs.__len__(),1)
else:
whiches = list(args)
clocs = elel.select_seqs(clocs,whiches)
all_clocs = elel.init_range(0,df.columns.__len__(),1)
lefted_clocs = elel.select_seqs_not(all_clocs,clocs)
return(df.iloc[:,lefted_clocs])
def _rmcols(df,*colnames,**kwargs):
df = copy.deepcopy(df)
colnames = list(colnames)
if(isinstance(colnames[0],list)):
colnames = colnames[0]
else:
pass
clocs_array = []
for i in range(colnames.__len__()):
clocs = _cn2clocs(colnames[i],**kwargs)
clocs_array.append(clocs)
if("whiches" in kwargs):
whiches = kwargs['whiches']
clocs = elel.batexec(lambda clocs,which:clocs[which],clocs_array,whiches)
else:
#by default remove all
clocs = elel.concat(*clocs_array)
all_clocs = elel.init_range(0,df.columns.__len__(),1)
lefted_clocs = elel.select_seqs_not(all_clocs,clocs)
return(df.iloc[:,lefted_clocs])
def _rmrow(df,rowname,*args,**kwargs):
df = copy.deepcopy(df)
rlocs = _rn2rlocs(rowname,**kwargs)
if(args.__len__()==0):
whiches = elel.init_range(0,rlocs.__len__(),1)
else:
whiches = list(args)
rlocs = elel.select_seqs(rlocs,whiches)
all_rlocs = elel.init_range(0,df.index.__len__(),1)
lefted_rlocs = elel.select_seqs_not(all_rlocs,rlocs)
return(df.iloc[lefted_rlocs])
def _rmrows(df,*rownames,**kwargs):
df = copy.deepcopy(df)
rownames = list(rownames)
if(isinstance(rownames[0],list)):
rownames = rownames[0]
else:
pass
rlocs_array = []
for i in range(rownames.__len__()):
rlocs = _rn2rlocs(rownames[i],**kwargs)
rlocs_array.append(rlocs)
if("whiches" in kwargs):
whiches = kwargs['whiches']
rlocs = elel.batexec(lambda rlocs,which:rlocs[which],rlocs_array,whiches)
else:
#by default remove all
rlocs = elel.concat(*rlocs_array)
all_rlocs = elel.init_range(0,df.index.__len__(),1)
lefted_rlocs = elel.select_seqs_not(all_rlocs,rlocs)
return(df.iloc[lefted_rlocs])
def _insert_col(df,pos,*args,**kwargs):
df = copy.deepcopy(df)
if(isinstance(pos,int)):
pass
else:
clocs = _cn2clocs(pos,**kwargs)
if('which' in kwargs):
which = kwargs['which']
else:
which = 0
pos = clocs[which] + 1
args = list(args)
if(args.__len__() == 1):
colname = list(args[0].keys())[0]
values = list(args[0].values())[0]
else:
colname = args[0]
if(isinstance(args[1],list)):
values = args[1]
else:
values = args[1:]
####
####
df.insert(pos,colname,values,kwargs['allow_duplicates'])
return(df)
def _insert_cols(df,pos,*args,**kwargs):
df = copy.deepcopy(df)
if(isinstance(pos,int)):
pass
else:
clocs = _cn2clocs(pos,**kwargs)
if('which' in kwargs):
which = kwargs['which']
else:
which = 0
pos = clocs[which] + 1
args = list(args)
if(isinstance(args[0],dict)):
kl,vl = eded.d2kvlist(args[0])
else:
if(isinstance(args[1],list)):
kl = elel.select_evens(args)
vl = elel.select_odds(args)
else:
kl,vl = elel.brkl2kvlist(args,df.index.__len__()+1)
for i in range(kl.__len__()):
colname = kl[i]
values = vl[i]
df.insert(pos+i,colname,values,kwargs['allow_duplicates'])
return(df)
def _insert_row(df,pos,*args,**kwargs):
df = df.T
df = _insert_col(df,pos,*args,**kwargs)
df = df.T
return(df)
def _insert_rows(df,pos,*args,**kwargs):
df = df.T
df = _insert_cols(df,pos,*args,**kwargs)
df = df.T
return(df)
def _append_col(df,*args,**kwargs):
pos = df.columns.__len__()
return(_insert_col(df,pos,*args,**kwargs))
def _append_cols(df,*args,**kwargs):
pos = df.columns.__len__()
return(_insert_cols(df,pos,*args,**kwargs))
def _append_row(df,*args,**kwargs):
pos = df.index.__len__()
return(_insert_row(df,pos,*args,**kwargs))
def _append_rows(df,*args,**kwargs):
pos = df.index.__len__()
return(_insert_rows(df,pos,*args,**kwargs))
def _prepend_col(df,*args,**kwargs):
return(_insert_col(df,0,*args,**kwargs))
def _prepend_cols(df,*args,**kwargs):
return(_insert_cols(df,0,*args,**kwargs))
def _prepend_row(df,*args,**kwargs):
return(_insert_row(df,0,*args,**kwargs))
def _prepend_rows(df,*args,**kwargs):
return(_insert_rows(df,0,*args,**kwargs))
def _rename_cols(df,*colnames):
df = copy.deepcopy(df)
colnames = list(colnames)
if(isinstance(colnames[0],list)):
colnames = colnames[0]
else:
pass
df.columns = colnames
return(df)
def _rename_rows(df,*rownames):
df = copy.deepcopy(df)
rownames = list(rownames)
if(isinstance(rownames[0],list)):
rownames = rownames[0]
else:
pass
df.index = rownames
return(df)
def _repl_col(df,pos,*args,**kwargs):
df = copy.deepcopy(df)
if(isinstance(pos,int)):
pos = pos + 1
else:
clocs = _cn2clocs(pos,**kwargs)
if('which' in kwargs):
which = kwargs['which']
else:
which = 0
pos = clocs[which] + 1
args = list(args)
if(args.__len__() == 1):
colname = list(args[0].keys())[0]
values = list(args[0].values())[0]
else:
colname = args[0]
if(isinstance(args[1],list)):
values = args[1]
else:
values = args[1:]
df.insert(pos,colname,values,kwargs['allow_duplicates'])
pos = pos -1
all_clocs = elel.init_range(0,df.columns.__len__(),1)
all_clocs.remove(pos)
return(df.iloc[:,all_clocs])
def _repl_cols(df,poses,*args,**kwargs):
df = copy.deepcopy(df)
args = list(args)
if(isinstance(args[0],dict)):
kl,vl = eded.d2kvlist(args[0])
else:
if(isinstance(args[1],list)):
kl = elel.select_evens(args)
vl = elel.select_odds(args)
else:
kl,vl = elel.brkl2kvlist(args,df.index.__len__()+1)
if(isinstance(poses[0],int)):
pass
else:
colnames = poses
clocs_array = []
for i in range(colnames.__len__()):
clocs = _cn2clocs(colnames[i],**kwargs)
clocs_array.append((clocs,i))
if("whiches" in kwargs):
whiches = kwargs['whiches']
clocs_array = elel.mapv(clocs_array,lambda ele:ele[0])
clocs = elel.batexec(lambda clocs,which:clocs[which],clocs_array,whiches)
poses = clocs
else:
#by default replace all
nkl = []
nvl = []
nclocs = []
for i in range(clocs_array.__len__()):
clocs = clocs_array[i][0]
index = clocs_array[i][1]
tmpkl = elel.init(clocs.__len__(),kl[i])
tmpvl = elel.init(clocs.__len__(),vl[i])
nkl = elel.concat(nkl,tmpkl)
nvl = elel.concat(nvl,tmpvl)
nclocs = elel.concat(nclocs,clocs)
#batsort
poses = nclocs
kl,vl = elel.batsorted(nclocs,nkl,nvl)
poses = elel.mapv(poses,lambda pos:pos+1)
poses.sort()
for i in range(0,poses.__len__()):
pos = poses[i]
df.insert(pos,kl[i],vl[i],kwargs['allow_duplicates'])
pos = pos -1
all_clocs = elel.init_range(0,df.columns.__len__(),1)
all_clocs.remove(pos)
df = df.iloc[:,all_clocs]
return(df)
def _repl_row(df,pos,*args,**kwargs):
df = df.T
df = _repl_col(df,pos,*args,**kwargs)
df = df.T
return(df)
def _repl_rows(df,poses,*args,**kwargs):
df = df.T
df = _repl_cols(df,poses,*args,**kwargs)
df = df.T
return(df)
def _transpose(df):
df = copy.deepcopy(df)
df = df.T
return(df)
def _fliplr(df,**kwargs):
columns = list(df.columns)
columns.reverse()
df = _reindex_cols(df,columns,**kwargs)
return(df)
def _flipud(df,**kwargs):
index = list(df.index)
index.reverse()
df = _reindex_rows(df,index,**kwargs)
return(df)
| 27.40536 | 110 | 0.603753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,518 | 0.092782 |
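A small demonstration of the duplicate-label machinery above: _name2ilocs resolves every position sharing a row/column name, which is what distinguishes this module from plain pandas indexing (assumes the module imports as qtable.engine):

import pandas as pd
from qtable.engine import _index_map, _columns_map, _getitem

# Duplicate row and column labels are allowed on purpose.
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                  index=["r1", "r1", "r2"],
                  columns=["a", "b", "a"])

imap, cmap = _index_map(df), _columns_map(df)

# Every cell whose row is named "r1" and whose column is named "a":
sub = _getitem(df, "r1", "a", index_map=imap, columns_map=cmap)
print(sub)  # the 2x2 block at rows 0-1, columns 0 and 2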
08a840735e2065bf6687b54ab836fe21b29363da | 901 | py | Python | src/npgru/predictor/tensorflow_predictor.py | grainpowder/gru-forward-numpy-app | efd24f9f397d51e7e18bdad5cba12451ad69d3de | ["MIT"] | null | null | null | src/npgru/predictor/tensorflow_predictor.py | grainpowder/gru-forward-numpy-app | efd24f9f397d51e7e18bdad5cba12451ad69d3de | ["MIT"] | null | null | null | src/npgru/predictor/tensorflow_predictor.py | grainpowder/gru-forward-numpy-app | efd24f9f397d51e7e18bdad5cba12451ad69d3de | ["MIT"] | null | null | null |
from typing import List, Tuple
import sentencepiece as spm
import tensorflow as tf
import tensorflow.keras as keras
from npgru.predictor.category_predictor import CategoryPredictor
from npgru.preprocessor.model_file import get_model_dir
class TensorflowPredictor(CategoryPredictor):
def __init__(self):
model_dir = get_model_dir()
self._tokenizer = spm.SentencePieceProcessor(model_file=str(model_dir.joinpath("tokenizer.model")))
self._model = keras.models.load_model(model_dir.joinpath("tensorflow"))
def predict(self, title: str, num_predictions) -> List[Tuple[int, float]]:
tokenized_title = self._tokenizer.encode(title) if title else [1]
probabilities = self._model(tf.constant([tokenized_title]))
prediction = sorted(enumerate(probabilities.numpy()[0]), key=lambda x: x[1], reverse=True)[:num_predictions]
return prediction
| 39.173913 | 116 | 0.751387 | 659 | 0.73141 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.032186 |
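Usage sketch for the predictor above (it assumes tokenizer.model and the tensorflow SavedModel already exist under the directory returned by get_model_dir()):

from npgru.predictor.tensorflow_predictor import TensorflowPredictor

predictor = TensorflowPredictor()
# Top-3 (category_index, probability) pairs, highest probability first.
for category, prob in predictor.predict("nvidia announces new gpu", 3):
    print(category, round(float(prob), 4))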
08a9c217d15d92822c4d608156162c3fb67806ee | 358 | py | Python | eo_sensors/migrations/0005_auto_20210425_1946.py | dymaxionlabs/satlomas-back | f4568f6535755fd4a2432ecc661a264872206c6c | ["Apache-2.0"] | 1 | 2021-02-18T20:11:25.000Z | 2021-02-18T20:11:25.000Z | eo_sensors/migrations/0005_auto_20210425_1946.py | dymaxionlabs/satlomas-back | f4568f6535755fd4a2432ecc661a264872206c6c | ["Apache-2.0"] | 7 | 2020-06-09T14:54:43.000Z | 2021-09-22T21:00:13.000Z | eo_sensors/migrations/0005_auto_20210425_1946.py | dymaxionlabs/satlomas-back | f4568f6535755fd4a2432ecc661a264872206c6c | ["Apache-2.0"] | 1 | 2020-05-08T20:42:49.000Z | 2020-05-08T20:42:49.000Z |
# Generated by Django 3.1.6 on 2021-04-25 19:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('eo_sensors', '0004_coveragemask'),
]
operations = [
migrations.AlterUniqueTogether(
name='raster',
unique_together={('date', 'source', 'slug')},
),
]
| 19.888889 | 57 | 0.592179 | 273 | 0.76257 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.296089 |
08ab2b8e0ae8691b40dba63be074cd70a395c8c1 | 503 | py | Python | algorithms/course_1/assignments/frac_knapsack/code.py | ideahitme/coursera | af44c8d817481d4f9025205284f109d95a9bb45d | ["MIT"] | null | null | null | algorithms/course_1/assignments/frac_knapsack/code.py | ideahitme/coursera | af44c8d817481d4f9025205284f109d95a9bb45d | ["MIT"] | null | null | null | algorithms/course_1/assignments/frac_knapsack/code.py | ideahitme/coursera | af44c8d817481d4f9025205284f109d95a9bb45d | ["MIT"] | null | null | null |
import math
line = raw_input().strip().split()
N = int(line[0])
cap = float(line[1])
items = []
for _ in xrange(N):
items.append(map(float, raw_input().split()))
def custcmp(x, y):
_x = x[0]/x[1]
_y = y[0]/y[1]
if _x < _y:
return 1
if _x == _y:
return 0
if _x > _y:
return -1
items = sorted(items, cmp=custcmp)
answer = 0.0
index = 0
while cap > 0 and index < N:
cur = items[index]
to_add = min(cur[1], cap)
answer += to_add*(cur[0]/cur[1])
cap -= to_add
index+=1
print answer
| 14.371429 | 46 | 0.600398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
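The solution above is Python 2 (raw_input, xrange, cmp=). The same greedy idea in Python 3, with a sort key instead of a comparator, as a self-contained sketch:

def frac_knapsack(capacity, items):
    """items: list of (value, weight) pairs; returns the best attainable value."""
    total = 0.0
    # Greedy: always take from the best value-per-weight ratio first.
    for value, weight in sorted(items, key=lambda it: it[0] / it[1], reverse=True):
        if capacity <= 0:
            break
        take = min(weight, capacity)
        total += take * (value / weight)
        capacity -= take
    return total

assert abs(frac_knapsack(50, [(60, 20), (100, 50), (120, 30)]) - 180.0) < 1e-9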
08ab2c42e46cc085323887951f27802509bc2c01 | 1,131 | py | Python | pythonDesafios/desafio058.py | mateusdev7/desafios-python | 6160ddc84548c7af7f5775f9acabe58238f83008 | ["MIT"] | null | null | null | pythonDesafios/desafio058.py | mateusdev7/desafios-python | 6160ddc84548c7af7f5775f9acabe58238f83008 | ["MIT"] | null | null | null | pythonDesafios/desafio058.py | mateusdev7/desafios-python | 6160ddc84548c7af7f5775f9acabe58238f83008 | ["MIT"] | null | null | null |
from random import randint
from time import sleep
opcao = 123
cont = 0
while opcao != 0:
print('-=-' * 20)
    print('I will think of a number between 0 and 10. Want to try to guess it?')
    print('-=-' * 20)
    print('\n[ 1 ] Yes    [ 0 ] No')
    opcao = int(input('Choose one of the options above\n>'))
    if opcao == 1:
        computador = randint(0, 10)  # The computer draws a number from 0 to 10
        usuario = int(input('\nPick a number between 0 and 10: ').strip())
        cont += 1
        while usuario != computador:
            if usuario < computador:
                print('Higher... Try again')
            else:
                print('Lower... Try again')
            usuario = int(input('Enter another number: '))
            cont += 1
        if usuario == computador:
            print('\nCONGRATULATIONS. You got it RIGHT!!!')
            print('Computing the number of attempts needed...')
            sleep(1)
            print('-=-' * 15)
            print(f'You needed {cont} attempt(s) to get it right.')
            print('-=-' * 15)
    elif opcao == 0:
        print('You left the game.')
| 35.34375 | 78 | 0.535809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.3972 |
08ab8c8ec2777c51be6f0455ab77ed9f159c8995 | 1,896 | py | Python | FeatureEngineeringPy_DataScience/demo153_rarecategories.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | ["MIT"] | null | null | null | FeatureEngineeringPy_DataScience/demo153_rarecategories.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | ["MIT"] | null | null | null | FeatureEngineeringPy_DataScience/demo153_rarecategories.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""Demo153_RareCategories.ipynb
## Rare Categories
- Labels
- The number of labels differs from one variable to another
- __high cardinality__ refers to uniqueness of data values
- The lower the cardinality, the more duplicated elements in a column
- A column with the lowest possible cardinality would have the same value for every row
- Highly cardinal variables dominate tree based algorithms
- Labels may only be present in the training data set, but not in the test data set
- Labels may appear in the test set that were not present in the training set
__Tree methods are biased towards variables with many labels__
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from google.colab import drive
drive.mount('/content/gdrive')
data = pd.read_csv("gdrive/My Drive/Colab Notebooks/FeatureEngineering/train.csv")
cat_cols = ['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked']
for i in cat_cols:
print('Number of categories in the variable {}: {}'.format(i,len(data[i].unique())))
print('Total rows: {}'.format(len(data)))
data['Sex'].value_counts()
data['Cabin_processed'] = data['Cabin'].astype(str).str[0]
data['Cabin_processed_X'] = data['Cabin'].astype(str).str[1]
cat_cols = [ 'Sex', 'Embarked', 'Cabin_processed']
for i in cat_cols:
sns.catplot(x=i, kind='count', data=data)
data['Cabin_processed'].value_counts() / len(data)
for i in cat_cols:
sns.catplot(x=i,data=data, hue='Survived', kind='count', palette="ch:.25")
"""### Transform Rare Labels"""
_temp = pd.Series(data['Cabin_processed'].value_counts() / len(data))
_temp.sort_values(ascending=False)
_temp
_temp = pd.Series(data['Cabin_processed'].value_counts() / len(data))
_temp
# _labels is never defined above; assuming the usual convention of treating
# labels that cover less than 5% of rows as rare (the threshold is a guess).
_labels = _temp[_temp < 0.05].index
for i in _labels:
    data['Cabin_processed'].replace(i, 'rare', inplace=True)
_temp = pd.Series(data['Cabin_processed'].value_counts() / len(data))
_temp
| 26.704225 | 88 | 0.728903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,071 | 0.564873 |
08ac25798ceabd59a9de5ca3ae55d5d23549ad85 | 6,640 | py | Python | adios-1.9.0/utils/skel/lib/skel_params.py | swatisgupta/Adaptive-compression | b97a1d3d3e0e968f59c7023c7367a7efa9f672d0 | ["BSD-2-Clause"] | null | null | null | adios-1.9.0/utils/skel/lib/skel_params.py | swatisgupta/Adaptive-compression | b97a1d3d3e0e968f59c7023c7367a7efa9f672d0 | ["BSD-2-Clause"] | null | null | null | adios-1.9.0/utils/skel/lib/skel_params.py | swatisgupta/Adaptive-compression | b97a1d3d3e0e968f59c7023c7367a7efa9f672d0 | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
import sys
import os
import argparse
import adios
import skel_settings
import skel_bpls
# Command line parsing is chained together. This is stage two. The first stage happens in ../bin/skel
def pparse_command_line (parent_parser):
parser = argparse.ArgumentParser (
parents=[parent_parser],
formatter_class=argparse.RawDescriptionHelpFormatter,
prog='skel',
#add_help=False,
description='''\
skel params
create a parameter file to define skeletal application behavior''')
parser.add_argument ('project', metavar='project', help='Name of the skel project')
parser.add_argument ('-g', '--group', help='adios group')
parser.add_argument ('-b', '--bpls', help='file containing bpls output')
parser.add_argument ('-f', '--force', dest='force', action='store_true', help='overwrite existing params file')
parser.set_defaults(force=False)
return parser.parse_args()
def generate_param_file_with_args (parent_parser):
args = pparse_command_line (parent_parser)
try:
config = adios.adiosConfig (args.project + '_skel.xml')
except (IOError):
        print "Error reading " + args.project + "_skel.xml. Try running skel xml " + args.project + " first."
return 1
outfilename = args.project + '_params.xml'
# Only proceed if outfilename does not already exist, or if -f was used
if os.path.exists (outfilename) and not args.force:
print "%s exists, aborting. Delete the file or use -f to overwrite." % outfilename
return 999
try:
config = adios.adiosConfig (args.project + '_skel.xml')
except (IOError):
print "Error reading " + args.project + "_skel.xml. Try running skel xml " + args.project + " first."
return 1
generate_param_file (args.project, outfilename, config, args.group, args.bpls)
def generate_param_file (app, outfile, config, groupname, bplsfile=None):
param_file = open (outfile, 'w')
if bplsfile is not None:
print "Using bpls data in %s" % bplsfile
bpdata = skel_bpls.bpls (open (bplsfile, 'r') )
#Write the file header
param_file.write ('<?xml version="1.0"?>')
param_file.write ('\n<skel-config application="' + app + '">')
param_file.write ('\n\n<!--')
param_file.write ('\n Within each group, use the scalar elements to control things like array sizes and offsets.')
param_file.write ('\n Simply adjust the value attribute as needed. The type is provided for convenience.')
param_file.write ('\n Note that there are 2 special values that you can use:')
param_file.write ('\n skel_mpi_size refers to the number of processes participating in this run, and')
param_file.write ('\n skel_mpi_rank is used to indicate the rank of the local process')
param_file.write ('\n -->\n')
#Write a section for each group of interest
for group in config.get_groups():
# if we've specified a particular group, ignore all of the other groups
if (groupname != None and groupname != group.get_name() ):
continue
param_file.write ('\n\n <adios-group name="' + group.get_name() + '">')
all_scalars = set()
all_arrays = set()
for var in group.get_vars():
if var.is_scalar():
if bplsfile is None:
all_scalars.add ('\n <scalar name="' + var.get_name() + '" type="' + var.get_type() + '" value="128" />')
else:
scalar_value = None
first_use_name, first_use_dim_num = var.find_first_use () # Get the name and dimension number of the first array that uses this scalar, or None if it is not used
if first_use_name is not None:
dims = bpdata.get_dims (first_use_name)
if dims is None:
# Try adding a leading slash to deal with the way that bpls reports variable names without one
dims = bpdata.get_dims ("/%s" % first_use_name)
if dims is not None:
scalar_value = dims[first_use_dim_num]
if scalar_value is None:
scalar_value = 0 # Should be used only for variables that do not appear in any array dimensions
all_scalars.add ('\n <scalar name="' + var.get_name() + '" type="' + var.get_type() + '" value="%s" />' % scalar_value)
else:
dims = var.get_dimensions()
dim_str ='dims="'
for dim in dims:
dim_str = dim_str + dim + ','
dim_str = dim_str.rstrip(',')
dim_str = dim_str + '"'
all_arrays.add ('\n <array name="' + var.get_gwrite() + '" type="' + var.get_type() + '" ' + dim_str + ' fill-method="rank"></array>')
for s in all_scalars:
param_file.write (s)
for a in all_arrays:
param_file.write (a)
param_file.write ('\n </adios-group>')
# Make a test run for all of the writes
param_file.write ('\n\n <batch name="writes" cores="128" walltime="0:30:00">')
for group in config.get_groups():
param_file.write ('\n <test type="write" group="' + group.get_name() + '" method="POSIX" iterations="10" rm="pre" tags="name1:val1,name2:val2" />')
param_file.write ('\n </batch>')
#Write the footer
param_file.write ('\n\n</skel-config>')
param_file.close()
# TODO: Get rid of this in favor of chained version, above.
def parse_command_line():
parser = argparse.ArgumentParser (description='Create a parameter file for the given skel project')
parser.add_argument ('project', metavar='project', help='Name of the skel project')
parser.add_argument ('-g', '--group', help='If specified, produce output only for this group')
return parser.parse_args()
def main(argv=None):
skel_settings.create_settings_dir_if_needed()
args = parse_command_line()
config = adios.adiosConfig (args.project + '_skel.xml')
# Determine outfile name
outfilename = args.project + '_params.xml.default'
# Only proceed if outfilename does not already exist.
if os.path.exists (outfilename):
        print "%s exists, aborting. Delete the file or use '-f' to overwrite." % outfilename
return 999
generate_param_file (args.project, outfilename, config, args.group)
if __name__ == "__main__":
main()
| 37.514124 | 181 | 0.613253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,526 | 0.380422 |
08ac337e36cbf17a299188a90d4c593630ec7136 | 786 | py | Python | centraldogma/util.py | line/centraldogma-python | 2248e8d7d660c0535aa747a70742ddd2bb0a5268 | ["Apache-2.0"] | 8 | 2021-12-02T00:51:35.000Z | 2022-01-07T09:49:08.000Z | centraldogma/util.py | line/centraldogma-python | 2248e8d7d660c0535aa747a70742ddd2bb0a5268 | ["Apache-2.0"] | 8 | 2021-11-22T03:37:17.000Z | 2022-02-14T10:02:31.000Z | centraldogma/util.py | line/centraldogma-python | 2248e8d7d660c0535aa747a70742ddd2bb0a5268 | ["Apache-2.0"] | 4 | 2021-11-22T03:48:39.000Z | 2021-12-31T05:42:43.000Z |
# Copyright 2021 LINE Corporation
#
# LINE Corporation licenses this file to you under the Apache License,
# version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def to_string(obj) -> str:
items = vars(obj).items()
values = [f"{k}={v}" for k, v in items]
return f"{obj.__class__.__name__}({','.join(values)})"
| 39.3 | 78 | 0.720102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 668 | 0.849873 |
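to_string walks vars(obj), so it works on any plain object; a quick illustration (assuming the package is installed):

from centraldogma.util import to_string

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

print(to_string(Point(1, 2)))  # -> Point(x=1,y=2)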
08ad2e5befe9beab57f5cfbb4752e8b8f6f82193 | 3,834 | py | Python | Build/site_scons/msvs_preprocessed.py | Syeberman/nohtyP | 59d7214a5a5474a03c54f45d79ad4fd037989a79 | ["CNRI-Python-GPL-Compatible"] | null | null | null | Build/site_scons/msvs_preprocessed.py | Syeberman/nohtyP | 59d7214a5a5474a03c54f45d79ad4fd037989a79 | ["CNRI-Python-GPL-Compatible"] | null | null | null | Build/site_scons/msvs_preprocessed.py | Syeberman/nohtyP | 59d7214a5a5474a03c54f45d79ad4fd037989a79 | ["CNRI-Python-GPL-Compatible"] | null | null | null |
"""Provides a Preprocessed action for the Microsoft Visual Studio compilers.
"""
import os
import SCons.Action
import SCons.Util
import preprocessed_builder
# XXX These are internal to SCons and may change in the future...but it's unlikely
from SCons.Tool.msvc import CSuffixes, CXXSuffixes, msvc_batch_key
# TODO Contribute this back to SCons
def _preprocessed_emitter(target, source, env, suffix):
target = [
SCons.Util.adjustixes(str(t), "", suffix, ensure_suffix=False)
for t in target
]
return (target, source)
def c_preprocessed_emitter(target, source, env):
suffix = env.subst('$CPREPROCESSEDSUFFIX')
return _preprocessed_emitter(target, source, env, suffix)
def cxx_preprocessed_emitter(target, source, env):
suffix = env.subst('$CXXPREPROCESSEDSUFFIX')
return _preprocessed_emitter(target, source, env, suffix)
# XXX Adapted from SCons' msvc_output_flag
def msvc_pp_output_flag(target, source, env, for_signature):
"""
Returns the correct /Fi flag for batching.
If batching is disabled or there's only one source file, then we
return an /Fi string that specifies the target explicitly. Otherwise,
we return an /Fi string that just specifies the first target's
directory (where the Visual C/C++ compiler will put the .i files).
"""
# TODO /Fi is not supported on Visual Studio 9.00 (2008) and earlier
# https://msdn.microsoft.com/en-us/library/8z9z0bx6(v=vs.90).aspx
# Fixing MSVC_BATCH mode. Previous if did not work when MSVC_BATCH
# was set to False. This new version should work better. Removed
# len(source)==1 as batch mode can compile only one file
# (and it also fixed problem with compiling only one changed file
# with batch mode enabled)
if not 'MSVC_BATCH' in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
return '/Fi$TARGET'
else:
# The Visual C/C++ compiler requires a \ at the end of the /Fi
# option to indicate an output directory. We use os.sep here so
# that the test(s) for this can be run on non-Windows systems
# without having a hard-coded backslash mess up command-line
# argument parsing.
return '/Fi${TARGET.dir}' + os.sep
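# Illustration (assumed paths, not part of the tool): for TARGET "build\foo.i",
# non-batch mode expands to "/Fibuild\foo.i", while batch mode expands to
# "/Fibuild\" so cl.exe drops one .i file per batched source into that directory.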
CPreprocessedAction = SCons.Action.Action("$PPCCCOM", "$PPCCCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
CXXPreprocessedAction = SCons.Action.Action("$PPCXXCOM", "$PPCXXCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
def generate_PreprocessedBuilder(env):
preprocessed = preprocessed_builder.createPreprocessedBuilder(env)
for suffix in CSuffixes:
preprocessed.add_action(suffix, CPreprocessedAction)
preprocessed.add_emitter(suffix, c_preprocessed_emitter)
for suffix in CXXSuffixes:
preprocessed.add_action(suffix, CXXPreprocessedAction)
preprocessed.add_emitter(suffix, cxx_preprocessed_emitter)
env['_MSVC_PP_OUTPUT_FLAG'] = msvc_pp_output_flag
# PPCC is the preprocessor-only mode for CC, the C compiler (compare with SHCC et al)
# TODO For SCons: be smart and when passed a preprocessed file, compiler skips certain options?
env['PPCC'] = '$CC'
env['PPCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
env['PPCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
env['PPCCCOM'] = '${TEMPFILE("$PPCC /P $_MSVC_PP_OUTPUT_FLAG /c $CHANGED_SOURCES $PPCFLAGS $PPCCFLAGS $_CCCOMCOM","$PPCCCOMSTR")}'
env['PPCXX'] = '$CXX'
env['PPCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
env['PPCXXCOM'] = '${TEMPFILE("$PPCXX /P $_MSVC_PP_OUTPUT_FLAG /c $CHANGED_SOURCES $PPCXXFLAGS $PPCCFLAGS $_CCCOMCOM","$PPCXXCOMSTR")}'
| 41.673913 | 139 | 0.684142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,996 | 0.520605 |
08af1415b9293340224b8360402e471dbf0548c7 | 5,716 | py | Python | django_geoip/south_migrations/0001_initial.py | mandalay-rp/django-geoip | 2608cb15cdd7678c2ff923aff2437b1a861b8e6b | [
"MIT"
]
| 38 | 2015-01-10T06:44:12.000Z | 2021-11-16T10:53:43.000Z | django_geoip/south_migrations/0001_initial.py | mandalay-rp/django-geoip | 2608cb15cdd7678c2ff923aff2437b1a861b8e6b | [
"MIT"
]
| 28 | 2015-01-11T08:44:06.000Z | 2019-07-25T19:04:10.000Z | django_geoip/south_migrations/0001_initial.py | mandalay-rp/django-geoip | 2608cb15cdd7678c2ff923aff2437b1a861b8e6b | [
"MIT"
]
| 56 | 2015-01-11T08:30:57.000Z | 2021-10-01T05:57:00.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Country'
db.create_table('django_geoip_country', (
('code', self.gf('django.db.models.fields.CharField')(max_length=2, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal('django_geoip', ['Country'])
# Adding model 'Region'
db.create_table('django_geoip_region', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(related_name='regions', to=orm['django_geoip.Country'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('django_geoip', ['Region'])
# Adding unique constraint on 'Region', fields ['country', 'name']
db.create_unique('django_geoip_region', ['country_id', 'name'])
# Adding model 'City'
db.create_table('django_geoip_city', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(related_name='cities', to=orm['django_geoip.Region'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=6, blank=True)),
('longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=6, blank=True)),
))
db.send_create_signal('django_geoip', ['City'])
# Adding unique constraint on 'City', fields ['region', 'name']
db.create_unique('django_geoip_city', ['region_id', 'name'])
# Adding model 'IpRange'
db.create_table('django_geoip_iprange', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('start_ip', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)),
('end_ip', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['django_geoip.Country'])),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['django_geoip.Region'], null=True)),
('city', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['django_geoip.City'], null=True)),
))
db.send_create_signal('django_geoip', ['IpRange'])
def backwards(self, orm):
# Removing unique constraint on 'City', fields ['region', 'name']
db.delete_unique('django_geoip_city', ['region_id', 'name'])
# Removing unique constraint on 'Region', fields ['country', 'name']
db.delete_unique('django_geoip_region', ['country_id', 'name'])
# Deleting model 'Country'
db.delete_table('django_geoip_country')
# Deleting model 'Region'
db.delete_table('django_geoip_region')
# Deleting model 'City'
db.delete_table('django_geoip_city')
# Deleting model 'IpRange'
db.delete_table('django_geoip_iprange')
models = {
'django_geoip.city': {
'Meta': {'unique_together': "(('region', 'name'),)", 'object_name': 'City'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '6', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '6', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'to': "orm['django_geoip.Region']"})
},
'django_geoip.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'django_geoip.iprange': {
'Meta': {'object_name': 'IpRange'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_geoip.City']", 'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_geoip.Country']"}),
'end_ip': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_geoip.Region']", 'null': 'True'}),
'start_ip': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'})
},
'django_geoip.region': {
'Meta': {'unique_together': "(('country', 'name'),)", 'object_name': 'Region'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'regions'", 'to': "orm['django_geoip.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['django_geoip']
| 53.420561 | 147 | 0.601645 | 5,590 | 0.977957 | 0 | 0 | 0 | 0 | 0 | 0 | 3,301 | 0.577502 |
08af904e7f82a923beed7c2fa65793eb9bf02793 | 878 | py | Python | popbl_servicesapp/flask_app/order/application/api_client.py | xetxezarreta/master-popbl1 | 253880b9ba358f63f666893cdbbffe7391fcd096 | [
"MIT"
]
| null | null | null | popbl_servicesapp/flask_app/order/application/api_client.py | xetxezarreta/master-popbl1 | 253880b9ba358f63f666893cdbbffe7391fcd096 | [
"MIT"
]
| 1 | 2021-06-02T00:57:11.000Z | 2021-06-02T00:57:11.000Z | popbl_servicesapp/flask_app/order/application/api_client.py | xetxezarreta/master-popbl1 | 253880b9ba358f63f666893cdbbffe7391fcd096 | [
"MIT"
]
| null | null | null | import requests
import json
from os import environ
from .models import Order, Piece
from .BLConsul import BLConsul
GATEWAY_PORT = environ.get("HAPROXY_PORT")
GATEWAY_ADDRESS = environ.get("HAPROXY_IP")
MACHINE_SERVICE = "machine"
PAYMENT_SERVICE = "payment"
DELIVERY_SERVICE = "delivery"
AUTH_SERVICE = "auth"
CA_CERT = environ.get("RABBITMQ_CA_CERT")
consul = BLConsul.get_instance()
class ApiClient:
@staticmethod
def auth_get_pubkey():
consul_dict = consul.get_service(AUTH_SERVICE)
print("CONSUL RESPONSE {}".format(consul_dict))
address = consul_dict['Address']
port = str(consul_dict['Port'])
r = requests.get("http://{}:{}/{}/pubkey".format(address, port, AUTH_SERVICE), verify=False)
if r.status_code == 200:
content = json.loads(r.content)
return content["publicKey"].encode("utf-8")
| 29.266667 | 100 | 0.693622 | 488 | 0.555809 | 0 | 0 | 466 | 0.530752 | 0 | 0 | 155 | 0.176538 |
08b0009d58869628b97762ea4dfa9d97bd3f4777 | 429 | py | Python | envdsys/envdaq/migrations/0006_controllerdef_component_map.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
]
| 1 | 2021-11-06T19:22:53.000Z | 2021-11-06T19:22:53.000Z | envdsys/envdaq/migrations/0006_controllerdef_component_map.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
]
| 25 | 2019-06-18T20:40:36.000Z | 2021-07-23T20:56:48.000Z | envdsys/envdaq/migrations/0006_controllerdef_component_map.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
]
| null | null | null | # Generated by Django 3.1.7 on 2021-02-26 21:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envdaq', '0005_controller_alias_name'),
]
operations = [
migrations.AddField(
model_name='controllerdef',
name='component_map',
field=models.TextField(default='{}', verbose_name='Component Map'),
),
]
| 22.578947 | 79 | 0.615385 | 336 | 0.783217 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.307692 |
08b08e4c091db6970d8bd9b3e8f858f92dfeb9ac | 2,569 | py | Python | polya/modules/congruence_closure_module.py | holtzermann17/polya | 6d611bf47185249a96f4cf7ee9b3884bc70a15ac | [
"Apache-2.0"
]
| 24 | 2015-01-01T18:21:40.000Z | 2021-08-29T01:56:14.000Z | polya/modules/congruence_closure_module.py | holtzermann17/polya | 6d611bf47185249a96f4cf7ee9b3884bc70a15ac | [
"Apache-2.0"
]
| 1 | 2018-09-06T17:53:13.000Z | 2018-09-07T13:57:39.000Z | polya/modules/congruence_closure_module.py | holtzermann17/polya | 6d611bf47185249a96f4cf7ee9b3884bc70a15ac | [
"Apache-2.0"
]
| 4 | 2017-02-08T15:04:09.000Z | 2021-05-02T15:13:05.000Z | ####################################################################################################
#
# congruence_closure_module.py
#
# Authors:
# Jeremy Avigad
# Rob Lewis
#
# This module maintains a union-find structure for terms in Blackboard, which is currently only used
# for congruence closure. It should perhaps be integrated differently into Blackboard.
#
# Contains a set for each equality class (up to constant multiples) of terms, and tracks which terms
# appear as arguments to which function terms.
#
####################################################################################################
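# Worked example (illustrative, not from the module): congruence closure derives
# f(x) == f(y) from x == y.  If the blackboard defines t1 = f(a) and t2 = f(b)
# and knows a == b, update_blackboard below asserts t1 == t2.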
import polya.main.terms as terms
import polya.main.messages as messages
import polya.util.timer as timer
import fractions
import itertools
class CongClosureModule:
def __init__(self):
pass
def update_blackboard(self, B):
"""
Checks the blackboard B for function terms with equal arguments, and asserts that the
function terms are equal.
"""
def eq_func_terms(f1, f2):
"""
Returns true if f1 and f2 have the same name and arity, and all args are equal.
"""
if f1.func_name != f2.func_name or len(f1.args) != len(f2.args):
return False
for i in range(len(f1.args)):
arg1, arg2 = f1.args[i], f2.args[i]
if arg1.coeff == 0:
eq = B.implies(arg2.term.index, terms.EQ, 0, 0) or arg2.coeff == 0
else:
eq = B.implies(arg1.term.index, terms.EQ,
fractions.Fraction(arg2.coeff, arg1.coeff), arg2.term.index)
if not eq:
return False
return True
timer.start(timer.CCM)
messages.announce_module('congruence closure module')
func_classes = {}
for i in (d for d in range(B.num_terms) if isinstance(B.term_defs[d], terms.FuncTerm)):
name = B.term_defs[i].func_name
func_classes[name] = func_classes.get(name, []) + [i]
for name in func_classes:
tinds = func_classes[name]
for (i, j) in itertools.combinations(tinds, 2):
# ti and tj are function terms with the same symbols. check if they're equal.
f1, f2 = B.term_defs[i], B.term_defs[j]
if eq_func_terms(f1, f2):
B.assert_comparison(terms.IVar(i) == terms.IVar(j))
timer.stop(timer.CCM)
def get_split_weight(self, B):
return None | 36.7 | 100 | 0.54963 | 1,809 | 0.704165 | 0 | 0 | 0 | 0 | 0 | 0 | 961 | 0.374076 |
08b3ea49c776eba1ca9a6e036f7a93721ad3e46b | 3,280 | py | Python | build.py | Jackcava/mappingToFHIR | 3189b55121a50ee1c4734227cde6da58ed6cb576 | [
"MIT"
]
| null | null | null | build.py | Jackcava/mappingToFHIR | 3189b55121a50ee1c4734227cde6da58ed6cb576 | [
"MIT"
]
| null | null | null | build.py | Jackcava/mappingToFHIR | 3189b55121a50ee1c4734227cde6da58ed6cb576 | [
"MIT"
]
| null | null | null | import pandas as pd
import numpy as np
import csv
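# Sketch of intent (inferred, not documented upstream): buildPat maps one source
# row onto the FHIR Patient element named by `key`; buildCond does the same for
# Condition. The A01_*/A02_* columns are Italian registry fields, e.g.
# A01_DESC_LUOGO_NASCITA = "description of place of birth".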
def buildPat(row,key):
if key == "extension.valueAddress.city":
return row.A01_DESC_LUOGO_NASCITA
elif key == "identifier.value":
return row.A01_ID_PERSONA
elif key == "name.family":
return row.A01_COGNOME
elif key == "name.given":
return row.A01_NOME
elif key == "gender":
if row.A01_SESSO=='M':
return 'male'
elif row.A01_SESSO=='F':
return 'female'
else:
return 'unknown'
elif key == "birthDate":
if isinstance(row.A01_DATA_NASCITA,str):
return row.A01_DATA_NASCITA[:10]
else:
return row.A01_DATA_NASCITA.strftime("%Y-%m-%d")
elif key == "contact.relationship.coding.code":
if row.A02_DESC_TELEFONO1 in ("MAMMA","PAPA'","MADRE","PADRE"):
return 'PRN'
elif row.A02_DESC_TELEFONO1 == "ZIA":
return 'AUNT'
elif row.A02_DESC_TELEFONO1 == "ZIO":
return 'UNCLE'
else:
return ''
elif key == "contact.relationship.coding.display":
if row.A02_DESC_TELEFONO1 in ("MAMMA","PAPA'","MADRE","PADRE"):
return 'parent'
elif row.A02_DESC_TELEFONO1 == "ZIA":
return 'aunt'
elif row.A02_DESC_TELEFONO1 == "ZIO":
return 'uncle'
else:
return ''
elif key == "contact.telecom.emailvalue":
return row.A02_EMAIL
elif key == "contact.telecom.phonevalue":
return row.A02_NUM_TELEFONO1
elif key == "contact.relationship.coding.code2":
if row.A02_DESC_TELEFONO2 in ("MAMMA","PAPA'","PAPA","MADRE","PADRE"):
return 'PRN'
elif row.A02_DESC_TELEFONO2 == "ZIA":
return 'AUNT'
elif row.A02_DESC_TELEFONO2 == "ZIO":
return 'UNCLE'
else:
return ''
elif key == "contact.relationship.coding.display2":
if row.A02_DESC_TELEFONO2 in ("MAMMA","PAPA'","PAPA","MADRE","PADRE"):
return 'parent'
elif row.A02_DESC_TELEFONO2 == "ZIA":
return 'aunt'
elif row.A02_DESC_TELEFONO2 == "ZIO":
return 'uncle'
else:
return ''
elif key == "contact.telecom.phonevalue2":
return row.A02_NUM_TELEFONO2
def buildCond(row,key):
if key == "extension.valueDateTime":
if isinstance(row.DT_REGISTRAZIONE,str):
return row.DT_REGISTRAZIONE[:10]
else:
return row.DT_REGISTRAZIONE.strftime("%Y-%m-%d")
elif key == "bodySite.coding.code":
if row.TITOLO_LIV2 == "Sottosede":
return row.CODICE_LIV2
elif key == "bodySite.text":
if row.TITOLO_LIV2 == "Sottosede":
return row.DESC_LIV2
elif key == "stage.summary.text":
if row.TITOLO_LIV2 == "Stadio":
stadio = row.CODICE_LIV2.split()[1]
return stadio
elif key == "subject.reference":
return "Patient/"+row.ID_PAZIENTE
elif key == "recordedDate":
if isinstance(row.DT_REGISTRAZIONE,str):
return row.DT_REGISTRAZIONE[:10]
else:
return row.DT_REGISTRAZIONE.strftime("%Y-%m-%d")
elif key == "description":
return row.DESC_LIV2
| 34.893617 | 78 | 0.576524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 797 | 0.242988 |
08b53292c5c752e44fcf8b466dc5d84fa3ed0ec7 | 231 | py | Python | server.py | LuisAlbizo/luisalbizo.github.io | 823cac2c184686eb5056f9e1d3d0790f9a2233e1 | [
"MIT"
]
| null | null | null | server.py | LuisAlbizo/luisalbizo.github.io | 823cac2c184686eb5056f9e1d3d0790f9a2233e1 | [
"MIT"
]
| null | null | null | server.py | LuisAlbizo/luisalbizo.github.io | 823cac2c184686eb5056f9e1d3d0790f9a2233e1 | [
"MIT"
]
| null | null | null | import http.server
import os
import socketserver
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("127.0.0.1", 8080), Handler)
print("server:\thttp://127.0.0.1:8080\n\nlog:")
httpd.serve_forever()
| 19.25 | 60 | 0.757576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.220779 |
08b53ae263a1ae583483ba9e1d84efca2906ad4a | 2,109 | py | Python | sources-filter-list.py | kerberizer/wikimedia-scripts | 18b78d5cc0042d5efcb355a65f4309fb4ae97eaf | [
"CC0-1.0"
]
| null | null | null | sources-filter-list.py | kerberizer/wikimedia-scripts | 18b78d5cc0042d5efcb355a65f4309fb4ae97eaf | [
"CC0-1.0"
]
| null | null | null | sources-filter-list.py | kerberizer/wikimedia-scripts | 18b78d5cc0042d5efcb355a65f4309fb4ae97eaf | [
"CC0-1.0"
]
| 1 | 2016-07-31T07:26:33.000Z | 2016-07-31T07:26:33.000Z | #!/usr/bin/env python3
import locale
import sys
from datetime import datetime as dt
import pywikibot as pwb
def main(argv):
dump_only = False
if len(argv) > 1:
if argv.pop() == '--dump':
dump_only = True
else:
print('Error: Unrecognized option.', file=sys.stderr)
sys.exit(1)
wik = pwb.Site(code='bg', fam='wikipedia')
params = {
'action': 'query',
'format': 'json',
'list': 'abusefilters',
'formatversion': '2',
'abfstartid': '12',
'abfendid': '12',
'abfprop': 'pattern',
}
pattern = pwb.data.api.Request(
site=wik,
parameters=params
).submit()['query']['abusefilters'][0]['pattern']
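    # Assumed entry shape (inferred from the slicing below): lines such as
    # "  r'\bexample\.com\b'," -- keep lines whose chars [2:5] are '\b, drop the
    # 5-char prefix and 4-char suffix, then unescape the dots.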
site_list = [_[5:][:-4].replace('\\.', '.') for _ in pattern.splitlines() if _[2:5] == "'\\b"]
site_list.sort()
if dump_only:
for site in site_list:
print('* {}'.format(site))
else:
        list_page_name = 'Уикипедия:Патрульори/СФИН'  # bg.wiki page "Wikipedia:Patrollers/SFIN"
list_page = pwb.Page(wik, list_page_name)
lnum_page = pwb.Page(wik, list_page_name + '/N')
lupd_page = pwb.Page(wik, list_page_name + '/U')
list_page.text = '{{' + list_page_name + '/H}}\n'
site_index = ''
for site in site_list:
if site[0] != site_index:
list_page.text += '\n<h3> {} </h3>\n'.format(site[0].capitalize())
site_index = site[0]
list_page.text += '* {}\n'.format(site)
list_page.text += '\n{{' + list_page_name + '/F}}'
lnum_page.text = str(len(site_list))
locale.setlocale(locale.LC_TIME, 'bg_BG.UTF-8')
lupd_page.text = dt.now().strftime('%H:%M на %e %B %Y').lower()
locale.resetlocale(locale.LC_TIME)
        list_page.save(summary='Бот: актуализация', quiet=True)  # summary: "Bot: update"
lnum_page.save(summary='Бот: актуализация', quiet=True)
lupd_page.save(summary='Бот: актуализация', quiet=True)
if __name__ == '__main__':
main(sys.argv)
# vim: set ts=4 sts=4 sw=4 tw=100 et:
| 31.477612 | 98 | 0.543385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.244608 |
08b87f9a1caf36c7bc295170c1ae9c29a566dd2b | 1,494 | py | Python | Python scripts/Mail/search-and-modify-inbox-mail.py | shartrooper/My-python-scripts | 5c3a8db4ed9a75bd9ab4b29153a788d9e6c5d28c | [
"MIT"
]
| null | null | null | Python scripts/Mail/search-and-modify-inbox-mail.py | shartrooper/My-python-scripts | 5c3a8db4ed9a75bd9ab4b29153a788d9e6c5d28c | [
"MIT"
]
| null | null | null | Python scripts/Mail/search-and-modify-inbox-mail.py | shartrooper/My-python-scripts | 5c3a8db4ed9a75bd9ab4b29153a788d9e6c5d28c | [
"MIT"
]
| null | null | null | Python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:20:19) [MSC v.1925 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> # pip install imapclient // pip install pyzmail
>>> import imapclient
>>> conn= imapclient.IMAPClient('imap.gmail.com', ssl=True) #True to use SSL encryption
>>> conn.login('[email protected]','whatever')
>>> conn.select_folder('INBOX', readonly=True)
>>> UIDs = conn.search(['SINCE 20-Aug-2015']) #return a list of unique IDs for mails
>>> rawMessage=conn.fetch(['mail int UID number to fetch'],['BODY[]','FLAGS'])
>>> import pyzmail
>>> pyzmail.PyzMessage.factory(rawMessage['same UID Number passed to rawMessage'][b'BODY[]'])
>>> message=pyzmail.PyzMessage.factory(rawMessage['same UID Number passed to rawMessage'][b'BODY[]'])
>>> message.get_subject() #mail's subject
>>> message.get_addresses('from')
>>> message.get_addresses('to')
>>> message.get_addresses('bcc')
>>> message.text_part # returns length and type
>>> message.html_part # None if the mail has no HTML part
>>> message.html_part == None # True
>>> message.text_part.get_payload().decode('UTF-8')
>>> message.text_part.charset
>>> conn.list_folders()
>>> conn.select_folder('INBOX',readonly=False) #to modify the inbox
>>> UIDS= conn.search(['ON 24-Aug-2015'])
>>> conn.delete_messages(['UIDs to delete'])
>>> ''' Full documentation at: https://imapclient.readthedocs.org http://www.magiksys.net/pyzmail ''' | 33.954545 | 101 | 0.680054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 609 | 0.407631 |
08bc79ee80be4534b8a825bbd1af9247d2662a71 | 2,633 | py | Python | diagnosis/system_diagnosis/inference.py | opengauss-mirror/openGauss-AI | 449ce3cac81ced74dd56edf76709553411b0814a | [
"MulanPSL-1.0"
]
| 1 | 2021-12-22T08:31:07.000Z | 2021-12-22T08:31:07.000Z | diagnosis/system_diagnosis/inference.py | opengauss-mirror/openGauss-AI | 449ce3cac81ced74dd56edf76709553411b0814a | [
"MulanPSL-1.0"
]
| null | null | null | diagnosis/system_diagnosis/inference.py | opengauss-mirror/openGauss-AI | 449ce3cac81ced74dd56edf76709553411b0814a | [
"MulanPSL-1.0"
]
 | 3 | 2021-12-16T13:55:57.000Z | 2022-02-24T09:53:49.000Z | # Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#     http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
import pickle, os, json
import numpy as np
import train
from argparse import ArgumentParser
from pprint import pprint
anomaly_type_num = 10
n_neighbors = 5
# data_explore()
# 0"cpu_saturation",
# 1"io_saturation",
# 2"database_backup",
# 3"table_restore",
# 4"poorly_physical_design",
# 5"poorly_written_query",
# 6"workload_spike",
# 7"flush_log",
# 8"vacuum_analyze",
# 9"lock_contention",
def kNN(alpha_vec, X_train, y_train, new_vec):
res_distance = []
# print(alpha_vec)
for i in range(len(X_train)):
idx = int(y_train[i])
res = np.sqrt(np.dot((X_train[i] - new_vec)**2, alpha_vec[idx]))
res_distance.append(res)
idx_res = np.argsort(res_distance)
# print(idx_res)
int_y = y_train.astype(int)
return np.argmax(np.bincount(int_y[idx_res[: n_neighbors]]))
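# Distance used above (comment-only sketch): each anomaly class idx carries a
# weight vector alpha_vec[idx], giving a class-weighted Euclidean metric
#   d(x, new) = sqrt(sum_j alpha_vec[idx][j] * (x[j] - new[j])**2)
# and the label is the majority vote over the n_neighbors nearest samples.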
def anomaly_metrics(alpha_vec, new_vec):
feature_vec = alpha_vec * new_vec
# threshold =
idx_list = np.argsort(feature_vec)[::-1]
return idx_list[:5]
def build_description(root_cause_id):
with open("./config/anomaly_type.json", "r") as f1, \
open("./config/anomaly_info.json", "r") as f2:
anomaly_lookup = json.load(f1)
desc_lookup = json.load(f2)
res = desc_lookup[anomaly_lookup[str(root_cause_id)]]
pprint(res)
X_train_path = "./model/X_train.npy"
y_train_path = "./model/y_train.npy"
alpha_vec_path = "./model/anomaly_vec.npy"
if __name__ == "__main__":
parser = ArgumentParser(description="")
parser.add_argument("--vec_path")
args = parser.parse_args()
X_train, y_train, alpha_vec = np.array([]), np.array([]), np.array([])
if os.path.isfile(X_train_path)==False or os.path.isfile(y_train_path)==False:
train.generate_X_y()
if os.path.isfile(alpha_vec_path)==False:
train.generate_anomaly_alpha()
X_train = np.load(X_train_path)
y_train = np.load(y_train_path)
alpha_vec = np.load(alpha_vec_path)
new_vec = np.load(args.vec_path)
root_cause_id = kNN(alpha_vec, X_train, y_train, new_vec)
build_description(root_cause_id)
| 28.010638 | 87 | 0.689708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.164451 |
08bd8918199d2e5006f69cc8ccd6b3fde0ba16d8 | 1,850 | py | Python | python/test_golden_master.py | AEGISoft/GildedRose-Refactoring-Kata | a81452de5b6831fa6c4f42b15f827ecf6ef29807 | [
"MIT"
]
| null | null | null | python/test_golden_master.py | AEGISoft/GildedRose-Refactoring-Kata | a81452de5b6831fa6c4f42b15f827ecf6ef29807 | [
"MIT"
]
| null | null | null | python/test_golden_master.py | AEGISoft/GildedRose-Refactoring-Kata | a81452de5b6831fa6c4f42b15f827ecf6ef29807 | [
"MIT"
]
| null | null | null | import unittest
from gilded_rose import Item, GildedRose
class GoldenMasterTest(unittest.TestCase):
def test_golden_master(self):
        output_file = None
        try:
            output_file = open("output.txt", 'r')
            # readlines() already returns a list; also strip the trailing
            # newlines so lines compare equal to the freshly generated ones
            golden_master_lines = [line.rstrip("\n") for line in output_file.readlines()]
        finally:
            if output_file is not None:
                output_file.close()
        lines = golden_master_test_run()
        for i in range(len(golden_master_lines)):
            self.assertEqual(golden_master_lines[i], lines[i])
def golden_master_test_run():
lines = ["OMGHAI!"]
items = [
Item(name="+5 Dexterity Vest", sell_in=10, quality=20),
Item(name="Aged Brie", sell_in=2, quality=0),
Item(name="Elixir of the Mongoose", sell_in=5, quality=7),
Item(name="Sulfuras, Hand of Ragnaros", sell_in=0, quality=80),
Item(name="Sulfuras, Hand of Ragnaros", sell_in=-1, quality=80),
Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=15, quality=20),
Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=10, quality=49),
Item(name="Backstage passes to a TAFKAL80ETC concert", sell_in=5, quality=49),
Item(name="Conjured Mana Cake", sell_in=3, quality=6), # <-- :O
]
days = 2
import sys
if len(sys.argv) > 1:
days = int(sys.argv[1]) + 1
for day in range(days):
lines.append("-------- day %s --------" % day)
lines.append("name, sellIn, quality")
for item in items:
lines.append(str(item))
lines.append("")
GildedRose(items).update_quality()
return lines
def persist_golden_master_testrun():
output_file = open("output.txt", mode="w+")
for line in golden_master_test_run():
output_file.write(line)
output_file.write("\n")
if __name__ == '__main__':
unittest.main()
| 31.355932 | 87 | 0.617838 | 437 | 0.236216 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.201081 |
08bee3076eef6096e40c84d40b43d0ef450a6e30 | 895 | py | Python | utils/randomdata.py | M1d0r1/py_mantis | 8d2b05601b9240e76e2e07b50770e39df5bcade9 | [
"Apache-2.0"
]
| null | null | null | utils/randomdata.py | M1d0r1/py_mantis | 8d2b05601b9240e76e2e07b50770e39df5bcade9 | [
"Apache-2.0"
]
| null | null | null | utils/randomdata.py | M1d0r1/py_mantis | 8d2b05601b9240e76e2e07b50770e39df5bcade9 | [
"Apache-2.0"
]
| null | null | null | import random
import string
class RandomData:
def __init__(self):
pass
@staticmethod
def get_random_bool():
i = random.randrange(2)
if i == 0:
return True
else:
return False
@staticmethod
def get_random_list_value(list):
i = random.randrange(len(list))
return list[i]
# noinspection PyUnusedLocal
@staticmethod
def get_random_string():
ind = random.randrange(20)
s = ''.join([random.choice(string.ascii_letters + string.digits + " ") for i in range(ind)])
return s
@staticmethod
def get_random_phone():
return str(random.randrange(1000000, 9999999))
@staticmethod
def get_random_multistring():
return "%s\n%s\n%s" % (
RandomData.get_random_string(), RandomData.get_random_string(), RandomData.get_random_string())
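# Usage sketch (values vary per call; shown outputs are illustrative):
#   RandomData.get_random_bool()                 -> True or False
#   RandomData.get_random_list_value(['a', 'b']) -> e.g. 'b'
#   RandomData.get_random_phone()                -> e.g. '4821937'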
| 22.948718 | 107 | 0.61676 | 864 | 0.965363 | 0 | 0 | 745 | 0.832402 | 0 | 0 | 45 | 0.050279 |
08c039eccfb3500006401f61d37873f932777364 | 1,120 | py | Python | douyin/hot/trend.py | miaotiaotech/DouYin | e996ad99ce27e0d13f2856c497fd4b4f05f95b56 | [
"MIT"
]
| 657 | 2018-10-24T16:58:04.000Z | 2022-03-15T03:58:04.000Z | douyin/hot/trend.py | 1997lw/DouYin | 5859f4db5258ad10926fddaa2b4074c85581d419 | [
"MIT"
]
| 15 | 2018-10-30T09:40:11.000Z | 2020-08-09T13:58:31.000Z | douyin/hot/trend.py | 1997lw/DouYin | 5859f4db5258ad10926fddaa2b4074c85581d419 | [
"MIT"
]
| 249 | 2018-10-25T07:12:14.000Z | 2022-02-21T07:49:58.000Z | from douyin.utils import fetch
from douyin.config import hot_trend_url, common_headers
from douyin.utils.tranform import data_to_music, data_to_topic
from douyin.structures.hot import HotTrend
from douyin.utils.common import parse_datetime
# define trend query params
query = {
'version_code': '2.9.1',
'count': '10',
}
def trend():
"""
get trend result
:return:
"""
offset = 0
while True:
query['cursor'] = str(offset)
result = fetch(hot_trend_url, headers=common_headers, params=query, verify=False)
category_list = result.get('category_list')
datetime = parse_datetime(result.get('extra', {}).get('now'))
final = []
for item in category_list:
# process per category
            if item.get('desc') == '热门话题':  # "hot topics"
                final.append(data_to_topic(item.get('challenge_info', {})))
            if item.get('desc') == '热门音乐':  # "hot music"
                final.append(data_to_music(item.get('music_info', {})))
yield HotTrend(datetime=datetime, data=final, offset=offset, count=int(query.get('count')))
offset += 10
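# Usage sketch (live network call assumed): each iteration yields the next
# 10-item page of the hot-trend feed.
#   for page, _ in zip(trend(), range(2)):
#       print(page.offset, len(page.data))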
| 32 | 99 | 0.633036 | 0 | 0 | 803 | 0.706866 | 0 | 0 | 0 | 0 | 236 | 0.207746 |
08c1a85992031481a6829f933c45c2206c709fa4 | 288 | py | Python | hashing/hashing.py | subhamsagar524/Learn-Blockchain | 316f30ed9d43f6ab806ca87b9b83c0237ef69828 | [
"MIT"
]
| null | null | null | hashing/hashing.py | subhamsagar524/Learn-Blockchain | 316f30ed9d43f6ab806ca87b9b83c0237ef69828 | [
"MIT"
]
| null | null | null | hashing/hashing.py | subhamsagar524/Learn-Blockchain | 316f30ed9d43f6ab806ca87b9b83c0237ef69828 | [
"MIT"
]
| 1 | 2020-03-13T06:32:46.000Z | 2020-03-13T06:32:46.000Z | # Import the hashing Library
import hashlib
# Get the string as input
word = input("Enter the word for Hashing: ")
# Get the hashing
hashed_code = hashlib.sha256(word.encode())
final = hashed_code.hexdigest()
# Print the result
print("Hashed with 256 bit: ")
print(final)
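# Example (deterministic): entering "abc" prints
# ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad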
| 20.571429 | 45 | 0.704861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.503472 |
08c3c73fb071c563aa6c6cb9106af9a4e78d2bdf | 1,416 | py | Python | bookmarks/bookmarks/models.py | tom-henderson/bookmarks | 5515bedf1008da3e97caf0ed5867bcf983b375b1 | [
"MIT"
]
| 6 | 2017-01-09T22:59:31.000Z | 2022-01-06T01:40:57.000Z | bookmarks/bookmarks/models.py | tom-henderson/bookmarks | 5515bedf1008da3e97caf0ed5867bcf983b375b1 | [
"MIT"
]
| 30 | 2016-09-13T07:30:26.000Z | 2022-02-07T22:49:03.000Z | bookmarks/bookmarks/models.py | tom-henderson/bookmarks | 5515bedf1008da3e97caf0ed5867bcf983b375b1 | [
"MIT"
]
| null | null | null | from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.dispatch import receiver
from django.conf import settings
from taggit.managers import TaggableManager
import requests
class Bookmark(models.Model):
title = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True, null=True)
date_added = models.DateTimeField(default=timezone.now, blank=True)
tags = TaggableManager(blank=True)
private = models.BooleanField(default=False)
url = models.URLField(max_length=500)
def __unicode__(self):
return "{}: {} [{}]".format(
self.pk,
self.title[:40],
self.date_added
)
@receiver(models.signals.post_save, sender=Bookmark)
def bookmark_post_save_handler(sender, instance, created, *args, **kwargs):
# Only run for new items, not updates
if created:
if not hasattr(settings, 'SLACK_WEBHOOK_URL'):
return
payload = {
'channel': "#bookmarks-dev",
'username': "Bookmarks",
'text': "<{}|{}>\n{}".format(
instance.url,
instance.title,
instance.description,
),
'icon_emoji': ":blue_book:",
'unfurl_links': True
}
requests.post(settings.SLACK_WEBHOOK_URL, json=payload)
| 28.897959 | 74 | 0.631356 | 510 | 0.360169 | 0 | 0 | 664 | 0.468927 | 0 | 0 | 173 | 0.122175 |
08c3ea3ed3c0d6241f479fa852ed05c431f46706 | 797 | py | Python | vernam cipher.py | BenMiller3/Vernam-Cipher | 19f7a447bc8080c8e275b96a85d359f4e187a4d3 | [
"MIT"
]
| null | null | null | vernam cipher.py | BenMiller3/Vernam-Cipher | 19f7a447bc8080c8e275b96a85d359f4e187a4d3 | [
"MIT"
]
| null | null | null | vernam cipher.py | BenMiller3/Vernam-Cipher | 19f7a447bc8080c8e275b96a85d359f4e187a4d3 | [
"MIT"
]
| null | null | null | """
Vernam Cipher
Benjamin D. Miller
Takes a key, and a message
Encrypts the message using the key
"""
def vernam(key,message):
message = str(message)
m = message.upper().replace(" ","") # Convert to upper case, remove whitespace
encrypt = ""
try:
key = int(key) # if the key value is not a number, then run with key = 0
except ValueError:
key = 0
for i in range(len(m)):
letter = ord(m[i])-65 # Letters now range 0-25
        letter = (letter + key)%26 # wrap within the 26-letter alphabet, staying in 0-25
letter +=65
encrypt = encrypt + chr(letter) # Concatenate message
return encrypt
""" * TEST CASES * """
vernam(9,"hello world")
vernam(14,"TEST_CASE 34!")
vernam("test","test")
| 27.482759 | 91 | 0.567127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.450439 |
08c47e02acc3cf4c516e8edc1336ab1be1430cd8 | 421 | py | Python | utils.py | c0derabbit/talk | 26673fde934ef51e76002ea6ddc65bdb42720865 | [
"MIT"
]
| null | null | null | utils.py | c0derabbit/talk | 26673fde934ef51e76002ea6ddc65bdb42720865 | [
"MIT"
]
| 1 | 2017-05-25T20:37:54.000Z | 2017-05-26T07:33:00.000Z | utils.py | c0derabbit/talk | 26673fde934ef51e76002ea6ddc65bdb42720865 | [
"MIT"
]
| null | null | null | from datetime import datetime as d
def stringify_date(date):
try:
return '{0}-{1}-{2}-{3}-{4}'.format(date.year, date.month, date.day, date.hour, date.minute)
except ValueError:
raise ValueError('Invalid date format', date)
def parse_date(date):
try:
return d.strptime(date, '%Y-%m-%d-%H-%M')
except ValueError:
raise ValueError('Could not convert string to date', date)
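# Round-trip sketch: stringify_date(d(2021, 3, 2, 9, 5)) -> '2021-3-2-9-5',
# which parse_date() accepts, since strptime tolerates non-zero-padded fields.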
| 30.071429 | 100 | 0.64133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.218527 |
08c693a49ad9f776684155a7c2f26843f0a00070 | 3,694 | py | Python | fineract/objects/org.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | [
"Apache-2.0"
]
| 7 | 2019-03-11T16:17:33.000Z | 2020-10-22T21:57:51.000Z | fineract/objects/org.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | [
"Apache-2.0"
]
| 3 | 2019-11-05T20:22:16.000Z | 2019-12-11T17:09:04.000Z | fineract/objects/org.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | [
"Apache-2.0"
]
| 2 | 2020-11-19T16:00:36.000Z | 2021-11-19T09:36:13.000Z | from fineract.objects.currency import Currency
from fineract.objects.fineract_object import FineractObject
from fineract.objects.types import ChargeTimeType, ChargeAppliesTo, ChargeCalculationType, ChargePaymentMode
class Office(FineractObject):
"""
This class represent an Office
"""
def _init_attributes(self):
self.id = None
self.name = None
self.name_decorated = None
self.external_id = None
self.opening_date = None
self.hierarchy = None
def _use_attributes(self, attributes):
self.id = attributes.get('id', None)
self.name = attributes.get('name', None)
self.name_decorated = attributes.get('nameDecorated', None)
self.external_id = attributes.get('externalId', None)
self.opening_date = self._make_date_object(attributes.get('openingDate', None))
self.hierarchy = attributes.get('hierarchy', None)
class Staff(FineractObject):
"""
This class represents a Staff
"""
def _init_attributes(self):
self.id = None
self.firstname = None
self.lastname = None
self.display_name = None
self.office_id = None
self.office_name = None
self.is_loan_officer = None
self.external_id = None
self.is_active = None
self.join_date = None
def _use_attributes(self, attributes):
self.id = attributes.get('id', None)
self.firstname = attributes.get('firstname', None)
self.lastname = attributes.get('lastname', None)
self.display_name = attributes.get('displayName', None)
self.office_id = attributes.get('officeId', None)
self.office_name = attributes.get('officeName', None)
self.is_loan_officer = attributes.get('isLoanOfficer', None)
        self.external_id = attributes.get('externalId', None)
        self.is_active = attributes.get('isActive', None)
self.join_date = self._make_date_object(attributes.get('joiningDate', None))
class Fund(FineractObject):
"""
This class represents a Fund
"""
def _init_attributes(self):
self.id = None
self.name = None
def _use_attributes(self, attributes):
self.id = attributes.get('id', None)
self.name = attributes.get('name', None)
class Charge(FineractObject):
"""
This class represents a Charge
"""
def _init_attributes(self):
self.id = None
self.name = None
self.active = None
self.penalty = None
self.currency = None
self.amount = None
self.charge_time_type = None
self.charge_applies_to = None
self.charge_calculation_type = None
self.charge_payment_mode = None
def _use_attributes(self, attributes):
self.id = attributes.get('id', None)
self.name = attributes.get('name', None)
self.active = attributes.get('active', None)
self.penalty = attributes.get('penalty', None)
self.currency = self._make_fineract_object(Currency, attributes.get('currency', None))
self.amount = attributes.get('amount', None)
self.charge_time_type = self._make_fineract_object(ChargeTimeType, attributes.get('chargeTimeType', None))
self.charge_applies_to = self._make_fineract_object(ChargeAppliesTo, attributes.get('chargeAppliesTo', None))
self.charge_calculation_type = self._make_fineract_object(ChargeCalculationType,
attributes.get('chargeCalculationType', None))
self.charge_payment_mode = self._make_fineract_object(ChargePaymentMode,
attributes.get('chargePaymentMode', None))
| 36.574257 | 117 | 0.647266 | 3,466 | 0.938278 | 0 | 0 | 0 | 0 | 0 | 0 | 472 | 0.127775 |
08c6e61cafacb0416494f10178b2d50c3d4b7ef8 | 1,736 | py | Python | Heap/PathWithMinEffort.py | karan2808/Python-Data-Structures-and-Algorithms | a4b39ddf7297541d90dc4efcaab883f928281abd | [
"MIT"
]
| 2 | 2021-01-31T03:42:01.000Z | 2021-01-31T03:43:08.000Z | Heap/PathWithMinEffort.py | karan2808/Python-Data-Structures-and-Algorithms | a4b39ddf7297541d90dc4efcaab883f928281abd | [
"MIT"
]
| null | null | null | Heap/PathWithMinEffort.py | karan2808/Python-Data-Structures-and-Algorithms | a4b39ddf7297541d90dc4efcaab883f928281abd | [
"MIT"
]
| 1 | 2021-01-31T03:42:02.000Z | 2021-01-31T03:42:02.000Z | from heapq import heapify, heappop, heappush
class Solution:
def minimumEffortPath(self, heights):
# get the max rows and cols
m, n = len(heights), len(heights[0])
# make a heap to store the current min cost, x, and y
heap = [(0, 0, 0)]
# keep track of current cost
currCost = 0
# keep track of the nodes you have visited
visited = set()
# make a directions array
directions = [[-1, 0], [1, 0], [0, 1], [0, -1]]
while heap:
# get the min cost val, x and y coordinate
k, x, y = heappop(heap)
# update the cost
currCost = max(currCost, k)
# if we reach the bottom right corner, return the cost
if (x, y) == (m -1, n - 1):
return currCost
# add current node to the visited set
visited.add((x, y))
# for each direction, find the new cost
for dir_ in directions:
xn = x + dir_[0]
yn = y + dir_[1]
# check boundary conditions and if the cell has been visited
if 0 <= xn <= m - 1 and 0 <= yn <= n - 1 and (xn, yn) not in visited:
# get new cost
newc = abs(heights[x][y] - heights[xn][yn])
# push the new x, y location and the new cost to min heap
heappush(heap, (newc, xn, yn))
# if no path, return -1
return -1
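# Note (sketch): edges leave the heap in increasing height-difference order
# (Prim-style), so the first pop of (m-1, n-1) makes currCost -- the largest
# edge admitted so far -- the minimal achievable maximum "effort".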
def main():
heights = [[1,2,2],[3,8,2],[5,3,5]]
mySol = Solution()
print("The min cost path for the grid heights = [[1,2,2],[3,8,2],[5,3,5]] is " + str(mySol.minimumEffortPath(heights)))
if __name__ == "__main__":
main() | 36.166667 | 123 | 0.506336 | 1,452 | 0.836406 | 0 | 0 | 0 | 0 | 0 | 0 | 600 | 0.345622 |
08c9794f0bd47ea3dffb38ca1c36f7471259ea64 | 62 | py | Python | car_acc_graphs.py | EricMbuthia/car_acc | 11c1d32400279f462861793ad393f00861445b64 | [
"MIT"
]
| null | null | null | car_acc_graphs.py | EricMbuthia/car_acc | 11c1d32400279f462861793ad393f00861445b64 | [
"MIT"
]
| null | null | null | car_acc_graphs.py | EricMbuthia/car_acc | 11c1d32400279f462861793ad393f00861445b64 | [
"MIT"
]
| null | null | null | def example_plotting_functions():
#Sort then plot
pass | 20.666667 | 33 | 0.725806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.241935 |
08c9e9c176a984ea5d15821ab3616cd2313fc432 | 1,427 | wsgi | Python | vagrant/catalog/StuffMart.wsgi | cpwhidden/StuffMart | a192b8cad8942d0bfddb3af861f1e48c460e28cf | [
"MIT"
]
| null | null | null | vagrant/catalog/StuffMart.wsgi | cpwhidden/StuffMart | a192b8cad8942d0bfddb3af861f1e48c460e28cf | [
"MIT"
]
| null | null | null | vagrant/catalog/StuffMart.wsgi | cpwhidden/StuffMart | a192b8cad8942d0bfddb3af861f1e48c460e28cf | [
"MIT"
]
| null | null | null | activate_this = '/var/www/html/venv/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
import sys, os, logging
from flask_apscheduler import APScheduler
sys.path.insert(0, 'var/www/html/StuffMart/vagrant/catalog')
logging.basicConfig(stream=sys.stderr)
from server import flask as application
application.secret_key = 'qPHE[Cht}*kSCVango3i'
application.config['APP_DIR'] = os.path.abspath(os.path.dirname(__file__))
application.config['WHOOSH_BASE'] = 'server/whoosh'
application.config['PRODUCT_IMAGES_FOLDER'] = 'vagrant/catalog/server/static/product_images/'
application.config['JOBS'] = [
{
'id': 'buildNewlyAddedRSSFeed',
'func': 'server.views:buildNewlyAddedRSSFeed',
'trigger': 'interval',
'seconds': (60*60)
},
{
'id': 'buildNewlyAddedAtomFeed',
'func': 'server.views:buildNewlyAddedAtomFeed',
'trigger': 'interval',
'seconds': (60*60)
},
{
'id': 'buildNewlyAddedRSSFeedAtStartup',
'func': 'server.views:buildNewlyAddedRSSFeed'
},
{
'id': 'buildNewlyAddedAtomFeedAtStartup',
'func': 'server.views:buildNewlyAddedAtomFeed'
}
]
application.config['SCHEDULER_VIEWS_ENABLED'] = True
application.debug = True
scheduler = APScheduler()
scheduler.init_app(application)
scheduler.start() | 34.804878 | 93 | 0.658725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 603 | 0.422565 |
08cc589cc9423942aa94cc3bb343109a1f7cba67 | 18,161 | py | Python | tests/strategies/test_horizontal.py | rohith-bs/dgraphpandas | 29e91e2e7bb1d5d991ab94709a2d7e27f7dd7316 | [
"MIT"
]
| 1 | 2022-02-28T17:34:11.000Z | 2022-02-28T17:34:11.000Z | tests/strategies/test_horizontal.py | rohith-bs/dgraphpandas | 29e91e2e7bb1d5d991ab94709a2d7e27f7dd7316 | [
"MIT"
]
| null | null | null | tests/strategies/test_horizontal.py | rohith-bs/dgraphpandas | 29e91e2e7bb1d5d991ab94709a2d7e27f7dd7316 | [
"MIT"
]
| 1 | 2021-04-10T19:57:05.000Z | 2021-04-10T19:57:05.000Z | import unittest
from unittest.mock import patch, Mock
import pandas as pd
from pandas.testing import assert_frame_equal
from parameterized import parameterized
from dgraphpandas.strategies.horizontal import horizontal_transform
class HorizontalTests(unittest.TestCase):
@parameterized.expand([
(None, {'config': {}}, 'config_key'),
(pd.DataFrame(), None, 'config_key'),
(pd.DataFrame(), '', 'config_key'),
(pd.DataFrame(), {'config': {}}, None),
(pd.DataFrame(), {'config': {}}, ''),
])
def test_horizontal_transform_null_parameters(self, frame, config, config_file_key):
'''
Ensures when parameters are null, then an
error is raised
'''
with self.assertRaises(ValueError):
horizontal_transform(frame, config, config_file_key)
def test_horizontal_config_key_does_not_exist(self):
'''
Ensures when the config key does not exist
within the config then an error is raised
'''
frame = pd.DataFrame()
config_key = 'my_key'
config = {
'files': {
'some_other_key': {}
}
}
with self.assertRaises(KeyError):
horizontal_transform(frame, config, config_key)
@parameterized.expand([
('',),
(None,),
])
def test_horizontal_subject_fields_not_provided(self, subject_fields):
'''
Ensures when subject fields is not provided
then an error is raised
'''
frame = pd.DataFrame()
config_key = 'my_key'
config = {
'files': {
'my_key': {
'subject_fields': subject_fields
}
}
}
with self.assertRaises(ValueError):
horizontal_transform(frame, config, config_key)
def test_horizontal_could_not_convert_type(self):
'''
Ensures when a type could not be applied to a column,
then an error is raised
'''
frame = pd.DataFrame(data={
'customer_id': [1, 2, 3],
'age': [23, 'not number', 56]
})
config = {
'files': {
'customer': {
'subject_fields': ['customer_id'],
'type_overrides': {
'customer_id': 'int32',
'age': 'int32'
}
}
}
}
config_file_key = 'customer'
with self.assertRaises(SystemExit):
horizontal_transform(frame, config, config_file_key)
@parameterized.expand([
###
(
'single_predicate',
pd.DataFrame(data={
'customer_id': [1, 2, 3],
'age': [23, 67, 56]
}),
{
'files': {
'customer': {
'subject_fields': ['customer_id'],
'type_overrides': {
'customer_id': 'int32',
'age': 'int32'
}
}
}
},
'customer',
pd.DataFrame(data={
'customer_id': pd.Series([1, 2, 3], dtype='int32'),
'predicate': pd.Series(['age']*3, dtype='O'),
'object': pd.Series([23, 67, 56], dtype='int32')
})
),
###
(
'multiple_predicates',
pd.DataFrame(data={
'customer_id': [1, 2, 3],
'age': [23, 67, 56],
'weight': [189, 167, 190]
}),
{
'files': {
'customer': {
'subject_fields': ['customer_id'],
'type_overrides': {
'customer_id': 'int32',
'age': 'int32',
'weight': 'int32'
}
}
}
},
'customer',
pd.DataFrame(data={
'customer_id': pd.Series([1, 2, 3, 1, 2, 3], dtype='int32'),
'predicate': pd.Series(['age']*3 + ['weight']*3, dtype='O'),
'object': pd.Series([23, 67, 56, 189, 167, 190], dtype='int32')
})
),
###
(
'multiple_subject_fields',
pd.DataFrame(data={
'customer_id': [1, 2, 3],
'order_id': [405, 210, 321],
'value': [200, 321, 67],
}),
{
'files': {
'order': {
'subject_fields': ['customer_id', 'order_id'],
'type_overrides': {
'customer_id': 'int32',
'order_id': 'int32',
'value': 'int32'
}
}
}
},
'order',
pd.DataFrame(data={
'customer_id': pd.Series([1, 2, 3], dtype='int32'),
'order_id': pd.Series([405, 210, 321], dtype='int32'),
'predicate': pd.Series(['value']*3, dtype='O'),
'object': pd.Series([200, 321, 67], dtype='int32')
})
)
])
@patch('dgraphpandas.strategies.horizontal.vertical_transform')
def test_horizontal_melted_passed(self, name, frame, config, config_file_key, expected_melted, transform_mock: Mock):
'''
Ensures that the passed horizontal frame is melted and
passed into the vertical_transform.
Also ensures the same config and key are passed through
'''
intrinsic_mock = Mock(spec=pd.DataFrame)
edges_mock = Mock(spec=pd.DataFrame)
transform_mock.return_value = (intrinsic_mock, edges_mock)
intrinsic, edges = horizontal_transform(frame, config, config_file_key)
transform_mock.assert_called_once()
args, kwargs = transform_mock.call_args_list[0]
invoked_frame, invoked_config, invoked_key = args
assert_frame_equal(invoked_frame, expected_melted)
self.assertEqual(invoked_config, config)
self.assertEqual(invoked_key, config_file_key)
self.assertEqual(kwargs, {})
self.assertEqual(intrinsic_mock, intrinsic)
self.assertEqual(edges_mock, edges)
def test_horizontal_frame_only_has_subject_and_no_data_fields(self):
'''
Ensures when the horizontal frame only has subject fields
and no actual data fields then an error is raised
'''
frame = pd.DataFrame(data={
'customer_id': [1, 2, 3],
'order_id': [405, 210, 321]
})
config = {
'files': {
'order': {
'subject_fields': ['customer_id', 'order_id'],
'type_overrides': {
'customer_id': 'int32',
'order_id': 'int32',
}
}
}
}
config_key = 'order'
with self.assertRaises(ValueError):
horizontal_transform(frame, config, config_key)
@patch('dgraphpandas.strategies.horizontal.vertical_transform')
@patch('dgraphpandas.strategies.horizontal.pd.read_csv', spec=pd.read_csv)
def test_horizontal_melted_file_path_passed(self, mock_pandas: Mock, mock_transform: Mock):
'''
Ensures when a file path(str) it passed into the transform, then the file
is read using read_csv before going into logic.
'''
file = 'test.csv'
frame = pd.DataFrame(data={
'customer_id': [1, 2, 3],
'age': [23, 67, 56]
})
config = {
'files': {
'customer': {
'subject_fields': ['customer_id'],
'type_overrides': {
'customer_id': 'int32',
'age': 'int32'
}
}
}
}
config_file_key = 'customer'
expected_melted = pd.DataFrame(data={
'customer_id': pd.Series([1, 2, 3], dtype='int32'),
'predicate': pd.Series(['age']*3, dtype='O'),
'object': pd.Series([23, 67, 56], dtype='int32')
})
mock_pandas.return_value = frame
horizontal_transform(file, config, config_file_key)
args, kwargs = mock_pandas.call_args_list[0]
self.assertEqual(file, args[0])
self.assertEqual({}, kwargs)
args, kwargs = mock_transform.call_args_list[0]
assert_frame_equal(expected_melted, args[0])
self.assertEqual(config, args[1])
self.assertEqual(config_file_key, args[2])
@patch('dgraphpandas.strategies.horizontal.vertical_transform')
@patch('dgraphpandas.strategies.horizontal.pd.read_csv', spec=pd.read_csv)
def test_horizontal_melted_file_path_custom_csv_passed(self, mock_pandas: Mock, mock_transform: Mock):
'''
Ensures when a read_csv_options option is defined inside file configuration
it is applied to the pd.read_csv call.
'''
file = 'test.csv'
read_csv_options = {'sep': ';'}
frame = pd.DataFrame(data={
'customer_id': [1, 2, 3],
'age': [23, 67, 56]
})
config = {
'files': {
'customer': {
'subject_fields': ['customer_id'],
'type_overrides': {
'customer_id': 'int32',
'age': 'int32'
},
'read_csv_options': read_csv_options
}
}
}
config_file_key = 'customer'
expected_melted = pd.DataFrame(data={
'customer_id': pd.Series([1, 2, 3], dtype='int32'),
'predicate': pd.Series(['age']*3, dtype='O'),
'object': pd.Series([23, 67, 56], dtype='int32')
})
mock_pandas.return_value = frame
horizontal_transform(file, config, config_file_key)
args, kwargs = mock_pandas.call_args_list[0]
self.assertEqual(file, args[0])
self.assertEqual(read_csv_options, kwargs)
args, kwargs = mock_transform.call_args_list[0]
assert_frame_equal(expected_melted, args[0])
self.assertEqual(config, args[1])
self.assertEqual(config_file_key, args[2])
@parameterized.expand([
###
(
'year_wrong_order',
{'dob': {'format': "%Y-%m-%d"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['03-02-2021', '01-03-1945'],
'weight': [50, 32]
})
),
###
(
'alphanumerical_string',
{'dob': {'format': "%Y-%m-%d"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['not a date', '01-03-1945'],
'weight': [50, 32]
})
),
###
(
'missing_dashes',
{'dob': {'format': "%Y-%m%d"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['2021-03-02', '19450301'],
'weight': [50, 32]
})
),
###
(
'missing_dots',
{'dob': {'format': "%Y.%m.%d"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['2021-03-02', '1945.03&01'],
'weight': [50, 32]
})
),
###
(
'malformed_month_string',
{'dob': {'format': "%d-%b-%Y"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['02-FebFake-2021', '01-Mar-1945'],
'weight': [50, 32]
})
)
])
@patch('dgraphpandas.strategies.horizontal.vertical_transform')
def test_horizontal_transform_incorrect_date_format(self, name, date_format, frame, transform_mock: Mock):
'''
Ensures when the date format provided does not match the value within the frame,
then an error is raised.
'''
config_file_key = 'customer'
config = {
'files': {
config_file_key: {
'subject_fields': ['customer_id'],
'date_fields': date_format
}
}
}
with self.assertRaisesRegex(ValueError, "time data (.*) (doesn't|does not) match format(.*)"):
horizontal_transform(frame, config, config_file_key)
transform_mock.assert_not_called()
@parameterized.expand([
###
(
'uncoverted_month_day',
{'dob': {'format': "%Y"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['2021-03-02', '1945-03-01'],
'weight': [50, 32]
})
),
###
(
'uncoverted_month_year',
{'dob': {'format': "%m-%d"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['03-02-2021', '03-01-2021'],
'weight': [50, 32]
})
)
])
@patch('dgraphpandas.strategies.horizontal.vertical_transform')
def test_horizontal_transform_unconverted_date_parts(self, name, date_format, frame, transform_mock: Mock):
'''
Ensures when the date partially matches and there are some converted
parts, an error is raised
'''
config_file_key = 'customer'
config = {
'files': {
config_file_key: {
'subject_fields': ['customer_id'],
'date_fields': date_format
}
}
}
with self.assertRaisesRegex(ValueError, "unconverted data remains: (.*)"):
horizontal_transform(frame, config, config_file_key)
transform_mock.assert_not_called()
@parameterized.expand([
###
(
'dash_format',
{'dob': {'format': "%Y-%m-%d"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['2021-03-02', '1945-03-01'],
'weight': [50, 32]
}),
pd.DataFrame(data={
'customer_id': [1, 2, 1, 2],
'predicate': ['dob', 'dob', 'weight', 'weight'],
'object':[pd.to_datetime('2021-03-02 00:00:00'), pd.to_datetime('1945-03-01 00:00:00'), 50, 32]
})
),
###
(
'dot_format',
{'dob': {'format': "%Y.%m.%d"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['1999.05.09', '1789.02.12'],
'weight': [50, 32]
}),
pd.DataFrame(data={
'customer_id': [1, 2, 1, 2],
'predicate': ['dob', 'dob', 'weight', 'weight'],
'object': [pd.to_datetime('1999-05-09 00:00:00'), pd.to_datetime('1789-02-12 00:00:00'), 50, 32]
})
),
###
(
'multiple_date_fields',
{'updated_at': {'format': '%Y.%m.%d'}, 'dob': {'format': "%Y.%m.%d"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['1999.05.09', '1789.02.12'],
'updated_at': ['2021.03.02', '2021.03.04'],
'weight': [50, 32]
}),
pd.DataFrame(data={
'customer_id': [1, 2, 1, 2, 1, 2],
'predicate': ['dob', 'dob', 'updated_at', 'updated_at', 'weight', 'weight'],
'object': [
pd.to_datetime('1999-05-09 00:00:00'),
pd.to_datetime('1789-02-12 00:00:00'),
pd.to_datetime('2021-03-02 00:00:00'),
pd.to_datetime('2021-03-04 00:00:00'),
50,
32]
})
),
###
(
'multiple_date_fields_different_formats',
{'updated_at': {'format': '%Y$%m$%d'}, 'dob': {'format': "%Y.%m.%d"}},
pd.DataFrame(data={
'customer_id': [1, 2],
'dob': ['1999.05.09', '1789.02.12'],
'updated_at': ['2021$03$02', '2021$03$04'],
'weight': [50, 32]
}),
pd.DataFrame(data={
'customer_id': [1, 2, 1, 2, 1, 2],
'predicate': ['dob', 'dob', 'updated_at', 'updated_at', 'weight', 'weight'],
'object': [
pd.to_datetime('1999-05-09 00:00:00'),
pd.to_datetime('1789-02-12 00:00:00'),
pd.to_datetime('2021-03-02 00:00:00'),
pd.to_datetime('2021-03-04 00:00:00'),
50,
32]
})
)
])
@patch('dgraphpandas.strategies.horizontal.vertical_transform')
def test_horizontal_transform_correct_date_format(self, name, date_format, frame, expected_melted, transform_mock: Mock):
'''
Ensures when the date_format provided is in the correct format,
no error is raised
'''
config_file_key = 'customer'
config = {
'files': {
config_file_key: {
'subject_fields': ['customer_id'],
'date_fields': date_format
}
}
}
horizontal_transform(frame, config, config_file_key)
transform_mock.assert_called_once()
args, kwargs = transform_mock.call_args_list[0]
passed_frame, passed_config, passed_config_key = args
assert_frame_equal(passed_frame, expected_melted)
self.assertEqual(passed_config, config)
self.assertEqual(passed_config_key, config_file_key)
self.assertEqual(kwargs, {})
| 34.395833 | 125 | 0.472 | 17,928 | 0.98717 | 0 | 0 | 15,810 | 0.870547 | 0 | 0 | 5,312 | 0.292495 |
08cedf482cda63c943ec43e8d04a65c278427e19 | 378 | py | Python | clienteTCP.py | planetacomputer/pythonsecurity | 5b808512afae5bc221715f37f91a0294f4800f19 | [
"MIT"
]
| null | null | null | clienteTCP.py | planetacomputer/pythonsecurity | 5b808512afae5bc221715f37f91a0294f4800f19 | [
"MIT"
]
| null | null | null | clienteTCP.py | planetacomputer/pythonsecurity | 5b808512afae5bc221715f37f91a0294f4800f19 | [
"MIT"
]
| null | null | null | #!/usr/bin/python # This is client.py file
import socket # Import socket module
s = socket.socket() # Create a socket object
#host = socket.gethostname() # Get local machine name
host = socket.gethostbyname("localhost")
print host
port = 53 # Reserve a port for your service.
s.connect((host, port))
print s.recv(1024)
s.close() # close the socket
08ceeff12c2a6ee62212a18498cd6880997296e3 | 1,759 | py | Python | application/routes.py | N-A-Podgornov/CFT-MLC | ded9267c5b8053a15bdcc67be9f83097749cfb13 | [
"Apache-2.0"
]
| null | null | null | application/routes.py | N-A-Podgornov/CFT-MLC | ded9267c5b8053a15bdcc67be9f83097749cfb13 | [
"Apache-2.0"
]
| null | null | null | application/routes.py | N-A-Podgornov/CFT-MLC | ded9267c5b8053a15bdcc67be9f83097749cfb13 | [
"Apache-2.0"
]
| null | null | null | import os
import shutil
from flask import render_template, redirect, url_for, request
from werkzeug.utils import secure_filename
from config import Config
from application import app
from application.model import Model
@app.route('/')
def index():
return redirect(url_for('submit'))
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in Config.ALLOWED_EXTENSIONS
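# e.g. allowed_file("spectro.png") is True iff "png" appears in
# Config.ALLOWED_EXTENSIONS; note the comparison is case-sensitive.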
def file_system_preparation():
try:
shutil.rmtree(path=Config.UPLOAD_FOLDER)
shutil.rmtree(path=Config.PATH_TO_SPECTROGRAM_FOLDER + Config.SPECTROGRAM_FOLDER)
except OSError:
print("error :: failed to clean file system")
try:
os.mkdir(path=Config.UPLOAD_FOLDER)
os.mkdir(path=Config.PATH_TO_SPECTROGRAM_FOLDER + Config.SPECTROGRAM_FOLDER)
except OSError:
print("error :: failed to prepare file system")
@app.route('/submit', methods=['GET', 'POST'])
def submit():
file_system_preparation()
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('response', filename=filename))
return render_template('submit.html')
@app.route('/<filename>', methods=['GET'])
def response(filename):
in_fn, fn_ex = os.path.splitext(filename)
out_fn_w = os.path.join(Config.PATH_TO_SPECTROGRAM_FOLDER + Config.SPECTROGRAM_FOLDER, in_fn + ".png")
out_fn_r = os.path.join(Config.SPECTROGRAM_FOLDER, in_fn + ".png")
Model(filename).get_spectrogram().savefig(out_fn_w)
return render_template('response.html', spectrogram=out_fn_r)
| 30.859649 | 106 | 0.704377 | 0 | 0 | 0 | 0 | 931 | 0.529278 | 0 | 0 | 210 | 0.119386 |
08cfc63dc9bcf57b5303ab14c053f28fd612cafc | 4,095 | py | Python | tests/test_onnxml_imputer_converter.py | vumichien/hummingbird | 8981e11ce2536167c329a5d9d20e81125a792fe4 | [
"MIT"
]
| 2,772 | 2020-05-04T21:03:40.000Z | 2022-03-30T11:00:03.000Z | tests/test_onnxml_imputer_converter.py | vumichien/hummingbird | 8981e11ce2536167c329a5d9d20e81125a792fe4 | [
"MIT"
]
| 486 | 2020-05-05T00:45:44.000Z | 2022-03-15T01:02:31.000Z | tests/test_onnxml_imputer_converter.py | vumichien/hummingbird | 8981e11ce2536167c329a5d9d20e81125a792fe4 | [
"MIT"
]
| 232 | 2019-11-02T22:06:38.000Z | 2022-03-25T07:36:17.000Z | """
Tests onnxml Imputer converter
"""
import unittest
import warnings
import numpy as np
import torch
from sklearn.impute import SimpleImputer
from hummingbird.ml._utils import onnx_ml_tools_installed, onnx_runtime_installed, lightgbm_installed
from hummingbird.ml import convert
if onnx_runtime_installed():
import onnxruntime as ort
if onnx_ml_tools_installed():
from onnxmltools import convert_sklearn
from onnxmltools.convert.common.data_types import FloatTensorType as FloatTensorType_onnx
class TestONNXImputer(unittest.TestCase):
def _test_imputer_converter(self, model, mode="onnx"):
warnings.filterwarnings("ignore")
X = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)
model.fit(X)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(model, initial_types=[("float_input", FloatTensorType_onnx(X.shape))])
# Get the predictions for the ONNX-ML model
session = ort.InferenceSession(onnx_ml_model.SerializeToString())
output_names = [session.get_outputs()[i].name for i in range(len(session.get_outputs()))]
inputs = {session.get_inputs()[0].name: X}
onnx_ml_pred = session.run(output_names, inputs)[0]
# Create test model by calling converter
model = convert(onnx_ml_model, mode, X)
# Get the predictions for the test model
pred = model.transform(X)
return onnx_ml_pred, pred
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_const(self, rtol=1e-06, atol=1e-06):
model = SimpleImputer(strategy="constant")
onnx_ml_pred, onnx_pred = self._test_imputer_converter(model)
# Check that predicted values match
np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_const_nan0(self, rtol=1e-06, atol=1e-06):
model = SimpleImputer(strategy="constant", fill_value=0)
onnx_ml_pred, onnx_pred = self._test_imputer_converter(model)
# Check that predicted values match
np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_mean(self, rtol=1e-06, atol=1e-06):
model = SimpleImputer(strategy="mean", fill_value="nan")
onnx_ml_pred, onnx_pred = self._test_imputer_converter(model)
# Check that predicted values match
np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_converter_raises_rt(self):
warnings.filterwarnings("ignore")
model = SimpleImputer(strategy="mean", fill_value="nan")
X = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)
model.fit(X)
# Create ONNX-ML model
onnx_ml_model = convert_sklearn(model, initial_types=[("float_input", FloatTensorType_onnx(X.shape))])
onnx_ml_model.graph.node[0].attribute[0].name = "".encode()
self.assertRaises(RuntimeError, convert, onnx_ml_model, "onnx", X)
@unittest.skipIf(
not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test requires ONNX, ORT and ONNXMLTOOLS"
)
def test_onnx_imputer_torch(self, rtol=1e-06, atol=1e-06):
model = SimpleImputer(strategy="constant")
onnx_ml_pred, onnx_pred = self._test_imputer_converter(model, mode="torch")
# Check that predicted values match
np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)
if __name__ == "__main__":
unittest.main()
| 40.147059 | 125 | 0.70696 | 3,532 | 0.862515 | 0 | 0 | 2,573 | 0.628327 | 0 | 0 | 710 | 0.173382 |
08d1b0407331ee4e1921fc4b74a0794639337160 | 7,520 | py | Python | rs_etl.py | jlauman/data_engineering_project_03 | 722c0f5226ed29c00d6b33e64da5982fe0be69e0 | [
"MIT"
]
| null | null | null | rs_etl.py | jlauman/data_engineering_project_03 | 722c0f5226ed29c00d6b33e64da5982fe0be69e0 | [
"MIT"
]
| null | null | null | rs_etl.py | jlauman/data_engineering_project_03 | 722c0f5226ed29c00d6b33e64da5982fe0be69e0 | [
"MIT"
]
| null | null | null | import configparser, os, glob, csv, json, hashlib, time
import pandas as pd
import psycopg2
from pprint import pprint
from rs_sql_queries import staging_events_insert, staging_songs_insert
from rs_sql_queries import insert_table_queries
import boto3
from botocore import UNSIGNED
from botocore.config import Config
DEND_BUCKET='udacity-dend'
# global lookup table
NAME_TO_GENDER = {}
def load_gender_lookup():
"""Load lookup dictionary to find gender given a name.
"""
base_path = os.getcwd() + '/data/names'
for root, dirs, files in os.walk(base_path):
file_paths = glob.glob(os.path.join(root,'*.txt'))
for file_path in file_paths:
print('names: %s' % file_path)
with open(file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
# pprint(row)
NAME_TO_GENDER[row[0]] = row[1]
# pprint(NAME_TO_GENDER)
True
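# The files under data/names are assumed to follow the US SSA baby-names CSV
# format, e.g. "Mary,F,7065" -> NAME_TO_GENDER["Mary"] == "F" (inferred from
# the row[0]/row[1] indexing above, not confirmed elsewhere in this script).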
def get_object_paths(s3, bucket, prefix):
"""List objects in S3 bucket with given prefix.
Uses paginator to ensure a complete list of object paths is returned.
"""
# r1 = s3.list_objects(Bucket=DEND_BUCKET, Prefix=prefix)
# r2 = list(map(lambda obj: obj['Key'], r1['Contents']))
# r3 = list(filter(lambda str: str.endswith('.json'), r2))
# s3 client does not need to be closed
object_paths = []
paginator = s3.get_paginator('list_objects')
pages = paginator.paginate(Bucket=bucket, Prefix=prefix)
for page in pages:
# print("len(page['Contents'])=" + str(len(page['Contents'])))
r1 = list(map(lambda obj: obj['Key'], page['Contents']))
r2 = list(filter(lambda str: str.endswith('.json'), r1))
object_paths.extend(r2)
print('%s/%s total object paths = %d' % (bucket, prefix, len(object_paths)))
time.sleep(2)
return object_paths
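# Note on the paginator above: a single S3 list_objects call returns at most
# 1,000 keys, so paginating is what makes the "complete list" promise in the
# docstring hold for large buckets.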
def load_staging_log_data(cur, conn):
"""Load song-play event records into s_songplay_event table.
"""
# import pdb; pdb.set_trace()
# load log_data (events) into s_event table
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
file_paths = get_object_paths(s3, DEND_BUCKET, 'log_data')
pprint(file_paths)
for file_path in file_paths:
sql = str(staging_events_insert)
print('log_data: %s' % file_path)
obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)
str1 = obj1['Body'].read().decode('utf-8').strip()
df = pd.read_json(str1, lines=True)
df = df[df.page == 'NextSong']
df['timestamp'] = pd.to_datetime(df['ts'], unit='ms')
df['year'] = df['timestamp'].dt.year
df['week'] = df['timestamp'].dt.weekofyear
df['month'] = df['timestamp'].dt.month
df['day'] = df['timestamp'].dt.day
df['hour'] = df['timestamp'].dt.hour
df['weekday'] = df['timestamp'].dt.weekday
# pprint(df)
for index, row in df.iterrows():
# create a sha256 hash for event's unique id
event_id = hashlib.sha256((str(row.userId) + ' ' + str(row.sessionId) + ' ' + row.timestamp.strftime('%Y%m%d%H%M') + ' ' + row.song).encode('utf-8')).hexdigest()
str1 = ("(" +
"'" + event_id + "', " +
"'" + row.artist.replace("'", "''") + "', " +
"'" + row.auth + "', " +
"'" + row.firstName.replace("'", "''") + "', " +
"" + str(row.itemInSession) + ", " +
"'" + row.lastName.replace("'", "''") + "', " +
"'" + NAME_TO_GENDER[row.firstName] + "', " +
"" + str(row.length) + ", " +
"'" + row.level + "', " +
"'" + row.location.replace("'", "''") + "', " +
"'" + row.method + "', " +
"'" + row.page + "', " +
"'" + str(row.registration) + "', " +
"'" + str(row.sessionId) + "', " +
"'" + row.song.replace("'", "''") + "', " +
"'" + str(row.status) + "', " +
"'" + row.timestamp.strftime('%Y-%m-%d %H') + "', " +
"" + str(row.year) + ", " +
"" + str(row.week) + ", " +
"" + str(row.month) + ", " +
"" + str(row.day) + ", " +
"" + str(row.hour) + ", " +
"" + str(row.weekday) + ", " +
"'" + row.userAgent.replace("'", "''") + "', " +
"'" + str(row.userId) + "'" +
"),\n")
sql += str1
sql = ''.join(sql).strip()[:-1] + ';'
# print(sql)
# import pdb; pdb.set_trace()
cur.execute(sql)
conn.commit()
def load_staging_song_data(cur, conn):
"""Load song records into s_song staging table.
"""
sql = str(staging_songs_insert)
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
file_paths = get_object_paths(s3, DEND_BUCKET, 'song_data')
pprint(file_paths)
for file_path in file_paths:
print('song_data: %s' % file_path)
obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)
str1 = obj1['Body'].read().decode('utf-8').strip()
data = json.loads(str1)
if data['year'] == 0: data['year'] = None
# fix link string...
if str(data['artist_location']).startswith('<a'): data['artist_location'] = None
# pprint(data)
str2 = ("(" +
"'" + data['artist_id'] + "', " +
"" + (str(data['artist_latitude']) if not data['artist_latitude'] == None else 'null') + ", " +
"'" + str(data['artist_location']).replace("'", "''") + "', " +
"" + (str(data['artist_longitude']) if not data['artist_longitude'] == None else 'null') + ", " +
"'" + str(data['artist_name']).replace("'", "''") + "', " +
"" + str(data['duration']) + ", " +
"" + str(data['num_songs']) + ", " +
"'" + data['song_id'] + "', " +
"'" + str(data['title']).replace("'", "''") + "', " +
"" + (str(data['year']) if not data['year'] == None else 'null') + "" +
"),\n")
sql += str2
# print(str2)
# batch inserts at 8k character threshold
if len(sql) > 8192:
print(' 8k insert...')
sql = ''.join(sql).strip()[:-1] + ';'
cur.execute(sql)
conn.commit()
sql = str(staging_songs_insert)
print('last insert...')
sql = ''.join(sql).strip()[:-1] + ';'
# print(sql)
# import pdb; pdb.set_trace()
cur.execute(sql)
conn.commit()
def load_staging_tables(cur, conn):
load_staging_song_data(cur, conn)
load_staging_log_data(cur, conn)
def insert_tables(cur, conn):
"""Populate staging, dimension and fact tables.
The fact table must be the last item in the query list.
"""
for query in insert_table_queries:
if query.strip() != "":
pprint(query)
cur.execute(query)
conn.commit()
def main():
"""Run Redshift ETL for staging, dimension and fact tables.
"""
config = configparser.ConfigParser()
config.read('rs_dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
load_gender_lookup()
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main() | 38.367347 | 173 | 0.529255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,211 | 0.294016 |
08d269d1ebc51a6ac75c04bc8fcc26f6ea8bd98e | 1,002 | py | Python | cron/weather.py | joedanz/flask-weather | fe35aa359da6f5d7f942d97837403e153b5c5ede | [
"Apache-2.0"
]
| 1 | 2017-08-25T18:55:11.000Z | 2017-08-25T18:55:11.000Z | cron/weather.py | joedanz/flask-weather | fe35aa359da6f5d7f942d97837403e153b5c5ede | [
"Apache-2.0"
]
| null | null | null | cron/weather.py | joedanz/flask-weather | fe35aa359da6f5d7f942d97837403e153b5c5ede | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
import json, urllib2, datetime
from sqlite3 import dbapi2 as sqlite3
# zip codes to log
zipcodes = ['07740','11210','33139','90210']
# configuration
DATABASE = '../db/weather.db'
SECRET_KEY = 'hackerati'
DEBUG = True
# open database
db = sqlite3.connect(DATABASE)
for zipcode in zipcodes:
# pull weather from API
weather_api = urllib2.urlopen('http://api.openweathermap.org/data/2.5/weather?zip='+zipcode+',us')
weather_data = weather_api.read()
weather_api.close()
weather = json.loads(weather_data)
# convert from kelvin to fahrenheit
temp_val = (((weather['main']['temp']-273.15)*9)/5)+32
humidity_val = weather['main']['humidity']
print zipcode,
print temp_val,
print humidity_val
# insert db entry
db.execute('insert into weather (zipcode, temp, humidity, stamp) values (?, ?, ?, ?)',
[zipcode, int(temp_val), int(humidity_val), datetime.datetime.utcnow()])
db.commit()
# close database
db.close()
| 26.368421 | 102 | 0.673653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.372255 |
08d4af28a19751f94bb0827b79075eb4b7ae0ea7 | 234 | py | Python | tests/test_print_as_discovered.py | acardos/git_inspector | ee194a62606ddb882ce0736618bae053e6b8521d | [
"MIT"
]
| 4 | 2021-12-06T15:35:19.000Z | 2022-01-23T23:17:38.000Z | tests/test_print_as_discovered.py | acardos/git_inspector | ee194a62606ddb882ce0736618bae053e6b8521d | [
"MIT"
]
| 12 | 2021-03-31T09:14:40.000Z | 2022-01-31T10:01:25.000Z | tests/test_print_as_discovered.py | acardos/git_inspector | ee194a62606ddb882ce0736618bae053e6b8521d | [
"MIT"
]
| 1 | 2022-01-22T11:37:08.000Z | 2022-01-22T11:37:08.000Z |
from git import Repo
from git_inspector import find_git_directories
def test_find_git_directories(repo: Repo):
generator = find_git_directories(search_paths=[repo.working_dir])
assert next(generator) == repo.working_dir
| 19.5 | 69 | 0.794872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
08d50632dbe42cde10ed75ee126dd035ddf3804a | 3,480 | py | Python | src/frontend/function_transforms/pass_div_zero.py | mfeliu/gelpia | 30c6c1030165b26bf5f84613316f6fc2ce3ebe8b | [
"MIT"
]
| null | null | null | src/frontend/function_transforms/pass_div_zero.py | mfeliu/gelpia | 30c6c1030165b26bf5f84613316f6fc2ce3ebe8b | [
"MIT"
]
| null | null | null | src/frontend/function_transforms/pass_div_zero.py | mfeliu/gelpia | 30c6c1030165b26bf5f84613316f6fc2ce3ebe8b | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
try:
from gelpia import bin_dir
except ImportError:
print("gelpia not found, gaol_repl must be in your PATH\n")
bin_dir = ""
from pass_utils import *
from output_flatten import flatten
import re
import sys
import subprocess
import os.path as path
def div_by_zero(exp, inputs, assigns, consts):
query_proc = subprocess.Popen(path.join(bin_dir, 'gaol_repl'),
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
universal_newlines=True,
bufsize=0)
root = exp
bad_exp = None
def gaol_eval(exp):
flat_exp = flatten(exp, inputs, consts, assigns)
query_proc.stdin.write('{}\n'.format(flat_exp))
result = query_proc.stdout.readline()
try:
match = re.match("[<\[]([^,]+),([^>\]]+)[>\]]", result)
l = float(match.group(1))
r = float(match.group(2))
except:
print("Fatal error in gaol_eval")
print(" query was: '{}'".format(flat_exp))
print(" unable to match: '{}'".format(result))
sys.exit(-1)
return l,r
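    # gaol_repl prints intervals as "[lo, hi]" or "<lo, hi>"; the regex above
    # accepts either bracket style (inferred from its character classes).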
def contains_zero(exp):
l,r = gaol_eval(exp)
return l<=0 and 0<=r
def less_than_zero(exp):
l,r = gaol_eval(exp)
return l<0
def _div_by_zero(exp):
nonlocal bad_exp
typ = exp[0]
if typ in {'Float', 'Integer', 'ConstantInterval',
'InputInterval', 'Input', 'Symbol'}:
return False
if typ == '/':
retval = (contains_zero(exp[2]) or
_div_by_zero(exp[1]) or
_div_by_zero(exp[2]))
if retval:
bad_exp = exp
return retval
if typ == "powi":
temp = False
if less_than_zero(exp[2]):
temp = contains_zero(exp[1])
retval = temp or _div_by_zero(exp[1]) or _div_by_zero(exp[2])
if retval:
bad_exp = exp
return retval
if typ == "pow":
temp = False
e = expand(exp[2], assigns, consts)
assert(e[0] == "Integer")
if int(e[1]) < 0:
temp = contains_zero(exp[1])
retval = temp or _div_by_zero(exp[1])
if retval:
bad_exp = exp
return retval
if typ in BINOPS:
return _div_by_zero(exp[1]) or _div_by_zero(exp[2])
if typ in UNOPS.union({"Return"}):
return _div_by_zero(exp[1])
if typ in {"Variable"}:
return _div_by_zero(assigns[exp[1]])
if typ in {"Const"}:
return _div_by_zero(consts[exp[1]])
print("div_by_zero error unknown: '{}'".format(exp))
sys.exit(-1)
result = _div_by_zero(exp)
query_proc.communicate()
return (result, bad_exp)
def runmain():
from lexed_to_parsed import parse_function
from pass_lift_inputs_and_assigns import lift_inputs_and_assigns
from pass_lift_consts import lift_consts
from pass_simplify import simplify
data = get_runmain_input()
exp = parse_function(data)
exp, inputs, assigns = lift_inputs_and_assigns(exp)
exp, consts = lift_consts(exp, inputs, assigns)
exp = simplify(exp, inputs, assigns, consts)
has_div_zero, bad_exp = div_by_zero(exp, inputs, assigns, consts)
print("divides by zero:")
print(has_div_zero)
if has_div_zero:
print()
print("offending exp:")
print(bad_exp)
print()
print_exp(exp)
print()
print_inputs(inputs)
print()
print_assigns(assigns)
print()
print_consts(consts)
if __name__ == "__main__":
try:
runmain()
except KeyboardInterrupt:
print("\nGoodbye")
| 23.355705 | 67 | 0.611207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 396 | 0.113793 |
08d52a54cf446718a15b7b80b28b2ccd05586869 | 2,150 | py | Python | setup.py | bearroast/django-estimators | 5dd72694dab6725335214543a59104c4de504037 | [
"MIT"
]
| 46 | 2016-09-13T06:33:30.000Z | 2022-01-08T00:55:37.000Z | setup.py | bearroast/django-estimators | 5dd72694dab6725335214543a59104c4de504037 | [
"MIT"
]
| 14 | 2016-09-10T04:56:30.000Z | 2017-11-28T04:12:43.000Z | setup.py | bearroast/django-estimators | 5dd72694dab6725335214543a59104c4de504037 | [
"MIT"
]
| 19 | 2016-09-20T23:53:26.000Z | 2022-01-08T00:55:39.000Z | import os
from pip.req import parse_requirements
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements(
os.path.join(os.path.dirname(__file__), 'requirements.txt'), session=False)
reqs = [str(ir.req) for ir in install_reqs]
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-estimators',
version='0.2.1',
packages=find_packages(),
include_package_data=True,
install_requires=reqs,
license='MIT License', # example license
description='A django model to persist and track machine learning models',
long_description=README,
url='https://github.com/fridiculous/django-estimators',
author='Simon Frid',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.9',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Version Control',
],
keywords='''scikit-learn, sklearn, machine learning, artificial intelligence, ml,
ai, estimators, version, versioning, benchmark, persist, storage, track, models,
repository, evaluation, workflow'''
)
| 40.566038 | 88 | 0.670698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,292 | 0.60093 |
08d5314ae1e6b39701c18dfc2466ee45cde74ef6 | 7,517 | py | Python | ip_group.py | vectranetworks/csv-to-ip-group | f8f53f979c62c3db161fcb7fdc3b7ebb26842055 | [
"MIT"
]
| null | null | null | ip_group.py | vectranetworks/csv-to-ip-group | f8f53f979c62c3db161fcb7fdc3b7ebb26842055 | [
"MIT"
]
| null | null | null | ip_group.py | vectranetworks/csv-to-ip-group | f8f53f979c62c3db161fcb7fdc3b7ebb26842055 | [
"MIT"
]
| null | null | null | import csv
import ipaddress
import logging.handlers
import sys
import argparse
try:
import vat.vectra as vectra
import requests
except Exception as error:
print('\nMissing import requirements: {}\n'.format(str(error)))
sys.exit(0)
LOG = logging.getLogger(__name__)
INVALID_CHARS = ['~', '#', '$', '^', '+', '=', '<', '>', '?', ';']
SUB_CHAR = '_'
# Suppress Detect certificate warning
requests.packages.urllib3.disable_warnings()
def ip_subnet(subnet_string):
"""
Called with string that represents an IP subnet with CIDR or netmask in dotted decimal format
Validates string represents valid subnet and removes host bits
Returns string representation of subnet in CIDR format
:param subnet_string: string representing subnet in CIDR w.x.y.z/n or netmask w.x.y.z/aa.bb.cc.dd format
:return: returns string representation of subnet in CIDR format
"""
try:
ipaddress.IPv4Network(subnet_string)
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as error:
LOG.info('Subnet {} format error, {}'.format(subnet_string, error))
return
except ValueError as error:
LOG.info('{}, removing host bits'.format(error))
subnet = ipaddress.IPv4Network(subnet_string, strict=False)
return str(subnet)
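# Quick sanity check of ip_subnet (hypothetical inputs, not from any input file):
#   ip_subnet('10.1.1.5/24')            -> '10.1.1.0/24'  (host bits stripped)
#   ip_subnet('10.1.1.0/255.255.255.0') -> '10.1.1.0/24'  (netmask form accepted)
#   ip_subnet('10.1.1.0/33')            -> None           (error logged, caller skips)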
def sub_bad_chars(string, sub=SUB_CHAR):
"""
Substitute unsupported characters in string representing group
:param string: original string
:param sub: substitution character, default defined in SUB_CHAR
:return: returns the original string with any illegal characters substituted
"""
for bad_char in INVALID_CHARS:
string = string.replace(bad_char, sub)
return string
def group_exists(group_name, brain):
"""
Determines if group exists
Called with initialized vectra client and name of group
:param group_name: group name
:param brain: initialized Vectra Client object
:return: True if group exists, False otherwise
"""
group_iterator = brain.get_all_groups(name=group_name)
for item in group_iterator:
if item.json()['count'] > 0:
for group in item.json()['results']:
if group['name'] == group_name:
return {'name': group['name'], 'id': group['id']}
return False
def create_group(name, subnet, brain, descr=''):
"""
Creates group and adds supplied subnet, and description if supplied
:param name: group name
    :param subnet: list of CIDR subnet strings
:param brain: initialized Vectra Client object
:param descr: group description, optional
"""
if bool(descr):
brain.create_group(name=name, description=descr, type='ip', members=list(subnet))
else:
brain.create_group(name=name, type='ip', members=list(subnet))
def update_group(grp_id, subnet, brain, descr=''):
"""
Updates existing group with supplied subnet, and description if supplied
:param grp_id: group ID
    :param subnet: list of CIDR subnet strings
:param brain: initialized Vectra Client object
:param descr: group description, optional
"""
if bool(descr):
brain.update_group(group_id=grp_id, description=descr, members=subnet, append=True)
else:
brain.update_group(group_id=grp_id, members=subnet, append=True)
def obtain_args():
parser = argparse.ArgumentParser(description='Supplied with name of CSV input file, creates or updates IP groups '
'with supplied subnet information. \nCSV file format: '
'group_name,subnet,description\n\n'
'Subnet can be supplied in CIDR notation e.g. \n'
'group name,10.1.1.0/24,some description\n\n'
'or as subnet and netmask separate by a comma (,) e.g.\n'
'group name,10.1.1.1.0,255.255.255.0,some description',
prefix_chars='--', formatter_class=argparse.RawTextHelpFormatter,
epilog='')
    parser.add_argument('brain', type=str, help='Hostname or IP of Cognito Detect brain')
parser.add_argument('token', type=str, help='API token to access Cognito Detect')
parser.add_argument('file', type=str, help='Name of csv input file')
parser.add_argument('--sub_char', default=False, type=str, help='Override default invalid character '
'substitution in group names and '
'description. Default is _\n'
'May not be one of the following characters\n'
'{}'.format(str(INVALID_CHARS)))
parser.add_argument('--verbose', default=False, action='store_true', help='Verbose logging')
return parser.parse_args()
def main():
"""
Supplied with valid CSV file containing 3 or 4 columns of data, iterates over rows and creates or updates groups
Supports CSV files with following format examples with or without header row
group 1,192.168.1.0/255.255.255.0,group1 description
group 2,10.1.1.0/24,group2 description
"""
args = obtain_args()
sub_char = args.sub_char if args.sub_char else SUB_CHAR
log_level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(level=log_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if len(sys.argv) == 1:
print('Run python3 ip_group.py -h for help.')
sys.exit()
file = args.file
with open(file, newline='') as csvfile:
vc = vectra.VectraClientV2_1(url='https://' + args.brain, token=args.token, verify=False)
reader = csv.reader(csvfile)
for row in reader:
if len(row) < 3 or len(row) > 4:
LOG.info('Invalid number of columns in row, skipping')
continue
if len(row) == 4:
LOG.debug('Number of rows 4: {}'.format(len(row)))
subnet = ip_subnet('{}/{}'.format(row[1], row[2]))
description = sub_bad_chars(row[3], sub_char)
elif len(row) == 3:
LOG.debug('Number of rows 3: {}'.format(len(row)))
subnet = ip_subnet(row[1])
description = sub_bad_chars(row[2], sub_char)
group_name = sub_bad_chars(row[0], sub_char)
if subnet is not None:
"""group_obj False or {'name': 'somename', 'id':'123'}"""
group_obj = group_exists(group_name, vc)
if not group_obj:
# Group does not exist, creating
LOG.info('Group does not exist, creating. group:{}, subnet:{}, description:{}'.format(
group_name, subnet, description))
create_group(group_name, [str(subnet)], vc, description)
else:
LOG.info('Group exists, updating. group:{}, subnet:{}, description:{}'.format(
group_name, subnet, description))
update_group(group_obj['id'], [str(subnet)], vc, description)
else:
LOG.info('Invalid subnet, skipping')
if __name__ == '__main__':
main()
| 39.151042 | 118 | 0.596648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,091 | 0.411201 |
08d5fc45e5a26919b46ae56fd9e3cb2d53ede3e7 | 512 | py | Python | BasicSyntax/DataType.py | Fjaxzhy/top.kagurayayoi.learn.Python | af2ad3b7da85fb0af1668d3751c0342b16d0966f | [
"MIT"
]
| null | null | null | BasicSyntax/DataType.py | Fjaxzhy/top.kagurayayoi.learn.Python | af2ad3b7da85fb0af1668d3751c0342b16d0966f | [
"MIT"
]
| 11 | 2021-03-29T08:50:16.000Z | 2021-03-31T08:46:55.000Z | BasicSyntax/DataType.py | Fjaxzhy/top.kagurayayoi.learn.Python | af2ad3b7da85fb0af1668d3751c0342b16d0966f | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Python variables do not need a declared data type
# A variable must be assigned a value before it is used
# Variables themselves have no type; the type belongs to the object in memory
# Immutable data: Number / String / Tuple
# Mutable data: List / Dictionary / Set
# Numbers (Number)
# Integer (int)
IntNum = 100
# Floating point (float)
FloatNum = 100.10
# Boolean // True:1 False:0
BoolNum = True
# Complex number (complex)
ComplexNum = 1.00j
# String
Str = "This is a string"
# List
List = ['a', 'b', 1, 2]
# Tuple
Tup = ('a', 'b', 1, 2)
# Set
Set = {'a', 'b', 1, 2}
# Dictionary
Dict = {'key1': 'value1', 'key2': 'value2'}
| 14.628571 | 43 | 0.59375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.750755 |
08d5febbe68f3f78281ba4430f7b17df3067f244 | 3,526 | py | Python | tcex/api/tc/v3/_gen/_gen_args_abc.py | GShepherdTC/tcex | 70b1199b8bb9e63f53e2ba792489267108c909cd | [
"Apache-2.0"
]
| null | null | null | tcex/api/tc/v3/_gen/_gen_args_abc.py | GShepherdTC/tcex | 70b1199b8bb9e63f53e2ba792489267108c909cd | [
"Apache-2.0"
]
| null | null | null | tcex/api/tc/v3/_gen/_gen_args_abc.py | GShepherdTC/tcex | 70b1199b8bb9e63f53e2ba792489267108c909cd | [
"Apache-2.0"
]
| null | null | null | """Generate Docs for ThreatConnect API"""
# standard library
import importlib
import sys
from abc import ABC
from typing import Any, Optional
# first-party
from tcex.api.tc.v3._gen._gen_abc import GenerateABC
class GenerateArgsABC(GenerateABC, ABC):
"""Generate docstring for Model."""
def __init__(self, type_: Any) -> None:
"""Initialize class properties."""
super().__init__(type_)
self.type_ = self.utils.snake_string(self._type_map(type_))
@staticmethod
def _import_model(module, class_name) -> Any:
"""Import the appropriate model."""
return getattr(importlib.import_module(module), class_name)
def _prop_type(self, prop_data: dict) -> str:
"""Return the appropriate arg type."""
prop_type = None
if 'type' in prop_data:
prop_type = self._prop_type_map(prop_data.get('type'))
elif 'allOf' in prop_data and prop_data.get('allOf'):
ref = prop_data.get('allOf')[0].get('$ref')
prop_type = ref.split('/')[-1].replace('Model', '')
elif 'items' in prop_data and prop_data.get('items'):
ref = prop_data.get('items').get('$ref')
prop_type = ref.split('/')[-1].replace('Model', '')
return prop_type
@staticmethod
def _prop_type_map(prop_type: str) -> str:
"""Return hint type."""
_prop_types = {
'boolean': 'bool',
'integer': 'int',
'string': 'str',
}
return _prop_types.get(prop_type, prop_type)
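    # Illustrative (hypothetical) schema fragments for the two helpers above:
    #   _prop_type({'type': 'boolean'})                              -> 'bool'
    #   _prop_type({'allOf': [{'$ref': '#/definitions/CaseModel'}]}) -> 'Case'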
def gen_args(
self,
i1: Optional[str] = None,
i2: Optional[str] = None,
updatable: Optional[bool] = True,
) -> str:
"""Model Map"""
i1 = i1 or self.i1
i2 = i2 or self.i2
module_import_data = self._module_import_data(self.type_)
model = self._import_model(
module_import_data.get('model_module'), module_import_data.get('model_class')
)
_doc_string = [f'{i1}Args:']
# get properties from schema
schema = model().schema(by_alias=False)
if '$ref' in schema:
model_name = schema.get('$ref').split('/')[-1]
properties = schema.get('definitions').get(model_name).get('properties')
elif 'properties' in schema:
properties = schema.get('properties')
else:
print(model().schema_json(by_alias=False))
sys.exit()
# iterate over properties to build docstring
for arg, prop_data in properties.items():
# for all doc string read-only args should not be included.
if prop_data.get('read_only', False) is True:
continue
# for add_xxx method doc string non-updatable args should not be included.
if updatable is False and prop_data.get('updatable', True) is False:
continue
# get arg type
prop_type = self._prop_type(prop_data)
# arg
_arg_doc = f'{arg} ({prop_type}, kwargs)'
# description
description = prop_data.get('description')
_arg_doc = self._format_description(
arg=_arg_doc,
description=description,
length=100,
indent=' ' * len(i2),
)
# add arg to doc string
_doc_string.append(f'{i2}{_arg_doc}')
if len(_doc_string) > 1:
return '\n'.join(_doc_string)
return ''
| 33.264151 | 89 | 0.574022 | 3,313 | 0.939592 | 0 | 0 | 444 | 0.125922 | 0 | 0 | 838 | 0.237663 |
08d6edb44ef1415e69d5e8564970749ce00f431c | 382 | py | Python | rename_smpls.py | Chartiza/bulls | e4e7895a37a0335572dea50f2cbaae2737b3cd5f | [
"MIT"
]
| null | null | null | rename_smpls.py | Chartiza/bulls | e4e7895a37a0335572dea50f2cbaae2737b3cd5f | [
"MIT"
]
| null | null | null | rename_smpls.py | Chartiza/bulls | e4e7895a37a0335572dea50f2cbaae2737b3cd5f | [
"MIT"
]
| null | null | null | #!/usr/bin/python
sootv = {}
# Read the correspondence (mapping) file
for l in open ("filesootv"):
data = l.strip().split("\t")
if data[0] not in sootv:
sootv[data[0]] = data[1]
#Read FinalReport file
for l in open('Ire30_GP'):
data = l.strip().split("\t")
if data[1] in sootv:
print(data[0]+"\t"+sootv[data[1]]+"\t"+data[2]+"\t"+data[3]+"\t"+"\t"+data[4]+"\t"+data[5])
| 23.875 | 94 | 0.570681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.311518 |
08d8b9d0bef0b39a979fb0521f02328c098ccbd7 | 448 | py | Python | 3day/Quiz01_1.py | jsjang93/joony | 62f7a325094c887212b894932263bf84500e0f03 | [
"MIT"
]
| null | null | null | 3day/Quiz01_1.py | jsjang93/joony | 62f7a325094c887212b894932263bf84500e0f03 | [
"MIT"
]
| null | null | null | 3day/Quiz01_1.py | jsjang93/joony | 62f7a325094c887212b894932263bf84500e0f03 | [
"MIT"
]
| null | null | null | # Quiz01_1.py
items = {"콜라":1000,"사이다":900,"씨그램":500,"우유":700,"활명수":800}
print("=== 음료 자판기 입니다 ====")
print("[콜라][사이다][씨그램][우유][활명수] 중 선택")
print("복수 선택 시 --> 예) 사이다,우유 ")
def pItems(*args1,**args2) :
price = 0
for i in args1:
price = price + args2[i.strip()]
return price
# selected item list -> item, total price -> price
item = input() # e.g. "사이다,우유" (Cider,Milk)
items2 = item.strip().split(',')
price = pItems(*items2,**items)
print("가격 : {0} 원".format(price) ) | 22.4 | 58 | 0.564732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.486063 |
08d8e05ba83fd1eb90111af5408ae91ffdf11318 | 2,619 | py | Python | src/custom_arch/custom_alexnet.py | joeyseash/PruneTrain | 5adb367eb90b7e1e38251f8e3a8e7eb65b167aa0 | [
"Apache-2.0"
]
| 1 | 2021-10-03T00:57:32.000Z | 2021-10-03T00:57:32.000Z | src/custom_arch/custom_alexnet.py | VictorSuciu/prunetrain | ef84a88ef8a34f8e79de783ffdb9d3b82545dc3b | [
"Apache-2.0"
]
| null | null | null | src/custom_arch/custom_alexnet.py | VictorSuciu/prunetrain | ef84a88ef8a34f8e79de783ffdb9d3b82545dc3b | [
"Apache-2.0"
]
| null | null | null | """
Copyright 2019 Sangkug Lym
Copyright 2019 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from .arch_utils import layerUtil
arch = {}
arch[0] = {'name':'conv1', 'kernel_size':11, 'stride':4, 'padding':5, 'bias':True}
arch[1] = {'name':'conv2', 'kernel_size':5, 'stride':1, 'padding':2, 'bias':True}
arch[2] = {'name':'conv3', 'kernel_size':3, 'stride':1, 'padding':1, 'bias':True}
arch[3] = {'name':'conv4', 'kernel_size':3, 'stride':1, 'padding':1, 'bias':True}
arch[4] = {'name':'conv5', 'kernel_size':3, 'stride':1, 'padding':1, 'bias':True}
arch[5] = {'name':'pool', 'kernel_size':2, 'stride':2}
arch[6] = {'name':'relu'}
arch[7] = {'name':'fc', 'out_chs':'num_classes'}
def _genDenseArchAlexNet(model, out_f_dir1, out_f_dir2, arch_name, dense_chs, chs_map, is_gating=False):
# File heading
ctx = 'import torch.nn as nn\n'
ctx += '__all__ = [\'alexnet_flat\']\n'
ctx += 'class AlexNet(nn.Module):\n'
ctx += '\tdef __init__(self, num_classes=10):\n'
ctx += '\t\tsuper(AlexNet, self).__init__()\n'
lyr = layerUtil(model, dense_chs)
# Layer definition
for idx in sorted(arch):
ctx += lyr.getLayerDef(arch[idx])
ctx += '\tdef forward(self, x):\n'
ctx += lyr.forward('conv1')
ctx += lyr.forward('relu')
ctx += lyr.forward('pool')
ctx += lyr.forward('conv2')
ctx += lyr.forward('relu')
ctx += lyr.forward('pool')
ctx += lyr.forward('conv3')
ctx += lyr.forward('relu')
ctx += lyr.forward('conv4')
ctx += lyr.forward('relu')
ctx += lyr.forward('conv5')
ctx += lyr.forward('relu')
ctx += lyr.forward('pool')
ctx += '\t\tx = x.view(x.size(0), -1)\n'
  ctx += lyr.forward('fc')
ctx += '\t\treturn x\n'
# AlexNet definition
ctx += 'def alexnet_flat(**kwargs):\n'
ctx += '\tmodel = AlexNet(**kwargs)\n'
ctx += '\treturn model\n'
if not os.path.exists(out_f_dir2):
os.makedirs(out_f_dir2)
print ("[INFO] Generating a new dense architecture...")
f_out1 = open(os.path.join(out_f_dir1, 'alexnet_flat.py'),'w')
f_out1.write(ctx)
f_out2 = open(os.path.join(out_f_dir2, arch_name),'w')
f_out2.write(ctx) | 34.012987 | 104 | 0.658267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,472 | 0.562047 |
08da6b4771c11626f2b1e4199314a129e0c7bb3d | 3,089 | py | Python | app/ml/train_data.py | curioswati/scrapy-tsa | 50556880125412e0b8d925fb46c41c44dd31fb37 | [
"MIT"
]
| 2 | 2020-01-15T05:17:23.000Z | 2020-08-13T01:50:00.000Z | app/ml/train_data.py | curioswati/scrapy-tsa | 50556880125412e0b8d925fb46c41c44dd31fb37 | [
"MIT"
]
| null | null | null | app/ml/train_data.py | curioswati/scrapy-tsa | 50556880125412e0b8d925fb46c41c44dd31fb37 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import numpy
import csv
import re, nltk
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem.porter import PorterStemmer
from sklearn.linear_model import LogisticRegression
# from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
def decode_emoticons(text):
    # unused stub: overwrites its argument with a sample string and returns nothing
    text = "Sunny Again Work Tomorrow :-| TV Tonight"
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
# remove non letters
text = re.sub("[^a-zA-Z]", " ", text)
# tokenize
tokens = nltk.word_tokenize(text)
# stem
stems = stem_tokens(tokens, stemmer)
return stems
if __name__ == "__main__":
train_data = {
"text": [],
"sentiment": []
}
raw_count = 0
with open('Sentiment Analysis Dataset.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile)
headers = next(csvreader, None)
for line in csvreader:
train_data["text"].append(line[3].strip())
train_data["sentiment"].append(int(line[1]))
# raw_count += 1
# if raw_count >= 1000:
# break
raw_count = 0
with open('training.1600000.processed.noemoticon.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile)
for line in csvreader:
try:
train_data["text"].append(line[5].strip())
except Exception as e:
print e
print "line", line
print line[5]
exit(0)
if int(line[0]) == 4:
train_data["sentiment"].append(1)
else:
train_data["sentiment"].append(0)
# raw_count += 1
# if raw_count >= 1000:
# break
print train_data["text"][:3]
print train_data["sentiment"][:3]
print numpy.unique(numpy.array(train_data["sentiment"]))
print "data extracted"
# exit(0)
stemmer = PorterStemmer()
vectorizer = CountVectorizer(
analyzer = 'word',
tokenizer = tokenize,
lowercase = True,
stop_words = 'english',
max_features = 100,
encoding='utf-8'
)
print "creating corpus_data_features"
X_train_counts = vectorizer.fit_transform(train_data["text"])
# tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
# X_train_tf = tf_transformer.transform(X_train_counts)
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
print "X_train_tfidf.shape", X_train_tfidf.shape
print "training"
model = MultinomialNB().fit(X_train_tfidf, train_data["sentiment"])
joblib.dump(model, 'twitter_MultinomialNB_model.pkl', compress=1)
joblib.dump(vectorizer, 'vectorizer.pkl', compress=1)
joblib.dump(tfidf_transformer, 'tfidf_transformer.pkl', compress=1)
docs_new = ['God is love', 'OpenGL on the GPU is fast', "it was a very fantastic experience"]
X_new_counts = vectorizer.transform(docs_new)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
predicted = model.predict(X_new_tfidf)
print "predicted", predicted
print model.score(X_train_tfidf, train_data["sentiment"])
| 26.86087 | 94 | 0.717708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 874 | 0.282939 |
08da7e9d21e70b892d492b272992e48bff8c4a03 | 162 | py | Python | November/Poor Pigs.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
]
| null | null | null | November/Poor Pigs.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
]
| null | null | null | November/Poor Pigs.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
]
| null | null | null | from math import ceil, log
class Solution:
def poorPigs(self, buckets: int, minutesToDie: int, minutesToTest: int) -> int:
return ceil(log(buckets)/log(minutesToTest//minutesToDie + 1))
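    # Worked example (standard LeetCode case, for illustration): buckets=1000,
    # minutesToDie=15, minutesToTest=60 -> 60//15 = 4 test rounds, so one pig
    # distinguishes 4+1 = 5 states; the smallest x with 5**x >= 1000 is
    # ceil(log(1000)/log(5)) = 5 pigs.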
| 40.5 | 80 | 0.746914 | 161 | 0.993827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
08dc052ecc3d96e2ef3efe41624a974268f5c7b0 | 2,596 | py | Python | DIP/exercises/ex10/pca.py | apeyrard/sjtu-work | ca98fec3c83b81ed9091bdc968cb5ad8a74d1d6a | [
"MIT"
]
| 1 | 2022-03-26T10:04:05.000Z | 2022-03-26T10:04:05.000Z | DIP/exercises/ex10/pca.py | apeyrard/sjtu-work | ca98fec3c83b81ed9091bdc968cb5ad8a74d1d6a | [
"MIT"
]
| null | null | null | DIP/exercises/ex10/pca.py | apeyrard/sjtu-work | ca98fec3c83b81ed9091bdc968cb5ad8a74d1d6a | [
"MIT"
]
| 1 | 2022-03-26T10:04:06.000Z | 2022-03-26T10:04:06.000Z | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sys
import os
from PIL import Image
import numpy as np
size = None
matrix_x = None
for image in os.listdir('./washington'):
try:
print(image)
with Image.open(os.path.join('./washington',image)) as im:
imgVector = np.array(list(im.getdata()))
imgVector = imgVector.reshape(1, imgVector.shape[0])
try:
matrix_x = np.vstack((matrix_x, imgVector))
except:
matrix_x = imgVector
except FileNotFoundError as e:
sys.exit("Error : file not found")
#matrix_x = np.array([[0,1,1,1],
#[0,0,1,0],
#[0,0,0,1]
#])
#mean vector
K = matrix_x.shape[1]
print('K', K)
nb = matrix_x.shape[0]
print('nb', nb)
mx = np.zeros((nb, 1))
for x in range(K):
for y in range(nb):
mx[y] += matrix_x[y, x]
mx = mx/K
#covar matrix
cx = np.zeros((nb,nb))
for x in range(K):
tmp = (matrix_x[:,x])
tmp = tmp.reshape(tmp.shape[0],1)
cx += np.dot(tmp,tmp.T) - np.dot(mx,mx.T)
cx = cx/K
eigenvalues, eigenvectors = np.linalg.eig(cx)
#tri
eival = np.zeros(eigenvalues.shape)
eivec = np.zeros(eigenvectors.shape)
j = 0
for _ in range(nb):
maxval = eigenvalues.max()
for i in range(eigenvalues.shape[0]):
val = eigenvalues[i]
if val == maxval:
eival[j] = val
eigenvalues[i] = 0
eivec[j] = eigenvectors[i]
j += 1
break
#pruning eivec
pruning = 2
eivec = eivec[:pruning,:]
print(eivec)
matrix_y = np.zeros((pruning, matrix_x.shape[1]))
for i in range(K):
tmp = (matrix_x[:,i]).reshape(nb, 1)
truc = np.dot(eivec,(tmp-mx))
matrix_y[:, i] = truc.reshape(truc.shape[0])
#reconstruction
matrix_x2 = np.zeros(matrix_x.shape)
for i in range(K):
tmp = (matrix_y[:,i])
tmp = tmp.reshape(tmp.shape[0], 1)
matrix_x2[:, i] = np.array((np.dot(eivec.T,tmp)+mx).reshape(nb))
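# Recap of the transform implemented above (Hotelling/PCA): y = A(x - m_x), where
# A holds the top-k eigenvector rows of the covariance matrix; reconstruction is
# x_hat = A^T y + m_x, with k = `pruning`.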
def rescale(matrix):
matrix = matrix - matrix.min()
matrix = matrix * 255 / matrix.max()
return matrix
data = np.vsplit(matrix_x2, 6)
for i,item in enumerate(data):
item = list(rescale(item.reshape(item.shape[1])))
newIm = Image.new(im.mode, im.size)
newIm.putdata(item)
newIm.show()
diff = item - matrix_x[i]
epsilon = 0.1
print(diff)
for j,val in enumerate(diff):
if abs(val) < epsilon:
diff[j] = 0
print(diff)
diff = rescale(diff)
newIm = Image.new(im.mode, im.size)
newIm.putdata(list(diff))
newIm.show()
| 23.6 | 68 | 0.573575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.083975 |
08dc36bae83be55acec0ed61f76a33d11f4bb8a1 | 1,677 | py | Python | organisations/migrate-entities/script.py | jbarnes/aws-python-script-collection | bf2accf60b8c14af89fab3a210c4df6a3b2e0ba9 | [
"MIT"
]
| null | null | null | organisations/migrate-entities/script.py | jbarnes/aws-python-script-collection | bf2accf60b8c14af89fab3a210c4df6a3b2e0ba9 | [
"MIT"
]
| null | null | null | organisations/migrate-entities/script.py | jbarnes/aws-python-script-collection | bf2accf60b8c14af89fab3a210c4df6a3b2e0ba9 | [
"MIT"
]
| null | null | null | import boto3
import sys
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("[ERROR] You must pass exactly one target-id, example target-id is ou-zhz0-prn5fmbc")
sys.exit()
else:
print("[INFO] Valid argument detected, proceeding with account migration")
destination_id = str(sys.argv[1])
# Gather source ids
with open("source_ids.txt") as f:
source_ids = f.read().splitlines()
l = len(source_ids)
print("[INFO] Detected {} source id(s) to be migrated".format(l))
print("[INFO] Beginning processing of source id(s)...")
# Process the source ids for migration
client = boto3.client("organizations")
for source_id in source_ids:
print("[INFO] Now attempting to move source id: {}".format(source_id))
get_parent = client.list_parents(ChildId=source_id)
parent_id = get_parent["Parents"][0]["Id"]
try:
response = client.move_account(
AccountId=source_id, SourceParentId=parent_id, DestinationParentId=destination_id
)
print(
"[INFO] Successfully moved source id: {} to target id: {}".format(
source_id, destination_id
)
)
except client.exceptions.DuplicateAccountException:
print(
"[NOTICE] Source id: {} is already migrated to target id: {}".format(
source_id, destination_id
)
)
print("[INFO] Successfully migrated required accounts.")
| 35.680851 | 103 | 0.556947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.342874 |
08dcaa11c309d6ad11738f4ba7bc30c87f71fe32 | 274 | py | Python | templates/python.py | limacat76/Polyglot-Study | ec71186d4dfbecebf372eb11affd9b5a2b76e47a | [
"MIT"
]
| null | null | null | templates/python.py | limacat76/Polyglot-Study | ec71186d4dfbecebf372eb11affd9b5a2b76e47a | [
"MIT"
]
| null | null | null | templates/python.py | limacat76/Polyglot-Study | ec71186d4dfbecebf372eb11affd9b5a2b76e47a | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
from sys import argv
from sys import stdin
from sys import stdout
alp = len(argv)
if alp > 1 and argv[1] == "--version":
print ('version 0.1')
quit()
if alp > 1 and argv[1] == "--help":
print ('ctrl+d to quit')
quit()
print('todo')
| 17.125 | 38 | 0.605839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.277372 |
08dfdc660c21f835d66e36e8817b005006f680b0 | 1,142 | py | Python | Transcribing_DNA_into_RNA.py | hedianeposselt/ROSALIND | 6d53167e1ec98fc22992da7cd74d385a97baa870 | [
"Apache-2.0"
]
| 1 | 2022-03-29T10:17:56.000Z | 2022-03-29T10:17:56.000Z | Transcribing_DNA_into_RNA.py | hedianeposselt/ROSALIND | 6d53167e1ec98fc22992da7cd74d385a97baa870 | [
"Apache-2.0"
]
| null | null | null | Transcribing_DNA_into_RNA.py | hedianeposselt/ROSALIND | 6d53167e1ec98fc22992da7cd74d385a97baa870 | [
"Apache-2.0"
]
| null | null | null | # https://rosalind.info/problems/rna/
# Transcribing DNA into RNA exercise from ROSALIND
DNA = "ACAACAAAGGATCGGCGAGGAGCTGGTTAATCTCGATTCTAACAAAGGCCTCTTGAGTGACATAAAGTTGCTGTTCGGCCCCCGTTGCAGCCAAGCCTAGACTCGAGCGGGGTCTACCTCTGTAAACCCAAGTCGCAGGCCAAGGGCATTTTAACCCCCAAAGTTAGATACGTCGATTGAGTGCGCACTCCCTAACTTCAGACAGGATGGCGCTTAGCACTGGTTAGGTCCCTCATTAGAGGCTTACACGGGACCCCAGCGATCTGCAGGGCTACATGAACCGGCGATACCTGCAACCCTTCACGTGTGGTGCGAGTGCTGGACCCATGCACGGGCCCAAGAAGCGGGAGCACCCACGGCCTGAGCCTGTAGCTTCATACTTAGAGTAACACCTATAAGTTCTCCGTTTCACGTTATTTTACTTAACAAAGCACATCGATGGGCGGACGTACGAGCCGAGCCTCGTCCCCATTTACTCAAGTAACCAAGTCATTGTTTAGTCTATGGTAGGCTCTTTGATTGGGTACGCCGCAGCCATCCGCACACTTGCAGGGCTTTAGTCCGAACTCGTTCAAAGGGTTCGACGTACAACAGCGCCTACTAAATCCCCGCCTTGTAACGGAAGACGTGTGGGACCTCTTGAAACATCTTCGACCATACATCTCCATTTTAACAATGAAGCTGTATCAGTGGTCAGTCTTACTATGCCTGCACTCAGCAACAAGGGGCGCGATGATGTAGTCAGCGTGCCCAGATTCAGTACGGACAGTCAAGTGCGATCTTTCTGGGTCGCGCGGCTGGTGGTAATGAGAATGTTCTTACCTGACAAGTAATGCTTCTTCCAATCGTGCTGGGGGCAAGGTTTATTCTCTCTTAACCTGTTGCTCATCTCTAGCGATAACTGGTGCATGATCAATTTGCGG"
RNA = ""
for nucleotide in DNA:
if nucleotide == "T":
RNA = RNA + "U"
else:
RNA = RNA + nucleotide
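# (equivalent one-liner: RNA = DNA.replace("T", "U"))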
print( RNA ) | 81.571429 | 931 | 0.932574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,020 | 0.89317 |
08e02a9b4adc8aa43eb49a2fd41a870ebd71dbaa | 516 | py | Python | micro_admin/migrations/0010_auto_20160804_1044.py | lance0145/micro-finance | 1ba6339a9d05ff2f20b020b97a233c766b2ee6e0 | [
"MIT"
]
| 72 | 2015-09-18T07:23:20.000Z | 2022-03-23T14:35:46.000Z | micro_admin/migrations/0010_auto_20160804_1044.py | mohbadar/micro-finance | 00fc9ad1e09cd6658aa5fa0dd991cf18fe2927a6 | [
"MIT"
]
| 68 | 2015-01-03T13:44:40.000Z | 2021-06-10T20:00:23.000Z | micro_admin/migrations/0010_auto_20160804_1044.py | mohbadar/micro-finance | 00fc9ad1e09cd6658aa5fa0dd991cf18fe2927a6 | [
"MIT"
]
| 73 | 2015-02-10T07:03:42.000Z | 2022-02-24T21:11:01.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-04 10:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('micro_admin', '0009_page'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'permissions': (('branch_manager', 'Can manage all accounts under his/her branch.'), ('content_manager', 'Can add, edit, delete content.'))},
),
]
| 25.8 | 162 | 0.629845 | 367 | 0.71124 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.436047 |
08e07a97a9f3cede768ff174381cda6e3a2e9847 | 3,823 | py | Python | ProgrammersGuideExamples/provisioning.py | mrhorrible78/PyU4V | 5b9274fd6f5f80a4a6e7aa487e348fa91f6f315c | [
"MIT"
]
| null | null | null | ProgrammersGuideExamples/provisioning.py | mrhorrible78/PyU4V | 5b9274fd6f5f80a4a6e7aa487e348fa91f6f315c | [
"MIT"
]
| null | null | null | ProgrammersGuideExamples/provisioning.py | mrhorrible78/PyU4V | 5b9274fd6f5f80a4a6e7aa487e348fa91f6f315c | [
"MIT"
]
| null | null | null | # The MIT License (MIT)
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
from PyU4V import U4VConn
ru = U4VConn(u4v_version='84')
PARSER = argparse.ArgumentParser(description='This python script is a basic '
'VMAX REST recipe provisioning '
                                             'multiple sized volumes for an '
'application.\n'
'python provisioning.py -sg TEST '
'-ig initiators.txt -pg ports.txt'
' -cap 1')
RFLAGS = PARSER.add_argument_group('Required arguments')
RFLAGS.add_argument('-sg', required=True, help='Storage group name, typically '
'the application name '
'e.g. oraclefinace')
RFLAGS.add_argument('-ig', required=True, help='Filename containing initiators'
',one per line '
'e.g. 10000000c9873cae')
RFLAGS.add_argument('-pg', required=True, help='Filename containing list of '
'ports one per line, '
'e.g. FA-1D:25')
RFLAGS.add_argument('-cap', required=True, help='Capacity in GB')
# Assign parameters to command line arguments
ARGS = PARSER.parse_args()
sgname = ARGS.sg
hba_file = ARGS.ig
port_file = ARGS.pg
appname = "REST_" + sgname
sg_id = appname + "_SG"
ig_id = appname + "_IG"
pg_id = appname + "_PG"
mv_id = appname + "_MV"
requested_capacity = ARGS.cap
initiator_list = ru.common.create_list_from_file(hba_file)
def provision_storage():
if headroom_check():
sg_job = ru.provisioning.create_non_empty_storagegroup(
"SRP_1", sg_id, "Diamond", "OLTP", 1, requested_capacity, "GB", True)
# showing how async functions can be worked in.
ru.common.wait_for_job("", sg_job)
print("Storage Group Created.")
ru.provisioning.create_host(ig_id, initiator_list)
print("Host Created.")
ru.provisioning.create_portgroup_from_file(port_file, pg_id)
print("Port Group Created.")
ru.provisioning.create_masking_view_existing_components(
pg_id, mv_id, sg_id, ig_id)
print("Masking View Created.")
else:
print("Headroom Check Failed, Check array Capacity Usage")
def headroom_check():
headroom_cp = ru.common.get_headroom("OLTP")[0]["headroom"][0]["headroomCapacity"]
if int(requested_capacity) <= int(headroom_cp):
return True
else:
return False
provision_storage()
| 43.443182 | 86 | 0.625948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,888 | 0.493853 |
08e17200183b1b4c4b38978e4c91346462570f54 | 8,227 | py | Python | quickdraw-doodle-recognition/gcloud/common.py | yasserglez/kaggle_titanic | 7a4857ec9a99c31eb53a91dda3ad9ecd5b647278 | [
"MIT"
]
| 2 | 2019-09-29T02:26:58.000Z | 2020-03-06T07:38:58.000Z | quickdraw-doodle-recognition/gcloud/common.py | yasserglez/kaggle_titanic | 7a4857ec9a99c31eb53a91dda3ad9ecd5b647278 | [
"MIT"
]
| 2 | 2018-12-17T04:32:09.000Z | 2019-10-22T00:31:06.000Z | quickdraw-doodle-recognition/gcloud/common.py | yasserglez/kaggle | 7a4857ec9a99c31eb53a91dda3ad9ecd5b647278 | [
"MIT"
]
| null | null | null | import struct
import itertools
import numpy as np
from bitarray import bitarray
RANDOM_SEED = 2387613
IMAGE_SIZE = 128
BATCH_SIZE = 2048
# Assign an integer to each word to be predicted.
WORD2LABEL = {
'The Eiffel Tower': 0,
'The Great Wall of China': 1,
'The Mona Lisa': 2,
'airplane': 3,
'alarm clock': 4,
'ambulance': 5,
'angel': 6,
'animal migration': 7,
'ant': 8,
'anvil': 9,
'apple': 10,
'arm': 11,
'asparagus': 12,
'axe': 13,
'backpack': 14,
'banana': 15,
'bandage': 16,
'barn': 17,
'baseball': 19,
'baseball bat': 18,
'basket': 20,
'basketball': 21,
'bat': 22,
'bathtub': 23,
'beach': 24,
'bear': 25,
'beard': 26,
'bed': 27,
'bee': 28,
'belt': 29,
'bench': 30,
'bicycle': 31,
'binoculars': 32,
'bird': 33,
'birthday cake': 34,
'blackberry': 35,
'blueberry': 36,
'book': 37,
'boomerang': 38,
'bottlecap': 39,
'bowtie': 40,
'bracelet': 41,
'brain': 42,
'bread': 43,
'bridge': 44,
'broccoli': 45,
'broom': 46,
'bucket': 47,
'bulldozer': 48,
'bus': 49,
'bush': 50,
'butterfly': 51,
'cactus': 52,
'cake': 53,
'calculator': 54,
'calendar': 55,
'camel': 56,
'camera': 57,
'camouflage': 58,
'campfire': 59,
'candle': 60,
'cannon': 61,
'canoe': 62,
'car': 63,
'carrot': 64,
'castle': 65,
'cat': 66,
'ceiling fan': 67,
'cell phone': 68,
'cello': 69,
'chair': 70,
'chandelier': 71,
'church': 72,
'circle': 73,
'clarinet': 74,
'clock': 75,
'cloud': 76,
'coffee cup': 77,
'compass': 78,
'computer': 79,
'cookie': 80,
'cooler': 81,
'couch': 82,
'cow': 83,
'crab': 84,
'crayon': 85,
'crocodile': 86,
'crown': 87,
'cruise ship': 88,
'cup': 89,
'diamond': 90,
'dishwasher': 91,
'diving board': 92,
'dog': 93,
'dolphin': 94,
'donut': 95,
'door': 96,
'dragon': 97,
'dresser': 98,
'drill': 99,
'drums': 100,
'duck': 101,
'dumbbell': 102,
'ear': 103,
'elbow': 104,
'elephant': 105,
'envelope': 106,
'eraser': 107,
'eye': 108,
'eyeglasses': 109,
'face': 110,
'fan': 111,
'feather': 112,
'fence': 113,
'finger': 114,
'fire hydrant': 115,
'fireplace': 116,
'firetruck': 117,
'fish': 118,
'flamingo': 119,
'flashlight': 120,
'flip flops': 121,
'floor lamp': 122,
'flower': 123,
'flying saucer': 124,
'foot': 125,
'fork': 126,
'frog': 127,
'frying pan': 128,
'garden': 130,
'garden hose': 129,
'giraffe': 131,
'goatee': 132,
'golf club': 133,
'grapes': 134,
'grass': 135,
'guitar': 136,
'hamburger': 137,
'hammer': 138,
'hand': 139,
'harp': 140,
'hat': 141,
'headphones': 142,
'hedgehog': 143,
'helicopter': 144,
'helmet': 145,
'hexagon': 146,
'hockey puck': 147,
'hockey stick': 148,
'horse': 149,
'hospital': 150,
'hot air balloon': 151,
'hot dog': 152,
'hot tub': 153,
'hourglass': 154,
'house': 156,
'house plant': 155,
'hurricane': 157,
'ice cream': 158,
'jacket': 159,
'jail': 160,
'kangaroo': 161,
'key': 162,
'keyboard': 163,
'knee': 164,
'ladder': 165,
'lantern': 166,
'laptop': 167,
'leaf': 168,
'leg': 169,
'light bulb': 170,
'lighthouse': 171,
'lightning': 172,
'line': 173,
'lion': 174,
'lipstick': 175,
'lobster': 176,
'lollipop': 177,
'mailbox': 178,
'map': 179,
'marker': 180,
'matches': 181,
'megaphone': 182,
'mermaid': 183,
'microphone': 184,
'microwave': 185,
'monkey': 186,
'moon': 187,
'mosquito': 188,
'motorbike': 189,
'mountain': 190,
'mouse': 191,
'moustache': 192,
'mouth': 193,
'mug': 194,
'mushroom': 195,
'nail': 196,
'necklace': 197,
'nose': 198,
'ocean': 199,
'octagon': 200,
'octopus': 201,
'onion': 202,
'oven': 203,
'owl': 204,
'paint can': 205,
'paintbrush': 206,
'palm tree': 207,
'panda': 208,
'pants': 209,
'paper clip': 210,
'parachute': 211,
'parrot': 212,
'passport': 213,
'peanut': 214,
'pear': 215,
'peas': 216,
'pencil': 217,
'penguin': 218,
'piano': 219,
'pickup truck': 220,
'picture frame': 221,
'pig': 222,
'pillow': 223,
'pineapple': 224,
'pizza': 225,
'pliers': 226,
'police car': 227,
'pond': 228,
'pool': 229,
'popsicle': 230,
'postcard': 231,
'potato': 232,
'power outlet': 233,
'purse': 234,
'rabbit': 235,
'raccoon': 236,
'radio': 237,
'rain': 238,
'rainbow': 239,
'rake': 240,
'remote control': 241,
'rhinoceros': 242,
'river': 243,
'roller coaster': 244,
'rollerskates': 245,
'sailboat': 246,
'sandwich': 247,
'saw': 248,
'saxophone': 249,
'school bus': 250,
'scissors': 251,
'scorpion': 252,
'screwdriver': 253,
'sea turtle': 254,
'see saw': 255,
'shark': 256,
'sheep': 257,
'shoe': 258,
'shorts': 259,
'shovel': 260,
'sink': 261,
'skateboard': 262,
'skull': 263,
'skyscraper': 264,
'sleeping bag': 265,
'smiley face': 266,
'snail': 267,
'snake': 268,
'snorkel': 269,
'snowflake': 270,
'snowman': 271,
'soccer ball': 272,
'sock': 273,
'speedboat': 274,
'spider': 275,
'spoon': 276,
'spreadsheet': 277,
'square': 278,
'squiggle': 279,
'squirrel': 280,
'stairs': 281,
'star': 282,
'steak': 283,
'stereo': 284,
'stethoscope': 285,
'stitches': 286,
'stop sign': 287,
'stove': 288,
'strawberry': 289,
'streetlight': 290,
'string bean': 291,
'submarine': 292,
'suitcase': 293,
'sun': 294,
'swan': 295,
'sweater': 296,
'swing set': 297,
'sword': 298,
't-shirt': 299,
'table': 300,
'teapot': 301,
'teddy-bear': 302,
'telephone': 303,
'television': 304,
'tennis racquet': 305,
'tent': 306,
'tiger': 307,
'toaster': 308,
'toe': 309,
'toilet': 310,
'tooth': 311,
'toothbrush': 312,
'toothpaste': 313,
'tornado': 314,
'tractor': 315,
'traffic light': 316,
'train': 317,
'tree': 318,
'triangle': 319,
'trombone': 320,
'truck': 321,
'trumpet': 322,
'umbrella': 323,
'underwear': 324,
'van': 325,
'vase': 326,
'violin': 327,
'washing machine': 328,
'watermelon': 329,
'waterslide': 330,
'whale': 331,
'wheel': 332,
'windmill': 333,
'wine bottle': 334,
'wine glass': 335,
'wristwatch': 336,
'yoga': 337,
'zebra': 338,
'zigzag': 339,
}
LABEL2WORD = {label: word for word, label in WORD2LABEL.items()}
def pack_example(image, label, fout):
    # Bit-pack a binary {0, 1} image and append its label as a 2-byte unsigned short.
    image_as_bits = bitarray(image.flatten().tolist())
    fout.write(image_as_bits.tobytes())
    fout.write(struct.pack('H', label))
def unpack_example(fin):
    # Inverse of pack_example: read one bit-packed image plus its 2-byte label.
    image_size = IMAGE_SIZE * IMAGE_SIZE // 8  # payload size in bytes
    image_as_bits = bitarray()
    image_as_bits.fromfile(fin, image_size)
    image_as_bytes = np.frombuffer(image_as_bits.tobytes(), count=image_size, dtype=np.uint8)
    image = np.unpackbits(image_as_bytes).astype(np.float32).reshape(IMAGE_SIZE, IMAGE_SIZE, 1)
    label, = struct.unpack('H', fin.read(2))
    return {'image': image, 'label': label}
def unpack_examples(fin):
    # Yield examples until the file is exhausted (bitarray raises EOFError on a
    # short read; struct.error covers a truncated trailing label).
    while True:
        try:
            yield unpack_example(fin)
        except (EOFError, struct.error):
            break
# Adapted from https://docs.python.org/3/library/itertools.html#recipes
def roundrobin(iterables):
    # Yield items from each iterable in turn, skipping exhausted ones.
    num_active = len(iterables)
    nexts = itertools.cycle(iter(it).__next__ for it in iterables)
    while num_active:
        try:
            for nxt in nexts:  # nxt avoids shadowing the built-in next()
                yield nxt()
        except StopIteration:
            # Remove the iterator we just exhausted from the cycle.
            num_active -= 1
            nexts = itertools.cycle(itertools.islice(nexts, num_active))
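# Minimal usage sketch, assuming shards were written with pack_example above;
# the shard paths below are hypothetical placeholders.
def _demo_read_shards(shard_paths=('train_0.bin', 'train_1.bin')):
    fins = [open(p, 'rb') for p in shard_paths]
    try:
        # Interleave examples from all shards until every shard is exhausted.
        for example in roundrobin([unpack_examples(f) for f in fins]):
            print(example['image'].shape, LABEL2WORD[example['label']])
    finally:
        for f in fins:
            f.close()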
# === feature.py (repo: TimothyChen225/AFC-X, MIT license) ===
from Bio import SeqIO
import numpy as np
import warnings
import math
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models import Word2Vec
Max_length = 100 # maximum length of used peptides
def check_length(file):
length = []
global Max_length
with open(file) as f:
for i in f:
if i[0] != ">":
length.append(len(i))
temp_max = max(length)
if temp_max > Max_length:
Max_length = temp_max
def add(x, i):
x_copy = x.copy()
x_copy[i] = 1
return x_copy
def BLOSUM62(seq):
blosum62 = {
'A': [4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0], # A
'R': [-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3], # R
'N': [-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3], # N
'D': [-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3], # D
'C': [0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1], # C
'Q': [-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2], # Q
'E': [-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2], # E
'G': [0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3], # G
'H': [-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3], # H
'I': [-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3], # I
'L': [-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1], # L
'K': [-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2], # K
'M': [-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1], # M
'F': [-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1], # F
'P': [-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2], # P
'S': [1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2], # S
'T': [0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0], # T
'W': [-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3], # W
'Y': [-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1], # Y
'V': [0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4], # V
'-': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # -
}
pad_len = Max_length - len(seq)
seqs = []
for aa in seq:
seqs.append(blosum62[aa])
for _ in range(pad_len):
seqs.append(blosum62['-'])
return seqs
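# Minimal usage sketch: BLOSUM62 yields one 20-dim substitution-score row per
# residue and zero-pads the result up to the global Max_length.
def _demo_blosum62():
    mat = BLOSUM62("ACDEFGHIK")
    assert len(mat) == Max_length and len(mat[0]) == 20
    return mat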
def Count(aaSet, sequence):
number = 0
for aa in sequence:
if aa in aaSet:
number = number + 1
cutoffNums = [1, math.floor(0.25 * number), math.floor(0.50 * number), math.floor(0.75 * number), number]
cutoffNums = [i if i >= 1 else 1 for i in cutoffNums]
code = []
for cutoff in cutoffNums:
myCount = 0
for i in range(len(sequence)):
if sequence[i] in aaSet:
myCount += 1
if myCount == cutoff:
code.append((i + 1) / len(sequence) * Max_length)
break
if myCount == 0:
code.append(0)
return code
def CTDD(seq):
group1 = {
'hydrophobicity_PRAM900101': 'RKEDQN',
'hydrophobicity_ARGP820101': 'QSTNGDE',
'hydrophobicity_ZIMJ680101': 'QNGSWTDERA',
'hydrophobicity_PONP930101': 'KPDESNQT',
'hydrophobicity_CASG920101': 'KDEQPSRNTG',
'hydrophobicity_ENGD860101': 'RDKENQHYP',
'hydrophobicity_FASG890101': 'KERSQD',
'normwaalsvolume': 'GASTPDC',
'polarity': 'LIFWCMVY',
'polarizability': 'GASDT',
'charge': 'KR',
'secondarystruct': 'EALMQKRH',
'solventaccess': 'ALFCGIVW'
}
group2 = {
'hydrophobicity_PRAM900101': 'GASTPHY',
'hydrophobicity_ARGP820101': 'RAHCKMV',
'hydrophobicity_ZIMJ680101': 'HMCKV',
'hydrophobicity_PONP930101': 'GRHA',
'hydrophobicity_CASG920101': 'AHYMLV',
'hydrophobicity_ENGD860101': 'SGTAW',
'hydrophobicity_FASG890101': 'NTPG',
'normwaalsvolume': 'NVEQIL',
'polarity': 'PATGS',
'polarizability': 'CPNVEQIL',
'charge': 'ANCQGHILMFPSTWYV',
'secondarystruct': 'VIYCWFT',
'solventaccess': 'RKQEND'
}
group3 = {
'hydrophobicity_PRAM900101': 'CLVIMFW',
'hydrophobicity_ARGP820101': 'LYPFIW',
'hydrophobicity_ZIMJ680101': 'LPFYI',
'hydrophobicity_PONP930101': 'YMFWLCVI',
'hydrophobicity_CASG920101': 'FIWC',
'hydrophobicity_ENGD860101': 'CVLIMF',
'hydrophobicity_FASG890101': 'AYHWVMFLIC',
'normwaalsvolume': 'MHKFRYW',
'polarity': 'HQRKNED',
'polarizability': 'KMHFRYW',
'charge': 'DE',
'secondarystruct': 'GNPSD',
'solventaccess': 'MSPTHY'
}
groups = [group1, group2, group3]
property = (
'hydrophobicity_PRAM900101', 'hydrophobicity_ARGP820101', 'hydrophobicity_ZIMJ680101',
'hydrophobicity_PONP930101',
'hydrophobicity_CASG920101', 'hydrophobicity_ENGD860101', 'hydrophobicity_FASG890101', 'normwaalsvolume',
'polarity', 'polarizability', 'charge', 'secondarystruct', 'solventaccess')
encodings = []
code = []
for p in property:
code = code + Count(group1[p], seq) + Count(group2[p], seq) + Count(group3[p], seq)
encodings.append(code)
return encodings
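# Minimal usage sketch: CTDD emits 5 distribution values per residue group, for
# 3 groups of each of the 13 properties, i.e. 13 * 3 * 5 = 195 features.
def _demo_ctdd():
    enc = CTDD("ACDEFGHIKLMNPQRSTVWY")
    assert len(enc[0]) == 195
    return enc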
def DPC(seq):
AA = 'ACDEFGHIKLMNPQRSTVWY'
encodings = []
AADict = {}
for i in range(len(AA)):
AADict[AA[i]] = i
code = []
tmpCode = [0] * 400
for j in range(len(seq) - 2 + 1):
tmpCode[AADict[seq[j]] * 20 + AADict[seq[j + 1]]] = tmpCode[AADict[seq[j]] * 20 + AADict[
seq[j + 1]]] + 1
if sum(tmpCode) != 0:
tmpCode = [i / sum(tmpCode) for i in tmpCode]
code = code + tmpCode
encodings.append(code)
return encodings
def AAC(seq):
AA = 'ACDEFGHIKLMNPQRSTVWY'
# AA = 'ARNDCQEGHILKMFPSTWYV'
encodings = []
count = Counter(seq)
for key in count:
count[key] = count[key] / len(seq)
code = []
for aa in AA:
code.append(count[aa])
encodings.append(code)
return encodings
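# Minimal usage sketch: AAC is the 20-dim amino-acid composition; for a
# gap-free sequence the fractions sum to 1.
def _demo_aac():
    enc = AAC("ACDEFGHIK")
    assert len(enc[0]) == 20 and abs(sum(enc[0]) - 1.0) < 1e-9
    return enc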
def ZSCALE(seq):
zscale = {
'A': [0.24, -2.32, 0.60, -0.14, 1.30], # A
'C': [0.84, -1.67, 3.71, 0.18, -2.65], # C
'D': [3.98, 0.93, 1.93, -2.46, 0.75], # D
'E': [3.11, 0.26, -0.11, -0.34, -0.25], # E
'F': [-4.22, 1.94, 1.06, 0.54, -0.62], # F
'G': [2.05, -4.06, 0.36, -0.82, -0.38], # G
'H': [2.47, 1.95, 0.26, 3.90, 0.09], # H
'I': [-3.89, -1.73, -1.71, -0.84, 0.26], # I
'K': [2.29, 0.89, -2.49, 1.49, 0.31], # K
'L': [-4.28, -1.30, -1.49, -0.72, 0.84], # L
'M': [-2.85, -0.22, 0.47, 1.94, -0.98], # M
'N': [3.05, 1.62, 1.04, -1.15, 1.61], # N
'P': [-1.66, 0.27, 1.84, 0.70, 2.00], # P
'Q': [1.75, 0.50, -1.44, -1.34, 0.66], # Q
'R': [3.52, 2.50, -3.50, 1.99, -0.17], # R
'S': [2.39, -1.07, 1.15, -1.39, 0.67], # S
'T': [0.75, -2.18, -1.12, -1.46, -0.40], # T
'V': [-2.59, -2.64, -1.54, -0.85, -0.02], # V
'W': [-4.36, 3.94, 0.59, 3.44, -1.59], # W
'Y': [-2.54, 2.44, 0.43, 0.04, -1.47], # Y
'-': [0.00, 0.00, 0.00, 0.00, 0.00], # -
}
encodings = []
    # Note: ZSCALE pre-pads with the gap row, unlike BLOSUM62 which pads after the sequence.
code = []
for _ in range(Max_length - len(seq)):
code = code + zscale['-']
for aa in seq:
code = code + zscale[aa]
encodings.append(code)
return encodings
def TPC(seq):
AA = 'ACDEFGHIKLMNPQRSTVWY'
encodings = []
AADict = {}
for i in range(len(AA)):
AADict[AA[i]] = i
code = []
tmpCode = [0] * 8000
for j in range(len(seq) - 3 + 1):
tmpCode[AADict[seq[j]] * 400 + AADict[seq[j + 1]] * 20 + AADict[seq[j + 2]]] = tmpCode[AADict[seq[j]] * 400 +
AADict[seq[j + 1]] * 20 +
AADict[seq[j + 2]]] + 1
if sum(tmpCode) != 0:
tmpCode = [i / sum(tmpCode) for i in tmpCode]
code = code + tmpCode
encodings.append(code)
return encodings
def DDE(seq):
AA = 'ACDEFGHIKLMNPQRSTVWY'
myCodons = {
'A': 4,
'C': 2,
'D': 2,
'E': 2,
'F': 2,
'G': 4,
'H': 2,
'I': 3,
'K': 2,
'L': 6,
'M': 1,
'N': 2,
'P': 4,
'Q': 2,
'R': 6,
'S': 6,
'T': 4,
'V': 4,
'W': 1,
'Y': 2
}
encodings = []
diPeptides = [aa1 + aa2 for aa1 in AA for aa2 in AA]
myTM = []
for pair in diPeptides:
myTM.append((myCodons[pair[0]] / 61) * (myCodons[pair[1]] / 61))
AADict = {}
for i in range(len(AA)):
AADict[AA[i]] = i
code = []
tmpCode = [0] * 400
for j in range(len(seq) - 2 + 1):
tmpCode[AADict[seq[j]] * 20 + AADict[seq[j + 1]]] = tmpCode[AADict[seq[j]] * 20 + AADict[
seq[j + 1]]] + 1
if sum(tmpCode) != 0:
tmpCode = [i / sum(tmpCode) for i in tmpCode]
myTV = []
for j in range(len(myTM)):
myTV.append(myTM[j] * (1 - myTM[j]) / (len(seq) - 1))
for j in range(len(tmpCode)):
tmpCode[j] = (tmpCode[j] - myTM[j]) / math.sqrt(myTV[j])
code = code + tmpCode
encodings.append(code)
return encodings
def CalculateKSCTriad(sequence, gap, features, AADict):
    # For each spacing g in [0, gap], count every conjoint triad
    # (i, i+g+1, i+2g+2) and min-max normalise the counts per spacing.
    res = []
    for g in range(gap + 1):
        myDict = {}
        for f in features:
            myDict[f] = 0
        for i in range(len(sequence)):
            if i + g + 1 < len(sequence) and i + 2 * g + 2 < len(sequence):
                fea = AADict[sequence[i]] + '.' + AADict[sequence[i + g + 1]] + '.' + AADict[sequence[i + 2 * g + 2]]
                myDict[fea] = myDict[fea] + 1
        maxValue, minValue = max(myDict.values()), min(myDict.values())
        for f in features:
            res.append((myDict[f] - minValue) / maxValue)
    return res
def CTriad(seq):
AAGroup = {
'g1': 'AGV',
'g2': 'ILFP',
'g3': 'YMTS',
'g4': 'HNQW',
'g5': 'RK',
'g6': 'DE',
'g7': 'C'
}
myGroups = sorted(AAGroup.keys())
AADict = {}
for g in myGroups:
for aa in AAGroup[g]:
AADict[aa] = g
features = [f1 + '.' + f2 + '.' + f3 for f1 in myGroups for f2 in myGroups for f3 in myGroups]
encodings = []
code = []
if len(seq) < 3:
print('Error: for "CTriad" encoding, the input fasta sequences should be greater than 3. \n\n')
return 0
code = code + CalculateKSCTriad(seq, 0, features, AADict)
encodings.append(code)
return encodings
def KSCTriad(seq, gap=1):
AAGroup = {
'g1': 'AGV',
'g2': 'ILFP',
'g3': 'YMTS',
'g4': 'HNQW',
'g5': 'RK',
'g6': 'DE',
'g7': 'C'
}
myGroups = sorted(AAGroup.keys())
AADict = {}
for g in myGroups:
for aa in AAGroup[g]:
AADict[aa] = g
features = [f1 + '.' + f2 + '.' + f3 for f1 in myGroups for f2 in myGroups for f3 in myGroups]
encodings = []
code = []
if len(seq) < 2 * gap + 3:
print('Error: for "KSCTriad" encoding, the input fasta sequences should be greater than (2*gap+3). \n\n')
return 0
code = code + CalculateKSCTriad(seq, gap, features, AADict)
encodings.append(code)
return encodings
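# Minimal usage sketch: CTriad gives 7 ** 3 = 343 conjoint-triad features and
# KSCTriad with a k-gap gives 343 * (k + 1) of them.
def _demo_triads():
    assert len(CTriad("ACDEFGHIKLMN")[0]) == 343
    assert len(KSCTriad("ACDEFGHIKLMN", gap=1)[0]) == 686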
def GTPC(seq):
group = {
'alphaticr': 'GAVLMI',
'aromatic': 'FYW',
'postivecharger': 'KRH',
'negativecharger': 'DE',
'uncharger': 'STCPNQ'
}
groupKey = group.keys()
triple = [g1 + '.' + g2 + '.' + g3 for g1 in groupKey for g2 in groupKey for g3 in groupKey]
index = {}
for key in groupKey:
for aa in group[key]:
index[aa] = key
encodings = []
code = []
myDict = {}
for t in triple:
myDict[t] = 0
sum = 0
for j in range(len(seq) - 3 + 1):
myDict[index[seq[j]] + '.' + index[seq[j + 1]] + '.' + index[seq[j + 2]]] = myDict[index[seq[j]] + '.' + index[
seq[j + 1]] + '.' + index[seq[j + 2]]] + 1
sum = sum + 1
if sum == 0:
for t in triple:
code.append(0)
else:
for t in triple:
code.append(myDict[t] / sum)
encodings.append(code)
return encodings
def generateGroupPairs(groupKey):
gPair = {}
for key1 in groupKey:
for key2 in groupKey:
gPair[key1 + '.' + key2] = 0
return gPair
def CKSAAGP(seq, gap=2):
if gap < 0:
print('Error: the gap should be equal or greater than zero' + '\n\n')
return 0
group = {
'alphaticr': 'GAVLMI',
'aromatic': 'FYW',
'postivecharger': 'KRH',
'negativecharger': 'DE',
'uncharger': 'STCPNQ'
}
AA = 'ARNDCQEGHILKMFPSTWYV'
groupKey = group.keys()
index = {}
for key in groupKey:
for aa in group[key]:
index[aa] = key
gPairIndex = []
for key1 in groupKey:
for key2 in groupKey:
gPairIndex.append(key1 + '.' + key2)
encodings = []
code = []
for g in range(gap + 1):
gPair = generateGroupPairs(groupKey)
sum = 0
for p1 in range(len(seq)):
p2 = p1 + g + 1
if p2 < len(seq) and seq[p1] in AA and seq[p2] in AA:
gPair[index[seq[p1]] + '.' + index[seq[p2]]] = gPair[index[seq[p1]] + '.' + index[
seq[p2]]] + 1
sum = sum + 1
if sum == 0:
for gp in gPairIndex:
code.append(0)
else:
for gp in gPairIndex:
code.append(gPair[gp] / sum)
encodings.append(code)
return encodings
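# Minimal usage sketch: CKSAAGP counts the 5 * 5 residue-group pairs at each
# spacing 0..gap, so gap=2 yields 25 * 3 = 75 features.
def _demo_cksaagp():
    assert len(CKSAAGP("ACDEFGHIK", gap=2)[0]) == 75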
def GAAC(seq):
group = {
'alphatic': 'GAVLMI',
'aromatic': 'FYW',
'postivecharge': 'KRH',
'negativecharge': 'DE',
'uncharge': 'STCPNQ'
}
groupKey = group.keys()
encodings = []
code = []
count = Counter(seq)
myDict = {}
for key in groupKey:
for aa in group[key]:
myDict[key] = myDict.get(key, 0) + count[aa]
for key in groupKey:
code.append(myDict[key] / len(seq))
encodings.append(code)
return encodings
def GDPC(seq):
group = {
'alphaticr': 'GAVLMI',
'aromatic': 'FYW',
'postivecharger': 'KRH',
'negativecharger': 'DE',
'uncharger': 'STCPNQ'
}
groupKey = group.keys()
dipeptide = [g1 + '.' + g2 for g1 in groupKey for g2 in groupKey]
index = {}
for key in groupKey:
for aa in group[key]:
index[aa] = key
encodings = []
code = []
myDict = {}
for t in dipeptide:
myDict[t] = 0
sum = 0
for j in range(len(seq) - 2 + 1):
myDict[index[seq[j]] + '.' + index[seq[j + 1]]] = myDict[index[seq[j]] + '.' + index[
seq[j + 1]]] + 1
sum = sum + 1
if sum == 0:
for t in dipeptide:
code.append(0)
else:
for t in dipeptide:
code.append(myDict[t] / sum)
encodings.append(code)
return encodings
def AAINDEX(seq):
temp = "-" * (Max_length - len(seq))
seq += temp
AA = 'ARNDCQEGHILKMFPSTWYV'
    fileAAindex = "data/AAindex1.txt"  # forward slashes work on both Windows and POSIX
with open(fileAAindex) as f:
records = f.readlines()[1:]
AAindex = []
AAindexName = []
for i in records:
AAindex.append(i.rstrip().split()[1:] if i.rstrip() != '' else None)
AAindexName.append(i.rstrip().split()[0] if i.rstrip() != '' else None)
index = {}
for i in range(len(AA)):
index[AA[i]] = i
encodings = []
code = []
    for aa in seq:
        if aa == '-':
            for j in AAindex:
                code.append(0)
            continue
        for j in AAindex:
            code.append(float(j[index[aa]]))  # values are parsed from text, so cast to float
encodings.append(code)
return encodings
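# Minimal usage sketch, assuming the bundled data/AAindex1.txt table is present;
# AAINDEX emits one value per AAindex entry for each of the Max_length positions.
def _demo_aaindex():
    try:
        enc = AAINDEX("ACDEFGHIK")
    except FileNotFoundError:
        return None  # data table not available in this environment
    assert len(enc[0]) % Max_length == 0
    return enc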
def CTDT(seq):
group1 = {
'hydrophobicity_PRAM900101': 'RKEDQN',
'hydrophobicity_ARGP820101': 'QSTNGDE',
'hydrophobicity_ZIMJ680101': 'QNGSWTDERA',
'hydrophobicity_PONP930101': 'KPDESNQT',
'hydrophobicity_CASG920101': 'KDEQPSRNTG',
'hydrophobicity_ENGD860101': 'RDKENQHYP',
'hydrophobicity_FASG890101': 'KERSQD',
'normwaalsvolume': 'GASTPDC',
'polarity': 'LIFWCMVY',
'polarizability': 'GASDT',
'charge': 'KR',
'secondarystruct': 'EALMQKRH',
'solventaccess': 'ALFCGIVW'
}
group2 = {
'hydrophobicity_PRAM900101': 'GASTPHY',
'hydrophobicity_ARGP820101': 'RAHCKMV',
'hydrophobicity_ZIMJ680101': 'HMCKV',
'hydrophobicity_PONP930101': 'GRHA',
'hydrophobicity_CASG920101': 'AHYMLV',
'hydrophobicity_ENGD860101': 'SGTAW',
'hydrophobicity_FASG890101': 'NTPG',
'normwaalsvolume': 'NVEQIL',
'polarity': 'PATGS',
'polarizability': 'CPNVEQIL',
'charge': 'ANCQGHILMFPSTWYV',
'secondarystruct': 'VIYCWFT',
'solventaccess': 'RKQEND'
}
group3 = {
'hydrophobicity_PRAM900101': 'CLVIMFW',
'hydrophobicity_ARGP820101': 'LYPFIW',
'hydrophobicity_ZIMJ680101': 'LPFYI',
'hydrophobicity_PONP930101': 'YMFWLCVI',
'hydrophobicity_CASG920101': 'FIWC',
'hydrophobicity_ENGD860101': 'CVLIMF',
'hydrophobicity_FASG890101': 'AYHWVMFLIC',
'normwaalsvolume': 'MHKFRYW',
'polarity': 'HQRKNED',
'polarizability': 'KMHFRYW',
'charge': 'DE',
'secondarystruct': 'GNPSD',
'solventaccess': 'MSPTHY'
}
groups = [group1, group2, group3]
property = (
'hydrophobicity_PRAM900101', 'hydrophobicity_ARGP820101', 'hydrophobicity_ZIMJ680101',
'hydrophobicity_PONP930101',
'hydrophobicity_CASG920101', 'hydrophobicity_ENGD860101', 'hydrophobicity_FASG890101', 'normwaalsvolume',
'polarity', 'polarizability', 'charge', 'secondarystruct', 'solventaccess')
encodings = []
code = []
aaPair = [seq[j:j + 2] for j in range(len(seq) - 1)]
for p in property:
c1221, c1331, c2332 = 0, 0, 0
for pair in aaPair:
if (pair[0] in group1[p] and pair[1] in group2[p]) or (pair[0] in group2[p] and pair[1] in group1[p]):
c1221 = c1221 + 1
continue
if (pair[0] in group1[p] and pair[1] in group3[p]) or (pair[0] in group3[p] and pair[1] in group1[p]):
c1331 = c1331 + 1
continue
if (pair[0] in group2[p] and pair[1] in group3[p]) or (pair[0] in group3[p] and pair[1] in group2[p]):
c2332 = c2332 + 1
code = code + [c1221 / len(aaPair), c1331 / len(aaPair), c2332 / len(aaPair)]
encodings.append(code)
return encodings
def Geary(seq, props=['CIDH920105', 'BHAR880101', 'CHAM820101', 'CHAM820102',
'CHOC760101', 'BIGC670101', 'CHAM810101', 'DAYM780201'],
nlag=2):
AA = 'ARNDCQEGHILKMFPSTWYV'
    fileAAidx = "data/AAidx.txt"  # forward slashes work on both Windows and POSIX
with open(fileAAidx) as f:
records = f.readlines()[1:]
myDict = {}
for i in records:
array = i.rstrip().split('\t')
myDict[array[0]] = array[1:]
AAidx = []
AAidxName = []
for i in props:
if i in myDict:
AAidx.append(myDict[i])
AAidxName.append(i)
else:
print('"' + i + '" properties not exist.')
return None
AAidx1 = np.array([float(j) for i in AAidx for j in i])
AAidx = AAidx1.reshape((len(AAidx), 20))
propMean = np.mean(AAidx, axis=1)
propStd = np.std(AAidx, axis=1)
for i in range(len(AAidx)):
for j in range(len(AAidx[i])):
AAidx[i][j] = (AAidx[i][j] - propMean[i]) / propStd[i]
index = {}
for i in range(len(AA)):
index[AA[i]] = i
encodings = []
code = []
N = len(seq)
for prop in range(len(props)):
xmean = sum([AAidx[prop][index[aa]] for aa in seq]) / N
for n in range(1, nlag + 1):
if len(seq) > nlag:
# if key is '-', then the value is 0
rn = (N - 1) / (2 * (N - n)) * ((sum(
[(AAidx[prop][index.get(seq[j], 0)] - AAidx[prop][index.get(seq[j + n], 0)]) ** 2 for
j in range(len(seq) - n)])) / (sum(
[(AAidx[prop][index.get(seq[j], 0)] - xmean) ** 2 for j in range(len(seq))])))
else:
rn = 'NA'
code.append(rn)
encodings.append(code)
return encodings
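# Minimal usage sketch, assuming data/AAidx.txt is present; with the default 8
# properties and nlag=2, Geary returns 8 * 2 = 16 autocorrelation values.
def _demo_geary():
    try:
        enc = Geary("ACDEFGHIKLMN")
    except FileNotFoundError:
        return None  # data table not available in this environment
    if enc is None:  # a requested property was missing from the table
        return None
    assert len(enc[0]) == 16
    return enc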
def CKSAAP(seq, gap=2):
if gap < 0:
print('Error: the gap should be equal or greater than zero' + '\n\n')
return 0
AA = 'ACDEFGHIKLMNPQRSTVWY'
encodings = []
aaPairs = []
for aa1 in AA:
for aa2 in AA:
aaPairs.append(aa1 + aa2)
code = []
    for g in range(gap + 1):
        myDict = {}
        for pair in aaPairs:
            myDict[pair] = 0
        total = 0
        for index1 in range(len(seq)):
            index2 = index1 + g + 1
            if index2 < len(seq) and seq[index1] in AA and seq[index2] in AA:
                myDict[seq[index1] + seq[index2]] = myDict[seq[index1] + seq[index2]] + 1
                total = total + 1
        for pair in aaPairs:
            # guard against division by zero when the sequence is shorter than g + 2
            code.append(myDict[pair] / total if total else 0)
    encodings.append(code)
    return encodings
def Rvalue(aa1, aa2, AADict, Matrix):
return sum([(Matrix[i][AADict[aa1]] - Matrix[i][AADict[aa2]]) ** 2 for i in range(len(Matrix))]) / len(Matrix)
def PAAC(seq, lambdaValue=3, w=0.05):
    dataFile = 'data/PAAC.txt'  # forward slash avoids the invalid '\P' escape sequence
with open(dataFile) as f:
records = f.readlines()
AA = ''.join(records[0].rstrip().split()[1:])
AADict = {}
for i in range(len(AA)):
AADict[AA[i]] = i
AAProperty = []
AAPropertyNames = []
for i in range(1, len(records)):
array = records[i].rstrip().split() if records[i].rstrip() != '' else None
AAProperty.append([float(j) for j in array[1:]])
AAPropertyNames.append(array[0])
AAProperty1 = []
for i in AAProperty:
meanI = sum(i) / 20
fenmu = math.sqrt(sum([(j - meanI) ** 2 for j in i]) / 20)
AAProperty1.append([(j - meanI) / fenmu for j in i])
encodings = []
code = []
theta = []
for n in range(1, lambdaValue + 1):
theta.append(
sum([Rvalue(seq[j], seq[j + n], AADict, AAProperty1) for j in range(len(seq) - n)]) / (
len(seq) - n))
myDict = {}
for aa in AA:
myDict[aa] = seq.count(aa)
code = code + [myDict[aa] / (1 + w * sum(theta)) for aa in AA]
code = code + [(w * j) / (1 + w * sum(theta)) for j in theta]
encodings.append(code)
return encodings
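# Minimal usage sketch, assuming data/PAAC.txt is present and the sequence is
# longer than lambdaValue: PAAC returns 20 composition terms plus lambdaValue
# sequence-order terms (23 by default).
def _demo_paac():
    try:
        enc = PAAC("ACDEFGHIK")
    except FileNotFoundError:
        return None  # data table not available in this environment
    assert len(enc[0]) == 23
    return enc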
# Feature extraction for the AFC-T and AFC-CP models.
def Feature(f):
amino_acids = "XACDEFGHIKLMNPQRSTVWY"
amino_acids_dict = {}
seqs_blosum62 = []
seqs_dde = []
seqs_z = []
seqs_dpc = []
seqs_aac = []
seqs_ctdd = []
    label_seqs = []  # one binary label per sequence
work2vec = []
seqs_sr = []
seqs_ksctriad = []
seqs_gtpc = []
seqs_cksaagp = []
seqs_gaac = []
seqs_gdpc = []
seqs_aaindex = []
seqs_ctdt = []
seqs_geary = []
seqs_cksaap = []
seqs_ctrial = []
seqs_paac = []
for n, s in enumerate(amino_acids):
amino_acids_dict[s] = n
#new_antifu = Word2Vec.load('fa_model_All.bin')
for n, s in enumerate(SeqIO.parse(f, "fasta")):
seq_blosum62 = BLOSUM62(s.seq)
seq_ksctriad = KSCTriad(s.seq)
seq_dde = DDE(s.seq)
seq_z = ZSCALE(s.seq)
seq_aac = AAC(s.seq)
seq_dpc = DPC(s.seq)
seq_ctdd = CTDD(s.seq)
seq_ctrial = CTriad(s.seq)
seq_gtpc = GTPC(s.seq)
seq_cksaagp = CKSAAGP(s.seq)
seq_gaac = GAAC(s.seq)
seq_gdpc = GDPC(s.seq)
seq_ctdt = CTDT(s.seq)
seq_geary = Geary(s.seq)
seq_cksaap = CKSAAP(s.seq)
seq_aaindex = AAINDEX(s.seq)
seq_paac = PAAC(s.seq)
seqs_dde.append(seq_dde)
seqs_z.append(seq_z)
seqs_aac.append(seq_aac)
seqs_dpc.append(seq_dpc)
seqs_ctdd.append(seq_ctdd)
seqs_blosum62.append(seq_blosum62)
seqs_ctrial.append(seq_ctrial)
seqs_ksctriad.append(seq_ksctriad)
seqs_gtpc.append(seq_gtpc)
seqs_cksaagp.append(seq_cksaagp)
seqs_gaac.append(seq_gaac)
seqs_gdpc.append(seq_gdpc)
seqs_ctdt.append(seq_ctdt)
seqs_geary.append(seq_geary)
seqs_cksaap.append(seq_cksaap)
seqs_aaindex.append(seq_aaindex)
seqs_paac.append(seq_paac)
temp_pad = []
temp_pad1 = []
temps = []
for i in range(20):
temp_pad1.append(0)
for i in range(Max_length - len(s)):
temps.append(temp_pad1)
for i in range(Max_length - len(str(s.seq))):
temp_pad.append(0)
train_seq = [amino_acids_dict[a.upper()] for a in str(s.seq).upper()] + temp_pad
seqs_sr.append(train_seq)
#aux_p3 = [new_antifu.wv[a] if a in "ACDEFGHIKLMNPQRSTVWY" else [0 for i in range(20)] for a in
#str(s.seq).upper()] + temps
#work2vec.append(aux_p3)
        if s.id[-1] == "1":
            label_seqs.append([1])  # record ids ending in "1" mark the positive class
        else:
            label_seqs.append([0])
    return seqs_blosum62, label_seqs, work2vec, seqs_sr, seqs_dde, seqs_z, seqs_aac, seqs_dpc, seqs_ctdd, seqs_ctrial, seqs_ksctriad, seqs_gtpc, seqs_cksaagp, seqs_gaac, seqs_gdpc, seqs_ctdt, seqs_geary, seqs_cksaap, seqs_aaindex, seqs_paac
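# Minimal end-to-end sketch, assuming a FASTA file whose record ids end in "1"
# for positives and the data/ tables used above are present; the path is a
# hypothetical placeholder.
def _demo_feature(fasta_path="example.fasta"):
    check_length(fasta_path)  # widen Max_length first so padding is consistent
    feats = Feature(fasta_path)
    blosum62 = np.array(feats[0], dtype=np.float32)  # (N, Max_length, 20)
    labels = np.array(feats[1])                      # (N, 1)
    return blosum62, labels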
# AFC-C based on main dataset
def Feature1(f):
amino_acids = "XACDEFGHIKLMNPQRSTVWY"
amino_acids_dict = {}
seqs_blosum62 = []
seqs_dde = []
seqs_z = []
seqs_dpc = []
seqs_aac = []
seqs_ctdd = []
    label_seqs = []
work2vec = []
seqs_sr = []
seqs_ksctriad = []
seqs_gtpc = []
seqs_cksaagp = []
seqs_gaac = []
seqs_gdpc = []
seqs_aaindex = []
seqs_ctdt = []
seqs_geary = []
seqs_cksaap = []
seqs_ctrial = []
seqs_paac = []
for n, s in enumerate(amino_acids):
amino_acids_dict[s] = n
#new_antifu = Word2Vec.load('D:\E下载\Dataset\Dataset\\fa_model_All.bin')
for n, s in enumerate(SeqIO.parse(f, "fasta")):
seq_blosum62 = BLOSUM62(s.seq)
#seq_ksctriad = KSCTriad(s.seq)
seq_dde = DDE(s.seq)
seq_z = ZSCALE(s.seq)
seq_aac = AAC(s.seq)
seq_dpc = DPC(s.seq)
seq_ctdd = CTDD(s.seq)
#seq_ctrial = CTriad(s.seq)
seq_gtpc = GTPC(s.seq)
seq_cksaagp = CKSAAGP(s.seq)
seq_gaac = GAAC(s.seq)
seq_gdpc = GDPC(s.seq)
seq_ctdt = CTDT(s.seq)
seq_geary = Geary(s.seq)
#seq_cksaap = CKSAAP(s.seq)
seq_aaindex = AAINDEX(s.seq)
#seq_paac = PAAC(s.seq)
seqs_dde.append(seq_dde)
seqs_z.append(seq_z)
seqs_aac.append(seq_aac)
seqs_dpc.append(seq_dpc)
seqs_ctdd.append(seq_ctdd)
seqs_blosum62.append(seq_blosum62)
#seqs_ctrial.append(seq_ctrial)
#seqs_ksctriad.append(seq_ksctriad)
seqs_gtpc.append(seq_gtpc)
seqs_cksaagp.append(seq_cksaagp)
seqs_gaac.append(seq_gaac)
seqs_gdpc.append(seq_gdpc)
seqs_ctdt.append(seq_ctdt)
seqs_geary.append(seq_geary)
#seqs_cksaap.append(seq_cksaap)
seqs_aaindex.append(seq_aaindex)
#seqs_paac.append(seq_paac)
temp_pad = []
temp_pad1 = []
temps = []
for i in range(20):
temp_pad1.append(0)
for i in range(Max_length - len(s)):
temps.append(temp_pad1)
for i in range(Max_length - len(str(s.seq))):
temp_pad.append(0)
train_seq = [amino_acids_dict[a.upper()] for a in str(s.seq).upper()] + temp_pad
seqs_sr.append(train_seq)
#aux_p3 = [new_antifu.wv[a] if a in "ACDEFGHIKLMNPQRSTVWY" else [0 for i in range(20)] for a in
#str(s.seq).upper()] + temps
#work2vec.append(aux_p3)
        if s.id[-1] == "1":
            label_seqs.append([1])
        else:
            label_seqs.append([0])
    return seqs_blosum62, label_seqs, work2vec, seqs_sr, seqs_dde, seqs_z, seqs_aac, seqs_dpc, seqs_ctdd, seqs_ctrial, seqs_ksctriad, seqs_gtpc, seqs_cksaagp, seqs_gaac, seqs_gdpc, seqs_ctdt, seqs_geary, seqs_cksaap, seqs_aaindex, seqs_paac
# AFC-C based on alternate dataset
def Feature2(f):
amino_acids = "XACDEFGHIKLMNPQRSTVWY"
amino_acids_dict = {}
seqs_blosum62 = []
seqs_dde = []
seqs_z = []
seqs_dpc = []
seqs_aac = []
seqs_ctdd = []
    label_seqs = []
work2vec = []
seqs_sr = []
seqs_ksctriad = []
seqs_gtpc = []
seqs_cksaagp = []
seqs_gaac = []
seqs_gdpc = []
seqs_aaindex = []
seqs_ctdt = []
seqs_geary = []
seqs_cksaap = []
seqs_ctrial = []
seqs_paac = []
for n, s in enumerate(amino_acids):
amino_acids_dict[s] = n
#new_antifu = Word2Vec.load('D:\E下载\Dataset\Dataset\\fa_model_All.bin')
for n, s in enumerate(SeqIO.parse(f, "fasta")):
seq_blosum62 = BLOSUM62(s.seq)
#seq_ksctriad = KSCTriad(s.seq)
seq_dde = DDE(s.seq)
seq_z = ZSCALE(s.seq)
seq_aac = AAC(s.seq)
seq_dpc = DPC(s.seq)
seq_ctdd = CTDD(s.seq)
seq_ctrial = CTriad(s.seq)
seq_gtpc = GTPC(s.seq)
seq_cksaagp = CKSAAGP(s.seq)
seq_gaac = GAAC(s.seq)
seq_gdpc = GDPC(s.seq)
seq_ctdt = CTDT(s.seq)
seq_geary = Geary(s.seq)
#seq_cksaap = CKSAAP(s.seq)
seq_aaindex = AAINDEX(s.seq)
#seq_paac = PAAC(s.seq)
seqs_dde.append(seq_dde)
seqs_z.append(seq_z)
seqs_aac.append(seq_aac)
seqs_dpc.append(seq_dpc)
seqs_ctdd.append(seq_ctdd)
seqs_blosum62.append(seq_blosum62)
seqs_ctrial.append(seq_ctrial)
#seqs_ksctriad.append(seq_ksctriad)
seqs_gtpc.append(seq_gtpc)
seqs_cksaagp.append(seq_cksaagp)
seqs_gaac.append(seq_gaac)
seqs_gdpc.append(seq_gdpc)
seqs_ctdt.append(seq_ctdt)
seqs_geary.append(seq_geary)
#seqs_cksaap.append(seq_cksaap)
seqs_aaindex.append(seq_aaindex)
#seqs_paac.append(seq_paac)
temp_pad = []
temp_pad1 = []
temps = []
for i in range(20):
temp_pad1.append(0)
for i in range(Max_length - len(s)):
temps.append(temp_pad1)
for i in range(Max_length - len(str(s.seq))):
temp_pad.append(0)
train_seq = [amino_acids_dict[a.upper()] for a in str(s.seq).upper()] + temp_pad
seqs_sr.append(train_seq)
#aux_p3 = [new_antifu.wv[a] if a in "ACDEFGHIKLMNPQRSTVWY" else [0 for i in range(20)] for a in
#str(s.seq).upper()] + temps
#work2vec.append(aux_p3)
        if s.id[-1] == "1":
            label_seqs.append([1])
        else:
            label_seqs.append([0])
    return seqs_blosum62, label_seqs, work2vec, seqs_sr, seqs_dde, seqs_z, seqs_aac, seqs_dpc, seqs_ctdd, seqs_ctrial, seqs_ksctriad, seqs_gtpc, seqs_cksaagp, seqs_gaac, seqs_gdpc, seqs_ctdt, seqs_geary, seqs_cksaap, seqs_aaindex, seqs_paac
# === excel4lib/macro/analysis/__init__.py (repo: aaaddress1/boobsnail, MIT license) ===
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.