max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
rlkit/samplers/rollout.py | HamzaHz2/rlkit | 0 | 12789751 | import numpy as np
#
class Rollout:
def __init__(self):
self.dict_obs = []
self.dict_next_obs = []
self.actions = []
self.rewards = []
self.terminals = []
self.agent_infos = []
self.env_infos = {}
self.path_length = 0
def __len__(self):
return self.path_length
def add_transition(self, obs, action, next_obs, reward, done, env_info, agent_info):
self.dict_obs.append(obs)
self.dict_next_obs.append(next_obs)
self.actions.append(action)
self.rewards.append(reward)
        self.terminals.append(done)
        self.agent_infos.append(agent_info)
if not self.env_infos:
for k, v in env_info.items():
self.env_infos[k] = [v]
else:
for k, v in env_info.items():
self.env_infos[k].append(v)
self.path_length += 1
def to_dict(self):
self.actions = np.array(self.actions)
if len(self.actions.shape) == 1:
self.actions = np.expand_dims(self.actions, 1)
for k, v in self.env_infos.items():
self.env_infos[k] = np.array(v)
self.rewards = np.array(self.rewards)
self.terminals = np.array(self.terminals).reshape(-1, 1)
return dict(
observations=self.dict_obs,
actions=self.actions,
rewards=self.rewards,
next_observations=self.dict_next_obs,
terminals=self.terminals,
agent_infos=self.agent_infos,
env_infos=self.env_infos,
)
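# Editor's usage sketch (not part of the original module): the observation/action shapes
# and info keys below are illustrative assumptions, chosen only to show the intended
# add_transition -> to_dict flow.
if __name__ == "__main__":
    rollout = Rollout()
    for _ in range(3):
        rollout.add_transition(
            obs={"state": np.zeros(4)},
            action=np.array([0.1, -0.2]),
            next_obs={"state": np.ones(4)},
            reward=1.0,
            done=False,
            env_info={"success": False},
            agent_info={},
        )
    path = rollout.to_dict()
    print(len(rollout), path["rewards"].shape)  # expected: 3 (3,)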
| 2.53125 | 3 |
azure_config_template.py | alan-turing-institute/Pangeo-UKCP-Transfer | 2 | 12789752 | <reponame>alan-turing-institute/Pangeo-UKCP-Transfer
config = {"ACCOUNT_NAME": ACC_NAME,
"SAS_TOKEN": SAS_TOKEN}
| 1.226563 | 1 |
AutoWebBrowsing/webBrowse.py | mshahmalaki/PyAutomate | 0 | 12789753 | <filename>AutoWebBrowsing/webBrowse.py
from selenium import webdriver
driver = webdriver.Chrome()
driver.get('https://www.seleniumeasy.com/test/basic-first-form-demo.html')
message_field = driver.find_element_by_xpath('//*[@id="user-message"]')
message_field.send_keys('Hello <PASSWORD>')
show_message_button = driver.find_element_by_xpath('//*[@id="get-input"]/button')
show_message_button.click()
addition_field1 = driver.find_element_by_xpath('//*[@id="sum1"]')
addition_field1.send_keys('10')
addition_field2 = driver.find_element_by_xpath('//*[@id="sum2"]')
addition_field2.send_keys('15')
get_total_button = driver.find_element_by_xpath('//*[@id="gettotal"]/button')
get_total_button.click() | 3.203125 | 3 |
libs/__init__.py | ardzix/instalment-app | 1 | 12789754 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Author: <NAME>
# <EMAIL>
#
# File Created: Wednesday, 10th January 2018 11:35:14 pm
# Last Modified: Wednesday, 10th January 2018 11:37:28 pm
# Modified By: <NAME> (<EMAIL>)
#
# Give the best to the world
# Copyright - 2018 Ardz.Co
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
| 1.546875 | 2 |
netbox/extras/api/serializers.py | team-telnyx/netbox | 3 | 12789755 | from rest_framework import serializers
from extras.models import CF_TYPE_SELECT, CustomFieldChoice, Graph
class CustomFieldSerializer(serializers.Serializer):
"""
Extends a ModelSerializer to render any CustomFields and their values associated with an object.
"""
custom_fields = serializers.SerializerMethodField()
def get_custom_fields(self, obj):
# Gather all CustomFields applicable to this object
fields = {cf.name: None for cf in self.context['view'].custom_fields}
# Attach any defined CustomFieldValues to their respective CustomFields
for cfv in obj.custom_field_values.all():
# Attempt to suppress database lookups for CustomFieldChoices by using the cached choice set from the view
# context.
if cfv.field.type == CF_TYPE_SELECT and hasattr(self, 'custom_field_choices'):
cfc = {
'id': int(cfv.serialized_value),
'value': self.context['view'].custom_field_choices[int(cfv.serialized_value)]
}
fields[cfv.field.name] = CustomFieldChoiceSerializer(instance=cfc).data
# Fall back to hitting the database in case we're in a view that doesn't inherit CustomFieldModelAPIView.
elif cfv.field.type == CF_TYPE_SELECT:
fields[cfv.field.name] = CustomFieldChoiceSerializer(instance=cfv.value).data
else:
fields[cfv.field.name] = cfv.value
return fields
class CustomFieldChoiceSerializer(serializers.ModelSerializer):
class Meta:
model = CustomFieldChoice
fields = ['id', 'value']
class GraphSerializer(serializers.ModelSerializer):
embed_url = serializers.SerializerMethodField()
embed_link = serializers.SerializerMethodField()
class Meta:
model = Graph
fields = ['name', 'embed_url', 'embed_link']
def get_embed_url(self, obj):
return obj.embed_url(self.context['graphed_object'])
def get_embed_link(self, obj):
return obj.embed_link(self.context['graphed_object'])
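# Editor's note (illustrative, not from the original file): CustomFieldSerializer is meant
# to be mixed into a ModelSerializer so that the rendered representation gains a
# `custom_fields` member alongside the model's own fields. `Site` and its field list below
# are hypothetical placeholders sketching the intended composition only:
#
# class SiteSerializer(CustomFieldSerializer, serializers.ModelSerializer):
#     class Meta:
#         model = Site
#         fields = ['id', 'name', 'custom_fields']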
| 2.28125 | 2 |
src/sensor.py | dizzyrobin/rpi-trigger | 0 | 12789756 | import Adafruit_DHT
import requests
from time import sleep
req_url = 'http://localhost:7777/'
sensor = Adafruit_DHT.DHT22
pin = 4
def createRequestData(temperature, humidity, electricalOutlet):
json = '''{{ "temperature": {}, "humidity": {}, "electricalOutlet": {} }}'''
return json.format(temperature, humidity, electricalOutlet)
def sendData(temperature, humidity, electricalOutlet):
data = createRequestData(temperature, humidity, electricalOutlet)
response = requests.post(req_url, data=data)
# TODO Get the response status
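# Editor's note (not in the original): instead of hand-formatting the JSON string above,
# the same request could let requests serialize a dict directly, which also sets the
# Content-Type header to application/json automatically:
#   requests.post(req_url, json={"temperature": temperature,
#                                "humidity": humidity,
#                                "electricalOutlet": electricalOutlet})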
def measureData():
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
    # TODO Get electricalOutlet; use a neutral placeholder until the real reading is wired in
    electricalOutlet = 0
    print(humidity, temperature)
    sendData(temperature, humidity, electricalOutlet)
while True:
measureData()
sleep(10)
| 2.9375 | 3 |
main2.py | abdoulaye2019/BokehViz | 0 | 12789757 | <reponame>abdoulaye2019/BokehViz<filename>main2.py
from tkinter import Label
from bokeh.plotting import figure, output_file,save, show, ColumnDataSource
from bokeh.models.tools import HoverTool
from bokeh.transform import factor_cmap
from bokeh.palettes import Blues8
from bokeh.embed import components
import pandas as pd
df = pd.read_csv('topGoals.csv', delimiter=';', encoding='unicode_escape')
#print(df)
# Create a ColumnDataSource from DataFrame
source = ColumnDataSource(df)
car = df['Joueurs']
hp = df['Buts']
output_file('index2.html')
# Car list
car_list = source.data['Pays'].tolist()
# Add plot
p = figure(
y_range=car_list,
plot_width=1000,
plot_height=600,
title='Statistics of the Goals Scored by CAN 2021 Players',
x_axis_label='Goals Scored',
tools='pan,box_select,zoom_in,zoom_out,save,reset'
)
#citation = Label(text='<NAME>')
# Render glyph
p.hbar(
y = 'Pays',
right='Buts',
left=0,
height=0.4,
color='orange',
fill_alpha=0.9,
fill_color=factor_cmap(
'Pays',
palette=Blues8,
factors=car_list
),
source = source,
legend_field='Joueurs'
)
# Add Legend
p.legend.orientation='vertical'
p.legend.location='top_right'
p.legend.label_text_font_size='10px'
p.title.align='center'
p.title.text_color='green'
p.title.text_font_size='100px'
p.title.background_fill_color='#f9fd00'
# Add Tooltips
hover = HoverTool()
hover.tooltips = """
<div>
<h3>@Pays</h3>
<div><h3>@Joueurs</h3></div>
<div><strong>Goals Scored: </strong>@Buts</div>
<div><img src="@Images" alt="" width="200"/></div>
</div>
"""
p.add_tools(hover)
#p.add_layout(citation)
# Show results
# show(p)
save(p)
# Print out div and scripts
# script, div = components(p)
# print(div)
# print(script)
| 2.96875 | 3 |
.leetcode/647.palindromic-substrings.py | KuiyuanFu/PythonLeetCode | 0 | 12789758 | # @lc app=leetcode id=647 lang=python3
#
# [647] Palindromic Substrings
#
# https://leetcode.com/problems/palindromic-substrings/description/
#
# algorithms
# Medium (63.36%)
# Likes: 5179
# Dislikes: 144
# Total Accepted: 329.3K
# Total Submissions: 518.4K
# Testcase Example: '"abc"'
#
# Given a string s, return the number of palindromic substrings in it.
#
# A string is a palindrome when it reads the same backward as forward.
#
# A substring is a contiguous sequence of characters within the string.
#
#
# Example 1:
#
#
# Input: s = "abc"
# Output: 3
# Explanation: Three palindromic strings: "a", "b", "c".
#
#
# Example 2:
#
#
# Input: s = "aaa"
# Output: 6
# Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".
#
#
#
# Constraints:
#
#
# 1 <= s.length <= 1000
# s consists of lowercase English letters.
#
#
#
# @lc tags=string;dynamic-programming
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Count the number of palindromic substrings.
# Brute-force traversal: expand around every possible center.
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
def countSubstrings(self, s: str) -> int:
length = len(s)
def p(l, r):
res = 0
while l >= 0 and r < length:
if s[l] == s[r]:
res += 1
else:
break
l, r = l - 1, r + 1
return res
res = 0
for i in range(len(s)):
res += p(i, i)
res += p(i, i + 1)
return res
# @lc code=end
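# Editor's addition (not part of the original submission): the center-expansion solution
# above runs in O(n^2) time with O(1) extra space. For comparison, an equivalent
# dynamic-programming variant that trades O(n^2) space for the same time bound:
def countSubstrings_dp(s: str) -> int:
    n = len(s)
    # dp[i][j] is True when s[i..j] reads the same backward as forward
    dp = [[False] * n for _ in range(n)]
    count = 0
    for end in range(n):
        for start in range(end, -1, -1):
            if s[start] == s[end] and (end - start < 2 or dp[start + 1][end - 1]):
                dp[start][end] = True
                count += 1
    return count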
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('s = "abc"')
print('Expected :')
print('3')
print('Output :')
print(str(Solution().countSubstrings("abc")))
print()
print('Example 2:')
print('Input : ')
print('s = "aaa"')
print('Expected :')
print('6')
print('Output :')
print(str(Solution().countSubstrings("aaa")))
print()
pass
# @lc main=end | 3.796875 | 4 |
bobbot/search_node.py | TheCheapestPixels/bobbot | 1 | 12789759 | <filename>bobbot/search_node.py<gh_stars>1-10
class SearchNode:
"""Implements expansion of new nodes, and merging of instances of
nodes, which may be required after the same state has been
reached via two or more different routes of search tree
expansion.
    Public methods: .expand(), .post_expansion_insertion()
"""
def __init__(self, state=None, known_predecessors=None):
if state is None:
state = self._starting_state()
self.state = state
if known_predecessors is None:
known_predecessors = set()
assert isinstance(known_predecessors, set)
self.known_predecessors = known_predecessors
self.is_expanded = False
self.successors = {} # {key: state} to keep a ref to the successor
self.moves = {} # {move: key}
def expand(self):
"""Return all successor states for this state. This doesn't
store them in this state object; .post_expansion_insertion()
has to be used for that. The reason for this is that any state
node found during expansion may have previously been come upon
via another set of moves. Therefore, it is the search tree's
    responsibility to detect such duplications and indicate them
when calling .post_expansion_insertion().
Returns: SearchNode objects.
Requires: ._make_move(), ._all_legal_moves(), ._node_key()
"""
# TODO: This computes each move twice. Optimize!
# TODO: Can I be sure that there aren't any more kwargs?
move_to_successor = {move: self.__class__(state=self._make_move(move),
known_predecessors={self})
for move in self._all_legal_moves()}
# moves are {move: successor_node_key}, so unlike the actual
# successor state instance (which might be a spurious
# duplicate that will be removed during merge), these can
# safely be stored here; the node_key of both instances has
# to be the same to be valid.
self.moves = {move: state._node_key()
for move, state in move_to_successor.items()}
self.is_expanded = True
return move_to_successor.values()
def post_expansion_insertion(self, old, new):
"""Gets called after this node has been expanded and its new
successors have been inserted into the search tree. :old:
contains nodes that were already present in the search tree,
but have been updated through .merge(). new contains nodes
that haven't been known before. The format of both is
{node_key: node}
"""
self.successors.update(old)
self.successors.update(new)
def merge(self, other_instance):
"""This node has been re-discovered through expansion of a
game state, and the resulting information should be added to
this instance.
"""
if not self.is_expanded and other_instance.is_expanded:
self.successors = other_instance.successors
self.is_expanded = True
        self.known_predecessors.update(other_instance.known_predecessors)
def get_successors(self):
return self.successors
def get_successor_nodes(self):
return self.successors.values()
def get_successor_keys(self):
return self.successors.keys()
def get_successor(self, move):
return self.successors[self.moves[move]]
def remove_predecessors(self, to_remove):
self.known_predecessors -= set(to_remove)
def is_finished(self):
"""Is the game in a state from which it can't be continued,
either because a player won or it resulted in a draw?
"""
raise NotImplementedError("Game's SearchNode doesn't implement "
".is_finished()")
def node_key(self):
raise NotImplementedError("Game's SearchNode doesn't implement "
".node_key()")
class GameAdapter(SearchNode):
"""Helper class to create and test integrations with game rule
implementations more easily.
"""
def _starting_state(self):
return self.starting_state()
def starting_state(self):
raise NotImplementedError("Game does not implement .starting_state()")
def _active_player(self):
return self.active_player(self.state)
def active_player(self, game_state):
raise NotImplementedError("Game does not implement .active_player()")
def _is_finished(self):
return self.is_finished(self.state)
def is_finished(self, game_state):
raise NotImplementedError("Game does not implement .is_finished()")
def _all_legal_moves(self):
return self.all_legal_moves(self.state)
def all_legal_moves(self, game_state):
raise NotImplementedError("Game does not implement .all_legal_moves()")
def _make_move(self, move):
return self.make_move(self.state, move)
def make_move(self, game_state, move):
raise NotImplementedError("Game does not implement .make_move()")
def _winner(self):
return self.winner(self.state)
def winner(self, game_state):
raise NotImplementedError("Game does not implement .winner()")
def _node_key(self):
return self.node_key(self.state)
def node_key(self, game_state):
raise NotImplementedError("Game does not implement .node_key()")
def _evaluate(self):
return self.evaluate(self.state)
def evaluate(self, game_state):
raise NotImplementedError("Game does not implement .evaluate()")
def __repr__(self):
return self._node_key()
# Score management
# FIXME: This implementation triggers a backpropagation cascade
# pretty much every time that a score has been updated. There have
# to be approaches where each node has to update only once even
# without traversing the whole tree in the beginning.
class BackpropagationScoringMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.score = self._evaluate()
def backpropagate_score(self):
for node in self.known_predecessors:
node.update_score()
def post_expansion_insertion(self, old, new):
super().post_expansion_insertion(old, new)
if old or new:
self.update_score()
def merge(self, other_instance):
super().merge(other_instance)
if self.successors:
self.update_score()
def update_score(self):
has_been_updated = False
for player in self.score:
successor_scores = [successor.score[player]
for successor in self.successors.values()]
new_score = self.calculate_score(player, successor_scores)
if new_score != self.score[player]:
self.score[player] = new_score
has_been_updated = True
if has_been_updated:
self.backpropagate_score()
class MinMaxScoringMixin(BackpropagationScoringMixin):
def calculate_score(self, player, successor_scores):
if player == self._active_player():
new_score = max(successor_scores)
else:
new_score = min(successor_scores)
return new_score
# Move choosers
#
# These SearchNodes implement .find_best_move() and usually require
# .score to be implemented.
import random
class ChooseRandomMoveMixin:
def find_best_move(self):
return random.choice(self.moves)
class ChooseFirstMoveMixin:
def find_best_move(self):
return sorted(self.moves)[0]
class ChooseRandomMoveFromBestMixin:
def find_best_move(self):
assert self.is_expanded
possible_moves = {move: self.successors[key].score[self._active_player()]
for move, key in self.moves.items()}
best_score = max(possible_moves.values())
best_moves = [move for move in possible_moves
if possible_moves[move] == best_score]
return random.choice(best_moves)
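# Editor's sketch (not part of the original module): a toy "count to 3" game wired through
# GameAdapter plus the scoring and move-choosing mixins, to make explicit which hooks a
# concrete game has to provide. The game itself is an assumption made up for illustration:
# the state is (running_total, player_to_move) and whoever brings the total to 3 wins.
class CountToThree(MinMaxScoringMixin, ChooseRandomMoveFromBestMixin, GameAdapter):
    def starting_state(self):
        return (0, 0)  # total so far, player 0 moves first

    def active_player(self, game_state):
        return game_state[1]

    def is_finished(self, game_state):
        return game_state[0] >= 3

    def all_legal_moves(self, game_state):
        return [] if self.is_finished(game_state) else [1, 2]

    def make_move(self, game_state, move):
        total, player = game_state
        return (total + move, 1 - player)

    def winner(self, game_state):
        # the player who just moved (not the one to move next) reached the total
        return 1 - game_state[1] if self.is_finished(game_state) else None

    def node_key(self, game_state):
        return "t{}p{}".format(*game_state)

    def evaluate(self, game_state):
        if not self.is_finished(game_state):
            return {0: 0.0, 1: 0.0}
        w = self.winner(game_state)
        return {w: 1.0, 1 - w: -1.0}

# Usage sketch: expand the root once and let the mixin pick among the (still uninformed)
# successor scores; deeper expansion is needed before the min-max values become meaningful.
#   root = CountToThree()
#   new_nodes = {n._node_key(): n for n in root.expand()}
#   root.post_expansion_insertion({}, new_nodes)
#   print(root.find_best_move())  # prints 1 or 2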
| 3.21875 | 3 |
mne/time_frequency/__init__.py | jaeilepp/eggie | 0 | 12789760 | <reponame>jaeilepp/eggie
"""Time frequency analysis tools
"""
from .tfr import induced_power, single_trial_power, morlet, tfr_morlet
from .tfr import AverageTFR
from .psd import compute_raw_psd, compute_epochs_psd
from .csd import CrossSpectralDensity, compute_epochs_csd
from .ar import yule_walker, ar_raw, iir_filter_raw
from .multitaper import dpss_windows, multitaper_psd
from .stft import stft, istft, stftfreq
| 1.15625 | 1 |
tests/test_pipeline.py | coralproject/atoll | 12 | 12789761 | import unittest
from atoll import Pipeline
def lowercase(x):
return x.lower()
def tokenize(x, delimiter=' '):
return x.split(delimiter)
def word_counter(x):
return len(x)
def count_per_key(value):
return len(value)
def add(x, y):
return x + y
def make_list(x):
return ['a', x]
class PipelineTests(unittest.TestCase):
def setUp(self):
self.docs = [
'Coral reefs are diverse underwater ecosystems',
'Coral reefs are built by colonies of tiny animals'
]
self.expected_counts = [6,9]
self.expected_chars = [['c', 'r', 'a', 'd', 'u', 'e'], ['c', 'r', 'a', 'b', 'b', 'c', 'o', 't', 'a']]
def test_map_pipeline(self):
expected = [
['coral', 'reefs', 'are', 'diverse', 'underwater', 'ecosystems'],
['coral', 'reefs', 'are', 'built', 'by', 'colonies', 'of', 'tiny', 'animals']
]
pipeline = Pipeline().map(lowercase).map(tokenize)
output = pipeline(self.docs)
for o, e in zip(output, expected):
self.assertEqual(set(o), set(e))
def test_nested_pipeline(self):
nested_pipeline = Pipeline().map(lowercase).map(tokenize)
pipeline = Pipeline().to(nested_pipeline).map(word_counter)
counts = pipeline(self.docs)
self.assertEqual(counts, [6,9])
def test_map_parallel(self):
expected = [
['coral', 'reefs', 'are', 'diverse', 'underwater', 'ecosystems'],
['coral', 'reefs', 'are', 'built', 'by', 'colonies', 'of', 'tiny', 'animals']
]
pipeline = Pipeline().map(lowercase).map(tokenize)
output = pipeline(self.docs, n_jobs=2)
for o, e in zip(output, expected):
self.assertEqual(set(o), set(e))
def test_map_values(self):
expected = [
('a', 2),
('b', 3)
]
pipeline = Pipeline().mapValues(count_per_key)
output = pipeline({
'a': [0,0],
'b': [0,0,0]
})
self.assertEqual(set(output), set(expected))
def test_flat_map(self):
expected = ['a', 2, 'a', 3]
pipeline = Pipeline().flatMap(make_list)
output = pipeline([2,3])
self.assertEqual(output, expected)
def test_flat_map_values(self):
expected = [('a', 1), ('a', 2), ('b', 3), ('b', 4)]
pipeline = Pipeline().flatMapValues(None) # None = identity func
output = pipeline({
'a': [1,2],
'b': [3,4]
})
self.assertEqual(set(output), set(expected))
def test_reduce(self):
expected = 10
pipeline = Pipeline().reduce(add)
output = pipeline([1,2,3,4])
self.assertEqual(output, expected)
def test_reduce_by_key(self):
expected = [('a', 3), ('b', 7)]
pipeline = Pipeline().reduceByKey(add)
output = pipeline([('a', 1), ('a', 2), ('b', 3), ('b', 4)])
self.assertEqual(set(output), set(expected))
def test_partial_handling(self):
expected = [5, 6]
pipeline = Pipeline().map(add, 4)
output = pipeline([1,2])
self.assertEqual(output, expected)
def test_kwargs_missing(self):
pipeline = Pipeline().map(tokenize, kwargs=['delimiter'])
input = [doc.replace(' ', ',') for doc in self.docs]
self.assertRaises(KeyError, pipeline, input)
def test_kwargs(self):
expected = [
['Coral', 'reefs', 'are', 'diverse', 'underwater', 'ecosystems'],
['Coral', 'reefs', 'are', 'built', 'by', 'colonies', 'of', 'tiny', 'animals']
]
pipeline = Pipeline().map(tokenize, kwargs=['delimiter'])
input = [doc.replace(' ', ',') for doc in self.docs]
output = pipeline(input, delimiter=',')
for o, e in zip(output, expected):
self.assertEqual(set(o), set(e))
def test_kwargs_nested(self):
expected = [
['coral', 'reefs', 'are', 'diverse', 'underwater', 'ecosystems'],
['coral', 'reefs', 'are', 'built', 'by', 'colonies', 'of', 'tiny', 'animals']
]
token_pipeline = Pipeline().map(tokenize, kwargs=['delimiter'])
lowercase_pipeline = Pipeline().map(lowercase) # kinda hacky
pipeline = Pipeline().to(token_pipeline).map(lowercase_pipeline)
input = [doc.replace(' ', ',') for doc in self.docs]
output = pipeline(input, delimiter=',')
for o, e in zip(output, expected):
self.assertEqual(set(o), set(e))
| 3.15625 | 3 |
farm_management/users/views/__init__.py | alexanders0/farm-management | 1 | 12789762 | <gh_stars>1-10
from .users import UserViewSet
| 1.007813 | 1 |
projects/planetTest/planetTest.py | Aceheliflyer/Computer-Science | 0 | 12789763 | <gh_stars>0
import math
# Settings ##################
app.background = 'black'
app.stepsPerSecond = 60 # The framerate of the app.
app.steps = 0 # The default steps.
obj = {
'sun': Circle(app.centerX, app.centerY, 25, fill=gradient('darkRed', 'yellow')),
'plt': Circle(325, 200, 12.5, fill='skyblue'),
'mun': Circle(370, 200, 6.25, fill='grey')
}
obj['plt'].distFromSun = obj['plt'].centerX - app.centerX
obj['mun'].distFromPlt = obj['mun'].centerX - obj['plt'].centerX
obj['plt'].speed = -1
obj['mun'].speed = -5
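# Editor's note (added comments): each body traces a parametric circle around its parent;
# its x/y offsets from the parent's center are r*cos(omega*t) and r*sin(omega*t), where r is
# distFromSun / distFromPlt, omega is the per-body `speed`, and t = app.steps / app.stepsPerSecond
# is the elapsed time in seconds. onStep below simply re-evaluates those offsets every frame
# for the planet (around the sun) and the moon (around the planet).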
def onStep():
app.steps += 1
p = obj['plt']
p.centerX = math.cos((app.steps*p.speed)/app.stepsPerSecond)*p.distFromSun+200
p.centerY = math.sin((app.steps*p.speed)/app.stepsPerSecond)*p.distFromSun+200
m = obj['mun']
m.centerX = math.sin((app.steps*m.speed)/app.stepsPerSecond)*m.distFromPlt+p.centerX
m.centerY = math.cos((app.steps*m.speed)/app.stepsPerSecond)*m.distFromPlt+p.centerY
| 2.546875 | 3 |
tests/test_app.py | wghou/BeeVeeH | 11 | 12789764 | import os
import BeeVeeH
import pytest
BVH_DIR = '%s/bvh_files' % os.path.dirname(__file__)
class TestCase():
def test_bvh_play(self):
file_path = '%s/0007_Cartwheel001.bvh' % BVH_DIR
if not BeeVeeH.start(file_path, test=True):
pytest.skip('Cannot launch the app due to SystemExit, reason above')
if __name__ == '__main__':
TestCase().test_bvh_play()
| 2.140625 | 2 |
flow-tools-analysis/gen_data_input_flows_behavior_metrics.py | spoofer-ix/spoofer-ix | 2 | 12789765 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import sys, path
sys.path.append(path.abspath(path.join(path.dirname(__file__), '..')))
import utils.multiprocessing_poll as mpPool
import utils.geolocation_utilities as geoutil
import utils.prefixes_utilities as putil
import utils.constants as cons
import utils.filters_utilities as futil
import utils.fileparsing_utilities as fputil
import utils.notification_utilities as notifutil
import argparse
import sys
import utils.cmdline_interface_utilities as cmdutil
import utils.ixp_members_mappings_utilities as ixp_member_util
import ast
import traceback
import gzip
import pyasn
from netaddr import IPNetwork
from operator import add
import cProfile
from timeit import default_timer as timer
"""
---------------------------------------ABOUT----------------------------------------
Process the original traffic flow data, transforming and aggregating it into 5-min bins
so that distinct network behavior metrics can be analyzed.
------------------------------------------------------------------------------------
"""
def count_ipv4_prefix24_forip(ip_prefix):
"""
    Given an IP prefix, return the number of /24 IPv4 prefixes it covers.
:param ip_prefix:
:return:
"""
ipv4_prefixlen_desired = 24
ip_prefix_fields = ip_prefix.split('/')
# Check if we have range id with the prefix
if len(ip_prefix_fields) == 2:
# Generate prefix object
cidr_block = ip_prefix_fields[1]
prefix_net = IPNetwork(ip_prefix)
if int(cidr_block) < 24:
# create a list of all possible subnets /24 for IPv4
subnets = list(prefix_net.subnet(ipv4_prefixlen_desired))
return len(subnets)
elif int(cidr_block) == 24:
return 1
# In case they are bigger leave as is (more specific naturally)
elif int(cidr_block) > 24:
return 0
else:
print("ALERT: classfull IP range (A, B, C) found.")
def load_database_ip2prefixasn_routeviews_by_timewindow(p_tw_start):
"""
Load the IPAddress to Prefix/ASN lookup database from Routeviews.
:return:
"""
str_key = str(p_tw_start.year) + str(p_tw_start.month)
if str_key in cons.DICT_OF_ROUTEVIEWS_IP2PREFIX_DATABASES:
path_to_file = cons.DICT_OF_ROUTEVIEWS_IP2PREFIX_DATABASES[str_key]
else:
print "> ERROR: fail to load Routeviews ip2prefixasn database file."
path_to_file = ""
return pyasn.pyasn(path_to_file)
def do_prefix_lookup_forip(str_ip_address):
"""
For a given ip address execute a lookup on routeviews db to get the prefix and asn information.
:param str_ip_address:
:return:
"""
try:
prefix_lookup_result = f_global_asndb_routeviews.lookup(str_ip_address)
origin_asn = prefix_lookup_result[0]
ip_prefix = prefix_lookup_result[1]
return origin_asn, ip_prefix
except:
print "Routeviews DB lookup failed! Double check if the file is ok."
return None, None
def update_log_ip_dict_per_ingress_egress_point(flow_ingress_asn, flow_ip, origin_asn, ip_prefix, country_code, flow_bytes, flow_packets, d_ipsrc_level_analysis_perpoint):
"""
Account for unique IPAddresses, BGP prefixes, origin_asn per ingress/egress points.
:param flow_ingress_asn:
:param flow_ip:
:param origin_asn:
:param ip_prefix:
:param d_ipsrc_level_analysis_perpoint:
:return: dict of dict {'1234': {('10.10.10.1', 23456, '10.0.0.0/8'): [1]},
'5678': {('172.16.31.10', 98765, '18192.168.127.12/20'): [1]}, ...}
"""
k = (flow_ip, origin_asn, ip_prefix, country_code)
values = [1, flow_bytes, flow_packets]
flow_ingress_asn = frozenset(flow_ingress_asn)
if flow_ingress_asn not in d_ipsrc_level_analysis_perpoint.keys():
d_ipsrc_level_analysis_perpoint[flow_ingress_asn] = dict()
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
if k not in d_ipsrc_level_analysis_perpoint[flow_ingress_asn]:
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = map(add, d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k], values)
return d_ipsrc_level_analysis_perpoint
def update_log_ip_ports_protocols_dict_per_ingress_egress_point(flow_ingress_asn, flow_ip, flow_port, str_flow_pr, d_analysis_perpoint_ports_protocols):
k = flow_ip
flow_port = [(flow_port)]
str_flow_pr = [(str_flow_pr)]
flow_ingress_asn = frozenset(flow_ingress_asn)
if flow_ingress_asn not in d_analysis_perpoint_ports_protocols.keys():
d_analysis_perpoint_ports_protocols[flow_ingress_asn] = dict()
d_analysis_perpoint_ports_protocols[flow_ingress_asn][k] = {0: set(), 1: set()}
d_analysis_perpoint_ports_protocols[flow_ingress_asn][k][0].update(flow_port)
d_analysis_perpoint_ports_protocols[flow_ingress_asn][k][1].update(str_flow_pr)
else:
if k not in d_analysis_perpoint_ports_protocols[flow_ingress_asn]:
d_analysis_perpoint_ports_protocols[flow_ingress_asn][k] = {0: set(), 1: set()}
d_analysis_perpoint_ports_protocols[flow_ingress_asn][k][0].update(flow_port)
d_analysis_perpoint_ports_protocols[flow_ingress_asn][k][1].update(str_flow_pr)
else:
d_analysis_perpoint_ports_protocols[flow_ingress_asn][k][0].update(flow_port)
d_analysis_perpoint_ports_protocols[flow_ingress_asn][k][1].update(str_flow_pr)
return d_analysis_perpoint_ports_protocols
def update_log_ip_dict(flow_ip, origin_asn, ip_prefix, country_code, flow_bytes, flow_packets, d_ip_level_analysis):
"""
Counts the unique IPAddresses, BGP prefixes, origin_asn for all the traffic that is seen.
:param flow_ip:
:param origin_asn:
:param ip_prefix:
:param d_ip_level_analysis:
:return:
"""
k = (flow_ip, origin_asn, ip_prefix, country_code)
values = [1, flow_bytes, flow_packets]
if k not in d_ip_level_analysis:
d_ip_level_analysis[k] = values
else:
d_ip_level_analysis[k] = map(add, d_ip_level_analysis[k], values)
return d_ip_level_analysis
def update_log_ip_ports_protocols_dict(str_flow_sa, flow_port, str_flow_pr, d_ip_ports_protocols):
flow_port = [(flow_port)]
str_flow_pr = [(str_flow_pr)]
if str_flow_sa in d_ip_ports_protocols:
d_ip_ports_protocols[str_flow_sa][0].update(flow_port)
d_ip_ports_protocols[str_flow_sa][1].update(str_flow_pr)
else:
d_ip_ports_protocols[str_flow_sa] = {0: set(), 1: set()}
d_ip_ports_protocols[str_flow_sa][0].update(flow_port)
d_ip_ports_protocols[str_flow_sa][1].update(str_flow_pr)
return d_ip_ports_protocols
def count_uniq_ip(l_d_flow_records, d_filters={}):
"""
Filter the traffic flow data and execute the processing analysis logic for network behavior metrics.
"""
d_ipsrc_level_analysis = dict()
d_ipsrc_ports_protocols = dict()
d_ipdst_level_analysis = dict()
d_ipdst_ports_protocols = dict()
d_ipsrc_level_analysis_peringress = dict()
d_ipsrc_peringress_ports_protocols = dict()
d_ipdst_level_analysis_peregress = dict()
d_ipdst_peregress_ports_protocols = dict()
for flow in l_d_flow_records:
if futil.matches_desired_set(flow, d_filters):
str_flow_sa = fputil.record_to_ip(flow['sa'])
str_flow_da = fputil.record_to_ip(flow['da'])
flow_bytes = fputil.record_to_numeric(flow['ibyt'])
flow_packets = fputil.record_to_numeric(flow['ipkt'])
flow_sp = fputil.record_to_numeric(flow['sp'])
flow_dp = fputil.record_to_numeric(flow['dp'])
str_flow_pr = fputil.proto_int_to_str(flow['pr'])
flow_ingress_src_macaddr = fputil.record_to_mac(flow['ismc']).replace(':', '').upper()
flow_egress_dst_macaddr = fputil.record_to_mac(flow['odmc']).replace(':', '').upper()
flow_ingress_asn = ""
if flow_ingress_src_macaddr in d_mapping_macaddress_member_asn:
flow_ingress_asn = d_mapping_macaddress_member_asn[flow_ingress_src_macaddr]
flow_egress_asn = ""
if flow_egress_dst_macaddr in d_mapping_macaddress_member_asn:
flow_egress_asn = d_mapping_macaddress_member_asn[flow_egress_dst_macaddr]
sa_origin_asn, sa_ip_prefix = do_prefix_lookup_forip(str_flow_sa)
da_origin_asn, da_ip_prefix = do_prefix_lookup_forip(str_flow_da)
sa_country_code = None
if sa_ip_prefix is not None:
if str_flow_sa in d_global_get_ip_country:
sa_country_code = d_global_get_ip_country[str_flow_sa]
else:
sa_country_code = geoutil.get_country_netacq_edge_from_ip(str_flow_sa, ipm_netacq_db, i_geodb_id)
d_global_get_ip_country[str_flow_sa] = sa_country_code
da_country_code = None
if da_ip_prefix is not None:
if str_flow_da in d_global_get_ip_country:
da_country_code = d_global_get_ip_country[str_flow_da]
else:
da_country_code = geoutil.get_country_netacq_edge_from_ip(str_flow_da, ipm_netacq_db, i_geodb_id)
d_global_get_ip_country[str_flow_da] = da_country_code
# save data processed
#######
# SRC
#######
d_ipsrc_level_analysis = update_log_ip_dict(str_flow_sa,
sa_origin_asn,
sa_ip_prefix,
sa_country_code,
flow_bytes,
flow_packets,
d_ipsrc_level_analysis)
# save info about src ports and flow protocols
d_ipsrc_ports_protocols = update_log_ip_ports_protocols_dict(str_flow_sa,
flow_sp,
str_flow_pr,
d_ipsrc_ports_protocols)
#######
# DST
#######
d_ipdst_level_analysis = update_log_ip_dict(str_flow_da,
da_origin_asn,
da_ip_prefix,
da_country_code,
flow_bytes,
flow_packets,
d_ipdst_level_analysis)
# save info about dst ports and flow protocols
d_ipdst_ports_protocols = update_log_ip_ports_protocols_dict(str_flow_da,
flow_dp,
str_flow_pr,
d_ipdst_ports_protocols)
if is_to_process_data_per_ingress_egress:
##############
# INGRESS/SRC
##############
if flow_ingress_asn != "":
d_ipsrc_level_analysis_peringress = update_log_ip_dict_per_ingress_egress_point(flow_ingress_asn,
str_flow_sa,
sa_origin_asn,
sa_ip_prefix,
sa_country_code,
flow_bytes,
flow_packets,
d_ipsrc_level_analysis_peringress)
# save info about src ports and flow protocols
d_ipsrc_peringress_ports_protocols = update_log_ip_ports_protocols_dict_per_ingress_egress_point(flow_ingress_asn,
str_flow_sa,
flow_sp,
str_flow_pr,
d_ipsrc_peringress_ports_protocols)
##############
# EGRESS/DST
##############
if flow_egress_asn != "":
d_ipdst_level_analysis_peregress = update_log_ip_dict_per_ingress_egress_point(flow_egress_asn,
str_flow_da,
da_origin_asn,
da_ip_prefix,
da_country_code,
flow_bytes,
flow_packets,
d_ipdst_level_analysis_peregress)
d_ipdst_peregress_ports_protocols = update_log_ip_ports_protocols_dict_per_ingress_egress_point(flow_egress_asn,
str_flow_da,
flow_dp,
str_flow_pr,
d_ipdst_peregress_ports_protocols)
return d_ipsrc_level_analysis, d_ipdst_level_analysis, \
d_ipsrc_level_analysis_peringress, d_ipdst_level_analysis_peregress,\
d_ipsrc_ports_protocols, d_ipdst_ports_protocols, \
d_ipsrc_peringress_ports_protocols, d_ipdst_peregress_ports_protocols
def profile_worker(fn_input):
cProfile.runctx('do_iplevel_analysis(fn_input)', globals(), locals(), 'profile-%s.out' %fn_input.split("/")[-1:])
def do_iplevel_analysis(fn_input):
"""
Execute analysis over the IP level information from the file.
:param fn_input:
:return:
"""
fn_output_pattern_src_addr = "ip=src"
fn_output_pattern_dst_addr = "ip=dst"
fn_output_pattern_src_addr_ingress = "point=ingress"
fn_output_pattern_dst_addr_egress = "point=egress"
try:
reader = fputil.get_flowrecords_from_flowdata_file(fn_input)
d_ipsrc_level_analysis, \
d_ipdst_level_analysis, \
d_ipsrc_level_analysis_peringress, \
d_ipdst_level_analysis_peregress,\
d_ipsrc_ports_protocols, \
d_ipdst_ports_protocols, \
d_ipsrc_peringress_ports_protocols, \
d_ipdst_peregress_ports_protocols = count_uniq_ip(reader, d_filters=d_filter_to_apply)
# save data log for the whole traffic
save_to_logfile(d_ipsrc_level_analysis, d_ipsrc_ports_protocols, fn_input, fn_output_pattern_src_addr, filter_ip_version, filter_svln)
save_to_logfile(d_ipdst_level_analysis, d_ipdst_ports_protocols, fn_input, fn_output_pattern_dst_addr, filter_ip_version, filter_svln)
if is_to_process_data_per_ingress_egress:
# save data log per ingress and egress points
save_data_per_ingress_egress_point_to_logfile(d_ipsrc_level_analysis_peringress,
d_ipsrc_peringress_ports_protocols,
fn_input,
fn_output_pattern_src_addr_ingress,
filter_ip_version, filter_svln)
save_data_per_ingress_egress_point_to_logfile(d_ipdst_level_analysis_peregress,
d_ipdst_peregress_ports_protocols,
fn_input,
fn_output_pattern_dst_addr_egress,
filter_ip_version, filter_svln)
d_ipsrc_level_analysis.clear()
d_ipdst_level_analysis.clear()
d_ipsrc_level_analysis_peringress.clear()
d_ipdst_level_analysis_peregress.clear()
d_ipsrc_ports_protocols.clear()
d_ipdst_ports_protocols.clear()
d_ipsrc_peringress_ports_protocols.clear()
d_ipdst_peregress_ports_protocols.clear()
return 0
except Exception as e:
print('Caught exception in worker thread (file = %s):' % fn_input)
# This prints the type, value, and stack trace of the
# current exception being handled.
traceback.print_exc()
print()
raise e
except KeyboardInterrupt:
# Allow ^C to interrupt from any thread.
sys.stdout.write('\033[0m')
sys.stdout.write('user interrupt\n')
def save_data_per_ingress_egress_point_to_logfile(d_ipsrc_level_analysis_perpoint,
d_analysis_perpoint_ports_protocols,
fn_input, fn_label,
filter_ip_version, filter_svln):
"""
    Save to file the processed data for a specific ingress or egress point of the IXP switching fabric.
:param d_ipsrc_level_analysis_perpoint:
:param fn_input:
:param fn_label:
:param filter_ip_version:
:param filter_svln:
:return: file with the following columns =
"ingress/egress asn;ip[src,dst];origin_asn;bgp_prefix;country;bytes;packets;qty_ports;qty_protocols;flow_ip_count"
"""
fn_output_pattern = "{file_name}.{file_label}.ipv={ip_version}.svln={filter_svln}.txt.gz"
fn_output_name = fn_output_pattern.format(file_name=fn_input,
file_label=fn_label,
ip_version=filter_ip_version,
filter_svln=filter_svln)
with gzip.open(fn_output_name, 'wb') as f:
for ixp_member_point, ixp_member_flow_traffic_data in d_ipsrc_level_analysis_perpoint.iteritems():
ases_point_values = "|".join(str(asn) for asn in ixp_member_point)
for k, v in ixp_member_flow_traffic_data.iteritems():
k_values = ";".join(str(e) for e in k)
flow_ip_count = v[0]
flow_bytes = v[1]
flow_packets = v[2]
ipaddr = k[0]
qty_ports = len(d_analysis_perpoint_ports_protocols[ixp_member_point][ipaddr][0])
qty_protocols = len(d_analysis_perpoint_ports_protocols[ixp_member_point][ipaddr][1])
f.write("".join("{};{};{};{};{};{};{}".format(ases_point_values, k_values, flow_bytes, flow_packets, qty_ports, qty_protocols, flow_ip_count) + "\n"))
f.close()
def save_to_logfile(d_ip_level_analysis, d_ip_ports_protocols, fn_input, fn_label, filter_ip_version, filter_svln):
"""
    Save to file the processed data for all traffic seen in the 5-min bin.
:param d_ip_level_analysis:
:param fn_input:
:param fn_label:
:param filter_ip_version:
:param filter_svln:
:return: file with the following columns =
"ip[src,dst];origin_asn;bgp_prefix;country;bytes;packets;qty_ports;qty_protocols;flow_ip_count"
"""
fn_output_pattern = "{file_name}.{file_label}.ipv={ip_version}.svln={filter_svln}.txt.gz"
fn_output_name = fn_output_pattern.format(file_name=fn_input,
file_label=fn_label,
ip_version=filter_ip_version,
filter_svln=filter_svln)
# order ip address by bytes volume desc and write dict result to log file
sorted_d_ip_level_analysis = sorted(d_ip_level_analysis.items(), key=lambda (k, v): v[1], reverse=True)
with gzip.open(fn_output_name, 'wb') as f:
for record in sorted_d_ip_level_analysis:
k_values = ";".join(str(e) for e in record[0])
flow_ip_count = record[1][0]
flow_bytes = record[1][1]
flow_packets = record[1][2]
ipaddr = record[0][0]
qty_ports = len(d_ip_ports_protocols[ipaddr][0])
qty_protocols = len(d_ip_ports_protocols[ipaddr][1])
f.write("".join("{};{};{};{};{};{}".format(k_values, flow_bytes, flow_packets, qty_ports, qty_protocols, flow_ip_count) + "\n"))
f.close()
if __name__ == '__main__':
"""
Build cli parameter parser.
"""
parser = argparse.ArgumentParser(prog=sys.argv[0], description='Traffic classification taking Apache Avro '
'as input files.')
parser.add_argument('-tw', dest='time_window_op', required=True,
help="Time window to load files to process. Format: start-end, %Y%m%d%H%M-%Y%m%d%H%M")
parser.add_argument('-flowdir', dest='flows_dir_path', required=True,
help="Directory where are the flows to process")
parser.add_argument('-tmpdir', dest='temp_path', required=True,
help="Temporary dir to save output files")
parser.add_argument('-np', dest='number_concur_process',
help="Number of concurrent process to execute")
parser.add_argument('-filter', dest='flow_filter', required=True,
help="Filter to apply over each flow file read")
parser.add_argument('-process_ingress_egress_data', dest='to_process_data_per_ingress_egress', type=int, choices=[0, 1], required=True,
help="Indicates if it is necessary to break down data per category "
"into a view per ingress and egress ASes."
"Options: 1 - yes or 0 - no")
parser.add_argument('-pc', dest='to_process_categories', type=int, choices=[0, 1], required=True,
help="Process the categories flow traffic data files - incone, ouf-of-cone, unverifiable. "
"Options: 1 - yes or 0 - no (meaning that the whole traffic will be analyzed)")
parser.add_argument('-cat', dest='set_of_categories_to_process', required=False,
help="Define the set of categories that must be processed to compute the metrics. "
" Syntax: '[incone, out-of-cone, unverifiable]' ")
parser.add_argument('-ccid', dest='customercone_algoid', type=int, choices=[4, 8], required=True,
help="Options: "
"4 - IMC17 FullCone "
"8 - CoNEXT19 Prefix-Level Customer Cone.")
# ------------------------------------------------------------------
# parse parameters
# ------------------------------------------------------------------
parsed_args = parser.parse_args()
# set up of variables to generate flow file names
if parsed_args.time_window_op:
tw_start, tw_end = cmdutil.get_timewindow_to_process(parsed_args.time_window_op)
# number of concurrent process (performance control)
if parsed_args.number_concur_process:
n_cores_to_use = int(parsed_args.number_concur_process)
else:
n_cores_to_use = None
# Customer Cone method algorithm
id_customer_cone_algo_dataset = parsed_args.customercone_algoid
# Process data Ingress and Egress
is_to_process_data_per_ingress_egress = parsed_args.to_process_data_per_ingress_egress
if parsed_args.set_of_categories_to_process:
l_set_of_filters_traffic_categories = ast.literal_eval(parsed_args.set_of_categories_to_process)
# directory paths set up for the conversion process
flowfiles_basedir = parsed_args.flows_dir_path
base_tmp_dir = parsed_args.temp_path
# Filter to apply to each flow data file
if parsed_args.flow_filter:
d_filter_to_apply = ast.literal_eval(parsed_args.flow_filter)
filter_ip_version = d_filter_to_apply['ip']
if 'svln' in d_filter_to_apply:
filter_svln = d_filter_to_apply['svln']
else:
filter_svln = "all"
# ------------------------------------------------------------------
# Filtering logic processes start
# ------------------------------------------------------------------
start = timer()
# init global dict to avoid duplicated computation (used together with MaxMind GeoIP)
d_global_get_prefix24 = dict()
d_global_get_ip_country = dict()
geolocation_db_path = cons.DEFAULT_PATH_TO_GEOLITE2_DATABASE
print "---Loading Routeviews ip2prefix-asn database file..."
f_global_asndb_routeviews = load_database_ip2prefixasn_routeviews_by_timewindow(tw_start)
print "---Loading netacq-edge geo database file..."
ipm_netacq_db, i_geodb_id = geoutil.load_netacq_edge_geodb_by_timewindow(tw_start)
print "---Loading mac2asn mapping data..."
d_mapping_macaddress_member_asn = ixp_member_util.build_dict_mapping_macaddress_members_asns(cons.DEFAULT_MACADDRESS_ASN_MAPPING)
print "---Creating list of files for processing (5-min flow files):"
    # if the user chose to process each traffic category, generate the input file names for the multiprocessing step
default_flowtraffic_datafile = ".avro"
if parsed_args.to_process_categories:
pattern_file_extension = '{def_ext}.idcc={id_cc_version}.class={lbl_class}'
# if enabled to lookup to a specific class, prepare the list of files for only these categories
# possibilities and indexing [incone, out-of-cone, unverifiable]
if parsed_args.set_of_categories_to_process:
l_pattern_file_extensions = list()
i_index = 0
for lbl_category in l_set_of_filters_traffic_categories:
#########
# incone
if lbl_category == 1 and i_index == 0:
print "Preparing to process IN-CONE traffic."
l_pattern_file_extensions.append(
pattern_file_extension.format(def_ext=default_flowtraffic_datafile,
id_cc_version=id_customer_cone_algo_dataset,
lbl_class=cons.CATEGORY_LABEL_AS_SPECIFIC_CLASS_INCONE)
)
##############
# out-of-cone
if lbl_category == 1 and i_index == 1:
print "Preparing to process OUT-OF-CONE traffic."
l_pattern_file_extensions.append(
pattern_file_extension.format(def_ext=default_flowtraffic_datafile,
id_cc_version=id_customer_cone_algo_dataset,
lbl_class=cons.CATEGORY_LABEL_AS_SPECIFIC_CLASS_OUTOFCONE)
)
##############
# unverifiable
if lbl_category == 1 and i_index == 2:
print "Preparing to process UNVERIFIABLE traffic."
l_pattern_file_extensions.append(
pattern_file_extension.format(def_ext=default_flowtraffic_datafile,
id_cc_version=id_customer_cone_algo_dataset,
lbl_class=cons.CATEGORY_LABEL_UNVERIFIABLE_CLASS)
)
i_index += 1
else:
print "Preparing to process IN-CONE, OUT-OF-CONE and UNVERIFIABLE traffic flow data."
l_pattern_file_extensions = [pattern_file_extension.format(def_ext=default_flowtraffic_datafile, id_cc_version=id_customer_cone_algo_dataset, lbl_class=cons.CATEGORY_LABEL_AS_SPECIFIC_CLASS_INCONE),
pattern_file_extension.format(def_ext=default_flowtraffic_datafile, id_cc_version=id_customer_cone_algo_dataset, lbl_class=cons.CATEGORY_LABEL_AS_SPECIFIC_CLASS_OUTOFCONE),
pattern_file_extension.format(def_ext=default_flowtraffic_datafile, id_cc_version=id_customer_cone_algo_dataset, lbl_class=cons.CATEGORY_LABEL_UNVERIFIABLE_CLASS)]
l_filenames_to_process = cmdutil.generate_filenames_to_process_bysetof_extensions(tw_start, tw_end,
flowfiles_basedir,
l_pattern_file_extensions)
else:
l_filenames_to_process = cmdutil.generate_flow_filenames_to_process(tw_start, tw_end,
flowfiles_basedir,
default_flowtraffic_datafile)
print "---Started multiprocessing classification of traffic..."
mp = mpPool.MultiprocessingPool(n_cores_to_use)
results = mp.get_results_map_multiprocessing(do_iplevel_analysis, l_filenames_to_process)
end = timer()
print "---Total execution time: {} seconds".format(end - start)
print "---Sending e-mail notification about the execution status:"
notifutil.send_notification_end_of_execution(sys.argv, sys.argv[0], start, end)
| 2.203125 | 2 |
pages/urls.py | mena-hub/social_site | 0 | 12789766 | <filename>pages/urls.py
from django.urls import path
from pages.views import PageListView, PageDetailView, PageCreateView, PageDeleteView, PageUpdateView
pages_patterns = ([
path('', PageListView.as_view(), name='pages'),
path('<int:pk>/<slug:slug>/', PageDetailView.as_view(), name="page"),
path('create/', PageCreateView.as_view(), name="create"),
path('delete/<int:pk>/', PageDeleteView.as_view(), name="delete"),
path('update/<int:pk>/', PageUpdateView.as_view(), name="update"),
], 'pages') | 2.125 | 2 |
databass/compile/scan.py | ivychen/databass-public | 6 | 12789767 | from ..ops import GroupBy
from .translator import *
class SubQueryTranslator(Translator):
def __init__(self, *args, **kwargs):
super(SubQueryTranslator, self).__init__(*args, **kwargs)
def produce(self, ctx):
self.child_translator.produce(ctx)
def consume(self, ctx):
self.parent_translator.consume(ctx)
class ScanTranslator(Translator):
def __init__(self, *args, **kwargs):
super(ScanTranslator, self).__init__(*args, **kwargs)
self.l_o = None
def consume(self, ctx):
self.parent_translator.consume(ctx)
| 2.578125 | 3 |
ptsr/data/srdata.py | Weepingchestnut/rcan-it | 0 | 12789768 | <reponame>Weepingchestnut/rcan-it
import os
import glob
import random
import pickle
from ptsr.data import common
from ptsr.utils.utility import calc_psnr_numpy
import numpy as np
import imageio
from skimage.transform import resize
import torch
import torch.utils.data as data
class SRData(data.Dataset):
def __init__(self, cfg, name='', train=True, benchmark=False):
self.cfg = cfg
self.name = name
self.train = train
self.split = 'train' if train else 'test'
self.do_eval = True
self.benchmark = benchmark
self.input_large = False
self.scale = cfg.DATASET.DATA_SCALE
self.idx_scale = 0
self._set_filesystem(cfg.DATASET.DATA_DIR)
if cfg.DATASET.DATA_EXT.find('img') < 0:
path_bin = os.path.join(self.apath, 'bin')
os.makedirs(path_bin, exist_ok=True)
list_hr, list_lr = self._scan()
if cfg.DATASET.DATA_EXT.find('bin') >= 0:
# Binary files are stored in 'bin' folder
# If the binary file exists, load it. If not, make it.
list_hr, list_lr = self._scan()
self.images_hr = self._check_and_load(
cfg.DATASET.DATA_EXT, list_hr, self._name_hrbin()
)
self.images_lr = [
self._check_and_load(cfg.DATASET.DATA_EXT,
l, self._name_lrbin(s))
for s, l in zip(self.scale, list_lr)
]
else:
if cfg.DATASET.DATA_EXT.find('img') >= 0 or benchmark:
self.images_hr, self.images_lr = list_hr, list_lr
elif cfg.DATASET.DATA_EXT.find('sep') >= 0:
os.makedirs(
self.dir_hr.replace(self.apath, path_bin),
exist_ok=True
)
for s in self.scale:
os.makedirs(
os.path.join(
self.dir_lr.replace(self.apath, path_bin),
'X{}'.format(s)
),
exist_ok=True
)
self.images_hr, self.images_lr = [], [[] for _ in self.scale]
for h in list_hr:
b = h.replace(self.apath, path_bin)
b = b.replace(self.ext[0], '.pt')
self.images_hr.append(b)
self._check_and_load(
cfg.DATASET.DATA_EXT, [h], b, verbose=True, load=False
)
for i, ll in enumerate(list_lr):
for l in ll:
b = l.replace(self.apath, path_bin)
b = b.replace(self.ext[1], '.pt')
self.images_lr[i].append(b)
self._check_and_load(
cfg.DATASET.DATA_EXT, [l], b, verbose=True, load=False
)
if train:
            self.n_train_samples = cfg.SOLVER.ITERATION_TOTAL * cfg.SOLVER.SAMPLES_PER_BATCH  # total number of samples drawn over the whole training run (one virtual epoch)
n_patches = cfg.SOLVER.SAMPLES_PER_BATCH * cfg.SOLVER.TEST_EVERY
n_images = len(cfg.DATASET.DATA_TRAIN) * len(self.images_hr)
if n_images == 0:
self.repeat = 0
else:
self.repeat = max(n_patches // n_images, 1)
def _scan(self):
list_hr = []
list_lr = [[] for _ in self.scale]
for i in range(self.begin, self.end + 1):
filename = '{:0>4}'.format(i)
list_hr.append(os.path.join(self.dir_hr, filename + self.ext[0]))
for si, s in enumerate(self.scale):
list_lr[si].append(os.path.join(
self.dir_lr,
'X{}/{}x{}{}'.format(s, filename, s, self.ext[1])
))
return list_hr, list_lr
def _set_filesystem(self, dir_data):
self.apath = os.path.join(dir_data, self.name)
self.dir_hr = os.path.join(self.apath, 'HR')
self.dir_lr = os.path.join(self.apath, 'LR_bicubic')
if self.input_large:
self.dir_lr += 'L'
self.ext = ('.png', '.png')
def _name_hrbin(self):
return os.path.join(
self.apath,
'bin',
'{}_bin_HR.pt'.format(self.split)
)
def _name_lrbin(self, scale):
return os.path.join(
self.apath,
'bin',
'{}_bin_LR_X{}.pt'.format(self.split, scale)
)
def _check_and_load(self, ext, l, f, verbose=True, load=True):
if os.path.isfile(f) and ext.find('reset') < 0:
if load:
if verbose:
print('Loading {}...'.format(f))
with open(f, 'rb') as _f:
ret = pickle.load(_f)
                return ret[:len(l)]  # read back only the requested number of images
else:
return None
else:
if verbose:
if ext.find('reset') >= 0:
print('Making a new binary: {}'.format(f))
else:
print('{} does not exist. Now making binary...'.format(f))
if ext.find('bin') >= 0:
print('Bin pt file with name and image')
b = [{
'name': os.path.splitext(os.path.basename(_l))[0],
'image': imageio.imread(_l)
} for _l in l]
with open(f, 'wb') as _f:
pickle.dump(b, _f)
return b
else:
print('Direct pt file without name or image')
b = imageio.imread(l[0])
with open(f, 'wb') as _f:
pickle.dump(b, _f)
# return b
def __getitem__(self, idx):
lr, hr, filename = self._load_file(idx)
pair = self.get_patch(lr, hr)
pair = common.set_channel(*pair, n_channels=self.cfg.DATASET.CHANNELS)
pair_t = common.np2Tensor(*pair, rgb_range=self.cfg.DATASET.RGB_RANGE)
return pair_t[0], pair_t[1], filename
def __len__(self):
if self.train:
return self.n_train_samples
else:
return len(self.images_hr)
def _get_index(self, idx):
if not self.train:
return idx
idx = random.randrange(self.n_train_samples)
return idx % len(self.images_hr)
def _load_file(self, idx):
idx = self._get_index(idx)
f_hr = self.images_hr[idx]
f_lr = self.images_lr[self.idx_scale][idx]
if self.cfg.DATASET.DATA_EXT.find('bin') >= 0:
filename = f_hr['name']
hr = f_hr['image']
lr = f_lr['image']
else:
filename, _ = os.path.splitext(os.path.basename(f_hr))
if self.cfg.DATASET.DATA_EXT == 'img' or self.benchmark:
hr = imageio.imread(f_hr)
lr = imageio.imread(f_lr)
elif self.cfg.DATASET.DATA_EXT.find('sep') >= 0:
with open(f_hr, 'rb') as _f:
hr = pickle.load(_f)
with open(f_lr, 'rb') as _f:
lr = pickle.load(_f)
return lr, hr, filename
def get_patch(self, lr, hr):
scale = self.scale[self.idx_scale]
if not self.train:
ih, iw = lr.shape[:2]
hr_patch = hr[0:ih * scale, 0:iw * scale]
lr_patch = lr
return lr_patch, hr_patch
# rejection sampling for training
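        # Editor's note: patches that plain bicubic interpolation already reconstructs well
        # (PSNR >= rej_cfg.MAX_PSNR) carry little training signal, so they are resampled and
        # kept only with probability rej_cfg.PROB; harder patches are accepted immediately.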
while True:
lr_patch, hr_patch = common.get_patch(
lr, hr, patch_size=self.cfg.DATASET.OUT_PATCH_SIZE,
scale=scale, multi=(len(self.scale) > 1),
input_large=self.input_large)
rej_cfg = self.cfg.DATASET.REJECTION_SAMPLING
if not rej_cfg.ENABLED:
break
bicub_sr = resize(lr_patch, hr_patch.shape, order=3, # bicubic
preserve_range=True, anti_aliasing=False)
bicub_psnr = calc_psnr_numpy(bicub_sr, hr_patch, scale,
float(self.cfg.DATASET.RGB_RANGE))
if bicub_psnr < rej_cfg.MAX_PSNR or random.random() < rej_cfg.PROB:
break
aug_cfg = self.cfg.AUGMENT
if aug_cfg.ENABLED:
lr_patch, hr_patch = common.augment(
lr_patch, hr_patch, invert=aug_cfg.INVERT,
c_shuffle=aug_cfg.CHANNEL_SHUFFLE)
return lr_patch, hr_patch
def set_scale(self, idx_scale):
if not self.input_large:
self.idx_scale = idx_scale
else:
self.idx_scale = random.randint(0, len(self.scale) - 1)
| 2.125 | 2 |
primes/misc/events.py | pyrustic/primes | 0 | 12789769 | class Events:
user_submit_number = "user submit number"
user_click_stop = "user click stop"
user_click_clear = "user click clear"
user_click_exit = "user click exit"
host_send_prime = "host send prime"
core_end_computation = "core end computation"
gui_end_displaying = "gui end displaying"
| 1.515625 | 2 |
homeassistant/components/loopenergy/__init__.py | domwillcode/home-assistant | 23 | 12789770 | <gh_stars>10-100
"""The loopenergy component."""
| 0.863281 | 1 |
painterfun.py | AdamRuddGH/opencv_image_repainter | 83 | 12789771 | print('importing packages...')
import numpy as np
import cv2
import math
import random
import time
import rotate_brush as rb
import gradient
from thready import amap
import os
import threading
canvaslock = threading.Lock()
canvaslock.acquire()
canvaslock.release()
def lockgen(canvas,ym,yp,xm,xp):
# given roi, know which lock.
#
# if left:
# return leftcanvaslock:
# if right:
# return rightcanvaslock:
# if riding:
# reutrn canvaslock:
pass
def load(filename='flower.jpg'):
print('loading',filename,'...')
global imname,flower,canvas,hist
global rescale,xs_small,ys_small,smallerflower
imname = filename.split('.')[0]
# original image
flower = cv2.imread(filename)
xshape = flower.shape[1]
yshape = flower.shape[0]
rescale = xshape/640
# display rescaling: you'll know when it's larger than your screen
if rescale<1:
rescale=1
xs_small = int(xshape/rescale)
ys_small = int(yshape/rescale)
smallerflower = cv2.resize(flower,dsize=(xs_small,ys_small)).astype('float32')/255
# for preview purpose,
# if image too large
# convert to float32
flower = flower.astype('float32')/255
# canvas initialized
canvas = flower.copy()
canvas[:,:] = 0.8
#clear hist
hist=[]
print(filename,'loaded.')
load()
def rn():
return random.random()
def showimg():
if rescale==1:
smallercanvas = canvas
else:
smallercanvas = cv2.resize(canvas,dsize=(xs_small,ys_small),interpolation=cv2.INTER_NEAREST)
i,j,d = wherediff(smallercanvas,smallerflower)
sd = np.mean(d)
print('mean diff:',sd)
d[i,:]=1.0
d[:,j]=1.0
cv2.imshow('canvas',smallercanvas)
cv2.imshow('flower',smallerflower)
cv2.imshow('diff',d)
cv2.waitKey(1)
cv2.waitKey(1)
def destroy():
cv2.destroyAllWindows()
def positive_sharpen(i,overblur=False,coeff=8.): # sharpen edges without darkening relative to the original image
# emphasize the edges
blurred = cv2.blur(i,(5,5))
sharpened = i + (i - blurred) * coeff
if overblur:
return cv2.blur(np.maximum(sharpened,i),(11,11))
return cv2.blur(np.maximum(sharpened,i),(3,3))
def diff(i1,i2,overblur=False):
#calculate the difference of 2 float32 BGR images.
# # use lab
# i1=i1.astype(np.float32)
# i2=i2.astype(np.float32)
# lab1 = cv2.cvtColor(i1,cv2.COLOR_BGR2LAB)
# lab2 = cv2.cvtColor(i2,cv2.COLOR_BGR2LAB)
# d = lab1-lab2
# d = d*d / 10000
# # use rgb
d = (i1-i2)# * [0.2,1.5,1.3]
d = d*d
d = positive_sharpen(np.sum(d,-1),overblur=overblur)
return d
# grayscalize
def wherediff(i1=None,i2=None):
global canvas,flower
if i1 is None:
i1 = canvas
if i2 is None:
i2 = flower
# find out where max difference point is.
d = diff(i1,i2,overblur=True)
i,j = np.unravel_index(d.argmax(),d.shape)
return i,j,d
def get_random_color():
return np.array([rn(),rn(),rn()]).astype('float32')
#danger: default to float64
def limit(x,minimum,maximum):
return min(max(x,minimum),maximum)
# history and replay section
# global history.
hist = []
def record(sth):
hist.append(sth)
# repaint the image from history
def repaint(constraint_angle=False,upscale=1.,batchsize=16):
starttime = time.time()
newcanvas = np.array(canvas).astype('uint8')
# newcanvas = cv2.cvtColor(newcanvas,cv2.COLOR_BGR2BGRA) # fastest format
if upscale!=1.:
newcanvas = cv2.resize(newcanvas,dsize=(int(newcanvas.shape[1]*upscale),int(newcanvas.shape[0]*upscale)))
newcanvas[:,:,:] = int(0.8*255)
def showthis():
showsize = 640
resize_scale = min(showsize/newcanvas.shape[1],1.)
resizedx,resizedy = int(newcanvas.shape[1]*resize_scale),int(newcanvas.shape[0]*resize_scale)
smallercanvas = cv2.resize(newcanvas,dsize=(resizedx,resizedy),interpolation=cv2.INTER_NEAREST)
cv2.imshow('repaint',smallercanvas)
cv2.waitKey(1)
def paintone(histitem):
x,y,radius,srad,angle,cb,cg,cr,brushname = histitem
cb,cg,cr = int(cb*255),int(cg*255),int(cr*255)
# cv2.ellipse(newcanvas,(int(x),int(y)),(radius,srad),angle,0,360,color=(cb,cg,cr),thickness=-1)
b,key = rb.get_brush(brushname)
if constraint_angle:
angle = constraint_angle+rn()*20-10
if upscale!=1:
x,y,radius,srad = x*upscale,y*upscale,radius*upscale,srad*upscale
rb.compose(newcanvas,b,x=x,y=y,rad=radius,srad=srad,angle=angle,color=[cb,cg,cr],useoil=True,lock=canvaslock)
k = 0
batch = []
def runbatch(batch):
from thready import amap # multithreading
return amap(paintone,batch)
lastep = 0
while k<len(hist):
while len(batch)<batchsize and k<len(hist):
batch.append(hist[k])
k+=1
runbatch(batch)
print(k,'painted. one of them:',batch[0])
# show progress:
ep = int(k/(newcanvas.shape[1]*upscale)) # larger image => longer wait per show
if ep >lastep:
showthis()
lastep = ep # show every 32p
batch=[]
print(time.time()-starttime,'s elapsed')
showthis()
return newcanvas
import json
def savehist(filename='hist.json'):
f = open(filename,'w')
json.dump(hist,f)
f.close()
def loadhist(filename='hist.json'):
f = open(filename,'r')
global hist
hist = json.load(f)
# end hist section
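# Typical replay workflow (editor's sketch; the filename and upscale factor are illustrative):
#   savehist('hist.json')                      # persist the recorded strokes after painting
#   loadhist('hist.json')                      # ...or restore them in a later session
#   big = repaint(upscale=2., batchsize=16)    # re-render every stroke at 2x resolution
#   cv2.imwrite('repainted.png', big)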
def paint_one(x,y,brushname='random',angle=-1.,minrad=10,maxrad=60):
oradius = rn()*rn()*maxrad+minrad
fatness = 1/(1+rn()*rn()*6)
brush,key = rb.get_brush(brushname)
def intrad(orad):
#obtain integer radius and shorter-radius
radius = int(orad)
srad = int(orad*fatness+1)
return radius,srad
radius,srad = intrad(oradius)
#set initial angle
if angle == -1.:
angle = rn()*360
# set initial color
# c = get_random_color()
# sample color from image => converges faster.
c = flower[int(y),int(x),:]
delta = 1e-4
# get copy of square ROI area, to do drawing and calculate error.
def get_roi(newx,newy,newrad):
radius,srad = intrad(newrad)
xshape = flower.shape[1]
yshape = flower.shape[0]
yp = int(min(newy+radius,yshape-1))
ym = int(max(0,newy-radius))
xp = int(min(newx+radius,xshape-1))
xm = int(max(0,newx-radius))
if yp<=ym or xp<=xm:
# if zero w or h
raise NameError('zero roi')
ref = flower[ym:yp,xm:xp]
bef = canvas[ym:yp,xm:xp]
aftr = np.array(bef)
# print(flower.dtype,canvas.dtype,ref.dtype)
return ref,bef,aftr
# paint one stroke with given config and return the error.
def paint_aftr_w(color,angle,nx,ny,nr):
ref,bef,aftr = get_roi(nx,ny,nr)
radius,srad = intrad(nr)
# cv2.circle(aftr,(radius,radius),radius,color=color,thickness=-1)
# cv2.ellipse(aftr,(radius,radius),(radius,srad),angle,0,360,color=color,thickness=-1)
rb.compose(aftr,brush,x=radius,y=radius,rad=radius,srad=srad,angle=angle,color=color,usefloat=True,useoil=False)
# if useoil here set to true: 2x slow down + instability
err_aftr = np.mean(diff(aftr,ref))
return err_aftr
# finally paint the same stroke onto the canvas.
def paint_final_w(color,angle,nr):
radius,srad = intrad(nr)
# cv2.circle(canvas,(x,y), radius, color=color,thickness=-1)
# cv2.ellipse(canvas,(int(x),int(y)),(radius,srad),angle,0,360,color=color,thickness=-1)
rb.compose(canvas,brush,x=x,y=y,rad=radius,srad=srad,angle=angle,color=color,usefloat=True,useoil=True,lock=canvaslock)
# enable oil effects on final paint.
# np.float64 will cause problems
rec = [x,y,radius,srad,angle,color[0],color[1],color[2],brushname]
rec = [float(r) if type(r)==np.float64 or type(r)==np.float32 else r for r in rec]
record(rec)
# log it!
# given err, calculate gradient of parameters wrt to it
def calc_gradient(err):
b,g,r = c[0],c[1],c[2]
cc = b,g,r
err_aftr = paint_aftr_w((b+delta,g,r),angle,x,y,oradius)
gb = err_aftr - err
err_aftr = paint_aftr_w((b,g+delta,r),angle,x,y,oradius)
gg = err_aftr - err
err_aftr = paint_aftr_w((b,g,r+delta),angle,x,y,oradius)
gr = err_aftr - err
err_aftr = paint_aftr_w(cc,(angle+5.)%360,x,y,oradius)
ga = err_aftr - err
err_aftr = paint_aftr_w(cc,angle,x+2,y,oradius)
gx = err_aftr - err
err_aftr = paint_aftr_w(cc,angle,x,y+2,oradius)
gy = err_aftr - err
err_aftr = paint_aftr_w(cc,angle,x,y,oradius+3)
gradius = err_aftr - err
return np.array([gb,gg,gr])/delta,ga/5,gx/2,gy/2,gradius/3,err
# max and min steps for gradient descent
tryfor = 12
mintry = 3
for i in range(tryfor):
try: # might have error
# what is the error at ROI?
ref,bef,aftr = get_roi(x,y,oradius)
orig_err = np.mean(diff(bef,ref))
# do the painting
err = paint_aftr_w(c,angle,x,y,oradius)
# if error decreased:
if err<orig_err and i>=mintry :
paint_final_w(c,angle,oradius)
return True,i
# if not satisfactory
# calculate gradient
grad,anglegrad,gx,gy,gradius,err = calc_gradient(err)
except NameError as e:
print(e)
print('error within calc_gradient')
return False,i
if printgrad: #debug purpose.
if i==0:
print('----------')
print('orig_err',orig_err)
print('ep:{}, err:{:3f}, color:{}, angle:{:2f}, xy:{:2f},{:2f}, radius:{:2f}'.format(i,err,c,angle,x,y,oradius))
# do descend
if i<tryfor-1:
c = c - (grad*.3).clip(max=0.3,min=-0.3)
c = c.clip(max=1.,min=0.)
angle = (angle - limit(anglegrad*100000,-5,5))%360
x = x - limit(gx*1000*radius,-3,3)
y = y - limit(gy*1000*radius,-3,3)
oradius = oradius* (1-limit(gradius*20000,-0.2,.2))
oradius = limit(oradius,7,100)
# print('after desc:x:{:2f},y:{:2f},angle:{:2f},oradius:{:5f}'
# .format(x,y,angle,oradius))
return False,tryfor
def putstrokes(howmany):
def samplepoints():
# sample a lot of points from one error image - save computation cost
point_list = []
y,x,d = wherediff()
phasemap = gradient.get_phase(flower)
# while not enough points:
while len(point_list)<howmany:
# randomly pick one point
yshape,xshape = flower.shape[0:2]
ry,rx = int(rn()*yshape),int(rn()*xshape)
# accept with high probability if error is large
# and vice versa
if d[ry,rx]>0.5*rn():
# get gradient orientation info from phase map
phase = phasemap[ry,rx] # phase should be between [0,2pi)
# choose direction perpendicular to gradient
angle = (phase/math.pi*180+90)%360
# angle = 22.5
point_list.append((ry,rx,angle))
return point_list
def pcasync(tup):
y,x,angle = tup
b,key = rb.get_brush(key='random') # get a random brush
return paint_one(x,y,brushname=key,minrad=10,maxrad=50,angle=angle) #num of epoch
if True:
from thready import amap # multithreading
point_list = samplepoints()
return amap(pcasync,point_list)
else: # single threading test
point_list = samplepoints()
res={}
for idx,item in enumerate(point_list):
print('single threaded mode.',idx)
res[idx] = pcasync(item)
return res
# autosave during canvas painting
dosaveimage = True
# dosaveimage = False
# gradient debug info print
printgrad = False
# printgrad = True
# run the whole thing
def r(epoch=1):
# filename prefix for each run
seed = int(rn()*1000)
print('running...')
st = time.time()
# timing counter for autosave and showimg()
timecounter = 0
showcounter = 0
for i in range(epoch):
loopfor = 1
paranum = 256
# number of stroke tries per batch, sent to thread pool
# smaller number decreases efficiency
succeeded = 0 # how many strokes being placed
ti = time.time()
# average step of gradient descent performed
avgstep=0.
for k in range(loopfor):
res = putstrokes(paranum) # res is a map of results
for r in res:
status,step = res[r]
avgstep += step
succeeded += 1 if status else 0
avgstep/=loopfor*paranum
steptime = time.time()-ti
tottime = time.time()-st
#info out
print('epoch',i,'/',epoch ,'succeeded:',succeeded,'/',loopfor*paranum,'avg step:' ,avgstep,'time:{:.1f}s, total:{:.1f}s'.format(steptime,tottime))
#autosave
timecounter+=steptime
if(timecounter>20):
timecounter=0
if dosaveimage:
print('saving to disk...')
if not os.path.exists('./'+imname):
os.mkdir('./'+imname)
cv2.imwrite(imname+'/{}_{:04d}.png'.format(seed,i),canvas*255)
print('saved.')
# refresh view
showcounter+=steptime
if(showcounter>3):
showcounter=0
showimg()
showimg()
| 2.453125 | 2 |
snapx/setup.py | ruth-ann/snap-python | 242 | 12789772 | from setuptools import setup, find_packages
if __name__ == "__main__":
setup(
name="snapx",
author="<EMAIL>",
version="0.0.1",
packages=find_packages(),
description="""SnapX: An experimental SNAP API with NetworkX-like interface"""
)
| 1.40625 | 1 |
WX_backend/RESTful_backend.py | StuGRua/Flask_WXAPP_SSDUTHelper | 0 | 12789773 | <filename>WX_backend/RESTful_backend.py
import datetime
from flask import jsonify, abort, request
from flask import url_for
from model.model import *
# WX_APPID = 'wx933173854a5a9ba2'
# WX_SECRET = '<KEY>'
# specific import configuration
# import according to your needs; for reference only
@app.route('/api/what', methods=['GET'])
def what_info():
return "无内鬼"
@app.route('/api/users', methods=['POST'])
def new_user():
print(request.json)
rqjson = request.json
userid = rqjson.get('userid')
phonenumber = rqjson.get('phonenumber')
    password = rqjson.get('password')
user_name = rqjson.get('user_name')
major = rqjson.get('major')
grade = rqjson.get('grade')
if userid is None or password is None or phonenumber is None:
abort(400) # missing arguments
if User.query.filter_by(userid=userid).first() is not None or User.query.filter_by(
phonenumber=phonenumber).first() is not None:
abort(400) # existing user
user = User(userid=userid, phonenumber=phonenumber, user_name=user_name, major=major, grade=grade)
user.hash_password(password)
db.session.add(user)
db.session.commit()
return (jsonify({'userid': user.userid}), 201,
{'Location': url_for('get_user', id=user.userid, _external=True)})
@app.route('/api/users/<int:id>')
def get_user(id):
user = User.query.get(id)
if not user:
abort(400)
return jsonify({'userid': user.userid, 'username': user.user_name})
@app.route('/api/token')
@auth.login_required
def get_auth_token():
token = g.user.generate_auth_token(600)
return jsonify({'token': token, 'duration': 600, 'user_name': g.user.user_name})
@app.route('/api/resource')
@auth.login_required
def get_resource():
return jsonify({'data': 'Hello, %s!' % g.user.userid})
@app.route('/api/all_undo_order')
@auth.login_required
def all_undo_order():
pre_res = db.session.query(Order).filter(Order.order_stat == '未接受').all()
res = OrderToDict(pre_res)
return jsonify(res), 201
@app.route('/api/my_order_pub')
@auth.login_required
def my_order_pub():
res = Order.query.filter_by(pub_id=g.user.userid).all()
res = OrderToDict(res)
return jsonify(res), 201
@app.route('/api/my_order_recv')
@auth.login_required
def my_order_recv():
res = Order.query.filter_by(rec_id=g.user.userid)
res = OrderToDict(res)
return jsonify(res), 201
@app.route('/api/publish_order', methods=['POST'])
@auth.login_required
def publish_order():
rqjson = request.json
print(rqjson)
order_title = rqjson.get('order_title')
pub_id = g.user.userid
# rec_id=rqjson.get('rec_id')
start_time = datetime.datetime.strptime(rqjson.get('start_time'), '%Y-%m-%d %H:%M')
end_time = datetime.datetime.strptime(rqjson.get('end_time'), '%Y-%m-%d %H:%M')
order_payment = rqjson.get('order_payment')
order_info = rqjson.get('order_info')
if order_title is None or pub_id is None or order_payment is None:
abort(400) # missing arguments
if User.query.filter_by(userid=pub_id).first() is None:
abort(400) # no existing user
order = Order(order_title=order_title, pub_id=pub_id, start_time=start_time, end_time=end_time,
order_payment=order_payment, order_info=order_info)
db.session.add(order)
db.session.commit()
return (jsonify({'order_id': order.order_id, 'order_name': order.order_title}), 201,
{'Location': url_for('get_user', id=g.user.userid, _external=True)})
@app.route('/api/get_order', methods=['POST'])
@auth.login_required
def get_order():
rqjson = request.json
print(rqjson)
order_id = rqjson.get('order_id')
if order_id is None:
abort(400) # missing arguments
if Order.query.filter_by(order_id=order_id).first() is None:
abort(400) # no existing user
    db.session.query(Order).filter(Order.order_id == order_id). \
        update({"order_stat": '正在进行', "rec_id": g.user.userid})
    db.session.commit()
    return jsonify({'order_id': order_id, "order_stat": '正在进行'}), 201
@app.route('/api/finish_order', methods=['POST'])
@auth.login_required
def finish_order():
rqjson = request.json
print(rqjson)
order_id = rqjson.get('order_id')
if order_id is None:
abort(400) # missing arguments
if Order.query.filter_by(order_id=order_id, pub_id=g.user.userid).first() is None:
abort(400) # no existing user
    db.session.query(Order).filter(Order.order_id == order_id). \
        update({"order_stat": '已完成'})
    db.session.commit()
    return jsonify({'order_id': order_id, "order_stat": '已完成'}), 201
@app.route('/api/del_order', methods=['POST'])
@auth.login_required
def del_order():
rqjson = request.json
print(rqjson)
order_id = rqjson.get('order_id')
if order_id is None:
abort(400) # missing arguments
pre_del = db.session.query(Order).filter(Order.order_id == order_id).first()
if pre_del is None:
abort(400) # no existing user
db.session.delete(pre_del)
db.session.commit()
return jsonify({'order_id': order_id, "order_stat": '正在进行'}), 201
@app.route('/api/modify_order', methods=['POST'])
@auth.login_required
def modify_order():
rqjson = request.json
print(rqjson)
order_id = rqjson.get('order_id')
order_title = rqjson.get('order_title')
pub_id = g.user.userid
# rec_id=rqjson.get('rec_id')
start_time = datetime.datetime.strptime(rqjson.get('start_time'), '%Y-%m-%d')
end_time = datetime.datetime.strptime(rqjson.get('end_time'), '%Y-%m-%d')
order_payment = rqjson.get('order_payment')
order_info = rqjson.get('order_info')
if order_title is None or pub_id is None or order_payment is None:
abort(400) # missing arguments
    pre_fix = db.session.query(Order).filter(Order.pub_id == pub_id, Order.order_id == order_id)
    if pre_fix.first() is None:
        abort(400)  # no existing order for this user
    pre_fix.update(
        {'order_title': order_title, 'start_time': start_time, 'end_time': end_time, 'order_payment': order_payment,
         'order_info': order_info})
db.session.commit()
return (jsonify({'order_id': order_id}), 201,
{'Location': url_for('get_user', id=g.user.userid, _external=True)})
def SimpleOrderToDict(pre_res):
if not pre_res:
return
res = []
for x in pre_res:
temp = {
'order_id': x.order_id,
'order_title': x.order_title,
'pub_id': x.pub_id,
'start_time': x.start_time,
'end_time': x.end_time,
'order_stat': x.order_stat,
'order_payment': x.order_payment,
}
res.append(temp)
return res
def OrderToDict(pre_res):
if not pre_res:
return
res = []
for x in pre_res:
temp = {
'order_id': x.order_id,
'order_title': x.order_title,
'pub_id': x.pub_id,
'rec_id': x.rec_id,
'start_time': x.start_time,
'end_time': x.end_time,
'order_stat': x.order_stat,
'order_payment': x.order_payment,
'order_info': x.order_info
}
res.append(temp)
return res
if __name__ == '__main__':
    # only one run() call is reachable; bind to all interfaces on port 5000
    app.run(host='0.0.0.0', port=5000, debug=True)
# curl -H "Content-Type:application/json" -H "Data_Type:msg" -X POST --data "{\"username\":\"xxx\",\"password\":\"<PASSWORD>\"}" http://127.0.0.1:5000/api/users
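# Illustrative requests (a sketch; the credentials and ids are placeholder
# assumptions, and @auth.login_required is assumed to accept HTTP Basic Auth,
# which is configured in model.model and not shown here):
# curl -H "Content-Type:application/json" -X POST --data "{\"userid\": 1001, \"phonenumber\": \"123456\", \"password\": \"pass\"}" http://127.0.0.1:5000/api/users
# curl -u 1001:pass http://127.0.0.1:5000/api/token
# curl -u 1001:pass -H "Content-Type:application/json" -X POST --data "{\"order_id\": 1}" http://127.0.0.1:5000/api/get_order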
| 2.625 | 3 |
diff.py | yakomak/pth1 | 0 | 12789774 |
# --- dD3MA2 ---
import tkinter as tk
from tkinter import ttk
# Define the columns
master = tk.Tk()
tree = ttk.Treeview(master)
tree["columns"]=("one","two","three")
tree.column("#0", width=270, minwidth=270, stretch=tk.NO)
tree.column("one", width=150, minwidth=150, stretch=tk.NO)
tree.column("two", width=400, minwidth=200)
tree.column("three", width=80, minwidth=50, stretch=tk.NO)
# Define the headings
tree.heading("#0",text="Name",anchor=tk.W)
tree.heading("one", text="Date modified",anchor=tk.W)
tree.heading("two", text="Type",anchor=tk.W)
tree.heading("three", text="Size",anchor=tk.W)
# Insert some rows
# Level 1
folder1=tree.insert("", 1, "", text="Folder 1", values=("23-Jun-17 11:05","File folder",""))
tree.insert("", 2, "", text="text_file.txt", values=("23-Jun-17 11:25","TXT file","1 KB"))
# Level 2
tree.insert(folder1, "end", "", text="photo1.png", values=("23-Jun-17 11:28", "PNG file", "2.6 KB"))
tree.insert(folder1, "end", "", text="photo2.png", values=("23-Jun-17 11:29", "PNG file", "3.2 KB"))
tree.insert(folder1, "end", "", text="photo3.png", values=("23-Jun-17 11:30", "PNG file", "3.1 KB"))
# Pack the tree and start the event loop
tree.pack(side=tk.TOP, fill=tk.X)
master.mainloop()
| 3 | 3 |
tests/integration/test_company.py | orikalinski/python-intercom | 0 | 12789775 | # -*- coding: utf-8 -*-
import os
import unittest
from intercom.client import Client
from . import delete_company
from . import delete_user
from . import get_or_create_user
from . import get_or_create_company
from . import get_timestamp
intercom = Client(
os.environ.get('INTERCOM_PERSONAL_ACCESS_TOKEN'))
class CompanyTest(unittest.TestCase):
@classmethod
def setup_class(cls):
nowstamp = get_timestamp()
cls.company = get_or_create_company(intercom, nowstamp)
cls.user = get_or_create_user(intercom, nowstamp)
@classmethod
def teardown_class(cls):
delete_company(intercom, cls.company)
delete_user(intercom, cls.user)
def test_add_user(self):
user = intercom.contacts.find(email=self.user.email)
user.companies = [
{"company_id": 6, "name": "Intercom"},
{"company_id": 9, "name": "Test Company"}
]
intercom.contacts.save(user)
user = intercom.contacts.find(email=self.user.email)
self.assertEqual(len(user.companies), 2)
self.assertEqual(user.companies[0].company_id, "9")
def test_add_user_custom_attributes(self):
user = intercom.contacts.find(email=self.user.email)
user.companies = [
{
"id": 6,
"name": "Intercom",
"custom_attributes": {
"referral_source": "Google"
}
}
]
intercom.contacts.save(user)
user = intercom.contacts.find(email=self.user.email)
self.assertEqual(len(user.companies), 2)
self.assertEqual(user.companies[0].company_id, "9")
# check the custom attributes
company = intercom.companies.find(company_id=6)
self.assertEqual(
company.custom_attributes['referral_source'], "Google")
def test_find_by_company_id(self):
# Find a company by company_id
company = intercom.companies.find(company_id=self.company.company_id)
self.assertEqual(company.company_id, self.company.company_id)
def test_find_by_company_name(self):
# Find a company by name
company = intercom.companies.find(name=self.company.name)
self.assertEqual(company.name, self.company.name)
def test_find_by_id(self):
# Find a company by _id
company = intercom.companies.find(id=self.company.id)
self.assertEqual(company.company_id, self.company.company_id)
def test_update(self):
# Find a company by id
company = intercom.companies.find(id=self.company.id)
# Update a company
now = get_timestamp()
updated_name = 'Company %s' % (now)
company.name = updated_name
intercom.companies.save(company)
company = intercom.companies.find(id=self.company.id)
self.assertEqual(company.name, updated_name)
def test_iterate(self):
# Iterate over all companies
for company in intercom.companies.all():
self.assertTrue(company.id is not None)
def test_users(self):
company = intercom.companies.find(id=self.company.id)
# Get a list of users in a company
for user in intercom.companies.contacts(company.id):
self.assertIsNotNone(user.email)
| 2.625 | 3 |
webserver/contest/models.py | theSage21/judge-interface | 3 | 12789776 | from socket import create_connection
from django.db import models
from django.utils import timezone
class Slave(models.Model):
def __str__(self):
        return '{}:{}'.format(self.ip, self.port)
ip = models.GenericIPAddressField()
port = models.IntegerField()
busy = models.BooleanField(default=False)
def is_alive(self):
addr = (self.ip, self.port)
try:
con = create_connection(addr)
except:
return False
else:
con.sendall('Alive'.encode('utf-8'))
return True
def get_address(self):
return (self.ip, self.port)
    def __enter__(self):
        self.busy = True
        self.save()
        return self
def __exit__(self, exc_type, exc_value, traceback):
self.busy = False
self.save()
class ContestControl(models.Model):
"Control for the contest"
def __str__(self):
return self.name
name = models.CharField(max_length=100, default='Contest')
start = models.DateTimeField(default=timezone.now)
end = models.DateTimeField(default=timezone.now)
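
# Illustrative usage (a sketch; assumes Django is configured and at least one
# Slave row exists). The context-manager protocol above marks a judge slave
# as busy while work is dispatched to it and frees it afterwards:
#
#   slave = Slave.objects.filter(busy=False).first()
#   if slave is not None and slave.is_alive():
#       with slave:
#           addr = slave.get_address()
#           # ... hand the submission to the judge listening at `addr` ...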
| 2.453125 | 2 |
Pset4_hand_in/Rep_agent_labor2.py | SkanderGar/QuantMacro | 0 | 12789777 | import numpy as np
from numpy import vectorize
import scipy.optimize as so
@vectorize
def U(c, h, kappa, nu):
if c<=0:
u = -np.inf
elif c>0:
u = np.log(c) - (kappa*h**(1+1/nu))/((1+1/nu))
return u
class rep_ag:
def __init__(self, theta, beta, delta, kappa, nu, kmin, kmax, hmin, hmax, num_node=20, order=3):
self.theta = theta
self.beta = beta
self.delta = delta
self.kappa = kappa
self.nu = nu
self.kmin = kmin
self.kmax = kmax
self.hmin = hmin
self.hmax = hmax
self.num_node = num_node
self.order = order
##### creating the basis functions
func = []
Psi1 = np.vectorize(lambda x: 1)
Psi2 = np.vectorize(lambda x: x)
func.append(Psi1)
func.append(Psi2)
for i in range(2,order):
f = np.vectorize(lambda x, n=i: 2*x*func[n-1](x)-func[n-2](x))
func.append(f)
self.func = func
self.gridk, self.gridk_cheb = self.cheb_node(kmin, kmax, num_node, cheby=0)
PHI = []
for f in self.func:
phi = f(2*(self.gridk-self.kmin)/(self.kmax-self.kmin) -1)
PHI.append(phi)
self.PHI = np.array(PHI).T
def cheb_node(self, a, b, num, cheby=1):
vec = np.arange(0,num)
vec = np.flip(vec, axis=0)
chb = np.cos((vec*np.pi)/(num-1))
points = (a+b)/2 + ((b-a)/2)*chb
if cheby == 0:
vec_unit = 1/2 + (1/2)*chb
return np.array(points), np.array(vec_unit)
else:
return np.array(points)
def update_val(self, Theta_guess, ki, start): #Theta_guess here is just for a specific ki so we also need ki
Kp = lambda c, h: (1-self.delta)*ki + ki**(1-self.theta) *h**self.theta - c
Kp_cheb = lambda c, h: 2*(Kp(c,h)-self.kmin)/(self.kmax-self.kmin) -1 # here the value is function of kp not k so we need to map kp to (0,1) not k
Suma = lambda c, h: sum(Theta_guess[i]*self.func[i](Kp_cheb(c,h)) for i in range(len(self.func)))
VnotM = lambda x: -U(x[0], x[1], self.kappa, self.nu) - self.beta*Suma(x[0],x[1]) # - the objective because I am minimizing when I want to maximize
#non linear constraint
const = ({'type': 'ineq', 'fun': lambda x: ki**(1-self.theta)* x[1]**self.theta -x[0]})#higher or equal to zero
Boundc = (0.01*ki**(1-self.theta), None)
Boundh = (0.001*self.hmin,self.hmax)
Bound = (Boundc, Boundh)
res = so.minimize(VnotM, start, method = 'SLSQP', bounds = Bound, constraints=const)# start should be the solution found previously so we have interest in storing previous solution
# it should be an enequality not an upper_bound
Value = -res.fun
c_opt = res.x[0]
h_opt = res.x[1]
return Value, c_opt, h_opt
def update_theta(self, Theta_Old, Old_opt):
New_opt = []
V = []
for i in range(len(self.gridk)):
Value, c_opt, h_opt = self.update_val(Theta_Old, self.gridk[i], Old_opt[i,:]) #Old_opt is going to be a matrix containing the previews policy funtions
New_opt.append([c_opt, h_opt])
V.append(Value)
New_opt = np.array(New_opt)
V = np.array(V)
New_theta = np.linalg.inv([email protected])@self.PHI.T@V
return New_opt, New_theta
def problem(self, Old_theta = None, Tol = 10**(-6)):
        if Old_theta is None:
Old_theta = np.zeros(len(self.func))
Old_c = (self.kmax/4)**(1-self.theta) *np.ones(len(self.gridk))
Old_h = (self.hmax/4)*np.ones(len(self.gridk))
Old_opt = np.vstack((Old_c,Old_h)).T
err = 1
j = 0
while err>Tol:
New_opt, New_theta = self.update_theta(Old_theta, Old_opt)
err = np.max(np.abs(Old_theta-New_theta))
if j%50 == 0:
print('iteration:', j)
print('error:', err)
Old_theta = New_theta
Old_opt = New_opt
j = j+1
self.New_opt = New_opt
self.New_theta = New_theta
return New_opt, New_theta
def Val_pol_fun(self):
kc = lambda k: 2*(k-self.kmin)/(self.kmax-self.kmin) -1
self.V = np.vectorize(lambda k: sum(self.New_theta[i]*self.func[i](kc(k)) for i in range(len(self.func))))
self.Theta_c = np.linalg.inv([email protected])@[email protected]_opt[:,0]
self.Theta_h = np.linalg.inv([email protected])@[email protected]_opt[:,1]
self.gc = np.vectorize(lambda k: sum(self.Theta_c[i]*self.func[i](kc(k)) for i in range(len(self.func))))
self.gh = np.vectorize(lambda k: sum(self.Theta_h[i]*self.func[i](kc(k)) for i in range(len(self.func))))
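
# A minimal usage sketch; the parameter values below are illustrative
# assumptions, not a calibration of the model.
if __name__ == '__main__':
    ag = rep_ag(theta=0.67, beta=0.96, delta=0.1, kappa=1.0, nu=2.0,
                kmin=0.5, kmax=10.0, hmin=0.1, hmax=1.0)
    policies, theta_hat = ag.problem()   # fit Chebyshev coefficients of the value function
    ag.Val_pol_fun()                     # build the V(k), gc(k) and gh(k) interpolants
    print('V(1):', ag.V(1.0), 'c(1):', ag.gc(1.0), 'h(1):', ag.gh(1.0))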
| 2.515625 | 3 |
towhee/tests/engine/test_pipeline.py | NbnbZero/towhee | 1 | 12789778 | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import unittest
from PIL import Image
from towhee import pipeline, _get_pipeline_cache, _PIPELINE_CACHE_ENV
from towhee.engine.engine import EngineConfig
CACHE_PATH = Path(__file__).parent.parent.resolve()
class TestPipeline(unittest.TestCase):
"""
Tests `pipeline` functionality.
"""
def setUp(self):
conf = EngineConfig()
conf.cache_path = CACHE_PATH
conf.sched_interval_ms = 20
def test_empty_input(self):
p = pipeline('test_util/simple_pipeline', cache=str(CACHE_PATH))
self.assertEqual(p(), [])
def test_simple_pipeline(self):
p = pipeline('test_util/simple_pipeline', cache=str(CACHE_PATH))
res = p(0)
self.assertEqual(res[0], 3)
def test_embedding_pipeline(self):
p = pipeline('test_util/resnet50_embedding',
cache=str(CACHE_PATH))
img_path = CACHE_PATH / 'data' / 'dataset' / 'kaggle_dataset_small' / \
'train' / '0021f9ceb3235effd7fcde7f7538ed62.jpg'
img = Image.open(str(img_path))
res = p(img)
self.assertEqual(res[0].size, 1000)
def test_simple_pipeline_multirow(self):
#pylint: disable=protected-access
p = pipeline('test_util/simple_pipeline', cache=str(CACHE_PATH))
p._pipeline.parallelism = 2
res = p(list(range(1000)))
for n in range(1000):
self.assertEqual(res[n], n+3)
class TestPipelineCache(unittest.TestCase):
def test_pipeline_cache(self):
self.assertEqual(_get_pipeline_cache(
None), Path.home() / '.towhee/pipelines')
os.environ[_PIPELINE_CACHE_ENV] = '/opt/.pipeline'
self.assertEqual(_get_pipeline_cache(
None), Path('/opt/.pipeline'))
self.assertEqual(_get_pipeline_cache(
'/home/mycache'), Path('/home/mycache'))
if __name__ == '__main__':
unittest.main()
| 2.046875 | 2 |
apis_core/apis_entities/views.py | sviatoplok/apis-core | 1 | 12789779 | <reponame>sviatoplok/apis-core
# -*- coding: utf-8 -*-
import json
import reversion
from reversion_compare.views import HistoryCompareDetailView
from reversion.models import Version
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse_lazy
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import FieldError
from django.utils.decorators import method_decorator
from django.urls import reverse
from django.views.generic.edit import DeleteView
from django.views import generic
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.db.models import Q
from django_tables2 import SingleTableView
from django_tables2 import RequestConfig
from django_tables2.export.views import ExportMixin
from apis_core.helper_functions.utils import access_for_all
from .models import Person, Place, Institution, Event, Work
from apis_core.apis_vocabularies.models import LabelType
from apis_core.apis_metainfo.models import Uri, UriCandidate, TempEntityClass, Text
from apis_core.helper_functions.stanbolQueries import retrieve_obj
from apis_core.helper_functions.RDFparsers import GenericRDFParser
from apis_core.helper_functions.utils import (
access_for_all, access_for_all_function, ENTITIES_DEFAULT_COLS
)
from apis_core.apis_labels.models import Label
from .forms import (
FullTextForm, SearchForm, GenericFilterFormHelper,
NetworkVizFilterForm, PersonResolveUriForm,
get_entities_form, GenericEntitiesStanbolForm
)
from .tables import (
PersonTable, PlaceTable, InstitutionTable, EventTable, WorkTable,
get_entities_table
)
from .filters import (
PersonListFilter, PlaceListFilter, InstitutionListFilter, EventListFilter, WorkListFilter,
get_generic_list_filter
)
if 'apis_highlighter' in settings.INSTALLED_APPS:
from apis_highlighter.forms import SelectAnnotationProject, SelectAnnotatorAgreement
from apis_core.helper_functions.highlighter import highlight_text
if 'charts' in settings.INSTALLED_APPS:
from charts.models import ChartConfig
from charts.views import create_payload
###########################################################################
############################################################################
#
# Helper Functions
#
############################################################################
############################################################################
@user_passes_test(access_for_all_function)
def set_session_variables(request):
ann_proj_pk = request.GET.get('project', None)
types = request.GET.getlist('types', None)
users_show = request.GET.getlist('users_show', None)
edit_views = request.GET.get('edit_views', False)
if types:
request.session['entity_types_highlighter'] = types
if users_show:
request.session['users_show_highlighter'] = users_show
if ann_proj_pk:
request.session['annotation_project'] = ann_proj_pk
if edit_views:
if edit_views != 'false':
request.session['edit_views'] = True
return request
@user_passes_test(access_for_all_function)
def get_highlighted_texts(request, instance):
if 'apis_highlighter' in settings.INSTALLED_APPS:
set_ann_proj = request.session.get('annotation_project', 1)
entity_types_highlighter = request.session.get('entity_types_highlighter', None)
users_show = request.session.get('users_show_highlighter', None)
object_texts = [{'text': highlight_text(
x,
set_ann_proj=set_ann_proj,
types=entity_types_highlighter,
users_show=users_show).strip(),
'id': x.pk,
'kind': x.kind} for x in Text.objects.filter(tempentityclass=instance)]
ann_proj_form = SelectAnnotationProject(
set_ann_proj=set_ann_proj,
entity_types_highlighter=entity_types_highlighter,
users_show_highlighter=users_show)
return object_texts, ann_proj_form
else:
object_texts = [{
'text': x.text,
'id': x.pk,
'kind': x.kind
} for x in Text.objects.filter(tempentityclass=instance)]
return object_texts, False
############################################################################
############################################################################
#
# GenericViews
#
############################################################################
############################################################################
class GenericListViewNew(UserPassesTestMixin, ExportMixin, SingleTableView):
formhelper_class = GenericFilterFormHelper
context_filter_name = 'filter'
paginate_by = 25
template_name = getattr(settings, 'APIS_LIST_VIEW_TEMPLATE', 'apis_entities/generic_list.html')
login_url = '/accounts/login/'
def get_model(self):
model = ContentType.objects.get(
app_label__startswith='apis_', model=self.entity.lower()
).model_class()
return model
def test_func(self):
access = access_for_all(self, viewtype="list")
if access:
self.request = set_session_variables(self.request)
return access
def get_queryset(self, **kwargs):
self.entity = self.kwargs.get('entity')
qs = ContentType.objects.get(
app_label__startswith='apis_', model=self.entity.lower()
).model_class().objects.all()
self.filter = get_generic_list_filter(self.entity.title())(self.request.GET, queryset=qs)
self.filter.form.helper = self.formhelper_class()
return self.filter.qs
def get_table(self, **kwargs):
session = getattr(self.request, 'session', False)
entity = self.kwargs.get('entity')
selected_cols = self.request.GET.getlist("columns")
if session:
edit_v = self.request.session.get('edit_views', False)
else:
edit_v = False
if 'table_fields' in settings.APIS_ENTITIES[entity.title()]:
default_cols = settings.APIS_ENTITIES[entity.title()]['table_fields']
else:
default_cols = ['name']
default_cols = default_cols + selected_cols
self.table_class = get_entities_table(
self.entity.title(), edit_v, default_cols=default_cols
)
table = super(GenericListViewNew, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
return table
def get_context_data(self, **kwargs):
model = self.get_model()
context = super(GenericListViewNew, self).get_context_data()
context[self.context_filter_name] = self.filter
context['entity'] = self.entity
context['app_name'] = 'apis_entities'
entity = self.entity.title()
context['entity_create_stanbol'] = GenericEntitiesStanbolForm(self.entity)
if 'browsing' in settings.INSTALLED_APPS:
from browsing.models import BrowsConf
context['conf_items'] = list(
BrowsConf.objects.filter(model_name=self.entity)
.values_list('field_path', 'label')
)
context['docstring'] = "{}".format(model.__doc__)
if model._meta.verbose_name_plural:
context['class_name'] = "{}".format(model._meta.verbose_name.title())
else:
if model.__name__.endswith('s'):
context['class_name'] = "{}".format(model.__name__)
else:
context['class_name'] = "{}s".format(model.__name__)
try:
context['get_arche_dump'] = model.get_arche_dump()
except AttributeError:
context['get_arche_dump'] = None
try:
context['create_view_link'] = model.get_createview_url()
except AttributeError:
context['create_view_link'] = None
if 'charts' in settings.INSTALLED_APPS:
app_label = model._meta.app_label
filtered_objs = ChartConfig.objects.filter(
model_name=model.__name__.lower(),
app_name=app_label
)
context['vis_list'] = filtered_objs
context['property_name'] = self.request.GET.get('property')
context['charttype'] = self.request.GET.get('charttype')
if context['charttype'] and context['property_name']:
qs = self.get_queryset()
chartdata = create_payload(
context['entity'],
context['property_name'],
context['charttype'],
qs,
app_label=app_label
)
context = dict(context, **chartdata)
try:
context['enable_merge'] = settings.APIS_ENTITIES[entity.title()]['merge']
except KeyError:
context['enable_merge'] = False
try:
togg_cols = settings.APIS_ENTITIES[entity.title()]['additional_cols']
except KeyError:
togg_cols = []
if context['enable_merge'] and self.request.user.is_authenticated:
togg_cols = togg_cols + ['merge']
context['togglable_colums'] = togg_cols + ENTITIES_DEFAULT_COLS
return context
def render_to_response(self, context, **kwargs):
download = self.request.GET.get('sep', None)
if download and 'browsing' in settings.INSTALLED_APPS:
import datetime
import time
import pandas as pd
sep = self.request.GET.get('sep', ',')
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')
filename = "export_{}".format(timestamp)
response = HttpResponse(content_type='text/csv')
if context['conf_items']:
conf_items = context['conf_items']
try:
df = pd.DataFrame(
list(
self.get_queryset().values_list(*[x[0] for x in conf_items])
),
columns=[x[1] for x in conf_items]
)
except AssertionError:
response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(
filename
)
return response
else:
response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(filename)
return response
if sep == "comma":
df.to_csv(response, sep=',', index=False)
elif sep == "semicolon":
df.to_csv(response, sep=';', index=False)
elif sep == "tab":
df.to_csv(response, sep='\t', index=False)
else:
df.to_csv(response, sep=',', index=False)
response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(filename)
return response
else:
response = super(GenericListViewNew, self).render_to_response(context)
return response
############################################################################
############################################################################
#
# OtherViews
#
############################################################################
############################################################################
@login_required
def getGeoJson(request):
'''Used to retrieve GeoJsons for single objects'''
# if request.is_ajax():
pk_obj = request.GET.get("object_id")
instance = get_object_or_404(Place, pk=pk_obj)
uria = Uri.objects.filter(entity=instance)
urib = UriCandidate.objects.filter(entity=instance)
    if urib.count() > 0:
        uric = urib
    else:
        uric = uria
add_info = ""
lst_json = []
if uric.count() > 0 and not instance.status.startswith('distinct'):
for x in uric:
o = retrieve_obj(x.uri)
if o:
url_r = reverse_lazy('apis_entities:resolve_ambigue_place', kwargs={'pk': str(instance.pk), 'uri': o['representation']['id'][7:]})
select_text = "<a href='{}'>Select this URI</a>".format(url_r)
try:
add_info = "<b>Confidence:</b> {}<br/><b>Feature:</b> <a href='{}'>{}</a>".format(x.confidence, x.uri, x.uri)
except:
add_info = "<b>Confidence:</b>no value provided <br/><b>Feature:</b> <a href='{}'>{}</a>".format(x.uri, x.uri)
r = {"geometry": {
"type": "Point",
"coordinates": [
float(
o['representation']
['http://www.w3.org/2003/01/geo/wgs84_pos#long'][0]['value']),
float(
o['representation']
['http://www.w3.org/2003/01/geo/wgs84_pos#lat'][0]['value'])
]
},
"type": "Feature",
"properties": {
"popupContent": "<b>ÖBL name:</b> %s<br/><b>Geonames:</b> %s<br/>%s<br/>%s" % (instance.name, o['representation']['http://www.geonames.org/ontology#name'][0]['value'], select_text, add_info)
},
"id": x.pk
}
lst_json.append(r)
elif instance.lat is not None and instance.lng is not None:
r = {"geometry": {
"type": "Point",
"coordinates": [
instance.lng,
instance.lat
]
},
"type": "Feature",
"properties": {
"popupContent": "<b>Name:</b> %s<br/>" % (instance.name)
},
"id": instance.pk
}
lst_json.append(r)
return HttpResponse(json.dumps(lst_json), content_type='application/json')
@login_required
def getGeoJsonList(request):
'''Used to retrieve a list of GeoJsons. To generate the list the kind of connection
and the connected entity is needed'''
relation = ContentType.objects.get(app_label='apis_relations', model=request.GET.get("relation")).model_class()
#relation_type = request.GET.get("relation_type")
objects = relation.objects.filter(
related_place__status='distinct').select_related('related_person', 'related_place', 'relation_type')
lst_json = []
for x in objects:
pers_url = x.related_person.get_absolute_url()
place_url = x.related_place.get_absolute_url()
r = {"geometry": {
"type": "Point",
"coordinates": [x.related_place.lng, x.related_place.lat]
},
"type": "Feature",
"relation_type": x.relation_type.name,
"properties": {
"popupContent": "<b>Person:</b> <a href='%s'>%s</a><br/><b>Connection:</b> %s<br/><b>Place:</b> <a href='%s'>%s</a>" % (pers_url, x.related_person, x.relation_type, place_url, x.related_place)
},
"id": x.pk
}
lst_json.append(r)
return HttpResponse(json.dumps(lst_json), content_type='application/json')
@login_required
def getNetJsonList(request):
'''Used to retrieve a Json to draw a network'''
relation = ContentType.objects.get(app_label='apis_relations', model='PersonPlace').model_class()
objects = relation.objects.filter(
related_place__status='distinct')
nodes = dict()
edges = []
for x in objects:
if x.related_place.pk not in nodes.keys():
place_url = reverse_lazy('apis_entities:place_edit', kwargs={'pk': str(x.related_place.pk)})
tt = "<div class='arrow'></div>\
<div class='sigma-tooltip-header'>%s</div>\
<div class='sigma-tooltip-body'>\
<table>\
<tr><th>Type</th> <td>%s</td></tr>\
<tr><th>Entity</th> <td><a href='%s'>Link</a></td></tr>\
</table>\
</div>"% (x.related_place.name, 'place', place_url)
nodes[x.related_place.pk] = {'type': 'place', 'label': x.related_place.name, 'id': str(x.related_place.pk), 'tooltip': tt}
if x.related_person.pk not in nodes.keys():
pers_url = reverse_lazy('apis_entities:person_edit', kwargs={'pk': str(x.related_person.pk)})
tt = "<div class='arrow'></div>\
<div class='sigma-tooltip-header'>%s</div>\
<div class='sigma-tooltip-body'>\
<table>\
<tr><th>Type</th> <td>%s</td></tr>\
<tr><th>Entity</th> <td><a href='%s'>Link</a></td></tr>\
</table>\
</div>"% (str(x.related_person), 'person', pers_url)
nodes[x.related_person.pk] = {'type': 'person', 'label': str(x.related_person), 'id': str(x.related_person.pk), 'tooltip': tt}
edges.append({'source': x.related_person.pk, 'target': x.related_place.pk, 'kind': x.relation_type.name, 'id': str(x.pk)})
lst_json = {'edges': edges, 'nodes': [nodes[x] for x in nodes.keys()]}
return HttpResponse(json.dumps(lst_json), content_type='application/json')
@login_required
def getNetJsonListInstitution(request):
'''Used to retrieve a Json to draw a network'''
relation = ContentType.objects.get(app_label='apis_relations', model='PersonInstitution').model_class()
objects = relation.objects.all()
nodes = dict()
edges = []
for x in objects:
if x.related_institution.pk not in nodes.keys():
inst_url = reverse_lazy('apis_entities:institution_edit', kwargs={'pk': str(x.related_institution.pk)})
tt = "<div class='arrow'></div>\
<div class='sigma-tooltip-header'>%s</div>\
<div class='sigma-tooltip-body'>\
<table>\
<tr><th>Type</th> <td>%s</td></tr>\
<tr><th>Entity</th> <td><a href='%s'>Link</a></td></tr>\
</table>\
</div>"% (x.related_institution.name, 'institution', inst_url)
nodes[x.related_institution.pk] = {'type': 'institution', 'label': x.related_institution.name, 'id': str(x.related_institution.pk), 'tooltip': tt}
if x.related_person.pk not in nodes.keys():
pers_url = reverse_lazy('apis_entities:person_edit', kwargs={'pk': str(x.related_person.pk)})
tt = "<div class='arrow'></div>\
<div class='sigma-tooltip-header'>%s</div>\
<div class='sigma-tooltip-body'>\
<table>\
<tr><th>Type</th> <td>%s</td></tr>\
<tr><th>Entity</th> <td><a href='%s'>Link</a></td></tr>\
</table>\
</div>"% (str(x.related_person), 'person', pers_url)
nodes[x.related_person.pk] = {'type': 'person', 'label': str(x.related_person), 'id': str(x.related_person.pk), 'tooltip': tt}
edges.append({'source': x.related_person.pk, 'target': x.related_institution.pk, 'kind': x.relation_type.name, 'id': str(x.pk)})
lst_json = {'edges': edges, 'nodes': [nodes[x] for x in nodes.keys()]}
return HttpResponse(json.dumps(lst_json), content_type='application/json')
@login_required
def resolve_ambigue_place(request, pk, uri):
'''Only used to resolve place names.'''
with reversion.create_revision():
uri = 'http://'+uri
entity = Place.objects.get(pk=pk)
pl_n = GenericRDFParser(uri, kind='Place')
pl_n_1 = pl_n.save()
pl_n_1 = pl_n.merge(entity)
url = pl_n_1.get_absolute_url()
if pl_n.created:
pl_n_1.status = 'distinct (manually resolved)'
pl_n_1.save()
UriCandidate.objects.filter(entity=entity).delete()
reversion.set_user(request.user)
return HttpResponseRedirect(url)
@login_required
def resolve_ambigue_person(request):
if request.method == "POST":
form = PersonResolveUriForm(request.POST)
if form.is_valid():
pers = form.save()
return redirect(reverse('apis_entities:person_edit', kwargs={'pk': pers.pk}))
############################################################################
############################################################################
#
# VisualizationViews
#
############################################################################
############################################################################
@login_required
def birth_death_map(request):
return render(request, 'apis_entities/map_list.html')
@login_required
def pers_place_netw(request):
return render(request, 'apis_entities/network.html')
@login_required
def pers_inst_netw(request):
return render(request, 'apis_entities/network_institution.html')
@login_required
def generic_network_viz(request):
if request.method == 'GET':
form = NetworkVizFilterForm()
return render(request, 'apis_entities/generic_network_visualization.html',
{'form': form})
############################################################################
############################################################################
#
# Reversion Views
#
############################################################################
############################################################################
class ReversionCompareView(HistoryCompareDetailView):
template_name = 'apis_entities/compare_base.html'
def dispatch(self, request, app, kind, pk, *args, **kwargs):
self.model = ContentType.objects.get(app_label=app, model=kind).model_class()
return super(ReversionCompareView, self).dispatch(request, *args, **kwargs)
| 1.523438 | 2 |
announcements/label.py | CMU-Robotics-Club/roboticsclub.org | 0 | 12789780 | <filename>announcements/label.py
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.utils.text import slugify
from crm.label import Label
def create_announcement_label(announcement):
label = Label()
org_start, org_end = label.add_text("#000000", (0,0), "Roboto-Thin.ttf", 200, "Carnegie Mellon Robotics Club")
rect_start, rect_end = label.add_rectangle("#aaaaaa", (org_end[0] + 20, 0), (20, org_end[1]))
type_start, type_end = label.add_text("#000000", (rect_end[0] + 20, 0), "Roboto-Regular.ttf", 200, "Official Club Announcement")
header_start, header_end = label.add_text_split("#0000AA", (int(type_end[0]/2), rect_end[1] + 80), "Roboto-Regular.ttf", 400, announcement.header, center_x=True, words_per_line=3)
body_start, body_end = label.add_text_split("#000000", (int(type_end[0]/2), header_end[1] + 80), "Roboto-Thin.ttf", 200, announcement.body, center_x=True, words_per_line=5)
return label.create()
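
# Illustrative usage (a sketch; assumes an Announcement instance with the
# `header` and `body` fields used above, and returns whatever
# crm.label.Label.create() produces):
#
#   label_image = create_announcement_label(announcement)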
| 2.328125 | 2 |
lec06_class/class 03.py | SOOIN-KIM/lab-python | 0 | 12789781 | '''
Practice writing classes, creating objects, and using methods
'''
class Employee:
'''
field: empno, ename, salary, deptno
    method: raise_salary(self, pct)
'''
def __init__(self,empno, ename, salary, deptno):
self.empno = empno
self.ename = ename
self.salary = salary
self.deptno = deptno
    def raise_salary(self, pct):
self.salary = (1 + pct) * self.salary
return self.salary
def __repr__(self):
        return f'(empno: {self.empno}, name: {self.ename}, salary: {self.salary}, deptno: {self.deptno})'
gil_dong = Employee(1010,'홍길동',1000,10)
print(gil_dong.__repr__())
gil_dong.raise_salary(0.1)
print(gil_dong.__repr__())
scott = Employee(1011, 'Scott',10000,20)
print(scott.__repr__())
scott.raise_salary(-0.1)
print(scott.__repr__())
ohssam = Employee(1012,'오쌤',500,30)
employees = [ohssam,gil_dong, scott]
print(employees)
print(sorted(employees,key=lambda x: x.empno))
print(sorted(employees,key=lambda x: x.salary))
print(sorted(employees,key=lambda x: x.ename))
print(sorted(employees,key=lambda x: x.deptno))
| 4.09375 | 4 |
pyaz/sql/mi/__init__.py | py-az-cli/py-az-cli | 0 | 12789782 | <filename>pyaz/sql/mi/__init__.py<gh_stars>0
'''
Manage SQL managed instances.
'''
from ... pyaz_utils import _call_az
from . import ad_admin, ad_only_auth, key, op, tde_key
def create(name, resource_group, subnet, admin_password=None, admin_user=None, assign_identity=None, backup_storage_redundancy=None, capacity=None, collation=None, enable_ad_only_auth=None, external_admin_name=None, external_admin_principal_type=None, external_admin_sid=None, family=None, identity_type=None, key_id=None, license_type=None, location=None, maint_config_id=None, minimal_tls_version=None, no_wait=None, primary_user_assigned_identity_id=None, proxy_override=None, public_data_endpoint_enabled=None, storage=None, tags=None, tier=None, timezone_id=None, user_assigned_identity_id=None, vnet_name=None, yes=None):
'''
Create a managed instance.
Required Parameters:
- name -- The managed instance name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- subnet -- Name or ID of the subnet that allows access to an Azure Sql Managed Instance. If subnet name is provided, --vnet-name must be provided.
Optional Parameters:
- admin_password -- The administrator login password (required for managed instance creation).
- admin_user -- Administrator username for the managed instance. Can only be specified when the managed instance is being created (and is required for creation).
- assign_identity -- Generate and assign an Azure Active Directory Identity for this managed instance for use with key management services like Azure KeyVault.
- backup_storage_redundancy -- Backup storage redundancy used to store backups. Allowed values include: Local, Zone, Geo.
- capacity -- The capacity of the managed instance in integer number of vcores.
- collation -- The collation of the managed instance.
- enable_ad_only_auth -- Enable Azure Active Directory Only Authentication for this server.
- external_admin_name -- Display name of the Azure AD administrator user, group or application.
- external_admin_principal_type -- User, Group or Application
- external_admin_sid -- The unique ID of the Azure AD administrator. Object Id for User or Group, Client Id for Applications
- family -- The compute generation component of the sku. Allowed values include: Gen4, Gen5.
- identity_type -- Type of Identity to be used. Possible values are SystemAsssigned,UserAssigned, SystemAssignedUserAssigned and None.
- key_id -- The key vault URI for encryption.
- license_type -- The license type to apply for this managed instance.
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- maint_config_id -- Assign maintenance configuration to this managed instance.
- minimal_tls_version -- The minimal TLS version enforced by the managed instance for inbound connections.
- no_wait -- Do not wait for the long-running operation to finish.
- primary_user_assigned_identity_id -- The ID of the primary user managed identity.
- proxy_override -- The connection type used for connecting to the instance.
- public_data_endpoint_enabled -- Whether or not the public data endpoint is enabled for the instance.
- storage -- The storage size of the managed instance. Storage size must be specified in increments of 32 GB
- tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
- tier -- The edition component of the sku. Allowed values include: GeneralPurpose, BusinessCritical.
- timezone_id -- The time zone id for the instance to set. A list of time zone ids is exposed through the sys.time_zone_info (Transact-SQL) view.
- user_assigned_identity_id -- Generate and assign an User Managed Identity(UMI) for this server.
- vnet_name -- The virtual network name
- yes -- Do not prompt for confirmation.
'''
return _call_az("az sql mi create", locals())
def delete(name, resource_group, no_wait=None, yes=None):
'''
Delete a managed instance.
Required Parameters:
- name -- The managed instance name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- no_wait -- Do not wait for the long-running operation to finish.
- yes -- Do not prompt for confirmation.
'''
return _call_az("az sql mi delete", locals())
def show(name, resource_group, expand_ad_admin=None):
'''
Get the details for a managed instance.
Required Parameters:
- name -- The managed instance name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- expand_ad_admin -- Expand the Active Directory Administrator for the server.
'''
return _call_az("az sql mi show", locals())
def list(expand_ad_admin=None, resource_group=None):
'''
List available managed instances.
Optional Parameters:
- expand_ad_admin -- Expand the Active Directory Administrator for the server.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az sql mi list", locals())
def update(name, resource_group, add=None, admin_password=None, assign_identity=None, capacity=None, family=None, force_string=None, identity_type=None, key_id=None, license_type=None, maint_config_id=None, minimal_tls_version=None, no_wait=None, primary_user_assigned_identity_id=None, proxy_override=None, public_data_endpoint_enabled=None, remove=None, set=None, storage=None, subnet=None, tags=None, tier=None, user_assigned_identity_id=None, vnet_name=None):
'''
Update a managed instance.
Required Parameters:
- name -- The managed instance name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- add -- Add an object to a list of objects by specifying a path and key value pairs. Example: --add property.listProperty <key=value, string or JSON string>
- admin_password -- The administrator login password (required for managed instance creation).
- assign_identity -- Generate and assign an Azure Active Directory Identity for this managed instance for use with key management services like Azure KeyVault. If identity is already assigned - do nothing.
- capacity -- The capacity of the managed instance in integer number of vcores.
- family -- The compute generation component of the sku. Allowed values include: Gen4, Gen5.
- force_string -- When using 'set' or 'add', preserve string literals instead of attempting to convert to JSON.
- identity_type -- Type of Identity to be used. Possible values are SystemAsssigned,UserAssigned, SystemAssignedUserAssigned and None.
- key_id -- The key vault URI for encryption.
- license_type -- The license type to apply for this managed instance.
- maint_config_id -- Change maintenance configuration for this managed instance.
- minimal_tls_version -- The minimal TLS version enforced by the managed instance for inbound connections.
- no_wait -- Do not wait for the long-running operation to finish.
- primary_user_assigned_identity_id -- The ID of the primary user managed identity.
- proxy_override -- The connection type used for connecting to the instance.
- public_data_endpoint_enabled -- Whether or not the public data endpoint is enabled for the instance.
- remove -- Remove a property or an element from a list. Example: --remove property.list <indexToRemove> OR --remove propertyToRemove
- set -- Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>
- storage -- The storage size of the managed instance. Storage size must be specified in increments of 32 GB
- subnet -- Name or ID of the subnet that allows access to an Azure Sql Managed Instance. If subnet name is provided, --vnet-name must be provided.
- tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
- tier -- The edition component of the sku. Allowed values include: GeneralPurpose, BusinessCritical.
- user_assigned_identity_id -- Generate and assign an User Managed Identity(UMI) for this server.
- vnet_name -- The virtual network name
'''
return _call_az("az sql mi update", locals())
def failover(name, resource_group, no_wait=None, replica_type=None):
'''
Failover a managed instance.
Required Parameters:
- name -- The managed instance name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- no_wait -- Do not wait for the long-running operation to finish.
- replica_type -- The type of replica to be failed over.
'''
return _call_az("az sql mi failover", locals())
| 2.046875 | 2 |
b3j0f/sync/store/registry.py | b3j0f/sync | 1 | 12789783 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Accessor definition module."""
__all__ = ['StoreRegistry']
from ..record.core import Record
from .core import Store
from six import reraise
class StoreRegistry(Record):
"""Manage stores.
    This class is used to synchronize stores or to execute CRUD methods
    across several stores.
"""
class Error(Exception):
"""handle synchronizer errors."""
DEFAULT_COUNT = 5000 #: default synchronization count per step.
def __init__(self, stores=None, count=DEFAULT_COUNT, *args, **kwargs):
"""
:param list stores: stores to synchronize.
:param int count: number of data to sync per iteration.
"""
super(StoreRegistry, self).__init__(
stores=stores, count=count, *args, **kwargs
)
def synchronize(
self,
rtypes=None, data=None, sources=None, targets=None, count=None,
override=False
):
"""Synchronize the source store with target stores.
:param list rtypes: record types to synchronize.
:param dict data: matching data content to retrieve from the sources.
:param list sources: stores from where get data. Default self stores.
:param list targets: stores from where put data. Default self stores.
:param int count: number of data to synchronize iteratively.
:param bool override: if False, update only data which does not exist
in targets."""
if sources is None:
sources = self.stores
if rtypes is None:
rtypes = set()
for source in sources:
rtypes |= set(source.rtypes)
rtypes = list(rtypes)
if targets is None:
targets = self.stores
if count is None:
count = self.count
for source in sources:
skip = 0
while True:
records = source.find(
rtypes=rtypes, data=data, skip=skip, limit=count
)
if records:
for target in targets:
try:
target.update(
records=records, upsert=True, override=override
)
except Store.Error as ex:
reraise(
StoreRegistry.Error, StoreRegistry.Error(ex)
)
skip += count
else:
break
def _execute(self, func, stores=None, *args, **kwargs):
"""
:param str func: store func name to execute.
:param list stores: stores where apply the func. Default is self source
and targets.
:param tuple args: func var arguments.
:param dict kwargs: func keyword arguments.
:return: func result if not many. Otherwise, an array of func results.
"""
result = {}
if stores is None:
stores = self.stores
for store in stores:
try:
result[store] = getattr(store, func)(*args, **kwargs)
except Store.Error:
continue
return result
def add(self, records, stores=None):
"""Add records in a store.
:param list records: records to add to the store.
:param list stores: specific stores to use.
:return: added records by store.
:rtype: dict"""
return self._execute(func='add', records=records, stores=stores)
def update(self, records, upsert=False, stores=None):
"""Update records in a store.
:param list records: records to update in the input store.
:param bool upsert: if True (default False), add record if not exist.
:param list stores: specific stores to use.
:return: updated records by store.
:rtype: dict"""
return self._execute(
func='update', upsert=upsert, records=records, stores=stores
)
def get(self, record, stores=None):
"""Get a record from stores.
:param Record record: record to get from the store.
:param list stores: specific stores to use.
:return: record by store.
:rtype: dict"""
return self._execute(func='get', record=record, stores=stores)
def find(
self, stores=None,
rtypes=None, records=None, data=None,
limit=None, skip=None, sort=None,
):
"""Find records from stores.
:param list stores: specific stores to use.
:param list rtypes: record types to find. Default is all.
:param list records: records to find.
:param int limit: maximal number of records to retrieve.
:param int skip: number of elements to avoid.
:param list sort: data field name to sort.
:return: records by store.
:rtype: dict"""
return self._execute(
func='find', stores=stores,
rtypes=rtypes, records=records, data=data,
limit=limit, skip=skip, sort=sort
)
def remove(self, records=None, rtypes=None, data=None, stores=None):
"""Remove records from target stores.
:param list records: records to remove.
:param list rtypes: record types to remove.
:param dict data: data content to filter.
:param list stores: specific stores to use.
:return: removed records by store.
:rtype: dict
"""
return self._execute(
func='remove',
records=records, rtypes=rtypes, data=data, stores=stores
)
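
# Illustrative usage sketch (editorial addition; `SomeStore` is a hypothetical
# Store subclass, not part of this package):
#   source = SomeStore(...)
#   target = SomeStore(...)
#   registry = StoreRegistry(stores=[source, target])
#   registry.synchronize(count=1000)         # copy records between the stores
#   found = registry.find(rtypes=['user'])   # dict of {store: find() result}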
| 1.429688 | 1 |
deepwto/graphql.py | DeepWTO/deepwto-dataset-explore | 1 | 12789784 | <gh_stars>1-10
from typing import Union, List
import requests
import json
from deepwto.constants import available_ds, available_article, cited_by_ds
class AppSyncClient:
latest_version = "1.0.0"
available_ds_num = len(available_ds)
available_ds = available_ds
available_article_num = len(available_article)
available_article = available_article
def __init__(self, api_key, endpoint_url):
self.api_key = api_key
self.endpoint_url = endpoint_url
self.headers = {
"Content-Type": "application/graphql",
"x-api-key": api_key,
"cache-control": "no-cache",
}
def execute_gql(self, query):
payload_obj = {"query": query}
payload = json.dumps(payload_obj)
response = requests.request(
"POST", self.endpoint_url, data=payload, headers=self.headers
)
return response
def get_factual(self, ds: int, version: str = "1.0.0"):
assert ds in self.available_ds, (
"Make sure choose ds number from " "available_ds"
)
ds = "{}".format(str(ds))
version = '"{}"'.format(version)
query = """
query GetFactual{{
getFactual(
ds: {0},
version: {1}) {{
factual
}}
}}
""".format(
ds, version
)
res = self.execute_gql(query).json()
return res["data"]["getFactual"]["factual"]
def get_article(self, article: str, version: str = "1.0.0"):
assert article in self.available_article, (
"Make sure choose article " "from available_article "
)
article = '"{}"'.format(article)
version = '"{}"'.format(version)
query = """
query GetGATT{{
getGATT(
article: {0},
version: {1}) {{
content
}}
}}
""".format(
article, version
)
res = self.execute_gql(query).json()
return res["data"]["getGATT"]["content"]
def get_label(self, ds: int, article: str, version: str = "1.0.0"):
assert ds in self.available_ds, (
"Make sure choose ds number from " "available_ds"
)
assert article in self.available_article, (
"Make sure choose article " "from available_article "
)
ds_art = '"{}"'.format(str(ds) + "_" + article)
version = '"{}"'.format(version)
query = """
query GetLabel{{
getLabel(
ds_art: {0},
version: {1}) {{
cited
}}
}}
""".format(
ds_art, version
)
res = self.execute_gql(query).json()
return res["data"]["getLabel"]["cited"]
def get_cited(self, contains: str = None):
"""
        Retrieve all items with cited: {eq: true}, or
        only those whose ds_art contains a given string (e.g. an Article)
Returns:
cited: List[dict]
list of dictionaries where each dict has following format:
{
"ds_art": "67_Article XXIII:1",
"version": "1.0.0",
"cited": true,
"split": "train"
}
"""
nextToken: Union[None, str] = None
cited: List[dict] = []
if contains:
contains = '"{}"'.format(contains)
if not nextToken:
query = """
query ListLabels {{
listLabels(
limit: {0}
filter: {{
ds_art: {{contains: {1}}}
cited: {{eq: true}}
}}
) {{
items {{
ds_art
version
cited
split
}}
nextToken
}}
}}
""".format(
11440, contains
)
res = self.execute_gql(query).json()
cited.extend(res["data"]["listLabels"]["items"])
nextToken = '"{}"'.format(res["data"]["listLabels"]["nextToken"])
while nextToken:
query = """
query ListLabels {{
listLabels(
nextToken: {0}
limit: {1}
filter: {{
ds_art: {{contains: {2}}}
cited: {{eq: true}}
}}
) {{
items {{
ds_art
version
cited
split
}}
nextToken
}}
}}
""".format(
nextToken, 11440, contains
)
res = self.execute_gql(query).json()
cited.extend(res["data"]["listLabels"]["items"])
nextToken = '"{}"'.format(res["data"]["listLabels"]["nextToken"])
if nextToken == '"None"':
break
return cited
if not contains:
if not nextToken:
query = """
query ListLabels {{
listLabels(
limit: {0}
filter: {{
cited: {{eq: true}}
}}
) {{
items {{
ds_art
version
cited
split
}}
nextToken
}}
}}
""".format(
11440
)
res = self.execute_gql(query).json()
cited.extend(res["data"]["listLabels"]["items"])
nextToken = '"{}"'.format(res["data"]["listLabels"]["nextToken"])
while nextToken:
query = """
query ListLabels {{
listLabels(
nextToken: {0}
limit: {1}
filter: {{
cited: {{eq: true}}
}}
) {{
items {{
ds_art
version
cited
split
}}
nextToken
}}
}}
""".format(
nextToken, 11440
)
res = self.execute_gql(query).json()
cited.extend(res["data"]["listLabels"]["items"])
nextToken = '"{}"'.format(res["data"]["listLabels"]["nextToken"])
if nextToken == '"None"':
break
return cited
@staticmethod
def get_cited_by_ds(ds: int):
return cited_by_ds[ds]
@staticmethod
def parse_ds(data: List[dict]):
ds_nums = []
for item in data:
ds = int(item['ds_art'].split('_')[0])
ds_nums.append(ds)
data = list(set(ds_nums))
data.sort()
return data | 2.390625 | 2 |
experiments/helpers/os_utils.py | Project-Ellie/capsnet-fashion | 1 | 12789785 | import tensorflow as tf
import platform
def os_info():
return {
'machine': platform.machine(),
'node': platform.node(),
'os': platform.platform(),
'cuda': tf.test.is_built_with_cuda()
}
| 1.882813 | 2 |
utils.py | jroivas/klapi | 3 | 12789786 | <reponame>jroivas/klapi
import random
import string
import uuid
def generatePassword(characters=8):
# Disabling 0 and O to prevent misreadings
return ''.join(random.SystemRandom().choice(string.letters.replace('O','') + string.digits.replace('0','')) for _ in range(characters))
def generateID():
return str(uuid.uuid4())
def generateApiKey():
return generateID()
| 2.8125 | 3 |
Run.py | austinjcheng/chatterbox | 3 | 12789787 | <filename>Run.py
import string
from Read import getUser, getMessage
from Initialize import createUsers, switchUser, startRooms
from Settings import usernames
import threading
from GUI import GUI
def refreshMessages():
from Initialize import users, currentUserIndex
readbuffer = ""
while True:
readbuffer = readbuffer + users[currentUserIndex].recv(1024)
temp = string.split(readbuffer, "\n")
readbuffer = temp.pop()
for line in temp:
gui.displayMessage(line)
user = getUser(line)
message = getMessage(line)
gui.displayMessage(user + " typed :" + message)
createUsers()
global gui
gui = GUI()
guiThread = threading.Thread(target=gui.startGUI)
guiThread.start()
startRooms(gui)
messageThread = threading.Thread(target=refreshMessages)
messageThread.start()
| 2.71875 | 3 |
tests/test_studies.py | asteriogonzalez/pytest-study | 3 | 12789788 | <reponame>asteriogonzalez/pytest-study
import pytest
import time
# make some 'alias'
study = pytest.mark.study
pre = pytest.mark.pre
# You can put the regular tests and the studies in any order
# pytest-study will reorder later
def test_independent():
"A regular isolated test"
time.sleep(0.05)
# test marked as study will not be executed unless pass --runstudy
# in command line
@pre(name='AI')
def test_foo():
"This is a prerequisite test that belongs to the 'AI' study"
time.sleep(0.1)
print "Inside foo test!"
assert True
@pre(name='AI', order=5)
def test_gather_info():
"Another prerequisite for 'AI' study"
time.sleep(0.1)
@study(name='AI')
def test_study_one():
"""This is a long computation study that will be executed
only if test_gather_info() and test_foo() has been passed (in that order)
"""
time.sleep(0.2)
print "Study 1 Hello World!"
@pre
def test_bar():
"This is a prerequisite test belonging to 'default' study"
time.sleep(0.15)
print "Inside bar test!"
assert True
@pre(order=5)
def test_prior_bar():
"This is the prerequisite that is executed prior test_bar()"
time.sleep(0.15)
@study(order=1)
def test_study_two():
"""This studio will be executed before test_study_one because
we have changed the order. All test_study_two() prerequisite will
be executed before calling, but not test_study_one() prerequisites.
This allows to execute the studies ASAP.
"""
time.sleep(0.3)
print "Study 2 Hello World again!"
@pre('nonkeyword_name')
def test_nonkeyword_name_1():
pass
@pre('nonkeyword_name')
def test_nonkeyword_name_2():
pass
@study('nonkeyword_name')
def test_nonkeyword_study():
pass
| 2.78125 | 3 |
examples/case1/case2_mpc.py | JavierArroyoBastida/forecast-gen | 1 | 12789789 | <filename>examples/case1/case2_mpc.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
import os
import time
from statsmodels.tsa.stattools import acf
from scipy.stats import norm
import mshoot
from fcastgen import error
# Set up logging
logging.basicConfig(filename='mpc_case1.log', filemode='w', level='DEBUG')
# Random seed
# np.random.seed(12345)
# Paths
ms_file = os.path.join('examples', 'case1', 'measurements.csv')
fmu_dir = os.path.join('examples', 'case1', 'models')
# FMU list
fmus = os.listdir(fmu_dir)
# Simulation period
t0 = '2018-04-05 00:00:00'
t1 = '2018-04-08 00:00:00'
# Read measurements
ms = pd.read_csv(ms_file)
ms['datetime'] = pd.to_datetime(ms['datetime'])
ms = ms.set_index('datetime')
ms = ms.loc[t0:t1]
# Resample
ms = ms.resample('1h').mean().ffill().bfill()
# Assign model inputs
inp = ms[['solrad', 'Tout', 'occ', 'dpos', 'vpos']]
inp['time'] = (inp.index - inp.index[0]).total_seconds()
inp = inp.set_index('time')
# Modify inputs
inp['dpos'] = 0
# Initial state
airT0 = 20. + 273.15
x0 = [airT0]
# Cost function
def cfun(xdf, ydf):
"""
    :param xdf: DataFrame, model states
    :param ydf: DataFrame, model outputs
:return: float
"""
Q = (ydf['qr']**2).sum()
return Q
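
# Worked example (editorial note): the quadratic penalty sums the squared heat
# flow over the horizon, e.g. cfun(None, pd.DataFrame({'qr': [1.0, 2.0]})) == 5.0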
# Iterate over FMUs
horizons = [2, 6]
relerr = [0, 10, 20, 30] # %
runs = 5
for fmu in fmus:
for hrz in horizons:
for re in relerr:
for run in range(1, runs + 1):
fmu_name = fmu.split('.')[0]
par_file = os.path.join('examples', 'case1', 'results', 'est',
fmu_name, 'parameters.csv')
wdir = os.path.join('examples', 'case1', 'results', 'mpc',
fmu_name, "h{}".format(hrz),
"re{}".format(re), "{}".format(run))
# Skip the case, if results already there
if os.path.exists(wdir):
pass
else:
os.makedirs(wdir)
fmu_file = os.path.join(fmu_dir, fmu)
# Read parameters and modify heating power (used for heating/cooling in this example)
pm = pd.read_csv(par_file)
parameters = dict()
for p in pm:
parameters[p] = pm.iloc[0][p]
parameters['maxHeat'] = 6000. # [W]
# Instantiate emulation and control models
model_emu = mshoot.SimFMU(
fmu_file,
outputs=['T', 'qr', 'vetot'],
states=['cair.T'],
parameters=parameters,
verbose=False)
model_ctr = mshoot.SimFMU(
fmu_file,
outputs=['T', 'qr', 'vetot'],
states=['cair.T'], # States should be initialized with fixed=False
parameters=parameters,
verbose=False)
# Instantiate MPCEmulation
mpc = mshoot.MPCEmulation(model_emu, cfun)
step = 1
horizon = hrz
# Contraints
Tmin = np.where((ms.index.hour >= 8) & (ms.index.hour < 17), 21 + 273.15, 19 + 273.15)
Tmax = np.where((ms.index.hour >= 8) & (ms.index.hour < 17), 22 + 273.15, 24 + 273.15)
constr = pd.DataFrame(data=np.column_stack((Tmin, Tmax)),
columns=['Tmin', 'Tmax'], index=inp.index)
constr.to_csv(os.path.join(wdir, 'constr.csv'))
# Add error to forecasts (control input): Tout
inp_ctr = inp.copy()
inp_emu = inp.copy()
n = inp.index.size
mae_Tout = re * (inp['Tout'].max() - inp['Tout'].min()) / 100.
mae_solrad = re * (inp['solrad'].max() - inp['solrad'].min()) / 100.
mae_occ = re * (inp['occ'].max() - inp['occ'].min()) / 100.
inp_ctr['Tout'] = inp_ctr['Tout'] + error(0.9, n, mae_Tout, eps=0.01)
inp_ctr['solrad'] = np.where(inp_ctr['solrad'] != 0, inp_ctr['solrad'] + error(0.9, n, mae_solrad, eps=0.01), inp_ctr['solrad'])
inp_ctr['solrad'] = np.maximum(np.zeros(inp_ctr.index.size), inp_ctr['solrad'])
inp_ctr['occ'] = np.where(inp_ctr['occ'] != 0, inp_ctr['occ'] + error(0.9, n, mae_occ, eps=0.01), inp_ctr['occ'])
inp_ctr['occ'] = np.maximum(np.zeros(inp_ctr.index.size), inp_ctr['occ'])
inp_ctr.to_csv(os.path.join(wdir, 'inp_ctr.csv'))
inp_emu.to_csv(os.path.join(wdir, 'inp_emu.csv'))
# Run
t0 = time.time()
u, xctr, xemu, yemu, u_hist = mpc.optimize(
model=model_ctr,
inp_ctr=inp_ctr,
inp_emu=inp_emu,
free=['vpos'],
ubounds=[(-100., 100.)],
xbounds=[(Tmin, Tmax)],
x0=x0,
maxiter=50,
ynominal=[300., 100., 2e6],
step=step,
horizon=horizon
)
cputime = int(time.time() - t0)
# Save results
u.to_csv(os.path.join(wdir, 'u.csv'))
xctr.to_csv(os.path.join(wdir, 'xctr.csv'))
xemu.to_csv(os.path.join(wdir, 'xemu.csv'))
yemu.to_csv(os.path.join(wdir, 'yemu.csv'))
for i in range(len(u_hist)):
u_hist[i].to_csv(os.path.join(wdir, 'u{}.csv'.format(i)))
with open(os.path.join(wdir, 'cputime.txt'), 'w') as f:
f.write("CPU time: {} s".format(cputime))
| 2.09375 | 2 |
algoritmo.py | jorgemauricio/procesamiento_he5 | 0 | 12789790 | <filename>algoritmo.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#######################################
# Script that automates the generation of
# maps of airborne particulate matter
# Author: <NAME>
# Email: <EMAIL>
# Date: Created on Thu Sep 28 08:38:15 2017
# Version: 1.0
#######################################
"""
# libraries
import os
import pandas as pd
import numpy as np
import h5py
import requests
from bs4 import BeautifulSoup
import urllib.request
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import time
from time import gmtime, strftime
import schedule
# latitude and longitude bounds
LONG_MIN = -115
LONG_MAX = -111
LAT_MIN = 29
LAT_MAX = 32
PATH = "/home/jorge/Documents/Research/procesamiento_he5"
array_URLs = ["https://acdisc.gesdisc.eosdis.nasa.gov/data/Aura_OMI_Level3/OMNO2d.003/2018/",
"https://acdisc.gsfc.nasa.gov/data/Aura_OMI_Level3/OMDOAO3e.003/2018/",
"https://acdisc.gsfc.nasa.gov/data/Aura_OMI_Level3/OMSO2e.003/2018/",
"https://acdisc.gsfc.nasa.gov/data/Aura_OMI_Level3/OMTO3e.003/2018/",
"https://acdisc.gesdisc.eosdis.nasa.gov/data/Aura_OMI_Level3/OMAEROe.003/2018/"]
#array_URLs = ["https://acdisc.gesdisc.eosdis.nasa.gov/data/Aura_OMI_Level3/OMNO2d.003/2018/"]
array_Archivo = []
# JOB
def job():
    # date of the download
    # download the data files
descarga_de_archivos()
procesamientoNO2()
procesamientoO3()
procesamientoSO2()
procesamientoTO3()
procesamientoAERO()
# function to process NO2
def procesamientoNO2():
# clear plt
plt.clf()
# Open file.
FILE_NAME = array_Archivo[0]
DATAFIELD_NAME = 'HDFEOS/GRIDS/ColumnAmountNO2/Data Fields/ColumnAmountNO2'
with h5py.File(FILE_NAME, mode='r') as f:
# Read dataset.
dset = f[DATAFIELD_NAME]
data = dset[:]
# Handle fill value.
data[data == dset.fillvalue] = np.nan
data = np.ma.masked_where(np.isnan(data), data)
# Get attributes needed for the plot.
# String attributes actually come in as the bytes type and should
# be decoded to UTF-8 (python3).
title = dset.attrs['Title'].decode()
units = dset.attrs['Units'].decode()
# There is no geolocation data, so construct it ourselves.
longitude = np.arange(0., 1440.0) * 0.25 - 180 + 0.125
latitude = np.arange(0., 720.0) * 0.25 - 90 + 0.125
    # read the station coordinates
dataEstaciones = pd.read_csv("{}/data/coordenadas_estaciones.csv".format(PATH))
xC = np.array(dataEstaciones['Long'])
yC = np.array(dataEstaciones['Lat'])
# Draw an equidistant cylindrical projection using the low resolution
# coastline database.
m = Basemap(projection='cyl', resolution='l',
llcrnrlat=LAT_MIN, urcrnrlat = LAT_MAX,
llcrnrlon=LONG_MIN, urcrnrlon = LONG_MAX)
m.scatter(xC, yC, latlon=True, s=1, marker='o', color='r', zorder=25)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(-90., 120., 30.), labels=[1, 0, 0, 0])
m.drawmeridians(np.arange(-180, 180., 45.), labels=[0, 0, 0, 1])
m.pcolormesh(longitude, latitude, data, latlon=True, cmap='jet')
cb = m.colorbar()
cb.set_label(units)
basename = os.path.basename(FILE_NAME)
plt.title('{0}\n{1}'.format(basename, title))
fig = plt.gcf()
# plt.show()
pngfile = "{}.png".format(basename)
fig.savefig(pngfile, dpi=600)
# function to process O3
def procesamientoO3():
# clear plt
plt.clf()
# Open file.
FILE_NAME = array_Archivo[1]
DATAFIELD_NAME = 'HDFEOS/GRIDS/ColumnAmountO3/Data Fields/ColumnAmountO3'
with h5py.File(FILE_NAME, mode='r') as f:
# Read dataset.
dset = f[DATAFIELD_NAME]
data = dset[:]
# Handle fill value.
data[data == dset.fillvalue] = np.nan
data = np.ma.masked_where(np.isnan(data), data)
# Get attributes needed for the plot.
# String attributes actually come in as the bytes type and should
# be decoded to UTF-8 (python3).
title = dset.attrs['Title'].decode()
units = dset.attrs['Units'].decode()
# There is no geolocation data, so construct it ourselves.
longitude = np.arange(0., 1440.0) * 0.25 - 180 + 0.125
latitude = np.arange(0., 720.0) * 0.25 - 90 + 0.125
    # read the station coordinates
dataEstaciones = pd.read_csv("{}/data/coordenadas_estaciones.csv".format(PATH))
xC = np.array(dataEstaciones['Long'])
yC = np.array(dataEstaciones['Lat'])
# Draw an equidistant cylindrical projection using the low resolution
# coastline database.
m = Basemap(projection='cyl', resolution='l',
llcrnrlat=LAT_MIN, urcrnrlat = LAT_MAX,
llcrnrlon=LONG_MIN, urcrnrlon = LONG_MAX)
m.scatter(xC, yC, latlon=True, s=1, marker='o', color='r', zorder=25)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(-90., 120., 30.), labels=[1, 0, 0, 0])
m.drawmeridians(np.arange(-180, 180., 45.), labels=[0, 0, 0, 1])
m.pcolormesh(longitude, latitude, data, latlon=True, cmap='jet')
cb = m.colorbar()
cb.set_label(units)
basename = os.path.basename(FILE_NAME)
plt.title('{0}\n{1}'.format(basename, title))
fig = plt.gcf()
# plt.show()
pngfile = "{}.png".format(basename)
fig.savefig(pngfile, dpi=600)
# function to process SO2
def procesamientoSO2():
# clear plt
plt.clf()
# Open file.
FILE_NAME = array_Archivo[2]
DATAFIELD_NAME = 'HDFEOS/GRIDS/OMI Total Column Amount SO2/Data Fields/ColumnAmountSO2_PBL'
with h5py.File(FILE_NAME, mode='r') as f:
# Read dataset.
dset = f[DATAFIELD_NAME]
data = dset[:]
# Handle fill value.
data[data == dset.fillvalue] = np.nan
data = np.ma.masked_where(np.isnan(data), data)
# Get attributes needed for the plot.
# String attributes actually come in as the bytes type and should
# be decoded to UTF-8 (python3).
title = dset.attrs['Title'].decode()
units = dset.attrs['Units'].decode()
# There is no geolocation data, so construct it ourselves.
longitude = np.arange(0., 1440.0) * 0.25 - 180 + 0.125
latitude = np.arange(0., 720.0) * 0.25 - 90 + 0.125
    # read the station coordinates
dataEstaciones = pd.read_csv("{}/data/coordenadas_estaciones.csv".format(PATH))
xC = np.array(dataEstaciones['Long'])
yC = np.array(dataEstaciones['Lat'])
# Draw an equidistant cylindrical projection using the low resolution
# coastline database.
m = Basemap(projection='cyl', resolution='l',
llcrnrlat=LAT_MIN, urcrnrlat = LAT_MAX,
llcrnrlon=LONG_MIN, urcrnrlon = LONG_MAX)
m.scatter(xC, yC, latlon=True, s=1, marker='o', color='r', zorder=25)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(-90., 120., 30.), labels=[1, 0, 0, 0])
m.drawmeridians(np.arange(-180, 180., 45.), labels=[0, 0, 0, 1])
m.pcolormesh(longitude, latitude, data, latlon=True, cmap='jet')
cb = m.colorbar()
cb.set_label(units)
basename = os.path.basename(FILE_NAME)
plt.title('{0}\n{1}'.format(basename, title))
fig = plt.gcf()
# plt.show()
pngfile = "{}.png".format(basename)
fig.savefig(pngfile, dpi=600)
# function to process TO3
def procesamientoTO3():
# clear plt
plt.clf()
# Open file.
FILE_NAME = array_Archivo[3]
DATAFIELD_NAME = 'HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/ColumnAmountO3'
with h5py.File(FILE_NAME, mode='r') as f:
# Read dataset.
dset = f[DATAFIELD_NAME]
data = dset[:]
# Handle fill value.
data[data == dset.fillvalue] = np.nan
data = np.ma.masked_where(np.isnan(data), data)
# Get attributes needed for the plot.
# String attributes actually come in as the bytes type and should
# be decoded to UTF-8 (python3).
title = dset.attrs['Title'].decode()
units = dset.attrs['Units'].decode()
# There is no geolocation data, so construct it ourselves.
longitude = np.arange(0., 1440.0) * 0.25 - 180 + 0.125
latitude = np.arange(0., 720.0) * 0.25 - 90 + 0.125
    # read the station coordinates
dataEstaciones = pd.read_csv("{}/data/coordenadas_estaciones.csv".format(PATH))
xC = np.array(dataEstaciones['Long'])
yC = np.array(dataEstaciones['Lat'])
# Draw an equidistant cylindrical projection using the low resolution
# coastline database.
m = Basemap(projection='cyl', resolution='l',
llcrnrlat=LAT_MIN, urcrnrlat = LAT_MAX,
llcrnrlon=LONG_MIN, urcrnrlon = LONG_MAX)
m.scatter(xC, yC, latlon=True, s=1, marker='o', color='r', zorder=25)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(-90., 120., 30.), labels=[1, 0, 0, 0])
m.drawmeridians(np.arange(-180, 180., 45.), labels=[0, 0, 0, 1])
m.pcolormesh(longitude, latitude, data, latlon=True, cmap='jet')
cb = m.colorbar()
cb.set_label(units)
basename = os.path.basename(FILE_NAME)
plt.title('{0}\n{1}'.format(basename, title))
fig = plt.gcf()
# plt.show()
pngfile = "{}.png".format(basename)
fig.savefig(pngfile, dpi=600)
# function to process aerosols (AERO)
def procesamientoAERO():
# clear plt
plt.clf()
# Open file.
FILE_NAME = array_Archivo[4]
DATAFIELD_NAME = 'HDFEOS/GRIDS/ColumnAmountAerosol/Data Fields/UVAerosolIndex'
with h5py.File(FILE_NAME, mode='r') as f:
# Read dataset.
dset = f[DATAFIELD_NAME]
data = dset[:]
# Handle fill value.
data[data == dset.fillvalue] = np.nan
data = np.ma.masked_where(np.isnan(data), data)
# Get attributes needed for the plot.
# String attributes actually come in as the bytes type and should
# be decoded to UTF-8 (python3).
title = dset.attrs['Title'].decode()
units = dset.attrs['Units'].decode()
# There is no geolocation data, so construct it ourselves.
longitude = np.arange(0., 1440.0) * 0.25 - 180 + 0.125
latitude = np.arange(0., 720.0) * 0.25 - 90 + 0.125
    # read the station coordinates
dataEstaciones = pd.read_csv("{}/data/coordenadas_estaciones.csv".format(PATH))
xC = np.array(dataEstaciones['Long'])
yC = np.array(dataEstaciones['Lat'])
# Draw an equidistant cylindrical projection using the low resolution
# coastline database.
m = Basemap(projection='cyl', resolution='l',
llcrnrlat=LAT_MIN, urcrnrlat = LAT_MAX,
llcrnrlon=LONG_MIN, urcrnrlon = LONG_MAX)
m.scatter(xC, yC, latlon=True, s=1, marker='o', color='r', zorder=25)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(-90., 120., 30.), labels=[1, 0, 0, 0])
m.drawmeridians(np.arange(-180, 180., 45.), labels=[0, 0, 0, 1])
m.pcolormesh(longitude, latitude, data, latlon=True, cmap='jet')
cb = m.colorbar()
cb.set_label(units)
basename = os.path.basename(FILE_NAME)
plt.title('{0}\n{1}'.format(basename, title))
fig = plt.gcf()
# plt.show()
pngfile = "{}.png".format(basename)
fig.savefig(pngfile, dpi=600)
# función descarga de archivos
def descarga_de_archivos():
    # date of the download
    fechaPronostico = strftime("%Y-%m-%d")
    # change to the data folder
    os.chdir("{}/data".format(PATH))
    # create a directory named after the download date
    os.mkdir("{}/data/{}".format(PATH,fechaPronostico))
    # change into the dated data directory
    os.chdir("{}/data/{}".format(PATH,fechaPronostico))
    # loop over the data sources to download
for URL in array_URLs:
        # request the directory listing
        r = requests.get(URL)
        # parse the HTML to find the links to download
        soup = BeautifulSoup(r.text, "html.parser")
        # create an array to store the links
        array_links = []
        # loop to collect the links that carry data
        for link in soup.find_all("a"):
            array_links.append(link.get("href"))
        # name of the file to download
        nombre_archivo = array_links[-5]
        # print the file name
        print(nombre_archivo)
        # keep the file name for post-processing
        array_Archivo.append(nombre_archivo)
        # build the download URL
        URL_DESCARGA = "{}{}".format(URL, nombre_archivo)
        # print the download URL
        print(URL_DESCARGA)
os.system("wget --load-cookies ~/.urs_cookies --save-cookies ~/.urs_cookies --keep-session-cookies {}".format(URL_DESCARGA))
schedule.every().day.at("07:00").do(job)
while 1:
schedule.run_pending()
time.sleep(1)
| 1.90625 | 2 |
pywps/application.py | kvold/pywps | 1 | 12789791 | <reponame>kvold/pywps
from pywps.app.Service import Service
def make_app(processes=None, cfgfiles=None):
app = Service(processes=processes, cfgfiles=cfgfiles)
return app
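
# Usage sketch (editorial addition; the process list and config file name are
# placeholders): the returned Service is a WSGI application.
#   application = make_app(processes=[], cfgfiles=['pywps.cfg'])
#   # e.g. served with: gunicorn my_wsgi_module:application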
| 1.382813 | 1 |
tests/python/test_dataflow.py | tlemo/nwcpp_pybind11 | 0 | 12789792 |
import unittest
import nwcpp
class VariablesTestCase(unittest.TestCase):
def test_variable_declarations(self):
dag = nwcpp.Dataflow()
a = dag.declare_variable('a')
with self.assertRaises(RuntimeError):
# duplicate name
dag.declare_variable('a')
b = dag.declare_variable(name='b')
def test_variables_list(self):
dag = nwcpp.Dataflow()
c = dag.declare_variable('c')
b = dag.declare_variable('b')
a = dag.declare_variable('a')
self.assertEqual(dag.variables, [a, b, c])
def test_variable_lookup(self):
dag = nwcpp.Dataflow()
c = dag.declare_variable('c')
b = dag.declare_variable('b')
a = dag.declare_variable('a')
self.assertIsNone(dag.lookup_variable(name='B'))
self.assertIsNone(dag.lookup_variable(name='aa'))
for v in dag.variables:
self.assertEqual(dag.lookup_variable(name=v.name), v)
class OperationsTestCase(unittest.TestCase):
def test_mixing_dags(self):
dag_1 = nwcpp.Dataflow()
dag_2 = nwcpp.Dataflow()
a_1 = dag_1.declare_variable('a')
a_2 = dag_2.declare_variable('a')
sum_1 = dag_1.create_binary_op('+', a_1, a_1)
sum_2 = dag_2.create_binary_op('+', a_2, a_2)
with self.assertRaises(RuntimeError):
dag_1.create_binary_op('+', a_1, a_2)
with self.assertRaises(RuntimeError):
dag_2.create_binary_op('+', a_1, a_2)
with self.assertRaises(RuntimeError):
dag_1.create_binary_op('+', sum_1, sum_2)
with self.assertRaises(RuntimeError):
dag_2.create_binary_op('+', sum_1, sum_2)
def test_binary_operations(self):
dag = nwcpp.Dataflow()
a = dag.declare_variable('a')
b = dag.declare_variable('b')
# test all supported forms
div_1 = dag.create_binary_op('/', a, b)
div_2 = dag.div(a, b)
div_3 = a / b
with self.assertRaises(RuntimeError):
div_1.eval()
a.assign(8)
b.assign(4)
self.assertEqual(div_1.eval(), 2)
self.assertEqual(div_2.eval(), 2)
self.assertEqual(div_3.eval(), 2)
def test_operator_overloading(self):
dag = nwcpp.Dataflow()
a = dag.declare_variable('a')
b = dag.declare_variable('b')
c = dag.declare_variable('c')
result = a + b * c
a.assign(1)
b.assign(2)
c.assign(3)
self.assertEqual(result.eval(), 7)
if __name__ == '__main__':
unittest.main()
| 3 | 3 |
23_yolov3-nano/01_float32/03_weight_quantization.py | khanfarhan10/PINTO_model_zoo | 1,529 | 12789793 | ### tf-nightly-2.2.0.dev20200418
import tensorflow as tf
# Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('./saved_model')
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,tf.lite.OpsSet.SELECT_TF_OPS]
tflite_quant_model = converter.convert()
with open('yolov3_nano_voc_416_weight_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Weight Quantization complete! - yolov3_nano_voc_416_weight_quant.tflite")
| 2.453125 | 2 |
janch/components/__init__.py | taarimalta/janch | 0 | 12789794 | <reponame>taarimalta/janch<gh_stars>0
"""This modules contains code that enable the components of the Janch config file
"""
from janch.components.formatters import get_default_formatters
from janch.components.gatherers import get_default_gatherers
from janch.components.inspectors import get_default_inspectors
from janch.components.loggers import get_default_loggers
__all__ = [
'get_default_gatherers',
'get_default_inspectors',
'get_default_formatters',
'get_default_loggers']
| 1.515625 | 2 |
dspftwplot/plot_complex.py | dspftw/dspftwplot | 0 | 12789795 | # vim: expandtab tabstop=4 shiftwidth=4
from numpy import ndarray
import matplotlib.pyplot as plt
def plot_complex(*args, **kwargs):
'''
Plots complex data in the complex plane.
Parameters
----------
args: array_like
The complex arrays to plot
kwargs: dict
        Parameters passed through to plt.plot().
'''
plotargs = []
for arg in args:
if type(arg) is ndarray:
plotargs.append(arg.real)
plotargs.append(arg.imag)
else:
plotargs.append(arg)
plt.plot(*plotargs, **kwargs)
def plotc(*args, **kwargs):
'''
An alias of plot_complex().
'''
return plot_complex(*args, **kwargs)
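

# Minimal demo (editorial sketch, not part of the original module): draws the
# unit circle in the complex plane when the file is executed directly.
if __name__ == '__main__':
    import numpy as np
    circle = np.exp(2j * np.pi * np.linspace(0.0, 1.0, 200))
    plot_complex(circle, color='tab:blue')
    plt.axis('equal')
    plt.show()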
| 2.71875 | 3 |
config.py | kibetrono/Blogging-Website | 0 | 12789796 | <filename>config.py
import os
class Config:
'''General configuration parent class'''
SQLALCHEMY_DATABASE_URI='postgresql+psycopg2://kibet:KibetFlask@localhost/ownblog'
SECRET_KEY ='FlSkPItchA@*ppL&iCA^$tio***n'
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
UPLOADED_PHOTOS_DEST = 'app/static/photos'
class ProdConfig(Config):
"""Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
"""
# SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://kibet:KibetFlask@localhost/ownblog'
SQLALCHEMY_DATABASE_URI = os.getenv("DATABASE_URL") # or other relevant config var
if SQLALCHEMY_DATABASE_URI and SQLALCHEMY_DATABASE_URI.startswith("postgres://"):
SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI.replace("postgres://", "postgresql://", 1)
# rest of connection code using the connection string `uri`
class TestConfig(Config):
""""""
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://kibet:KibetFlask@localhost/blog_test'
class DevConfig(Config):
"""Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
"""
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig,
'test':TestConfig
}
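
# Usage sketch (editorial addition; the Flask app factory lives elsewhere in this project):
#   from config import config_options
#   app.config.from_object(config_options['development'])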
| 2.40625 | 2 |
module_06/src/pages/checkout_first.py | AngieGarciaT/2021_python_selenium | 0 | 12789797 | """Implements sauce lab login checkout first step."""
from enum import Enum
from selenium.webdriver.remote.webdriver import WebDriver
from module_06.src.elements.base_page_element import BasePageElement
from module_06.src.elements.header import Header
from module_06.src.elements.inventory_items import InventoryItems
from module_06.src.elements.select_element import SelectElement
from module_06.src.locators.inventory import InventoryPageLoc
from module_06.src.locators.cart import CartItemLoc
from module_06.src.locators.checkout import CheckoutItemLoc
from module_06.src.pages.base_page import BasePage
from module_06.src.mixin.InventoryItemMixin import InventoryItemMixin
from module_06.src.locators.inventory_details import InventoryDetailsLoc
from module_06.src.elements.checkout_info import ContactCheckout
from module_06.src.pages.cart import CartPage
_URL = 'https://www.saucedemo.com/checkout-step-one.html'
class CheckoutFirstStep(InventoryItemMixin, BasePage):
def __init__(self, driver: WebDriver, timeout: int = 5):
super().__init__(driver, _URL, timeout)
self._info_checkout = ContactCheckout(self._wait)
self.header = Header(self._wait)
def fill_info(self, firstname="", lastname="", postal_code=""):
self._info_checkout.fill_info(firstname, lastname, postal_code)
def checkout(self):
self._info_checkout.checkout()
return CartPage(self._wait._driver, self._wait._timeout)
def back_to_cart(self):
self._info_checkout.back_to_cart()
def get_error_msg(self):
return self._info_checkout.get_error_msg()
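
    # Usage sketch (editorial addition; assumes a WebDriver already on the
    # checkout step-one page):
    #   page = CheckoutFirstStep(driver)
    #   page.fill_info('John', 'Doe', '12345')
    #   cart = page.checkout()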
| 2.28125 | 2 |
tests/simple/test_simple.py | iklasky/timemachines | 253 | 12789798 | <gh_stars>100-1000
from timemachines.skaters.simple.movingaverage import precision_ema_ensemble, aggressive_ema_ensemble
SIMPLE_TO_TEST = [ precision_ema_ensemble, aggressive_ema_ensemble ]
from timemachines.inclusion.sklearninclusion import using_sklearn
if using_sklearn:
from timemachines.skatertools.evaluation.evaluators import hospital_mean_square_error_with_sporadic_fit, \
hospital_exog_mean_square_error_with_sporadic_fit
def test_ensemble_errors():
for f in SIMPLE_TO_TEST:
err = hospital_mean_square_error_with_sporadic_fit(f=f, k=5, n=150, fit_frequency=1)
if __name__=='__main__':
assert using_sklearn
test_ensemble_errors() | 2.015625 | 2 |
setup.py | YasielCabrera/dj-graphene | 0 | 12789799 | <filename>setup.py
from setuptools import setup
with open('README.md') as f:
long_description = f.read()
setup(
name = 'dj-graphene',
packages = ['dj_graphene'],
version = '0.0.2-b.2',
license='MIT',
description = 'A graphene-django wrapper to do stuffs in the Django way 💃🕺',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/YasielCabrera/dj-graphene',
keywords = ['GRAPHENE', 'GRAPHENE-DJANGO', 'GRAPHQL', 'DJANGO', 'MODELS', 'API', 'PERMISSIONS'],
install_requires = [
'graphene',
'graphene-django',
'django'
],
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Environment :: Web Environment',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
| 1.289063 | 1 |
grab/spider/base.py | mawentao007/reading_grab | 0 | 12789800 | <filename>grab/spider/base.py
#coding:utf-8
from __future__ import absolute_import
import types
import logging
import time
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from random import randint
try:
import Queue as queue
except ImportError:
import queue
from copy import deepcopy
import six
import os
from weblib import metric
from contextlib import contextmanager
from traceback import format_exc
import multiprocessing
import threading
from grab.base import Grab
from grab.error import GrabInvalidUrl
from grab.spider.error import (SpiderError, SpiderMisuseError, FatalError,
NoTaskHandler, NoDataHandler,
SpiderConfigurationError)
from grab.spider.task import Task
from grab.spider.data import Data
from grab.spider.transport.multicurl import MulticurlTransport
from grab.proxylist import ProxyList, BaseProxySource
from grab.util.misc import camel_case_to_underscore
from weblib.encoding import make_str, make_unicode
from grab.base import GLOBAL_STATE
from grab.stat import Stat, Timer
from grab.spider.parser_pipeline import ParserPipeline
DEFAULT_TASK_PRIORITY = 100
DEFAULT_NETWORK_STREAM_NUMBER = 3
DEFAULT_TASK_TRY_LIMIT = 3
DEFAULT_NETWORK_TRY_LIMIT = 3
RANDOM_TASK_PRIORITY_RANGE = (50, 100)
NULL = object()
logger = logging.getLogger('grab.spider.base')
logger_verbose = logging.getLogger('grab.spider.base.verbose')
# If you need verbose logging just
# change logging level of that logger
logger_verbose.setLevel(logging.FATAL)
class SpiderMetaClass(type):
"""
    This meta class does the following things::
        * It creates a Meta attribute, if it is not defined in the
            Spider descendant class, by copying the parent's Meta attribute
        * It resets Meta.abstract to False if Meta is copied from the parent class
        * If the defined Meta does not contain an `abstract`
            attribute then it is defined and set to False
"""
def __new__(cls, name, bases, namespace):
if 'Meta' not in namespace:
for base in bases:
if hasattr(base, 'Meta'):
# copy contents of base Meta
meta = type('Meta', (object,), dict(base.Meta.__dict__))
# reset abstract attribute
meta.abstract = False
namespace['Meta'] = meta
break
# Process special case (SpiderMetaClassMixin)
if 'Meta' not in namespace:
namespace['Meta'] = type('Meta', (object,), {})
if not hasattr(namespace['Meta'], 'abstract'):
namespace['Meta'].abstract = False
return super(SpiderMetaClass, cls).__new__(cls, name, bases, namespace)
@six.add_metaclass(SpiderMetaClass)
class Spider(object):
"""
Asynchronous scraping framework.
"""
# You can define here some urls and initial tasks
# with name "initial" will be created from these
# urls
# If the logic of generating initial tasks is complex
    # then consider using the `task_generator` method instead of the
# `initial_urls` attribute
initial_urls = None
# The base url which is used to resolve all relative urls
# The resolving takes place in `add_task` method
base_url = None
class Meta:
# Meta.abstract means that this class will not be
# collected to spider registry by `grab crawl` CLI command.
# The Meta is inherited by descendant classes BUT
# Meta.abstract is reset to False in each descendant
abstract = True
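
    # Minimal usage sketch (editorial illustration, not part of the framework):
    #
    #   class ExampleSpider(Spider):
    #       initial_urls = ['http://example.com/']
    #
    #       def task_initial(self, grab, task):
    #           for elem in grab.doc.select('//a/@href'):
    #               yield Task('page', url=grab.make_url_absolute(elem.text()))
    #
    #       def task_page(self, grab, task):
    #           self.stat.collect('titles', grab.doc.select('//title').text())
    #
    #   bot = ExampleSpider(thread_number=2)
    #   bot.setup_queue()
    #   bot.run()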
# *************
# Class Methods
# *************
@classmethod
def setup_spider_config(cls, config):
pass
@classmethod
def get_spider_name(cls):
if hasattr(cls, 'spider_name'):
return cls.spider_name
else:
return camel_case_to_underscore(cls.__name__)
# **************
# Public Methods
# **************
def __init__(self, thread_number=None,
network_try_limit=None, task_try_limit=None,
request_pause=NULL,
priority_mode='random',
meta=None,
only_cache=False,
config=None,
slave=None,
max_task_generator_chunk=None,
args=None,
# New options start here
taskq=None,
# MP:
network_result_queue=None,
parser_result_queue=None,
waiting_shutdown_event=None,
shutdown_event=None,
mp_mode=False,
parser_pool_size=None,
parser_mode=False,
parser_requests_per_process=10000,
# http api
http_api_port=None,
):
"""
Arguments:
        * thread_number - Number of concurrent network streams
        * network_try_limit - How many times to retry a request
            if a network error occurred, use 0 to disable
        * task_try_limit - Limit of tries to execute some task
            this is not the same as network_try_limit:
            network_try_limit limits the number of tries which
            are performed automatically in case of a network timeout
            or some other physical error,
            while task_try_limit limits the number of attempts which
            are scheduled manually in the spider business logic
* priority_mode - could be "random" or "const"
* meta - arbitrary user data
* retry_rebuild_user_agent - generate new random user-agent for each
network request which is performed again due to network error
* args - command line arguments parsed with `setup_arg_parser` method
New options:
* taskq=None,
* newtork_response_queue=None,
"""
if slave is not None:
            raise SpiderConfigurationError(
'Slave mode is not supported anymore. '
'Use `mp_mode=True` option to run multiple HTML'
' parser processes.')
# API:
self.http_api_port = http_api_port
# MP:
self.mp_mode = mp_mode
if self.mp_mode:
from multiprocessing import Process, Event, Queue
else:
from multiprocessing.dummy import Process, Event, Queue
if network_result_queue is not None:
self.network_result_queue = network_result_queue
else:
self.network_result_queue = Queue()
self.parser_result_queue = parser_result_queue
self.waiting_shutdown_event = waiting_shutdown_event
if shutdown_event is not None:
self.shutdown_event = shutdown_event
else:
self.shutdown_event = Event()
if not self.mp_mode and parser_pool_size and parser_pool_size > 1:
raise SpiderConfigurationError(
'Parser pool size could be only 1 in '
'non-multiprocess mode')
self.parser_pool_size = parser_pool_size
self.parser_mode = parser_mode
self.parser_requests_per_process = parser_requests_per_process
self.stat = Stat()
self.timer = Timer()
self.task_queue = taskq
if args is None:
self.args = {}
else:
self.args = args
self.max_task_generator_chunk = max_task_generator_chunk
self.timer.start('total')
if config is not None:
self.config = config
else:
self.config = {}
if meta:
self.meta = meta
else:
self.meta = {}
self.task_generator_enabled = False
self.only_cache = only_cache
self.thread_number = (
thread_number or
int(self.config.get('thread_number',
DEFAULT_NETWORK_STREAM_NUMBER)))
self.task_try_limit = (
task_try_limit or
int(self.config.get('task_try_limit', DEFAULT_TASK_TRY_LIMIT)))
self.network_try_limit = (
network_try_limit or
int(self.config.get('network_try_limit',
DEFAULT_NETWORK_TRY_LIMIT)))
self._grab_config = {}
if priority_mode not in ['random', 'const']:
raise SpiderMisuseError('Value of priority_mode option should be '
'"random" or "const"')
else:
self.priority_mode = priority_mode
# Initial cache-subsystem values
self.cache_enabled = False
self.cache = None
self.work_allowed = True
if request_pause is not NULL:
logger.error('Option `request_pause` is deprecated and is not '
'supported anymore')
self.proxylist_enabled = None
self.proxylist = None
self.proxy = None
self.proxy_auto_change = False
self.interrupted = False
def setup_cache(self, backend='mongo', database=None, use_compression=True,
**kwargs):
if database is None:
raise SpiderMisuseError('setup_cache method requires database '
'option')
self.cache_enabled = True
mod = __import__('grab.spider.cache_backend.%s' % backend,
globals(), locals(), ['foo'])
self.cache = mod.CacheBackend(database=database,
use_compression=use_compression,
spider=self, **kwargs)
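
    # Example (sketch): cache downloaded pages in a local MongoDB database.
    #   bot.setup_cache(backend='mongo', database='grab_cache')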
def setup_queue(self, backend='memory', **kwargs):
logger.debug('Using %s backend for task queue' % backend)
mod = __import__('grab.spider.queue_backend.%s' % backend,
globals(), locals(), ['foo'])
self.task_queue = mod.QueueBackend(spider_name=self.get_spider_name(),
**kwargs)
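
    # Example (sketch): the default in-memory queue needs no arguments; other
    # backends accept extra keyword arguments forwarded to their QueueBackend.
    #   bot.setup_queue()
    #   bot.setup_queue(backend='mongo', database='grab_queue')  # assumption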
def add_task(self, task, raise_error=False):
"""
Add task to the task queue.
"""
# MP:
# ***
if self.parser_mode:
self.parser_result_queue.put((task, None))
return
if self.task_queue is None:
raise SpiderMisuseError('You should configure task queue before '
'adding tasks. Use `setup_queue` method.')
if task.priority is None or not task.priority_is_custom:
task.priority = self.generate_task_priority()
task.priority_is_custom = False
else:
task.priority_is_custom = True
try:
if not task.url.startswith(('http://', 'https://', 'ftp://',
'file://', 'feed://')):
if self.base_url is None:
msg = 'Could not resolve relative URL because base_url ' \
'is not specified. Task: %s, URL: %s'\
% (task.name, task.url)
raise SpiderError(msg)
else:
task.url = urljoin(self.base_url, task.url)
# If task has grab_config object then update it too
if task.grab_config:
task.grab_config['url'] = task.url
except Exception as ex:
self.stat.collect('task-with-invalid-url', task.url)
if raise_error:
raise
else:
logger.error('', exc_info=ex)
return False
# TODO: keep original task priority if it was set explicitly
self.task_queue.put(task, task.priority, schedule_time=task.schedule_time)
return True
def stop(self):
"""
This method set internal flag which signal spider
to stop processing new task and shuts down.
"""
logger_verbose.debug('Method `stop` was called')
self.work_allowed = False
def load_proxylist(self, source, source_type=None, proxy_type='http',
auto_init=True, auto_change=True,
**kwargs):
self.proxylist = ProxyList()
if isinstance(source, BaseProxySource):
self.proxylist.set_source(source)
elif isinstance(source, six.string_types):
if source_type == 'text_file':
self.proxylist.load_file(source, proxy_type=proxy_type)
elif source_type == 'url':
self.proxylist.load_url(source, proxy_type=proxy_type)
else:
raise SpiderMisuseError('Method `load_proxylist` received '
'invalid `source_type` argument: %s'
% source_type)
else:
raise SpiderMisuseError('Method `load_proxylist` received '
'invalid `source` argument: %s'
% source)
self.proxylist_enabled = True
self.proxy = None
if not auto_change and auto_init:
self.proxy = self.proxylist.get_random_proxy()
self.proxy_auto_change = auto_change
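
    # Example (sketch): load proxies from a plain-text file, one proxy per line.
    #   bot.load_proxylist('/tmp/proxies.txt', source_type='text_file',
    #                      proxy_type='http', auto_change=True)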
def process_next_page(self, grab, task, xpath,
resolve_base=False, **kwargs):
"""
Generate task for next page.
:param grab: Grab instance
:param task: Task object which should be assigned to next page url
        :param xpath: xpath expression which selects the next page URL
:param **kwargs: extra settings for new task object
Example::
            self.process_next_page(grab, task, '//a[@class="next-page"]/@href')
"""
try:
# next_url = grab.xpath_text(xpath)
next_url = grab.doc.select(xpath).text()
except IndexError:
return False
else:
url = grab.make_url_absolute(next_url, resolve_base=resolve_base)
page = task.get('page', 1) + 1
grab2 = grab.clone()
grab2.setup(url=url)
task2 = task.clone(task_try_count=1, grab=grab2,
page=page, **kwargs)
self.add_task(task2)
return True
def render_stats(self, timing=True):
out = ['------------ Stats: ------------']
out.append('Counters:')
# Process counters
items = sorted(self.stat.counters.items(),
key=lambda x: x[0], reverse=True)
for item in items:
out.append(' %s: %s' % item)
out.append('')
out.append('Lists:')
# Process collections sorted by size desc
col_sizes = [(x, len(y)) for x, y in self.stat.collections.items()]
col_sizes = sorted(col_sizes, key=lambda x: x[1], reverse=True)
for col_size in col_sizes:
out.append(' %s: %d' % col_size)
out.append('')
# Process extra metrics
if 'download-size' in self.stat.counters:
out.append('Network download: %s' %
metric.format_traffic_value(
self.stat.counters['download-size']))
out.append('Queue size: %d' % self.task_queue.size()
if self.task_queue else 'NA')
out.append('Network streams: %d' % self.thread_number)
if timing:
out.append('')
out.append(self.render_timing())
return '\n'.join(out) + '\n'
def render_timing(self):
out = ['Timers:']
out.append(' DOM: %.3f' % GLOBAL_STATE['dom_build_time'])
time_items = [(x, y) for x, y in self.timer.timers.items()]
time_items = sorted(time_items, key=lambda x: x[1])
for time_item in time_items:
out.append(' %s: %.03f' % time_item)
return '\n'.join(out) + '\n'
# ********************************
# Methods for spider customization
# ********************************
def prepare(self):
"""
You can do additional spider customization here
before it has started working. Simply redefine
this method in your Spider class.
"""
def prepare_parser(self):
"""
You can do additional spider customization here
before it has started working. Simply redefine
this method in your Spider class.
This method is called only from Spider working in parser mode
that, in turn, is spawned automatically by main spider proces
working in multiprocess mode.
"""
def shutdown(self):
"""
You can override this method to do some final actions
after parsing has been done.
"""
pass
def update_grab_instance(self, grab):
"""
Use this method to automatically update config of any
`Grab` instance created by the spider.
"""
pass
def create_grab_instance(self, **kwargs):
# Back-ward compatibility for deprecated `grab_config` attribute
# Here I use `_grab_config` to not trigger warning messages
if self._grab_config and kwargs:
merged_config = deepcopy(self._grab_config)
merged_config.update(kwargs)
grab = Grab(**merged_config)
elif self._grab_config and not kwargs:
grab = Grab(**self._grab_config)
else:
grab = Grab(**kwargs)
return grab
def task_generator(self):
"""
You can override this method to load new tasks smoothly.
        It will be used each time the number of tasks
        in the task queue is less than the number of threads multiplied by 2.
        This allows you to avoid filling all free memory if the total number of
        tasks is big.
"""
if False:
# Some magic to make this function empty generator
yield ':-)'
return
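
    # Override sketch (illustrative): stream tasks from a file instead of
    # loading every URL into memory up front.
    #   def task_generator(self):
    #       with open('urls.txt') as inp:
    #           for line in inp:
    #               yield Task('page', url=line.strip())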
# ***************
# Private Methods
# ***************
def check_task_limits(self, task):
"""
Check that task's network & try counters do not exceed limits.
Returns:
* if success: (True, None)
* if error: (False, reason)
"""
if task.task_try_count > self.task_try_limit:
logger.debug('Task tries (%d) ended: %s / %s' % (
self.task_try_limit, task.name, task.url))
return False, 'task-try-count'
if task.network_try_count > self.network_try_limit:
logger.debug('Network tries (%d) ended: %s / %s' % (
self.network_try_limit, task.name, task.url))
return False, 'network-try-count'
return True, None
def generate_task_priority(self):
if self.priority_mode == 'const':
return DEFAULT_TASK_PRIORITY
else:
return randint(*RANDOM_TASK_PRIORITY_RANGE)
def process_task_generator(self):
"""
        Load new tasks from `self.task_generator_object`
        and put them into the task queue.
        New tasks are requested only while the task queue size
        is below a threshold, so memory is not flooded with pending tasks.
"""
if self.task_generator_enabled:
queue_size = self.task_queue.size()
if self.max_task_generator_chunk is not None:
min_limit = min(self.max_task_generator_chunk,
self.thread_number * 10)
else:
min_limit = self.thread_number * 10
if queue_size < min_limit:
logger_verbose.debug(
'Task queue contains less tasks (%d) than '
'allowed limit (%d). Trying to add '
'new tasks.' % (queue_size, min_limit))
try:
for x in six.moves.range(min_limit - queue_size):
item = next(self.task_generator_object)
logger_verbose.debug('Got new item from generator. '
'Processing it.')
self.process_handler_result(item)
except StopIteration:
# If generator have no values to yield
# then disable it
logger_verbose.debug('Task generator has no more tasks. '
'Disabling it')
self.task_generator_enabled = False
def start_task_generator(self):
"""
Process `self.initial_urls` list and `self.task_generator`
method. Generate first portion of tasks.
"""
logger_verbose.debug('Processing initial urls')
if self.initial_urls:
for url in self.initial_urls:
self.add_task(Task('initial', url=url))
self.task_generator_object = self.task_generator()
self.task_generator_enabled = True
# Initial call to task generator before spider has started working
self.process_task_generator()
def get_task_from_queue(self):
start = time.time()
try:
with self.timer.log_time('task_queue'):
return self.task_queue.get()
except queue.Empty:
size = self.task_queue.size()
if size:
logger_verbose.debug(
'No ready-to-go tasks, Waiting for '
'scheduled tasks (%d)' % size)
return True
else:
logger_verbose.debug('Task queue is empty.')
return None
def setup_grab_for_task(self, task):
grab = self.create_grab_instance()
if task.grab_config:
grab.load_config(task.grab_config)
else:
grab.setup(url=task.url)
# Generate new common headers
grab.config['common_headers'] = grab.common_headers()
self.update_grab_instance(grab)
return grab
def is_task_cacheable(self, task, grab):
if ( # cache is disabled for all tasks
not self.cache_enabled
# cache data should be refreshed
or task.get('refresh_cache', False)
# cache could not be used
or task.get('disable_cache', False)
# request type is not cacheable
or grab.detect_request_method() != 'GET'):
return False
else:
return True
def load_task_from_cache(self, task, grab, grab_config_backup):
with self.timer.log_time('cache'):
with self.timer.log_time('cache.read'):
cache_item = self.cache.get_item(
grab.config['url'], timeout=task.cache_timeout)
if cache_item is None:
return None
else:
with self.timer.log_time('cache.read.prepare_request'):
grab.prepare_request()
with self.timer.log_time('cache.read.load_response'):
self.cache.load_response(grab, cache_item)
grab.log_request('CACHED')
self.stat.inc('spider:request-cache')
return {'ok': True, 'grab': grab,
'grab_config_backup': grab_config_backup,
'task': task, 'emsg': None}
def is_valid_network_response_code(self, code, task):
"""
Answer the question: if the response could be handled via
usual task handler or the task faield and should be processed as error.
"""
return (code < 400 or code == 404 or
code in task.valid_status)
def process_handler_error(self, func_name, ex, task):
self.stat.inc('spider:error-%s' % ex.__class__.__name__.lower())
if hasattr(ex, 'tb'):
logger.error('Error in %s function' % func_name)
logger.error(ex.tb)
else:
logger.error('Error in %s function' % func_name, exc_info=ex)
# Looks strange but I really have some problems with
# serializing exception into string
try:
ex_str = six.text_type(ex)
except TypeError:
try:
ex_str = ex.decode('utf-8', 'ignore')
except TypeError:
ex_str = str(ex)
task_url = task.url if task is not None else None
self.stat.collect('fatal', '%s|%s|%s|%s' % (
func_name, ex.__class__.__name__, ex_str, task_url))
if isinstance(ex, FatalError):
#raise FatalError()
#six.reraise(FatalError, ex)
#logger.error(ex.tb)
raise ex
def find_data_handler(self, data):
try:
return getattr(data, 'handler')
except AttributeError:
try:
handler = getattr(self, 'data_%s' % data.handler_key)
except AttributeError:
raise NoDataHandler('No handler defined for Data %s'
% data.handler_key)
else:
return handler
def is_valid_network_result(self, res):
if res['task'].get('raw'):
return True
if res['ok']:
res_code = res['grab'].response.code
if self.is_valid_network_response_code(res_code, res['task']):
return True
return False
def run_parser(self):
"""
Main work cycle of spider process working in parser-mode.
"""
# Use Stat instance that does not print any logging messages
if self.parser_mode:
self.stat = Stat(logging_period=None)
self.prepare_parser()
process_request_count = 0
try:
recent_task_time = time.time()
while True:
try:
result = self.network_result_queue.get(True, 0.1)
except queue.Empty:
logger_verbose.debug('Network result queue is empty')
# Set `waiting_shutdown_event` only after 1 seconds
# of waiting for tasks to avoid
# race-condition issues
if time.time() - recent_task_time > 1:
self.waiting_shutdown_event.set()
if self.shutdown_event.is_set():
logger_verbose.debug('Got shutdown event')
return
else:
process_request_count += 1
recent_task_time = time.time()
if self.parser_mode:
self.stat.reset()
if self.waiting_shutdown_event.is_set():
self.waiting_shutdown_event.clear()
try:
handler = self.find_task_handler(result['task'])
except NoTaskHandler as ex:
ex.tb = format_exc()
self.parser_result_queue.put((ex, result['task']))
self.stat.inc('parser:handler-not-found')
else:
self.process_network_result_with_handler_mp(
result, handler)
self.stat.inc('parser:handler-processed')
finally:
if self.parser_mode:
data = {
'type': 'stat',
'counters': self.stat.counters,
'collections': self.stat.collections,
}
self.parser_result_queue.put((data, result['task']))
if self.parser_mode:
if self.parser_requests_per_process:
if process_request_count >= self.parser_requests_per_process:
break
except Exception as ex:
logging.error('', exc_info=ex)
raise
finally:
self.waiting_shutdown_event.set()
def process_network_result_with_handler_mp(self, result, handler):
"""
This is like `process_network_result_with_handler` but
        for the multiprocessing version
"""
handler_name = getattr(handler, '__name__', 'NONE')
try:
with self.timer.log_time('response_handler'):
with self.timer.log_time('response_handler.%s' % handler_name):
handler_result = handler(result['grab'], result['task'])
if handler_result is None:
pass
else:
for something in handler_result:
self.parser_result_queue.put((something,
result['task']))
except NoDataHandler as ex:
ex.tb = format_exc()
self.parser_result_queue.put((ex, result['task']))
except Exception as ex:
ex.tb = format_exc()
self.parser_result_queue.put((ex, result['task']))
def find_task_handler(self, task):
if task.origin_task_generator is not None:
return self.handler_for_inline_task
callback = task.get('callback')
if callback:
return callback
else:
try:
handler = getattr(self, 'task_%s' % task.name)
except AttributeError:
raise NoTaskHandler('No handler or callback defined for '
'task %s' % task.name)
else:
return handler
def handler_for_inline_task(self, grab, task):
        # On the first call this can be a bare generator (subroutine),
        # so we need to check for that
if isinstance(task, types.GeneratorType):
coroutines_stack = []
sendval = None
origin_task_generator = task
target = origin_task_generator
else:
coroutines_stack = task.coroutines_stack
sendval = grab
origin_task_generator = task.origin_task_generator
target = origin_task_generator
while True:
try:
result = target.send(sendval)
# If it is subroutine we have to initialize it and
# save coroutine in the coroutines stack
if isinstance(result, types.GeneratorType):
coroutines_stack.append(target)
sendval = None
target = result
origin_task_generator = target
else:
new_task = result
new_task.origin_task_generator = origin_task_generator
new_task.coroutines_stack = coroutines_stack
self.add_task(new_task)
return
except StopIteration:
# If coroutine is over we should check coroutines stack,
# may be it is subroutine
if coroutines_stack:
target = coroutines_stack.pop()
origin_task_generator = target
else:
return
def log_network_result_stats(self, res, from_cache=False):
# Increase stat counters
self.stat.inc('spider:request-processed')
self.stat.inc('spider:task')
self.stat.inc('spider:task-%s' % res['task'].name)
if (res['task'].network_try_count == 1 and
res['task'].task_try_count == 1):
self.stat.inc('spider:task-%s-initial' % res['task'].name)
# Update traffic statistics
if res['grab'] and res['grab'].response:
resp = res['grab'].response
self.timer.inc_timer('network-name-lookup', resp.name_lookup_time)
self.timer.inc_timer('network-connect', resp.connect_time)
self.timer.inc_timer('network-total', resp.total_time)
if from_cache:
self.stat.inc('spider:download-size-with-cache',
resp.download_size)
self.stat.inc('spider:upload-size-with-cache',
resp.upload_size)
else:
self.stat.inc('spider:download-size', resp.download_size)
self.stat.inc('spider:upload-size', resp.upload_size)
def process_grab_proxy(self, task, grab):
"Assign new proxy from proxylist to the task"
if task.use_proxylist:
if self.proxylist_enabled:
# Need this to work around
# pycurl feature/bug:
# pycurl instance uses previously connected proxy server
# even if `proxy` options is set with another proxy server
grab.setup(connection_reuse=False)
if self.proxy_auto_change:
self.proxy = self.change_proxy(task, grab)
def change_proxy(self, task, grab):
proxy = self.proxylist.get_random_proxy()
grab.setup(proxy=proxy.get_address(),
proxy_userpwd=proxy.get_userpwd(),
proxy_type=proxy.proxy_type)
return proxy
def submit_task_to_transport(self, task, grab, grab_config_backup):
self.stat.inc('spider:request-network')
self.stat.inc('spider:task-%s-network' % task.name)
with self.timer.log_time('network_transport'):
logger_verbose.debug('Submitting task to the transport '
'layer')
try:
self.transport.start_task_processing(
task, grab, grab_config_backup)
except GrabInvalidUrl:
logger.debug('Task %s has invalid URL: %s' % (
task.name, task.url))
self.stat.collect('invalid-url', task.url)
def is_valid_for_cache(self, res):
"""
Check if network transport result could
be saved to cache layer.
res: {ok, grab, grab_config_backup, task, emsg}
"""
if res['ok']:
if self.cache_enabled:
if res['grab'].request_method == 'GET':
if not res['task'].get('disable_cache'):
if self.is_valid_network_response_code(
res['grab'].response.code, res['task']):
return True
return False
def start_api_thread(self):
from grab.spider.http_api import HttpApiThread
proc = HttpApiThread(self)
proc.start()
return proc
def is_ready_to_shutdown(self):
        # All of the following must be true to shut down the spider
# 1) No active network connections
# 2) Network result queue is empty
# 3) Task queue is empty
# 4) Parser pipeline is ready to shutdown
# 5) Task generator has completed
return (
self.parser_pipeline.is_waiting_shutdown()
and not self.task_generator_enabled
and not self.transport.get_active_threads_number()
and not self.task_queue.size()
and not self.network_result_queue.qsize()
)
def run(self):
"""
Main method. All work is done here.
"""
if self.mp_mode:
from multiprocessing import Process, Event, Queue
else:
from multiprocessing.dummy import Process, Event, Queue
self.timer.start('total')
self.transport = MulticurlTransport(self.thread_number)
if self.http_api_port:
http_api_proc = self.start_api_thread()
else:
http_api_proc = None
self.parser_pipeline = ParserPipeline(
bot=self,
mp_mode=self.mp_mode,
pool_size=self.parser_pool_size,
shutdown_event=self.shutdown_event,
network_result_queue=self.network_result_queue,
requests_per_process=self.parser_requests_per_process,
)
network_result_queue_limit = max(10, self.thread_number * 2)
try:
# Run custom things defined by this specific spider
            # By default it does nothing
self.prepare()
# Setup task queue if it has not been configured yet
if self.task_queue is None:
self.setup_queue()
# Initiate task generator. Only in main process!
with self.timer.log_time('task_generator'):
self.start_task_generator()
while self.work_allowed:
with self.timer.log_time('task_generator'):
if self.task_generator_enabled:
self.process_task_generator()
result_from_cache = None
free_threads = self.transport.get_free_threads_number()
# Load new task only if self.network_result_queue is not full
if (self.transport.get_free_threads_number()
and (self.network_result_queue.qsize()
< network_result_queue_limit)):
logger_verbose.debug(
'Transport and parser have free resources. '
'Trying to load new task from task queue.')
task = self.get_task_from_queue()
# If no task received from task queue
# try to query task generator
                    # and then check whether the spider can be shut down
if task is None:
if not self.transport.get_active_threads_number():
self.process_task_generator()
if task is None:
# If no task received from task queue
# check if spider could be shut down
if self.is_ready_to_shutdown():
self.shutdown_event.set()
self.stop()
break # Break `if self.work_allowed` cycle
elif isinstance(task, bool) and (task is True):
# Take some sleep to not load CPU
if not self.transport.get_active_threads_number():
time.sleep(0.1)
else:
logger_verbose.debug('Got new task from task queue: %s'
% task)
task.network_try_count += 1
is_valid, reason = self.check_task_limits(task)
if is_valid:
grab = self.setup_grab_for_task(task)
grab_config_backup = grab.dump_config()
result_from_cache = None
if self.is_task_cacheable(task, grab):
result_from_cache = self.load_task_from_cache(
task, grab, grab_config_backup)
if result_from_cache:
logger_verbose.debug(
'Task data is loaded from the cache. ')
else:
if self.only_cache:
logger.debug('Skipping network request to '
'%s' % grab.config['url'])
else:
self.process_grab_proxy(task, grab)
self.submit_task_to_transport(
task, grab, grab_config_backup)
else:
self.log_rejected_task(task, reason)
handler = task.get_fallback_handler(self)
if handler:
handler(task)
with self.timer.log_time('network_transport'):
logger_verbose.debug('Asking transport layer to do '
'something')
self.transport.process_handlers()
logger_verbose.debug('Processing network results (if any).')
# Collect completed network results
# Each result could be valid or failed
# Result is dict {ok, grab, grab_config_backup, task, emsg}
results = [(x, False) for x in
self.transport.iterate_results()]
if result_from_cache:
results.append((result_from_cache, True))
# Some sleep to avoid thousands of iterations per second.
# If no results from network transport
if not results:
# If task queue is empty (or if there are only
# delayed tasks)
if task is None or bool(task) == True:
# If no network activity
if not self.transport.get_active_threads_number():
# If parser result queue is empty
if not self.parser_pipeline.has_results():
# Just sleep some time, do not kill CPU
time.sleep(0.1)
for result, from_cache in results:
if not from_cache:
if self.is_valid_for_cache(result):
with self.timer.log_time('cache'):
with self.timer.log_time('cache.write'):
self.cache.save_response(
result['task'].url, result['grab'])
self.log_network_result_stats(
result, from_cache=from_cache)
if self.is_valid_network_result(result):
#handler = self.find_task_handler(result['task'])
#self.process_network_result_with_handler(
# result, handler)
# MP:
# ***
self.network_result_queue.put(result)
else:
self.log_failed_network_result(result)
# Try to do network request one more time
if self.network_try_limit > 0:
result['task'].refresh_cache = True
result['task'].setup_grab_config(
result['grab_config_backup'])
self.add_task(result['task'])
if from_cache:
self.stat.inc('spider:task-%s-cache' % task.name)
self.stat.inc('spider:request')
# MP:
# ***
while True:
try:
p_res, p_task = self.parser_pipeline.get_result()
except queue.Empty:
break
else:
self.stat.inc('spider:parser-result')
self.process_handler_result(p_res, p_task)
if not self.shutdown_event.is_set():
self.parser_pipeline.check_pool_health()
logger_verbose.debug('Work done')
except KeyboardInterrupt:
logger.info('\nGot ^C signal in process %d. Stopping.'
% os.getpid())
self.interrupted = True
raise
finally:
            # This code is executed when the main cycle is broken
self.timer.stop('total')
self.stat.print_progress_line()
self.shutdown()
# Stop HTTP API process
if http_api_proc:
http_api_proc.server.shutdown()
http_api_proc.join()
self.task_queue.clear()
# Stop parser processes
self.shutdown_event.set()
self.parser_pipeline.shutdown()
logger.debug('Main process [pid=%s]: work done' % os.getpid())
def log_failed_network_result(self, res):
# Log the error
if res['ok']:
msg = 'http-%s' % res['grab'].response.code
else:
msg = res['error_abbr']
self.stat.inc('error:%s' % msg)
#logger.error(u'Network error: %s' % msg)#%
#make_unicode(msg, errors='ignore'))
def log_rejected_task(self, task, reason):
logger_verbose.debug('Task %s is rejected due to '
'%s limit'
% (task.name, reason))
if reason == 'task-try-count':
self.stat.collect('task-count-rejected',
task.url)
elif reason == 'network-try-count':
self.stat.collect('network-count-rejected',
task.url)
else:
raise SpiderError('Unknown response from '
'check_task_limits: %s'
% reason)
def process_handler_result(self, result, task=None):
"""
Process result received from the task handler.
Result could be:
* None
* Task instance
* Data instance.
"""
if isinstance(result, Task):
self.add_task(result)
elif isinstance(result, Data):
handler = self.find_data_handler(result)
try:
data_result = handler(**result.storage)
if data_result is None:
pass
else:
for something in data_result:
self.process_handler_result(something, task)
except Exception as ex:
self.process_handler_error('data_%s' % result.handler_key, ex,
task)
elif result is None:
pass
elif isinstance(result, Exception):
handler = self.find_task_handler(task)
handler_name = getattr(handler, '__name__', 'NONE')
self.process_handler_error(handler_name, result, task)
elif isinstance(result, dict):
if result.get('type') == 'stat':
for name, count in result['counters'].items():
self.stat.inc(name, count)
for name, items in result['collections'].items():
for item in items:
self.stat.collect(name, item)
else:
raise SpiderError('Unknown result type: %s' % result)
else:
raise SpiderError('Unknown result type: %s' % result)
# ******************
# Deprecated Methods
# ******************
def add_item(self, list_name, item):
logger.debug('Method `Spider::add_item` is deprecated. '
'Use `Spider::stat.collect` method instead.')
self.stat.collect(list_name, item)
def inc_count(self, key, count=1):
logger.debug('Method `Spider::inc_count` is deprecated. '
'Use `Spider::stat.inc` method instead.')
self.stat.inc(key, count)
def start_timer(self, key):
logger.debug('Method `Spider::start_timer` is deprecated. '
'Use `Spider::timer.start` method instead.')
self.timer.start(key)
def stop_timer(self, key):
logger.debug('Method `Spider::stop_timer` is deprecated. '
'Use `Spider::timer.stop` method instead.')
self.timer.stop(key)
@property
def items(self):
logger.debug('Attribute `Spider::items` is deprecated. '
'Use `Spider::stat.collections` attribute instead.')
return self.stat.collections
@property
def counters(self):
logger.debug('Attribute `Spider::counters` is deprecated. '
'Use `Spider::stat.counters` attribute instead.')
return self.stat.counters
@contextmanager
def save_timer(self, key):
logger.debug('Method `Spider::save_timer` is deprecated. '
'Use `Spider::timer.log_time` method instead.')
self.timer.start(key)
try:
yield
finally:
self.timer.stop(key)
def get_grab_config(self):
logger.error('Using `grab_config` attribute is deprecated. Override '
'`create_grab_instance method instead.')
return self._grab_config
def set_grab_config(self, val):
logger.error('Using `grab_config` attribute is deprecated. Override '
'`create_grab_instance method instead.')
self._grab_config = val
grab_config = property(get_grab_config, set_grab_config)
def setup_grab(self, **kwargs):
logger.error('Method `Spider::setup_grab` is deprecated. '
'Define `Spider::create_grab_instance` or '
'Spider::update_grab_instance` methods in your '
'Spider sub-class.')
self.grab_config.update(**kwargs)
def valid_response_code(self, code, task):
logger.error('Method `Spider::valid_response_code` is deprecated. '
'Use `Spider::is_valid_network_response_code` method or '
'`Spider::is_valid_network_result` method.')
return self.is_valid_network_response_code(code, task)
@property
def taskq(self):
logger.error('Attribute `Spider::taskq` is deprecated. '
'Use `Spider::task_queue` attribute.')
return self.task_queue
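# Illustrative subclass sketch (Task seeding and constructor details are
# assumptions; only the handler name follows the `task_<name>` convention
# dispatched by find_task_handler above):
#   class ExampleSpider(Spider):
#       def task_page(self, grab, task):
#           self.stat.inc('custom:pages-seen')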
| 2.078125 | 2 |
cgi-bin/request/grx_rings.py | jamayfieldjr/iem | 1 | 12789801 | #!/usr/bin/env python
"""Author: <NAME>"""
import math
import cgi
from pyiem.util import ssw
def createCircleAroundWithRadius(lat, lon, radiusMiles):
"""Create circle."""
latArray = []
lonArray = []
for brng in range(0, 360):
lat2, lon2 = getLocation(lat, lon, brng, radiusMiles)
latArray.append(lat2)
lonArray.append(lon2)
return lonArray, latArray
def getLocation(lat1, lon1, brng, distanceMiles):
"""getLocation."""
lat1 = lat1 * math.pi / 180.0
lon1 = lon1 * math.pi / 180.0
# earth radius - If ever needed to be in km vs. miles, change R
R = 3959
distanceMiles = distanceMiles/R
brng = (brng / 90) * math.pi / 2
lat2 = (
math.asin(
math.sin(lat1) * math.cos(distanceMiles) + math.cos(lat1) *
math.sin(distanceMiles) * math.cos(brng))
)
lon2 = (
lon1 + math.atan2(
math.sin(brng) * math.sin(distanceMiles) * math.cos(lat1),
math.cos(distanceMiles) - math.sin(lat1) * math.sin(lat2))
)
lon2 = 180.0 * lon2 / math.pi
lat2 = 180.0 * lat2 / math.pi
return lat2, lon2
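# Illustrative sketch (added for clarity; not called by the CGI entry point):
# shows how the two helpers above combine to produce ring vertices around the
# same default point used in main().
def example_single_ring():
    """Build one 25-mile ring around the default point and return its first
    (lat, lon) vertex."""
    lons, lats = createCircleAroundWithRadius(42.014004, -93.635773, 25.0)
    return lats[0], lons[0]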
def main():
"""Go Main Go."""
form = cgi.FieldStorage()
ssw("Content-type: application/octet-stream\n")
ssw(('Content-Disposition: attachment; filename=placefile_rings.txt\n\n'))
# Things for the user to theoretically input:
loc = form.getfirst("loc", "Jack Trice Stadium")
pointLat = float(form.getfirst("lat", 42.014004))
pointLon = float(form.getfirst("lon", -93.635773))
ssw((
"; This is a placefile to draw a range ring x miles from: %s\n"
"; Created by <NAME> - 8/9/2019\n"
"; Code adapted from <NAME> (2016)\n\n\n"
"Threshold: 999 \n"
"Title: Rings @ %s\n"
) % (loc, loc))
for i in range(3):
distanceInMiles = float(form.getfirst("m%s" % (i, ), 100))
if distanceInMiles <= 0.00001:
continue
r = int(form.getfirst('r%s' % (i, ), 255))
g = int(form.getfirst('g%s' % (i, ), 255))
b = int(form.getfirst('b%s' % (i, ), 0))
a = int(form.getfirst('a%s' % (i, ), 255))
# Create the lon/lat pairs
X, Y = createCircleAroundWithRadius(
pointLat, pointLon, distanceInMiles)
ssw((
"Color: %s %s %s %s\n"
"Line: 2, 0, \"%.1f miles from %s\" \n"
) % (r, g, b, a, distanceInMiles, loc))
for x, y in zip(X, Y):
ssw(" %s, %s\n" % (y, x))
ssw("End:\n\n")
if __name__ == '__main__':
main()
| 3.390625 | 3 |
app/database/api/test/test_with_lunar_sortie_data.py | space-logistics-org/spacenet | 1 | 12789802 | <reponame>space-logistics-org/spacenet<gh_stars>1-10
"""
This module contains tests for API routes via the lunar sortie data.
"""
import pytest
from fastapi.testclient import TestClient
from app.database.api.database import get_db
from app.database.api.main import app
from app.auth_dependencies import current_user
from ..schemas.constants import CREATE_SCHEMAS
from app.database.api.test.utilities import get_current_user, get_test_db
from spacenet.schemas.element import Element
from spacenet.schemas.node import Node
from spacenet.schemas.edge import Edge
from spacenet.schemas.resource import Resource
from spacenet.schemas.test.utilities import (
lunar_sortie_elements,
lunar_sortie_edges,
lunar_sortie_nodes,
lunar_sortie_resources,
)
pytestmark = [pytest.mark.unit, pytest.mark.database, pytest.mark.lunar_sortie]
app.dependency_overrides[get_db] = get_test_db
app.dependency_overrides[current_user] = get_current_user
client = TestClient(app)
def schema_superclass(type_):
"""
:param type_: subclass of exactly one of Element, Node, Edge, or Resource schemas
:return: the superclass of type_ from (Element, Node, Edge, Resource)
"""
for super_ in (Element, Node, Edge, Resource):
if issubclass(type_, super_):
return super_
TYPE_TO_SUPER = {cls: schema_superclass(cls) for cls in CREATE_SCHEMAS}
SUPER_TO_PREFIX = {Element: "element", Edge: "edge", Node: "node", Resource: "resource"}
def object_to_prefix(obj: dict) -> str:
"""
:param obj: object, which fits some schema in CREATE_SCHEMAS
:return: routing prefix for the object schema's type
"""
for schema in CREATE_SCHEMAS:
try:
obj = schema.parse_obj(obj)
except ValueError:
pass
else:
super_ = TYPE_TO_SUPER[schema]
return SUPER_TO_PREFIX[super_]
else:
raise ValueError(f"Could not find prefix mapping to {obj}")
@pytest.mark.parametrize(
"domain_objects",
(
pytest.param(
pytest.lazy_fixture("lunar_sortie_" + obj_type + "s"),
marks=getattr(pytest.mark, obj_type),
)
for obj_type in ["element", "edge", "node", "resource"]
),
)
def test_routers_with_lunar_sortie_data(domain_objects):
for obj in domain_objects:
prefix = object_to_prefix(obj)
post_response = client.post(f"/{prefix}/", json=obj)
assert 201 == post_response.status_code
result = post_response.json()
id_ = result.get("id")
with_id = dict(obj, id=id_)
assert with_id == result
get_response = client.get(f"/{prefix}/{id_}")
assert 200 == get_response.status_code
assert with_id == get_response.json()
delete_response = client.delete(f"/{prefix}/{id_}")
assert 200 == delete_response.status_code
assert with_id == delete_response.json()
assert 404 == client.get(f"/{prefix}/{id_}").status_code
| 2.1875 | 2 |
run.py | LeoTheBestCoder/wordle-solver | 5 | 12789803 | from random import randint as rint
from sys import stderr, exit
wordlist = []
GREEN, YELLOW, GRAY = ('0', '1', '2')
def info():
"""
Wordle Game Solver
https://www.nytimes.com/games/wordle/index.html
Created by Leo (<NAME>), 2022
Any suggestion is welcome!
Check my code at https://github.com/LeoTheBestCoder/wordle-solver
"""
return
def showrule():
print('========================================================================')
print('If the result is GREEN, enter 0')
print('If the result is YELLOW, enter 1')
print('If the result is GRAY, enter 2')
print('Only a string with length = 5 and contains ONLY 0, 1, 2 is ACCEPTED!')
print('ex. Enter 12200 if the result is "yellow gray gray green green".')
print('========================================================================')
input('\nReady to start? (Press ENTER to continue)')
def getword():
idx = rint(0, len(wordlist) - 1)
return wordlist[idx]
def readfile():
global wordlist
with open('wordlist.txt', 'r') as fh:
wordlist = list(map(lambda w: w[:-1] if w[-1] == '\n' else w, fh.readlines()))
def check_r(res: str) -> bool:
if len(res) != 5:
return False
for ch in res:
if ch not in ['0', '1', '2']:
return False
return True
def update(word: str, res: str):
global wordlist
try:
assert check_r(res)
if res != '00000':
wordlist.remove(word)
for i in range(5):
invalid = []
if res[i] == GREEN:
# correct character + correct position
for w in wordlist:
if w[i] != word[i]:
invalid.append(w)
elif res[i] == YELLOW:
# correct character + wrong position
for w in wordlist:
if word[i] not in w:
invalid.append(w)
elif w[i] == word[i]:
invalid.append(w)
elif res[i] == GRAY:
# wrong character
for w in wordlist:
if word[i] in w:
special_case = False
for j in range(5):
if i != j and word[i] == word[j] and res[j] in [GREEN, YELLOW]:
special_case = True
if not special_case:
invalid.append(w)
# else:
# print(f'{w} is a special case')
for i_word in invalid:
wordlist.remove(i_word)
except:
stderr.write('Invalid result!\n')
exit(-1)
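# Worked example of the scoring convention used above (words chosen only for
# illustration): if the hidden word were "crate" and the solver guessed
# "crane", the user would enter "00020" -- green for c, r, a, e and gray for
# the unmatched "n".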
def run():
print(info.__doc__)
readfile()
showrule()
word = getword()
while len(set(word)) != 5:
word = getword()
print(f'Try to guess "{word}". What is the result? ', end = '')
res = input()
update(word, res)
# print(f'len = {len(wordlist)}')
# print(wordlist)
while res != '00000':
word = getword()
print(f'Try to guess "{word}". What is the result? ', end = '')
res = input()
update(word, res)
# print(f'len = {len(wordlist)}')
# print(wordlist)
print('Congratulations!')
if __name__ == '__main__':
run()
| 3.453125 | 3 |
src/__main__.py | Nalhin/Econometrics | 0 | 12789804 | import pandas as pd
from .ols_estimator import OLSEstimator
ols = OLSEstimator(pd.read_csv("./data/listings_summary.csv"))
ols.clean_data()
ols.calculate_models()
ols.output_latex()
| 2.328125 | 2 |
cobald_tests/controller/test_relative_supply.py | maxfischer2781/cobald | 7 | 12789805 | <reponame>maxfischer2781/cobald
import pytest
from cobald.controller.relative_supply import RelativeSupplyController
from ..mock.pool import MockPool
class TestRelativeSupplyController(object):
def test_low_scale(self):
pool = MockPool()
with pytest.raises(Exception):
RelativeSupplyController(pool, low_scale=0.9, high_scale=1.0)
def test_high_scale(self):
pool = MockPool()
with pytest.raises(Exception):
RelativeSupplyController(pool, low_scale=0.5, high_scale=0.9)
def test_both_scales(self):
pool = MockPool()
with pytest.raises(Exception):
RelativeSupplyController(pool, low_scale=1.1, high_scale=0.9)
def test_adjustment(self):
pool = MockPool()
relative_supply_controller = RelativeSupplyController(pool)
pool.utilisation = pool.allocation = 1.0
expected_demand = 0
for _ in range(5):
relative_supply_controller.regulate(1)
assert pool.demand == expected_demand
pool.demand = 1
for _ in range(5):
expected_demand = pool.supply * relative_supply_controller.high_scale
relative_supply_controller.regulate(1)
assert pool.demand == expected_demand
pool.utilisation = pool.allocation = 0.1
for _ in range(5):
expected_demand = pool.supply * relative_supply_controller.low_scale
relative_supply_controller.regulate(1)
assert pool.demand == expected_demand
pool.utilisation = pool.allocation = 0.5
expected_demand = pool.supply
for _ in range(5):
relative_supply_controller.regulate(1)
assert pool.demand == expected_demand
| 2.328125 | 2 |
tests/model/test_fencer.py | zoldello/fencing | 0 | 12789806 | <filename>tests/model/test_fencer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `model/fencing` package."""
import unittest
from model.fencer import Fencer
#from __future__ import absolute_import
#import imp
#foo = imp.load_source('Fencer', 'fencing/model/fencer.py')
class TestFencing(unittest.TestCase):
    """Model/Fencing unit test."""
    def test___init___ensure_initialization_work(self):
        """Placeholder test; the original assertions are kept below for reference."""
        #target = { "last_name": "jackson123", 'first_name': 'michael', 'club': 'blood on the dance floor', 'skill_level': 'A18'}
        #fencer = Fencer(target)
        #self.assertEqual(fencer.last_name, target['last_name'], 'Fencer model should properly set last name')
if __name__ == '__main__':
unittest.main()
| 3.0625 | 3 |
lr-bbs-server/server/commands.py | scott306lr/network_programming | 0 | 12789807 | import dataparser as dp
import sqlite3
import os.path
import re
from datetime import date
'''
Post storage layout: `posts` is a flat list of Post objects indexed by serial
number (SN); posts[0] is an empty placeholder so that a post's SN equals its
list index. Each Post carries SN, board, title, author, date, content and an
accumulated comments string, and is filtered by board name when listing.
'''
class Post():
counter = 0
def __init__(self,board,title,author,date,content,available=True):
if (available==True):
Post.counter += 1
self.SN=Post.counter
self.board=board
self.title=title
self.author=author
self.date=date
self.content=content
self.comments=""
self.available=available
def read(self):
c = self.content.split('<br>')
msg = f"Author: {self.author}\nTitlte: {self.title}\nDate: {self.date}\n--\n"
for comp in c:
msg = msg + comp + '\n'
msg = msg + '--' + self.comments
return msg
def update(self,ntype,new):
if(ntype=="title"): self.title=new
if(ntype=="content"): self.content=new
def comment(self,user,comm):
self.comments += f'\n{user}: {comm}'
def emptypost():
return Post(0,0,0,0,0,False)
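# Illustrative example (not executed by the server, since constructing a Post
# bumps the shared Post.counter): create a post and render it the way the
# "read" command does.
#   p = Post("general", "hello", "alice", "1/1", "line one<br>line two")
#   p.comment("bob", "nice post")
#   print(p.read())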
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(BASE_DIR, 'userinf.db')
posts=[emptypost()]
Chatrooms={}
def make_jsend(command,data):
return { "command":command, "data":data }
def usrReg(username, email, password):
con = sqlite3.connect(db_path)
c = con.cursor()
c.execute(f"SELECT * FROM user WHERE name='{username}'")
if c.fetchone() is None:
c.execute(f"INSERT INTO USER VALUES (null,'{username}','{email}','{password}')")
con.commit()
con.close()
return 0
else:
con.close()
return -1
def usrlogin(username, password):
con = sqlite3.connect(db_path)
c = con.cursor()
c.execute(f"SELECT * FROM user WHERE name='{username}' AND password='{password}'")
result = c.fetchone()
con.close()
if result != None:
return result[0]
else:
return -1
def listusers():
con = sqlite3.connect(db_path)
c = con.cursor()
msg = 'Name'.ljust(15, ' ') + 'Email'.ljust(20, ' ') + '\n'
for row in c.execute('SELECT * FROM user ORDER BY id'):
msg = msg + str(row[1]).ljust(15, ' ') + str(row[2]).ljust(20, ' ') + '\n'
con.close()
return msg
def listboards():
con = sqlite3.connect(db_path)
c = con.cursor()
msg = 'Index'.ljust(15, ' ') + 'Name'.ljust(15, ' ') + 'Moderator'.ljust(15, ' ') + '\n'
for row in c.execute('SELECT * FROM board ORDER BY "index"'):
msg = msg + str(row[0]).ljust(15, ' ') + str(row[1]).ljust(15, ' ') + str(row[2]).ljust(15, ' ') + '\n'
con.close()
return msg
def addBoard(board_name,moderator):
con = sqlite3.connect(db_path)
c = con.cursor()
c.execute(f"SELECT * FROM BOARD WHERE name='{board_name}'")
if c.fetchone() is None:
c.execute(f"INSERT INTO BOARD VALUES (null,'{board_name}','{moderator}')")
con.commit()
con.close()
return 0
else:
con.close()
return -1
def postExist(post_SN):
if (int(post_SN) <= Post.counter and posts[int(post_SN)].available == True):
return True
else:
return False
def listposts(board_name):
    con = sqlite3.connect(db_path)
    c = con.cursor()
    c.execute(f"SELECT * FROM BOARD WHERE name='{board_name}'")
    board_exists = c.fetchone() is not None
    con.close()
    if not board_exists:
        return "Board does not exist."
    msg = 'S/N'.ljust(10, ' ') + 'Title'.ljust(15, ' ') + 'Author'.ljust(15, ' ') + 'Date'.ljust(15, ' ') + '\n'
    for p in posts:
        if p.available and p.board == board_name:
            msg += str(p.SN).ljust(10, ' ') + p.title.ljust(15, ' ') + p.author.ljust(15, ' ') + p.date.ljust(15, ' ') + '\n'
    return msg
def addPost(board,title,author,content):
    con = sqlite3.connect(db_path)
    c = con.cursor()
    c.execute(f"SELECT * FROM BOARD WHERE name='{board}'")
    board_exists = c.fetchone() is not None
    con.close()
    if not board_exists:
        return -1
    global posts
    today = date.today()
    MD = f"{today.month}/{today.day}"
    #posts[board] = {f'{post_counter}:[{post_counter},{board},{title},{author},{MD},{content}]'}
    post = Post(board,title,author,MD,content)
    posts.append(post)
    return 0
def listrooms():
msg = 'chatroom-name'.ljust(15, ' ') + 'status'.ljust(8, ' ') + '\n'
for k in Chatrooms:
msg = msg + k.ljust(15, ' ') + Chatrooms[k]["status"].ljust(8, ' ') + '\n'
return msg
class tcpCmdHandler(dp.Data_Parser):
command = dp.CommandListener("tcp")
cmdlist = command.tcp_savecmds
def __init__(self, lock, addr):
dp.Data_Parser.__init__(self,lock)
self.addr=addr
@command.listen()
def login(self,jMsg,username,password):
if (jMsg["user"] != "none"):
command = 'none'
sendmsg = 'Please logout first!'
else:
uid = usrlogin(username,password)
if (uid != -1):
command = f'setuser {username} {uid}'
sendmsg = f'Welcome, {username}.'
else:
command = 'none'
sendmsg = 'Login failed.'
return make_jsend(command,sendmsg)
@command.listen(name="get-ip")
def get_ip(self,jMsg):
command = 'none'
sendmsg = f'IP: {self.addr[0]}:{self.addr[1]}'
return make_jsend(command,sendmsg)
@command.listen()
def logout(self,jMsg):
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
elif jMsg["user"] in Chatrooms and Chatrooms[ jMsg["user"] ]["status"]=="open":
command = "none"
sendmsg = 'Please do "attach" and "leave-chatroom" first.'
else:
command = "logout"
sendmsg = f"Bye, {jMsg['user']}"
if jMsg["user"] in Chatrooms : Chatrooms.pop(jMsg["user"])
return make_jsend(command,sendmsg)
@command.listen(name="list-user")
def listuser(self,jMsg):
command = "none"
sendmsg = listusers()
return make_jsend(command,sendmsg)
@command.listen()
def exit(self,jMsg):
if jMsg["user"] in Chatrooms : Chatrooms.pop(jMsg["user"])
command = "exit"
sendmsg = ""
return make_jsend(command,sendmsg)
######## Bulletin Board System ########
@command.listen(name="create-board")
def create_board(self,jMsg,name):
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
else:
check = addBoard(name,jMsg["user"])
if check == 0 :
command = "none"
sendmsg = "Create board successfully."
else:
command = "none"
sendmsg = 'Board already exists.'
return make_jsend(command,sendmsg)
@command.listen(name="create-post", usage="<board-name> --title <title> --content <content>")
def create_post(self,jMsg,board,title,content):
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
else:
self.lock.acquire()
check = addPost(board,title,jMsg['user'],content)
self.lock.release()
if check == 0 :
command = "none"
sendmsg = "Create post successfully."
else:
command = "none"
sendmsg = "Board does not exist."
return make_jsend(command,sendmsg)
@command.listen(name="list-board")
def list_board(self,jMsg):
command = "none"
sendmsg = listboards()
return make_jsend(command,sendmsg)
@command.listen(name="list-post", usage="<board-name>")
def list_post(self,jMsg,board_name):
self.lock.acquire()
command = "none"
sendmsg = listposts(board_name)
self.lock.release()
return make_jsend(command,sendmsg)
@command.listen(usage="<post-S/N>")
def read(self,jMsg,post_SN):
self.lock.acquire()
if( postExist(post_SN) ):
command = "none"
sendmsg = posts[int(post_SN)].read()
else:
command = "none"
sendmsg = "Post does not exist."
self.lock.release()
return make_jsend(command,sendmsg)
@command.listen(name="delete-post", usage="<post-S/N>")
def delete_post(self,jMsg,post_SN):
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
else:
global posts
self.lock.acquire()
if (postExist(post_SN)):
if(posts[int(post_SN)].author == jMsg['user']):
posts[int(post_SN)] = emptypost()
command = "none"
sendmsg = "Delete successfully."
else:
command = "none"
sendmsg = "Not the post owner."
else:
command = "none"
sendmsg = "Post does not exist."
self.lock.release()
return make_jsend(command,sendmsg)
@command.listen(name="update-post", usage="<post-S/N> --title/content <new>")
def update_post(self,jMsg,post_SN,which,inf):
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
else:
global posts
self.lock.acquire()
if (postExist(post_SN)):
if(posts[int(post_SN)].author == jMsg['user']):
if(which=="title"):
posts[int(post_SN)].title = inf
if(which=="content"):
posts[int(post_SN)].content = inf
command = "none"
sendmsg = "Update successfully."
else:
command = "none"
sendmsg = "Not the post owner."
else:
command = "none"
sendmsg = "Post does not exist."
self.lock.release()
return make_jsend(command,sendmsg)
@command.listen(usage="<post-S/N> <comment>")
def comment(self,jMsg,post_SN,comment):
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
else:
global posts
self.lock.acquire()
if (postExist(post_SN)):
posts[int(post_SN)].comment(jMsg['user'],comment)
command = "none"
sendmsg = "Comment successfully."
else:
command = "none"
sendmsg = "Post does not exist."
self.lock.release()
return make_jsend(command,sendmsg)
################### Chat-Server ########################
@command.listen(name="create-chatroom")
def create_chatroom(self,jMsg,port):
global Chatrooms
self.lock.acquire()
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
elif jMsg["user"] in Chatrooms :
command = "none"
sendmsg = "User has already created the chatroom."
else:
Chatrooms[ jMsg["user"] ] = { "port":port, "status":"open" }
command = f"create_chatroom {port}"
sendmsg = ""
self.lock.release()
return make_jsend(command,sendmsg)
@command.listen(name="join-chatroom")
def join_chatroom(self,jMsg,chatroom_name):
global Chatrooms
self.lock.acquire()
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
elif (chatroom_name not in Chatrooms) or (Chatrooms[chatroom_name]["status"]=="close"):
command = "none"
sendmsg = "The chatroom does not exist or the chatroom is close."
else:
port = Chatrooms[ chatroom_name ]["port"]
owner = chatroom_name
command = f"join_chatroom {owner} {port}"
sendmsg = ""
self.lock.release()
return make_jsend(command,sendmsg)
@command.listen(name="restart-chatroom")
def restart_chatroom(self,jMsg):
global Chatrooms
self.lock.acquire()
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
elif (jMsg["user"] not in Chatrooms):
command = "none"
sendmsg = "Please create chatroom first."
elif (Chatrooms[ jMsg["user"] ]["status"]=="open"):
command = "none"
sendmsg = "Your chatroom is still running."
else:
port = Chatrooms[ jMsg["user"] ]["port"]
Chatrooms[ jMsg["user"] ]["status"]="open"
owner = jMsg["user"]
command = f"join_chatroom {owner} {port}"
sendmsg = ""
self.lock.release()
return make_jsend(command,sendmsg)
@command.listen()
def close_chatroom(self,jMsg):
global Chatrooms
self.lock.acquire()
Chatrooms[ jMsg['user'] ]["status"]="close"
self.lock.release()
command = "none"
sendmsg = "none"
return make_jsend(command,sendmsg)
@command.listen()
def attach(self,jMsg):
global Chatrooms
self.lock.acquire()
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
elif (jMsg["user"] not in Chatrooms):
command = "none"
sendmsg = "Please create-chatroom first."
elif (Chatrooms[jMsg["user"]]["status"]=="close"):
command = "none"
sendmsg = "Please restart-chatroom first."
else:
port = Chatrooms[ jMsg["user"] ]["port"]
owner = jMsg["user"]
command = f"join_chatroom {owner} {port}"
sendmsg = ""
self.lock.release()
return make_jsend(command,sendmsg)
########################################################
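# Each handler above is registered through the CommandListener decorator and
# returns a {"command", "data"} envelope built by make_jsend; a new TCP
# command is added simply by defining another method decorated with
# @command.listen().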
class udpCmdHandler(dp.Data_Parser):
command = dp.CommandListener("udp")
cmdlist = command.udp_savecmds
def __init__(self,lock):
dp.Data_Parser.__init__(self,lock)
@command.listen()
def hello(self,jMsg):
command = "none"
sendmsg = "hello!"
return make_jsend(command,sendmsg)
@command.listen()
def register(self,jMsg,username,email,password):
check = usrReg(username,email,password)
if check == 0 :
command = "none"
sendmsg = "Register successfully."
else:
command = "none"
sendmsg = 'Username is already used.'
return make_jsend(command,sendmsg)
@command.listen()
def whoami(self,jMsg):
if (jMsg["user"] != "none"):
command = "none"
sendmsg = jMsg["user"]
else:
command = "none"
sendmsg = "Please login first!"
return make_jsend(command,sendmsg)
@command.listen()
def hi(self,jMsg):
command = "none"
sendmsg = "hi."
return make_jsend(command,sendmsg)
@command.listen(name="list-chatroom")
def list_chatroom(self,jMsg):
global Chatrooms
self.lock.acquire()
if (jMsg["user"] == "none"):
command = "none"
sendmsg = "Please login first!"
else:
command = "none"
sendmsg = listrooms()
self.lock.release()
return make_jsend(command,sendmsg)
| 2.953125 | 3 |
q2_api_client/clients/v3/authentication_client.py | jcook00/q2-api-client | 0 | 12789808 | from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.v3_endpoints import AuthenticationEndpoint
class AuthenticationClient(BaseQ2Client):
def keep_alive(self):
"""GET /v3/keepalive
:return: Response object
:rtype: requests.Response
"""
endpoint = AuthenticationEndpoint.KEEP_ALIVE.value
return self._get(url=self._build_url(endpoint))
def login(self, request_body, csr_assist=None):
"""POST /v3/login
:param float csr_assist: query parameter
:param dict request_body: Dictionary object to send in the body of the request
:return: Response object
:rtype: requests.Response
"""
endpoint = AuthenticationEndpoint.LOGIN.value
query_parameters = self._copy_query_parameters()
query_parameters['csrassist'] = csr_assist
return self._post(url=self._build_url(endpoint), json=request_body, query_parameters=query_parameters)
def logoff(self):
"""POST /v3/logoff
:return: Response object
:rtype: requests.Response
"""
endpoint = AuthenticationEndpoint.LOGOFF.value
return self._post(url=self._build_url(endpoint))
def mfa_login(self, device_key, request_body):
"""POST /v3/mfaLogin
:param str device_key: header
:param dict request_body: Dictionary object to send in the body of the request
:return: Response object
:rtype: requests.Response
"""
endpoint = AuthenticationEndpoint.MFA_LOGIN.value
headers = self._copy_headers()
headers['deviceKey'] = device_key
return self._post(url=self._build_url(endpoint), json=request_body, headers=headers)
def pre_login(self, request_body):
"""POST /v3/preLogin
:param dict request_body: Dictionary object to send in the body of the request
:return: Response object
:rtype: requests.Response
"""
endpoint = AuthenticationEndpoint.PRE_LOGIN.value
return self._post(url=self._build_url(endpoint), json=request_body)
def register_device(self):
"""POST /v3/registerDevice
:return: Response object
:rtype: requests.Response
"""
endpoint = AuthenticationEndpoint.REGISTER_DEVICE.value
return self._post(url=self._build_url(endpoint))
def retrieve_status(self):
"""POST /v3/retrieveStatus
:return: Response object
:rtype: requests.Response
"""
endpoint = AuthenticationEndpoint.RETRIEVE_STATUS.value
return self._post(url=self._build_url(endpoint))
def validate_mfa(self, request_body):
"""POST /v3/validateMfa
:param dict request_body: Dictionary object to send in the body of the request
:return: Response object
:rtype: requests.Response
"""
endpoint = AuthenticationEndpoint.VALIDATE_MFA.value
return self._post(url=self._build_url(endpoint), json=request_body)
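# Illustrative usage (the BaseQ2Client constructor arguments shown here are
# assumptions and may differ from the real signature):
#   client = AuthenticationClient(base_url="https://example.invalid", headers={})
#   response = client.login({"userId": "demo", "password": "secret"})
#   if response.ok:
#       client.keep_alive()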
| 2.6875 | 3 |
software/old_stuff/other/sleep_tester.py | 84ace/esp32_smart_keezer | 1 | 12789809 | <gh_stars>1-10
from machine import Pin, deepsleep, reset_cause, DEEPSLEEP_RESET
from time import sleep
print("running")
sys_ok_led = Pin(25, Pin.OUT, Pin.PULL_UP)
sys_ok_led.value(1)
sys_ok_led.PULL_HOLD
sleep(2)
sys_ok_led = Pin(25, Pin.OUT)
sys_ok_led.PULL_UP
sys_ok_led.value(1)
sys_ok_led.PULL_HOLD
print("sleeping")
deepsleep(5000)
| 2.484375 | 2 |
datasets/pcapframeparser.py | mihsamusev/pytrl_demo | 5 | 12789810 | <reponame>mihsamusev/pytrl_demo
from struct import unpack
import dpkt
from .dataentities import Packet,LaserFiring,Frame
import numpy as np
class PcapFrameParser:
def __init__(self, pcap_file):
        # path to the capture file (no validation that it is a valid .pcap)
self.pcap_file = pcap_file
self.packetStream = dpkt.pcap.Reader(open(self.pcap_file, 'rb'))
self.frameCount = 0
self.lastAzi = -1
self.frame = Frame()
def is_correct_port(self, buffer, port=2368):
# get destination port from the UDP header
dport = unpack(">H",buffer[36:38])[0]
return dport == port
def generator(self):
for ts, buf in self.packetStream:
if(len(buf) != 1248):
continue
if not self.is_correct_port(buf, port=2368):
continue
payload = buf[42:]
res = Packet(payload).getFirings()
for firing in res:
# Yield complete frames and create new Frame() container
if(self.lastAzi > firing.azimuth[-1]):
self.frame.finalize()
yield (ts, self.frame)
self.frame = Frame()
try:
self.frame.append(firing)
except:
pass
# update last seen azimuth
self.lastAzi = firing.azimuth[-1]
def peek_size(self):
prev_max_rot = 0
n = 0
for _, buffer in self.packetStream:
if len(buffer) != 1248:
continue
if not self.is_correct_port(buffer, port=2368):
continue
rot = np.ndarray((12,), '<H', buffer, 42+2, (100,))
min_rot = rot[0]
max_rot = rot[11]
if max_rot < min_rot or prev_max_rot > min_rot:
n += 1
prev_max_rot = max_rot
return n
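# Illustrative usage (the capture file name is an assumption): iterate over
# complete rotations, each yielded as a (timestamp, Frame) pair.
#   parser = PcapFrameParser("capture.pcap")
#   for ts, frame in parser.generator():
#       print(ts, frame)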
| 2.46875 | 2 |
enaml/qt/editor/qt_ace_editor.py | mmckerns/enaml | 11 | 12789811 | from PySide.QtCore import QObject, Signal, Slot
from string import Template
import os
EVENT_TEMPLATE = Template("""
py_${func} = function() {
py_ace_editor.${func}(${args});
}
editor.${target}.on("${event_name}", py_${func});
""")
BINDING_TEMPLATE = Template("""
py_ace_editor.${signal}.connect(${target}, "${func}")
""")
class QtAceEditor(QObject):
text_changed = Signal(unicode)
mode_changed = Signal(unicode)
theme_changed = Signal(unicode)
auto_pair_changed = Signal(bool)
font_size_changed = Signal(int)
margin_line_changed = Signal(bool)
margin_line_column_changed = Signal(int)
def __init__(self, parent=None):
""" Initialize the editor
"""
super(QtAceEditor, self).__init__(parent)
self._events = []
self._bindings = []
def set_text(self, text):
""" Set the text of the editor
"""
self._text = text
self.text_changed.emit(text)
@Slot(unicode)
def set_text_from_js(self, text):
""" Set the text from the javascript editor. This method is required
because set_text emits the signal to update the text again.
"""
self._text = text
def text(self):
""" Return the text of the editor
"""
return self._text
def set_mode(self, mode):
""" Set the mode of the editor
"""
if mode.startswith('ace/mode/'):
self._mode = mode
else:
self._mode = 'ace/mode/' + mode
self.mode_changed.emit(self._mode)
def mode(self):
""" Return the mode of the editor
"""
return self._mode
def set_theme(self, theme):
""" Set the theme of the editor
"""
if theme.startswith('ace/theme/'):
self._theme = theme
else:
self._theme = "ace/theme/" + theme
self.theme_changed.emit(self._theme)
def theme(self):
""" Return the theme of the editor
"""
return self._theme
def set_auto_pair(self, auto_pair):
""" Set the auto_pair behavior of the editor
"""
self._auto_pair = auto_pair
self.auto_pair_changed.emit(auto_pair)
def set_font_size(self, font_size):
""" Set the font size of the editor
"""
self._font_size = font_size
self.font_size_changed.emit(font_size)
def show_margin_line(self, margin_line):
""" Set the margin line of the editor
"""
self._margin_line = margin_line
self.margin_line_changed.emit(margin_line)
def set_margin_line_column(self, margin_line_col):
""" Set the margin line column of the editor
"""
self._margin_line_column = margin_line_col
self.margin_line_column_changed.emit(margin_line_col)
def generate_ace_event(self, _func, _target, _args, _event_name):
""" Generate a Javascript ace editor event handler.
Parameters
-----------
_func : string
The python method to be called on the python AceEditor object
_args : string
The javascript expression to pass to the method
_target : string
The Ace Editor target to tie the event to
_event_name : string
The name of the AceEditor event
"""
event = EVENT_TEMPLATE.substitute(func=_func, args=_args,
target=_target,
event_name=_event_name)
self._events.append(event)
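    # Illustrative call (argument values are assumptions, not part of the
    # shipped bindings): forwarding every "change" event's text back to the
    # Python object would look like
    #   editor.generate_ace_event('set_text_from_js', 'getSession()',
    #                             'editor.getSession().getValue()', 'change')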
def generate_binding(self, _signal, _target, _func):
""" Generate a connection between a Qt signal and a javascript function.
Any parameters given to the signal will be passed to the javascript
function.
Parameters
----------
_signal : string
The name of the Qt signal
_target : string
The name of the target Javascript object
_func : string
The name of the function to call on the target object
"""
binding = BINDING_TEMPLATE.substitute(signal=_signal, target=_target,
func=_func)
self._bindings.append(binding)
def generate_html(self):
""" Generate the html code for the ace editor
"""
# XXX better way to access files here?
p = os.path
template_path = p.join(p.dirname(p.abspath(__file__)),
'tab_ace_test.html')
template = Template(open(template_path, 'r').read())
_r_path = "file://" + p.join(p.dirname(p.abspath(__file__)))
_events = '\n'.join(self._events)
_bindings = '\n'.join(self._bindings)
return template.substitute(events=_events, resource_path=_r_path,
bindings=_bindings)
| 2.390625 | 2 |
saulscript/syntax_tree/syntax_tree.py | lysol/saulscript | 0 | 12789812 | import logging
import nodes
from .. import exceptions
from ..lexer import tokens
import traceback
import sys
class SyntaxTree(object):
def _debug(self, text, *args):
if type(text) != str:
text = repr(text)
logging.debug(("<Line %d, Token %d> " % (self.line_num, self.token_counter)) + text, *args)
def _inc_line(self):
self.line_num += 1
self.token_counter = 0
def execute(self, context, time_limit=-1, op_limit=-1):
context.set_op_limit(op_limit)
context.set_time_limit(time_limit)
for expression in self.tree:
expression.reduce(context)
return context
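    # Illustrative flow (the tokenizer entry point and context class are
    # assumptions; only run()/execute() are defined here):
    #   tree = SyntaxTree(MyContext, tokens_from_lexer)
    #   tree.run()                      # build the AST
    #   result_context = tree.execute(MyContext(), time_limit=5)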
@property
def next_token(self):
try:
return self.tokens[0]
except IndexError:
raise exceptions.OutOfTokens(self.line_num, 'at next token')
def shift_token(self):
self._debug('Shifting token %s', self.next_token)
self.token_counter += 1
try:
token = self.tokens.pop(0)
self.line_num = token.line_num
return token
except IndexError:
raise exceptions.ParseError(self.line_num, "Unexpected end of input")
def unshift_token(self, item):
return self.tokens.insert(0, item)
def __init__(self, context_class, tokens):
self.tokens = tokens
self.tree = nodes.Branch([])
self.line_num = 0
self.context_class = context_class
self.token_counter = 0
def is_identifier(self, token, body):
return isinstance(token, tokens.IdentifierToken) and \
token.body == body
def run(self):
while True:
try:
                # look ahead and if there is a binary operator in our future,
# handle it
self.tree.append(self.handle_expression())
except exceptions.OutOfTokens as e:
self._debug('*** Out of tokens: %s', e.message)
for line in self.tree:
self._debug("FINAL AST: %s", line)
break
except exceptions.EndContextExecution:
logging.error('Unexpected }')
raise exceptions.ParseError(self.line_num, "Unexpected }")
def dump(self):
for line_num, branch in enumerate(self.tree):
self._debug("Operation %d:\n%s", line_num, branch)
def handle_function_definition(self):
self._debug("Handling a function definition")
self.shift_token() # get rid of (
sig_names = []
while True:
token = self.shift_token()
if isinstance(token, tokens.RightParenToken):
self._debug("Found right paren, continue with rest of function definition")
break # get rid of it
if isinstance(token, tokens.CommaToken):
self._debug("Found comma, continue to next argument")
continue # eat it
if not isinstance(token, tokens.IdentifierToken):
raise exceptions.ParseError(self.line_num,
"Expected an argument name, got %s" % token)
sig_names.append(token.body)
if not isinstance(self.next_token, tokens.LeftCurlyBraceToken):
raise exceptions.ParseError(self.line_num, "Expected {, got %s" % self.next_token)
self.shift_token() # get rid of {
new_branch = nodes.Branch()
while True:
try:
new_branch.append(self.handle_expression())
except exceptions.EndContextExecution:
# end of function declaration
break
func_node = nodes.FunctionNode(self.line_num, self.context_class, sig_names, new_branch)
return func_node
def handle_subscript_notation(self, variable_token):
self._debug("Handling a subscript notation")
self.shift_token() # get rid of [
index_node = self.handle_operator_expression() # ends before ]
sub_node = nodes.SubscriptNotationNode(self.line_num,
nodes.VariableNode(self.line_num, variable_token.body), index_node)
if not isinstance(self.next_token, tokens.RightSquareBraceToken):
raise exceptions.ParseError(self.line_num,
"Unexpected %s during subscript notation parse" %
self.next_token)
self.shift_token()
return sub_node
def handle_function_invocation(self, name_token):
self._debug("Handling a function invocation")
self.shift_token() # get rid of (
arg_tokens = []
self._debug("Examining arguments")
while True:
token = self.next_token
self._debug("Current argument set: %s", repr(arg_tokens))
self._debug("Function Invocation: Consider %s" % token)
if isinstance(token, tokens.RightParenToken):
self.shift_token()
break
arg = self.handle_operator_expression()
if arg is None:
raise exceptions.ParseError(self.line_num,
"Unexpected character")
arg_tokens.append(arg)
if isinstance(self.next_token, tokens.CommaToken):
self._debug("Found comma, continue to next argument")
# eat the comma and keep going
self.shift_token()
continue
self._debug("Done reading arguments in function invocation")
return nodes.InvocationNode(self.line_num, name_token.body, arg_tokens)
def handle_list_expression(self):
self._debug("Handling a list expression")
self.shift_token() # get rid of [
data = nodes.ListNode()
while isinstance(self.next_token, tokens.LineTerminatorToken):
# ignore line breaks here until we see data
self.shift_token()
while True:
self._debug("List looks like this now: %s", data)
if isinstance(self.next_token, tokens.RightSquareBraceToken):
self._debug(
"Encountered a ], shift it off and return the list node.")
self.shift_token()
break
expression = self.handle_operator_expression()
if isinstance(self.next_token, tokens.CommaToken):
# eat the comma and keep going
self.shift_token()
if expression is not None:
data.append(expression)
return data
def handle_dictionary_expression(self):
self._debug("Handling a dictionary expression")
self.shift_token() # get rid of {
data = nodes.DictionaryNode(self.line_num)
while True:
name = self.shift_token()
if isinstance(name, tokens.LineTerminatorToken):
# So, we can have whitespace after a {
self._inc_line()
continue
if isinstance(name, tokens.RightCurlyBraceToken):
# done with this dictionary since we got a }
break
if not isinstance(name, tokens.IdentifierToken) and \
not isinstance(name, tokens.NumberLiteralToken) and \
not isinstance(name, tokens.StringLiteralToken):
raise exceptions.ParseError(self.line_num,
"Expected a name, got %s (%s)" %
(name, name.__class__))
colon = self.shift_token()
if not isinstance(colon, tokens.ColonToken):
raise exceptions.ParseError(self.line_num, "Expected a colon")
# Goes until the end of a line. No comma needed!
expression = self.handle_operator_expression()
if expression is not None:
data[name.body] = expression
return data
def handle_operator_expression(self):
self._debug("Handling operator expression.")
output = []
op_stack = []
prev_token = None
# keep track of the parens opened.
# If we deplete all the (s, stop parsing the operator expression
paren_count = 0
while True:
self._debug("Output stack: %s", output)
self._debug("Operator stack: %s", op_stack)
try:
self._debug('The next token is %s', self.next_token)
if isinstance(self.next_token,
tokens.LeftCurlyBraceToken):
self._debug(">> Calling handle_dictionary_expression from operator_expression")
output.append(self.handle_dictionary_expression())
elif isinstance(self.next_token,
tokens.LeftSquareBraceToken):
self._debug(">> Calling handle_list_expression from operator_expression")
output.append(self.handle_list_expression())
elif isinstance(self.next_token,
tokens.RightCurlyBraceToken):
self._debug(
">> } encountered, stop processing operator expression")
self._debug(
str(paren_count))
if paren_count > 0:
self._debug("Paren count is over 1 while a } has been encountered.")
raise exceptions.ParseError("Unexpected }")
break
elif isinstance(self.next_token,
tokens.RightSquareBraceToken):
self._debug(
">> ] encountered, stop processing operator expression")
if paren_count > 0:
self._debug("Paren count is over 1 while a } has been encountered.")
raise exceptions.ParseError("Unexpected }")
break
if isinstance(self.next_token, tokens.LeftParenToken):
self._debug('Incrementing number of parens.')
paren_count += 1
if isinstance(self.next_token, tokens.RightParenToken):
paren_count -= 1
self._debug(">> Decrementing number of parens.")
if paren_count < 1:
self._debug(">> Found an unmatched ), which means this is the end of the operator expression")
# too many )s found. This is the end of
# the operator expression
break
if isinstance(self.next_token, tokens.RightParenToken):
self._debug("THE RIGHT PAREN IS HERE")
self._debug('Parent Count: %d', paren_count)
token = self.shift_token()
self._debug("Operator context: Consider %s", token)
except IndexError:
self._debug("Encountered IndexError, break")
break
if isinstance(token, tokens.LineTerminatorToken) or \
isinstance(token, tokens.RightCurlyBraceToken) or \
isinstance(token, tokens.CommaToken):
self._debug(
'encountered a line terminator, comma, or }, break it out')
if isinstance(token, tokens.LineTerminatorToken):
self._inc_line()
break
if (prev_token is None or
isinstance(prev_token, tokens.OperatorToken)) and \
isinstance(token, tokens.SubtractionOperatorToken):
# unary -
token = tokens.NegationOperatorToken()
if not isinstance(token, tokens.OperatorToken) and not \
isinstance(token, tokens.LiteralToken) and not \
isinstance(token, tokens.IdentifierToken):
msg = "Expected an operator, literal, or identifier. (Got %s: %s)" % \
(token.__class__, token.body)
logging.error(msg)
raise exceptions.ParseError(self.line_num, msg)
if isinstance(token, nodes.Node) or not \
isinstance(token, tokens.OperatorToken):
# If anything is a node, append it
if isinstance(self.next_token, tokens.LeftParenToken):
# function invocation or definition
if token.body == 'function':
output.append(self.handle_function_definition())
else:
output.append(self.handle_function_invocation(token))
elif isinstance(self.next_token,
tokens.LeftSquareBraceToken):
# subscript syntax
output.append(self.handle_subscript_notation(token))
else:
output.append(token)
else:
while len(op_stack) > 0:
token2 = op_stack[-1]
is_left_associative = \
token.associativity == tokens.OperatorToken.LEFT
is_right_associative = \
token.associativity == tokens.OperatorToken.RIGHT
self._debug("Is Left Associative: %s\t"
"Is Right Associative: %s",
is_left_associative, is_right_associative)
if (is_left_associative and token.precedence >= token2.precedence) or \
(is_right_associative and
token.precedence > token2.precedence):
if not isinstance(token, tokens.RightParenToken):
if not isinstance(token2,
tokens.LeftParenToken):
op_token = op_stack.pop()
self._debug(
"Popping %s off stack", op_token.body)
output.append(op_token)
else:
# break because we hit a left paren
break
else:
if not isinstance(token2,
tokens.LeftParenToken):
op_token = op_stack.pop()
output.append(op_token)
else:
# discard left paren and break
op_stack.pop()
break
else:
# left operator is equal or larger than right. breakin
break
if not isinstance(token, tokens.RightParenToken):
# push current operator to stack
op_stack.append(token)
# ignore right paren
# hold onto this for the next run in case we need to
# check for unary operators
prev_token = token
self._debug('Done feeding in tokens, now drain the operator stack')
# drain the operator stack
while len(op_stack) > 0:
operator = op_stack.pop()
output.append(operator)
self._debug('Output: ')
self._debug(output)
if len(output) == 0:
# nothing. probably a \n after a ,
return None
tree_stack = []
# turn the list of output tokens into a tree branch
self._debug('Turn list of output tokens into a tree branch')
while True:
try:
token = output.pop(0)
self._debug("Consider %s from output" % token)
except IndexError:
break
if not isinstance(token, tokens.OperatorToken):
tree_stack.append(self.handle_token(token))
else:
self._debug("Tree stack: %s", tree_stack)
self._debug("Determining if %s is unary or binary", token)
if isinstance(token, tokens.BinaryOperatorToken):
self._debug("%s is binary", token)
try:
right, left = tree_stack.pop(), tree_stack.pop()
except IndexError:
logging.error("Encountered IndexError. Tree stack: %s",
tree_stack)
raise exceptions.ParseError(self.line_num)
tree_stack.append(token.get_node(self.line_num, left, right))
elif isinstance(token, tokens.UnaryOperatorToken):
self._debug("%s is unary", token)
target = tree_stack.pop()
tree_stack.append(token.get_node(self.line_num, target))
self._debug("%s" % tree_stack)
if len(tree_stack) != 1:
logging.error("Tree stack length is not 1. Contents: %s",
tree_stack)
if len(tree_stack) != 1:
raise exceptions.ParseError(self.line_num)
self._debug('The final tree leaf: %s', tree_stack[0])
return tree_stack.pop() # -----------===============#################*
def handle_token(self, token):
self._debug("handle token")
if isinstance(token, nodes.Node) or isinstance(token, nodes.ListNode) or \
isinstance(token, nodes.DictionaryNode):
# already resolved down the chain
self._debug("This token is actually a node, so return it")
return token
elif isinstance(token, tokens.IdentifierToken):
# variable?
if token.body == 'true':
return nodes.BooleanNode(self.line_num, True)
elif token.body == 'false':
return nodes.BooleanNode(self.line_num, False)
self._debug("Deciding that %s is a variable" % token)
return nodes.VariableNode(self.line_num, token.body)
elif isinstance(token, tokens.NumberLiteralToken):
return nodes.NumberNode(self.line_num, token.body)
elif isinstance(token, tokens.StringLiteralToken):
return nodes.StringNode(self.line_num, token.body)
assert "Unexpected token: %s (%s)" % (token, token.__class__)
def handle_expression(self):
while True:
try:
self._debug('Handling expression')
token = self.next_token
self._debug("Consider %s", token.__class__)
except IndexError:
# didn't shift the token off yet so make sure the line num is accurate
raise exceptions.OutOfTokens(self.line_num + 1, 'During handle expression')
if isinstance(token, tokens.IdentifierToken) or \
isinstance(token, tokens.LiteralToken):
if self.handler_exists(token):
return self.handle_identifier()
else:
return self.handle_operator_expression()
elif isinstance(token, tokens.LineTerminatorToken):
self._debug("Delete this infernal line terminator")
self._inc_line()
self.shift_token()
return nodes.NopNode(self.line_num)
elif isinstance(token, tokens.RightCurlyBraceToken):
self._debug("Found }, beat it")
self.shift_token()
raise exceptions.EndContextExecution(self.line_num)
else:
raise exceptions.ParseError(self.line_num)
def handler_exists(self, token):
self._debug("* Checking if there is a handler for %s" % token)
method_name = 'handle_identifier_' + token.body
return hasattr(self, method_name)
def handle_identifier(self):
token = self.shift_token()
method_name = 'handle_identifier_' + token.body
method = getattr(self, method_name)
return method(token)
def handle_identifier_if(self, token):
self._debug("Handling IF")
condition = self.handle_operator_expression()
then_branch = nodes.Branch([])
else_branch = nodes.Branch([])
while not isinstance(self.next_token, tokens.IdentifierToken) or \
self.next_token.body not in ['else', 'end']:
self._debug("Checking next expression as part of THEN clause")
try:
then_branch.append(self.handle_expression())
except exceptions.EndContextExecution:
logging.error("There shouldn't be a } here "
"because we're in an if statement")
raise exceptions.ParseError(self.line_num, "Unexpected }")
except exceptions.OutOfTokens:
raise exceptions.SaulRuntimeError(self.line_num,
"Unexpected end of file during if statement")
if isinstance(self.next_token, tokens.IdentifierToken) and \
self.next_token.body == 'else':
self.shift_token()
while not isinstance(self.next_token, tokens.IdentifierToken) or \
self.tokens[0:2] != ['end', 'if']:
self._debug(
"Checking next expression as part of ELSE clause")
try:
else_branch.append(self.handle_expression())
except exceptions.EndContextExecution:
logging.error("There shouldn't be a } here "
"because we're in an if statement")
raise exceptions.ParseError(self.line_num, "Unexpected }")
except exceptions.OutOfTokens:
raise exceptions.SaulRuntimeError(self.line_num,
"Unexpected end of file during if statement")
end_token = self.shift_token()
if_token = self.shift_token()
self._debug("Then: %s, Else: %s, End If: %s %s",
then_branch, else_branch, end_token.body, if_token.body)
assert isinstance(end_token, tokens.IdentifierToken) and \
end_token.body == 'end'
assert isinstance(if_token, tokens.IdentifierToken) and \
if_token.body == 'if'
return nodes.IfNode(self.line_num, condition, then_branch, else_branch)
def handle_identifier_true(self, token):
self._debug("Encountered 'true'")
        assert token.body.lower() == 'true'
return nodes.BooleanNode(self.line_num, True)
def handle_identifier_false(self, token):
self._debug("Encountered 'false'")
        assert token.body.lower() == 'false'
return nodes.BooleanNode(self.line_num, False)
def handle_identifier_return(self, token):
self._debug("Handling return statement")
return_node = self.handle_operator_expression()
return nodes.ReturnNode(self.line_num, return_node)
def handle_identifier_while(self, token):
self._debug("Handling while loop")
condition = self.handle_operator_expression()
branch = nodes.Branch()
try:
while not isinstance(self.next_token, tokens.IdentifierToken) or \
self.next_token.body not in ['end']:
try:
branch.append(self.handle_expression())
except exceptions.EndContextExecution:
logging.error("There shouldn't be a } here "
"because we're in a while statement")
raise exceptions.ParseError(self.line_num, "Unexpected }")
except exceptions.OutOfTokens:
raise exceptions.SaulRuntimeError(self.line_num, "end while expected")
end_token = self.shift_token()
while_token = self.shift_token()
assert isinstance(end_token, tokens.IdentifierToken) and \
end_token.body == 'end'
assert isinstance(while_token, tokens.IdentifierToken) and \
while_token.body == 'while'
return nodes.WhileNode(self.line_num, condition, branch)
def handle_identifier_for(self, token):
self._debug("Handling for loop")
token = self.shift_token()
if not isinstance(token, tokens.IdentifierToken):
raise exceptions.ParseError(self.line_num, "Expected a name, got %s" % token)
var_name = token.body
token = self.shift_token()
if not isinstance(token, tokens.IdentifierToken) or \
token.body != 'in':
raise exceptions.ParseError(self.line_num, "Expected 'in', got %s" % token)
iterable = self.handle_operator_expression()
self._debug("The iterable is %s" % iterable)
branch = nodes.Branch()
try:
while not isinstance(self.next_token, tokens.IdentifierToken) or \
self.next_token.body not in ['end']:
self._debug("For Loop: Consider %s" % self.next_token)
try:
branch.append(self.handle_expression())
self._debug(
"Just handled an expression."
"Branch looks like %s now" % branch)
except exceptions.EndContextExecution:
logging.error("There shouldn't be a } here"
"because we're in a for loop")
raise exceptions.ParseError(self.line_num, "Unexpected }")
except exceptions.OutOfTokens:
raise exceptions.SaulRuntimeError(self.line_num, "end for expected")
end_token = self.shift_token()
for_token = self.shift_token()
self._debug("End token: %s, For token: %s" % (end_token, for_token))
assert isinstance(end_token, tokens.IdentifierToken) and \
end_token.body == 'end'
assert isinstance(for_token, tokens.IdentifierToken) and \
for_token.body == 'for'
self._debug("Returning for loop node")
return nodes.ForNode(self.line_num, var_name, iterable, branch)
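    # Rough sketch of the statement forms handled above, inferred from the token
    # checks in this file (the full grammar lives elsewhere in the project):
    #
    #   if <expr> ... else ... end if
    #   while <expr> ... end while
    #   for <name> in <expr> ... end for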
| 2.90625 | 3 |
xxmaker/gamepart/Paper.py | pijll/xxmaker | 0 | 12789813 | import Colour
import Font
import OutputFunctions
from Definitions import *
from graphics.cairo import Draw
from math import ceil
from graphics.cairo.Draw import FillStyle, TextStyle
class Paper:
def __init__(self, width=63*mm, height=39*mm, marker=None):
self.width = width
self.height = height
self.canvas = Draw.Canvas((0, 0), width, height)
if isinstance(marker, Draw.Canvas):
self.canvas.draw(marker, (2*mm, height - 2*mm))
elif marker is not None:
Draw.text(self.canvas, (0, height), marker, TextStyle(Font.very_small, Colour.grey, 'bottom', 'left'))
def split_into_parts(self, max_width, max_height):
def how_many_fit(large, small):
return ceil(large / small)
n_portrait_pages = how_many_fit(self.width, max_width) * how_many_fit(self.height, max_height)
n_landscape_pages = how_many_fit(self.width, max_height) * how_many_fit(self.height, max_width)
if n_landscape_pages < n_portrait_pages:
split_in_parts_horizontally = how_many_fit(self.width, max_height)
split_in_parts_vertically = how_many_fit(self.height, max_width)
else:
split_in_parts_horizontally = how_many_fit(self.width, max_width)
split_in_parts_vertically = how_many_fit(self.height, max_height)
width_map_part = self.width / split_in_parts_horizontally
height_map_part = self.height / split_in_parts_vertically
for column in range(split_in_parts_horizontally):
for row in range(split_in_parts_vertically):
paper_part = Paper(width_map_part, height_map_part)
paper_part.canvas.draw(self.canvas, (-column * width_map_part, -row * height_map_part))
yield paper_part
class Certificate(Paper):
def __init__(self, colour, price=None, name=None, icon=None, marker=None):
super().__init__()
self.colour = colour
c = self.canvas
Draw.rectangle(c, (0, 0), self.width, self.height, FillStyle(colour.faded()))
Draw.rectangle(c, (3*mm, 0), 13*mm, self.height, FillStyle(colour))
if isinstance(marker, Draw.Canvas):
self.canvas.draw(marker, (0, self.height - 3*mm))
elif marker is not None:
Draw.text(self.canvas, (0, self.height), marker, TextStyle(Font.very_small, Colour.grey, 'bottom', 'left'))
if name:
y = self.height/2 if price else 14*mm
OutputFunctions.draw_centered_lines(name, Font.certificate_name, c,
x_c=(self.width + 16*mm)/2, y=y,
width=self.width - 16*mm - 6*mm)
if price:
Draw.text(c, (self.width - 3*mm, 2.8*mm), price,
TextStyle(Font.price, Colour.black, 'top', 'right'))
if icon:
Draw.load_image(c, icon, (9.5*mm, 7*mm), width=10*mm, height=10*mm)
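# Minimal usage sketch (not from the original file; the Colour attribute and the
# label/price strings below are assumptions for illustration only):
#
#   cert = Certificate(Colour.red, price='100', name='Grand Trunk Railway',
#                      icon='icons/train.png', marker='GT')
#   for part in Paper(200*mm, 120*mm).split_into_parts(100*mm, 100*mm):
#       ...  # each part is a Paper covering one tile of the original canvas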
| 3.15625 | 3 |
config.py | xsy2004/DH-TelegramBot | 12 | 12789814 | <filename>config.py<gh_stars>10-100
import os
ENV = bool(os.environ.get('ENV', False))
if ENV:
TOKEN = os.environ.get("TOKEN", None)
BanMeReplayAddress = os.environ.get("BanMeReplayAddress", None)
api_id = os.environ.get("api_id", None)
api_hash = os.environ.get("api_hash", None)
BOT_NAME = os.environ.get("BOT_NAME", None)
AllGroupMemberDatabaseName = os.environ.get("AllGroupMemberDatabaseName", None)
else:
TOKEN = ''
BanMeReplayAddress = "https://ae02.alicdn.com/kf/U54e2d33aa0694530897b008b097a5fb5U.png"
    api_id = 0  # placeholder; fill in your own Telegram api_id here
api_hash = ''
BOT_NAME = ''
AllGroupMemberDatabaseName = ''
AUTO_DELETE = False
| 1.828125 | 2 |
lec3.py | dbutler20/ISAT252Lab1Butler | 0 | 12789815 | """
Lecture 3 notes 5/20
"""
| 1.859375 | 2 |
core.py | Arturok/multiprocesador | 0 | 12789816 | import cacheL1
import cacheL1Controller
import clock
import threading
import random
from time import sleep
class core:
isa = ['read', 'write', 'calc']
state = 'awake'
class processor(threading.Thread):
countInstructions = 1
processTime = 1
def __init__(self, coreID, chipID, clock, cacheL1, cacheL1Controller, update):
self.coreID = coreID
self.chipID = chipID
self.clock = clock
self.cacheL1 = cacheL1
self.cacheL1Controller = cacheL1Controller
self.standBy = threading.Condition()
self.cacheL1Controller.addPause(self.standBy)
self.update = update
threading.Thread.__init__(self)
def process(self):
sleep(self.processTime)
self.update(self.coreID, 'rates')
def run(self):
while(True):
if(self.clock.play):
instruction = core.generateInstruction()
self.countInstructions += 1
command = "# Clicle: {} Instruction: {} ->\n {} ".format(self.clock.countCicle, self.countInstructions, instruction)
if (instruction in ['read', 'write']):
mainMemAdd = random.randrange(16)
command += "Address: {} CoreId: {} ChipId: {}".format(mainMemAdd, self.coreID, self.chipID)
self.update(self.coreID, 'log', log = command)
if(instruction == 'read'):
self.cacheL1Controller.rea
# Core Constructor
def __init__(self, coreID, chipID, clock, update):
self.coreID = coreID
self.chipID = chipID
self.clock = clock
self.update = update
# Start
def start(self):
self.processor.start()
# Get instruction
@staticmethod
def generateInstruction():
# Special Distribution Function
return core.isa[random.randrange(3)]
| 3.09375 | 3 |
models/endstation.py | stuartcampbell/osiris-api | 0 | 12789817 | from typing import Optional
from pydantic.main import BaseModel
from models.instrument import Instrument
| 1.125 | 1 |
web/app/djrq/model/ampache/listeners.py | bmillham/djrq2 | 1 | 12789818 | from . import *
class Listeners(Base):
__tablename__ = "listeners"
id = Column(Integer, primary_key=True)
current = Column(Integer)
max = Column(Integer)
| 2.046875 | 2 |
app/business/admin.py | HenriqueLR/lawyer_business | 2 | 12789819 | <gh_stars>1-10
#coding: utf-8
from django.contrib import admin
from business.models import BusinessModel
class BusinessAdmin(admin.ModelAdmin):
list_display = ('name', 'description')
search_fields = ('name',)
admin.site.register(BusinessModel, BusinessAdmin)
| 1.539063 | 2 |
crypto/crc_forgery/crack.py | skyzh/sjtuctf-2019-writeup | 1 | 12789820 | <filename>crypto/crc_forgery/crack.py
#!/usr/bin/env python3
# coding=utf-8
import binascii
import os
import random
def b2n(b):
res = 0
for i in b:
res *= 2
res += i
return res
def n2b(n, length):
tmp = bin(n)[2:]
tmp = '0'*(length-len(tmp)) + tmp
return [int(i) for i in tmp]
def s2n(s):
return int(binascii.hexlify(s), 16)
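# Quick sanity examples for the helpers above (values checked by hand):
#   n2b(5, 4)          -> [0, 1, 0, 1]
#   b2n([0, 1, 0, 1])  -> 5
#   s2n(b'ab')         -> 0x6162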
def crc64(msg):
msg = n2b(s2n(msg), len(msg)*8)
msg += const
for shift in range(len(msg)-64):
if msg[shift]:
for i in range(65):
msg[shift+i] ^= poly[i]
res = msg[-64:]
return b2n(res)
const = n2b(0xdeadbeeffeedcafe, 64)
poly = n2b(0x10000000247f43cb7, 65)
msg_str = input()
blank_begin = msg_str.find("_") * 4
blank_end = blank_begin + 64
msg = bytes.fromhex(msg_str.replace("_", "0"))
crc = n2b(0x1337733173311337, 64)
plain_msg = n2b(s2n(msg), len(msg)*8) + const
crc_msg = [0] * (len(msg) * 8) + crc
print("cracking %d - %d bit" % (blank_begin, blank_end))
for shift in range(2047, blank_begin - 1, -1):
if plain_msg[shift + 64] != crc_msg[shift + 64]:
for i in range(65):
crc_msg[shift + i] ^= poly[i]
assert(crc_msg[shift + 64] == plain_msg[shift + 64])
for shift in range(0, blank_begin):
if plain_msg[shift]:
for i in range(65):
plain_msg[shift+i] ^= poly[i]
blank = [0] * 64
print(plain_msg[blank_begin:blank_end])
print(crc_msg[blank_begin:blank_end])
for i in range(blank_begin, blank_end):
blank[i - blank_begin] = plain_msg[i] ^ crc_msg[i]
print(blank)
print(hex(b2n(blank))[2:])
| 2.828125 | 3 |
obtain/database.py | dushyantkhosla/python-snippets | 6 | 12789821 | import os
import csv
import sqlite3
import pandas as pd
import numpy as np
from .pybash import get_file_info
def connect_to_db(path):
"""
Interact with a SQLite database
Parameters
----------
path: str
Location of the SQLite database
Returns
-------
conn: Connector
The SQLite connection object
curs: Cursor
The SQLite cursor object
Usage
-----
conn, curs = connect_to_db("data/raw/foo.db")
"""
try:
if os.path.exists(path):
print("Connecting to Existing DB")
conn = sqlite3.connect(path)
else:
print("Initialising new SQLite DB")
conn = sqlite3.connect(path)
curs = conn.cursor()
except:
print("An error occured. Please check the file path")
return conn, curs
def print_table_names(path_to_db):
"""
Print and return the names of tables in a SQLite database
"""
conn, curs = connect_to_db(path_to_db)
result = curs.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
print(result)
return result
def load_file_to_db(path_to_file, path_to_db, table_name, delim):
"""
Load a text file of any size into a SQLite database
Parameters
----------
path_to_file: str
Location of the text file
path_to_db: str
Location of the SQLite db
table_name: str
Name of the table to be created in the database
delim: str
The delimiter for the text file
Returns
-------
None
"""
conn, curs = connect_to_db(path_to_db)
print("The database at {} contains the following tables.".format(path_to_db))
print(curs.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall())
if os.path.exists(path_to_file):
size_ = get_file_info(path_to_file).get('size')
rows_ = get_file_info(path_to_file).get('rows')
try:
if size_ < 250:
print("{} is a small file. Importing directly.".format(path_to_file))
df_ = pd.read_csv(
path_to_file,
sep=delim,
low_memory=False,
error_bad_lines=False,
quoting=csv.QUOTE_NONE
)
df_.to_sql(
name=table_name,
con=conn,
index=False,
if_exists='append'
)
print("Done.")
else:
print("{} is large. Importing in chunks.".format(path_to_file))
csize = int(np.ceil(rows_/10))
chunks = pd.read_csv(
path_to_file,
sep=delim,
chunksize=csize,
error_bad_lines=False,
low_memory=False,
quoting=csv.QUOTE_NONE
)
for c in chunks:
c.to_sql(
name=table_name,
con=conn,
index=False,
if_exists='append'
)
print("Done")
except:
print("An error occurred while reading the file.")
else:
print("File not found at {}, please check the path".format(path_to_file))
return None
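# Minimal usage sketch (file names below are illustrative, not from the repo):
#
#   conn, curs = connect_to_db("data/raw/example.db")
#   print_table_names("data/raw/example.db")
#   load_file_to_db("data/raw/example.csv", "data/raw/example.db",
#                   table_name="example", delim=",")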
| 3.71875 | 4 |
venv/lib/python3.8/site-packages/cffi/cparser.py | Retraces/UkraineBot | 2 | 12789822 | <reponame>Retraces/UkraineBot<gh_stars>1-10
/home/runner/.cache/pip/pool/ac/ef/f5/a442d1c35808d431359b8822da293924c7e9c68b800022d7a513e55440 | 1.023438 | 1 |
app/general/utils/json/json.py | rafiq10/rrhh_tdd_backend | 0 | 12789823 | import json
from collections import namedtuple
def _json_object_hook(d):
return namedtuple('X', d.keys())(*d.values())
def json2obj(data):
a = json.dumps(data)
return json.loads(a, object_hook=_json_object_hook) | 3.015625 | 3 |
normalization.py | jwieckowski/mcda | 0 | 12789824 | """
script to matrix normalization
"""
from functools import reduce
import math as m
import numpy as np
def minmax_normalization(x, type):
"""
:param x: column of matrix data
    :param type: criterion type ('cost' for a cost criterion, otherwise profit)
:return: min max normalized column of matrix data
"""
if min(x) == max(x):
return np.ones(x.shape)
if type == 'cost':
return (max(x) - x) / (max(x) - min(x))
return (x - min(x)) / (max(x) - min(x))
def max_normalization(x, type):
"""
:param x: column of matrix data
    :param type: criterion type ('cost' for a cost criterion, otherwise profit)
:return: max normalized column of matrix data
"""
if type == 'cost':
return 1 - x/max(x)
return x / max(x)
def sum_normalization(x, type):
"""
:param x: column of matrix data
    :param type: criterion type ('cost' for a cost criterion, otherwise profit)
:return: sum normalized column of matrix data
"""
if type == 'cost':
return (1/x) / sum(1/x)
return x / sum(x)
def vector_normalization(x, type):
"""
:param x: column of matrix data
    :param type: criterion type ('cost' for a cost criterion, otherwise profit)
:return: vector normalized column of matrix data
"""
if type == 'cost':
return 1 - (x / np.sqrt(sum(x ** 2)))
return x / np.sqrt(sum(x ** 2))
def logaritmic_normalization(x, type):
"""
:param x: column of matrix data
    :param type: criterion type ('cost' for a cost criterion, otherwise profit)
:return: logarithmic normalized column of matrix data
"""
prod = reduce(lambda a, b: a*b, x)
if type == 'cost':
return (1 - (np.log(x) / m.log(prod))) / (len(x) - 1)
return np.log(x) / m.log(prod)
def normalize(matrix, types, method, precision = 2):
"""
:param matrix: decision matrix
    :param types: criterion types for the columns ('cost' or profit)
:param method: method of normalization
    :param precision: number of decimal places to round to
:return: normalized matrix
"""
if matrix.shape[1] != len(types):
        raise ValueError('Number of types does not match the number of matrix columns')
normalized_matrix = matrix.astype('float')
    for i in range(len(types)):
        normalized_matrix[:, i] = np.round(method(matrix[:, i], types[i]), precision)
return normalized_matrix
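# Minimal, self-contained demo of the functions above (illustrative values only;
# the first criterion is treated as profit, the second as cost).
if __name__ == '__main__':
    demo_matrix = np.array([[3.0, 7.0],
                            [5.0, 4.0],
                            [2.0, 9.0]])
    print(normalize(demo_matrix, ['profit', 'cost'], minmax_normalization))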
| 3.5 | 4 |
cvat/apps/engine/models.py | ItayHoresh/improvedCvat | 0 | 12789825 | <reponame>ItayHoresh/improvedCvat
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.fields import ArrayField, JSONField
from io import StringIO
from enum import Enum
import shlex
import csv
import re
import os
class StatusChoice(Enum):
ANNOTATION = 'annotation'
VALIDATION = 'validation'
COMPLETED = 'completed'
@classmethod
def choices(self):
return tuple((x.name, x.value) for x in self)
def __str__(self):
return self.value
class SafeCharField(models.CharField):
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value:
return value[:self.max_length]
return value
class Projects(models.Model):
name = models.CharField(max_length=256)
has_score = models.BooleanField(default=False)
class Meta:
default_permissions = ()
class Task(models.Model):
name = SafeCharField(max_length=256)
size = models.PositiveIntegerField()
path = models.CharField(max_length=256)
mode = models.CharField(max_length=32)
owner = models.ForeignKey(User, null=True, blank=True,
on_delete=models.SET_NULL, related_name="owners")
assignee = models.ForeignKey(User, null=True, blank=True,
on_delete=models.SET_NULL, related_name="assignees")
bug_tracker = models.CharField(max_length=2000, blank=True, default="")
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now_add=True)
overlap = models.PositiveIntegerField(default=0)
z_order = models.BooleanField(default=False)
flipped = models.BooleanField(default=False)
source = SafeCharField(max_length=256, default="unknown")
status = models.CharField(max_length=32, default=StatusChoice.ANNOTATION)
project = models.ForeignKey(Projects, on_delete=models.CASCADE, default=1)
score = models.FloatField(default=0)
last_viewed_frame = models.PositiveIntegerField(default=0)
video_id = models.IntegerField(default=-1)
# Extend default permission model
class Meta:
default_permissions = ()
unique_together = ['name', 'project']
def get_upload_dirname(self):
return os.path.join(self.path, ".upload")
def get_data_dirname(self):
return os.path.join(self.path, "data")
def get_dump_path(self):
name = re.sub(r'[\\/*?:"<>|]', '_', self.name)
return os.path.join(self.path, "{}.dump".format(name))
def get_log_path(self):
return os.path.join(self.path, "task.log")
def get_client_log_path(self):
return os.path.join(self.path, "client.log")
def get_image_meta_cache_path(self):
return os.path.join(self.path, "image_meta.cache")
def set_task_dirname(self, path):
self.path = path
self.save(update_fields=['path'])
def get_task_dirname(self):
return self.path
def __str__(self):
return self.name
class TaskSource(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
source_name = models.CharField(max_length=256)
frame = models.IntegerField()
class Meta:
default_permissions = ()
class Watershed(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
frame = models.PositiveIntegerField()
paintings = JSONField()
watershed = ArrayField(ArrayField(models.IntegerField()), null=True)
class Meta:
default_permissions = ()
class FrameProperties(models.Model):
prop = models.CharField(max_length=256)
value = models.CharField(max_length=256, null=True, default=None)
project = models.ForeignKey(Projects, on_delete=models.CASCADE)
parent = models.ForeignKey('self', null=True, on_delete=models.CASCADE)
class Meta:
default_permissions = ()
class TaskFrameSpec(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
propVal = models.ForeignKey(FrameProperties, on_delete=models.CASCADE)
class Meta:
default_permissions = ()
class KeyframeSpec(models.Model):
frame = models.PositiveIntegerField()
frameSpec = models.ForeignKey(TaskFrameSpec, on_delete=models.CASCADE)
class Meta:
default_permissions = ()
class ObjectStorages(models.Model):
name = models.CharField(max_length=256)
secret_key = models.CharField(max_length=256)
access_key = models.CharField(max_length=256)
endpoint_url = models.CharField(max_length=256, default='example.com')
class Meta:
default_permissions = ()
class Projects_Users(models.Model):
user = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL)
project = models.ForeignKey(Projects, on_delete=models.CASCADE)
class Meta:
default_permissions = ()
class Projects_ObjectStorages(models.Model):
project = models.ForeignKey(Projects, on_delete=models.CASCADE)
object_storage = models.ForeignKey(ObjectStorages, on_delete=models.CASCADE)
channels = ArrayField(models.CharField(max_length=256), null=True)
class Meta:
default_permissions = ()
class LabelTypes(models.Model):
label = models.CharField(max_length=256)
attribute = models.CharField(max_length=256, null=True, default=None)
value = models.CharField(max_length=256, null=True, default=None)
project = models.ForeignKey(Projects, on_delete=models.CASCADE)
parent = models.ForeignKey('self', null=True, on_delete=models.CASCADE)
class Meta:
default_permissions = ()
class AttributeDetails(models.Model):
can_change = models.BooleanField(default=True)
labelType = models.ForeignKey(LabelTypes, on_delete=models.CASCADE)
class Meta:
default_permissions = ()
class LabelDetails(models.Model):
color = models.CharField(max_length=256)
catagory = models.CharField(max_length=256, default='category')
labelType = models.ForeignKey(LabelTypes, on_delete=models.CASCADE)
class Meta:
default_permissions = ()
class Segment(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
start_frame = models.IntegerField()
stop_frame = models.IntegerField()
class Meta:
default_permissions = ()
class Job(models.Model):
segment = models.ForeignKey(Segment, on_delete=models.CASCADE)
assignee = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL)
status = models.CharField(max_length=32, default=StatusChoice.ANNOTATION)
max_shape_id = models.BigIntegerField(default=-1)
class Meta:
default_permissions = ()
class Comments(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
frame = models.PositiveIntegerField()
comment = models.CharField(max_length=9999)
class Meta:
default_permissions = ()
class Label(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
name = SafeCharField(max_length=64)
def __str__(self):
return self.name
class Meta:
default_permissions = ()
def parse_attribute(text):
match = re.match(r'^([~@])(\w+)=(\w+):?(.+)?$', text)
prefix = match.group(1)
name = match.group(3)
if match.group(4):
values = list(csv.reader(StringIO(match.group(4)), quotechar="'"))[0]
type = match.group(2)
else:
values = []
type = 'text'
return {'prefix':prefix, 'type':type, 'name':name, 'values':values}
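# For example (illustrative input, not taken from the CVAT docs):
#   parse_attribute("~select=quality:'good','bad'") returns
#   {'prefix': '~', 'type': 'select', 'name': 'quality', 'values': ['good', 'bad']}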
class AttributeSpec(models.Model):
label = models.ForeignKey(Label, on_delete=models.CASCADE)
text = models.CharField(max_length=1024)
class Meta:
default_permissions = ()
def get_attribute(self):
return parse_attribute(self.text)
def is_mutable(self):
attr = self.get_attribute()
return attr['prefix'] == '~'
def get_type(self):
attr = self.get_attribute()
return attr['type']
def get_name(self):
attr = self.get_attribute()
return attr['name']
def get_default_value(self):
attr = self.get_attribute()
return attr['values'][0]
def get_values(self):
attr = self.get_attribute()
return attr['values']
def __str__(self):
return self.get_attribute()['name']
class AttributeVal(models.Model):
# TODO: add a validator here to be sure that it corresponds to self.label
id = models.BigAutoField(primary_key=True)
spec = models.ForeignKey(AttributeSpec, on_delete=models.CASCADE)
value = SafeCharField(max_length=64)
class Meta:
abstract = True
default_permissions = ()
class Annotation(models.Model):
job = models.ForeignKey(Job, on_delete=models.CASCADE)
label = models.ForeignKey(Label, on_delete=models.CASCADE)
frame = models.PositiveIntegerField()
group_id = models.PositiveIntegerField(default=0)
client_id = models.BigIntegerField(default=-1)
class Meta:
abstract = True
class Shape(models.Model):
occluded = models.BooleanField(default=False)
z_order = models.IntegerField(default=0)
class Meta:
abstract = True
default_permissions = ()
class BoundingBox(Shape):
id = models.BigAutoField(primary_key=True)
xtl = models.FloatField()
ytl = models.FloatField()
xbr = models.FloatField()
ybr = models.FloatField()
class Meta:
abstract = True
default_permissions = ()
class PolyShape(Shape):
id = models.BigAutoField(primary_key=True)
points = models.TextField()
class Meta:
abstract = True
default_permissions = ()
class LabeledBox(Annotation, BoundingBox):
pass
class LabeledBoxAttributeVal(AttributeVal):
box = models.ForeignKey(LabeledBox, on_delete=models.CASCADE)
class LabeledPolygon(Annotation, PolyShape):
pass
class LabeledPolygonAttributeVal(AttributeVal):
polygon = models.ForeignKey(LabeledPolygon, on_delete=models.CASCADE)
class LabeledPolyline(Annotation, PolyShape):
pass
class LabeledPolylineAttributeVal(AttributeVal):
polyline = models.ForeignKey(LabeledPolyline, on_delete=models.CASCADE)
class LabeledPoints(Annotation, PolyShape):
pass
class LabeledPointsAttributeVal(AttributeVal):
points = models.ForeignKey(LabeledPoints, on_delete=models.CASCADE)
class ObjectPath(Annotation):
id = models.BigAutoField(primary_key=True)
shapes = models.CharField(max_length=10, default='boxes')
class ObjectPathAttributeVal(AttributeVal):
track = models.ForeignKey(ObjectPath, on_delete=models.CASCADE)
class TrackedObject(models.Model):
track = models.ForeignKey(ObjectPath, on_delete=models.CASCADE)
frame = models.PositiveIntegerField()
outside = models.BooleanField(default=False)
class Meta:
abstract = True
default_permissions = ()
class TrackedBox(TrackedObject, BoundingBox):
pass
class TrackedBoxAttributeVal(AttributeVal):
box = models.ForeignKey(TrackedBox, on_delete=models.CASCADE)
class TrackedPolygon(TrackedObject, PolyShape):
pass
class TrackedPolygonAttributeVal(AttributeVal):
polygon = models.ForeignKey(TrackedPolygon, on_delete=models.CASCADE)
class TrackedPolyline(TrackedObject, PolyShape):
pass
class TrackedPolylineAttributeVal(AttributeVal):
polyline = models.ForeignKey(TrackedPolyline, on_delete=models.CASCADE)
class TrackedPoints(TrackedObject, PolyShape):
pass
class TrackedPointsAttributeVal(AttributeVal):
points = models.ForeignKey(TrackedPoints, on_delete=models.CASCADE)
| 2.09375 | 2 |
epsilon/hotfixes/loopbackasync_reentrancy.py | palfrey/epsilon | 4 | 12789826 |
"""
Fix from Twisted r23970
"""
from twisted.internet.task import deferLater
from twisted.protocols.loopback import _loopbackAsyncBody
def _loopbackAsyncContinue(ignored, server, serverToClient, client, clientToServer):
# Clear the Deferred from each message queue, since it has already fired
# and cannot be used again.
clientToServer._notificationDeferred = serverToClient._notificationDeferred = None
# Schedule some more byte-pushing to happen. This isn't done
# synchronously because no actual transport can re-enter dataReceived as
# a result of calling write, and doing this synchronously could result
# in that.
from twisted.internet import reactor
return deferLater(
reactor, 0,
_loopbackAsyncBody, server, serverToClient, client, clientToServer)
def install():
from twisted.protocols import loopback
loopback._loopbackAsyncContinue = _loopbackAsyncContinue
| 2.078125 | 2 |
Utils/ContentCheck.py | eurobronydevs/Canni-AI | 1 | 12789827 | import discord
def StrContains(string: str, contains: str) -> bool:
return contains.lower() in string.lower()
def StrStartWith(string: str, startsWith: str) -> bool:
print("String: {0}, type {1}".format(startsWith, type(startsWith)))
return string.lower().startswith(startsWith.lower())
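# Usage sketch for the helpers above (example strings are illustrative):
#   StrContains("Hello World", "world")  -> True
#   StrStartWith("Discord bot", "disc")  -> True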
def StrContainsWord(string: str, word: str) -> bool:
wordsArray = string.split(" ")
for wrd in wordsArray:
if word.lower() == wrd.lower():
return True
return False | 3.40625 | 3 |
tests/parser_test.py | mwawrzos/schwab_parser | 0 | 12789828 | <filename>tests/parser_test.py
import pytest
import schwab_parser.parsers as p
def split_csv(txt):
return [row.split(',') for row in txt.strip().splitlines()]
@pytest.fixture
def espp_deposit_lines():
deposit_lines = '''
"Date","Action","Symbol","Description","Quantity"''' \
''',"Fees & Commissions","Disbursement Election","Amount"
"2001/04/05","Deposit","AAPL","ESPP",42,"","",""
'''
return split_csv(deposit_lines)
@pytest.fixture
def parser(espp_deposit_lines):
header, *_ = espp_deposit_lines
return p.Parser(header)
@pytest.fixture
def rows(espp_deposit_lines):
return espp_deposit_lines[1:]
def test_wrong_header(espp_deposit_lines):
header, *rows = espp_deposit_lines
with pytest.raises(Exception, match='Header not supported'):
p.Parser([])
def test_short_row(parser):
with pytest.raises(Exception, match='Expected more than two columns'):
parser.parse([])
def test_not_supported(parser, rows):
row = rows[0]
row[1] = 'not supported deposit'
with pytest.raises(Exception, match='Not supported row'):
parser.parse(row)
def test_espp_deposit(parser, rows):
parser, _ = parser.parse(rows[0])
assert isinstance(parser, p.DepositParser)
| 2.9375 | 3 |
gtgs/users/migrations/0003_merge_20171204_1603.py | robertatakenaka/gtgs | 0 | 12789829 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-04 18:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20171201_1517'),
('users', '0002_auto_20171204_1549'),
]
operations = [
]
| 1.375 | 1 |
hackerearth/Algorithms/Studious Amit and His New College/solution.py | ATrain951/01.python-com_Qproject | 4 | 12789830 | <filename>hackerearth/Algorithms/Studious Amit and His New College/solution.py
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
n, m = map(int, input().strip().split())
prerequisites = {}
for _ in range(m):
u, v = map(int, input().strip().split())
prerequisites[u] = v
for course in prerequisites:
if prerequisites[course] in prerequisites and prerequisites[prerequisites[course]] == course:
print(0)
break
else:
print(1)
| 3.640625 | 4 |
scripts/scrape.py | alexbotello/concert_finder | 1 | 12789831 | <gh_stars>1-10
import json
import sys
import time
import requests
import settings
import logging
from sqlalchemy.orm import sessionmaker
from models import Concert, create_concert_table, db_connect
from utils import post_to_discord
# Initialize Database connections
engine = db_connect()
create_concert_table(engine)
Session = sessionmaker(bind=engine)
session = Session()
def find_all_concerts():
"""
Use BandsinTown API to search for concert events
"""
for art in settings.ARTISTS:
URL = settings.BIT_URL + art + '/events/search'
param = {'app_id': settings.ID, 'api_version': settings.API,
'location': settings.LOCATION, 'radius': settings.RADIUS,
'format': 'json'}
resp = requests.get(URL, params=param )
if resp.status_code != 200:
raise ApiError('{} response code'.format(resp.status_code))
# Load JSON data from response
json_data = json.loads(resp.text)
old_result_found = 0
new_results_found = 0
post_image = 1
for event in json_data:
# Query the database to compare current listing to any
# that already exists
listing = session.query(Concert).filter_by(c_id=event['id']).first()
if listing:
old_result_found += 1
# If no listing is found
if listing is None:
listing = Concert(
c_id =event['id'],
title=event['title'],
datetime=event['formatted_datetime']
)
session.add(listing)
session.commit()
new_results_found += 1
if post_image == 1:
# Post picture of band/artist to discord channel
image = {'embed':
{'image':
{'url': json_data[0]['artists'][0]['thumb_url']}
}
}
post_to_discord(image)
# Discord API requires messages to be JSON formatted
# using 'content' field
message = {'content': event['title'] + '\n' +
event['formatted_datetime']}
buffers = {'content': '-' * 60}
try:
post_to_discord(message)
time.sleep(1)
post_to_discord(buffers)
time.sleep(3)
except Exception as exc:
logging.warning('An error occured while posting to discord',
sys.exc_info())
# Change post_image to False to avoid repeated image posts
post_image -= 1
if json_data == []:
logging.info('Found No Results For {}'.format(art))
if json_data:
logging.info('Found {} posted result and '
'{} new results for {}'.format(old_result_found,
new_results_found, art))
logging.info("Scrape was completed")
class ApiError(Exception):
pass
| 2.640625 | 3 |
outerspace/TransformationMethod.py | sirbiscuit/outerspace | 3 | 12789832 | from abc import ABC, abstractmethod
class TransformationMethod(ABC):
@abstractmethod
def get_widgets(self):
''' Create and return widgets for manipulating the parameters of the
transformation method.
Returns
-------
OrderedDict[str -> ipywidgets.Widget]
A dict that represents a mapping of labels to widgets (of the
ipywidgets library). The values will be rendered in the order that
is given by the items() iterator of the dictionary. For that reason
it is sensible to return an instance of Pythons OrderedDict. The
keys are used to identify and find widgets e.g. in
get_current_params.
'''
pass
@abstractmethod
def get_current_params(self, widgets):
''' Returns the current parameters of the transformation method from
the current state of the widgets.
Returns
-------
dict
The parameters of the transformation method that will be used in
run_transformation.
'''
pass
@abstractmethod
def set_current_params(self, widgets, embedding_params):
        ''' Set the state of the widgets from the given transformation
        parameters.
Parameters
----------
widgets : dict[str, ipywidget]
The mapping of parameter names to ipywidgets.
embedding_params : dict
The parameters for the transformation method.
'''
pass
@abstractmethod
def get_embedding_code(self, widgets):
''' Returns the Python code that reproduces the current embedding.
Returns
-------
        str
            Python code that reproduces the current embedding with its
            current parameter values.
'''
pass
@abstractmethod
def run_transformation(self, X, y, transformation_params, callback):
''' Executes the actual transformation method.
Parameters
----------
X : np.ndarray of shape (n_examples, n_features)
The feature variables.
y : array_like of shape (n_examples,)
The target variable used for coloring the data points.
transformation_params : dict
Parameters for the transformation method.
callback : Callable[[command, iteration, payload]]
A callback that is intended for providing feedback to the user.
Multiple different commands are available:
* start: the transformation method was initialized and is
running. The payload is expected to be a dict
{ error_metrics } where error_metrics is an array of
dicts { name, label }. Each entry creates a widget
with description text "label". The field "name" is an
identifier that will be useful in the "embedding" command.
* embedding: a new embedding is available. The payload is a
dict { embedding, error_metrics } where embedding is a
numpy.ndarray and error_metrics is a dict.
* error: an error occured and payload contains the error
message.
* status: for providing other feedback to the user. The
message in the payload is displayed.
'''
pass
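# Minimal sketch of how a concrete run_transformation might drive the callback
# protocol described above. The trivial "first two columns" embedding and the
# 'loss' metric are placeholders, not part of this package:
#
#   def run_transformation(self, X, y, transformation_params, callback):
#       callback('start', 0,
#                {'error_metrics': [{'name': 'loss', 'label': 'Loss'}]})
#       embedding = X[:, :2]  # stand-in for a real dimensionality reduction
#       callback('embedding', 1,
#                {'embedding': embedding, 'error_metrics': {'loss': 0.0}})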
| 3.640625 | 4 |
analyse_functions.py | kcotar/Abundance_space_dimensionality | 0 | 12789833 | import os, imp
import matplotlib
if os.environ.get('DISPLAY') is None:
# enables figure saving on clusters with ssh connection
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
imp.load_source('helper', '../tSNE_test/helper_functions.py')
from helper import get_spectra
# ----------------------------------------
# Global variables
# ----------------------------------------
GALAH_BLUE = [4718, 4903]
GALAH_GREEN = [5649, 5873]
GALAH_RED = [6481, 6739]
GALAH_IR = [7590, 7890]
# ----------------------------------------
# Functions
# ----------------------------------------
def range_in_band(x_range, bands_in_dataset=[1,2,3,4]):
mean_range = np.mean(x_range)
if mean_range > GALAH_BLUE[0] and mean_range < GALAH_BLUE[1]:
use_band = np.where(np.array(bands_in_dataset) == 1)
elif mean_range > GALAH_GREEN[0] and mean_range < GALAH_GREEN[1]:
use_band = np.where(np.array(bands_in_dataset) == 2)
elif mean_range > GALAH_RED[0] and mean_range < GALAH_RED[1]:
use_band = np.where(np.array(bands_in_dataset) == 3)
elif mean_range > GALAH_IR[0] and mean_range < GALAH_IR[1]:
use_band = np.where(np.array(bands_in_dataset) == 4)
else:
return np.nan
if np.size(use_band) != 1:
# band was probably not read
return np.nan
else:
return int(use_band[0])
def get_sobject_spectra(sobject_ids, root=None, bands=[1,2,3,4]):
spectra_data = list()
wvl_data = list()
for sobj_id in sobject_ids:
spectrum, wavelengths = get_spectra(str(sobj_id), root=root, bands=bands)
spectra_data.append(spectrum)
wvl_data.append(wavelengths)
return (spectra_data, wvl_data)
def plot_spectra_collection(spetra_data, wvl_data, cannon_data_subset, linelist_subset, elem_col, path='spectra.png', title=None):
total_plots = len(linelist_subset) # might be smaller than space allocated for all plots
x_plots = 5
y_plots = np.ceil(1.*total_plots/x_plots)
fig, ax = plt.subplots(np.int8(y_plots), np.int8(x_plots),
figsize=(3*x_plots, 3*y_plots))
for i_plot in np.arange(total_plots):
p_x_pos = i_plot % x_plots
p_y_pos = np.int8(np.floor(i_plot / x_plots))
if total_plots > x_plots:
plot_pos = (p_y_pos, p_x_pos)
else:
plot_pos = p_x_pos
# add individual spectra to the plot
x_range = [linelist_subset['segment_start'].get_values()[i_plot], linelist_subset['segment_end'].get_values()[i_plot]]
for i_data in range(len(cannon_data_subset)):
# determine which band should be plotted
idx_band = range_in_band(x_range, bands_in_dataset=[1,2,3,4])
# for limited number of spectra lines add plot title that will be shown in legend
if i_data < 8:
plot_label = str(cannon_data_subset[elem_col][i_data])
else:
plot_label = None
ax[plot_pos].plot(wvl_data[i_data][idx_band],
spetra_data[i_data][idx_band],
label=plot_label)
# add legend in the middle of the plot
if p_x_pos==0 and p_y_pos==0:
ax[plot_pos].legend(loc='center right', bbox_to_anchor=(-0.3, 0.5))
ax[plot_pos].axvline(x=linelist_subset['line_centre'].get_values()[i_plot], color='black', linewidth=1)
ax[plot_pos].axvline(x=linelist_subset['line_start'].get_values()[i_plot], color='black', linewidth=2)
ax[plot_pos].axvline(x=linelist_subset['line_end'].get_values()[i_plot], color='black', linewidth=2)
ax[plot_pos].set(xlim=x_range,
ylim=[0.3, 1.1])
fig.suptitle(title, y=1.)
fig.tight_layout()
fig.savefig(path, dpi=300, bbox_inches='tight')
plt.close(fig)
def reduce_spectra_collection(spetra_data, wvl_data, linelist_subset):
results = list()
for center_wvl in linelist_subset['line_centre']:
# select all observations closest to the center of the absorption line
spectra_median = list()
for i_data in range(len(wvl_data)):
idx_get = np.nanargmin(np.abs(wvl_data[i_data] - center_wvl))
spectra_median.append(spetra_data[i_data][np.unravel_index(idx_get, np.shape(spetra_data[i_data]))])
results.append(np.median(spectra_median))
return results
def plot_abundace_grid(data, prefix='', lines_wvl=None):
# data is assumed to be a MxN pandas array
colnames = data.keys().values
plot_y = ['teff', 'logg', 'feh']
plot_y_fixed = [5600, 4., -0.25]
plot_x = 'abund'
col_lines = [col for col in colnames if 'line' in col]
# prepare grid of observations
abund_uniq = np.unique(data[plot_x+'_min'])
abund_range = [np.nanmin(data[plot_x+'_min'].values), np.nanmax(data[plot_x+'_max'].values)]
# mean of min and max of observed abundance value
x_labels = (np.unique(data[plot_x+'_max']) + np.unique(data[plot_x+'_min'])) / 2.
# plot gathered information from spectra at absorption lines
for i_line in range(len(col_lines)):
col_line_use = col_lines[i_line]
print 'Plotting data for '+col_line_use
for i_p_y in range(len(plot_y)):
# determine rows with predefined constant values of parameters
const_cols = [v for v in range(len(plot_y)) if i_p_y != v]
idx_const_values = np.logical_and(data[plot_y[const_cols[0]] + '_min'] == plot_y_fixed[const_cols[0]],
data[plot_y[const_cols[1]] + '_min'] == plot_y_fixed[const_cols[1]])
# create image array that will be shown in plot
plot_y_use = plot_y[i_p_y]
print ' Parameter '+plot_y_use
y_uniq = np.unique(data[plot_y_use+'_min'])
y_range = [np.nanmin(data[plot_y_use+'_min'].values), np.nanmax(data[plot_y_use+'_max'].values)]
y_labels = (np.unique(data[plot_y_use + '_max']) + np.unique(data[plot_y_use + '_min'])) / 2.
img_z = np.ndarray((len(y_uniq), len(abund_uniq)))
for i_val in range(len(y_uniq)):
idx_select = np.logical_and(data[plot_y_use+'_min'] == y_uniq[i_val],
idx_const_values)
img_z[i_val] = data[col_line_use][idx_select]
# now plot resulting image
            plt.imshow(img_z, cmap='gist_heat', vmin=0, vmax=1, interpolation="nearest")
#extent=[abund_range[0], abund_range[1], y_range[0], y_range[1]])
plt.xlabel('Abundance')
plt.ylabel(plot_y_use.capitalize())
if lines_wvl is not None:
plt.title('Absorption line at '+str(lines_wvl[i_line]))
# plt.xlim(abund_range)
# plt.ylim(y_range)
idx_x_ticks = range(0, len(x_labels), 2)
plt.xticks(idx_x_ticks, x_labels[idx_x_ticks])
idx_y_ticks = range(0, len(y_labels), 2)
plt.yticks(idx_y_ticks, y_labels[idx_y_ticks])
plt.colorbar()
plt.tight_layout()
plt.savefig(prefix+'_'+col_line_use+'_'+plot_y_use+'.png', dpi=200)
plt.close()
| 2.265625 | 2 |
SW_stops_amount.py | cesarborroch/Kneat_Challenge | 0 | 12789834 | # Library used to return the content of a URL
from urllib.request import Request, urlopen
# Library to decode text to JSON
import json
class SW_stops_amount:
def __init__(self):
pass
# Decodes the consumables
def calc(self, strConsumables):
intHOURS_IN_YEAR = 8760
intHOURS_IN_MONTH = 730
intHOURS_IN_WEEK = 168
intHOURS_IN_DAY = 24
# Gets the number part of the string
strValue = ''
for s in strConsumables.split():
if s.isdigit():
strValue += s
intNumber = int(strValue)
# Interprets the text part in consumables
if 'day' in strConsumables:
return intNumber * intHOURS_IN_DAY
if 'week' in strConsumables:
return intNumber * intHOURS_IN_WEEK
if 'month' in strConsumables:
return intNumber * intHOURS_IN_MONTH
if 'year' in strConsumables:
return intNumber * intHOURS_IN_YEAR
def get_amount(self, intDistance, strConsumables, strMGLT):
return int(intDistance / (self.calc(strConsumables) * int(strMGLT)))
# Prints the amount of stops given the ship and distance
def analyze_ship(self, ship, intDistance):
# Calculates the amount of stops
strName = ship['name']
strConsumables = ship['consumables']
strMGLT = ship['MGLT']
# Can't calculate when certain values are missing
if strConsumables != 'unknown' and strMGLT != 'unknown':
intAmountOfStops = self.get_amount(intDistance, strConsumables, strMGLT)
print('Ship: "{}", Amount of stops: {}'.format(strName, intAmountOfStops))
else:
print('Ship: "{}", Consumables and/or MGLT are unknown.'.format(strName))
def run(self):
# Header
print('Amount of Stops Calculator for SW Ships')
print()
# Asks the user for a value
bAskingForInput = True
while bAskingForInput:
try:
print('How far are you heading? Insert a numerical value for a distance in MGLT.')
strInput = input()
intDistance = int(strInput)
bAskingForInput = False
except:
print('The inserted value "{}" is invalid as a number. Try again.'.format(strInput))
print()
strURL_SWAPI_STARSHIPS = 'https://swapi.co/api/starships/'
print('Downloading data from {}...'.format(strURL_SWAPI_STARSHIPS))
print()
# Controls how many pages should be read
        bThereIsMoreData = True
intAmountOfShips = 0
while bThereIsMoreData:
# Gets the starships and their data
req = Request(strURL_SWAPI_STARSHIPS, headers={'User-Agent': 'Mozilla/5.0'})
content = urlopen(req).read()
data = json.loads(content.decode())
# Does the calc for each starship
for ship in data['results']:
intAmountOfShips += 1
self.analyze_ship(ship, intDistance)
strURL_SWAPI_STARSHIPS = data['next']
            bThereIsMoreData = strURL_SWAPI_STARSHIPS is not None
print()
input('{} ships in total. Hit ENTER to finish.'.format(intAmountOfShips))
App = SW_stops_amount()
if __name__ == '__main__':
App.run()
| 3.890625 | 4 |
examples/Extra_Examples/PDE/Burgers1D.py | leakec/tfc | 15 | 12789835 | <filename>examples/Extra_Examples/PDE/Burgers1D.py
import tqdm
import numpy as onp
import jax.numpy as np
from tfc import mtfc
from tfc.utils import NLLS, egrad
# Constants and switches:
c = 1.
nu = 0.5
xInit = -3.
xFinal = 3.
alpha = 1.
x0 = np.array([xInit,0.])
xf = np.array([xFinal,1.])
n = 30
nTest = 100
m = 600
usePlotly = True
# Real analytical solution:
real = lambda x,t: c/alpha-c/alpha*np.tanh(c*(x-c*t)/(2.*nu))
# Create the mtfc class:
N = [n,n]
nC = -1
tfc = mtfc(N,nC,m,dim=2,basis='ELMTanh',x0=x0,xf=xf)
x = tfc.x
# Get the basis functions
H = tfc.H
# Create the TFC constrained expression
u1 = lambda xi,*x: np.dot(H(*x),xi)+\
(xFinal-x[0])/(xFinal-xInit)*(c/alpha-c/alpha*np.tanh(c*(xInit-c*x[1])/(2.*nu))-np.dot(H(xInit*np.ones_like(x[0]),x[1]),xi))+\
(x[0]-xInit)/(xFinal-xInit)*(c/alpha-c/alpha*np.tanh(c*(xFinal-c*x[1])/(2.*nu))-np.dot(H(xFinal*np.ones_like(x[0]),x[1]),xi))
u = lambda xi,*x: u1(xi,*x)+\
c/alpha-c/alpha*np.tanh(c*x[0]/(2.*nu))-u1(xi,x[0],np.zeros_like(x[1]))
# Create the residual
ux = egrad(u,1)
d2x = egrad(ux,1)
ut = egrad(u,2)
r = lambda xi: ut(xi,*x)+alpha*u(xi,*x)*ux(xi,*x)-nu*d2x(xi,*x)
# Solve the problem
xi = np.zeros(H(*x).shape[1])
xi,it,time = NLLS(xi,r,method='lstsq',timer=True,timerType="perf_counter")
# Calculate error at the test points:
dark = np.meshgrid(np.linspace(xInit,xFinal,nTest),np.linspace(0.,1.,nTest))
xTest = tuple([j.flatten() for j in dark])
err = np.abs(u(xi,*xTest)-real(*xTest))
print("Training time: "+str(time))
print("Max error: "+str(np.max(err)))
print("Mean error: "+str(np.mean(err)))
# Plot analytical solution
if usePlotly:
from tfc.utils.PlotlyMakePlot import MakePlot
p = MakePlot(r"x",r"t",zlabs=r"u(x,t)")
p.Surface(x=xTest[0].reshape((nTest,nTest)),
y=xTest[1].reshape((nTest,nTest)),
z=real(*xTest).reshape((nTest,nTest)),
showscale=False)
p.PartScreen(9,8)
p.show()
else:
from matplotlib import cm
from MakePlot import MakePlot
p = MakePlot(r"$x$",r"$t$",zlabs=r"$u(x,t)$")
p.ax[0].plot_surface(xTest[0].reshape((nTest,nTest)),
xTest[1].reshape((nTest,nTest)),
real(*xTest).reshape((nTest,nTest)),
cmap=cm.gist_rainbow)
p.ax[0].xaxis.labelpad = 10
p.ax[0].yaxis.labelpad = 20
p.ax[0].zaxis.labelpad = 10
p.show()
| 2.25 | 2 |
truhanen.serobot.api/truhanen/serobot/api/hardware/gpio.py | truhanen/serobot | 1 | 12789836 | <reponame>truhanen/serobot
from enum import IntEnum, auto
import logging
from typing import Optional, List
try:
import RPi.GPIO as GPIO
except ModuleNotFoundError:
GPIO = None
from .bcm_channel import BcmChannel
class GpioDirection(IntEnum):
IN = GPIO.IN if GPIO is not None else auto()
OUT = GPIO.OUT if GPIO is not None else auto()
class GpioState(IntEnum):
HIGH = GPIO.HIGH if GPIO is not None else auto()
LOW = GPIO.LOW if GPIO is not None else auto()
# This tells e.g. that a GPIO state could not be read.
UNKNOWN = 2 if GPIO is not None else auto()
class GpioPull(IntEnum):
OFF = GPIO.PUD_OFF if GPIO is not None else auto()
DOWN = GPIO.PUD_DOWN if GPIO is not None else auto()
UP = GPIO.PUD_UP if GPIO is not None else auto()
# Module-level logger
logger = logging.getLogger(__name__)
if GPIO is not None:
# Set general settings whenever this module is used.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
else:
logger.warning('Module RPi.GPIO is missing. General GPIO settings were '
'not applied.')
class GpioSetup:
"""Class for managing a general GPIO channel, input, output, or PWM."""
def __init__(self, bcm_channel: BcmChannel,
direction: GpioDirection,
pull: Optional[GpioPull] = None,
initial: Optional[GpioState] = None):
self._channel = bcm_channel
if GPIO is not None:
# Create kwargs without None values.
kwargs = dict(direction=direction)
if pull is not None:
kwargs['pull_up_down'] = pull
if initial is not None:
kwargs['initial'] = initial
GPIO.setup(bcm_channel, **kwargs)
else:
logger.warning(f'Module RPi.GPIO is missing. Did not really '
f'setup {bcm_channel!r}.')
def __del__(self):
if GPIO is not None:
GPIO.cleanup(self.channel)
else:
logger.warning(f'Module RPi.GPIO is missing. Did not really '
f'cleanup {self.channel!r}.')
@property
def channel(self):
return self._channel
class GpioInput(GpioSetup):
"""Class for managing a GPIO input channel."""
def __init__(self, bcm_channel: BcmChannel, pull: Optional[GpioPull] = None):
super().__init__(bcm_channel, direction=GpioDirection.IN, pull=pull)
@property
def state(self) -> GpioState:
"""The state of the GPIO input pin."""
if GPIO is not None:
state = GpioState(GPIO.input(self.channel))
else:
logger.info(f'Module RPi.GPIO is missing. Did not really read '
f'the state of {self.channel!r}.')
state = GpioState.UNKNOWN
return state
class GpioOutput(GpioSetup):
"""Class for managing a GPIO output channel."""
def __init__(self, bcm_channel: BcmChannel, initial: Optional[GpioState] = None):
super().__init__(bcm_channel, direction=GpioDirection.OUT, initial=initial)
@property
def state(self) -> GpioState:
"""The state of the GPIO output pin."""
if GPIO is not None:
state = GpioState(GPIO.input(self.channel))
else:
logger.info(f'Module RPi.GPIO is missing. Did not really read '
f'the state of {self.channel!r}.')
state = GpioState.UNKNOWN
return state
@state.setter
def state(self, value: GpioState):
if GPIO is not None:
GPIO.output(self.channel, value)
else:
logger.info(f'Module RPi.GPIO is missing. Did not physically change '
f'the state of {self.channel!r}.')
@classmethod
def set_multiple(cls, outputs: List['GpioOutput'], states: List[GpioState]):
"""Set the states of multiple GPIO outputs with a single call."""
channels = [output.channel for output in outputs]
if GPIO is not None:
GPIO.output(channels, states)
else:
logger.info(f'Module RPi.GPIO is missing. Did not physically change '
f'the states of {channels!r}.')
class GpioPwm(GpioSetup):
"""Class for managing a GPIO PWM channel."""
def __init__(self, bcm_channel: BcmChannel,
initial: Optional[GpioState] = None,
frequency: int = 1,
duty_cycle: int = 100):
"""
Parameters
----------
bcm_channel
The GPIO channel of the PWM.
initial
Initial state of the GPIO channel.
frequency
The initial frequency of the PWM, in Hertz.
duty_cycle
The initial duty cycle of the PWM, as percentage.
"""
super().__init__(bcm_channel, direction=GpioDirection.OUT, initial=initial)
self._frequency = frequency
self._duty_cycle = duty_cycle
if GPIO is not None:
self._pwm = GPIO.PWM(self.channel, frequency)
else:
logger.warning(f'Module RPi.GPIO is missing. Did not really '
f'setup the PWM for {bcm_channel!r}.')
self._pwm = None
@property
def frequency(self) -> float:
return self._frequency
@frequency.setter
def frequency(self, value: float):
self._frequency = value
if self._pwm is not None:
self._pwm.ChangeFrequency(value)
else:
logger.info(f'The PWM instance is missing. Did not physically change '
f'the frequency of the PWM in {self.channel!r}.')
@property
def duty_cycle(self) -> float:
return self._duty_cycle
@duty_cycle.setter
def duty_cycle(self, value: float):
self._duty_cycle = value
if self._pwm is not None:
self._pwm.ChangeDutyCycle(value)
else:
logger.info(f'The PWM instance is missing. Did not physically change '
f'the duty cycle of the PWM in {self.channel!r}.')
def start(self):
if self._pwm is not None:
self._pwm.start(self.duty_cycle)
else:
logger.info(f'The PWM instance is missing. Did not physically '
f'start the PWM in {self.channel!r}.')
def stop(self):
if self._pwm is not None:
self._pwm.stop()
else:
logger.info(f'The PWM instance is missing. Did not physically '
f'stop the PWM in {self.channel!r}.')
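# Minimal usage sketch (the BcmChannel member names below are assumptions,
# not taken from bcm_channel.py):
#
#   led = GpioOutput(BcmChannel.LED, initial=GpioState.LOW)
#   led.state = GpioState.HIGH
#
#   pwm = GpioPwm(BcmChannel.MOTOR_PWM, frequency=50, duty_cycle=25)
#   pwm.start()
#   pwm.duty_cycle = 60
#   pwm.stop()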
| 2.46875 | 2 |
historic/singleCOUNTYdiffMILES.py | adambreznicky/smudge_python | 1 | 12789837 | # Python 2.6.5 (r265:79096, Mar 19 2010, 21:48:26) [MSC v.1500 32 bit (Intel)] on win32
## Type "copyright", "credits" or "license()" for more information.
# ****************************************************************
# Personal firewall software may warn about the connection IDLE
# makes to its subprocess using this computer's internal loopback
# interface. This connection is not visible on any external
# interface and no data is sent to or received from the Internet.
# ****************************************************************
#import sys, os
#sys.path[0]
#print sys.path
#raw_input("Press any key to continue")
# IDLE 2.6.5
def totalcountymiles():
import arcpy
from arcpy import env
import datetime
import os
now = datetime.datetime.now()
suffix = now.strftime("%Y%m%d")
dbaseNAME = arcpy.GetParameterAsText(0)
prelimMILES = arcpy.GetParameterAsText(1)
directory = os.path.dirname(prelimMILES)
filename = os.path.basename(prelimMILES)
filenamePARTS = filename.split("_")
    name = str(filenamePARTS[0])
    THEcnty = str(filenamePARTS[1])
subfiles = "Database Connections\\"+dbaseNAME+"\\TPP_GIS.MCHAMB1.SUBFILES"
arcpy.AddMessage("Connection Established!")
where = """ "SUBFILE" = 2 AND "HIGHWAY_STATUS" = 4 AND "ADMIN_SYSTEM" = 3 AND "COUNTY" = """+THEcnty
final = directory+"\\"+name+"_"+THEcnty+"_DiffMileage"+suffix+".dbf"
arcpy.MakeQueryTable_management(subfiles, "temptable", "ADD_VIRTUAL_KEY_FIELD", "", "", where)
arcpy.Statistics_analysis("temptable", final, [["LEN_OF_SECTION", "SUM"]], "RTE_ID")
arcpy.JoinField_management(final, "RTE_ID", prelimMILES, "RTE_ID", "RTE_Miles")
arcpy.AddField_management(final, "Updated_Mi", "DOUBLE")
arcpy.AddField_management(final, "Mi_Diff", "DOUBLE")
cursor = arcpy.UpdateCursor(final)
for row in cursor:
newMILES = row.getValue("SUM_LEN_OF")
oldMILES = row.getValue("RTE_Miles")
difference = newMILES - oldMILES
row.setValue("Updated_Mi", newMILES)
row.setValue("Mi_Diff", difference)
cursor.updateRow(row)
arcpy.DeleteField_management(final, ["SUM_LEN_OF"])
arcpy.AddMessage("WooHoo! Go get them mileages!")
totalcountymiles()
print "success!"
| 2.40625 | 2 |
sphinx_design/cards.py | OriolAbril/sphinx-design | 43 | 12789838 | import re
from typing import List, NamedTuple, Optional, Tuple
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import StringList
from sphinx import addnodes
from sphinx.application import Sphinx
from sphinx.util.docutils import SphinxDirective
from sphinx.util.logging import getLogger
from .shared import (
WARNING_TYPE,
PassthroughTextElement,
create_component,
is_component,
make_choice,
margin_option,
text_align,
)
LOGGER = getLogger(__name__)
DIRECTIVE_NAME_CARD = "card"
DIRECTIVE_NAME_CAROUSEL = "card-carousel"
REGEX_HEADER = re.compile(r"^\^{3,}\s*$")
REGEX_FOOTER = re.compile(r"^\+{3,}\s*$")
def setup_cards(app: Sphinx) -> None:
"""Setup the card components."""
app.add_directive(DIRECTIVE_NAME_CARD, CardDirective)
app.add_directive(DIRECTIVE_NAME_CAROUSEL, CardCarouselDirective)
class CardContent(NamedTuple):
"""Split card into header (optional), body, footer (optional).
(offset, content)
"""
body: Tuple[int, StringList]
header: Optional[Tuple[int, StringList]] = None
footer: Optional[Tuple[int, StringList]] = None
class CardDirective(SphinxDirective):
"""A card component."""
has_content = True
required_arguments = 0
optional_arguments = 1 # card title
final_argument_whitespace = True
option_spec = {
"width": make_choice(["auto", "25%", "50%", "75%", "100%"]),
"margin": margin_option,
"text-align": text_align,
"img-top": directives.uri,
"img-bottom": directives.uri,
"img-background": directives.uri,
"link": directives.uri,
"link-type": make_choice(["url", "any", "ref", "doc"]),
"shadow": make_choice(["none", "sm", "md", "lg"]),
"class-card": directives.class_option,
"class-header": directives.class_option,
"class-body": directives.class_option,
"class-title": directives.class_option,
"class-footer": directives.class_option,
}
def run(self) -> List[nodes.Node]:
return [self.create_card(self, self.arguments, self.options)]
@classmethod
def create_card(
cls, inst: SphinxDirective, arguments: Optional[list], options: dict
) -> nodes.Node:
"""Run the directive."""
# TODO better degradation for latex
card_classes = ["sd-card", "sd-sphinx-override"]
if "width" in options:
card_classes += [f'sd-w-{options["width"].rstrip("%")}']
card_classes += options.get("margin", ["sd-mb-3"])
card_classes += [f"sd-shadow-{options.get('shadow', 'sm')}"]
if "link" in options:
card_classes += ["sd-card-hover"]
card = create_component(
"card",
card_classes
+ options.get("text-align", [])
+ options.get("class-card", []),
)
inst.set_source_info(card)
container = card
if "img-background" in options:
card.append(
nodes.image(
uri=options["img-background"],
classes=["sd-card-img"],
alt="background image",
)
)
overlay = create_component("card-overlay", ["sd-card-img-overlay"])
inst.set_source_info(overlay)
card += overlay
container = overlay
if "img-top" in options:
image_top = nodes.image(
"",
uri=options["img-top"],
alt="card-img-top",
classes=["sd-card-img-top"],
)
container.append(image_top)
components = cls.split_content(inst.content, inst.content_offset)
if components.header:
container.append(
cls._create_component(
inst, "header", options, components.header[0], components.header[1]
)
)
body = cls._create_component(
inst, "body", options, components.body[0], components.body[1]
)
if arguments:
title = create_component(
"card-title",
["sd-card-title", "sd-font-weight-bold"]
+ options.get("class-title", []),
)
textnodes, _ = inst.state.inline_text(arguments[0], inst.lineno)
title.extend(textnodes)
body.insert(0, title)
container.append(body)
if components.footer:
container.append(
cls._create_component(
inst, "footer", options, components.footer[0], components.footer[1]
)
)
if "img-bottom" in options:
image_bottom = nodes.image(
"",
uri=options["img-bottom"],
alt="card-img-bottom",
classes=["sd-card-img-bottom"],
)
container.append(image_bottom)
if "link" in options:
link_container = PassthroughTextElement()
if options.get("link-type", "url") == "url":
link = nodes.reference(
"",
"",
refuri=options["link"],
classes=["sd-stretched-link"],
)
else:
options = {
# TODO the presence of classes raises an error if the link cannot be found
"classes": ["sd-stretched-link"],
"reftarget": options["link"],
"refdoc": inst.env.docname,
"refdomain": "" if options["link-type"] == "any" else "std",
"reftype": options["link-type"],
"refexplicit": True,
"refwarn": True,
}
link = addnodes.pending_xref("", nodes.Text(""), **options)
inst.set_source_info(link)
link_container += link
container.append(link_container)
return card
@staticmethod
def split_content(content: StringList, offset: int) -> CardContent:
"""Split the content into header, body and footer."""
header_index, footer_index, header, footer = None, None, None, None
body_offset = offset
for index, line in enumerate(content):
# match the first occurrence of a header regex
if (header_index is None) and REGEX_HEADER.match(line):
header_index = index
# match the final occurrence of a footer regex
if REGEX_FOOTER.match(line):
footer_index = index
if header_index is not None:
header = (offset, content[:header_index])
body_offset += header_index + 1
if footer_index is not None:
footer = (offset + footer_index + 1, content[footer_index + 1 :])
body = (
body_offset,
content[
(header_index + 1 if header_index is not None else None) : footer_index
],
)
return CardContent(body, header, footer)
@classmethod
def _create_component(
cls,
inst: SphinxDirective,
name: str,
options: dict,
offset: int,
content: StringList,
) -> nodes.container:
"""Create the header, body, or footer."""
component = create_component(
f"card-{name}", [f"sd-card-{name}"] + options.get(f"class-{name}", [])
)
inst.set_source_info(component) # TODO set proper lines
inst.state.nested_parse(content, offset, component)
cls.add_card_child_classes(component)
return component
@staticmethod
def add_card_child_classes(node):
"""Add classes to specific child nodes."""
for para in node.traverse(nodes.paragraph):
para["classes"] = ([] if "classes" not in para else para["classes"]) + [
"sd-card-text"
]
# for title in node.traverse(nodes.title):
# title["classes"] = ([] if "classes" not in title else title["classes"]) + [
# "sd-card-title"
# ]
class CardCarouselDirective(SphinxDirective):
"""A component, which is a container for cards in a single scrollable row."""
has_content = True
required_arguments = 1 # columns
optional_arguments = 0
option_spec = {
"class": directives.class_option,
}
def run(self) -> List[nodes.Node]:
"""Run the directive."""
self.assert_has_content()
try:
cols = make_choice([str(i) for i in range(1, 13)])(
self.arguments[0].strip()
)
except ValueError as exc:
raise self.error(f"Invalid directive argument: {exc}")
container = create_component(
"card-carousel",
["sd-sphinx-override", "sd-cards-carousel", f"sd-card-cols-{cols}"]
+ self.options.get("class", []),
)
self.set_source_info(container)
self.state.nested_parse(self.content, self.content_offset, container)
for item in container.children:
if not is_component(item, "card"):
LOGGER.warning(
"All children of a 'card-carousel' "
f"should be 'card' [{WARNING_TYPE}.card]",
location=item,
type=WARNING_TYPE,
subtype="card",
)
break
return [container]
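# Illustrative reStructuredText usage sketch (inferred from the directives above;
# "^^^" separates an optional header from the body and "+++" an optional footer,
# and the carousel argument is the number of card columns):
#
#   .. card:: Card title
#      :link: https://example.com
#      :shadow: md
#
#      Header text
#      ^^^
#      Body text
#      +++
#      Footer text
#
#   .. card-carousel:: 2
#
#      .. card:: First card
#
#         Content of the first card.
#
#      .. card:: Second card
#
#         Content of the second card.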
| 2.109375 | 2 |
examples/configs_examples/convnet_cifar10.py | polyaxon/polyaxon-lib | 7 | 12789839 | <reponame>polyaxon/polyaxon-lib
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import polyaxon_lib as plx
from polyaxon_lib.polyaxonfile import local_runner
if __name__ == "__main__":
"""Creates an experiment using cnn for CIFAR-10 dataset classification task.
References:
* Learning Multiple Layers of Features from Tiny Images, <NAME>, 2009.
Links:
* [CIFAR-10 Dataset](https://www.cs.toronto.edu/~kriz/cifar.html)
"""
plx.datasets.cifar10.prepare('../data/cifar10')
local_runner.run('./yaml_configs/convnet_cifar10.yml')
| 1.765625 | 2 |
nylas/util/debug.py | nylas/nylas-production-python | 19 | 12789840 | <reponame>nylas/nylas-production-python
import sys
import time
import traceback
import collections
import gevent._threading
import greenlet
from nylas.logging import get_logger
MAX_BLOCKING_TIME = 5
class Tracer(object):
"""Log if a greenlet blocks the event loop for too long, and optionally log
statistics on time spent in individual greenlets.
Parameters
----------
gather_stats: bool
Whether to periodically log statistics about time spent.
max_blocking_time: float
Log a warning if a greenlet blocks for more than max_blocking_time
seconds.
"""
def __init__(self, gather_stats=False,
max_blocking_time=MAX_BLOCKING_TIME):
self.gather_stats = gather_stats
self.max_blocking_time = max_blocking_time
self.time_spent_by_context = collections.defaultdict(float)
self.total_switches = 0
self._last_switch_time = None
self._switch_flag = False
self._active_greenlet = None
self._main_thread_id = gevent._threading.get_ident()
self._hub = gevent.hub.get_hub()
self.log = get_logger()
def start(self):
self.start_time = time.time()
greenlet.settrace(self._trace)
# Spawn a separate OS thread to periodically check if the active
# greenlet on the main thread is blocking.
gevent._threading.start_new_thread(self._monitoring_thread, ())
def log_stats(self, max_stats=60):
total_time = round(time.time() - self.start_time, 2)
greenlets_by_cost = sorted(self.time_spent_by_context.items(),
key=lambda (k, v): v, reverse=True)
formatted_times = {k: round(v, 2) for k, v in
greenlets_by_cost[:max_stats]}
self.log.info('greenlet stats',
times=formatted_times,
total_switches=self.total_switches,
total_time=total_time)
def _trace(self, event, (origin, target)):
self.total_switches += 1
current_time = time.time()
if self.gather_stats and self._last_switch_time is not None:
time_spent = current_time - self._last_switch_time
if origin is not self._hub:
context = getattr(origin, 'context', None)
else:
context = 'hub'
self.time_spent_by_context[context] += time_spent
self._active_greenlet = target
self._last_switch_time = current_time
self._switch_flag = True
def _check_blocking(self):
if self._switch_flag is False:
active_greenlet = self._active_greenlet
if active_greenlet is not None and active_greenlet != self._hub:
# greenlet.gr_frame doesn't work on another thread -- we have
# to get the main thread's frame.
frame = sys._current_frames()[self._main_thread_id]
formatted_frame = '\t'.join(traceback.format_stack(frame))
self.log.warning(
'greenlet blocking', frame=formatted_frame,
context=getattr(active_greenlet, 'context', None),
blocking_greenlet_id=id(active_greenlet))
self._switch_flag = False
def _monitoring_thread(self):
last_logged_stats = time.time()
try:
while True:
self._check_blocking()
if self.gather_stats and time.time() - last_logged_stats > 60:
self.log_stats()
last_logged_stats = time.time()
gevent.sleep(self.max_blocking_time)
# Swallow exceptions raised during interpreter shutdown.
except Exception:
if sys is not None:
raise
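# Illustrative usage sketch (assumption: called once near process start-up from
# the main thread, so the monitoring thread can watch the gevent hub; greenlets
# may carry an optional `context` attribute that shows up in the logged stats):
#
#     tracer = Tracer(gather_stats=True)
#     tracer.start()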
| 2.5 | 2 |
resqueue/matlab.py | YSanchezAraujo/slurm_handler | 4 | 12789841 | import os
from resqueue import utils
class Matlab(object):
""" very preliminary, initial matlab class
to support matlab computing
"""
def __init__(self, matfile, cmd=None):
self.matfile = matfile
self.cmd = cmd
def _file_exists(self):
if not os.path.isfile(self.matfile):
raise Exception("file cannot be found: matfile")
def _add_prefix_suffix(self):
""" method reads in the matlab script and makes it
        a function so that it can be called via the matlab
terminal command
"""
with open(self.matfile, 'r') as mfile:
matfile = mfile.read()
self.prefix = "function[proxy]=resmat()\n"
self.suffix = "\nend\n"
self.mat_text = self.prefix + matfile + self.suffix
new_path = os.path.join(os.path.dirname(self.matfile), "resmat.m")
with open(new_path, 'w') as mfile:
mfile.write(self.mat_text)
self.edited_matfile = new_path
def _mlabcmd(self):
if self.cmd is None:
self.cmd = "matlab -nodesktop -nojvm -nodisplay -r"
self.cmd = self.cmd + " resmat();quit"
def run(self):
self._file_exists()
self._add_prefix_suffix()
self._mlabcmd()
if os.getcwd() != os.path.dirname(self.edited_matfile):
os.chdir(os.path.dirname(self.edited_matfile))
return utils.shell(self.cmd.split())
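# Illustrative usage sketch (assumptions: "analysis.m" exists and the `matlab`
# binary is on the PATH; run() rewrites the script as resmat.m next to it and
# shells out through resqueue.utils):
#
#     job = Matlab("/path/to/analysis.m")
#     output = job.run()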
| 2.875 | 3 |
test/fixtures/goldstandard/benchmark_confirm.py | nachogentile/PatentsProcessor | 53 | 12789842 | <gh_stars>10-100
import sqlite3 as sql
import os
import sys
import logging
# bmVerify(['final_r7', 'final_r8'], filepath="/home/ysun/disambig/newcode/all/", outdir = "/home/ayu/results_v2/")
# Text Files
txt_file = 'patentlist.txt'
opened_file = open(txt_file, 'U')
log_file = 'benchmark_results.log'
# Logging
logging.basicConfig(filename=log_file, level=logging.DEBUG)
open(log_file, "w")
# Set Up SQL Connections
con = sql.connect('/test/goldstandard/invnum_N_zardoz_with_invpat.sqlite3')
with con:
con_cur = con.cursor()
logging.info("Beginning to query database")
con_cur.execute("CREATE INDEX IF NOT EXISTS index_invnum ON invpat (Invnum)");
con_cur.execute("CREATE INDEX IF NOT EXISTS index_lastname ON invpat (Lastname)");
con_cur.execute("CREATE INDEX IF NOT EXISTS index_firstname ON invpat (Firstname)");
count = 0
errors = 0
success = 0
while True:
line_read = opened_file.readline()
# print line_read
if not line_read:
print "EXITING"
break
count = count + 1
if count%100 == 0:
print "starting patent", count
split_lines = line_read.split(', ')
# Strip out weird characters/formatting
# Need to add leading "0" to Patent if not Design/Util/etc..
patent_to_match = split_lines[0].strip(' \t\n\r')
if len(patent_to_match) == 7:
patent_to_match = "0" + patent_to_match
last_name = split_lines[1].strip(' \t\n\r')
first_name = split_lines[2].strip(' \t\n\r')
# print patent_to_match, last_name, first_name
con_cur.execute("SELECT Patent FROM invpat WHERE (Lastname = \"%s\" and Firstname = \"%s\");" % (last_name, first_name))
patents_matched_from_SQL = con_cur.fetchall()
match_found = False
for patent_match in patents_matched_from_SQL:
# print patent_match[0]
# print patent_to_match
if patent_match[0] == patent_to_match:
match_found = True
success = success + 1
if not match_found:
logging.error("Did not find a match for %s, %s, %s" % (first_name, last_name, patent_to_match))
errors = errors + 1
logging.info("Total Patents: %d" % count)
logging.info("Patents ran successfully: %d" % success)
logging.info("Patents FAILED: %d" % errors)
| 2.765625 | 3 |
examples/generate_soundscapes/generate_eval_var_onset.py | turpaultn/DESED | 69 | 12789843 | <reponame>turpaultn/DESED<filename>examples/generate_soundscapes/generate_eval_var_onset.py
# -*- coding: utf-8 -*-
import functools
import glob
import logging
import time
import argparse
import os.path as osp
import pandas as pd
from pprint import pformat
from desed.generate_synthetic import SoundscapesGenerator
from desed.utils import create_folder, modify_fg_onset, modify_jams
from desed.post_process import rm_high_polyphony, post_process_txt_labels
from desed.logger import create_logger
if __name__ == "__main__":
LOG = create_logger(__name__, terminal_level=logging.INFO)
LOG.info(__file__)
t = time.time()
parser = argparse.ArgumentParser()
parser.add_argument(
"--fg_folder",
type=str,
required=True,
help="the foreground folder (in which there are subfolders for each class, "
"here consider using foreground_on_off",
)
parser.add_argument("--bg_folder", type=str, required=True)
parser.add_argument("--out_folder", type=str, required=True)
parser.add_argument(
"--out_tsv_folder",
type=str,
default=osp.join(
"..", "..", "data", "generated", "soundscapes_generated_var_onset"
),
)
parser.add_argument("--number", type=int, default=1000)
args = parser.parse_args()
pformat(vars(args))
# General output folder, in args
base_out_folder = args.out_folder
create_folder(base_out_folder)
out_tsv_folder = args.out_tsv_folder
create_folder(out_tsv_folder)
# ################
# Varying onset of a single event
# ###########
# SCAPER SETTINGS
clip_duration = 10.0
sg = SoundscapesGenerator(
duration=clip_duration, fg_folder=args.fg_folder, bg_folder=args.bg_folder
)
n_soundscapes = args.number
source_time_dist = "const"
source_time = 0.0
event_duration_dist = "uniform"
event_duration_min = 0.25
event_duration_max = 10.0
snr_dist = "uniform"
snr_min = 6
snr_max = 30
pitch_dist = "uniform"
pitch_min = -3.0
pitch_max = 3.0
time_stretch_dist = "uniform"
time_stretch_min = 1
time_stretch_max = 1
event_time_dist = "truncnorm"
event_time_mean = 0.5
event_time_std = 0.25
event_time_min = 0.25
event_time_max = 0.750
out_folder_500 = osp.join(base_out_folder, "500ms")
create_folder(out_folder_500)
sg.generate(
n_soundscapes,
out_folder_500,
min_events=1,
max_events=1,
labels=("choose", []),
source_files=("choose", []),
sources_time=(source_time_dist, source_time),
events_start=(
event_time_dist,
event_time_mean,
event_time_std,
event_time_min,
event_time_max,
),
events_duration=(event_duration_dist, event_duration_min, event_duration_max),
snrs=(snr_dist, snr_min, snr_max),
pitch_shifts=(pitch_dist, pitch_min, pitch_max),
time_stretches=(time_stretch_dist, time_stretch_min, time_stretch_max),
txt_file=True,
)
rm_high_polyphony(out_folder_500, 2)
out_tsv = osp.join(out_tsv_folder, "500ms.tsv")
post_process_txt_labels(
out_folder_500, output_folder=out_folder_500, output_tsv=out_tsv
)
# Generate 2 variants of this dataset
jams_to_modify = glob.glob(osp.join(out_folder_500, "*.jams"))
    # Be careful: if you change the added onset values below, you may want to rerun
    # the annotation post-processing to be sure there is no inconsistency.
# 5.5s onset files
out_folder_5500 = osp.join(base_out_folder, "5500ms")
add_onset = 5.0
modif_onset_5s = functools.partial(modify_fg_onset, slice_seconds=add_onset)
modify_jams(jams_to_modify, modif_onset_5s, out_folder_5500)
# we also need to generate a new DataFrame with the right values
df = pd.read_csv(out_tsv, sep="\t")
df["onset"] += add_onset
df["offset"] = df["offset"].apply(lambda x: min(x, add_onset))
df.to_csv(
osp.join(out_tsv_folder, "5500ms.tsv"),
sep="\t",
float_format="%.3f",
index=False,
)
# 9.5s onset files
out_folder_9500 = osp.join(base_out_folder, "9500ms")
add_onset = 9.0
    modif_onset_9s = functools.partial(modify_fg_onset, slice_seconds=add_onset)
    modify_jams(jams_to_modify, modif_onset_9s, out_folder_9500)
df = pd.read_csv(out_tsv, sep="\t")
df["onset"] += add_onset
df["offset"] = df["offset"].apply(lambda x: min(x, add_onset))
df.to_csv(
osp.join(out_tsv_folder, "9500ms.tsv"),
sep="\t",
float_format="%.3f",
index=False,
)
| 2.203125 | 2 |
Python/easy/e308.py | tlgs/dailyprogrammer | 4 | 12789844 | <reponame>tlgs/dailyprogrammer<gh_stars>1-10
# 27/03/2017
grid = ["########=####/#",
"# | #",
"# # #",
"# # #",
"####### #",
"# _ #",
"###############"]
grid = [[c for c in grid[y]] for y in range(0, len(grid))]
coords = [(1, 1), (1, 2), (1, 3), (5, 6), (4, 2), (1, 1),
(1, 2), (5, 5), (5, 5), (9, 1), (7, 5), (2, 2)]
def update_grid(x, y):
rules = {'S': 'F', ' ': 'S'}
grid[y][x] = rules[grid[y][x]] if grid[y][x] not in "F#|/=_" else grid[y][x]
for y in range(0, len(grid)):
for x in range(0, len(grid[0])):
if grid[y][x] not in 'F_':
continue
try:
if grid[y][x] == '_' and not any(['F' in [grid[y][x-1], grid[y][x+1], grid[y-1][x], grid[y+1][x]]]):
continue
else:
grid[y][x-1] = 'F' if grid[y][x-1] == 'S' else grid[y][x-1]
grid[y][x+1] = 'F' if grid[y][x+1] == 'S' else grid[y][x+1]
grid[y-1][x] = 'F' if grid[y-1][x] == 'S' else grid[y-1][x]
grid[y+1][x] = 'F' if grid[y+1][x] == 'S' else grid[y+1][x]
except:
continue
for c in coords:
update_grid(*c)
print('\n'.join([''.join(l) for l in grid])) | 3.015625 | 3 |
cgitize/git.py | egor-tensin/cgit-repos | 2 | 12789845 | # Copyright (c) 2021 <NAME> <<EMAIL>>
# This file is part of the "cgitize" project.
# For details, see https://github.com/egor-tensin/cgitize.
# Distributed under the MIT License.
from contextlib import contextmanager
import os
from cgitize import utils
GIT_ENV = os.environ.copy()
GIT_ENV['GIT_SSH_COMMAND'] = 'ssh -oBatchMode=yes -oLogLevel=QUIET -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null'
class Config:
def __init__(self, path):
self.path = path
def exists(self):
return os.path.exists(self.path)
def open(self, mode='r'):
return open(self.path, mode=mode, encoding='utf-8')
def read(self):
with self.open(mode='r') as fd:
return fd.read()
def write(self, contents):
with self.open(mode='w') as fd:
fd.write(contents)
@contextmanager
def backup(self):
old_contents = self.read()
try:
yield old_contents
finally:
self.write(old_contents)
    # What follows is an extremely loose interpretation of what the .gitconfig
# syntax is. The source was git-config(1).
class Section:
def __init__(self, name, variables):
Config.Section.validate_name(name)
self.name = name
self.variables = variables
@staticmethod
def validate_name(name):
if not name:
raise RuntimeError('section names cannot be empty')
for c in name:
if c.isalnum() or c == '-' or c == '.':
continue
raise RuntimeError(f'section names must only contain alphanumeric characters, . or -: {name}')
@staticmethod
def format_name(name):
return name
def format(self):
result = f'[{self.format_name(self.name)}]\n'
result += ''.join((var.format() for var in self.variables))
return result
class Subsection:
def __init__(self, section, name, variables):
Config.Section.validate_name(section)
Config.Subsection.validate_name(name)
self.section = section
self.name = name
self.variables = variables
@staticmethod
def validate_name(name):
if '\n' in name:
raise RuntimeError(f'subsection names cannot contain newlines: {name}')
def format_name(self):
name = self.name
# Escape the backslashes:
name = name.replace('\\', '\\\\')
# Escape the quotes:
name = name.replace('"', '\\"')
# Put in quotes:
return f'"{name}"'
def format(self):
result = f'[{Config.Section.format_name(self.section)} {self.format_name()}]\n'
result += ''.join((var.format() for var in self.variables))
return result
class Variable:
def __init__(self, name, value):
Config.Variable.validate_name(name)
Config.Variable.validate_value(value)
self.name = name
self.value = value
@staticmethod
def validate_name(name):
if not name:
raise RuntimeError('variable names cannot be empty')
for c in name:
if c.isalnum() or c == '-':
continue
raise RuntimeError(f'variable name can only contain alphanumeric characters or -: {name}')
if not name[0].isalnum():
raise RuntimeError(f'variable name must start with an alphanumeric character: {name}')
@staticmethod
def validate_value(value):
pass
def format_name(self):
return self.name
def format_value(self):
value = self.value
# Escape the backslashes:
value = value.replace('\\', '\\\\')
# Escape the supported escape sequences (\n, \t and \b):
value = value.replace('\n', '\\n')
value = value.replace('\t', '\\t')
value = value.replace('\b', '\\b')
# Escape the quotes:
value = value.replace('"', '\\"')
# Put in quotes:
value = f'"{value}"'
return value
def format(self):
return f' {self.format_name()} = {self.format_value()}\n'
class Git:
EXE = 'git'
@staticmethod
def check(*args, **kwargs):
return utils.try_run(Git.EXE, *args, env=GIT_ENV, **kwargs)
@staticmethod
def capture(*args, **kwargs):
return utils.try_run_capture(Git.EXE, *args, env=GIT_ENV, **kwargs)
@staticmethod
def get_global_config():
return Config(os.path.expanduser('~/.gitconfig'))
@staticmethod
@contextmanager
def setup_auth(repo):
if not repo.url_auth:
yield
return
config = Git.get_global_config()
with utils.protected_file(config.path):
with config.backup() as old_contents:
variables = [Config.Variable('insteadOf', repo.clone_url)]
subsection = Config.Subsection('url', repo.clone_url_with_auth, variables)
new_contents = f'{old_contents}\n{subsection.format()}'
config.write(new_contents)
yield
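# Illustrative sketch of the snippet that setup_auth() temporarily appends to
# ~/.gitconfig (the URLs here are made-up examples, not real endpoints):
#
#     var = Config.Variable('insteadOf', 'https://example.com/repo.git')
#     sub = Config.Subsection('url', 'https://user:token@example.com/repo.git', [var])
#     sub.format()
#     # [url "https://user:token@example.com/repo.git"]
#     #  insteadOf = "https://example.com/repo.git"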
| 2.390625 | 2 |
problem/nearby_zips/tsp/tsp_test.py | jhanley634/testing-tools | 0 | 12789846 | <reponame>jhanley634/testing-tools<gh_stars>0
# Copyright 2018 <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
import unittest
from geopy.distance import distance
from problem.nearby_zips.tsp.tsp import PlaceGroup
class TspTest(unittest.TestCase):
def test_distance(self):
sfo = 37.619, -122.375
sjc = 37.363, -121.929
self.assertEqual(48610, int(distance(sfo, sjc).m))
def test_find_origin(self):
pg = PlaceGroup()
locs = [loc for loc, _ in pg.places_with_description]
self.assertEqual((37, -122), tuple(map(round, pg._find_origin(locs))))
| 1.960938 | 2 |
dtables/funs.py | mianos/dtables | 2 | 12789847 | <filename>dtables/funs.py
import logging
from flask import request
logger = logging.getLogger(__name__)
def vhandler(query, all_tables, dtable, manual_map=False, count=None):
# initialise the reply with the draw item from the request as datatables wants it back
reply = dict(draw=int(request.args['draw']), data=[])
if manual_map is not True:
columns = list()
column_names = list()
for col in dtable.dt_data_columns():
if '__' in col:
table, column = col.split('__')
if column not in all_tables[table].columns:
logger.error("Column missing, check metadata.tables[table].columns")
continue
columns.append(all_tables[table].columns[column])
column_names.append(col)
else:
# no table name, underlying expression column or something else aliased
# so don't add the column and assume the developer puts it somewhere in the query
continue
existing_query_columns = set(ii['name'] for ii in query.column_descriptions)
for col, name in zip(columns, column_names):
if name in existing_query_columns:
continue
query = query.add_columns(col.label(name))
sortCol = request.args.get('sortCol', None)
if sortCol:
scol = dtable.columns[int(sortCol)]
col = next(dd['expr'] for dd in query.column_descriptions if dd['name'] == scol[0])
if request.args.get('sortDir', 'asc') == 'desc':
query = query.order_by(col.expression.desc())
else:
query = query.order_by(col.expression)
else:
for cname, dtcol in dtable.columns:
if 'hidden' not in dtcol.options:
for dd in query.column_descriptions:
if dd['name'] == cname:
col = dd['expr']
break
else:
logger.error("Column %s 'missing' - class fields are of the form <table>__<field> where __ is a double underscore" % cname)
continue
if request.args.get('sortDir', 'asc') == 'desc':
query = query.order_by(col.desc())
else:
query = query.order_by(col.asc())
break
reply['recordsTotal'] = count if count else query.count()
reply['recordsFiltered'] = reply['recordsTotal']
items = query.offset(int(request.args['start']))
if 'length' in request.args:
try:
ll = int(request.args['length'])
if ll > 0:
items = items.limit(ll)
except ValueError:
pass
item_data = list()
for item in items:
ff = dtable.dt_map_columns(item._asdict())
item_id = str(dtable.dt_item_id(item))
ff['DT_RowId'] = item_id
ff['DT_RowData'] = dict(pkey=item_id)
item_data.append(ff)
reply['data'] = item_data
return reply
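# Illustrative usage sketch from a Flask view (User, UserTable, `session` and
# `metadata` are hypothetical SQLAlchemy/dtables objects, not defined in this module):
#
#     @app.route('/users/data')
#     def users_data():
#         query = session.query(User.id.label('users__id'))
#         return jsonify(vhandler(query, metadata.tables, UserTable()))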
| 2.3125 | 2 |
pynfold/error_calculation.py | vincecr0ft/pynFold | 3 | 12789848 | <filename>pynfold/error_calculation.py
from scipy.linalg import svd, inv
from numpy.linalg import LinAlgError
import numpy as np
import logging
def variance_of_matrix(A):
# -------------------------------- #
# variance is diagonal of (A'A)^-1 #
# From SVD: #
# A = U.S.V' #
# A'A = VSU'USV' #
# since S is diagonal and U is orth#
# A'A = VS^2V' #
# (A'A)^-1 = VS^-2V' #
# -------------------------------- #
U, S, VT = svd(A)
S2 = np.diag(S * S)
V = VT.T
try:
covar = V.dot(inv(S2)).dot(VT)
except LinAlgError:
logging.info('singular value matrix det is 0')
logging.info('using pseudo inverse for error')
covar = V.dot(np.linalg.pinv(S2)).dot(VT)
return np.diag(covar)
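# Quick self-check (illustrative sketch): for a well-conditioned square matrix
# the SVD route above should agree with the diagonal of inv(A'A) taken directly.
#
#     A = np.array([[2.0, 0.0], [1.0, 3.0]])
#     assert np.allclose(variance_of_matrix(A), np.diag(inv(A.T.dot(A))))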
| 2.5625 | 3 |
compiler_gym/views/observation.py | LearnCV/CompilerGym | 0 | 12789849 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Dict, List
from gym.spaces import Space
from compiler_gym.service import observation2py, observation_t
from compiler_gym.service.proto import Observation, ObservationRequest, ObservationSpace
from compiler_gym.views.observation_space_spec import ObservationSpaceSpec
class ObservationView(object):
"""A view into the available observation spaces of a service.
Example usage:
>>> env = gym.make("llvm-v0")
>>> env.reset()
>>> env.observation.spaces.keys()
["Autophase", "Ir"]
>>> env.observation.spaces["Autophase"].space
Box(56,)
>>> env.observation["Autophase"]
[0, 1, ..., 2]
>>> observation["Ir"]
int main() {...}
"""
def __init__(
self,
get_observation: Callable[[ObservationRequest], Observation],
spaces: List[ObservationSpace],
):
if not spaces:
raise ValueError("No observation spaces")
self.spaces = {
s.name: ObservationSpaceSpec.from_proto(i, s) for i, s in enumerate(spaces)
}
self.session_id = -1
self._get_observation = get_observation
self._base_spaces: Dict[str, Space] = {}
self._translate_cbs: Dict[str, Callable[[observation_t], observation_t]] = {}
def __getitem__(self, observation_space: str) -> observation_t:
"""Request an observation from the given space.
:param observation_space: The observation space to query.
:return: An observation.
:raises KeyError: If the requested observation space does not exist.
"""
request = ObservationRequest(
session_id=self.session_id,
observation_space=self.spaces[observation_space].index,
)
return self.translate(
observation_space,
observation2py(
self._base_spaces.get(
observation_space, self.spaces[observation_space].space
),
self._get_observation(request),
),
)
# TODO(cummins): Register an opaque_data_format handler that replaces the
# "Space" and updates observation2py / observation2str.
def register_derived_space(
self,
base_name: str,
derived_name: str,
derived_space: Space,
cb: Callable[[observation_t], observation_t],
) -> None:
"""Add a hook for implementing derived observation spaces.
Subclasses of ObservationView call this method in their
:code:`__init__()` after initializing the base class to register new
observation spaces that are derived from those provided by the
CompilerService.
Example usage:
Suppose we have a service that provides a "src" observation space
that returns a string of source code. We want to create a new
observation space, "src_len", that returns the length of the source
code. We do this by calling :code:`register_derived_space()` and
providing the a callback to translate from the base observation space
to the derived value:
.. code-block:: python
class MyObservationView(ObservationView):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_derived_space(
derived_name="src_len",
base_name="src",
derived_space=Box(low=0, high=float("inf"), shape=(1,), dtype=int),
                        cb=lambda src: [len(src)],
)
Now we can request for "src_len" observation space and receive
observations from this new derived space.
>>> env.observation["src_len"]
[1021,]
:param base_name: The name of the observation space that this new
observation space is derived from.
:param derived_name: The name of the derived observation space
"""
base_spec = self.spaces[base_name]
spec = ObservationSpaceSpec(id=derived_name, space=derived_space)
spec.index = base_spec.index
spec.deterministic = base_spec.deterministic
spec.platform_dependent = base_spec.platform_dependent
self.spaces[derived_name] = spec
self._translate_cbs[derived_name] = cb
def __repr__(self):
return f"ObservationView[{', '.join(sorted(self.spaces.keys()))}]"
def translate(
self, observation_space: str, observation: observation_t
) -> observation_t:
"""Translate an observation according to the space.
        This method translates the value returned by a CompilerService according
to any derived observation spaces, as registered using
register_derived_space(). If the requested observation space is not
derived the observation is returned unmodified.
:param observation_space: The name of the observation space.
:param observation: An observation returned by a CompilerService.
:return: An observation, after applying any derived space translations.
"""
return self._translate_cbs.get(observation_space, lambda x: x)(observation)
| 2.453125 | 2 |
django_filters/__init__.py | buriy/django-filter | 0 | 12789850 | <reponame>buriy/django-filter
from filterset import FilterSet
from filters import *
| 0.996094 | 1 |