blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
176d31a4c77d9a9650c5c3e08607b1e8d08aefbe | 4f792275d800f90501615de3432768fbc0f88839 | /catkin_ws/build/catkin_generated/stamps/Project/_setup_util.py.stamp | c61717a79d66a52a300918a90a7f61a3d092de17 | [] | no_license | poolec4/rbe500_team2_pa3 | dbc80646f2f6227523be5e7e1653b1a9e941aec4 | 22c5cde68416ab8d87497ad868cec29eb01d7299 | refs/heads/master | 2023-06-26T23:13:03.096721 | 2021-07-28T00:10:22 | 2021-07-28T00:10:22 | 380,965,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,357 | stamp | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file generates shell code for the setup.SHELL scripts to set environment variables."""
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
# while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
# since Windows finds dll's via the PATH variable, prepend it with path to lib
PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': PATH_TO_ADD_SUFFIX,
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
"""
    Generate shell code to reset environment variables
    by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
"""
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
"""
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
"""
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
"""
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
"""
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
"""Generate shell code to prepend environment variables for the all workspaces."""
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
"""
    Return the prefix to prepend to the environment variable NAME,
    adding any path in NEW_PATHS_STR without creating duplicate or empty items.
"""
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
"""Generate shell code with found environment hooks for the all workspaces."""
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = r'/home/chris/rbe500_team2_pa3/catkin_ws/devel;/opt/ros/melodic'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| [
"[email protected]"
] | |
73829092e2f3d7817b1fb45bfadc46dc178673f0 | b3943565e361393488764718d72c1e480ad1afdd | /algorithms/Problem_Solving_Algorithms_Data Structures/sorting_and_search/bubble_sort.py | ec2b110441127d45296bb6055f4ae7d9277f6679 | [] | no_license | DerevenetsArtyom/pure-python | 1f48f83c7475dfbbc28b71c97b81126246d8ffc8 | 186b9ed52962158d8c9f2c4dddeff5a7e7c5369c | refs/heads/master | 2021-04-09T13:47:55.743862 | 2018-04-01T17:05:49 | 2018-04-01T17:05:49 | 125,518,923 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | def bubble_sort(arr):
"""
    Regardless of the initial order of the elements, a list of
    n elements takes n - 1 passes.
"""
    # shrink the scanned range each pass by moving the right bound inward,
    # but that does not reduce the O(n^2) worst-case complexity
for right_bound in range(len(arr) - 1, 0, -1):
for i in range(right_bound):
if arr[i] > arr[i + 1]:
arr[i], arr[i + 1] = arr[i + 1], arr[i]
lst = [11, 1, 3, 4, 2, 66, 8]
bubble_sort(lst)
assert lst == [1, 2, 3, 4, 8, 11, 66]
"""
Однако, поскольку пузырьковая сортировка делает проход по всей несортированной
части списка, она умеет то, что не могут большинство сортировочных алгоритмов.
В частности, если во время прохода не было сделано ни одной перестановки,
то мы знаем, что список уже отсортирован.
Таким образом, алгоритм может быть модифицирован, чтобы останавливаться раньше,
если обнаруживает, что задача выполнена.
Т.е. для списков, которым нужно всего несколько проходов, пузырьковая сортировка
имеет преимущество, поскольку умеет распознать сортированный список
и остановиться.
Эту модификацию, которую часто называют коротким пузырьком.
"""
def short_bubble_sort(arr):
exchanges = True # keep track of made exchanges
right_bound = len(arr) - 1
while right_bound > 0 and exchanges:
exchanges = False # hope that elements will not exchange
for i in range(right_bound):
if arr[i] > arr[i + 1]:
exchanges = True
arr[i], arr[i + 1] = arr[i + 1], arr[i]
right_bound -= 1
alist = [20, 30, 40, 90, 50, 60, 70, 80, 100, 110]
short_bubble_sort(alist)
assert alist == [20, 30, 40, 50, 60, 70, 80, 90, 100, 110]
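
# A hedged illustration (not in the original file): on an already-sorted
# list the short bubble exits after a single pass with no exchanges,
# while plain bubble_sort still makes n - 1 passes.
already_sorted = list(range(10))
short_bubble_sort(already_sorted)
assert already_sorted == list(range(10))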
| [
"[email protected]"
] | |
992b3eeeeb12af1c4f87a8ebc9d1a998c7f36590 | 3d4cdbca777ad6c2b86ab0e463090484a184455a | /geoload1.py | 546bb52de2b0fcd9bb5dcfd841ed1cf3cf70971f | [] | no_license | samironta/pythonplay | 388900b9c31a99afcee23fd4e16ff5b78e75fd88 | 4e29c7e18daf149ed0da219a3cb3f4d673a1e0c5 | refs/heads/main | 2023-04-22T21:00:03.609072 | 2021-05-12T10:28:40 | 2021-05-12T10:28:40 | 366,677,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | import urllib.request, urllib.parse, urllib.error
import http
import sqlite3
import json
import time
import ssl
import sys
api_key = False
# If you have a Google Places API key, enter it here
# api_key = 'AIzaSy___IDByT70'
if api_key is False:
api_key = 42
serviceurl = "http://py4e-data.dr-chuck.net/json?"
else :
serviceurl = "https://maps.googleapis.com/maps/api/geocode/json?"
# Additional detail for urllib
# http.client.HTTPConnection.debuglevel = 1
conn = sqlite3.connect('geodata.sqlite')
cur = conn.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS Locations (address TEXT, geodata TEXT)''')
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
fh = open("where1.data")
count = 0
for line in fh:
if count > 200 :
print('Retrieved 200 locations, restart to retrieve more')
break
address = line.strip()
print('')
cur.execute("SELECT geodata FROM Locations WHERE address= ?",
(memoryview(address.encode()), ))
try:
data = cur.fetchone()[0]
print("Found in database ",address)
continue
except:
pass
parms = dict()
parms["address"] = address
if api_key is not False: parms['key'] = api_key
url = serviceurl + urllib.parse.urlencode(parms)
print('Retrieving', url)
uh = urllib.request.urlopen(url, context=ctx)
data = uh.read().decode()
print('Retrieved', len(data), 'characters', data[:20].replace('\n', ' '))
count = count + 1
try:
js = json.loads(data)
except:
print(data) # We print in case unicode causes an error
continue
if 'status' not in js or (js['status'] != 'OK' and js['status'] != 'ZERO_RESULTS') :
print('==== Failure To Retrieve ====')
print(data)
break
cur.execute('''INSERT INTO Locations (address, geodata)
VALUES ( ?, ? )''', (memoryview(address.encode()), memoryview(data.encode()) ) )
conn.commit()
if count % 10 == 0 :
print('Pausing for a bit...')
time.sleep(5)
print("Run geodump.py to read the data from the database so you can vizualize it on a map.")
| [
"[email protected]"
] | |
72274c90e2fd879e0b7587929824304189aaf536 | a89c67e57352448941f14e4441ca09bc75d819f9 | /Support Vector Regression (SVR)/suvecreg.py | 5af8e4c98759e01ee85403d3af0b4b763b52af99 | [] | no_license | SaiSiddhanthGujjari/Regression-Models | 1345e1f86440f34a137594fe340b1ab73357831c | 5f043dfadd42f8c08b8c213a73cb9e3f1fdf84ae | refs/heads/master | 2020-12-18T09:43:29.349213 | 2020-01-21T12:31:32 | 2020-01-21T12:41:47 | 235,333,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 18 22:08:42 2018
@author: Sai Siddhanth
"""
#Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
#Creating matrix of features and an dependent vector
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:,2].values
#Splitting the dataset into training set and test set
"""from sklearn.cross_validation import train_test_split
train_X,test_X,train_y,test_y = train_test_split(X,y , test_size = 0.2, random_state = 0)"""
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y.reshape(-1, 1)).ravel()  # the scaler expects a 2D array
#Fitting SVR to the dataset
from sklearn.svm import SVR
regressor = SVR(kernel = 'rbf')
regressor.fit(X,y)
#Predicting a new result with SVR (scale the input, then invert the target
#scaling so the prediction comes back in the original salary units)
y_pred = sc_y.inverse_transform(
    regressor.predict(sc_X.transform([[6.5]])).reshape(-1, 1)
)
#Visualizing the SVR results
plt.scatter(X, y, color = 'red')
plt.plot(X, regressor.predict((X)), color = 'blue')
plt.title('Level vs Salary (Regression)')
plt.xlabel('Level')
plt.ylabel('Salary')
plt.show()
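
#A hedged extra (not in the original): re-plot on a denser grid for a
#smoother SVR curve; note that X is already feature-scaled at this point
X_grid = np.arange(X.min(), X.max(), 0.01).reshape(-1, 1)
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Level vs Salary (SVR, dense grid)')
plt.xlabel('Level')
plt.ylabel('Salary')
plt.show()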
| [
"[email protected]"
] | |
0b34c179615c5c47fadc78c555343f10233332c6 | 0baa5beb0ca5044b006b3b1aa67fd5042334720d | /webserver/serverapp/migrations/0006_auto_20170319_2206.py | e0c0fbf8d4cac3480bded23d47ab9ef86a7206a9 | [] | no_license | meddeb9989/tntwebserver | 20791d29ba45705234ae3679e139ca6126e0233b | 6526a36994288b53290d0149c2c97e9d67b8b3cb | refs/heads/master | 2021-01-20T10:21:27.527155 | 2017-12-04T16:05:06 | 2017-12-04T16:05:06 | 83,928,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-19 22:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('serverapp', '0005_transaction_confirmed'),
]
operations = [
migrations.AlterField(
model_name='transaction',
name='confirmed',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
1f126186de13c9e59d1d5342af2047ea667277f9 | 009594f6a84c9fc66e4be6a60d59c8d2c7c5aed8 | /PythonThings/ElseWithLoops.py | 136a35c6bb3baf7713c2f7cb688a4743307d61ef | [] | no_license | cyperh17/python | c8c04877a6007656d3a19b471c17a9720e68cc91 | 2279c630e02cf6a4eaef2a8f65a665ce246bd8a2 | refs/heads/master | 2021-01-24T09:31:17.241324 | 2016-09-28T15:28:44 | 2016-09-28T15:28:44 | 68,818,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | l = range(0, 10)
#for
for i in l:
print(i, end=' ')
if(i == 3):
        break  # break fires, so the else clause below is skipped
else:  # also called 'no break': else runs only when the loop finishes without break
    print('\nHit the for/else statement!')
print('\n')
#while
i = 0
while i < (len(l) - 1):
print(i, end=' ')
i += 1
if(i == 3):
        break  # break fires, so the else clause below is skipped
else:  # also called 'no break': else runs only when the loop finishes without break
    print('\nHit the while/else statement!')
print('\n')
#example
def find_index(to_search, target):
for i, value in enumerate(to_search):
if(value == target):
break
else:
return -1
return i
l = ['a', 'b', 'c', 'd']
ind_one = find_index(l, 'd')
ind_two = find_index(l, 'ddd')
print(ind_one)
print(ind_two) | [
"[email protected]"
] | |
96bd79305b964700660216a7aff3846297cd700f | 055e3b34d8f288d354c6a09e75f14bc1584bf2eb | /text (laba 4,5,6)/laba5.py | 669de58713ebe210092b14aa0153fe18127b2469 | [] | no_license | jmaynard-n/oavi | c55f2e897305bf41e1222d4e725cf9055bfa35ec | 042b2eb31f3fac964fb0017a036ee3389a71a815 | refs/heads/master | 2021-01-04T18:04:45.371911 | 2020-04-19T23:47:31 | 2020-04-19T23:47:31 | 240,701,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,275 | py | import csv
from bin import Thresholding_Otsu as otsu
from semi import semitone
from PIL import Image
import numpy as np
from profile import profiles, prof_axis
from croping import crop
from segmentation import segment
from generator import str_gen
import os
import shutil
def imprt(file):
img = Image.open(file, 'r')
img = img.convert("RGB")
img = otsu(semitone(img))
return img
def set_dirs(i):
if os.path.exists("./phrases/" + i):
shutil.rmtree("./phrases/" + i)
os.mkdir("./phrases/" + i)
if os.path.exists("./phrases/" + i + "/smbls"):
shutil.rmtree("./phrases/" + i + "/smbls")
os.mkdir("./phrases/" + i + "/smbls")
if os.path.exists("./phrases/" + i + "/prof_x"):
shutil.rmtree("./phrases/" + i + "/prof_x")
os.mkdir("./phrases/" + i + "/prof_x")
if os.path.exists("./phrases/" + i + "/prof_y"):
shutil.rmtree("./phrases/" + i + "/prof_y")
os.mkdir("./phrases/" + i + "/prof_y")
phrases = ["HEY WHERE DO YOU GET IT FROM","ABCDEFGHIJKLMNOPQRSTUVWXYZ", "ALEJANDRO WAS FILLED WITH THE TRANSCENDENCE OF WHAT WAS HAPPENING AND KNEW THE MEANING OF LIFE","BUT VIXEN IS PIQUED", "WHAT GOES UP MUST GO DOWN", "GRAPE JELLY WAS TASTY", "LOVE IS NOT LIKE PIZZA"]
# with open('features.csv') as f:
# reader = csv.reader(f, delimiter=';')
# gold = list(reader)
# gold.pop(0)
# alf = {key: val for key, val in enumerate("ABCDEFGHIJKLMNOPQRSTUVWXYZ")}
for i in range(1, 2):
str_gen(phrases[i - 1], str(i))
new = imprt("phrase" + str(i) + ".png")
set_dirs(str(i))
new.save("./phrases/" + str(i) + "/" + "phrase" +".bmp")
map = np.asarray(new, dtype=np.uint8)
px = map.reshape((new.height, new.width))
prof_axis(px, str(i * 1000), 0, "./phrases/" + str(i) + "/prof")
prof_axis(px, str(i * 1000), 1, "./phrases/" + str(i) + "/prof")
px = crop(px, [0])
# profiles(None, px, i * 1000, "./phrases/" + str(i) + "/prof")
pr_x = prof_axis(px, str(i), 0, None)
coords = segment(pr_x)
print(coords)
k = 1;
for c in coords:
new = Image.fromarray(px[:, c[2] : c[3]], "L")
new.save("./phrases/" + str(i) + "/smbls/" + str(k) + ".png")
profiles(None, px[:, c[2] : c[3]], k, "./phrases/" + str(i) + "/prof")
k+=1
| [
"[email protected]"
] | |
5f967271f6f88bdf986b54e47f62f5bcfbb94893 | e8421a737d596d757e617087ae549505cacafa52 | /tests/api_utils.py | 13956f212fcc22af4b0060daad3f17be59a3667d | [] | no_license | webclinic017/valkyrie-2 | e6e533aedb39bbf98833e0ebea55446e1088f795 | 76e7c1ba31387fdf1982e2ad0ff579c8c41f061e | refs/heads/master | 2023-04-03T03:14:59.941236 | 2021-04-18T16:38:51 | 2021-04-18T16:38:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | import os
from datetime import datetime
from typing import List
from uuid import uuid4
from backtrader import TimeFrame
from valkyrie.api import api
from valkyrie.configs import BrokerConfig, MarketConfig
def use_api(
graph: List[dict],
task_id: str = str(uuid4()),
broker_config: BrokerConfig = BrokerConfig(cash=100_000_000),
market_config: MarketConfig = MarketConfig(
from_date=datetime(year=2014, month=1, day=1),
to_date=datetime(year=2014, month=1, day=4),
timeframe=TimeFrame.Minutes,
symbols=["A", "F"],
),
env: dict = {"unittest": True},
):
return api(
graph=graph,
task_id=task_id,
broker_config=broker_config,
market_config=market_config,
env=env,
)
def use_market_node(id: str):
return {
"node_str": "market",
"module_str": "market",
"id": id,
"type": "MARKET_NODE",
"parameters": {},
"inputs": {},
"outputs": ["open", "high", "low", "close", "volume"],
}
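
# A hedged usage sketch (node wiring is assumed, not from this file):
# result = use_api(graph=[use_market_node("market-1")])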
| [
"[email protected]"
] | |
86e3c088412f6fbe1a0e19035adc5ccd92678946 | 8bb4a472344fda15985ac322d14e8f4ad79c7553 | /Python3-Core/src/main/prompto/expression/IteratorExpression.py | b971b1ee518405148002356368f86332f182ab37 | [] | no_license | prompto/prompto-python3 | c6b356f5af30c6826730ba7f2ad869f341983a2d | 64bd3d97d4702cc912097d41d961f7ab3fd82bee | refs/heads/master | 2022-12-24T12:33:16.251468 | 2022-11-27T17:37:56 | 2022-11-27T17:37:56 | 32,623,633 | 4 | 0 | null | 2019-05-04T11:06:05 | 2015-03-21T07:17:25 | Python | UTF-8 | Python | false | false | 3,006 | py | from prompto.expression.IExpression import IExpression
from prompto.expression.ParenthesisExpression import ParenthesisExpression
from prompto.runtime.Variable import Variable
from prompto.error.InternalError import InternalError
from prompto.statement.UnresolvedCall import UnresolvedCall
from prompto.type.IteratorType import IteratorType
from prompto.value.IterableValue import IterableValue
class IteratorExpression(IExpression):
def __init__(self, name, source, exp):
self.name = name
self.source = source
self.expression = exp
def check(self, context):
elemType = self.source.check(context).checkIterator(context)
child = context.newChildContext()
context.registerValue(Variable(self.name, elemType))
itemType = self.expression.check(child)
return IteratorType(itemType)
def interpret(self, context):
elemType = self.source.check(context).checkIterator(context)
items = self.source.interpret(context)
length = items.getMemberValue(context, "count", False)
iterator = self.getIterator(context, items)
return IterableValue(context, self.name, elemType, iterator, length, self.expression)
def getIterator(self, context, src):
if getattr(src, "getIterator", None) is None:
raise InternalError("Should never get there!")
else:
return src.getIterator(context)
def toMDialect(self, writer):
expression = IteratorExpression.extractFromParenthesisIfPossible(self.expression)
expression.toDialect(writer)
writer.append(" for each ")
writer.append(self.name)
writer.append(" in ")
self.source.toDialect(writer)
def toODialect(self, writer):
expression = IteratorExpression.extractFromParenthesisIfPossible(self.expression)
expression.toDialect(writer)
writer.append(" for each ( ")
writer.append(self.name)
writer.append(" in ")
self.source.toDialect(writer)
writer.append(" )")
def toEDialect(self, writer):
expression = IteratorExpression.encloseInParenthesisIfRequired(self.expression)
expression.toDialect(writer)
writer.append(" for each ")
writer.append(self.name)
writer.append(" in ")
self.source.toDialect(writer)
@staticmethod
def encloseInParenthesisIfRequired(expression):
if IteratorExpression.mustBeEnclosedInParenthesis(expression):
return ParenthesisExpression(expression)
else:
return expression
@staticmethod
def mustBeEnclosedInParenthesis(expression):
return isinstance(expression, UnresolvedCall)
@staticmethod
def extractFromParenthesisIfPossible(expression):
if isinstance(expression, ParenthesisExpression):
if IteratorExpression.mustBeEnclosedInParenthesis(expression.expression):
return expression.expression
return expression
| [
"[email protected]"
] | |
091522fb133d1992f60b8ce09069a41aacd45df5 | 98fd03020731a52b2880c49494514ede718eeaf4 | /week1/DeBrujinGraph.py | 0de0c341ccf7cb986aef5a05861bce58b56b2a01 | [] | no_license | JoaoHenriqueOliveira/Bioinformatics-2 | 6e77eff53d49bcbda4686c92a852f34b5dbe45fe | 18ab53201a3e2057b60ff0fcfaf76242ae96d27e | refs/heads/master | 2022-12-16T04:40:51.129763 | 2020-10-01T15:05:26 | 2020-10-01T15:05:26 | 272,672,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import unittest
def DeBrujinGraph(k, text):
k, n, dic = k - 1, len(text), {}
res = [text[i:i + k] for i in range(n - k + 1)]
res.sort()
for elem in res:
dic[elem] = []
for j in range(n - k):
dic[text[j:j + k]].append(text[j + 1: j + k + 1])
for key in dic:
dic[key].sort()
return dic
def DeBrujin(patterns):
k = len(patterns[0])
graph = {}
for k_mer in patterns:
graph[k_mer[0:(k - 1)]] = []
graph[k_mer[1:k]] = []
for node in graph:
for k_mer in patterns:
if k_mer[0: (k - 1)] == node:
graph[node].append(k_mer[1:k])
return graph
if __name__ == "__main__":
print(DeBrujin(["CTTA","ACCA","TACC","GGCT","GCTT","TTAC"]))
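    # A hedged extra (k and text values assumed, not in the original):
    # the text-based builder defined above on a short string.
    print(DeBrujinGraph(4, "AAGATTCTCTAC"))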
pass
| [
"[email protected]"
] | |
2b03f14f9c80f1dfdc0bae01a6cc959c1db39ca5 | d84933816d3c9b05003198ccc6c8bcdd876a0690 | /pie_examples/old/game_proto1.py | 5a58cf76d960bc75aac7f2e395c0625d9ef2ef4d | [] | no_license | adoc/pieEngine | ae6cc8828c1217a226735fec3c995e06cf7b7baa | f799555ff5a6d86232e9b15408916b60079cd19f | refs/heads/master | 2020-05-20T18:49:13.449808 | 2015-05-11T08:39:05 | 2015-05-11T08:39:05 | 34,555,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,919 | py | # TODO: Broken!
import time
import pygame
from pygame.locals import *
from lanchester.model.side import Battalion
from pie_examples.ameiosis.sprite import Army
from pie_examples.ameiosis.game import Ameiosis as AmeosisBase
class Ameosis(AmeosisBase):
def __init__(self,*args, **kwa):
super(Ameosis, self).__init__(*args, **kwa)
self.__spawn_size = 1
self.__spawn_team = 0
self.events.bind(K_UP, self.__ev_inc_spawn_size)
self.events.bind(K_DOWN, self.__ev_dec_spawn_size)
self.events.bind(K_LEFT, self.__ev_dec_team)
self.events.bind(K_RIGHT, self.__ev_inc_team)
self.events.bind(K_SPACE, self.__ev_tog_simulate)
self.events.bind(MOUSEBUTTONDOWN, self.__ev_mouse_down)
def __ev_inc_spawn_size(self, ev):
self.spawn_size += 1
def __ev_dec_spawn_size(self, ev):
self.spawn_size -= 1
def __ev_inc_team(self, ev):
self.spawn_team += 1
def __ev_dec_team(self, ev):
self.spawn_team -= 1
def __ev_tog_simulate(self, ev):
self._simulate_battle = not self._simulate_battle
@property
def spawn_size(self):
return self.__spawn_size
@spawn_size.setter
def spawn_size(self, val):
if val > 10:
self.__spawn_size = 10
elif val < 1:
self.__spawn_size = 1
else:
self.__spawn_size = val
@property
def spawn_team(self):
return self.__spawn_team
@spawn_team.setter
def spawn_team(self, val):
if val > 1:
self.__spawn_team = 1
elif val < 0:
self.__spawn_team = 0
else:
self.__spawn_team = val
def __ev_mouse_down(self, ev):
if ev.button == 3: # Right-Click
army = Army(self.__spawn_size, self.__spawn_team, ev.pos)
army.battalion = Battalion(self.__spawn_size * 1000, .01)
self._armies_lanc_factions[self.__spawn_team].add_member(
army.battalion)
self._armies_sprites[self.__spawn_team].add(army)
self.__drag_handler.add(army)
def update(self):
super(Ameosis, self).update()
self._debug_lines.append(("Size (up/down): %s" % (self.__spawn_size), 1, (240, 240, 240)))
self._debug_lines.append(("Team (left/right): %s" % (self.__spawn_team), 1, (240, 240, 240)))
self._debug_lines.append(("Simulate (space): %s" % (self._simulate_battle), 1, (240, 240, 240)))
if __name__ == "__main__":
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((1024, 512))
game = Ameosis(screen, clock)
while not game.stopped:
t1 = time.time()
game.buffer()
game.update()
game.draw()
game.render()
# Will be refactored.
game.draw_debug(tick_time=time.time() - t1) | [
"[email protected]"
] | |
c45b2fb0ebebf62d046dfced155b98f95eed5dc9 | dc1b7ffbb86b12213ee37cbd64b27865055fe52f | /django/composeexample/settings.py | fbfdfd13a31470332f09a46529c8b04a35f51b37 | [
"MIT"
] | permissive | rsskga/lambdata-rsskga | cac3dbbd594d3a1057748abebac458c3eb33f5b3 | fe8898fd2574cad66c6e317221ea33e4d5b9e8f7 | refs/heads/master | 2022-12-11T09:56:07.876931 | 2019-10-30T23:00:57 | 2019-10-30T23:00:57 | 218,158,540 | 0 | 0 | MIT | 2022-12-08T06:48:49 | 2019-10-28T22:41:10 | Python | UTF-8 | Python | false | false | 3,161 | py | """
Django settings for composeexample project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@n9%e(p+y-)w2vvcg$zrojg4^$45_vfc_9szbz3&si$_*--1_9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'composeexample.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'composeexample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
81bda47956fbfa77abceafabf1ba04f77f849ac0 | 58412bd1c5364cd88205030de06cbbdc246b677c | /jupyter_file/hierarchical_helper.py | 827896481959b986e4b7f3f478195e75782344fc | [
"Apache-2.0"
] | permissive | caserwin/daily-learning-python | 764c7430a8fe5dbd2a8884a6a56d075d56203070 | da66c1c0309dc1052d00104b005e8dd11550613b | refs/heads/master | 2022-07-29T02:29:15.860159 | 2022-06-21T21:23:46 | 2022-06-21T21:23:46 | 119,244,311 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,901 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-05-15 11:14
# @Author : erwin
from collections import deque
import operator
class TreeNode(object):
def __init__(self, idx=-1, data=None, parrent_node=None):
self.idx = idx
self.parrent_node = parrent_node
self.data = data
self.left_node = None
self.right_node = None
def __str__(self):
return '<TreeNode: idx: %s, data: %s>' % (self.idx, str(self.data))
def isLeaf(self):
return not (self.left_node or self.right_node)
class ClusterHelper(object):
def __init__(self, linkage_tree, len):
self.linkage_tree = linkage_tree
self.maxnode = linkage_tree.maxnode
self.len = len
def toMap(self):
child_left, child_right, dist, cnt = self.linkage_tree.get_linkage(self.maxnode)
node_deque = deque([(self.maxnode, child_left), (self.maxnode, child_right)])
s = {}
while len(node_deque) > 0:
from_node, to_node = node_deque.popleft()
s[to_node] = from_node
if to_node >= self.len:
child_left, child_right, dist, cnt = self.linkage_tree.get_linkage(to_node)
node_deque.append((to_node, child_left))
node_deque.append((to_node, child_right))
return s
class HierarchicalHelper(object):
def __init__(self, linkage_tree):
"""
:param tree:
linkage_tree is dtaidistance.clustering import LinkageTree
"""
_, _, dist, cnt = linkage_tree.get_linkage(linkage_tree.maxnode)
self.root = TreeNode(idx=linkage_tree.maxnode, data=[dist, cnt])
self.linkage_tree = linkage_tree
self.idx_node_map = {}
self.buildTree(self.root, None, linkage_tree.maxnode)
def buildTree(self, tree_node, ptree_node, idx):
node_info = self.linkage_tree.get_linkage(idx)
if node_info is not None:
            # not a leaf node: it has children
child_left, child_right, dist, cnt = node_info[0], node_info[1], node_info[2], node_info[3]
if tree_node is None:
data = [dist, cnt]
tree_node = TreeNode(idx=idx, data=data, parrent_node=ptree_node)
self.idx_node_map[idx] = tree_node
            # build the left subtree
tree_node.left_node = self.buildTree(tree_node.left_node, tree_node, child_left)
            # build the right subtree
tree_node.right_node = self.buildTree(tree_node.right_node, tree_node, child_right)
return tree_node
else:
            # leaf node: no children and no distance/count payload
tree_node = TreeNode(idx=idx, parrent_node=ptree_node)
self.idx_node_map[idx] = tree_node
return tree_node
def iterTreePrint(self, node):
if node is not None:
print(node.idx)
self.iterTreePrint(node.left_node)
self.iterTreePrint(node.right_node)
def iterTree(self, node, res_ls):
if node.isLeaf():
res_ls.append(node)
else:
self.iterTree(node.left_node, res_ls)
self.iterTree(node.right_node, res_ls)
def getClusterByNum(self, tree_node, num, cluster_map):
if len(cluster_map) == 0:
cluster_map[tree_node] = tree_node.data[0]
if num == len(cluster_map):
return cluster_map.keys()
else:
            # each step: split the cluster with the largest merge distance in cluster_map
max_tree_node = max(cluster_map.items(), key=operator.itemgetter(1))[0]
lmax_tree_node = max_tree_node.left_node
rmax_tree_node = max_tree_node.right_node
cluster_map.pop(max_tree_node)
cluster_map[lmax_tree_node] = 0 if lmax_tree_node.isLeaf() else lmax_tree_node.data[0]
cluster_map[rmax_tree_node] = 0 if rmax_tree_node.isLeaf() else rmax_tree_node.data[0]
return self.getClusterByNum(tree_node, num, cluster_map)
def getClusterByDist(self, tree_node, dist, cluster_map):
if len(cluster_map) == 0:
cluster_map[tree_node] = tree_node.data[0]
        max_dist_node = max(cluster_map.items(), key=operator.itemgetter(1))[0]
        if dist > cluster_map[max_dist_node]:
return cluster_map.keys()
else:
            # each step: split the cluster with the largest merge distance in cluster_map
max_tree_node = max(cluster_map.items(), key=operator.itemgetter(1))[0]
lmax_tree_node = max_tree_node.left_node
rmax_tree_node = max_tree_node.right_node
cluster_map.pop(max_tree_node)
cluster_map[lmax_tree_node] = 0 if lmax_tree_node.isLeaf() else lmax_tree_node.data[0]
cluster_map[rmax_tree_node] = 0 if rmax_tree_node.isLeaf() else rmax_tree_node.data[0]
return self.getClusterByDist(tree_node, dist, cluster_map)
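
# A hedged usage sketch (the LinkageTree construction is assumed, not
# shown in this file):
# helper = HierarchicalHelper(linkage_tree)
# leaves = []
# helper.iterTree(helper.root, leaves)                   # collect leaf nodes
# clusters = helper.getClusterByNum(helper.root, 3, {})  # cut into 3 clusters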
| [
"[email protected]"
] | |
bd194fe30d0af9dac7e04d16d366e2ec158b15a3 | a738e9ea0012bf4303d3ada815d188cab7b9ce2a | /Day4/while_loop2.py | d62cd0c7fa5044c81e12e18f461d631b4f79f677 | [] | no_license | AzizUddin05/pythonBasic | a8efbe934286faedc6da763ca79744a326ecdb43 | cc1464422a9c4402d28ea4b1b43f0cdcf0c1cac2 | refs/heads/main | 2023-07-12T09:45:48.176180 | 2021-07-31T15:39:15 | 2021-07-31T15:39:15 | 391,391,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | i = 1
while i <= 100:
print(i, end="\t")
i = i + 1 | [
"[email protected]"
] | |
fe47bf42475c493e72dca25cccc333b60767f7e2 | b50bb55a909e726f9547677a72b74a3f3582730c | /qa_tools/__init__.py | e129a0a2ff502051ec372e24b613d769c3292744 | [
"MIT"
] | permissive | keithgroup/qa-atoms-dimers | d3fe81bed1d76265dde73f8857fd309bc1946559 | fff24f5658b45618de8f2dcb35b3b0b7e87898d0 | refs/heads/main | 2023-04-18T02:55:39.252076 | 2022-01-21T19:46:21 | 2022-01-21T19:46:21 | 421,457,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | # MIT License
#
# Copyright (c) 2021, Alex M. Maldonado
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| [
"[email protected]"
] | |
75e7b104f4a5d12e2b43805f5b4e15811699bd54 | ae8c1de8eea8e16f9f3b3b7d491fe242e7514e2e | /build/dynamixel-workbench-msgs/dynamixel_workbench_msgs/catkin_generated/pkg.installspace.context.pc.py | b83504a3c2f16a8f195b4851b97484b094bf68fd | [] | no_license | sjlazza/t3_mav_flight | 5209c3de198413366106ce10afc4943b44d4c5a1 | b2140d97c2ed6510d643c47fb47a365c29851859 | refs/heads/master | 2023-06-10T07:30:06.059231 | 2021-06-14T09:16:00 | 2021-06-14T09:16:00 | 374,551,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "dynamixel_workbench_msgs"
PROJECT_SPACE_DIR = "/home/mrl/catkin_ws/install"
PROJECT_VERSION = "2.0.1"
| [
"[email protected]"
] | |
94450c5327da7d416b41a53d69aac0436e893583 | 9536f2b718f489bc89c49381b77d3d05a1da3b4b | /image_anncs/urls.py | 13568b0308e8d8633dd35c7d65d3afaf79f81cde | [] | no_license | Billy4195/DigitalSignage | d1b7e7e97052470d23e916135766ba6d76edd4e2 | f516144f3a84f80f8260b11eb63089d2e96af5b1 | refs/heads/master | 2020-09-11T02:01:47.050513 | 2020-05-07T09:38:52 | 2020-05-07T09:38:52 | 221,904,249 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.contrib import admin
from django.urls import path, include
from django.contrib.auth.decorators import login_required
from . import views
urlpatterns = [
path('upload/', views.upload, name='upload'),
path('delete/<int:img_id>', views.delete, name='img_delete'),
]
| [
"[email protected]"
] | |
ce9a83b4eb122405b35d1dc0d1fedc11545b9972 | 6da19be45ff986768eb820f11691977cb3c84772 | /Python/6_Advance_Python_development/606_Collection_module/Exercise_1.py | fe41fc148353319418877cf6d25a2e7196edf421 | [] | no_license | alexp01/trainings | 9e72f3a571292b79d2b1518f564d2dc0a774ef41 | 9d8daee16f15e0d7851fab12ab3d2505386a686c | refs/heads/master | 2023-05-04T23:37:13.243691 | 2023-05-02T08:02:53 | 2023-05-02T08:02:53 | 272,425,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py |
# https://www.udemy.com/course/the-complete-python-course/learn/quiz/4427862#questions
from collections import defaultdict, OrderedDict, namedtuple, deque
def task1(username: str, userlocation : str) -> defaultdict:
"""
- create a `defaultdict` object, and its default value would be set to the string `Unknown`.
- Add an entry with key name `Alan` and its value being `Manchester`.
- Return the `defaultdict` object you created.
"""
# you code starts here:
local_variable = defaultdict(list)
local_variable.default_factory = 'Unknown'
local_variable[username] = userlocation
return local_variable
call_task1 = task1('Alan','Manchester')
print(call_task1)
# My code is not good
# I forgot to use : (lambda: 'Unknown') in order to overwrite the default dict returned value.
# Training solution:
"""
def task1() -> defaultdict:
dd = defaultdict(lambda: 'Unknown')
dd['Alan'] = 'Manchester'
return dd
"""
| [
"[email protected]"
] | |
9405a8fbdc0c36536291aa6381e95ba5066ed969 | 2ac4951d0a8c2dc8398a4718146bf57f7a336727 | /chapter2/odd_list1.py | 2c3eb8b75f5e089d5c3d94c55e58ba3004e35b17 | [] | no_license | OldJohn86/Langtangen | 5e37a469828415f7aa5ccfc17f9390af999e328a | aaa34a1779d3732c8fa84c0bb3c860f00468178c | refs/heads/master | 2020-08-25T06:21:54.335805 | 2014-10-04T20:38:10 | 2014-10-04T20:38:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # odd.py prints odd numbers from 1 to n
n = 20
i = 1
list = []
while i <= n:
list.append(i)
i = i + 2
for j in list:
print j
'''
python odd_list1.py
1
3
5
7
9
11
13
15
17
19
'''
| [
"[email protected]"
] | |
582a1b621e09ebb78b94bcb5d50e555ba68e402e | dc39430cc2dd99c63efbfab9d15f190ae5c93b92 | /xnb_parse/type_readers/xTile/__init__.py | 7921e907added554f64aa53acc57eaae7df6613f | [
"MIT"
] | permissive | parhelia512/xnb_parse | f0cd9280c034d12d3e4ae31eef8a8e4a2f48a053 | 923cdce34203317c1e081dd56a171ae84d754b7c | refs/heads/master | 2020-05-29T11:50:36.466784 | 2017-03-27T11:07:25 | 2017-03-27T11:07:25 | 29,853,018 | 0 | 1 | null | 2017-03-27T11:07:26 | 2015-01-26T08:58:19 | Python | UTF-8 | Python | false | false | 155 | py | """
xTile type readers!
"""
from __future__ import print_function
from xnb_parse.type_readers.xTile import xTile_graphics
__all__ = ['xTile_graphics']
| [
"[email protected]"
] | |
0d9d0cb061a679f2682422a87b81894348f4e3a8 | 61347b3363a6818ca6b19b323aee041f6c68da55 | /ecommerce/shop/migrations/0006_auto_20210228_1035.py | 4d900e033c5f59c717c48f705db0750e6d203454 | [] | no_license | gianniskokkosis/django_ecommerce | a53885119f609ac599aeae0fc830e902e491e50c | b0719a19225c4f44c290d92f21e4109a57599405 | refs/heads/main | 2023-03-18T22:36:43.406078 | 2021-03-11T16:09:29 | 2021-03-11T16:09:29 | 303,303,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | # Generated by Django 3.1.6 on 2021-02-28 08:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop', '0005_remove_cart_status'),
]
operations = [
migrations.CreateModel(
name='Wishlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.product')),
],
),
migrations.DeleteModel(
name='Cart',
),
]
| [
"[email protected]"
] | |
db1a882960a19c51a5976829f37073ea7d82c24c | e41d8daac285e37551e17778fa1d31698d707311 | /Project3/etl.py | 28d9b6cd4d800d5bcddb569a5d07071bfd69d1ed | [] | no_license | Johannes-Handloser/Data-Engineer-Nanodegree | d0224011b28ce1fdd9d9cc9b037032216cffc39a | 5da2123c5b0eff436ff570281be6ed3161d95c2a | refs/heads/master | 2022-11-26T19:47:31.795613 | 2020-08-08T10:34:53 | 2020-08-08T10:34:53 | 271,854,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | import configparser
import psycopg2
from sql_queries import copy_table_queries, insert_table_queries
def load_staging_tables(cur, conn):
"""
Load data from S3 to the previously created staging tables using the queries in the copy_table_queries array
"""
print("inside load staging tables")
for query in copy_table_queries:
cur.execute(query)
conn.commit()
def insert_tables(cur, conn):
"""
ETL Function for populating dimension and fact tables from the staging tables using insert_table_queries array
"""
print("inside insert to tables with queries:")
for query in insert_table_queries:
print("query: " + query)
cur.execute(query)
conn.commit()
def main():
"""
Main function to create database connection to AWS Redshift and executing etl functions
"""
config = configparser.ConfigParser()
config.read('dwh.cfg')
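    # Expected dwh.cfg layout (a sketch; key names are assumptions — the
    # connection string below unpacks five [CLUSTER] values in order):
    # [CLUSTER]
    # HOST=<redshift-endpoint>
    # DB_NAME=<database>
    # DB_USER=<user>
    # DB_PASSWORD=<password>
    # DB_PORT=5439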
try:
        conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
    except psycopg2.Error:
        print("I am unable to connect to the database.")
        raise  # without a connection the cursor below would fail anyway
cur = conn.cursor()
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
7f687d32f8e6e619adc652e3a374840a15d548fc | 720ec1a86771c996b7ed7bb2720f44b90789b599 | /draw.py | 72cafabcdb8e5d951c824549ffc1b5b647db8f1d | [] | no_license | wangqiaoli/DynamicalEnvs | 6abfc821237034ff9a4eca50b0789d8d2ff7eaa4 | c522cbee1096ddfa8473b358781c2fde4ae1cbd1 | refs/heads/master | 2022-12-09T09:23:40.360941 | 2020-09-21T02:22:26 | 2020-09-21T02:22:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,111 | py | #!/usr/bin/env python
# -*-coding:utf-8 -*-
'''
@author: daishilong
@contact: [email protected]
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as anim
from rigid import RectObs, RectRobot, CircleRobot
def plot_arrow(ax, x, y, yaw, length=0.5, width=0.5, color='k'):
arrow = ax.arrow(x, y, length * np.cos(yaw), length * np.sin(yaw),
width=0.1, head_width=0.4, fc=color, ec=color)
return [arrow]
def plot_robot(ax, rigidrobot, pose):
rigidrobot.pose = pose[:3]
outline = rigidrobot.outline()
plot_outline, = plt.plot(outline[:,0], outline[:,1], c=rigidrobot.color)
return [plot_outline]
def plot_rectobs(ax, obs):
rect = patches.Rectangle(obs.rect[0], *(obs.rect[1]-obs.rect[0]), color=obs.color)
return ax.add_patch(rect)
def plot_obs_list(ax, obs_list):
collection = []
for obs in obs_list:
if isinstance(obs, RectObs):
collection.append(plot_rectobs(ax, obs))
return collection
def plot_problem_definition(ax, obs_list, rigidrobot, start, goal):
"""
plot the obstacles, start and goal
Parameters
----------
ax:
figure axis
obs_list: array_like
list of obstacles
obs_size: float
obstacle size
start: array_like
start state
goal: array_like
goal state
Return
------
collection: list
a collection of matplotlib artists
"""
collection = []
ax_ob = plot_obs_list(ax, obs_list)
start_mark = plot_arrow(ax, *start[:3], color='k')
goal_mark = plot_arrow(ax, *goal[:3], color='b')
collection += ax_ob + start_mark + goal_mark
return collection
def draw_tree(robot_env, start, goal, tree, vertex=True, save_gif=True, fname='rrt_tree'):
"""Draw the tree built by the planner
Args:
robot_env (rl_planner.env.base_env.BaseEnv): the robot gym env
start (numpy.ndarray)
goal (numpy.ndarray)
tree (list): list of nodes
vertex (bool): nodes will be plotted if vertex is True
fname (str)
"""
fig, ax = plt.subplots(figsize=(6,6))
plt.axis([-22,22,-22,22])
plt.xticks([])
plt.yticks([])
collection_list = [] # each entry is a collection
tmp = plot_problem_definition(ax, robot_env.obs_list, robot_env.rigid_robot, start, goal)
collection_list.append(tmp)
for node in tree:
if node.parent:
tmp = tmp.copy()
path = np.array(node.path[:])
ax_path, = plt.plot(path[:,0], path[:,1], "-g", linewidth=0.6)
tmp.append(ax_path)
if vertex:
ax_node, = plt.plot(node.state[0], node.state[1], 'x', c='black', markersize=1.0)
tmp.append(ax_node)
collection_list.append(tmp)
# plt.pause(2)
plt.savefig(fname+'.png')
# plt.show()
gif = anim.ArtistAnimation(fig, collection_list, interval=50)
if save_gif:
gif.save(fname+'.gif', writer = anim.PillowWriter(fps=4))
def draw_path(robot_env, start, goal, path, fname='rrt_path'):
"""Draw the planned path.
Args:
robot_env (rl_planner.env.base_env.BaseEnv): the robot gym env
start (numpy.ndarray)
goal (numpy.ndarray)
path (list): the planned path
fname (str)
"""
fig, ax = plt.subplots(figsize=(6,6))
plt.axis([-22,22,-22,22])
plt.xticks([])
plt.yticks([])
collection_list = [] # each entry is a collection
tmp = plot_problem_definition(ax, robot_env.obs_list, robot_env.rigid_robot, start, goal)
array_path = np.array([state[:2] for state in path])
plt.plot(array_path[:,0], array_path[:,1], c='k', linewidth=1.0)
collection_list.append(tmp)
for state in path:
tmp_ = tmp.copy()
robot_marker = plot_robot(ax, robot_env.rigid_robot, state[:3])
tmp_ += robot_marker
collection_list.append(tmp_)
gif = anim.ArtistAnimation(fig, collection_list, interval=200)
gif.save(fname+'.gif', writer = anim.PillowWriter(fps=5))
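
# A hedged usage sketch (the env construction is assumed, not shown here):
# env must expose obs_list and rigid_robot like rl_planner's BaseEnv.
# draw_tree(env, start, goal, tree, fname='rrt_tree')
# draw_path(env, start, goal, path, fname='rrt_path')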
| [
"[email protected]"
] | |
c359c8d6b63998a8228d8333a383d55100661cfa | 6a81b9bc6c4a1b3296b5b7535f12ceb30c461604 | /phasing/attachAB.py | 7d8903478abcf68e1b2fa4bd7400fc7c87d3e38e | [] | no_license | ac812/mega-analysis | 71a0744ba72596eac210d3abdbb014904839f608 | a155c655d1f4af64233729d822e8debba7eaf954 | refs/heads/master | 2021-01-17T08:29:54.718241 | 2011-12-15T20:54:44 | 2011-12-15T20:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | import sys
for l in sys.stdin:
toksA = l.rstrip().split("\t")
toksB = l.rstrip().split("\t")
toksA[0] += "_A"
toksA[1] += "_A"
toksB[0] += "_B"
toksB[1] += "_B"
print "\t".join(toksA)
print "\t".join(toksB)
| [
"[email protected]"
] | |
cb60e16707f3be241ffe2d78c697d1dd43a6d1a9 | 1892bf56c2eda7d340c841235081a4ec928fd62e | /748. Shortest Completing Word.py | a73dc307b40b69cfc7da6fab5187fcaa9b06dcbe | [] | no_license | WindChimeRan/leetcode-codewars | bd736156b53201b0219a36ea35bcb81b599d0486 | 9f2fca7fc3926a5c18c95bb49dcecfc7900b681c | refs/heads/master | 2021-07-07T11:11:19.988988 | 2020-07-28T03:32:22 | 2020-07-28T03:32:22 | 163,526,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | # 1
class Solution:
def shortestCompletingWord(self, licensePlate, words):
"""
:type licensePlate: str
:type words: List[str]
:rtype: str
"""
plate = sorted(map(str.lower, filter(lambda x: str.isalpha(x), licensePlate)))
result = None
length = 1001
for w in words:
inword = sorted(filter(lambda x: x in plate, w))
state = []
for p in plate:
if p in inword:
state.append(True)
inword.remove(p)
else:
state.append(False)
if all(state):
if len(w) < length:
result = w
length = len(w)
return result
| [
"[email protected]"
] | |
a5f25257cbca5020bcd61522aa7920ae66fee4a8 | 0fcc2db432b667872a85a80d77e1e71925f4b441 | /autonpa/autotests/actions.py | 9310f1d1394ef80febffbd8945670e350404cb82 | [
"Apache-2.0"
] | permissive | Jockero1991/Autonpa | e830b85160ffc251108f8ce310bfe629982390f9 | dd90baf2b6b5c66535672973b98b0c90d0d65d51 | refs/heads/master | 2022-12-17T13:02:57.635702 | 2021-03-30T06:37:28 | 2021-03-30T06:37:28 | 124,958,867 | 0 | 1 | Apache-2.0 | 2022-12-08T00:57:36 | 2018-03-12T22:22:38 | Python | UTF-8 | Python | false | false | 908 | py | import postgresql as ps
from selenium import webdriver as wd
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
chrome_options = Options()
chrome_options.add_argument("--window-size=1920,1080")
caps=DesiredCapabilities.CHROME
caps['loggingPrefs']={'browser': 'ALL'}
# enter text into the element found by the given selector
# open a drop-down list and pick a value
# click the button found by the given selector
# enter text and select a value
# get the text of the element found by the given selector
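
# A minimal sketch of the first helper above (function name and waits are
# assumptions, not part of the original stub):
def enter_text(driver, selector, text, timeout=10):
    """Type `text` into the element matched by a CSS `selector`."""
    element = WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, selector))
    )
    element.clear()
    element.send_keys(text)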
| [ "[email protected]" ] | |
69203dd0d18666e3761e7f7baee12259aa9dd614 | 40bfe140480d42750aa4a28fa07c2d25c6c51891 | /apps/api/views.py | 6edf3a613b0fc6504611d79efb3d2c74622b8bcf | [] | no_license | kuzentio/mitra | bed5750a1f34bbfeab48aaaa1ca6907b0a04abd8 | f1da84ca5657c8741141cff145487fa6e29b5cfe | refs/heads/master | 2023-04-11T07:16:11.978486 | 2019-03-19T16:22:57 | 2019-03-19T16:22:57 | 135,567,211 | 0 | 1 | null | 2021-03-01T15:49:57 | 2018-05-31T10:09:35 | Python | UTF-8 | Python | false | false | 6,965 | py | from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from rest_framework import generics, status
from rest_framework.decorators import api_view
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from apps.api.serializers import OrderSerializer, StrategyCreateSerializer, AccountCreateSerializer
from apps.order import utils
from apps.order import constants
from apps.order.models import Order
from apps.strategy.models import Strategy
class APIOrderView(generics.ListAPIView):
queryset = Order.objects.all()
serializer_class = OrderSerializer
permission_classes = (IsAuthenticated,)
def get_queryset(self):
extra_query = {}
qs = super(APIOrderView, self).get_queryset()
closed_at_range = [
constants.DEFAULT_MIN_DATE,
constants.DEFAULT_MAX_DATE,
]
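        # drop the placeholder 'All' pair filter, then narrow the window with the optional min_date/max_date params (dd.mm.YYYY)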
if self.request.query_params.get('pair') == 'All':
self.request.GET._mutable = True
self.request.query_params.pop('pair')
if self.request.query_params.get('min_date'):
min_date = datetime.strptime(
self.request.query_params.get('min_date'), '%d.%m.%Y'
)
closed_at_range[0] = min_date
if self.request.query_params.get('max_date'):
max_date = datetime.strptime(
self.request.query_params.get('max_date'), '%d.%m.%Y'
)
closed_at_range[1] = max_date
extra_query['closed_at__range'] = closed_at_range
extra_query['pair__icontains'] = self.request.query_params.get('pair', '')
exchange = self.kwargs.get('exchange_name')
if exchange is not None:
extra_query['exchange__name'] = exchange
return qs.filter(**extra_query)
def finalize_response(self, request, response, *args, **kwargs):
response = super(APIOrderView, self).finalize_response(request, response)
if self.get_queryset().exists():
pnl = utils.get_orders_pnl(self.get_queryset().all())
response.data.update(pnl)
response.data.update(
{'pairs': list(self.get_queryset().all().values_list('pair', flat=True).distinct())}
)
return response
class BaseAPIStrategyCreateView(generics.CreateAPIView):
serializer_class = StrategyCreateSerializer
permission_classes = (IsAuthenticated,)
def create(self, request, *args, **kwargs):
serializer = self.serializer_class(data=self.get_serializer_context())
if serializer.is_valid():
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response({'success': True, 'data': serializer.data}, status=status.HTTP_201_CREATED, headers=headers)
response_data = serializer.data
response_data['errors'] = serializer.errors
response_data['success'] = False
return Response(response_data, status=status.HTTP_200_OK)
class APIStrategyCreateView(BaseAPIStrategyCreateView):
serializer_class = StrategyCreateSerializer
permission_classes = (IsAuthenticated,)
def get_serializer_context(self):
request_data = dict(self.request.data)
strategies = Strategy.objects.order_by('-port')
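        # assign the next port above the current highest, or 7000 for the very first strategy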
data = {
'data': dict(
zip(request_data.get('key'), request_data.get('value'))
),
'user': self.request.user.id,
            'port': strategies.first().port + 1 if strategies.exists() else 7000
}
return data
class APIAccountCreateView(BaseAPIStrategyCreateView):
serializer_class = AccountCreateSerializer
permission_classes = (IsAuthenticated,)
def get_serializer_context(self):
request_data = self.request.data.dict()
data = {
'exchange': request_data.get('exchange'),
'api_key': request_data.get('api_key'),
'api_secret': request_data.get('api_secret'),
'user': self.request.user.id
}
return data
@login_required
@api_view(["POST"])
def strategy_set_value_view(request, strategy_uuid):
strategy = get_object_or_404(Strategy.objects.filter(
uuid=strategy_uuid,
user=request.user
))
key = request.POST.get('key', '')
value = request.POST.get('value', '')
strategy.set_value(key, value)
return JsonResponse({'success': True, 'data': strategy.data})
@login_required
@api_view(["POST"])
def strategy_delete_key_view(request, strategy_uuid):
strategy = get_object_or_404(Strategy.objects.filter(
uuid=strategy_uuid,
user=request.user
))
key = request.POST.get('key')
strategy.delete_key(key)
return JsonResponse({'success': True, 'data': strategy.data})
@login_required
@api_view(["POST"])
def strategy_delete_view(request, strategy_uuid):
strategy = get_object_or_404(Strategy.objects.filter(
uuid=strategy_uuid,
user=request.user
))
strategy.is_deleted = True
strategy.save()
return JsonResponse({'success': True})
@login_required
@api_view(["POST"])
def start_strategy_view(request, strategy_uuid):
strategy = get_object_or_404(Strategy.objects.filter(
uuid=strategy_uuid,
user=request.user
))
result = strategy.up_container()
if result:
return JsonResponse({'success': True})
return JsonResponse({'success': False, 'message': 'Bot could not been initialized, maybe it already exists.'})
@login_required
@api_view(["POST"])
def down_strategy_view(request, strategy_uuid):
strategy = get_object_or_404(Strategy.objects.filter(
uuid=strategy_uuid,
user=request.user
))
strategy.down_container()
return JsonResponse({'success': True})
@login_required
@api_view(["POST"])
def close_orders_view(request, strategy_uuid):
strategy = get_object_or_404(Strategy.objects.filter(
uuid=strategy_uuid,
user=request.user
))
strategy.close_all_orders()
return JsonResponse({'success': True})
@login_required
@api_view(["POST"])
def get_orders_view(request, strategy_uuid):
strategy = get_object_or_404(Strategy.objects.filter(
uuid=strategy_uuid,
user=request.user
))
strategy.get_orders()
return JsonResponse({'success': True})
@login_required
@api_view(["POST"])
def get_history_view(request, strategy_uuid):
strategy = get_object_or_404(Strategy.objects.filter(
uuid=strategy_uuid,
user=request.user
))
strategy.close_all_orders()
return JsonResponse({'success': True})
@login_required
@api_view(["POST"])
def sell_all_view(request, strategy_uuid):
strategy = get_object_or_404(Strategy.objects.filter(
uuid=strategy_uuid,
user=request.user
))
strategy.sell_all()
return JsonResponse({'success': True})
| [ "[email protected]" ] | |
2d5e1ef3de847984b2241c0129fbfc933c8e72fd | e5f04e20a7c9a01f6cf9a9827203de62e4c4b66e | /mnmt/trainer/trainer.py | 047f9c1b3d12c3126e03717d7675d250e33efc59 | [
"MIT"
] | permissive | syizhao/Multi-task-NMTransliteration | e2284a1212d68ac894ac4f78848b11fc1ec85b1d | d8e6a957f3d6e870172f6aa92e9871769d863244 | refs/heads/master | 2023-02-14T01:21:42.790144 | 2021-01-09T22:16:24 | 2021-01-09T22:16:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,080 | py | from mnmt.inputter import ArgsFeeder
from mnmt.inputter import generate_batch_iterators
from mnmt.translator import Seq2SeqTranslator
from mnmt.alternating_character_table import AlternatingCharacterTable
from mnmt.alternating_character_table import dict_act_path
from mnmt.trainer.utils import *
import torch
import torch.nn as nn
import torch.optim as optim
import math
import time
import pandas as pd
class Trainer:
data_container: DataContainer
def __init__(self, args_feeder: ArgsFeeder, model):
"""
Args:
args_feeder (ArgsFeeder):
model: the NMT model
"""
self.args_feeder = args_feeder
# init train.log
self.train_log_path = "experiments/exp{}/train.log".format(self.args_feeder.exp_num)
# init model
self.model = model
log_print(self.train_log_path, model.apply(init_weights))
self.num_params = count_parameters(self.model)
self.optimizer = getattr(optim, args_feeder.optim_choice)(model.parameters(), lr=args_feeder.learning_rate)
# learning rate scheduler
if args_feeder.valid_criterion == 'ACC':
self.decay_mode = 'max' # decay when less than maximum
elif args_feeder.valid_criterion == 'LOSS':
self.decay_mode = 'min' # decay when more than minimum
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer=self.optimizer,
mode=self.decay_mode, factor=args_feeder.lr_decay_factor, # 0.9 in paper
patience=args_feeder.decay_patience)
# evaluation memory bank
class EvalMemoryBank:
def __init__(self, best_valid_loss=float('inf'), acc_valid_loss=float('inf'),
best_valid_acc=float(-1), best_valid_epoch=float(-1), best_train_step=float(-1),
early_stopping_patience=args_feeder.early_stopping_patience,
best_valid_loss_aux=float('inf'), best_valid_acc_aux=float(-1)):
self.best_valid_loss = best_valid_loss
self.acc_valid_loss = acc_valid_loss
self.best_valid_acc = best_valid_acc
self.best_valid_epoch = best_valid_epoch
self.best_train_step = best_train_step
self.early_stopping_patience = early_stopping_patience
self.best_valid_loss_aux = best_valid_loss_aux
self.best_valid_acc_aux = best_valid_acc_aux
self.eval_memory_bank = EvalMemoryBank()
# to recover full patience when improving
self.early_stopping_patience = args_feeder.early_stopping_patience
# training memory bank
class TrainMemoryBank:
def __init__(self, exp_num=args_feeder.exp_num,
total_epochs=args_feeder.total_epochs,
n_epoch=0, n_steps=0,
report_interval=args_feeder.report_interval):
self.exp_num = exp_num
self.total_epochs = total_epochs
self.n_epoch = n_epoch
self.n_steps = n_steps
self.report_interval = report_interval
self.train_memory_bank = TrainMemoryBank()
# single or multi task
self.multi_task_ratio = args_feeder.multi_task_ratio
if self.multi_task_ratio == 1:
log_print(self.train_log_path, "Running single-main-task experiment...")
self.task = "Single-Main"
self.FLAG = "main-task (single)"
elif self.multi_task_ratio == 0:
log_print(self.train_log_path, "Running single-auxiliary-task experiment...")
self.task = "Single-Auxiliary"
self.FLAG = "aux-task (single)"
else:
log_print(self.train_log_path, "Running multi-task experiment...")
self.task = "Multi"
self.FLAG = "main-task (multi)"
# data
self.data_container = args_feeder.data_container
self.train_iter, self.valid_iter, self.test_iter = generate_batch_iterators(self.data_container,
self.args_feeder.batch_size,
self.args_feeder.device,
src_lang=self.args_feeder.src_lang)
for (name, field) in self.data_container.fields:
if name == self.args_feeder.src_lang:
self.src_field = field
elif name == self.args_feeder.trg_lang:
self.trg_field = field
elif name == self.args_feeder.auxiliary_name:
self.auxiliary_field = field
# teacher forcing
self.tfr = 0.8
# loss function
self.loss_function = self.construct_loss_function()
# translator
self.translator = Seq2SeqTranslator(self.args_feeder.quiet_translate)
# beam search
self.turn_on_beam = False
def run(self, burn_in_epoch):
try:
for epoch in range(self.train_memory_bank.total_epochs):
self.train_memory_bank.n_epoch = epoch
# save no results the burn-in period, recall Bayesian Modelling
if epoch <= burn_in_epoch:
log_print(self.train_log_path, "Renew Evaluation Records in the Burning Phase...")
# abandon the best checkpoint in early stage
self.eval_memory_bank.best_valid_loss = float('inf')
self.eval_memory_bank.best_valid_acc = 0
self.eval_memory_bank.early_stopping_patience = self.early_stopping_patience
if self.eval_memory_bank.early_stopping_patience == 0:
log_print(self.train_log_path, "Early Stopping!")
break
start_time = time.time()
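                # anneal the teacher-forcing ratio down each epoch, flooring it at 0.2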
self.tfr = max(1 - (float(10 + epoch * 1.5) / 50), 0.2)
train_loss = self.train()
end_time = time.time()
epoch_mins, epoch_secs = self.epoch_time(start_time, end_time)
log_print(self.train_log_path,
f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')
log_print(self.train_log_path,
f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
# log_print(self.train_log_path, f'\t Val. Loss: {valid_loss:.3f} | '
# f'Val. Acc: {valid_acc:.3f} | '
# f'Val. PPL: {math.exp(valid_loss):7.3f}')
except KeyboardInterrupt:
log_print(self.train_log_path, "Exiting loop")
@staticmethod
def epoch_time(start_time, end_time):
"""
Args:
start_time:
end_time:
"""
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
def update(self, valid_loss, valid_acc):
"""
Args:
valid_loss: current validation loss
valid_acc: current validation accuracy
"""
valid_criterion = self.args_feeder.valid_criterion
assert valid_criterion in ['LOSS', 'ACC']
log_print(self.train_log_path, "\n---------------------------------------")
log_print(self.train_log_path, "[Epoch: {}][Validatiing...]".format(self.train_memory_bank.n_epoch))
# For Validation Loss
if valid_loss <= self.eval_memory_bank.best_valid_loss:
log_print(self.train_log_path, '\t\t Better Valid Loss! (at least equal)')
self.eval_memory_bank.best_valid_loss = valid_loss
if valid_criterion == 'LOSS':
torch.save(self.model.state_dict(),
'experiments/exp' + str(self.train_memory_bank.exp_num) + '/loss-model-seq2seq.pt')
# restore full patience if obtain new minimum of the loss
self.eval_memory_bank.early_stopping_patience = self.early_stopping_patience
else:
self.eval_memory_bank.early_stopping_patience = \
max(self.eval_memory_bank.early_stopping_patience - 1, 0) # cannot be lower than 0
# For Validation Accuracy
if valid_acc >= self.eval_memory_bank.best_valid_acc:
log_print(self.train_log_path, '\t\t Better Valid Acc! (at least equal)')
self.eval_memory_bank.best_valid_acc = valid_acc
self.eval_memory_bank.acc_valid_loss = valid_loss
self.eval_memory_bank.best_valid_epoch = self.train_memory_bank.n_epoch
self.eval_memory_bank.best_train_step = self.train_memory_bank.n_steps
if valid_criterion == 'ACC':
torch.save(self.model.state_dict(),
'experiments/exp' + str(self.train_memory_bank.exp_num) + '/acc-model-seq2seq.pt')
log_print(self.train_log_path,
f'\t Early Stopping Patience: '
f'{self.eval_memory_bank.early_stopping_patience}/{self.early_stopping_patience}')
log_print(self.train_log_path,
f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
log_print(self.train_log_path,
f'\t BEST. Val. Loss: {self.eval_memory_bank.best_valid_loss:.3f} | '
f'BEST. Val. Acc: {self.eval_memory_bank.best_valid_acc:.3f} | '
f'Val. Loss: {self.eval_memory_bank.acc_valid_loss:.3f} | '
f'BEST. Val. Epoch: {self.eval_memory_bank.best_valid_epoch} | '
f'BEST. Val. Step: {self.eval_memory_bank.best_train_step}')
log_print(self.train_log_path, "---------------------------------------\n")
def update_aux(self, valid_acc_aux):
if valid_acc_aux >= self.eval_memory_bank.best_valid_acc_aux:
self.eval_memory_bank.best_valid_acc_aux = valid_acc_aux
log_print(self.train_log_path, '\t\t Better Valid Acc on Auxiliary Task! (at least equal)')
log_print(self.train_log_path, f'\tBEST. Val. Acc Aux: {self.eval_memory_bank.best_valid_acc_aux:.3f}')
log_print(self.train_log_path, "---------------------------------------\n")
@staticmethod
def fix_output_n_trg(output, trg):
"""Remove first column because they are <sos> symbols
Args:
output: [trg len, batch size, output dim]
trg: [trg len, batch size]
"""
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim) # [(trg len - 1) * batch size, output dim]
trg = trg[1:].view(-1) # [(trg len - 1) * batch size]
return output, trg
def construct_loss_function(self):
loss_criterion = nn.NLLLoss(ignore_index=self.args_feeder.trg_pad_idx)
if self.task == "Multi":
return lambda output, output_aux, trg, trg_aux: \
(self.multi_task_ratio * loss_criterion(output, trg)) + \
((1 - self.multi_task_ratio) * loss_criterion(output_aux, trg_aux))
else:
return loss_criterion
def compute_loss(self, output, trg):
if isinstance(output, tuple) and isinstance(trg, tuple):
assert self.task == "Multi"
output, output_aux, trg, trg_aux = output[0], output[1], trg[0], trg[1]
output, trg = self.fix_output_n_trg(output, trg)
output_aux, trg_aux = self.fix_output_n_trg(output_aux, trg_aux)
return self.loss_function(output, output_aux, trg, trg_aux)
else:
output, trg = self.fix_output_n_trg(output, trg)
return self.loss_function(output, trg)
def train(self):
self.model.train()
self.model.teacher_forcing_ratio = self.tfr
log_print(self.train_log_path,
"[Train]: Current Teacher Forcing Ratio: {:.3f}".format(self.model.teacher_forcing_ratio))
if self.args_feeder.beam_size > 1:
if self.task == "Multi":
for de in self.model.decoder_list:
de.turn_on_beam = False
else:
self.model.decoder.turn_on_beam = False # turn off beam search during training
epoch_loss = 0
for i, batch in enumerate(self.train_iter):
src, src_lens = getattr(batch, self.args_feeder.src_lang)
trg, trg_lens = getattr(batch, self.args_feeder.trg_lang)
self.optimizer.zero_grad()
if self.task == 'Multi':
trg_aux, trg_lens_aux = getattr(batch, self.args_feeder.auxiliary_name)
output, pred, output_aux, pred_aux = self.model(src, src_lens, trg, trg_aux)
loss = self.compute_loss((output, output_aux), (trg, trg_aux))
else:
output, pred = self.model(src, src_lens, trg)
loss = self.compute_loss(output, trg)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1) # clip = 1
self.optimizer.step()
epoch_loss += loss.item()
running_loss = epoch_loss / (i + 1)
self.train_memory_bank.n_steps += 1
# print every ${report_interval} batches (${report_interval} steps)
if self.train_memory_bank.n_steps % self.train_memory_bank.report_interval == 0:
lr = -1
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
n_examples = len(self.data_container.dataset['train'].examples)
log_print(self.train_log_path, '[Epoch: {}][#examples: {}/{}][#steps: {}]'.format(
self.train_memory_bank.n_epoch,
(i + 1) * self.args_feeder.batch_size,
n_examples,
self.train_memory_bank.n_steps))
log_print(self.train_log_path, f'\tTrain Loss: {running_loss:.3f} | '
f'Train PPL: {math.exp(running_loss):7.3f} '
f'| lr: {lr:.3e}')
# eval the validation set for every * steps
if (self.train_memory_bank.n_steps % (10 * self.train_memory_bank.report_interval)) == 0:
log_print(self.train_log_path, '-----Val------')
valid_loss, valid_acc, valid_acc_aux = self.evaluate(is_test=False)
# log_print(self.train_log_path, '-----Tst------')
# self.evaluate(is_test=True)
self.update(valid_loss, valid_acc)
if self.task == 'Multi':
self.update_aux(valid_acc_aux)
self.scheduler.step(valid_acc) # scheduled on validation acc
# back to teacher forcing (turn off beam decoding)
if self.args_feeder.beam_size > 1:
if self.task == "Multi":
for de in self.model.decoder_list:
de.turn_on_beam = False
else:
self.model.decoder.turn_on_beam = False # turn off beam search during training
self.model.train()
return epoch_loss / len(self.train_iter)
def evaluate(self, is_test=False, output_file=None, trans_only=False):
self.model.eval()
self.model.teacher_forcing_ratio = 0 # turn off teacher forcing
epoch_loss = 0
correct = 0
correct_aux = 0
iterator = self.valid_iter if not is_test else self.test_iter
if self.turn_on_beam:
if self.task == "Multi":
for de in self.model.decoder_list:
de.turn_on_beam = True
beam_size = de.beam_size
else:
self.model.decoder.turn_on_beam = True # turn on beam search during evaluation
beam_size = self.model.decoder.beam_size
log_print(self.train_log_path, "Start beam (size={}) searching ...".format(beam_size))
with torch.no_grad():
for i, batch in enumerate(iterator):
src, src_lens = getattr(batch, self.args_feeder.src_lang)
trg, trg_lens = getattr(batch, self.args_feeder.trg_lang)
if self.task == 'Multi':
trg_aux, trg_lens_aux = getattr(batch, self.args_feeder.auxiliary_name)
output, pred, output_aux, pred_aux = self.model(src, src_lens, trg, trg_aux)
loss = self.compute_loss((output, output_aux), (trg, trg_aux)) if not trans_only else float("Inf")
correct_aux += self.translator.translate(pred_aux, trg_aux, trg_field=self.auxiliary_field)
else:
output, pred = self.model(src, src_lens, trg)
loss = self.compute_loss(output, trg) if not trans_only else float("Inf")
epoch_loss += loss.item() if not trans_only else float("Inf")
# compute acc through seq2seq translation
correct += self.translator.translate(pred, trg, trg_field=self.trg_field, output_file=output_file)
epoch_loss = epoch_loss / len(iterator)
n_examples = len(self.data_container.dataset['valid'].examples) if not is_test \
else len(self.data_container.dataset['test'].examples)
flag = "TEST" if is_test else "VAL"
log_print(self.train_log_path, '[{}]: The number of correct predictions ({}): {}/{}'
.format(flag, self.FLAG, correct, n_examples))
if self.task == 'Multi':
log_print(self.train_log_path, '[{}]: The number of correct predictions (aux-task (multi)): {}/{}'
.format(flag, correct_aux, n_examples))
acc = correct / n_examples
acc_aux = correct_aux / n_examples # if single-task, then just zero
self.model.teacher_forcing_ratio = self.tfr # restore teacher-forcing ratio
return epoch_loss, acc, acc_aux
def load_best_model(self):
self.model.load_state_dict(torch.load('experiments/exp' +
str(self.args_feeder.exp_num) + '/acc-model-seq2seq.pt'))
def best_model_output(self, enable_acc_act=True, test_ref_dict=None,
beam_size=1, score_choice="N", length_norm_ratio=0.7):
self.load_best_model()
if self.task == "Multi":
for de in self.model.decoder_list:
de.beam_size = beam_size
de.score_choice = score_choice
de.length_norm_ratio = length_norm_ratio
else:
self.model.decoder.beam_size = beam_size
self.model.decoder.score_choice = score_choice
self.model.decoder.length_norm_ratio = length_norm_ratio
self.turn_on_beam = True
log_print(self.train_log_path, "Scoring Method: {}".format(score_choice))
log_print(self.train_log_path, "Length normalisation ratio: {}".format(length_norm_ratio))
# evaluate val set
f = open(self.args_feeder.valid_out_path, 'w')
f.write("PRED\tREF\n")
valid_loss, valid_acc, valid_acc_aux = self.evaluate(is_test=False, output_file=f)
f.close()
# evaluate tst set
f = open(self.args_feeder.test_out_path, 'w')
f.write("PRED\tREF\n")
test_loss, test_acc, test_acc_aux = self.evaluate(is_test=True, output_file=f)
f.close()
# save model settings
with open("experiments/exp{}/settings".format(self.args_feeder.exp_num), "w+") as f:
f.write("Direction\t{}-to-{}\n".format(self.args_feeder.src_lang, self.args_feeder.trg_lang))
f.write("Task\t{}\n".format(self.task))
f.write("MTR\t{}\n".format(self.multi_task_ratio))
f.write("#Params\t{}\n".format(self.num_params))
if self.task == "Multi":
f.write("Auxiliary\t{}".format(self.args_feeder.auxiliary_name))
# save evaluation results
eval_results = pd.DataFrame(columns=["Loss", "ACC"], index=["Valid", "Test"])
eval_results["Loss"] = [valid_loss, test_loss]
eval_results["ACC"] = [valid_acc, test_acc]
# save auxiliary task results
if self.task == 'Multi':
eval_results["ACC-aux"] = [valid_acc_aux, test_acc_aux]
# acc+ where applicable
if test_ref_dict is not None:
test_out_df = pd.read_csv(self.args_feeder.test_out_path, sep='\t')
test_srcs = []
for i, batch in enumerate(self.test_iter):
src, src_lens = getattr(batch, self.args_feeder.src_lang)
src = src[1:].permute(1, 0)
for j in range(src.shape[0]):
src_j = src[j, :]
src_j_toks = []
for t in src_j:
tok = self.src_field.vocab.itos[t]
if tok == '<eos>':
break
else:
src_j_toks.append(tok)
test_srcs.append(''.join(src_j_toks))
test_out_df['SRC'] = test_srcs
count = 0
for i, dp in test_out_df.iterrows():
if dp["PRED"] in test_ref_dict[dp["SRC"]]:
count += 1
eval_results["ACC+"] = ["NA", count / len(test_out_df)]
# acc-act where applicable
if enable_acc_act:
act = AlternatingCharacterTable(act_path=dict_act_path)
valid_out = act.tsv_to_df(self.args_feeder.valid_out_path)
test_out = act.tsv_to_df(self.args_feeder.test_out_path)
results_valid = act.compute_ACC_ACT(valid_out)
results_test = act.compute_ACC_ACT(test_out)
eval_results["ACC-ACT"] = [results_valid["acc-act"], results_test["acc-act"]]
eval_results["Replaced"] = [results_valid["replaced"], results_test["replaced"]]
log_print(self.train_log_path, eval_results)
eval_results.to_csv("experiments/exp" + str(self.args_feeder.exp_num) + "/eval.results", sep="\t")
def translate_only(self, beam_size=1, score_choice="N", length_norm_ratio=0.7, is_test=False, max_length=None):
self.load_best_model()
if self.task == "Multi":
for de in self.model.decoder_list:
de.beam_size = beam_size
de.score_choice = score_choice
de.length_norm_ratio = length_norm_ratio
de.max_length = max_length
else:
self.model.decoder.beam_size = beam_size
self.model.decoder.score_choice = score_choice
self.model.decoder.length_norm_ratio = length_norm_ratio
self.model.decoder.max_length = max_length
self.turn_on_beam = True
log_print(self.train_log_path, "Scoring Method: {}".format(score_choice))
log_print(self.train_log_path, "Length normalisation ratio: {}".format(length_norm_ratio))
# evaluate val set
if not is_test:
f = open(self.args_feeder.valid_out_path, 'w')
f.write("PRED\tREF\n")
self.evaluate(is_test=False, output_file=f, trans_only=True)
f.close()
else:
# evaluate tst set
f = open(self.args_feeder.test_out_path, 'w')
f.write("PRED\tREF\n")
self.evaluate(is_test=True, output_file=f, trans_only=True)
f.close()
f = open(self.args_feeder.test_out_path + '.src', 'w')
for i, batch in enumerate(self.test_iter):
src, src_lens = getattr(batch, self.args_feeder.src_lang)
src = src[1:].permute(1, 0)
for j in range(src.shape[0]):
src_j = src[j, :]
src_j_toks = []
for t in src_j:
tok = self.src_field.vocab.itos[t]
if tok == '<eos>':
break
else:
src_j_toks.append(tok)
                    f.write(''.join(src_j_toks) + "\n")
            f.close()
| [ "[email protected]" ] | |
c05d8a4e840ef2afbca731b3bbb2dbae8e93ca87 | d0e74db58ce3ec48cd165070157022767f7d14f9 | /exercise-3.py | b5e2d889c65ed3f72c12f257833d0cbff5c980db | [] | no_license | KelseyRocco/control-flow-lab | 7cb85e1f2aeb45f8c589899e672b37917e02620c | f300f0d3bbd60dfc2cb7005742bfc0c54f7fcd06 | refs/heads/main | 2023-06-02T11:34:33.182219 | 2021-06-15T18:00:27 | 2021-06-15T18:00:27 | 377,251,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | # exercise-03 Calculate Dog Years
# Write the code that:
# 1. Prompts the user to enter a dog's age in human years like this:
# Input a dog's age in human years:
# 2. Calculates the equivalent dog years, where:
# - The first two years count as 10 years each
# - Any remaining years count as 7 years each
# 3. Prints the answer in the following format:
# The dog's age in dog years is xx
# Hint: Use the int() function to convert the string returned from input() into an integer
human_years = int(input("Input a dog's age in human years: "))
if human_years <= 2:
    dog_years = human_years * 10
else:
    dog_years = 20 + (human_years - 2) * 7
print(f"The dog's age in dog years is {dog_years}") | [ "[email protected]" ] | |
9773ec360833e6176698d87d660d3c5844e46edb | 81663c7e0f7d2ebd8dd0f46766e9d9a55d2ee046 | /course/my_practice/Blog_site/migrations/versions/75f97fe6d6aa_first_migration.py | 7db0eaf6eacfbd47374c3d8609230544af424bb6 | [] | no_license | VladyslavPodrazhanskyi/flask_blog | 49f695947f3da91f73c7e7cd7d7a75d4dc59c744 | 5f3fb73ef70ffe4648a99bf471c3fee5dbdb21c4 | refs/heads/master | 2022-12-15T06:45:37.538683 | 2020-05-17T14:07:46 | 2020-05-17T14:07:46 | 232,903,749 | 0 | 0 | null | 2022-12-08T01:06:07 | 2020-01-09T20:53:18 | Python | UTF-8 | Python | false | false | 1,650 | py | """first migration
Revision ID: 75f97fe6d6aa
Revises:
Create Date: 2020-05-16 18:10:54.256179
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '75f97fe6d6aa'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('profile_image', sa.String(length=20), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('blog_post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('date', sa.DateTime(), nullable=False),
sa.Column('title', sa.String(length=140), nullable=False),
sa.Column('text', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('blog_post')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
# ### end Alembic commands ###
| [ "https://[email protected]" ] | https://[email protected] |
c24108115824846270de2c3fb788e3860aee965a | 0750ac688e156932ce004aa40fac7388878ac647 | /monitor/monitor.py | ce82ae3df0d4e044b09907243562d81c0f7f934f | [] | no_license | alexandalee/pymonitor | 5126b877d258418edba47dca8e716965b4ff80e3 | a81acad1e67d49085564a48860a36472d3c439bb | refs/heads/master | 2021-01-01T19:15:56.935462 | 2015-05-08T10:33:04 | 2015-05-08T10:33:04 | 35,099,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,656 | py | #-*- coding:utf-8 -*-
__author__ = 'likun'
import os
import sys
import time
import inspect
import logging
import traceback
import rrtdb.normal as rrtdb
import rrtnet
import rrtcfg
class Monitor():
def __init__(self):
self.config = None
self.isAlive = True
self.logger = self._getLogger()
def _getLogger(self):
logger = logging.getLogger('[Monitor]')
this_file = inspect.getfile(inspect.currentframe())
dirpath = os.path.abspath(os.path.dirname(this_file))
handler = logging.FileHandler(os.path.join(dirpath, "monitor.log"))
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def start(self):
#####################################
self.logger.error("monitor is starting....")
try:
            # check network connectivity
while True:
if rrtnet.ping():
self.logger.error("ping back.")
break
else:
self.logger.error('ping time out.')
time.sleep(30)
continue
            # fetch the database configuration
            self.config = rrtnet.config()
            self.logger.error('loading config...')
            # handle special (one-off) tasks
            self.procTask()
            # handle regular submissions
self.procNormal()
except:
errinfo = traceback.format_exc()
print errinfo
self.logger.error(errinfo)
def procTask(self):
"""
        Handle special (one-off) commands
"""
cmmd = rrtnet.task()
self.logger.error('loading task...')
if not cmmd:
print 'no task'
self.logger.error('no task.')
else:
            # initialize the database
rrtdb.init(self.config)
rrtdb.setLastTime(cmmd['start'])
while(True):
                # fetch 100 rows at a time
rows = rrtdb.query(cmmd['end'])
#print 'get %d rows'% len(rows)
if len(rows) <= 0: break
for row in rows:
rst = rrtnet.send(row, False)
#print rst
ltt = row['opttime'].strip('000')
if str(rst)<>'null': rrtdb.setLastTime(ltt)
time.sleep(0.2)
                # update the task's timestamp
fresh = { 'id':cmmd['id'], 'now':ltt }
rrtnet.taskover(fresh)
del rows
return True
def procNormal(self):
"""
        Normal (steady-state) processing
"""
        # initialize the database
        rrtdb.init(self.config)
        self.logger.error('init db')
        # restore the last access time
        arch = rrtnet.archive()
        self.logger.error('loading archive...')
        rrtdb.setArchive(arch)
        # start the main polling loop
while self.isAlive:
self.logger.error('pid: %s :: loop...'%(str(os.getpid())))
rows = rrtdb.query()
print 'get %d rows'% len(rows),
print time.strftime('%H:%M:%S')
if len(rows) == 0:
time.sleep(30); continue
for row in rows:
rst = rrtnet.send(row)
ltt = row['opttime'].strip('000')
if str(rst)<>'null': rrtdb.setLastTime(ltt)
time.sleep(0.2)
del rows
return True
def stop(self):
self.isAlive = False
if __name__=='__main__':
mntr = Monitor()
    mntr.start() | [ "[email protected]" ] | |
7e1494da693f73f9ed9861d391c4d70b3a547db3 | eddba20dfd883e6ac710fd7c7035a9da1a06ff96 | /analysis/NewClassifierBasedCategoryFiller.py | 4a5c468a6ffecefb7d6ecaca7a6a97254aca1d84 | [] | no_license | philippwindischhofer/HiggsPivoting | cc64ea6f75919c78a88ae4a182e30eb6f2669767 | c103de67d4c8358aba698ecf4b1491c05b8f6494 | refs/heads/paper | 2022-11-27T18:03:17.491300 | 2020-04-15T14:39:43 | 2020-04-15T14:39:43 | 193,477,990 | 3 | 0 | null | 2022-11-21T21:31:39 | 2019-06-24T09:46:32 | Python | UTF-8 | Python | false | false | 3,572 | py | import numpy as np
import pandas as pd
from analysis.Category import Category
from base.Configs import TrainingConfig
from plotting.ModelEvaluator import ModelEvaluator
from training.DataFormatters import TrainingSample
class ClassifierBasedCategoryFiller:
@staticmethod
def _sigeff_range_to_score_range(all_signal_pred, all_signal_weights, sigeff_range):
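        # map a signal-efficiency interval to classifier-score thresholds via weighted percentiles of the signal predictions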
return (ModelEvaluator._weighted_percentile(all_signal_pred, 1 - sigeff_range[0], weights = all_signal_weights),
ModelEvaluator._weighted_percentile(all_signal_pred, 1 - sigeff_range[1], weights = all_signal_weights))
@staticmethod
def create_classifier_category(mcoll, sig_process_data, sig_process_names, bkg_process_data, bkg_process_names, classifier_sigeff_range = (1.0, 0.0), nJ = 2):
# make sure to base all selections only on signal events with the correct number of jets
sig_process_data = [cur_data.loc[cur_data["nJ"] == nJ] for cur_data in sig_process_data]
bkg_process_data = [cur_data.loc[cur_data["nJ"] == nJ] for cur_data in bkg_process_data]
# convert them to TrainingSamples as well
sig_process_TrainingSamples = [TrainingSample.fromTable(cur_data) for cur_data in sig_process_data]
bkg_process_TrainingSamples = [TrainingSample.fromTable(cur_data) for cur_data in bkg_process_data]
all_signal_TrainingSample = TrainingSample.fromTable(pd.concat(sig_process_data))
# obtain the classifier predictions on all samples
sig_process_preds = [mcoll.predict(cur_data)[:, 1] for cur_data in sig_process_data]
bkg_process_preds = [mcoll.predict(cur_data)[:, 1] for cur_data in bkg_process_data]
all_signal_pred = np.concatenate(sig_process_preds, axis = 0)
# first, determine the cuts on the classifier based on the asked-for signal efficiency
classifier_range = ClassifierBasedCategoryFiller._sigeff_range_to_score_range(all_signal_pred, all_signal_weights = all_signal_TrainingSample.weights, sigeff_range = classifier_sigeff_range)
print("translated signal efficiency range ({}, {}) to classifier output range ({}, {})".format(classifier_sigeff_range[0], classifier_sigeff_range[1],
classifier_range[0], classifier_range[1]))
retcat = Category("clf_{:.2f}_{:.2f}".format(classifier_sigeff_range[0], classifier_sigeff_range[1]))
# then fill all events from all signal + background processes
process_data = sig_process_data + bkg_process_data
process_names = sig_process_names + bkg_process_names
process_preds = sig_process_preds + bkg_process_preds
for cur_process_data, cur_process_name, cur_pred in zip(process_data, process_names, process_preds):
print("predicting on sample {} with length {}".format(cur_process_name, len(cur_process_data)))
cut = np.logical_and.reduce((cur_pred > classifier_range[0], cur_pred < classifier_range[1]))
assert len(cut) == len(cur_process_data)
passed = cur_process_data[cut]
passed = TrainingSample.fromTable(passed)
# fill the category
retcat.add_events(events = passed.data, weights = passed.weights, process = cur_process_name, event_variables = TrainingConfig.training_branches)
print("filled {} events from process '{}'".format(sum(passed.weights), cur_process_name))
return retcat
| [ "[email protected]" ] | |
450ff43113d7b2a34ff264bb64c3258a1dc48f6b | 1c9afb2311f1e5cf8d1358569f26a81e71f89c42 | /finaltest.py | 199e4d6f0dea97f32345f738aa6ed98271f564c9 | [] | no_license | pbsandjay/Glassdoor_Scraping_Example | fb051af4d3a3933aec41bcd9817f5795c740412e | c1482e7ab4a25372a7cc8fb55960eba3cae6d495 | refs/heads/master | 2022-05-17T00:33:19.479014 | 2020-04-21T23:24:10 | 2020-04-21T23:24:10 | 257,739,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,203 | py | import urllib2, sys
from bs4 import BeautifulSoup
import requests
from urllib2 import urlopen
#Oh boy, I can't wait for this code to collapse on itself and cause a python singularity
f = open('reviews.txt','r+')
companyname = raw_input("Please enter company name: ")
companyname = companyname.strip("\n")
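# find the company's Glassdoor reviews URL by scraping the first matching result of a Google search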
req = urllib2.Request("https://www.google.com/search?q=" + companyname + "%20glassdoor%20reviews&rct=j",
headers={"User-Agent" : "Magic Browser"})
con = urllib2.urlopen(req)
webContent = con.read()
appendStartIndex = webContent.find ("https://www.glassdoor.com/Reviews/")
appendEndIndex = webContent.find(".htm")
url = webContent[appendStartIndex: appendEndIndex]
url2 = url + ".htm"
#url = raw_input("Please enter the URL of the website you want to scrape reviews from: ")
site = url2
hdr = {"User-Agent":"Magic Browser"}
req = urllib2.Request(site,headers=hdr)
page = urllib2.urlopen(req)
webpage = page.read()
webpage1 = webpage
startindex = webpage1.find ("<p class=' tightBot '>")
yearsworked = webpage1.find ("<p class=' pros noMargVert truncateThis wrapToggleStr'>")
pros = webpage1.find ("</p> <p class=' pros noMargVert truncateThis wrapToggleStr'>")
cons = webpage1.find ("</p> <p class=' cons noMargVert truncateThis wrapToggleStr'>")
while startindex!=-1:
startindex += 22;
yearsworked += 54;
cons += 59;
webpage1 = webpage1[startindex:]
yearsworked = webpage1[yearsworked:]
endindex = webpage1.find ("</p> <div class='description")
if endindex != -1:
review1 = webpage1[0:endindex]
review1 = review1.replace(" ","");
review1 = review1.replace("</p> </div> </div> <div class='row'> <div class='cell top '> <p class=\"strong tightVert\">Advice to Management</p> <p class=' adviceMgmt noMargVert truncateThis wrapToggleStr'>","")
#print review1
startindex = webpage1.find ("<p class=' tightBot '>")
yearsworked = webpage1.find ("<p class=' pros noMargVert truncateThis wrapToggleStr'>")
pros = 0
startindex = 0
webpage1= webpage
while startindex !=-1:
pros = webpage1.find ("</p> <p class=' pros noMargVert truncateThis wrapToggleStr'>")
startindex += len("<p class=' tightBot '>")
pros += len("</p> <p class=' pros noMargVert truncateThis wrapToggleStr'>")
webpage1 = webpage1[pros:]
endindexpros = webpage1.find("</p> </div> </div> <div class='row padBotLg'> <div class='cell top '> <p class=\"strong tightVert\">")
if endindexpros != -1:
review2 = webpage1[0:endindexpros]
review2 = review2.replace("#","");
review2 = review2.replace("&","");
review2 = review2.replace("<br/>","");
review2 = review2.replace("039;","");
review2 = review2.replace("</p> </div> </div> <div class='row'> <div class='cell top '> <p class=\"strong tightVert\">Advice to Management</p> <p class=' adviceMgmt noMargVert truncateThis wrapToggleStr'>","")
#print review2
startindex = webpage1.find ("<p class=' tightBot '>")
pros = webpage1.find ("</p> <p class=' pros noMargVert truncateThis wrapToggleStr'>")
cons = 0
startindex = 0
webpage1 = webpage
while startindex !=-1:
cons = webpage1.find ("</p> <p class=' cons noMargVert truncateThis wrapToggleStr'>")
startindex += len("<p class=' tightBot '>")
cons += len("</p> <p class=' cons noMargVert truncateThis wrapToggleStr'>")
webpage1 = webpage1[cons:]
endindexcons = webpage1.find("</p> </div> </div>")
if endindexcons != -1:
review3 = webpage1[0:endindexcons]
review3 = review3.replace("<br/>","");
review3 = review3.replace(""","");
review3 = review3.replace("'","");
review3 = review3.replace("</p> </div> </div> <div class='row'> <div class='cell top '> <p class=\"strong tightVert\">Advice to Management</p> <p class=' adviceMgmt noMargVert truncateThis wrapToggleStr'>","")
#print review3
startindex = webpage1.find ("<p class=' tightBot '>")
cons = webpage1.find ("</p <p class=' cons nomargVert truncateThis wrapToggleStr'>")
letstrythis = 0
for i in range(0,5000): # Number of pages plus one
#companyname = raw_input("Please enter company name: ")
while i !=-1:
companyname = companyname.strip("\n");
req = urllib2.Request("https://www.google.com/search?q=" + companyname + "%20glassdoor%20reviews&rct=j",
headers={"User-Agent" : "Magic Browser"})
con = urllib2.urlopen(req)
webContent = con.read()
appendStartIndex = webContent.find ("https://www.glassdoor.com/Reviews/")
appendEndIndex = webContent.find(".htm")
more_pages = "_P"
url = webContent[appendStartIndex: appendEndIndex:]
url2 = url + more_pages + ".htm"
more_pages = "_P"
letstrythis += 1
yep = ".htm"
url3 = url + more_pages + str(letstrythis) + yep
req = urllib2.Request(url3,
headers={"User-Agent" : "Magic Browser"})
con = urllib2.urlopen(req)
webContent = con.read()
#r = requests.get(url3)
#soup = BeautifulSoup(url3, 'html.parser')
#print url3
startindex = webContent.find ("<p class=' tightBot '>")
yearsworked = webContent.find ("<p class=' pros noMargVert truncateThis wrapToggleStr'>")
pros = webContent.find ("</p> <p class=' pros noMargVert truncateThis wrapToggleStr'>")
cons = webContent.find ("</p> <p class=' cons noMargVert truncateThis wrapToggleStr'>")
while startindex!=-1:
startindex += 22;
yearsworked += 54;
cons += 59;
webContent = webContent[startindex:]
yearsworked = webContent[yearsworked:]
endindex = webContent.find ("</p> <div class='description")
if endindex != -1:
review1 = webContent[0:endindex]
review1 = review1.replace(" ","");
print review1
startindex = webContent.find ("<p class=' tightBot '>")
yearsworked = webContent.find ("<p class=' pros noMargVert truncateThis wrapToggleStr'>")
for line in f:
f.write(review1)
#f.close()
pros = 0
startindex = 0
#webContent = webpage1
while startindex !=-1:
pros = webContent.find ("</p> <p class=' pros noMargVert truncateThis wrapToggleStr'>")
startindex += len("<p class=' tightBot '>")
pros += len("</p> <p class=' pros noMargVert truncateThis wrapToggleStr'>")
webContent = webContent[pros:]
endindexpros = webContent.find("</p> </div> </div> <div class='row padBotLg'> <div class='cell top '> <p class=\"strong tightVert\">")
if endindexpros != -1:
review2 = webContent[0:endindexpros]
review2 = review2.replace("#","");
review2 = review2.replace("&","");
review2 = review2.replace("<br/>","");
review2 = review2.replace("039;","");
review2 = review2.replace("</p> </div> </div> <div class='row'> <div class='cell top '> <p class=\"strong tightVert\">Advice to Management</p> <p class=' adviceMgmt noMargVert truncateThis wrapToggleStr'>","")
print review2
startindex = webContent.find ("<p class=' tightBot '>")
pros = webContent.find ("</p> <p class=' pros noMargVert truncateThis wrapToggleStr'>")
for line in f:
f.write(review2)
#f.close()
cons = 0
startindex = 0
#webContent = webpage
while startindex !=-1:
cons = webContent.find ("</p> <p class=' cons noMargVert truncateThis wrapToggleStr'>")
startindex += len("<p class=' tightBot '>")
cons += len("</p> <p class=' cons noMargVert truncateThis wrapToggleStr'>")
webContent = webContent[cons:]
endindexcons = webContent.find("</p> </div> </div> </div> <div class='tbl fill outlookEmpReview'>")
if endindexcons != -1:
review3 = webContent[0:endindexcons]
review3 = review3.replace("<br/>","");
review3 = review3.replace(""","");
review3 = review3.replace("'","");
review3 = review3.replace("</div> </div> </div>","")
review3 = review3.replace("</div> </div>","")
review3 = review3.replace("<div> <div>","")
review3 = review3.replace("</div> </div> </div> </div>","")
review3 = review3.replace("</p> <div class='row'> <div class='cell top '> <p class=\"strong tightVert\">Advice to Management</p> <p class=' adviceMgmt noMargVert truncateThis wrapToggleStr'>","")
print review3
startindex = webContent.find ("<p class=' tightBot '>")
cons = webContent.find ("</p <p class=' cons nomargVert truncateThis wrapToggleStr'>")
for line in f:
f.write(review3)
#f.close()
"""mgmt = 0
startindex = 0
while startindex !=-1:
mgmt = webContent.find ("</p> </div> </div> <div")
startindex += len("<p class=' tightBot '>")
mgmt += len("</p> </div> </div> <div class='row'> <div class='cell top '> <p class='strong tightVert'>")
webContent = webContent[mgmt:]
review4 = ""
endindexmgmt = webContent.find("</p> <p class=' adviceMgmt noMargVert truncateThis wrapToggleStr'>")
if endindexmgmt != -1:
review4 = webContent[0:endindexmgmt]
review4 = review4.replace("</p> </div> </div> <div class='row'>", "")
review4 = review4.replace("</p> <p class=' adviceMgmt noMargVert truncateThis wrapToggleStr'>","")
review4 = review4.replace("</div> </div> </div>","")
review4 = review4.replace("</div> </div>","")
review4 = review4.replace("<div> <div>","")
review4 = review4.replace("</div> </div> </div> </div>","")
review4 = review4.replace("</p> <div class='row'> <div class='cell top '> <p class=\"strong tightVert\">Advice to Management</p> <p class=' adviceMgmt noMargVert truncateThis wrapToggleStr'>","")
print review4
startindex = webContent.find ("<p class=' tightBot '>")
mgmt = webContent.find ("</p> </div> </div> <div class='row'> <div class='cell top '> <p class='strong tightVert'>")
if letstrythis > 0:
continue
while letstrythis !=-1:
continue"""
| [ "[email protected]" ] | |
5d8eeb2647ece22b5b27583eec69d257f7248cb3 | e8a3c31a3fb4159878a9848677055008e1a174a7 | /preparedata4cnn.py | d295f3956823d7bc65d2bf9643ce02d49d8352f8 | [] | no_license | giahy2507/ws | 7dc1f9a75a550cc7fe67cc203837af09de62a50e | aee3316a0976e8cae8da9020d198f69e662e0b0f | refs/heads/master | 2021-01-10T11:21:45.300750 | 2016-04-02T07:22:48 | 2016-04-02T07:22:48 | 53,932,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,711 | py | import os
import numpy as np
import pickle
from sys import stdout
import time
import sys
class Vocabulary(object):
def __init__(self, word_index, word_freq, alphabet, min_count):
self.word_index = word_index
self.embed_matrix = None
self.word_freq = word_freq
self.vocab_size = len(word_index.keys())
self.alphabet = alphabet
self.min_count = min_count
def __str__(self):
return "Vocabulary"
def save(self, file):
with open(file, mode="wb") as f:
pickle.dump(self, f)
def get_index(self, word):
A = Vocabulary.str_intersection(word.lower(), self.alphabet)
if len(A) == len(word):
# push to dictionany, freq += 1
if word.lower() in self.word_index.keys():
index = self.word_index[word.lower()]
else:
index = 1
else:
index = 0
return index
@classmethod
def load(cls, file):
if os.path.isfile(file):
with open(file, mode="rb") as f:
vocab = pickle.load(f)
return Vocabulary(vocab.word_index, vocab.word_freq, vocab.alphabet, vocab.min_count)
else:
print("No such file !")
return None
@classmethod
def filter_with_min_count(cls, word_freq, min_count):
idx = 4
word_index = {"symbol":0, "unk": 1, "head": 2, "tail":3}
for word in word_freq.keys():
if word_freq[word] >= min_count:
word_index[word] = idx
idx+=1
return word_index
@classmethod
def load_alphabet(cls, filename):
fi = open(filename, "r")
alphabet = fi.readline()
fi.close()
return alphabet
@classmethod
def str_intersection(cls, s1, s2):
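        # return only the characters of s1 that also occur in s2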
out = ""
for c in s1:
if c in s2:
out += c
return out
def rebuild_vocab_ws(self, train_filepath ,min_count = 5):
# after rebuild wordindex will be changed
print ("rebuild vocab")
with open(train_filepath, mode="r") as fi:
sentences = fi.readlines()
for i, sentence in enumerate(sentences):
if i % 100000 == 0:
print("Rebuild vocab processed line : ", i)
for word in sentence.replace("_"," ").lower().split():
A = Vocabulary.str_intersection(word,self.alphabet)
if len(A) == len(word):
# push to dictionany, freq += 1
if word not in self.word_freq.keys():
self.word_freq[word] = 1
else:
self.word_freq[word] +=1
else:
self.word_freq["symbol"] +=1
self.word_index = Vocabulary.filter_with_min_count(self.word_freq,min_count)
@classmethod
def build_vocab_ws(cls, train_filepath, alphatbet_filepath , min_count = 5):
print ("Build vocab ... ")
with open(train_filepath, mode="r") as fi:
sentences = fi.readlines()
word_freq = {"symbol":0}
alphabet = Vocabulary.load_alphabet(alphatbet_filepath)
for i, sentence in enumerate(sentences):
if i % 100000 == 0:
print("Build vocab processed line ", i)
for word in sentence.replace("_"," ").lower().split():
A = Vocabulary.str_intersection(word,alphabet)
if len(A) == len(word):
# push to dictionany, freq += 1
if word not in word_freq.keys():
word_freq[word] = 1
else:
word_freq[word] +=1
else:
word_freq["symbol"] +=1
word_index = Vocabulary.filter_with_min_count(word_freq,min_count)
return Vocabulary(word_index,word_freq,alphabet,min_count)
def sen_2_index(self, sentence = "", tagset = "YN"):
sentence_result = []
tag_result = []
syllables_result = []
words = sentence.split()
for word in words:
syllables = word.split('_')
if len(syllables) == 0:
print("exception: ", syllables)
sys.exit(2)
elif len(syllables) == 1:
sentence_result.append(self.get_index(syllables[0]))
syllables_result.append(syllables[0])
if tagset == "YN":
tag_result.append(0)
else:
print("Invalid tagset")
sys.exit(2)
else:
if tagset == "YN":
for syllable_idx in range(len(syllables)):
sentence_result.append(self.get_index(syllables[syllable_idx]))
syllables_result.append(syllables[syllable_idx])
if syllable_idx == len(syllables) -1:
tag_result.append(0)
else:
tag_result.append(1)
return sentence_result, syllables_result , tag_result
def gen_data_from_sentence_indexs(words_index, labels):
"""
Params:
words_index:
labels:
:return: [[index_A,index_B,index_C,index_D,index E], ...] , [label_1, label2,]
head: 2
tail: 3
[1 ,2 ,3]
"""
if len(words_index) == 0:
return np.array([[]]), np.array([])
elif len(words_index) == 1:
return np.array([[2,2,words_index[0],3,3]]),np.array(labels)
elif len(words_index) == 2:
return np.array([[2, 2, words_index[0], words_index[1], 3],
[2, words_index[0], words_index[1], 3, 3] ]),np.array(labels)
elif len(words_index) == 3:
return np.array([[2, 2, words_index[0], words_index[1], 3],
[2, words_index[0], words_index[1], words_index[2], 3],
[words_index[0], words_index[1],words_index[2], 3, 3]]),np.array(labels)
elif len(words_index) == 4:
return np.array([[2, 2, words_index[0], words_index[1], 3],
[2, words_index[0], words_index[1], words_index[2], words_index[3]],
[words_index[0], words_index[1],words_index[2], words_index[3], 3],
[words_index[1],words_index[2], words_index[3], 3, 3]]), np.array(labels)
else:
samples = []
samples.append([ 2, 2, words_index[0], words_index[0+1], words_index[0+2]])
samples.append([ 2, words_index[0], words_index[1], words_index[2], words_index[3]])
for i in range(2,len(words_index)-2,1):
samples.append(words_index[i-2:i+2+1])
samples.append([ words_index[-4],words_index[-3], words_index[-2], words_index[-1], 3])
samples.append([ words_index[-3], words_index[-2], words_index[-1], 3, 3])
return np.array(samples), np.array(labels)
def build_vocab(file_path, save_path = "model/vocab.bin", alphabet_path = "rule/alphabet.txt", min_count=3):
vocabulary = Vocabulary.build_vocab_ws(file_path,alphabet_path, min_count=3)
# vocabulary.rebuild_vocab_ws("data/vcl.pre.txt", min_count=3)
vocabulary.save(save_path)
print(len(vocabulary.word_index.keys()))
print(vocabulary.__str__())
if __name__ == "__main__":
start_total = time.time()
with open("model/vocab.bin", mode="rb") as f:
vocabulary = pickle.load(f)
print("Finish read Vocab")
with open("data/vtb.pre.txt", mode="r") as f:
lines = f.readlines()
print("Finish read vtb")
# [index_A index_B index_C index_D index E] ---------- label_C
X = np.empty((0,5),dtype=np.int32)
Y = np.empty((0),dtype=np.int32)
start = time.clock()
for i, line in enumerate(lines):
if i % 10000 == 0:
end = time.clock()
sys.stdout.write("Prepare data from line "+ str( i - 10000) + " to line " +str(i) +" : " + str( int(end - start) )+ "s\n")
sys.stdout.flush()
start = time.clock()
A = line.replace("_"," ").split()
sentence_indexs, syllables_result, tag_results = vocabulary.sen_2_index(line)
if (len(sentence_indexs) != len(tag_results)):
print("2 thang nay ko bang ne")
else:
xx, yy = gen_data_from_sentence_indexs(sentence_indexs,tag_results)
X = np.concatenate((X,xx), axis=0)
Y = np.concatenate((Y,yy))
print(X.shape)
print(Y.shape)
print("Finish process vtb")
# fi = open("data/vcl.pre.txt", mode="r")
# print("Processing vtb")
#
# start = time.clock()
# for i, line in enumerate(fi):
# if i % 10000 == 0:
# end = time.clock()
# sys.stdout.write("Prepare data from line "+ str( i - 10000) + " to line " +str(i) +" : " + str( int(end - start) )+ "s\n")
# sys.stdout.flush()
# start = time.clock()
# if i % 100000 == 0:
# valid_number = int(X.shape[0]*0.8)
# X_train, Y_train, X_validation, Y_validation = X[:valid_number], Y[:valid_number], X[valid_number:], Y[valid_number:]
# with open("data/vtb.pre.txt.train.nparray", mode="wb") as f:
# pickle.dump((X_train, Y_train, X_validation, Y_validation),f)
# print("Saved data to ", "data/vtb.pre.txt.train.nparray")
# start = time.clock()
# A = line.replace("_"," ").split()
# sentence_indexs, tag_results = vocabulary.sen_2_index(line)
#
# if (len(sentence_indexs) != len(tag_results)):
# print("2 thang nay ko bang ne")
# else:
# xx, yy = gen_data_from_sentence_indexs(sentence_indexs,tag_results)
# X = np.concatenate((X,xx), axis=0)
# Y = np.concatenate((Y,yy))
# fi.close()
# print("Finish process vcl")
first1_X = X[0]
first2_X = X[176492]
first3_X = X[176492 + 166828]
last1_X = X[176492-1]
last2_X = X[176492 + 166828 - 1]
last3_X = X[-1]
print("first1_X", first1_X)
print("last1_X", last1_X)
print("first2_X", first2_X)
print("last2_X", last2_X)
print("first3_X", first3_X)
print("last3_X", last3_X)
end_total = time.time()
print("Total time: ", end_total - start_total)
valid_number = int(X.shape[0]*0.8)
X_train, Y_train, X_validation, Y_validation = X[:valid_number], Y[:valid_number], X[valid_number:], Y[valid_number:]
with open("data/vtb.pre.txt.train.nparray", mode="wb") as f:
pickle.dump((X_train, Y_train, X_validation, Y_validation),f)
print("Saved data to ", "data/vtb.pre.txt.train.nparray")
| [ "[email protected]" ] | |
a6e55ce9f4a94bf7fe3b40c358854577daabf543 | 82f1e4eb6c1579f8d780e2b904586f6006fd1c11 | /spider/spiders/proxy_spider.py | 5541e6b431bbfe2805ddc458190ad593a0c62740 | [] | no_license | zhenjunMa/MusicSpider | d7756075deea1e1ed7e7606eccb033c3bfa9bb50 | b55fa08805767d9d7af19b287a27ecc57fc00385 | refs/heads/master | 2021-08-19T12:30:43.605227 | 2017-11-26T08:36:05 | 2017-11-26T08:36:05 | 111,176,566 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,762 | py | # -*- coding: utf-8 -*-
import scrapy
import urllib2
import requests
from bs4 import BeautifulSoup
def parse_proxy(response):
print response
# fetch high-anonymity HTTP/HTTPS proxies from xicidaili.com and validate them
class ProxySpider(scrapy.Spider):
name = "proxy"
proxies = []
start_urls = [
"http://www.xicidaili.com/nn/1"
]
def start_requests(self):
http_proxies = []
https_proxies = []
xici_urls = [
"http://www.xicidaili.com/nn/1",
"http://www.xicidaili.com/nn/2",
"http://www.xicidaili.com/nn/3"
]
for url in xici_urls:
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
referer = 'http://www.zhihu.com/articles'
headers = {"User-Agent": user_agent, 'Referer': referer}
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
soup = BeautifulSoup(response.read(), "lxml")
table = soup.find("table", attrs={"id": "ip_list"})
trs = table.find_all("tr")
for i in range(1, len(trs)):
tr = trs[i]
tds = tr.find_all("td")
ip = tds[1].text
port = tds[2].text
desc = tds[4].text
mode = tds[5].text.strip()
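                # "高匿" marks high-anonymity proxies; probe each candidate against Baidu before keeping it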
if desc.encode('utf-8') == "高匿":
if mode == "HTTPS":
proxy = "https://" + ip + ":" + port
# noinspection PyBroadException
try:
response = requests.get("https://www.baidu.com/js/bdsug.js?v=1.0.3.0", timeout=2, allow_redirects=False, proxies={"https": proxy})
if response.status_code == 200 and response.content.index("function") > -1:
https_proxies.append(proxy)
except Exception, e:
print e
elif mode == "HTTP":
proxy = "http://" + ip + ":" + port
# noinspection PyBroadException
try:
response = requests.get("http://www.baidu.com/js/bdsug.js?v=1.0.3.0", timeout=2, allow_redirects=False, proxies={"http": proxy})
if response.status_code == 200 and response.content.index("function") > -1:
http_proxies.append(proxy)
except Exception, e:
print e
print https_proxies
print http_proxies
def parse(self, response):
pass
# for idx, tr in enumerate(response.xpath('//table[@id="ip_list"]/tr')):
# if idx > 0:
# ip = tr.xpath('td').xpath('text()')[0].extract()
# port = tr.xpath('td').xpath('text()')[1].extract()
# desc = tr.xpath('td').xpath('text()')[4].extract().encode('utf-8')
# mode = tr.xpath('td').xpath('text()')[5].extract().encode('utf-8')
#
# if desc == "高匿" and mode == "HTTP":
# proxy = "http://" + ip + ":" + port
# # noinspection PyBroadException
# try:
# resp = requests.get("http://www.baidu.com/js/bdsug.js?v=1.0.3.0", timeout=1, allow_redirects=False, proxies={"http": proxy})
# if resp.status_code == 200 and resp.content.index("function") > -1:
# print "success"
# self.proxies.append(proxy)
# except Exception:
# pass
# for s in self.proxies:
# print s
| [ "[email protected]" ] | |
60f6598ba438e4ebb1d8210d5ebf7a0a9557a2f5 | f3baf8b850c896231b4c254a22567fd5d7a5035c | /Aula 15/Ex 1 .py | 304e9d38f14bc2eb168f7ea770a7789ea0e364df | [
"MIT"
] | permissive | Katakhan/TrabalhosPython2 | e1c23119ef582038ceea0004c872c00778fd326e | ab47af0ff3c00922857578e58a1a149d9e65e229 | refs/heads/master | 2020-09-21T02:15:04.505791 | 2020-03-19T13:23:41 | 2020-03-19T13:23:41 | 224,650,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | marca = input('Informe a marca da cerveja')
teor = float(input('informe o teor alcoolico da cerveja'))
tipo = input('Informe o tipo da cervaja(Alcoolico(1) e não alcoolico (0))')
cerva_dicionario = {'marca':marca, 'teor':teor, 'tipo':tipo}
def salvar_cerva(cerva_dicionario):
arquivo = open ('Banco de cerva.txt','a')
arquivo.write(f"{cerva_dicionario['marca']};{cerva_dicionario['teor']};{cerva_dicionario['tipo']}\n")
arquivo.close()
def ler():
lista = []
arquivo = open ('Banco de cerva.txt', 'r')
for linha in arquivo:
linha = linha.strip()
lista_linha = linha.split(';')
cerva = {'marca':lista_linha[0] , 'teor':lista_linha[1], 'tipo':lista_linha[2]}
lista.append(cerva)
arquivo.close()
return lista
def salvar(cerva):
    # Unused helper: the original loop shadowed the parameter and printed the
    # literal string 'linha'; print the actual line read from the file instead.
    arquivo = open ('Banco de cerva.txt', 'r')
    for linha in arquivo:
        print(linha.strip())
    arquivo.close()
cerva = {'marca':marca , 'teor':teor, 'tipo':tipo}
salvar_cerva(cerva_dicionario)
lista = ler()
for p in lista:
print(f"{p['marca']} - {p['teor']} - {p['tipo']}") | [
"[email protected]"
] | |
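# --- Added example (not part of the original file above) ---------------------
# A sketch of the same ';'-separated read/write cycle using the csv module,
# which avoids splitting and joining the fields by hand (same file name as
# above).
import csv

def salvar_csv(cerva):
    with open('Banco de cerva.txt', 'a', newline='') as arquivo:
        csv.writer(arquivo, delimiter=';').writerow(
            [cerva['marca'], cerva['teor'], cerva['tipo']])

def ler_csv():
    with open('Banco de cerva.txt', newline='') as arquivo:
        return [{'marca': marca, 'teor': teor, 'tipo': tipo}
                for marca, teor, tipo in csv.reader(arquivo, delimiter=';')]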
daf14cf7c7b03f03f1f1b401e88dd73d582756f7 | e5f1f90c62255b4b16907e8036575ca3bf2612bc | /char.py | 22f08890fa6eee0ddad64a9aae14dee3a0611a35 | [] | no_license | engineern2019/Python | 7719e1defe40161f7a53f8307d6970c27fdc088e | 23b171c8190c8711329cc4f0957c4bff1ab873d5 | refs/heads/master | 2020-05-26T22:00:44.559494 | 2019-05-31T09:38:21 | 2019-05-31T09:38:21 | 188,390,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | word=0
msg=input("enter any message ")
i=0
while i<len(msg):
if msg[i]==" ":
word=word+1
i=i+1
print("There are ",(word+1), "words") | [
"[email protected]"
] | |
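# --- Added example (not part of the original file above) ---------------------
# str.split() with no argument collapses runs of whitespace, so this variant of
# the word count above also handles double spaces and tabs correctly.
def count_words(msg):
    return len(msg.split())

print("There are", count_words("enter  any   message"), "words")  # -> 3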
c4a8948659302be35ffd7f343130d4c6c4209acb | a6bdf72944765b6f57ef84005194d4918b2eca20 | /actions/create_matrix.py | 0f306dfc5013ee405a37ecb94444881ac816115b | [] | no_license | PrimalCat-Real/MatrixCalcConsole | b53000b196f62d9e8b02f9ecf6b89e7a839de23c | ed1d5c42d99ae07220103ba2a8206e0d50d0a5ec | refs/heads/main | 2023-08-02T12:37:17.157922 | 2021-09-27T18:48:17 | 2021-09-27T18:48:17 | 410,194,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | from colorama import *
import sys
import numpy as np
sys.path.append(".")
from actions import input_checkers as check
# Create a zero matrix with dimensions requested from the user
def create_zero_matrix():
    correct = False
    while not correct:
        try:
            count_lines = int(input("How many lines in matrix: "))
            count_columns = int(input("How many columns in matrix: "))
            correct = True
except ValueError:
print(Fore.RED + "Value must be number, try again" + Style.RESET_ALL)
starter_matrix = np.zeros((count_lines, count_columns))
return starter_matrix, count_lines, count_columns
# Fills the zero matrix with values entered by the user
class FulledMatrix():
def __init__(self):
self.starter_matrix, self.count_lines, self.count_columns= create_zero_matrix()
def create_fulled_matrix(self):
for i in range(0, self.count_lines):
try:
holder_matrix_line = input(f'Write {i+1}th line: ').split(",")
            except ValueError:
print("error")
matrix_line_checker = check.CheckInput()
matrix_line_checker.check_action("Determine", holder_matrix_line)
for a in range(0, self.count_columns):
try:
self.starter_matrix[i][a] = holder_matrix_line[a]
except:
                    # TODO: an error message should probably be printed here
pass
else:
pass
print(self.starter_matrix)
return self.starter_matrix
| [
"[email protected]"
] | |
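# --- Added example (not part of the original file above) ---------------------
# A sketch of validating one comma-separated row up front, so a bad or missing
# cell raises a clear error instead of being silently skipped by the bare
# except in create_fulled_matrix above.
def parse_row(line, expected_columns):
    values = [float(cell) for cell in line.split(",")]
    if len(values) != expected_columns:
        raise ValueError("expected {} values, got {}".format(expected_columns, len(values)))
    return values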
4f44abc4467e44e07199edfa36da5089b6d4126b | edf15fa536202317ab394de6a243f44a7152c6f4 | /result_code.py | 9bb7af8e365d6763a98a5513811c1401afba7c2f | [] | no_license | deepankchauhan/translator | 94e11587fa3ef7d66de6cae17b64c0107397249d | efa51aa58f76c87ac461aadf460645c1cce1b499 | refs/heads/master | 2020-04-17T05:37:01.349300 | 2019-02-23T18:43:21 | 2019-02-23T18:43:21 | 166,288,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15 | py | print ("hey!")
| [
"[email protected]"
] | |
b64af1d72b18f34d381cce1cc10749c45cd8231f | 3d4e4c25a87bc58613207d5751a1a56042890379 | /test_async_scrape.py | 2c8875a5a59aa62c45e778b3d54069930ab6479e | [] | no_license | ansonnn07/covid19-malaysia | 7ef27491136ebed9f6bbe17e34187db2067e3e2d | 1ab8fbae17582f4e757002d2831e34f00a61a77c | refs/heads/main | 2023-06-16T08:00:09.544743 | 2021-06-28T14:17:23 | 2021-06-28T14:17:23 | 362,746,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,785 | py | import asyncio
import os
import time
from datetime import datetime, timedelta
import pandas as pd
import requests
from aiohttp import ClientSession
from bs4 import BeautifulSoup
# translate the months from English to Malay
month_translation = {"January": "januari",
"February": "februari",
"March": "mac",
"April": "april",
"May": "mei",
"June": "jun",
"July": "julai",
"August": "ogos",
"September": "september",
"October": "oktober",
"November": "november",
"December": "disember"}
default_url = "https://kpkesihatan.com/{format1}/kenyataan-akhbar-kpk-{format2}-situasi-semasa-jangkitan-penyakit-coronavirus-2019-covid-19-di-malaysia/"
new_format_date = datetime(2021, 1, 20)
def create_datetime(day, month, year):
data_date = '-'.join([str(day).zfill(2),
str(month).zfill(2), str(year)])
data_datetime = datetime.strptime(data_date, '%d-%m-%Y')
return data_datetime
def create_date_dict(dt):
"""to be passed to the default_url to create the URL for the date"""
month_full = month_translation[dt.strftime('%B')]
date_dict = {'format1': dt.strftime(
'%Y/%m/%d'), 'format2': f'{dt.day}-{month_full}-{dt.year}'}
return date_dict
def create_datetime_and_dict(day, month, year):
data_datetime = create_datetime(day, month, year)
date_dict = create_date_dict(data_datetime)
return data_datetime, date_dict
async def fetch(url, session, current_date=None):
async with session.get(url) as response:
assert response.status == 200, f"Error accessing page on {current_date}\n{url}"
html_body = await response.read()
soup = BeautifulSoup(html_body, "lxml")
df = pd.read_html(html_body,
match='JUMLAH KESELURUHAN',
header=0)[-1]
return {"soup": soup, "date": current_date, "df": df}
async def fetch_with_sem(sem, url, session, current_date=None):
async with sem:
return await fetch(url, session, current_date)
async def async_scrape(start_date=datetime(2021, 1, 21), end_date=None, verbose=0):
assert isinstance(end_date, datetime)
current_date = start_date
total_days = (end_date - start_date).days + 1
print(f"[INFO] Total days: {total_days}")
tasks = []
sem = asyncio.Semaphore(10)
async with ClientSession() as session:
for i in range(total_days):
url = default_url.format(**create_date_dict(current_date))
if verbose:
print(f"{current_date = }")
print(f"{url = }")
tasks.append(
asyncio.create_task(
fetch_with_sem(sem, url, session, current_date))
)
current_date += timedelta(days=1)
start_time = time.perf_counter()
pages_content = await asyncio.gather(*tasks)
# [{"body": "...", "current_date": datetime(2020, 1, 21)}]
total_time = time.perf_counter() - start_time
print(f"{total_time = :.4f} seconds")
return pages_content
# need to add this to avoid RuntimeError in Windows (the selector policy only
# exists on Windows builds of Python, so guard the call)
if os.name == "nt":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
start_date = datetime(2021, 1, 21)
end_date = datetime(2021, 4, 30)
scrape_coroutine = async_scrape(start_date=start_date,
end_date=end_date,
verbose=0)
# Top-level `await` is a syntax error in a plain .py script (it only works in a
# notebook/async REPL), so drive the coroutine with asyncio.run instead:
results = asyncio.run(scrape_coroutine)
print("\n[INFO] Results for last 5 days:")
for result in results[-5:]:
print(f"{result['date'] = }")
| [
"[email protected]"
] | |
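# --- Added example (not part of the original file above) ---------------------
# A sketch of flattening the gathered page dicts into a single table; it
# assumes each result carries the "date" and per-page "df" values produced by
# fetch() above.
import pandas as pd

def results_to_frame(results):
    frames = []
    for result in results:
        df = result["df"].copy()
        df["date"] = result["date"]  # tag every row with the page's date
        frames.append(df)
    return pd.concat(frames, ignore_index=True)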
c87637ee942e77a3ef3bfe68a33bda7071da689d | 2d191eb46ed804c9029801832ff4016aeaf8d31c | /configs/ocrnet/ocrnet_r50-d8_512x512_40k_voc12aug.py | f15f514031eca94e146f3bebecf2fca04bdcd130 | [
"Apache-2.0"
] | permissive | openseg-group/mmsegmentation | df99ac2c3510b7f2dff92405aae25026d1023d98 | 23939f09d2b0bd30fc26eb7f8af974f1f5441210 | refs/heads/master | 2023-03-02T07:49:23.652558 | 2021-02-15T04:16:28 | 2021-02-15T04:16:28 | 278,537,243 | 2 | 2 | null | 2020-07-10T04:24:16 | 2020-07-10T04:24:15 | null | UTF-8 | Python | false | false | 945 | py | _base_ = [
'../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(decode_head=[
dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=21,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
dict(
type='OCRHead',
in_channels=2048,
in_index=3,
channels=512,
ocr_channels=256,
dropout_ratio=0.1,
num_classes=21,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
])
| [
"[email protected]"
] | |
1661c0c892781faebf53a6250a84d0ea3848ed67 | a9eb7535df8f92e54ddd13ff1e2d33cc37ab2c9e | /tf.matmul.py | 3a9abd388278608eeda96a70cd7aa200395efdf3 | [] | no_license | KennCoder7/Kenn-TEST | 075c552a1a1d56e0f6679bf5210194dcf65a2bf0 | b8dddff74591bdd25c3842ec0ec96862f37a5f88 | refs/heads/master | 2020-04-07T01:49:48.448703 | 2018-12-08T05:50:20 | 2018-12-08T05:50:20 | 157,953,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,563 | py | import tensorflow as tf
# a_2d = tf.constant([1]*6, shape=[2, 3])
# b_2d = tf.constant([2]*12, shape=[3, 4])
# c_2d = tf.matmul(a_2d, b_2d)
# a_3d = tf.constant([1]*12, shape=[2, 2, 3])
# b_3d = tf.constant([2]*24, shape=[2, 3, 4])
# c_3d = tf.matmul(a_3d, b_3d)
# a_4d = tf.constant([1]*24, shape=[2, 2, 2, 3])
# b_4d = tf.constant([2]*48, shape=[2, 2, 3, 4])
# c_4d = tf.matmul(a_4d, b_4d)
#
# with tf.Session() as sess:
# tf.global_variables_initializer().run()
# print("# {}*{}={} \n{}".
# format(a_2d.eval().shape, b_2d.eval().shape, c_2d.eval().shape, c_2d.eval()))
# print("# {}*{}={} \n{}".
# format(a_3d.eval().shape, b_3d.eval().shape, c_3d.eval().shape, c_3d.eval()))
# print("# {}*{}={} \n{}".
# format(a_4d.eval().shape, b_4d.eval().shape, c_4d.eval().shape, c_4d.eval()))
# a_2d = tf.constant([1]*6, shape=[2, 3])
# b_2d = tf.constant([2]*6, shape=[2, 3])
# c_2d = tf.multiply(a_2d, b_2d)
# a_3d = tf.constant([1]*12, shape=[2, 2, 3])
# b_3d = tf.constant([2]*12, shape=[2, 2, 3])
# c_3d = tf.multiply(a_3d, b_3d)
# a_4d = tf.constant([1]*24, shape=[2, 2, 2, 3])
# b_4d = tf.constant([2]*24, shape=[2, 2, 2, 3])
# c_4d = tf.multiply(a_4d, b_4d)
# with tf.Session() as sess:
# tf.global_variables_initializer().run()
# print("# {}*{}={} \n{}".
# format(a_2d.eval().shape, b_2d.eval().shape, c_2d.eval().shape, c_2d.eval()))
# print("# {}*{}={} \n{}".
# format(a_3d.eval().shape, b_3d.eval().shape, c_3d.eval().shape, c_3d.eval()))
# print("# {}*{}={} \n{}".
# format(a_4d.eval().shape, b_4d.eval().shape, c_4d.eval().shape, c_4d.eval()))
a_2d = tf.constant([1]*6, shape=[2, 3])
k = tf.constant(2)
l = tf.constant([2, 3, 4])
b_2d_1 = tf.multiply(k, a_2d) # tf.multiply(a_2d, k) is also ok
b_2d_2 = tf.multiply(l, a_2d) # tf.multiply(a_2d, l) is also ok
a_3d = tf.constant([1]*12, shape=[2, 2, 3])
b_3d_1 = tf.multiply(k, a_3d) # tf.multiply(a_3d, k) is also ok
b_3d_2 = tf.multiply(l, a_3d) # tf.multiply(a_3d, l) is also ok
a_4d = tf.constant([1]*24, shape=[2, 2, 2, 3])
b_4d_1 = tf.multiply(k, a_4d) # tf.multiply(a_4d, k) is also ok
b_4d_2 = tf.multiply(l, a_4d) # tf.multiply(a_4d, l) is also ok
#
with tf.Session() as sess:
tf.global_variables_initializer().run()
print("# {}*{}={} \n{}".
format(k.eval().shape, a_2d.eval().shape, b_2d_1.eval().shape, b_2d_1.eval()))
print("# {}*{}={} \n{}".
format(l.eval().shape, a_2d.eval().shape, b_2d_2.eval().shape, b_2d_2.eval()))
print("# {}*{}={} \n{}".
format(k.eval().shape, a_3d.eval().shape, b_3d_1.eval().shape, b_3d_1.eval()))
print("# {}*{}={} \n{}".
format(l.eval().shape, a_3d.eval().shape, b_3d_2.eval().shape, b_3d_2.eval()))
print("# {}*{}={} \n{}".
format(k.eval().shape, a_4d.eval().shape, b_4d_1.eval().shape, b_4d_1.eval()))
print("# {}*{}={} \n{}".
format(l.eval().shape, a_4d.eval().shape, b_4d_2.eval().shape, b_4d_2.eval()))
# with tf.Session() as sess:
# tf.global_variables_initializer().run()
# print("# c_2d_1 shape:{} \n{}".format(c_2d_1.eval().shape, c_2d_1.eval()))
# print("# c_2d_2 shape:{} \n{}".format(c_2d_2.eval().shape, c_2d_2.eval()))
# print("# c_3d_1 shape:{} \n{}".format(c_3d_1.eval().shape, c_3d_1.eval()))
# print("# c_3d_2 shape:{} \n{}".format(c_3d_2.eval().shape, c_3d_2.eval()))
# print("# c_4d_1 shape:{} \n{}".format(c_4d_1.eval().shape, c_4d_1.eval()))
# print("# c_4d_2 shape:{} \n{}".format(c_4d_2.eval().shape, c_4d_2.eval()))
# a_2d = tf.constant([1]*6, shape=[2, 3])
# d_2d_1 = tf.reduce_sum(a_2d, axis=0)
# d_2d_2 = tf.reduce_sum(a_2d, axis=1)
# a_3d = tf.constant([1]*12, shape=[2, 2, 3])
# d_3d_1 = tf.reduce_sum(a_3d, axis=1)
# d_3d_2 = tf.reduce_sum(a_3d, axis=2)
# a_4d = tf.constant([1]*24, shape=[2, 2, 2, 3])
# d_4d_1 = tf.reduce_sum(a_4d, axis=2)
# d_4d_2 = tf.reduce_sum(a_4d, axis=3)
#
# with tf.Session() as sess:
# tf.global_variables_initializer().run()
# print("# a_2d 行累加得到shape:{}\n{}".format(d_2d_1.eval().shape, d_2d_1.eval()))
# print("# a_2d 列累加得到shape:{}\n{}".format(d_2d_2.eval().shape, d_2d_2.eval()))
# print("# a_3d 行累加得到shape:{}\n{}".format(d_3d_1.eval().shape, d_3d_1.eval()))
# print("# a_3d 列累加得到shape:{}\n{}".format(d_3d_2.eval().shape, d_3d_2.eval()))
# print("# a_4d 行累加得到shape:{}\n{}".format(d_4d_1.eval().shape, d_4d_1.eval()))
# print("# a_4d 列累加得到shape:{}\n{}".format(d_4d_2.eval().shape, d_4d_2.eval())) | [
"[email protected]"
] | |
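# --- Added example (not part of the original file above) ---------------------
# The broadcasting rules probed above are the same ones NumPy uses: a scalar
# or a length-3 vector multiplies into a (2, 2, 3) array, and summing over an
# axis drops that axis, just like tf.reduce_sum.
import numpy as np

a_3d = np.ones((2, 2, 3), dtype=int)
print((2 * a_3d).shape)                    # (2, 2, 3), every entry doubled
print((np.array([2, 3, 4]) * a_3d).shape)  # (2, 2, 3), last axis scaled
print(a_3d.sum(axis=1).shape)              # (2, 3), like tf.reduce_sum(axis=1)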
e08e72c85ee8c4b4b56053b4e2a2624dd50a38d1 | 8cd091ae2a579675f32b5c022fb263b9ce2d6324 | /base_sift.py | 151857a510044ee5371924e373e33c90af5034d0 | [] | no_license | yanglinGEM/yl-code | 70e80aad8d8c92e28f3c7ae581f2061e646ecc9c | 99cdcbc03796b1dd88296fad830863318b25f83f | refs/heads/master | 2023-02-03T15:25:53.715770 | 2020-12-22T13:30:54 | 2020-12-22T13:30:54 | 318,364,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | import cv2
import numpy as np
img_orign = cv2.imread('3.jpg')
img=cv2.imread('target.jpg')
rows,cols = img.shape[:2]
gray_orign= cv2.cvtColor(img_orign,cv2.COLOR_BGR2GRAY)
gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# This needs an older OpenCV build: SIFT lives in the xfeatures2d contrib module, so the OpenCV version has to be rolled back
sift=cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(gray_orign, None)
kp2, des2 = sift.detectAndCompute(gray, None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
if m.distance < 0.75* n.distance:
good.append([m])
img3 = cv2.drawMatchesKnn(img_orign, kp1, img, kp2, good[:20], None, flags=2)
cv2.imshow('img',img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
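# --- Added example (not part of the original file above) ---------------------
# A sketch of going from the ratio-test matches above to a homography between
# the two images; it assumes the script's `good`, `kp1` and `kp2` variables
# and needs at least four surviving matches.
import cv2
import numpy as np

if len(good) >= 4:
    src_pts = np.float32([kp1[m[0].queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m[0].trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    print("inliers:", int(mask.sum()))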
f5aa640d645d04cb491d5d5ff4dc186e33ed4a44 | 0cbb0df8fb8761c91643058e9a09bb8303268224 | /utilities/config_reader.py | 29a6fa7252eafa6be3056b2599fd60ab0ba339d3 | [] | no_license | nareshrasamalla/python | 75b34f6c3f50606e9bc259a9317093cbb249a97d | 6eaca20de9055dc45c6bb19d41381e7c5456b583 | refs/heads/master | 2020-07-02T08:31:24.249611 | 2019-08-12T09:22:20 | 2019-08-12T09:22:20 | 201,473,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | # -*- coding: utf-8 -*-
"""
This file is intended to read the System config file and
fetch values out of it based on the section queried.
"""
import configparser
import os
# Initialize the Config Parser.
config = configparser.RawConfigParser()
os.chdir(r"..\config")  # raw string so "\c" is not read as an escape sequence
#Load the Config File.
config.read('nmi.config')
#get_config_by_section is intended to read
#a section from the specified config file.
def get_config_by_section(section_name):
return dict(config.items(section_name))
| [
"[email protected]"
] | |
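# --- Added example (not part of the original file above) ---------------------
# A self-contained sketch of what get_config_by_section returns; the
# [database] section and its keys are made up for illustration, since the real
# nmi.config is not shown.
import configparser

sample = "[database]\nhost = localhost\nport = 5432\n"
cfg = configparser.RawConfigParser()
cfg.read_string(sample)
print(dict(cfg.items("database")))  # {'host': 'localhost', 'port': '5432'}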
3f026d9eabf7526bb158dcdbb53c98d0f8daaeb9 | ec7cd0f098c4e354fd80c385e1b6600758aa2dda | /tests/test_cobertura_diff.py | 6fe83721be67ade280ccdc94e8cfc2a065aceef9 | [
"MIT"
] | permissive | rndrr/pycobertura | aba5c0d202b2318ecc950a22ce4a9714b3c15819 | d090ff7c93c5bf07e07676cfa3c12ca376135be1 | refs/heads/master | 2021-05-30T22:37:41.023079 | 2015-09-28T17:47:58 | 2015-09-28T17:47:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,238 | py | from .utils import make_cobertura
def test_diff_class_source():
from pycobertura.cobertura import CoberturaDiff
from pycobertura.cobertura import Line
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
expected_sources = {
'dummy/__init__': [],
'dummy/dummy': [
Line(1, u'def foo():\n', None, None),
Line(2, u' pass\n', None, None),
Line(3, u'\n', None, None),
Line(4, u'def bar():\n', None, None),
Line(5, u" a = 'a'\n", True, 'cov-up'),
Line(6, u" d = 'd'\n", True, 'line-edit')
],
'dummy/dummy2': [
Line(1, u'def baz():\n', None, None),
Line(2, u" c = 'c'\n", True, 'line-edit'),
Line(3, u'\n', None, 'line-edit'),
Line(4, u'def bat():\n', True, 'line-edit'),
Line(5, u' pass\n', False, 'cov-down')
],
'dummy/dummy3': [
Line(1, u'def foobar():\n', False, 'line-edit'),
Line(2, u' pass # This is a very long comment that was purposefully written so we could test how HTML rendering looks like when the boundaries of the page are reached. And here is a non-ascii char: \u015e\n', False, 'line-edit')
],
}
for class_name in cobertura2.classes():
assert differ.class_source(class_name) == \
expected_sources[class_name]
def test_diff_total_misses():
from pycobertura.cobertura import CoberturaDiff
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
assert differ.diff_total_misses() == 1
def test_diff_total_misses_by_class():
from pycobertura.cobertura import CoberturaDiff
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
expected_sources = {
'dummy/__init__': 0,
'dummy/dummy': -2,
'dummy/dummy2': 1,
'dummy/dummy3': 2,
}
for class_name in cobertura2.classes():
assert differ.diff_total_misses(class_name) == \
expected_sources[class_name]
def test_diff_line_rate():
from pycobertura.cobertura import CoberturaDiff
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
assert differ.diff_line_rate() == 0.15000000000000002
def test_diff_line_rate_by_class():
from pycobertura.cobertura import CoberturaDiff
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
expected_sources = {
'dummy/__init__': 0,
'dummy/dummy': 0.4,
'dummy/dummy2': -0.25,
'dummy/dummy3': 0.0,
}
for class_name in cobertura2.classes():
assert differ.diff_line_rate(class_name) == \
expected_sources[class_name]
def test_diff_total_hits():
from pycobertura.cobertura import CoberturaDiff
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
assert differ.diff_total_hits() == 3
def test_diff_total_hits_by_class():
from pycobertura.cobertura import CoberturaDiff
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
expected_total_hits = {
'dummy/__init__': 0,
'dummy/dummy': 2,
'dummy/dummy2': 1,
'dummy/dummy3': 0,
}
for class_name in cobertura2.classes():
assert differ.diff_total_hits(class_name) == \
expected_total_hits[class_name]
def test_diff__has_all_changes_covered__some_changed_code_is_still_uncovered():
from pycobertura.cobertura import Cobertura, CoberturaDiff
cobertura1 = Cobertura('tests/dummy.zeroexit1/coverage.xml')
cobertura2 = Cobertura('tests/dummy.zeroexit2/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
assert differ.has_all_changes_covered() is False
def test_diff__has_better_coverage():
from pycobertura.cobertura import Cobertura, CoberturaDiff
cobertura1 = Cobertura('tests/dummy.zeroexit1/coverage.xml')
cobertura2 = Cobertura('tests/dummy.zeroexit2/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
assert differ.has_better_coverage() is True
def test_diff__has_not_better_coverage():
from pycobertura.cobertura import Cobertura, CoberturaDiff
cobertura1 = Cobertura('tests/dummy.zeroexit2/coverage.xml')
cobertura2 = Cobertura('tests/dummy.zeroexit1/coverage.xml')
differ = CoberturaDiff(cobertura1, cobertura2)
assert differ.has_better_coverage() is False
| [
"[email protected]"
] | |
0e1e424dd25965b3ba42e4afeeeb389beece8303 | 13561108c0c2866ed6dd09cff1c2c738f1b9fa29 | /evalml/pipelines/components/estimators/classifiers/lightgbm_classifier.py | 679acc55265a94e7e3c8a97d62f0a8fd9ae0946a | [
"BSD-3-Clause"
] | permissive | SarahCharlotte/evalml | ee94ad8c602d65e7870cc777014ec0bc306bac81 | a4148e65e2ec6745c0dc7ed5a0d65401adcfdfae | refs/heads/main | 2023-09-05T21:39:06.792121 | 2021-11-16T05:42:21 | 2021-11-16T05:42:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,308 | py | """LightGBM Classifier."""
import copy
import numpy as np
import pandas as pd
from pandas.api.types import is_integer_dtype
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
from skopt.space import Integer, Real
from evalml.model_family import ModelFamily
from evalml.pipelines.components.estimators import Estimator
from evalml.problem_types import ProblemTypes
from evalml.utils import (
SEED_BOUNDS,
_rename_column_names_to_numeric,
import_or_raise,
infer_feature_types,
)
class LightGBMClassifier(Estimator):
"""LightGBM Classifier.
Args:
boosting_type (string): Type of boosting to use. Defaults to "gbdt".
- 'gbdt' uses traditional Gradient Boosting Decision Tree
- "dart", uses Dropouts meet Multiple Additive Regression Trees
- "goss", uses Gradient-based One-Side Sampling
- "rf", uses Random Forest
learning_rate (float): Boosting learning rate. Defaults to 0.1.
n_estimators (int): Number of boosted trees to fit. Defaults to 100.
max_depth (int): Maximum tree depth for base learners, <=0 means no limit. Defaults to 0.
num_leaves (int): Maximum tree leaves for base learners. Defaults to 31.
min_child_samples (int): Minimum number of data needed in a child (leaf). Defaults to 20.
bagging_fraction (float): LightGBM will randomly select a subset of features on each iteration (tree) without resampling if this is smaller than 1.0.
For example, if set to 0.8, LightGBM will select 80% of features before training each tree.
This can be used to speed up training and deal with overfitting. Defaults to 0.9.
bagging_freq (int): Frequency for bagging. 0 means bagging is disabled.
k means perform bagging at every k iteration.
Every k-th iteration, LightGBM will randomly select bagging_fraction * 100 % of
the data to use for the next k iterations. Defaults to 0.
n_jobs (int or None): Number of threads to run in parallel. -1 uses all threads. Defaults to -1.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "LightGBM Classifier"
hyperparameter_ranges = {
"learning_rate": Real(0.000001, 1),
"boosting_type": ["gbdt", "dart", "goss", "rf"],
"n_estimators": Integer(10, 100),
"max_depth": Integer(0, 10),
"num_leaves": Integer(2, 100),
"min_child_samples": Integer(1, 100),
"bagging_fraction": Real(0.000001, 1),
"bagging_freq": Integer(0, 1),
}
"""{
"learning_rate": Real(0.000001, 1),
"boosting_type": ["gbdt", "dart", "goss", "rf"],
"n_estimators": Integer(10, 100),
"max_depth": Integer(0, 10),
"num_leaves": Integer(2, 100),
"min_child_samples": Integer(1, 100),
"bagging_fraction": Real(0.000001, 1),
"bagging_freq": Integer(0, 1),
}"""
model_family = ModelFamily.LIGHTGBM
"""ModelFamily.LIGHTGBM"""
supported_problem_types = [
ProblemTypes.BINARY,
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
"""[
ProblemTypes.BINARY,
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]"""
SEED_MIN = 0
SEED_MAX = SEED_BOUNDS.max_bound
"""SEED_BOUNDS.max_bound"""
def __init__(
self,
boosting_type="gbdt",
learning_rate=0.1,
n_estimators=100,
max_depth=0,
num_leaves=31,
min_child_samples=20,
bagging_fraction=0.9,
bagging_freq=0,
n_jobs=-1,
random_seed=0,
**kwargs,
):
parameters = {
"boosting_type": boosting_type,
"learning_rate": learning_rate,
"n_estimators": n_estimators,
"max_depth": max_depth,
"num_leaves": num_leaves,
"min_child_samples": min_child_samples,
"n_jobs": n_jobs,
"bagging_freq": bagging_freq,
"bagging_fraction": bagging_fraction,
}
parameters.update(kwargs)
lg_parameters = copy.copy(parameters)
# when boosting type is random forest (rf), LightGBM requires bagging_freq == 1 and 0 < bagging_fraction < 1.0
if boosting_type == "rf":
lg_parameters["bagging_freq"] = 1
# when boosting type is goss, LightGBM requires bagging_fraction == 1
elif boosting_type == "goss":
lg_parameters["bagging_fraction"] = 1
# avoid lightgbm warnings having to do with parameter aliases
if (
lg_parameters["bagging_freq"] is not None
or lg_parameters["bagging_fraction"] is not None
):
lg_parameters.update({"subsample": None, "subsample_freq": None})
lgbm_error_msg = (
"LightGBM is not installed. Please install using `pip install lightgbm`."
)
lgbm = import_or_raise("lightgbm", error_msg=lgbm_error_msg)
self._ordinal_encoder = None
self._label_encoder = None
lgbm_classifier = lgbm.sklearn.LGBMClassifier(
random_state=random_seed, **lg_parameters
)
super().__init__(
parameters=parameters,
component_obj=lgbm_classifier,
random_seed=random_seed,
)
def _encode_categories(self, X, fit=False):
"""Encodes each categorical feature using ordinal encoding."""
X = infer_feature_types(X)
cat_cols = list(X.ww.select("category", return_schema=True).columns)
if fit:
self.input_feature_names = list(X.columns)
X_encoded = _rename_column_names_to_numeric(X)
rename_cols_dict = dict(zip(X.columns, X_encoded.columns))
cat_cols = [rename_cols_dict[col] for col in cat_cols]
if len(cat_cols) == 0:
return X_encoded
if fit:
self._ordinal_encoder = OrdinalEncoder()
encoder_output = self._ordinal_encoder.fit_transform(X_encoded[cat_cols])
else:
encoder_output = self._ordinal_encoder.transform(X_encoded[cat_cols])
X_encoded[cat_cols] = pd.DataFrame(encoder_output)
X_encoded[cat_cols] = X_encoded[cat_cols].astype("category")
return X_encoded
def _encode_labels(self, y):
y_encoded = infer_feature_types(y)
# change only if dtype isn't int
if not is_integer_dtype(y_encoded):
self._label_encoder = LabelEncoder()
y_encoded = pd.Series(
self._label_encoder.fit_transform(y_encoded), dtype="int64"
)
return y_encoded
def fit(self, X, y=None):
"""Fits LightGBM classifier component to data.
Args:
X (pd.DataFrame): The input training data of shape [n_samples, n_features].
y (pd.Series): The target training data of length [n_samples].
Returns:
self
"""
X = infer_feature_types(X)
X_encoded = self._encode_categories(X, fit=True)
y_encoded = self._encode_labels(y)
self._component_obj.fit(X_encoded, y_encoded)
return self
def predict(self, X):
"""Make predictions using the fitted LightGBM classifier.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.DataFrame: Predicted values.
"""
X_encoded = self._encode_categories(X)
predictions = super().predict(X_encoded)
if not self._label_encoder:
return predictions
predictions = pd.Series(
self._label_encoder.inverse_transform(predictions.astype(np.int64)),
index=predictions.index,
)
return infer_feature_types(predictions)
def predict_proba(self, X):
"""Make prediction probabilities using the fitted LightGBM classifier.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.DataFrame: Predicted probability values.
"""
X_encoded = self._encode_categories(X)
return super().predict_proba(X_encoded)
| [
"[email protected]"
] | |
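# --- Added example (not part of the original file above) ---------------------
# A usage sketch for the component above, assuming evalml and lightgbm are
# installed; the import path is illustrative and can differ between evalml
# versions.
import pandas as pd
from evalml.pipelines.components import LightGBMClassifier

X = pd.DataFrame({"feat_a": [1, 2, 3, 4], "feat_b": [0.5, 1.5, 2.5, 3.5]})
y = pd.Series([0, 1, 0, 1])

clf = LightGBMClassifier(n_estimators=10)
clf.fit(X, y)
print(clf.predict(X))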
036edbdcac4e99c218d6e3166e64c1c348592b2c | 3d41d1f41277dba3ef7875be3d49d2a22823a17b | /Virago/assembly/pipeline/3_test-provisioning/testProvision.py | 8a362099ed9fe3beca1deea81b4e7a617faa8b5f | [] | no_license | AgnihotriShivam/Automation-lambda | e6f7a6cfc16026776240042c79d3e5c9ea845ffb | 45885485096f9468958b07e659ab1f52423104bb | refs/heads/master | 2022-11-17T06:47:21.409726 | 2020-07-18T07:44:01 | 2020-07-18T07:44:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,354 | py | import json
import boto3
import traceback
payload = {
"accountemail": "gabor.hasenfrasz@t-systems",
"accountid": "787043465971",
"securityemail": "[email protected]",
"username": "A11336167",
"accountname": "baseline",
"accountcreate": {
"CreateAccountStatus": {
"AccountId": "787043465971",
"State": "SUCCEEDED"
}
}
}
def lambda_handler(event, context):
try:
print("EVENT DATA:\n")
print(event)
print("-------------\n")
job_id = event['CodePipeline.job']['id']
job_data = event['CodePipeline.job']['data']
print("JOB DATA:")
print(job_data)
params = json.loads(job_data['actionConfiguration']['configuration']['UserParameters'])
branchname = params['branchname']
code_pipeline = boto3.client('codepipeline')
lambdaclient = boto3.client('lambda')
sfn = boto3.client('stepfunctions')
if 'continuationToken' in job_data:
print("ContinuationToken received")
print(job_data['continuationToken'])
conttoken = json.loads(job_data['continuationToken'])
execArn = conttoken['executionArn']
sfnstatus = sfn.describe_execution(executionArn=execArn)
if (sfnstatus['status'] == 'SUCCEEDED'):
print("Status succeeded")
code_pipeline.put_job_success_result(jobId=job_id)
return "Complete."
elif (sfnstatus['status'] == 'RUNNING'):
print("Status is running")
continuation_token = json.dumps({'executionArn': execArn})
code_pipeline.put_job_success_result(jobId=job_id, continuationToken=continuation_token)
else:
code_pipeline.put_job_failure_result(jobId=job_id, failureDetails={'type': 'JobFailed', 'message' : 'Failed'})
else:
response = lambdaclient.invoke(FunctionName='deployProvision-{}'.format(branchname),
Payload=json.dumps(payload)
)
returnfromlambda = json.loads(response['Payload'].read())
continuation_token = json.dumps({'executionArn': returnfromlambda['executionArn']})
code_pipeline.put_job_success_result(jobId=job_id, continuationToken=continuation_token)
except Exception as e:
print('Function failed due to exception.')
print(e)
traceback.print_exc()
        # `put_job_failure` was never defined in this module; report the failure
        # to CodePipeline directly instead.
        boto3.client('codepipeline').put_job_failure_result(
            jobId=job_id,
            failureDetails={'type': 'JobFailed', 'message': 'Function exception: ' + str(e)})
| [
"[email protected]"
] | |
6021f99450da92349ed6322671a065a456b44615 | 6dfc2281e1afad370cc30ca103fed6f3a32ac9f0 | /arp_spoof.py | f39efa2815f0e54982199ba45dcb504a400086ea | [] | no_license | iw4p/arp-spoof | a6fa2de1208ce35f6415d2e6db17b005c7c54d8f | b437cb3498166fd2f07739200df81072ed7422dd | refs/heads/master | 2020-06-21T22:48:15.899138 | 2019-07-18T13:45:04 | 2019-07-18T13:45:04 | 197,570,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | #!/usr/bin/env python
import scapy.all as scapy
import time
import sys
def get_mac(ip):
arp_request = scapy.ARP(pdst=ip)
broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
arp_request_broadcast = broadcast/arp_request
answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]
return answered_list[0][1].hwsrc
def spoof(target_ip, spoof_ip):
target_mac = get_mac(target_ip)
packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=spoof_ip)
scapy.send(packet, verbose=False)
def restore(destination_ip, source_ip):
destination_mac = get_mac(destination_ip)
source_mac = get_mac(source_ip)
packet = scapy.ARP(op=2, pdst=destination_ip, hwdst=destination_mac, psrc=source_ip, hwsrc=source_mac)
scapy.send(packet, count=4, verbose=False)
target_ip = "192.168.1.5"
gateway_ip = "192.168.1.1"
try:
counter = 0
while True:
spoof(target_ip, gateway_ip)
spoof(gateway_ip, target_ip)
counter = counter + 2
print("\r[+] Packets sent: " + str(counter)),
sys.stdout.flush()
time.sleep(2)
except KeyboardInterrupt:
print("\n[+] Detected CTRL + C ... Resetting ARP tables ... Please wait.\n")
restore(target_ip, gateway_ip)
restore(gateway_ip, target_ip)
| [
"[email protected]"
] | |
9dd25c3a195a803e69626ee77d0394f1d40fc0a5 | ff41e6f4559344143306b564f620aa0b3e96e85e | /venv/Scripts/pip3-script.py | fddcf460b92e381a0c46452f3b396f02aef6b5a0 | [] | no_license | xiongjianguo12138/23_designMode | 5c3ed42c75f0662014a8be084af31df2b58d0871 | b05188328e418cf30dd9f3d5ebd71880ed52f0cf | refs/heads/master | 2022-10-11T09:24:36.224052 | 2020-06-09T09:31:12 | 2020-06-09T09:31:12 | 270,936,974 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 414 | py | #!C:\NLP\23种设计模式python版本\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
9d9cc136d3e694df1c96895abfb88c968265ad96 | d7a4701e18be0f38820f5c15d80099fda6385f9f | /ABC103/C.py | a6bbebcf304df245df79b6b1970d55da63b3c38c | [] | no_license | shiki7/Atcoder | 979a6f0eeb65f3704ea20a949940a0d5e3434579 | c215c02d3bfe1e9d68846095b1bd706bd4557dd0 | refs/heads/master | 2022-05-21T16:59:01.529489 | 2022-04-29T11:26:42 | 2022-04-29T11:26:42 | 201,536,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | import fractions
N = int(input())
a = list(map(int, input().split()))
# least common multiple (LCM)
lcm = a[0]
for i in range(1, N):
lcm = lcm * a[i] // fractions.gcd(lcm, a[i])
total = 0
for i in range(N):
total += (lcm - 1) % a[i]
print(total)
| [
"[email protected]"
] | |
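# --- Added example (not part of the original file above) ---------------------
# fractions.gcd was deprecated and removed in Python 3.9; on modern Python the
# same LCM fold can be written with math.gcd (or math.lcm on 3.9+).
import math
from functools import reduce

def lcm_all(values):
    return reduce(lambda acc, v: acc * v // math.gcd(acc, v), values)

print(lcm_all([2, 3, 4]))  # -> 12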
29d7ffdf4a998f64def4b939c701236cfe7f594e | 14ed14241eb5baba1f4702a4f79f3ff37c9ee86d | /ppa/editorial/migrations/0001_initial.py | 02cf0d4f0c16a04245f1f18f14cdb7111da373a8 | [
"Apache-2.0"
] | permissive | vineetbansal/ppa-django | 491ac7916a4060558c2f4d6d7ecafc0079d8091a | 67469f63195981609aed8ca5247e7c04b23a5236 | refs/heads/master | 2020-05-25T06:52:31.859162 | 2019-05-09T15:44:39 | 2019-05-09T15:44:39 | 187,672,561 | 1 | 0 | null | 2019-05-20T15:59:44 | 2019-05-20T15:59:44 | null | UTF-8 | Python | false | false | 1,661 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-11-01 19:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0040_page_draft_title'),
]
operations = [
migrations.CreateModel(
name='EditorialIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EditorialPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('date', models.DateField(verbose_name='Post date')),
('body', wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.RichTextBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())])),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
| [
"[email protected]"
] | |
cfbb5ed150549c9c2ccea6f9d1dd5a710af86a7e | def900e7c5cfd534a19aff3411fe350e3e783e48 | /manage.py | 76baf737338721c8745c91abe4e6000e40d864be | [] | no_license | mishi1605/Ps_Project | add0cf2dc5ba66a5c313d3f24253098b5091fce7 | 40f9cd7c5d3838380e1bd8894d78f4292212f9b1 | refs/heads/master | 2022-12-22T07:15:50.858507 | 2020-09-29T09:35:42 | 2020-09-29T09:35:42 | 298,335,687 | 0 | 4 | null | 2020-09-29T09:35:43 | 2020-09-24T16:32:10 | Python | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "linux_Ps.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
2d49765703150d90fa46324333407915d716c47a | 487ed703bdfa86218de81490ac2829618627df8c | /CPUCode/example_time.py | 40fb5abe6d7cbb3f479e66ad086c6099dcd8dd92 | [] | no_license | thesps/MaxBDT | 9b10cbc334db49794e5fdab7cb14e8bd20101a8e | ea2e459009fb2ec59cf49876e004eff8e225368e | refs/heads/master | 2021-01-20T07:37:58.146267 | 2017-06-15T11:29:58 | 2017-06-15T11:29:58 | 90,022,434 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | import timeit
setup='''
from sklearn.datasets import make_moons
from sklearn.externals import joblib
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
import bitstring as bs
import numpy as np
from DFEBDTGalava import DFEBDTGalava
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from itertools import chain, izip
X, y = make_moons(noise=0.3, random_state=0)
bdt = joblib.load('bdt.pkl')
X_train, X_test, y_train, y_test =\
train_test_split(X, y, test_size=.4, random_state=42)
# Make a mesh of features
n = 128
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
dx = (x_max - x_min) / n
dy = (y_max - y_min) / n
xx, yy = np.meshgrid(np.arange(x_min, x_max, dx),
np.arange(y_min, y_max, dy))
dfedata = (np.array(list(chain.from_iterable(izip(xx.ravel(), yy.ravel())))) * 2**24).astype('int').tolist()
'''
# Convert features to fixed point format and run on DFE
print timeit.timeit("DFEBDTGalava(n * n, dfedata)", setup=setup, number=1)
# Run on CPU
print timeit.timeit('bdt.decision_function(np.c_[xx.ravel(), yy.ravel()])', setup=setup, number=1)
#Z_CPU = Z_CPU.reshape(xx.shape)
| [
"[email protected]"
] | |
10df5d952139b06e5b3632ceab4ce66129d476a5 | fdcd1058df2e42ce9a6c7a38b76757997f53cb2a | /mute/widget/window.py | 7fd3d172be4eabe8ee46965a1f4eb52aefaebca4 | [
"MIT"
] | permissive | LZJ861124/mute | afb12d516ae1a4106079b51999dd0aa484618b07 | f278d9cd2e9c1a4551d5ecdffde919d22ab2f6bb | refs/heads/master | 2020-05-26T09:41:57.238217 | 2019-05-23T08:19:16 | 2019-05-23T08:19:16 | 188,191,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,515 | py |
from __future__ import annotations
from const.color import Color
from container.container import Container
from event.event import Event
from viewport.viewport import Viewport
from logcat.logcat import LogCat
class Window(Container):
@LogCat.log_func
def __init__(
        self, x: int, y: int, width: int, height: int, caption: str = None
):
super().__init__(x, y, width, height)
self._caption = caption
self._focus = None
self._win = Viewport(width, height)
self._win.move_to(x, y)
self._modal = False
self.set_background(Color.TEXT)
self.on(Event.CLICK, self._on_click)
self.on(Event.KEY_PRESSED, self._on_key_pressed)
self.on(Event.PAINT, self._on_paint)
@LogCat.log_func
def move(self, off_x: int, off_y: int) -> Window:
        self._win.move(off_x, off_y)
        return self
@LogCat.log_func
def set_background(self, color) -> Window:
self._win.set_background(color)
return self
@LogCat.log_func
def set_content(self, content) -> Window:
self._content = content
return self
@LogCat.log_func
def set_caption(self, caption: str) -> Window:
self._caption = caption
return self
@LogCat.log_func
def _on_any(self, e: Event) -> None:
if self._focus:
self._focus.on_event(e)
@LogCat.log_func
def _on_click(self, e: Event, x: int, y: int) -> bool:
for element in self.elements:
if element.contains(x - self.x, y - self.y):
Event.trigger(
Event(Event.CLICK, element, x=x, y=y)
)
if element.focusable:
self._focus = element
break
return False
@LogCat.log_func
def _on_key_pressed(self, e: Event, key: str) -> None:
if self._focus:
self._focus.on_event(e)
@LogCat.log_func
def _on_paint(self, e: Event) -> None:
(
self._win
.border()
.print_text(1, 0, f'┨ {self._caption} ┠')
.refresh()
)
for element in self.elements:
element.paint(self._win)
if self._focus:
self._focus.paint(self._win)
@property
    def focused(self) -> bool:
        # `_focused` is never assigned anywhere in the class; derive the state
        # from `_focus`, which __init__ does initialise.
        return self._focus is not None
@property
def modal(self) -> bool:
return self._modal
@property
def win(self) -> Viewport:
return self._win
# window.py
| [
"[email protected]"
] | |
228ccebb01f96ce3f3a82bbfb49168f6ae0a1d89 | 592498a0e22897dcc460c165b4c330b94808b714 | /1000번/1931_회의실 배정(list index).py | 306276f60d04bd7a71fc07003a9d4cca7c1c092d | [] | no_license | atom015/py_boj | abb3850469b39d0004f996e04aa7aa449b71b1d6 | 42b737c7c9d7ec59d8abedf2918e4ab4c86cb01d | refs/heads/master | 2022-12-18T08:14:51.277802 | 2020-09-24T15:44:52 | 2020-09-24T15:44:52 | 179,933,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | n = int(input())
arr = []
for i in range(n):
arr.append(list(map(int,input().split())))
arr.sort(key=lambda x:(x[1],x[0]))
cnt = 1
e = arr[0][1]
for i in range(1,n):
if e <= arr[i][0]:
e = arr[i][1]
cnt += 1
print(cnt)
| [
"[email protected]"
] | |
917631a72394423d02550f505bf79edf580b3671 | 919b0ed2859ae07766d8858f29e2d1a28de5156e | /Master/IoT/IoT_Project/collector/coap_show_data.py | 06aedfee9ba1b98a967395ee153dd32d5912a0ab | [] | no_license | ImBadnick/University | 0b94f370e5a24e35b2c428d80e94684eea5141ce | a2da8900be57a63abfa2ab7b1ef83d5da674e225 | refs/heads/main | 2023-06-26T18:04:47.057041 | 2023-06-19T10:40:20 | 2023-06-19T10:40:20 | 322,025,101 | 18 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | from database import Database
import tabulate
import time
def show_coap_data():
db = Database()
connection = db.connect_db()
    while True:
try:
with connection.cursor() as new_cursor:
sql = "SELECT * FROM `coap`"
new_cursor.execute(sql)
results = new_cursor.fetchall()
header = results[0].keys()
rows = [x.values() for x in results]
print(tabulate.tabulate(rows, header, tablefmt='grid'))
print("\n\n\n")
print("----------------------------------------------------------------------------------------------------------")
print("\n\n\n")
time.sleep(5)
except KeyboardInterrupt:
break
if __name__ == '__main__':
show_coap_data() | [
"[email protected]"
] | |
e1f219988d543635c72c5833f63c6c07c035bffe | b0fa6339bc422c1fcf84d9bfff054d56c793ef4a | /core/migrations/0005_auto_20210716_1638.py | 6f41171e0fccf878a130376d50b10fe4659723fd | [] | no_license | Hasmandeep/Ecommerce_Website | f9079df7e9560816a7779794360f5723b7c6f610 | d30e4b722b6ac8c0f6a18886f298401a8478fbf3 | refs/heads/main | 2023-06-17T06:12:41.134463 | 2021-07-17T08:21:26 | 2021-07-17T08:21:26 | 378,190,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.1 on 2021-07-16 11:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20210716_1628'),
]
operations = [
migrations.AlterField(
model_name='item',
name='image',
field=models.ImageField(upload_to=''),
),
]
| [
"[email protected]"
] | |
2cf0f4a3cf555d0409849d7937cf0bf4e4155f09 | 91d2e84b7f569e7338ade987e241fc0b0ae62776 | /App_Order/urls.py | 41c6ba0f8abc36a0a9651b4f1800a8030f389d33 | [] | no_license | Zahed75/Ecommerce_Project | 1965d61b0a84ebbd6d23ccdd0d557c1cc9416195 | a9adcf9458c48f769507ac22f8e20d24c64ea244 | refs/heads/main | 2023-04-17T10:03:45.528529 | 2021-05-06T13:24:25 | 2021-05-06T13:24:25 | 364,914,248 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | from django.urls import path
from App_Order import views
app_name='App_Order'
urlpatterns = [
path('add/<pk>/', views.add_to_cart, name="add"),
path('remove/<pk>/', views.remove_from_cart, name="remove"),
path('cart/', views.cart_view, name="cart"),
path('increase/<pk>/', views.increase_cart, name="increase"),
path('decrease/<pk>/', views.decrease_cart, name="decrease"),
]
| [
"[email protected]"
] | |
9cf1e1dd38cfbb716cd9050a494f1b58d3d05316 | 4d84947c6bdbf94e4f50b5da6e232bbd02ae8fc3 | /rmaker_admin_lib/exceptions.py | 62bbe4595b465fa61e6f7e63ec31224e169d26b4 | [] | no_license | isabella232/esp-rainmaker-admin-cli | c7921fe4bbc2f47ec8aee25b54c10b2e54e200a4 | 818eb68f269c73db9b687ca7b7027a076ea62997 | refs/heads/master | 2023-08-20T12:27:34.439293 | 2021-10-28T06:26:28 | 2021-10-28T06:26:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | # Copyright 2020 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NetworkError(Exception):
""" Raised when internet connection is not available """
def __str__(self):
return ('Could not connect. '
'Please check your Internet connection.')
class RequestTimeoutError(Exception):
""" Raised when HTTP Request times out """
def __str__(self):
return ('HTTP Request timed out. '
'Please check your Internet connection.')
class InvalidJSONError(Exception):
""" Raised for invalid JSON input """
def __str__(self):
return 'Invalid JSON received.'
class SSLError(Exception):
""" Raised when invalid SSL certificate is passed """
def __str__(self):
return 'Unable to verify SSL certificate.'
class FileError(Exception):
""" Raised when an error occurred during file operations """
def __init__(self, err_str):
self.err_str = err_str
def __str__(self):
return '{}'.format(self.err_str)
class CLIError(Exception):
""" Raised when an error occurred during cli operations """
def __init__(self, err_str):
self.err_str = err_str
def __str__(self):
return '{}'.format(self.err_str)
class InvalidConfigError(Exception):
""" Raised for invalid configuration """
def __str__(self):
return 'Invalid configuration. Please login again.'
class InvalidApiVersionError(Exception):
""" Raised when current API version is not supported """
def __str__(self):
return 'API Version not supported. Please upgrade ESP Rainmaker CLI.'
class ExpiredSessionError(Exception):
""" Raised when user session expires """
def __str__(self):
return 'User session is expired. Please login again.'
| [
"[email protected]"
] | |
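# --- Added example (not part of the original file above) ---------------------
# A sketch of how these exception classes read at a call site: FileError and
# CLIError carry a message, the rest return canned text from __str__.
try:
    raise FileError('could not open config file')  # class defined above
except FileError as err:
    print(err)  # -> could not open config file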
8e2fda0d178cdd1a43694f501f42772972bc6262 | ff950f6c75d763d0b5a2af21dbac0e4c48daaec9 | /server/mediastream/migrations/0011_auto_20170428_1736.py | 34d6b34b2889258f5a738ce3f5ad406ce3d56540 | [
"BSD-3-Clause"
] | permissive | rogerbassons/media-stream | 24f860e264c6bbbc0dec8c4100feea7216c780c7 | 6ef4811d04a681015834d6e8d302fab1a76752cc | refs/heads/master | 2021-09-13T18:20:20.848428 | 2018-05-02T21:51:46 | 2018-05-02T21:51:46 | 86,855,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-28 17:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mediastream', '0010_auto_20170421_1938'),
]
operations = [
migrations.AddField(
model_name='video',
name='enabled',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='video',
name='numberviews',
field=models.BigIntegerField(default=0),
),
]
| [
"[email protected]"
] | |
12de3c32d1859fe9166894147a5851c47025412f | e2ffd61970da024cbb4e6fceb6588f75adfe030c | /authapp/models.py | 7e5a48cab23e9d580b9aa2db846cea1013af26ab | [] | no_license | MariaAfanaseva/Django | 20a46a2dc574d008b7033ecc66bcd4e276f193d8 | 81fb0b89dcec723c23f26f14927fc845f7d29dc0 | refs/heads/master | 2022-12-15T11:35:12.609063 | 2021-02-05T10:01:17 | 2021-02-05T10:01:17 | 177,652,266 | 0 | 1 | null | 2022-12-08T05:35:10 | 2019-03-25T19:36:58 | JavaScript | UTF-8 | Python | false | false | 1,923 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.timezone import now
from datetime import timedelta
from django.db.models.signals import post_save
from django.dispatch import receiver
from shop.storage_backends import MediaStorage
def get_activation_key_time():
return now() + timedelta(hours=48)
class ShopUser(AbstractUser):
avatar = models.ImageField(upload_to='users_avatars', storage=MediaStorage(),
blank=True)
age = models.PositiveIntegerField(verbose_name='age', null=True)
is_active = models.BooleanField(verbose_name='Active', default=True)
email = models.EmailField(verbose_name='Email', unique=True)
class UserActivation(models.Model):
user = models.OneToOneField(ShopUser, on_delete=models.CASCADE, primary_key=True)
activation_key = models.CharField(max_length=128, blank=True)
activation_key_expires = models.DateTimeField(default=get_activation_key_time)
def is_activation_key_expired(self):
if now() <= self.activation_key_expires:
return False
else:
return True
class ShopUserProfile(models.Model):
MALE = 'M'
FEMALE = 'W'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
)
user = models.OneToOneField(ShopUser, primary_key=True, on_delete=models.CASCADE)
tags = models.CharField(verbose_name='Tags', max_length=128, blank=True)
aboutMe = models.TextField(verbose_name='About You', max_length=512, blank=True)
gender = models.CharField(verbose_name='Gender', max_length=1, choices=GENDER_CHOICES, blank=True)
@receiver(post_save, sender=ShopUser)
def create_user_profile(sender, instance, created, **kwargs):
if created:
ShopUserProfile.objects.create(user=instance) # create form
else:
instance.shopuserprofile.save() # save form
| [
"[email protected]"
] | |
e815820b5d7c7a3f4883a3e197d90a96fb251308 | c9b2336619951fbf560c19d50797ac28dd223a7c | /curiosity/curiosity/wsgi.py | e757b3d8d7227071c822101ae69ce47d7457e907 | [] | no_license | rizplate/intuity | 13be80168e1cd70a6585d11d4b03b0c5353a9534 | daf033e48856f762c4e72912d9c96bdcc1239af1 | refs/heads/master | 2020-03-27T20:19:37.778175 | 2018-08-29T07:08:51 | 2018-08-29T07:08:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for curiosity project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "curiosity.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
1015efbebeea9f7d4049a8bd14345475165128cd | 1bb53eaa0a04cdd2c6488fad91abcc856d76b6e4 | /test/part2_test.py | 99ff02a1e24195103a48f48a8b5a7941fbefd763 | [] | no_license | Apurva-Rachala/Bright-Network-Googletask | df05f5be6744b4c445af094d0f4fd3a75f0c74ca | bdfea068ec3d652e48f635d174619cf5a2ab2bfd | refs/heads/main | 2023-06-05T15:51:26.301300 | 2021-07-01T09:22:03 | 2021-07-01T09:22:03 | 381,894,539 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,524 | py | from src.video_player import VideoPlayer
def test_create_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_PLAYlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Successfully created new playlist: my_PLAYlist" in lines[0]
def test_create_existing_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.create_playlist("my_COOL_PLAYLIST")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert ("Cannot create playlist: A playlist with the same name already "
"exists") in lines[1]
def test_add_to_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_COOL_playlist")
player.add_to_playlist("my_cool_PLAYLIST", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully created new playlist: my_COOL_playlist" in lines[0]
assert "Added video to my_cool_PLAYLIST: Amazing Cats" in lines[1]
def test_add_to_playlist_already_added(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 3
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Cannot add video to my_cool_playlist: Video already added" in lines[2]
def test_add_to_playlist_nonexistent_video(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.add_to_playlist("my_cool_playlist", "some_other_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 3
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Cannot add video to my_cool_playlist: Video does not exist" in lines[2]
def test_add_to_playlist_nonexistent_playlist(capfd):
player = VideoPlayer()
player.add_to_playlist("another_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot add video to another_playlist: Playlist does not exist" in lines[0]
def test_add_to_playlist_nonexistent_playlist_nonexistent_video(capfd):
player = VideoPlayer()
player.add_to_playlist("another_playlist", "does_not_exist_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot add video to another_playlist: Playlist does not exist" in lines[0]
def test_show_all_playlists_no_playlists_exist(capfd):
player = VideoPlayer()
player.show_all_playlists()
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "No playlists exist yet" in lines[0]
def test_show_all_playlists(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playLIST")
player.create_playlist("anotheR_playlist")
player.show_all_playlists()
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 5
assert "Showing all playlists:" in lines[2]
assert "anotheR_playlist" in lines[3]
assert "my_cool_playLIST" in lines[4]
def test_show_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.show_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.show_playlist("my_COOL_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 6
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Showing playlist: my_cool_playlist" in lines[1]
assert "No videos here yet" in lines[2]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[3]
assert "Showing playlist: my_COOL_playlist" in lines[4]
assert "Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[5]
def test_remove_from_playlist_then_re_add(capfd):
player = VideoPlayer()
player.create_playlist("MY_playlist")
player.add_to_playlist("my_playlist", "amazing_cats_video_id")
player.add_to_playlist("my_playlist", "life_at_google_video_id")
player.remove_from_playlist("my_playlist", "amazing_cats_video_id")
player.add_to_playlist("my_playlist", "amazing_cats_video_id")
player.show_playlist("my_playLIST")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 8
assert "Showing playlist: my_playLIST" in lines[5]
assert "Life at Google (life_at_google_video_id) [#google #career]" in lines[6]
assert "Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[7]
def test_show_playlist_nonexistent_playlist(capfd):
player = VideoPlayer()
player.show_playlist("another_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot show playlist another_playlist: Playlist does not exist" in lines[0]
def test_remove_from_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.remove_from_playlist("my_COOL_playlist", "amazing_cats_video_id")
player.remove_from_playlist("my_cool_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 4
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Removed video from my_COOL_playlist: Amazing Cats" in lines[2]
assert "Cannot remove video from my_cool_playlist: Video is not in playlist" in lines[3]
def test_remove_from_playlist_video_is_not_in_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.remove_from_playlist("my_cool_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Cannot remove video from my_cool_playlist: Video is not in playlist" in lines[1]
def test_remove_from_playlist_nonexistent_video(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.remove_from_playlist("my_cool_playlist", "some_other_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 3
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Cannot remove video from my_cool_playlist: Video does not exist" in lines[2]
def test_remove_from_playlist_nonexistent_playlist(capfd):
player = VideoPlayer()
player.remove_from_playlist("my_cool_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot remove video from my_cool_playlist: Playlist does not exist" in lines[0]
def test_clear_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.show_playlist("my_cool_playlist")
player.clear_playlist("my_COOL_playlist")
player.show_playlist("my_cool_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 7
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Showing playlist: my_cool_playlist" in lines[2]
assert "Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[3]
assert "Successfully removed all videos from my_COOL_playlist" in lines[4]
assert "Showing playlist: my_cool_playlist" in lines[5]
assert "No videos here yet" in lines[6]
def test_clear_playlist_nonexistent(capfd):
player = VideoPlayer()
player.clear_playlist("my_cool_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot clear playlist my_cool_playlist: Playlist does not exist" in lines[0]
def test_delete_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.delete_playlist("my_cool_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Deleted playlist: my_cool_playlist" in lines[1]
def test_delete_playlist_nonexistent(capfd):
player = VideoPlayer()
player.delete_playlist("my_cool_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot delete playlist my_cool_playlist: Playlist does not exist" in lines[0]
| [
"[email protected]"
] | |
e9daf124f0d89650306918eeb692c4acb3851954 | 99e61f9f6ac04475178d7a8fd6e10c7f05a775c5 | /cadccutout/cadccutout/tests/test_core.py | 6c2f3f232814e3d858935e5259aca08fbf136e4d | [] | no_license | ijiraq/cadctools | 96a6bd30195a5d18da698e6cd80dc0088e1cdd55 | 6dbea10fc540a6f701c97f6c39bf751fe9c0a205 | refs/heads/master | 2021-06-20T06:21:01.572774 | 2020-11-22T03:03:03 | 2020-11-22T03:03:03 | 184,329,528 | 0 | 0 | null | 2021-02-25T01:15:07 | 2019-04-30T20:48:02 | Python | UTF-8 | Python | false | false | 7,102 | py | # -*- coding: utf-8 -*-
# ***********************************************************************
# ****************** CANADIAN ASTRONOMY DATA CENTRE *******************
# ************* CENTRE CANADIEN DE DONNÉES ASTRONOMIQUES **************
#
# (c) 2018. (c) 2018.
# Government of Canada Gouvernement du Canada
# National Research Council Conseil national de recherches
# Ottawa, Canada, K1A 0R6 Ottawa, Canada, K1A 0R6
# All rights reserved Tous droits réservés
#
# NRC disclaims any warranties, Le CNRC dénie toute garantie
# expressed, implied, or énoncée, implicite ou légale,
# statutory, of any kind with de quelque nature que ce
# respect to the software, soit, concernant le logiciel,
# including without limitation y compris sans restriction
# any warranty of merchantability toute garantie de valeur
# or fitness for a particular marchande ou de pertinence
# purpose. NRC shall not be pour un usage particulier.
# liable in any event for any Le CNRC ne pourra en aucun cas
# damages, whether direct or être tenu responsable de tout
# indirect, special or general, dommage, direct ou indirect,
# consequential or incidental, particulier ou général,
# arising from the use of the accessoire ou fortuit, résultant
# software. Neither the name de l'utilisation du logiciel. Ni
# of the National Research le nom du Conseil National de
# Council of Canada nor the Recherches du Canada ni les noms
# names of its contributors may de ses participants ne peuvent
# be used to endorse or promote être utilisés pour approuver ou
# products derived from this promouvoir les produits dérivés
# software without specific prior de ce logiciel sans autorisation
# written permission. préalable et particulière
# par écrit.
#
# This file is part of the Ce fichier fait partie du projet
# OpenCADC project. OpenCADC.
#
# OpenCADC is free software: OpenCADC est un logiciel libre ;
# you can redistribute it and/or vous pouvez le redistribuer ou le
# modify it under the terms of modifier suivant les termes de
# the GNU Affero General Public la “GNU Affero General Public
# License as published by the License” telle que publiée
# Free Software Foundation, par la Free Software Foundation
# either version 3 of the : soit la version 3 de cette
# License, or (at your option) licence, soit (à votre gré)
# any later version. toute version ultérieure.
#
# OpenCADC is distributed in the OpenCADC est distribué
# hope that it will be useful, dans l’espoir qu’il vous
# but WITHOUT ANY WARRANTY; sera utile, mais SANS AUCUNE
# without even the implied GARANTIE : sans même la garantie
# warranty of MERCHANTABILITY implicite de COMMERCIALISABILITÉ
# or FITNESS FOR A PARTICULAR ni d’ADÉQUATION À UN OBJECTIF
# PURPOSE. See the GNU Affero PARTICULIER. Consultez la Licence
# General Public License for Générale Publique GNU AfferoF
# more details. pour plus de détails.
#
# You should have received Vous devriez avoir reçu une
# a copy of the GNU Affero copie de la Licence Générale
# General Public License along Publique GNU Affero avec
# with OpenCADC. If not, see OpenCADC ; si ce n’est
# <http://www.gnu.org/licenses/>. pas le cas, consultez :
# <http://www.gnu.org/licenses/>.
#
# $Revision: 1 $
#
# ***********************************************************************
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import logging
import pytest
from cadccutout.core import OpenCADCCutout, WriteOnlyStream
from cadccutout.pixel_cutout_hdu import PixelCutoutHDU
# Compatibility with Python 2.7, where there is no FileNotFoundError.
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
logger = logging.getLogger('cadccutout')
logger.setLevel(logging.DEBUG)
def test__parse_input():
test_subject = OpenCADCCutout()
inputs = ['[9][100:1000]']
results = test_subject._parse_input(inputs)
pixel_cutout = results[0]
assert pixel_cutout.get_extension() == 9, 'Wrong extension found.'
assert pixel_cutout.get_ranges() == [(100, 1000)], 'Wrong ranges found.'
inputs = ['[500:700][SCI,8][40:58]']
results = test_subject._parse_input(inputs)
pixel_cutout1 = results[0]
pixel_cutout2 = results[1]
assert pixel_cutout1.get_extension() == 0, 'Wrong extension found for 1.'
assert pixel_cutout1.get_ranges() == [(500, 700)], \
'Wrong ranges found for 1.'
assert pixel_cutout2.get_extension() == ('SCI', 8), \
'Wrong extension found for SCI,8.'
assert pixel_cutout2.get_ranges() == [(40, 58)], \
'Wrong ranges found for 1.'
inputs = ['CIRCLE=88.0 115.0 0.5']
results = test_subject._parse_input(inputs)
assert results[0] == 'CIRCLE=88.0 115.0 0.5', 'Wrong WCS input.'
inputs = ['[AMP]']
results = test_subject._parse_input(inputs)
pixel_cutout = results[0]
assert pixel_cutout.get_extension() == ('AMP', 1), 'Wrong extension found.'
def test__sanity_check_input():
test_subject = OpenCADCCutout()
input = '[9][100:1000]'
sanity_input = test_subject._sanity_check_input(input)
assert isinstance(sanity_input, list), 'Should be list'
with pytest.raises(ValueError) as ve:
test_subject._sanity_check_input(('bad', 'tuple'))
assert ('{}'.format(ve) ==
'Input is expected to be a string or list but was \
(u\'bad\', u\'tuple\')') or ('{}'.format(ve) ==
'Input is expected to be a string or list but was \
(\'bad\', \'tuple\')'), \
'Wrong error message.'
def test_write_stream():
output = io.BytesIO()
test_subject = WriteOnlyStream(output)
with pytest.raises(ValueError):
test_subject.read()
assert test_subject.tell() == 0, 'Nothing written yet, should be zero.'
test_subject.write(b'You have been recruied by the Star League to defend \
the frontier against Xur and the Kodhan Armada.')
assert test_subject.tell() == 111, 'Message written.'
def test_construct():
test_subject = OpenCADCCutout()
with pytest.raises(ValueError) as ve:
test_subject.cutout([])
assert str(ve.value) == 'No Cutout regions specified.', \
'Wrong error message.'
with pytest.raises(FileNotFoundError):
test_subject.cutout([PixelCutoutHDU([(8, 10)])],
input_reader=open('/no/such/file'))
| [
"[email protected]"
] | |
223c74d91cb5f952d691d5651a076036d79b9123 | b0b43e25501240086cad821b34fd0917581d8ca0 | /covid/jhu.py | ab6bf79a4b198cebef7c4a17f44510affc885981 | [
"MIT"
] | permissive | mysterefrank/covid-test | 648212e608d2dce83ae50f2c6f5127107450c23b | 8651d9515376ca88e25b309c710c256bb22804d9 | refs/heads/master | 2022-12-18T19:16:45.940111 | 2020-09-25T15:20:55 | 2020-09-25T15:20:55 | 298,607,436 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,530 | py | import pandas as pd
import cachetools.func
import warnings
from . import states
@cachetools.func.ttl_cache(ttl=600)
def load_and_massage(url):
df = pd.read_csv(url)
df = df.drop(columns=['Lat', 'Long'])
df = df.rename(columns={'Province/State' : 'province', 'Country/Region' : 'country'})
df.province = df.province.replace(states.abbrev)
df.province = df.province.fillna('tot')
df = df.set_index(['country', 'province'])
df = df.T
df.index = pd.to_datetime(df.index)
return df
@cachetools.func.ttl_cache(ttl=600)
def load_world():
sources = {
'confirmed' : 'https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv',
'death' : 'https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
}
# Load each data file into a dataframe with row index = date, and column index = (country, province)
d = {key: load_and_massage(url) for key, url in sources.items()}
# Concatenate data frames: column index is now (variable, country, province)
df = pd.concat(d.values(), axis=1, keys=d.keys())
# Permute order of index to (country, province, variable) and sort the columns by the index value
df = df.reorder_levels([1,2,0], axis=1).sort_index(axis=1)
return df
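# usage sketch (illustrative; needs network access to the JHU CSVs):
#   >>> world = load_world()
#   >>> world['France']['tot']['confirmed'].tail()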
@cachetools.func.ttl_cache(ttl=600)
def get_fips_codes():
'''Get valid FIPS codes from covid19forecasthub'''
url = 'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master/data-locations/locations.csv'
df = pd.read_csv(url)
fips_codes = df['location']
fips_codes = fips_codes.loc[fips_codes != 'US'].astype(int)
return fips_codes
def filter_counties(df):
'''Filter rows from JHU data schema to counties represented in forecast hub'''
fips_codes = get_fips_codes()
# exclude_counties = ['Kings, New York, US',
# 'Queens, New York, US',
# 'Bronx, New York, US',
# 'Richmond, New York, US']
exclude_counties = []
# Subset to locations:
# (1) in US,
# (2) with county name,
# (3) with FIPS code recognized by forecast hub
# (4) not in list of NYC counties with no data on JHU
df = df.loc[(df['iso2']=='US') & (df['Admin2']) & (df['FIPS'])].copy()
df['FIPS'] = df['FIPS'].astype(int)
df = df.loc[df['FIPS'].isin(fips_codes)].copy()
df = df.loc[~df['Combined_Key'].isin(exclude_counties)].copy()
return df
@cachetools.func.ttl_cache(ttl=600)
def get_county_info():
'''Get state info from JHU location lookup file'''
url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv'
df = pd.read_csv(url)
df = filter_counties(df)
# Add county and state columns, and set key to <state abbrev>-<county name>
df['name'] = df['Admin2']
df['state'] = df['Province_State'].replace(states.abbrev)
df['key'] = df['state'] + '-' + df['name']
df = df.set_index('key')
return df
@cachetools.func.ttl_cache(ttl=600)
def get_state_info():
'''Get state info from JHU location lookup file'''
url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv'
df = pd.read_csv(url)
df = df.loc[df['FIPS'] <= 78].copy()
df['name'] = df['Province_State']
df['key'] = df['Province_State'].replace(states.abbrev)
df = df.set_index('key')
return df
@cachetools.func.ttl_cache(ttl=600)
def load_us(counties=False):
baseURL = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
def load_us_time_series(file):
'''Load data in JHU US time series format (death or confirmed)'''
df = pd.read_csv(baseURL + file)
meta_cols = ['UID',
'Lat',
'Long_',
'iso2',
'iso3',
'code3',
'FIPS',
'Admin2',
'Province_State',
'Country_Region',
'Combined_Key',
'Population']
meta_cols = [c for c in meta_cols if c in df.columns]
if counties:
# subset to valid counties, set index to <state abbrev>-<county> and drop other metadata columns
df = filter_counties(df)
state = df['Province_State'].replace(states.abbrev)
county = df['Admin2']
#county = county.replace({'New York': 'New York City'}) # correct inconsistency with metadata table
df = df.drop(columns=meta_cols)
df = df.set_index(state + '-' + county)
else:
# group by state
df['state'] = df['Province_State'].replace(states.abbrev)
df = df.drop(columns=meta_cols).groupby('state').sum()
df = df.T
df.index = pd.to_datetime(df.index)
return df
confirmed = load_us_time_series("time_series_covid19_confirmed_US.csv")
deaths = load_us_time_series("time_series_covid19_deaths_US.csv")
# Combine deaths and confirmed
df = pd.concat([deaths,confirmed],axis=1,keys=('death','confirmed'))
df = df.reorder_levels([1,0], axis=1).sort_index(axis=1)
return(df) | [
"[email protected]"
] | |
e40c7ed10e2b7d22d766e593e829ebe54b749b93 | 07e429888e0e564b2d6a8b55180fd117038bc268 | /Probability-Basics-/code.py | 0360c0140c70d0bdf03640be5260ccfed050a9d2 | [
"MIT"
] | permissive | Varun0801/ga-learner-dsb-repo | 21803c331c886f060edc803be9e33e88d236bd86 | 45ce5ae273ca66a2443d4b9417e74ef0f872d8ec | refs/heads/master | 2023-02-04T09:58:18.310189 | 2020-12-25T12:02:55 | 2020-12-25T12:02:55 | 255,108,538 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | # --------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# code starts here
df = pd.read_csv(path)
#print(df.info())
p_a = ((df['fico'] > 700).sum())/len(df)
print(p_a)
p_b = ((df['purpose'] == 'debt_consolidation').sum())/len(df)
print(p_b)
df1 = df[df['purpose']== 'debt_consolidation']
p_a_b = df1[df1['fico'].astype(float) >700].shape[0]/df1.shape[0]
print(p_a_b)
result = p_a_b == p_a
print(result)
# code ends here
# --------------
# code starts here
prob_lp = (df['paid.back.loan'] == 'Yes').sum()/len(df)
print(prob_lp)
prob_cs = (df['credit.policy'] == 'Yes').sum()/len(df)
print(prob_cs)
new_df = df[df['paid.back.loan'] == 'Yes']
prob_pd_cs = (new_df['credit.policy'] == 'Yes').sum()/len(new_df)
print(prob_pd_cs)
# Bayes' theorem: P(paid|credit) = P(credit|paid) * P(paid) / P(credit)
bayes = (prob_pd_cs*prob_lp)/prob_cs
print(bayes)
# code ends here
# --------------
# code starts here
# bar chart of loan purposes; value counts give meaningful bar heights
df['purpose'].value_counts().plot(kind='bar')
df1 = df[df['paid.back.loan'] == 'No']
df1['purpose'].value_counts().plot(kind='bar')
# code ends here
# --------------
# code starts here
inst_median = df['installment'].median()
print(inst_median)
inst_mean = df['installment'].mean()
print(inst_mean)
plt.hist(df['installment'])
plt.hist(df['log.annual.inc'])
# code ends here
| [
"[email protected]"
] | |
607cbcb23dfd9977fb1af1f2f5a04824532bcfd8 | 0f33bea55f5d1c4276d851c3697f7257fb9c5d60 | /Python/binary-sort.py | 770554c775ceea7b2c05ac5bafea0c719e2bfab4 | [] | no_license | Ankita909/fullstackDevelopment | 193a3efe72faf36f8e0ecd71efa4c858e0b432c2 | e5c1c216e6fa4381fc28f76ce661ec418fa72083 | refs/heads/main | 2023-08-27T12:11:30.392642 | 2021-10-01T13:38:50 | 2021-10-01T13:38:50 | 412,479,587 | 1 | 0 | null | 2021-10-01T13:35:34 | 2021-10-01T13:35:34 | null | UTF-8 | Python | false | false | 810 | py | # Python recursive binary search.
# Returns index of x in arr if present, else -1
def binarySearch (arr, l, r, x):
# Check base case
if r >= l:
mid = l + (r - l) // 2
# If element is present at the middle itself
if arr[mid] == x:
return mid
# If element is smaller than mid, then it
# can only be present in left subarray
elif arr[mid] > x:
return binarySearch(arr, l, mid-1, x)
# Else the element can only be present
# in right subarray
else:
return binarySearch(arr, mid + 1, r, x)
else:
# Element is not present in the array
return -1
# Driver Code
arr = [ 2, 3, 4, 10, 40 ]
x = 10
# Function call
result = binarySearch(arr, 0, len(arr)-1, x)
if result != -1:
print ("Element is present at index % d" % result)
else:
print ("Element is not present in array")
| [
"[email protected]"
] | |
32d8cc7336a534b7aa3d304be09170c44366afc1 | 0d492125e092f1fafb00e1ba24c0c50c5bd5e379 | /modle/dataGenerator.py | adde198810577582ba1f38def9db429f62af3529 | [] | no_license | ammarhamdy/DES | 13902bd7cb0647f0e0d05d3d1d95121c777895e8 | 447a0e9132de5c146a0215fc697ca71a86dcd55a | refs/heads/master | 2020-04-09T14:21:25.583452 | 2018-12-25T08:18:48 | 2018-12-25T08:18:48 | 160,394,985 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,976 | py | import csv
import os
def xor(a: str, b: str):
    """Bitwise XOR of two equal-length binary strings, e.g. xor('1010', '0110') -> '1100'."""
    a_xor_b = str()
for i in range(len(a)):
a_xor_b += str(int(a[i]) ^ int(b[i]))
return a_xor_b
class DataGenerator:
def __init__(self, e_path: str, s_box_dir_path: str, p_path: str):
# load bit selection matrix(8X6).
self.e = list(csv.reader(open(e_path)))
        # load the eight s-boxes (each 4X16) from the s-box directory.
        names: list = os.listdir(s_box_dir_path)
        self.s_boxes: list = \
            [list(csv.reader(open(s_box_dir_path + '\\' + name))) for name in names[:8]]
# load permutation matrix(8X4).
self.p = list(csv.reader(open(p_path)))
def expand(self, right: str):
"""apply matrix e; from 32bits return 48bits"""
expanded = str()
for sub_list in self.e:
for item in sub_list:
expanded += right[int(item) - 1]
return expanded
def reduction(self, bits: str):
"""split 48bits(8*6) in to 32bit(8*4), apply SBox to each block"""
reduction_bits = str()
for i in range(8):
block = bits[i * 6: (i + 1) * 6]
reduction_bits += str(bin(int(self.s_boxes[i][int(block[0] + block[5], 2)][int(block[1:5], 2)]))[2:]) \
.rjust(4, '0')
return reduction_bits
def mangler_function(self, right: str, key: str):
"""apply matrix P return 32bits"""
r_xor_k = self.reduction(xor(self.expand(right), key))
p_r_xor_k = str()
for sub_list in self.p:
for item in sub_list:
p_r_xor_k += r_xor_k[int(item) - 1]
return p_r_xor_k
def round16(self, left0: str, right0: str, key_generator):
"""return data left16, right16."""
l0, r0 = left0, right0
for i in range(16):
left = r0
right = xor(l0, self.mangler_function(r0, key_generator.next_sub_key()))
r0, l0 = right, left
return l0, r0
if __name__ == '__main__':
# test...
# dataGenerator = DataGenerator('..\\data\\bit selection.csv', '..\\data\\sbox', '..\\data\\P.csv')
# e = dataGenerator.expand('11110000101010101111000010101010')
# print(e == '011110100001010101010101011110100001010101010101')
# k = '000110110000001011101111111111000111000001110010'
# r = '011110100001010101010101011110100001010101010101'
# r_xor_k = xor(r, k)
# print(r_xor_k == '011000010001011110111010100001100110010100100111')
# reduction = dataGenerator.reduction_of('011000010001011110111010100001100110010100100111')
# print(reduction == '01011100100000101011010110010111')
# k1 = '000110110000001011101111111111000111000001110010'
# r0 = '11110000101010101111000010101010'
# mf = dataGenerator.mangler_function(r0, k1)
# print(mf, '\n', mf == '00100011010010101010100110111011')
# from keyGenerator import KeyGenerator
# keys = KeyGenerator('..\\data\\pc-1.csv', '..\\data\\pc-2.csv', '..\\data\\number of left shifts.csv').\
# sub_keys_of('0001001100110100010101110111100110011011101111001101111111110001')
# M = '0123456789ABCDEF'
# r0 = '11110000101010101111000010101010'
# l0 = '11001100000000001100110011111111'
# l16r16 = dataGenerator.round16_of(l0, r0, keys)
# print(l16r16[0] == '01000011010000100011001000110100',
# '\n', l16r16[1] == '00001010010011001101100110010101')
pass
| [
"[email protected]"
] | |
63fd2099a3ee63f20feb648b8aece53e9b92a953 | 6a0bdd7339aa8184b5b3938dd8746ef09f65ee7a | /hw1/matmult.py | d9bfdd98fa59026cba68233a8690b9e66132250e | [] | no_license | hiqbal97/CS314 | e375b16f644a6d5322a02447149fad5f41c1ceb4 | e34b77bbde92675157e5e3c9a5273889392dea79 | refs/heads/master | 2020-04-27T08:46:43.154693 | 2019-05-13T20:14:29 | 2019-05-13T20:14:29 | 174,185,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | #!/usr/bin/env python3
def main():
    # read the first matrix: dimensions as "rows cols", then one row per line
    dim1 = input()
size1 = [int(x) for x in dim1.split() if x.isdigit()]
mat1 = []
for i in range(size1[0]):
rows = input()
row = []
for num in rows.split():
row.append(float(num))
mat1.append(row)
dim2 = input()
size2 = [int(x) for x in dim2.split() if x.isdigit()]
mat2 = []
for i in range(size2[0]):
rows = input()
row = []
for num in rows.split():
row.append(float(num))
mat2.append(row)
    # multiplication requires cols(A) == rows(B)
    if size1[1] != size2[0]:
        print('invalid input')
else:
result_mat = []
for i in range(size1[0]):
rows = []
for j in range(size2[1]):
rows.append(0)
result_mat.append(rows)
for i in range(len(mat1)):
for j in range(len(mat2[0])):
for k in range(len(mat2)):
result_mat[i][j] += mat1[i][k] * mat2[k][j]
for i in range(size1[0]):
for j in range(size2[1]):
if j == (size2[1] - 1):
print(result_mat[i][j])
else:
print(result_mat[i][j], '', end = '')
if __name__ == '__main__':
main()
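# sample session (illustrative):
#   input : "2 2", "1 2", "3 4", "2 2", "5 6", "7 8"  (one line each)
#   output: "19.0 22.0" and "43.0 50.0"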
| [
"[email protected]"
] | |
8ab1edb2538a42d648a941c4c748eb675a5a3761 | 78c4a0b029ef1af4ac9ef90305eef85ef866ad35 | /gfx/processing/bresenham/bresenham.pyde | 8a899712315e147dc3d2a28dc23bf64a333e9908 | [
"MIT"
] | permissive | qeedquan/misc_utilities | 7727e33e01a9f45275e3efdd165c90053d8ba10a | e8319e6572dd92efceebb5a2d52a00cb993492b2 | refs/heads/master | 2023-08-08T13:24:18.213741 | 2023-08-07T00:26:03 | 2023-08-07T00:26:03 | 46,625,839 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 8,792 | pyde | # http://members.chello.at/~easyfilter/bresenham.html
from math import *
class Grid:
def __init__(self):
self.size = 32
self.x0, self.y0 = -1, -1
self.x1, self.y1 = -1, -1
def pix2cell(self, x, y):
return x/self.size, y/self.size
def point(self, x, y, err = None):
fill(34+x*2, 40+y*2, 50)
rect(x*self.size, y*self.size, self.size, self.size)
if err == None:
return
y += 1
fill(255)
str = "%d" % (err)
textSize(16)
text(str, x*self.size, y*self.size)
# derivation:
    # y = (y1-y0)/(x1-x0)*(x-x0) + y0
    # y(x1-x0) = (y1-y0)(x-x0) + y0(x1-x0)
# (y-y0)(x1-x0) - (y1-y0)(x-x0) = 0
# now we have an implicit equation that gives us 0
# whenever (x, y) is on the line, we can use this to calculate
# the error term
# e = (y-y0)(x1-x0) - (y1-y0)(x-x0)
# if we define dx = (x1-x0) and dy = (y1-y0)
# we can write it as
# e = (y-y0)dx - (x-x0)dy
    # when we move to the next point, there are 2 choices we can make:
    # either step in x only, or step in both x and y; x always increases
    # since we are marching along the line, so the decision is whether or
    # not to also increment y, and that depends on the error
# let e_x be the error if we just increase x and e_xy if we move in both x and y direction
# e_x = (y-y0)dx - (x+1-x0)dy
# e_xy = (y+1-y0)dx - (x+1-x0)dy
    # we can rewrite these in terms of the previous error
    # e_x = (y-y0)dx - (x-x0)dy - dy ->
    # e_x = (y-y0)dx - (x+1-x0)dy ->
    # e_x = e - dy
# e_xy = (y-y0)dx - (x-x0)dy + dx - dy ->
# e_xy = (y+1-y0)dx - (x+1-x0)dy ->
# e_xy = e + dx - dy
# dy is treated as negative in the code because y coordinate is flipped in window space
# the initial e0 is defined to be e_xy
# so we keep updating the error term when we decide to move in x or in xy direction
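    # worked example (illustrative): for the line (0,0) -> (5,3) the code
    # below starts with dx = 5, dy = -3, err = dx+dy = 2, then
    #   e2 = 4 -> step x and y -> (1,1), err = 4
    #   e2 = 8 -> step x only  -> (2,1), err = 1
    #   e2 = 2 -> step x and y -> (3,2), err = 3
    #   e2 = 6 -> step x only  -> (4,2), err = 0
    #   e2 = 0 -> step x and y -> (5,3), done
    # which is the expected rasterization of a slope 3/5 line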
def line(self, x0, y0, x1, y1):
dx = abs(x1-x0)
dy = -abs(y1-y0)
sx = 1 if x0 < x1 else -1
sy = 1 if y0 < y1 else -1
err = dx+dy
while True:
self.point(x0, y0, err)
if x0 == x1 and y0 == y1:
break
e2 = 2*err
if e2 >= dy:
err += dy
x0 += sx
if e2 <= dx:
err += dx
y0 += sy
def thickline(self, x0, y0, x1, y1, wd):
dx = abs(x1-x0)
dy = abs(y1-y0)
sx = 1 if x0 < x1 else -1
sy = 1 if y0 < y1 else -1
err = dx-dy
ed = 1 if dx+dy == 0 else sqrt(dx*dx*1.0+dy*dy*1.0)
wd = (wd+1)/2
while True:
self.point(x0, y0, err)
e2 = err
x2 = x0
if 2*e2 >= -dx:
e2 += dy
y2 = y0
while e2 < ed*wd and (y1 != y2 or dx > dy):
y2 += sy
                    # for anti-aliasing, set the color to max(0, 255*(abs(e2)/ed-wd+1))
                    # so the intensity fades toward the edges of the stroke
self.point(x0, y2, err)
e2 += dx
if x0 == x1:
break
e2 = err
err -= dy
x0 += sx
if 2*e2 <= dy:
e2 = dx-e2
while e2 < ed*wd and (x1 != x2 or dx < dy):
x2 += sx
self.point(x2, y0, err)
e2 += dy
if y0 == y1:
break
err += dx
y0 += sy
def ellipse(self, x0, y0, x1, y1):
# value of diameter
a = abs(x1-x0)
b = abs(y1-y0)
b1 = b&1
# error increment
dx = 4*(1-a)*b*b
dy = 4*(b1+1)*a*a
# error of 1 step
err = dx+dy+b1*a*a
if x0 > x1:
x0 = x1
x1 += a
if y0 > y1:
y0 = y1
# starting pixel
y0 += (b+1)/2
y1 = y0-b1
a *= 8*a
b1 = 8*b*b
while True:
# make it readable as error value is too large for nice printing
e = map(err, -100000, 100000, -50, 50)
self.point(x1, y0, e)
self.point(x0, y0, e)
self.point(x0, y1, e)
self.point(x1, y1, e)
e2 = 2*err
if e2 <= dy:
y0 += 1
y1 -= 1
dy += a
err += dy
if e2 >= dx or 2*err > dy:
x0 += 1
x1 -= 1
dx += b1
err += dx
if x0 > x1:
break
while y0-y1 < b:
self.point(x0-1, y0)
self.point(x1+1, y0)
self.point(x0-1, y1)
            self.point(x1+1, y1)
y0 += 1
y1 -= 1
def circle(self, xm, ym, r):
x = -r
y = 0
err = 2-2*r
while True:
self.point(xm-x, ym+y, err)
self.point(xm-y, ym-x, err)
self.point(xm+x, ym-y, err)
self.point(xm+y, ym+x, err)
r = err
if r <= y:
y += 1
err += y*2+1
if r > x or err > y:
x += 1
err += x*2+1
if x >= 0:
break
def quadbezier(self, x0, y0, x1, y1, x2, y2):
sx = x2 - x1
sy = y2 - y1
xx = x0 - x1
yy = y0 - y1
cur = xx*sy - yy*sx
if not (xx*sx <= 0 and yy*sy <= 0):
            print('bezier curve gradient non-monotonic: %s %s' % (xx*sx, yy*sy))
return
if sx*sx+sy*sy > xx*xx+yy*yy:
x2 = x0
x0 = sx+x1
y2 = y0
y0 = sy+y1
cur = -cur
if cur != 0:
xx += sx
sx = 1 if x0 < x2 else -1
xx *= sx
yy += sy
sy = 1 if y0 < y2 else -1
yy *= sy
xy = 2*xx*yy
xx *= xx
yy *= yy
if cur*sx*sy < 0:
xx = -xx
yy = -yy
xy = -xy
cur = -cur
            dx = 4.0*sy*cur*(x1-x0)+xx-xy
dy = 4.0*sx*cur*(y0-y1)+yy-xy
xx += xx
yy += yy
err = dx+dy+xy
while True:
self.point(x0, y0, err)
if x0 == x2 and y0 == y2:
return
y1 = 2*err < dx
if 2*err > dy:
x0 += sx
dx -= xy
dy += yy
err += dy
if y1:
y0 += sy
dy -= xy
dx += xx
err += dx
if dy >= dx:
break
self.line(x0, y0, x2, y2)
def draw(self):
s = self.size
y = 0
while y < height:
x = 0
while x < width:
fill(150, 150, 150)
rect(x, y, s, s)
x += s
y += s
textSize(24)
fill(255)
if mode == 0:
text('line', 32, 32)
self.line(self.x0, self.y0, self.x1, self.y1)
elif mode == 1:
text('thick line', 32, 32)
self.thickline(self.x0, self.y0, self.x1, self.y1, 5)
elif mode == 2:
text('ellipse', 32, 32)
self.ellipse(self.x0, self.y0, self.x1, self.y1)
elif mode == 3:
text('circle', 32, 32)
x0, x1 = self.x0, self.x1
y0, y1 = self.y0, self.y1
if x1 < x0:
x0, x1 = x1, x0
if y1 < y0:
y0, y1 = y1, y0
self.circle((x0+x1)/2, (y0+y1)/2, (x1-x0)/2)
elif mode == 4:
text('quadratic bezier', 32, 32)
x0, x1 = self.x0, self.x1
y0, y1 = self.y0, self.y1
if x1 < x0:
x1 = x0
elif y1 < y0:
y0 = y1
self.quadbezier(x0, y0, (x0+x1)/2, (y0+y1)/2, x1, y1)
grid = None
mode = 0
def setup():
global grid
size(1280, 800)
grid = Grid()
def draw():
background(100)
grid.draw()
def keyPressed(ev):
global mode
if '1' <= key and key <= '9':
mode = int(key) - int('1')
def mousePressed(ev):
global grid
if ev.button == LEFT:
grid.x0, grid.y0 = grid.pix2cell(mouseX, mouseY)
grid.x1, grid.y1 = grid.x0, grid.y0
def mouseDragged(ev):
global grid
if ev.button == LEFT:
grid.x1, grid.y1 = grid.pix2cell(mouseX, mouseY)
| [
"[email protected]"
] | |
eadb8151a5ab098a99a1a02b85799e34647561b3 | 8e34e03fc53fbc465e3de0758e98b5b7c0145b85 | /Listas y tuplas/Ejercicio3.py | 3c89bd326f0a3774e50e9ee15dfe186747f7e7cb | [] | no_license | gonrodri18/Python | 44ed944def671bdc44298f65f9dce0e8ff013b5d | b9653bd6357871ac1682a4eefdf6e36ace0b4ee5 | refs/heads/master | 2020-04-30T02:54:43.610378 | 2019-05-17T16:07:36 | 2019-05-17T16:07:36 | 176,573,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # Escribir un programa que almacene las asignaturas de un curso (por ejemplo Matemáticas, Física, Química, Historia y Lengua) en una lista, pregunte al usuario la nota que ha sacado en cada asignatura, y después las muestre por pantalla con el mensaje En <asignatura> has sacado <nota> donde <asignatura> es cada una des las asignaturas de la lista y <nota> cada una de las correspondientes notas introducidas por el usuario
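# (English translation of the Spanish exercise statement above: write a
# program that stores the subjects of a course in a list, asks the user for
# the grade obtained in each subject, and then prints the message
# "En <asignatura> has sacado <nota>" for each subject and its grade.)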
asignaturas = ['Mates' , 'Lengua' , 'Física']
notas = []
for i in asignaturas:
notas.append (input ('¿Qué has sacado en ' + i + '? '))
for i in range(len(notas)):
print ( 'En ' + asignaturas [i] + ' has sacado un ' + notas[i]) | [
"[email protected]"
] | |
8b5ad6ffa898b3ca103c34c25ef37affb51f8bc6 | 7400c30135003160794856a745addd17cec8a58e | /Keyence2020B/test_pasted_from_page.py | 7feb8b44afdb7cc83be6097799003b0525613220 | [
"MIT"
] | permissive | staguchi0703/ant_book_2-2_greedy2 | 4b9587335610df7cccaefd069e8effbe13b89e0f | c5fc527874768bf5ea0c584b3f9f52a85eed87e2 | refs/heads/master | 2022-10-26T17:54:48.988597 | 2020-06-16T15:03:57 | 2020-06-16T15:03:57 | 271,589,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | #
from resolve import resolve
####################################
####################################
# Paste the contents of the plugin below
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
print('------------')
print(out)
print('------------')
self.assertEqual(out, output)
def test_入力例_1(self):
input = """4
2 4
4 3
9 3
100 5"""
output = """3"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """2
8 20
1 10"""
output = """1"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """5
10 1
2 1
4 1
6 1
8 1"""
output = """5"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
ce4071b0a9e5f5c6e00ed63d6a9ac21489fc0c60 | 65f8211fc33eb5f9ac1ff0d68902226ca9a58692 | /sorting_algorithms/bucket_sort.py | d43e2eb31d9e0de39477a48b9806faaa210e89fe | [] | no_license | szarbartosz/asd-python | 46869f5699a1ef661e2df02e523af0adcddbbbda | 0130cc3dcbba6ad62e1516c98b5cbab85848d619 | refs/heads/master | 2022-12-13T19:02:53.699381 | 2020-09-11T13:29:31 | 2020-09-11T13:29:31 | 242,975,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | def insertion_sort(arr):
for i in range(1, len(arr)):
key = arr[i]
j = i - 1
while j >= 0 and arr[j] > key:
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = key
def bucket_sort(arr):
sections = len(arr)
buckets = []
for i in range(sections):
buckets.append([])
    # assumes the values lie in [0, 1): scale each value to pick its bucket
    for el in arr:
        index = int(el * sections)
buckets[index].append(el)
for i in range(len(buckets)):
insertion_sort(buckets[i])
k = 0
for i in range(len(buckets)):
for j in range(len(buckets[i])):
arr[k] = buckets[i][j]
k += 1
    print(buckets)  # debug: show the sorted buckets
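# e.g. for [0.12, 0.76, 0.31, 0.58] with 4 buckets, int(el * 4) scatters the
# values into [[0.12], [0.31], [0.58], [0.76]]; the gather loop then writes
# them back to arr in sorted order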
arr = [0.897, 0.565, 0.656, 0.1234, 0.665, 0.3434, 0.234, 0.513, 0.963, 0.123, 0.234, 0.043, 0.745, 0.514, 0.801, 0.734, 0.452, 0.401]
bucket_sort(arr)
print(arr)
| [
"[email protected]"
] | |
84742225c61ab01770a4bce455aa015a98af5633 | 1f593a82f731a7aa500bfb3a6b02f6785edac345 | /simi_w2v.py | 2733096d2dfd307ba1375198cd3a9f76328c1604 | [] | no_license | DanVane/Dtm_demo | 6b4aa0ae3ba5f0526452b252bf4bf4a3aeb1190d | b19a99ea30bd4c74845c8707705fe2c77d96f553 | refs/heads/master | 2021-01-01T15:43:36.426314 | 2017-07-19T07:32:11 | 2017-07-19T07:32:11 | 97,686,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | #coding=utf-8
from gensim.models import Word2Vec
import json
import re
import string
def load_data():
f=open("E:\\NetBeans\\dianziyisuo\\python\\data\\jichengdianlu1.txt",'rb')
lines = f.readlines()
# lines = re.sub(string.punctuation,"",lines)
# lines = re.sub(r'[0-9]+', ' ', lines)
all_lines=[]
for line in lines:
line = line.strip().lower()
line = re.sub(r'[0-9]+','',line)
        # str.replace would only match the whole punctuation string at once;
        # translate deletes each punctuation character individually
        line = line.translate(None, string.punctuation)
all_lines.append(line.split())
model = Word2Vec(all_lines, size=100, window=5, min_count=0, workers=4)
fname="w2v_models/w2v.model"
model.save(fname)
def cal_sim():
"""
do something ...
:return:
"""
fname = "w2v_models/w2v.model"
model = Word2Vec.load(fname)
for i in range(20):
filename = "results/jichengdianlu/topic_"+str(i)+"_words_with_time.json"
f=open(filename,'rb')
data= json.load(f)[0]
data = data['word']
all_data=[]
for y_data in data:
n_data =[word.split('*')[1] for word in y_data]
all_data.append(n_data)
first_words = []
second_words = []
        # compare each pair of consecutive time slices (10 slices assumed)
        for j in range(9):
result = dict([])
for w1 in all_data[j]:
for w2 in all_data[j+1]:
if w1.strip()==w2.strip():
sim=0
else:
try:
sim = model.wv.similarity(w2.strip(), w1.strip())
                    except Exception:
                        # out-of-vocabulary words get zero similarity
                        sim = 0
w = w1.strip()+':'+w2.strip()
result[w]=sim
            ll = sorted(result.iteritems(), key=lambda d: d[1], reverse=True)
first_words.append(ll[0])
second_words.append(ll[1])
sim_words = [
{'fist_words':first_words, 'second_words':second_words},
]
file_name = 'results/jichengdianlu/topic_%d_year_words_similarity.json' % i
with open(file_name, 'w') as f:
f.write(json.dumps(sim_words))
load_data()
cal_sim()
| [
"[email protected]"
] | |
2768ad34eabf9481540d2cdf79355f927c4a4b28 | 7f25234970f7a95d05c0266b03d9bb8eca9ab7c8 | /dockerwithgitlabsecrets/tests/test_wrapper.py | 2ed595f5f284269da686857f932b25e2298924a8 | [
"MIT"
] | permissive | wtsi-hgi/docker-with-gitlab-secrets | ae96f68be75e5e6bc5b241845373994f2bdb85c1 | 53ed8be52e201b80f9c92cad72213dadf0bd63e6 | refs/heads/master | 2021-01-20T17:39:51.607908 | 2019-01-29T15:47:56 | 2019-01-29T15:47:56 | 90,881,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,558 | py | import unittest
from tempfile import NamedTemporaryFile
from dockerwithgitlabsecrets.tests._common import EXAMPLE_VALUE, EXAMPLE_PARAMETER, EXAMPLE_VARIABLES
from dockerwithgitlabsecrets.wrapper import run_wrapped, SAFE_LINE_BREAK
class TestWrapper(unittest.TestCase):
"""
Tests for `run_wrapped`.
"""
def test_help_with_docker_command(self):
return_code, stdout, stderr = run_wrapped(["ps", "--help"], EXAMPLE_VARIABLES)
self.assertEqual(0, return_code)
self.assertIn("Usage:\tdocker ps", stdout.strip())
def test_with_non_supported_action(self):
return_code, stdout, stderr = run_wrapped(["version"], EXAMPLE_VARIABLES)
self.assertEqual(0, return_code)
self.assertIn("Version", stdout.strip())
def test_has_standard_variable(self):
return_code, stdout, stderr = run_wrapped(
["run", "-e", f"{EXAMPLE_PARAMETER}={EXAMPLE_VALUE}", "--rm", "alpine", "printenv", EXAMPLE_PARAMETER],
EXAMPLE_VARIABLES)
self.assertEqual(0, return_code)
self.assertEqual(EXAMPLE_VALUE, stdout.strip())
def test_run_has_secret_variable(self):
key, value = list(EXAMPLE_VARIABLES.items())[0]
return_code, stdout, stderr = run_wrapped(["--debug", "run", "--rm", "alpine", "printenv", key],
EXAMPLE_VARIABLES)
self.assertEqual(0, return_code)
self.assertEqual(value, stdout.strip())
def test_run_has_multiline_secret_variable(self):
key, value = list(EXAMPLE_VARIABLES.items())[1]
return_code, stdout, stderr = run_wrapped(["run", "--rm", "alpine", "printenv", key],
EXAMPLE_VARIABLES)
self.assertEqual(0, return_code)
self.assertEqual(value.replace("\n", SAFE_LINE_BREAK), stdout.strip())
def test_run_cli_variable_has_higher_precedence(self):
other_value = "other-value"
key, value = list(EXAMPLE_VARIABLES.items())[0]
return_code, stdout, stderr = run_wrapped(["run", "-e", f"{key}={other_value}", "--rm", "alpine", "printenv",
key], EXAMPLE_VARIABLES)
self.assertEqual(0, return_code)
self.assertEqual(other_value, stdout.strip())
def test_run_with_env_file(self):
key, value = list(EXAMPLE_VARIABLES.items())[0]
key_2, value_2 = list(EXAMPLE_VARIABLES.items())[2]
example_override = "override"
with NamedTemporaryFile("w") as env_file:
env_file.write(f"{key}={example_override}")
env_file.flush()
return_code, stdout, stderr = run_wrapped(
["run", "--env-file", f"{env_file.name}", "--rm", "alpine", "printenv", key], EXAMPLE_VARIABLES)
self.assertEqual(0, return_code)
self.assertEqual(example_override, stdout.strip())
return_code, stdout, stderr = run_wrapped(
["run", "--env-file", f"{env_file.name}", "--rm", "alpine", "printenv", key_2], EXAMPLE_VARIABLES)
self.assertEqual(0, return_code)
self.assertEqual(value_2, stdout.strip())
def test_run_in_interactive_mode(self):
key, value = list(EXAMPLE_VARIABLES.items())[0]
return_code, stdout, stderr = run_wrapped(["run", "--rm", "-t", "alpine", "printenv", key], EXAMPLE_VARIABLES,
interactive=True)
self.assertEqual(0, return_code)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
388846af9812028de504500008b6aa55a3f30332 | e499a4388879e1db773b122cf4c0cecea6beecee | /jupyter-notebooks/scripts/parse_kml.py | e36b0f4e7cc4a733755e0b9774916c2d9f6ffd82 | [] | no_license | randobucci/gccom-grids | 4ecccca0076fef0a1582fda316cf309a7ae2633e | ab9e232eb467523fdb9bf1a0b09ccacc75700d0a | refs/heads/master | 2020-03-07T11:32:34.485835 | 2018-03-30T18:11:24 | 2018-03-30T18:11:24 | 127,458,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | #--------------------------------------------------------
#- File: parse_kml.py
#- Description: Extract coordinates from Google Earth KML and save them
#- as geographic lon/lat for later conversion to UTM.
#- Author: Randy Bucciarelli
#- Date: June 15, 2017
#--------------------------------------------------------
import sys
from xml.dom.minidom import parseString
#- Define source files
input_args = sys.argv
if (len(sys.argv) < 2):
src_dir = '../input/'
kml_file = src_dir+'lj-outline.kml'
else:
kml_file = input_args[1]
#- Open kml file and read into memory
file = open(kml_file)
theData = file.read()
file.close()
#-Load data string into DOM
theDom = parseString(theData)
lats,lons,verts = [],[],[]
#- Loop through dom and find coordinates
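#- Each KML vertex is a "lon,lat[,alt]" triple, e.g. "-117.27,32.85,0",
#- so coords[0] below is the longitude and coords[1] the latitude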
for d in theDom.getElementsByTagName('coordinates'):
#- Get all the vertices in first polygon
positions = d.firstChild.data.split()
for p in positions:
coords = p.split(',')
lats.append(float(coords[1]))
lons.append(float(coords[0]))
verts.append(0)
#- Write geographic coords to outfile
out_file = kml_file[0:len(kml_file)-4]+'_geo.xy'
thefile = open(out_file, 'w')
for i in range(len(lats)):
thefile.write("%13.8f\t%13.8f\t%d\n" % (lons[i],lats[i],verts[i]))
thefile.close()
print("Created "+out_file)
| [
"[email protected]"
] | |
266c7b9e2dd74454d2706479f7a79fc447b95cd3 | 0cf8c6d0da99141f1247aae3be88e80af9d611b9 | /_02/a03_lambda.py | b683a11596fa561035e4ea9866d8b923ac76dc6f | [] | no_license | Xava2011/PythonWyklad | 6c3676fa43df34ffc4eb8b7aa9b2529e99f680a6 | 643b1f351d1d486a720607a9f5e9d163d252c487 | refs/heads/master | 2021-01-17T08:45:37.673787 | 2017-03-05T10:20:44 | 2017-03-05T10:20:44 | 83,961,729 | 1 | 0 | null | 2017-03-05T10:23:23 | 2017-03-05T10:23:23 | null | UTF-8 | Python | false | false | 1,837 | py | # a03_lambda
#
# examples of lambda expressions
############################
# simple examples
f = (lambda x: x + 1)
print f(1), (lambda x: x + 1)(3)
# works the same as
def f(x):
return x + 1
# What are the differences between the two definitions above?
############################
# unfortunately, lambda expressions in Python are not fully consistent
# with the theory of the lambda calculus
# Try
# (lambda x, y: y)(lambda y: y)
# and
# print (lambda x, y: y)((lambda y: y), 1)
print (lambda x, y: x)((lambda y: y), 1)(5)
# As you can see, the parameter list corresponds to a tuple of parameters
# rather than to repeated (curried) application
# but the first line can be written like this
(lambda x: (lambda y: y))(lambda y: y)
############################
# in the lambda calculus we can express most of the well-known entities
tt = (lambda x: (lambda y: x))
ff = (lambda x: (lambda y: y))
jesli = (lambda b: (lambda x: (lambda y: (b(x))(y))))
# what are the values of these expressions?
print ((jesli(tt))(1))(2)
print ((jesli(ff))(1))(2)
# Exercise from the lecture
# Try to express in Python's lambda calculus:
# a) pairs and projections
# b) natural numbers
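# one possible encoding, as a sketch:
# a) a pair is a function waiting for a projection
pair = (lambda x: (lambda y: (lambda f: (f(x))(y))))
first = (lambda p: p(lambda x: (lambda y: x)))
second = (lambda p: p(lambda x: (lambda y: y)))
print first((pair(1))(2)), second((pair(1))(2))
# b) a Church numeral n applies f to x exactly n times
zero = (lambda f: (lambda x: x))
succ = (lambda n: (lambda f: (lambda x: f((n(f))(x)))))
print (succ(succ(zero))(lambda k: k + 1))(0)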
############################
# functional programming tools
#
# filter(f, list) = the list of elements e such that f(e) is true
def fff(x):
return x % 2 != 0 and x % 3 != 0
print filter(fff, range(2, 25))
# map(f, list) = applies f to every element of the list
def cube(x):
return x*x*x
print map(cube, range(1, 11))
# the function may take more than one argument
seq = range(8)
def add(x, y):
return x+y
print map(add, seq, seq)
print map((lambda x, y: x+y), seq, seq)
# reduce(f, list) = folds the list by applying f(x, y) to successive elements, 'summing' it up
def add(x, y):
return x+y
print reduce(add, range(1, 11))
# an example: summing a list
def sum(seq):
def add(x, y):
return x+y
return reduce(add, seq, 0)
sum(range(1, 11))
sum([])
| [
"[email protected]"
] | |
023b871f8cdc5f16eb7f227fcbbd8a4f8aafa20c | 77be4484580e1fafb429a73afd029520751c6012 | /venv/lib/python3.8/site-packages/apache_beam/portability/api/beam_interactive_api_pb2.py | 7da77f4054b53551025b7da2c93eafa7ce2e889d | [] | no_license | antoniofernandeslf/bankpoints | 38f713cb23ef1cca209880af0e023b3d7331aa44 | 1f91c531ce96919755f0122a77b32d5b12b414d8 | refs/heads/main | 2023-03-03T11:40:57.897869 | 2021-02-15T18:47:25 | 2021-02-15T18:47:25 | 339,166,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,459 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: beam_interactive_api.proto
from __future__ import absolute_import
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import beam_runner_api_pb2 as beam__runner__api__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='beam_interactive_api.proto',
package='org.apache.beam.model.interactive.v1',
syntax='proto3',
serialized_options=b'\n$org.apache.beam.model.interactive.v1B\016InteractiveApiZ\016interactive_v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1a\x62\x65\x61m_interactive_api.proto\x12$org.apache.beam.model.interactive.v1\x1a\x15\x62\x65\x61m_runner_api.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"#\n\x14TestStreamFileHeader\x12\x0b\n\x03tag\x18\x01 \x01(\t\"j\n\x14TestStreamFileRecord\x12R\n\x0erecorded_event\x18\x01 \x01(\x0b\x32:.org.apache.beam.model.pipeline.v1.TestStreamPayload.EventBF\n$org.apache.beam.model.interactive.v1B\x0eInteractiveApiZ\x0einteractive_v1b\x06proto3'
,
dependencies=[beam__runner__api__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_TESTSTREAMFILEHEADER = _descriptor.Descriptor(
name='TestStreamFileHeader',
full_name='org.apache.beam.model.interactive.v1.TestStreamFileHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tag', full_name='org.apache.beam.model.interactive.v1.TestStreamFileHeader.tag', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=159,
)
_TESTSTREAMFILERECORD = _descriptor.Descriptor(
name='TestStreamFileRecord',
full_name='org.apache.beam.model.interactive.v1.TestStreamFileRecord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='recorded_event', full_name='org.apache.beam.model.interactive.v1.TestStreamFileRecord.recorded_event', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=161,
serialized_end=267,
)
_TESTSTREAMFILERECORD.fields_by_name['recorded_event'].message_type = beam__runner__api__pb2._TESTSTREAMPAYLOAD_EVENT
DESCRIPTOR.message_types_by_name['TestStreamFileHeader'] = _TESTSTREAMFILEHEADER
DESCRIPTOR.message_types_by_name['TestStreamFileRecord'] = _TESTSTREAMFILERECORD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TestStreamFileHeader = _reflection.GeneratedProtocolMessageType('TestStreamFileHeader', (_message.Message,), {
'DESCRIPTOR' : _TESTSTREAMFILEHEADER,
'__module__' : 'beam_interactive_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.interactive.v1.TestStreamFileHeader)
})
_sym_db.RegisterMessage(TestStreamFileHeader)
TestStreamFileRecord = _reflection.GeneratedProtocolMessageType('TestStreamFileRecord', (_message.Message,), {
'DESCRIPTOR' : _TESTSTREAMFILERECORD,
'__module__' : 'beam_interactive_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.interactive.v1.TestStreamFileRecord)
})
_sym_db.RegisterMessage(TestStreamFileRecord)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
d9249c8d210376bf26efed82210d8658a73b81d0 | 1df71aa9f94733d27069d22d8b2235495446ece1 | /MacApp/PSDownloaderGUI.app/Contents/Resources/PSDownloaderGUI.py | 37ae4dbb6588fab04ed4ef749d7b4c18ed1e6dac | [] | no_license | yemyat/PythonLeo | cd6f3b4f59d7bea079f107f608cb68d2a9160d53 | fdb9558e92ee0f76a236d88679c47ca55dfa6f6c | refs/heads/master | 2020-06-02T09:19:38.577756 | 2014-04-22T19:11:36 | 2014-04-22T19:11:36 | 1,104,736 | 0 | 3 | null | 2015-02-27T12:01:17 | 2010-11-23T06:17:59 | Tcl | UTF-8 | Python | false | false | 2,827 | py | import re
import os
import PythonLeo
from Tkinter import *
import threading
import zipfile,shutil
URL_LIST = {"current_module":"http://leo.rp.edu.sg/workspace/studentModule.asp?site=3", #to get project_id , group_id
"current_problem":"http://leo3.rp.edu.sg//projectweb/project_menu.asp?", #to get topic_id, year2,3=>leo3, year1=>leo1
"problem_download":"http://leo.rp.edu.sg/projectweb/projectupload/savefolderas.asp?folder=/databank/projectbank/"
};
def download():
response.set("Connecting")
leo = PythonLeo.PythonLeo(username_field.get(),password_field.get()) #e.g. 91224, 12345
project_id_list = leo.parse_id("projectid",leo.open_url(URL_LIST["current_module"]))
group_id_list = leo.parse_id("groupid",leo.open_url(URL_LIST["current_module"]))
topic_id_list = leo.parse_id("topicid",leo.open_url(URL_LIST["current_problem"]+
"projectid="+str(project_id_list[-1])+
"&groupid="+str(group_id_list[-1])))
get_download_url = leo.open_url(URL_LIST["problem_download"]+topic_id_list[-1])
download_url = "http://leo.rp.edu.sg"+ re.search('HREF=\"(.+?zip)',get_download_url.read()).groups()[0]
response.set("Downloading")
os.chdir(os.path.expanduser("~/Desktop"))
zip_file = open("problem.zip","wb")
zip_file.write( leo.open_url(download_url).read() )
zip_file.close()
extractDirectory=""
file=open("problem.zip")
##WillYan
zfile=zipfile.ZipFile(file)
zip_dirs=zfile.namelist()
zfile.extractall()
new_folder=os.getcwd()+"/"+"Problem"+str(len(project_id_list))+"/"
os.makedirs(new_folder)
count=0
for i in zip_dirs:
extractDirectory=os.getcwd()+"/"+str(i)
filename=zip_dirs[count][(zip_dirs[count].rfind("/")+1)::]
if(os.path.isdir(extractDirectory)==False):
shutil.copyfile(extractDirectory,(new_folder+"/"+filename))
count+=1
os.remove("problem.zip")
shutil.rmtree("Databank-CurrentSemester")
response.set("Done!")
def download_thread(dummy=1):
threading.Thread(target=download).start()
if __name__ == "__main__":
root = Tk()
root.title("LEO PS Downloader")
main_frame = Frame(root,width=200,height=120)
main_frame.grid(column=0,row=0)
username_label = Label(main_frame, text="Username")
username_label.grid(column=0,row=0)
username_field =Entry(main_frame)
username_field.grid(column=1,row=0,columnspan=2)
password_label = Label(main_frame, text="Password")
password_label.grid(column=0,row=1)
password_field =Entry(main_frame,show="*")
password_field.grid(column=1,row=1,columnspan=2)
password_field.bind("<Return>",download_thread)
response = StringVar()
response.set("")
response_label = Label(main_frame, textvariable=response,fg="red",anchor=W,justify=LEFT)
response_label.grid(column=0,row=2)
dl_button = Button(main_frame,text="Download",command=download_thread)
dl_button.grid(column=2,row=2)
root.mainloop()
| [
"[email protected]"
] | |
481ee7906a744a489a5047dc23bddcf717f292f6 | 60d8335b0017fd2747efd14e50a446f9b61c1cf2 | /en-US/extras/Usage-Example-9.py | 2f0920d459dae949672009696bb0cf5611f5bd6a | [] | no_license | libvirt/libvirt-appdev-guide-python | 7bfd9ca3ca27cbf2244b241a10d49ea3a133ae04 | f652d7b8cf86ee4f61b2720a3a3d6b45042c7779 | refs/heads/master | 2023-08-22T01:30:57.157556 | 2023-07-03T10:22:31 | 2023-07-03T10:26:34 | 249,973,267 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | stream = conn.newStream()
imageType = domain.screenshot(stream,0)
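# a typical continuation (a sketch; the output file name is illustrative):
import os
def saver(stream, data, fdes):
    return os.write(fdes, data)
fdes = os.open('screenshot.ppm', os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
stream.recvAll(saver, fdes)
stream.finish()
os.close(fdes)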
| [
"[email protected]"
] | |
b0eec7ecd9782b591815a35655ccffd969db10fc | 010c5fc504e64d9d855daff1cbce44394bdfb263 | /playfunction/playfunction/doctype/item_icon/test_item_icon.py | 9bedf2a3b56808d0870ebe1b164a5e8b13d152d2 | [
"MIT"
] | permissive | patilsangram/playfunction | 05c30f0db23cf0edb87d0378294ebb6b4d0d95c0 | e8cb03ef0091547bee8da65d5ce1cce7d68cace0 | refs/heads/master | 2023-05-27T17:59:42.255480 | 2020-07-28T14:06:54 | 2020-07-28T14:06:54 | 375,303,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Indictrans and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestItemIcon(unittest.TestCase):
pass
| [
"[email protected]"
] | |
ef4a01640e6d0ad4d796f424868a53ae7c91b2fc | 153370f455850fd5d3da284e0d7053b9dee3b5d6 | /tests/test_auth.py | 5ed89cdf41eab07789ebb2e3985102cf205f8445 | [] | no_license | baldarn/raspi_gate | 58742073406fdcb4698a2b1c1deaf7b6c0e26bd9 | c71bbf07e1feab518a97dac5aa27e0c7836f734c | refs/heads/master | 2021-01-18T19:44:08.634193 | 2019-12-28T11:33:20 | 2019-12-28T11:33:20 | 86,909,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,070 | py | import pytest
from flask import g, session
from raspi_gate.db import get_db
def test_register(client, app):
# test that viewing the page renders without template errors
assert client.get('/auth/register').status_code == 200
# test that successful registration redirects to the login page
response = client.post(
'/auth/register', data={'username': 'a', 'password': 'a'}
)
assert 'http://localhost/auth/login' == response.headers['Location']
# test that the user was inserted into the database
with app.app_context():
assert get_db().execute(
"select * from user where username = 'a'",
).fetchone() is not None
@pytest.mark.parametrize(('username', 'password', 'message'), (
('', '', b'Username is required.'),
('a', '', b'Password is required.'),
('test', 'test', b'already registered'),
))
def test_register_validate_input(client, username, password, message):
response = client.post(
'/auth/register',
data={'username': username, 'password': password}
)
assert message in response.data
def test_login(client, auth):
# test that viewing the page renders without template errors
assert client.get('/auth/login').status_code == 200
# test that successful login redirects to the index page
response = auth.login()
assert response.headers['Location'] == 'http://localhost/'
    # the login request sets the user_id in the session
# check that the user is loaded from the session
with client:
client.get('/')
assert session['user_id'] == 1
assert g.user['username'] == 'test'
@pytest.mark.parametrize(('username', 'password', 'message'), (
('a', 'test', b'Incorrect username.'),
('test', 'a', b'Incorrect password.'),
))
def test_login_validate_input(auth, username, password, message):
response = auth.login(username, password)
assert message in response.data
def test_logout(client, auth):
auth.login()
with client:
auth.logout()
assert 'user_id' not in session
| [
"[email protected]"
] | |
adcee245a95e9425a32e6b7427a416933d334339 | 48cf478689daa5c7558ccb6689b98cfaeb64ea0f | /Compilation/neural_network.py | 24eddd9d47f6de3b2b29b106e3916c69cdf6d093 | [] | no_license | demetriushrowland/machine_learning | d31454038719ccd61b6fb12db3d41da60a46ac59 | d72db242700f1378181910ebdc2a82991647b422 | refs/heads/master | 2020-06-04T04:24:05.628961 | 2020-05-29T03:50:52 | 2020-05-29T03:50:52 | 191,871,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | import numpy as np
class Net:
    """Minimal fully connected feed-forward network: a list of weight
    matrices plus one activation function per layer."""
def __init__ (self, weights, activations):
self.weights = weights
self.num_input = weights[0].shape[1]
self.num_hidden_layers = len(weights)-1
self.num_hidden = [weights[i].shape[1] for i in range(1, len(weights))]
self.num_output = weights[-1].shape[0]
self.activations = activations
def forward (self, x):
weights = self.weights
activations = self.activations
x = np.matmul(weights[0], x)
x = activations[0](x)
for hidden_layer_num in range(self.num_hidden_layers):
x = np.matmul(weights[hidden_layer_num+1], x)
x = activations[hidden_layer_num+1](x)
return x
def main():
return
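# example usage (a sketch; the layer shapes and random weights are illustrative)
if __name__ == '__main__':
    relu = lambda z: np.maximum(z, 0.0)
    identity = lambda z: z
    weights = [np.random.randn(4, 3),  # hidden layer: 3 inputs -> 4 units
               np.random.randn(2, 4)]  # output layer: 4 units -> 2 outputs
    net = Net(weights, [relu, identity])
    print(net.forward(np.random.randn(3)))  # prints a 2-vector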
| [
"[email protected]"
] | |
d1d4911cc165e12b0c2b73a44e9fca1d654e75f1 | 936a0943b862cfbbd935adb7e2bf5ac6ec012375 | /examples/basics.py | e04afd25e7c89cbfec558b18ce2ea9d2a79043bd | [
"MIT"
] | permissive | bgheneti/rai-python | b23c65f938acd2656a925c5a4adb7858d333992c | 2956004853525795dfde210266d9bbf3292d27ee | refs/heads/master | 2020-04-10T10:59:24.439509 | 2018-12-08T11:41:18 | 2018-12-08T11:41:18 | 160,981,304 | 0 | 0 | MIT | 2018-12-08T21:45:56 | 2018-12-08T21:45:56 | null | UTF-8 | Python | false | false | 683 | py | #!/usr/bin/python3
import sys
sys.path.append('../ry')
from libry import *
K = Config()
D = K.view()
K.addFile('../rai-robotModels/pr2/pr2.g');
K.addFile('../test/kitchen.g');
print("joint names: ", K.getJointNames())
print("frame names: ", K.getFrameNames())
q = K.getJointState()
print('joint state: ', q)
q[2] = q[2] + 1.
K.setJointState(q)
X = K.getFrameState()
print('frame state: ', X)
X = X + .1
K.setFrameState(X.flatten().tolist())
q = K.getJointState()
print('joint state: ', q)
q[2] = q[2] + 1.
K.setJointState(q)
K.addFrame("camera", "head_tilt_link", "Q:<d(-90 1 0 0) d(180 0 0 1)> focalLength:.3")
C = K.view(frame="camera")
input("Press Enter to continue...")
| [
"[email protected]"
] | |
b402ed27715c8cdf48833fe2f9f37f17572776b9 | 80f316c1bf2f705e82e98f485c3498b0044ff76d | /images/migrations/0002_auto_20160323_1424.py | 2059faca9563d18ce83b0eaa9554796a37d1f8ad | [] | no_license | lfalvarez/django-images | 789933ce0d522644471e2d678c613ae13a6eb40b | f30e995da644101ee21b6a667e4400de3d5fff12 | refs/heads/master | 2020-12-11T07:59:39.015651 | 2016-03-24T11:02:51 | 2016-03-24T11:06:34 | 57,898,112 | 0 | 0 | null | 2016-05-02T15:00:28 | 2016-05-02T15:00:28 | null | UTF-8 | Python | false | false | 455 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('images', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='image',
name='image',
field=sorl.thumbnail.fields.ImageField(max_length=512, upload_to=b'images'),
),
]
| [
"[email protected]"
] | |
4e3abde77ccd1556174380fea5d10c4c9f3c49b9 | 377bc20bb72234f0095eedba83ddbfaad79c50bb | /djangoTest/djangoTest/settings.py | 35a217a1c587772eb74bf7e55e6eda3b1be1b868 | [] | no_license | choicoding1026/Django | 56fcf9932095829b26037ab5c506882ea31f7f33 | 4bbdf0e0f79175e10f8f75a29c2bc8f0ebd5328c | refs/heads/master | 2022-12-19T20:07:22.904082 | 2020-09-28T08:46:58 | 2020-09-28T08:46:58 | 298,218,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,074 | py | """
Django settings for djangoTest project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@r+wqv+3!z+6=k-1csw@7(2(vxwx(6o$#w4hbox-m@@r*xn48p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoTest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoTest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
3d1e5f29bcb35d357062cc9fc390560ac7aff133 | 9cef1e851750e9ab31909eae57415c00c7f2428b | /scrapers/beef_supply_chain_para/middlewares.py | 35d64f08fb9421314217476361bf18fca78b96ec | [
"MIT"
] | permissive | StellaCarneiro/beef-supply-chain | 9fb39c4beaf55b9ae9a9204b22a569533fb7c7c6 | bd2f994e1515a5c8fc3e3794e936c4d3e246453e | refs/heads/main | 2023-01-21T10:17:03.915123 | 2020-12-02T18:30:12 | 2020-12-02T18:30:12 | 342,729,078 | 1 | 0 | null | 2021-02-26T23:45:55 | 2021-02-26T23:45:55 | null | UTF-8 | Python | false | false | 3,623 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class BeefSupplyChainParaSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class BeefSupplyChainParaDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
60f3142dd05ea30e240de63fe4ee9b0fda5999cf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_395/ch32_2019_03_26_16_46_20_229168.py | 53939c1ffa07971a583c887f865be3d4e5c189df | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | a = str(input('Você tem dúvidas?'))
# keep asking while the user still has doubts; accept 'não' or 'nao' as "no"
while a != 'não' and a != 'nao':
    print ('Pratique mais')
    a = str(input('Você tem dúvidas?'))
print ('Até a próxima') | [
"[email protected]"
] | |
54d82f06835c62981d7b87da84e06bc382fa3711 | a9d96371f00c25e8b4ec73aebae3bf093f1fbcab | /src/crawl_web_maxpages.py | 00d4aad0e39c088c0fea3f2ac14bc87868358a27 | [] | no_license | joskid/SearchEngine | 9c06a3da9d0b97e33519637c5379ff7036411a42 | 9ba8b11e48c6614b2f59e7f2b9b50483022ac4a2 | refs/heads/master | 2021-01-18T05:39:22.119963 | 2012-03-27T07:27:09 | 2012-03-27T07:27:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | # crawling the web starting from seed page with max_pages parameter
# get_page() procedure for getting the contents of a webpage as a string
import urllib
def get_page(url):
try:
return urllib.urlopen(url).read()
except:
return ""
# procedure for finding and returning the next url from the passing page parameter
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
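# Hedged worked example (added; not in the original):
#   get_next_target('x <a href="http://a.com">y</a>')
# finds start_link=2, start_quote=10, end_quote=23 and returns
# ('http://a.com', 23) -- the URL plus the position to resume scanning from.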
# procedure for finding the union of two lists
def union(p,q):
for e in q:
if e not in p:
p.append(e)
return p
# given a seed page, it will return all the links in that page
def get_all_links(page):
links = []
while True:
url,endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
# for crawling the web, constrained on the maximum different pages crawled
def crawl_web(seed,max_pages):
tocrawl = [seed]
crawled = []
while tocrawl:
page = tocrawl.pop()
if len(crawled) < max_pages :
if page not in crawled:
union(tocrawl, get_all_links(get_page(page)))
crawled.append(page)
return crawled
# Running the program with given seed pages and max_pages
print crawl_web("http://xkcd.com/353",10)
| [
"[email protected]"
] | |
d274a8a644daff6fd4c2f4db5f2fb2745f1e937a | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/PhysicsAnalysis/D3PDMaker/JetTagD3PDMaker/share/JetTagD3PD_makeTrackJets.py | 49e467360ee52960c521a147637e239d6091e488 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,779 | py |
from JetRecTools.JetRecToolsConf import JetTrackZClusterTool
from JetRec.JetRecConf import JetFastJetFinderTool
from JetRec.JetRecFlags import jetFlags
from JetRec.JetGetters import *
#------------------------------------------------------------------------
# Setup tool so that it can be used
#------------------------------------------------------------------------
JetTrackZClusterTool_Z4 = JetTrackZClusterTool( "JetTrackZClusterTool_Z4" )
JetTrackZClusterTool_Z4.TrackJetMinMulti = 2
JetTrackZClusterTool_Z4.TrackJetMinPt = 5000 # MeV
JetTrackZClusterTool_Z4.UseVtxSeeding = True
JetTrackZClusterTool_Z4.DeltaZRange = 10000.0
JetTrackZClusterTool_Z4.TrackParticleContainerName = "TrackParticleCandidate"
JetTrackZClusterTool_Z4.VxContainerName = "VxPrimaryCandidate"
JetTrackZClusterTool_Z4.OutputLevel = INFO
#--------------------------------------------------------------
from InDetTrackSelectorTool.InDetTrackSelectorToolConf import InDet__InDetDetailedTrackSelectorTool
#--------------------------------------------------------------
ToolSvc += InDet__InDetDetailedTrackSelectorTool( "InDetDetailedTrackSelectorTool_Z4")
ToolSvc.InDetDetailedTrackSelectorTool_Z4.pTMin = 500 # MeV
ToolSvc.InDetDetailedTrackSelectorTool_Z4.etaMax = 2.5
ToolSvc.InDetDetailedTrackSelectorTool_Z4.nHitBLayer = 0
ToolSvc.InDetDetailedTrackSelectorTool_Z4.nHitPix = 1
ToolSvc.InDetDetailedTrackSelectorTool_Z4.nHitSct = 6
ToolSvc.InDetDetailedTrackSelectorTool_Z4.nHitSi = 7 #7
ToolSvc.InDetDetailedTrackSelectorTool_Z4.nHitTrt = 0
ToolSvc.InDetDetailedTrackSelectorTool_Z4.IPd0Max = 1.5 # d0 cut
ToolSvc.InDetDetailedTrackSelectorTool_Z4.IPz0Max = 1.5 # z0*sin(theta) cut
ToolSvc.InDetDetailedTrackSelectorTool_Z4.z0Max = 200 # z0 cut
ToolSvc.InDetDetailedTrackSelectorTool_Z4.fitChi2OnNdfMax = 10000 #3.5
ToolSvc.InDetDetailedTrackSelectorTool_Z4.d0significanceMax = -1.
ToolSvc.InDetDetailedTrackSelectorTool_Z4.z0significanceMax = -1.
ToolSvc.InDetDetailedTrackSelectorTool_Z4.OutputLevel = INFO
JetTrackZClusterTool_Z4.TrackSelector = ToolSvc.InDetDetailedTrackSelectorTool_Z4 # 2 tracks threshold at 500MeV
ToolSvc.InDetDetailedTrackSelectorTool_Z4.TrackSummaryTool = InDetTrackSummaryTool
ToolSvc.InDetDetailedTrackSelectorTool_Z4.Extrapolator = InDetExtrapolator
#--------------------------------------------------------------
#--------------------------------------------------------------
JetFastJetFinderToolAntiKt_Z4 = JetFastJetFinderTool("JetFastJetFinderToolAntiKt_Z4")
JetFastJetFinderToolAntiKt_Z4.Algorithm = "anti-kt"
JetFastJetFinderToolAntiKt_Z4.Radius = 0.4
JetFastJetFinderToolAntiKt_Z4.RecombScheme = "E"
JetFastJetFinderToolAntiKt_Z4.Strategy = "Best"
JetFastJetFinderToolAntiKt_Z4.FailIfMisconfigured = True
JetFastJetFinderToolAntiKt_Z4.Inclusive = True
JetFastJetFinderToolAntiKt_Z4.CalculateJetArea = False
JetFastJetFinderToolAntiKt_Z4.StoreNFlipValues = 2
ToolSvc += JetFastJetFinderToolAntiKt_Z4
JetTrackZClusterTool_Z4.JetFinder = JetFastJetFinderToolAntiKt_Z4
#BTagToolList = [ JetTrackZClusterTool_Z4, JetSorterTool() ]
#BTagToolList += getStandardBTaggerTool('AntiKtZ',0.4,'Track')
#--------------------------------------------------------------
my_att_dict = dict(jet_attribute_dict)
# Fill the attribute dictionary
my_att_dict['_finderType'] = 'AntiKtZ'
my_att_dict['_finderParams'] = [0.4]
my_att_dict['_inputType'] = 'Track'
my_att_dict['_inputColl'] = []
#jetRec_Z4 = make_customJetGetter(my_att_dict, BTagToolList).jetAlgorithmHandle()
jetRec_Z4 = make_customJetGetter(my_att_dict, [JetTrackZClusterTool_Z4,JetSorterTool()]).jetAlgorithmHandle()
| [
"[email protected]"
] | |
4b084dd7ab6265d719d52f462ded8f95839f050b | 2d5c00af71c58e02bbf7089c9cdeea42a5e1475a | /train_phase2_classifier.py | b1a3108a24d0b27131b0b839648b1bdbc1f60c08 | [
"MIT"
] | permissive | sohag-mbstu-ict/robust_frcnn | 87fc446fac7b137b183db2b0dfb64d043dbab174 | 707ea9a3063d40fbfd9894bf4561025357fcb0ef | refs/heads/master | 2023-03-16T11:31:52.214790 | 2020-07-19T19:47:18 | 2020-07-19T19:47:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,792 | py | from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from keras.layers import Dense, GlobalAveragePooling2D, Dropout, Input
from keras.models import Model, model_from_json
from keras.applications.inception_v3 import InceptionV3
from keras import backend as K
from keras.optimizers import RMSprop, Adam, SGD
from keras.callbacks import LearningRateScheduler
from keras.utils import generic_utils
from keras_frcnn import phase3_utils
from optparse import OptionParser
import os
import pickle
from sklearn.metrics import f1_score
# Import image generator functions
from keras_frcnn import phase2_generator
if 'tensorflow' == K.backend():
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config2 = tf.ConfigProto()
config2.gpu_options.allow_growth = True
set_session(tf.Session(config=config2))
sys.setrecursionlimit(40000)
def kl_div(P, Q):
return np.nansum([p * np.log2(p / (q + 1e-8)) for p, q in zip(P, Q) if p != 0])
def js_distance(P, Q):
M = 0.5 * (P + Q)
return np.sqrt(0.5 * kl_div(P, M) + 0.5 * kl_div(Q, M))
def get_optimal_alpha(p_img, p_curr, rule_mode = "max"):
js_dist_list = [js_distance(p_img[0,i,:], p_curr[0,i,:]) for i in range(p_img.shape[1])]
if rule_mode == "max":
dist_diff = np.nanmax(js_dist_list)
elif rule_mode == "min":
dist_diff = np.nanmin(js_dist_list)
else:
dist_diff = np.nanmean(js_dist_list)
return np.max([alpha_final, dist_diff / (1 - dist_diff + 1e-8)])
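# Hedged note (added; not in the original): with base-2 logs the
# Jensen-Shannon distance above lies in [0, 1], so dist_diff/(1 - dist_diff)
# maps agreement (~0) to a small alpha and strong disagreement (~1) to a
# very large one, with alpha_final acting as a floor.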
def make_target_probas(p_img, p_curr, alpha, constrain_hard_examples = False):
target_probas = (np.log(p_curr[0] + 1e-8) + alpha * np.log(p_img[0] + 1e-8)) / (1 + alpha)
target_probas = np.exp(target_probas) / np.exp(target_probas).sum(axis = 1)[:, None]
idx = []
if constrain_hard_examples:
# Confident predictions in img_classifier
idx_conf = np.where(p_img[0] >= 0.90)
target_probas[idx_conf[0], :] = 0
target_probas[idx_conf] = 1
# Easy predictions (agreement between img and current)
idx_agree = np.where((p_img[0].argmax(1) == p_curr[0].argmax(1)) & (p_curr[0].max(1) >= 0.50))[0]
cols_agree = p_curr[0].argmax(1)[idx_agree]
target_probas[idx_agree,:] = 0
target_probas[idx_agree, cols_agree] = 1
idx = np.unique(idx_conf[0].tolist() + idx_agree.tolist()).tolist()
return np.expand_dims(target_probas, axis = 0), idx
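# Hedged sanity check (added; not in the original): with alpha=1 and
# constrain_hard_examples=False the target row is the renormalized
# geometric mean of the two distributions, e.g. p_img=[0.9, 0.1] and
# p_curr=[0.5, 0.5] combine to exactly [0.75, 0.25].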
parser = OptionParser()
parser.add_option("-s", "--source_path", dest="source_path", help="Path to the source txt file.")
parser.add_option("-t", "--target_path", dest="target_path", help="Path to the target detections txt file.")
parser.add_option("-o", "--original_detector_path", dest="original_detector_path", help="Path to the txt file used in phase 1.")
parser.add_option("-d", "--save_dir", dest="save_dir", help="Path to directory where architecture and weights will be saved.", default="models/phase2")
parser.add_option("-a", "--model_architecture", dest="model_architecture", help="Path to JSON where architecture will be saved (inside save_dir).", default="phase2_model.json")
parser.add_option("-w", "--model_weights", dest="model_weights", help="Path to .hdf5 where weights will be saved (inside save_dir).", default="phase2_weights.hdf5")
parser.add_option("-e", "--num_epochs", dest="num_epochs", help="Number of epochs for the training.", default=1, type=int)
parser.add_option("--e_length", dest="e_length", help="Epoch length - Steps for each epoch.", default=1000, type=int)
parser.add_option("--config_filename", dest="config_filename", help="Path of the config file of phase 1 F-RCNN.", default="config.pickle")
parser.add_option("-r", "--reg_param", dest="reg_param", help="Regularization parameter for semi-supervised training.", default=0.2, type=float)
parser.add_option("--sup_lr", dest="sup_lr", help="Learning rate used for the supervised training.", default=1e-5, type=float)
parser.add_option("--val_size", dest="val_size", help="Nb of images to use as val set to monitor performance and save weights (default 100).", default=100, type=int)
parser.add_option("-m", "--model_type", dest="model_type", help="Model to be used. 1: Noisy labels (default) 2: Entropy-minimization.", default=1, type=int)
parser.add_option("--alpha_init", type=float, dest="alpha_init", help="Starting alpha value for noisy-label model.", default=100.)
parser.add_option("--alpha_final", type=float, dest="alpha_final", help="Final/smallest alpha value for noisy-label model.", default=0.5)
parser.add_option("--hard_constraints", dest="hard_constraints", help="Set hard thresholds on confident predictions", action="store_true", default=False)
parser.add_option("--recompute_alpha", dest="recompute_alpha", help="Recompute alpha automatically using Hausdorf distance.", action="store_true", default=False)
(options, args) = parser.parse_args()
if not options.source_path: # if filename is not given
parser.error('Error: path to source dataset must be specified. Pass --source_path to command line')
if not options.target_path:
parser.error('Error: path to target detections dataset must be specified. Pass --target_path to command line')
if not os.path.isdir(options.save_dir):
os.mkdir(options.save_dir)
with open(options.config_filename, 'rb') as f_in:
C = pickle.load(f_in)
# Check the correct ordering
if K.image_dim_ordering() == 'th':
input_shape_img = (3, None, None)
else:
input_shape_img = (None, None, 3)
# Define the number of classes - Background is included
n_classes = len(C.class_mapping)
print("Number of classes (used for phase 1) = {}".format(n_classes))
print("========== Creating architectures ==========")
base_cnn = InceptionV3(include_top = False, weights = 'imagenet', input_shape = input_shape_img)
x = base_cnn.output
x = GlobalAveragePooling2D(name = "final_globalavgpooling")(x)
x = Dense(4096, activation = 'relu', name = "final_dense1")(x)
x = Dropout(0.5)(x)
x = Dense(2048, activation = 'relu', name = "final_dense2")(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation = 'relu', name = "final_dense3")(x)
x = Dropout(0.5)(x)
x = Dense(n_classes, activation = "softmax", name = "predictions")(x)
sup_img_classifier = Model(inputs = base_cnn.input, outputs = x)
if options.model_type == 2:
semi_img_classifier = Model(inputs = base_cnn.input, outputs = x)
reg_param = options.reg_param
def semi_loss(y_true, y_pred):
y_pred = y_pred + 1e-8
return - reg_param * K.mean(y_pred * K.log(y_pred))
semi_sup_lr_ratio = 5
supervised_lr = options.sup_lr
semi_lr = supervised_lr / semi_sup_lr_ratio
optimizer_sup = Adam(lr = supervised_lr, clipnorm = 1e-2)
optimizer_semi = Adam(lr = semi_lr, clipnorm = 1e-4)
#optimizer_sup = RMSprop(supervised_lr)
#optimizer_semi = RMSprop(semi_lr)
#optimizer_sup = SGD(lr = supervised_lr, clipnorm = 1e-3, nesterov = False)
#optimizer_semi = SGD(lr = semi_lr, clipnorm = 1e-3, nesterov = False)
sup_img_classifier.compile(optimizer = optimizer_sup, loss = "categorical_crossentropy")
if options.model_type == 2:
semi_img_classifier.compile(optimizer = optimizer_semi, loss = semi_loss)
# Saving the model architecture
with open(os.path.join(options.save_dir, options.model_architecture), "w") as f:
f.write(sup_img_classifier.to_json())
f.close()
print("========== Created and saved architectures ========")
# We create the training generators
data_gen_source = phase2_generator.image_generator(options.source_path, C, mode = "source")
data_gen_original = phase2_generator.image_generator(options.original_detector_path, C, mode = "source")
data_gen_target = phase2_generator.image_generator(options.target_path, C, mode = "target")
# per-iteration loss buffers; note this rebinds the name `semi_loss`, which
# is safe only because the loss *function* above was already captured by
# compile() before this point
sup_loss = np.zeros(options.e_length)
semi_loss = np.zeros(options.e_length)
n_epochs = options.num_epochs
start_time = time.time()
best_acc = -np.Inf
batch_size = 32
# Making the validation set to measure improvement
val_size = options.val_size
x_test, y_test = next(data_gen_source)
y_true = [y_test.argmax()]
for i in range(1, val_size):
if i % 2 == 0:
x_next, y_test = next(data_gen_original)
else:
x_next, y_test = next(data_gen_source)
x_test = np.concatenate((x_test, x_next), axis = 0)
y_true.append(y_test.argmax())
sup_loss_hist = []
semi_loss_hist = []
time_hist = []
f1_loss_hist = []
alpha_init = float(options.alpha_init)
alpha_final = float(options.alpha_final)
constant_thresh = int(5 / 7 * options.e_length * n_epochs)
print("========== Starting training ============")
# Begin the training
for epoch in range(n_epochs):
progbar = generic_utils.Progbar(options.e_length)
print('Epoch {}/{}'.format(epoch + 1, n_epochs))
iter_num = 0
if epoch > 0 and epoch % 3 == 0:
supervised_lr = supervised_lr * 0.1
#semi_lr = semi_lr * 0.94
K.set_value(sup_img_classifier.optimizer.lr, supervised_lr)
#K.set_value(semi_img_classifier.optimizer.lr, semi_lr)
while True:
try:
if iter_num <= constant_thresh:
alpha = alpha_init - iter_num * (alpha_init - alpha_final) / constant_thresh
X_source, Y_source = next(data_gen_source)
X_target, Y_target = next(data_gen_target)
for b in range(1, batch_size):
if b % 3 == 0:
x_next, y_next = next(data_gen_original)
else:
x_next, y_next = next(data_gen_source)
X_source, Y_source = np.concatenate((X_source, x_next), axis = 0), np.concatenate((Y_source, y_next), axis = 0)
x_next, y_next = next(data_gen_target)
X_target = np.concatenate((X_target, x_next), axis = 0)
if options.model_type == 1:
Y_target = np.concatenate((Y_target, y_next), axis = 0)
# Run one supervised step
sup_loss[iter_num] = sup_img_classifier.train_on_batch(X_source, Y_source)
# Run one semi-supervised step
if options.model_type == 2:
semi_loss[iter_num] = semi_img_classifier.train_on_batch(X_target, Y_source) # We pass Y_source because of Keras, but it's not used
else:
curr_probas = np.expand_dims(sup_img_classifier.predict(X_target), axis = 0)
Y_target = np.expand_dims(Y_target, axis = 0)
if options.recompute_alpha:
alpha = get_optimal_alpha(Y_target, curr_probas, "max")
Y_target, _ = make_target_probas(Y_target, curr_probas, alpha, constrain_hard_examples = options.hard_constraints)
semi_loss[iter_num] = sup_img_classifier.train_on_batch(X_target, Y_target[0])
progbar.update(iter_num, [('Supervised Loss', sup_loss[iter_num].mean()), ('Semi-Sup Loss', semi_loss[iter_num].mean()),
("Total Loss", (sup_loss[iter_num] + semi_loss[iter_num]).mean())])
iter_num += 1
if iter_num == options.e_length:
y_pred = sup_img_classifier.predict_on_batch(x_test).argmax(1).tolist()
curr_acc = f1_score(y_true, y_pred, average = "micro")
semi_loss_hist.extend(semi_loss.tolist())
sup_loss_hist.extend(sup_loss.tolist())
time_hist.append(time.time() - start_time)
f1_loss_hist.append(curr_acc)
if C.verbose:
print('\nSupervised Loss: {}'.format(sup_loss.mean()))
print('Semi-Supervised Loss {}'.format(semi_loss.mean()))
print('Total Loss: {}'.format(np.nanmean(sup_loss + semi_loss)))
print("Current F1-Score: {}".format(curr_acc))
print('Elapsed time: {}'.format(time.time() - start_time))
if curr_acc > best_acc:
if C.verbose:
print('Total F1-Score increased from {} to {}, saving weights'.format(best_acc, curr_acc))
best_acc = curr_acc
sup_img_classifier.save_weights(os.path.join(options.save_dir, options.model_weights))
start_time = time.time()
break
except Exception as e:
print('Exception: {}'.format(e))
continue
print("=========== Finished training =============")
#np.savez("img_train_results.npz", sup_loss = sup_loss_hist, semi_loss = semi_loss_hist, time_hist = time_hist, f1_loss = f1_loss_hist, n_epochs = n_epochs, epoch_length = options.e_length)
| [
"[email protected]"
] | |
2df13f8960a8c0c01b41d33c85d81572f3b29fd3 | 228ca0372abc0d46b4667208049b1264429d63c0 | /src/weather.py | ef8a919bc4fc4e0aaa0cee235341344a15fe95b1 | [] | no_license | showmurai/tenki | eaea50ef1ab7ffbacdb8418d30e0601b70db1336 | b1cdf94bf1d3f6d6d0244a3d43e150cf0af934b8 | refs/heads/master | 2020-04-06T06:40:45.442280 | 2014-06-05T06:17:58 | 2014-06-05T06:17:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | # -*- coding:utf-8 -*-
import requests
import json
r = requests.get('http://api.openweathermap.org/data/2.5/weather?q=Tokyo,jp', )
print r.status_code
print r.headers['content-type']
print r.json()
encode_json = json.dumps(r.json())
print encode_json
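# Hedged sketch (added; not in the original): pull a couple of fields out of
# the reply. The key names assume the OpenWeatherMap 2.5 response schema,
# and current API versions also require an `appid` query parameter.
data = r.json()
print data.get('name'), data.get('main', {}).get('temp')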
| [
"[email protected]"
] | |
55c4d1d2f2cd01306eadfd5c6b8b1f255b2d2838 | 499329b1a7c5691c8a20a24dccd04b5685cd566a | /bambu/blog/helpers.py | dad9bd81c11d66a2f049b62c8e566d160fb38eb6 | [] | no_license | flamingtarball/bambu-tools | dd00d382d04457d988a33022b97ef54b3f3023dc | ca0e8e7f3cbbb946e39f8df794ec82baf34795de | refs/heads/master | 2016-09-05T23:37:35.906125 | 2012-08-14T10:16:31 | 2012-08-14T10:16:31 | 5,181,076 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | from django.contrib.auth.models import User
from django.conf import settings  # used by get_post_image (MEDIA_URL); this import was missing
from django.utils.timezone import utc
from taggit.models import Tag
from datetime import datetime
def view_filter(**kwargs):
from bambu.blog.models import Post, Category
posts = Post.objects.live()
if 'year' in kwargs:
posts = posts.filter(
date__year = int(kwargs['year'])
)
if 'month' in kwargs:
posts = posts.filter(
date__month = int(kwargs['month'])
)
if 'day' in kwargs:
posts = posts.filter(
date__day = int(kwargs['day'])
)
if 'category' in kwargs:
posts = posts.filter(categories__slug = kwargs['category'])
elif 'tag' in kwargs:
posts = posts.filter(tags__slug = kwargs['tag'])
elif 'username' in kwargs:
posts = posts.filter(author__username = kwargs['username'])
return posts
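# Hedged example (added; not in the original): view_filter(year='2016',
# month='3', tag='django') returns live posts from March 2016 tagged
# "django"; the string kwargs mirror what URL patterns typically capture.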
def title_parts(**kwargs):
from bambu.blog.models import Category
title_parts = [u'Blog']
if 'year' in kwargs:
if 'month' in kwargs:
if 'day' in kwargs:
title_parts.insert(0,
datetime(
int(kwargs['year']),
int(kwargs['month']),
int(kwargs['day'])
).replace(tzinfo = utc).strftime('%B %d, %Y')
)
else:
title_parts.insert(0,
datetime(
int(kwargs['year']),
int(kwargs['month']),
1
).replace(tzinfo = utc).strftime('%B %Y')
)
else:
title_parts.insert(0, kwargs['year'])
if 'category' in kwargs:
category = Category.objects.get(slug = kwargs['category'])
title_parts.insert(0, category.name)
elif 'tag' in kwargs:
tag = Tag.objects.get(slug = kwargs['tag'])
title_parts.insert(0, tag.name)
elif 'username' in kwargs:
author = User.objects.get(username = kwargs['username'])
title_parts.insert(0, author.get_full_name() or author.username)
return title_parts
def get_post_image(post):
image_types = (
'image/bmp', 'image/x-windows-bmp', 'image/gif',
'image/jpeg', 'image/pjpeg', 'image/png'
)
images = post.attachments.filter(mimetype__in = image_types)[:1]
if images.count() > 0:
try:
url = images[0].file.url
if url.startswith('/'):
url = settings.MEDIA_URL[:-1] + url
return url
except:
pass
return '' | [
"[email protected]"
] | |
b2f2b0710c0640c57e4a6973c8d53859dfa230dc | f3c332093a573e5e407102f28126c97ec0bb94ce | /Math work trial 4.py | 33d4d7dc24f5660b1edc869411ddf005876fa1c6 | [] | no_license | AlexisMergan/Math | 8decc9925c6b400b5962d34750a763f4a5a29ce3 | 2f4c4c458b1ac845e15fd3de673e7371c0bc736b | refs/heads/master | 2021-03-08T00:46:40.093499 | 2020-03-18T19:55:40 | 2020-03-18T19:55:40 | 246,307,282 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | import numpy as np
import math
#Parameter Values
sigma=1.5
beta=0.95
alpha=0.3
delta=0.1
epsilon=10**(-3)
K= np.linspace(0.01,5,1000)
V= np.linspace(0,0,1000)
def u(c):
    # guard non-positive consumption *before* the power: with sigma = 1.5,
    # c**(1-sigma) raises for c == 0 and goes complex for c < 0
    if c <= 0:
        return -math.exp(200)
    return ((c**(1-sigma))-1)/(1-sigma)
def ct(i, j, K):
kt1 = K[i]
kt2 = K[j]
ct1=((kt1**alpha)+(1-delta)*kt1-kt2)
return ct1
def funct1(i,j,K,V):
    cons = ct(i,j,K)
    if cons >= 0:
        # discount the value of *next-period* capital K[j]; the original
        # used V[i], which is constant in the choice variable j
        return u(cons) + beta*V[j]
    else:
        return -math.exp(200)
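# Hedged note (added; not in the original): funct2/maxlist below maximize
# the Bellman term over the next-period grid index j, and the while loop at
# the bottom iterates
#   V_{t+1}(K[i]) = max_j { u(c(i,j)) + beta*V_t(K[j]) },
#   c(i,j) = K[i]**alpha + (1-delta)*K[i] - K[j],
# until successive value functions differ by less than epsilon.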
#print(funct1(2,0,K,V))
#print(funct1(500,0,K,V))
#print(funct1(999,0,K,V))
#print(funct1(999,400,K,V))
length=range(len(K))
def maxlist(i,K):
values_i=[]
for j in length:
values_i.append(float(funct1(i,j,K,V)))
return values_i
#print(maxlist(2,K))
def funct2(i,K):
v=max(maxlist(i,K))
return v
#print(funct2(2,K))
#print(funct2(999,K))
#print(funct2(500,K))
dist = 100
while dist > epsilon:
TV = []
diff = []
for i in length:
TV.append(funct2(i,K))
diff.append(TV[i]-V[i])
dist=max(np.abs(diff))
V=TV
print(dist)
print(V)
| [
"[email protected]"
] | |
e87450c7a8365fbcc7052eb6fab30bacef49d0a0 | 9fb9133e179a02e110b32513cd193903f4cb05cb | /T10 Graphs, Paths and Search/box_world.py | e5178cf8de73d6302373a88901e1d5c26a96de15 | [] | no_license | DPerrySvendsen/COS30002 | be7807d84cb252a78e781994b46a8794a11e81e3 | 27f60dff622cd6621c20cba5eeb8d01b246291f1 | refs/heads/master | 2021-01-23T16:29:59.103086 | 2017-06-04T08:41:24 | 2017-06-05T06:40:20 | 93,300,179 | 4 | 5 | null | null | null | null | UTF-8 | Python | false | false | 15,157 | py | ''' Basic square grid based world (BoxWorld) to test/demo path planning.
Created for HIT3046 AI for Games by Clinton Woodward [email protected]
See readme.txt for details. Look for ### comment lines.
Note that the box world "boxes" (tiles) are created and assigned an index (idx)
value, starting from the origin in the bottom left corner. This matches the
convention of coordinates used by pyglet which uses OpenGL, rather than a
traditional 2D graphics with the origin in the top left corner.
+ ...
^ 5 6 7 8 9
| 0 1 2 3 4
(0,0) ---> +
A BoxWorld can be loaded from a text file. The file uses the following format.
* Values are separated by spaces or tabs (not commas)
* Blank lines or lines starting with # (comments) are ignored
* The first data line is two integer values to specify width and height
* The second row specifies the Start and the Target boxes as two integer
  index values (matching the parser in BoxWorld.FromFile), e.g.
    10 15
* Each BowWorld row is the specified per line of the text file.
- Each type is specified by a single character ".", "~", "m" or "#".
- Number of tile values must match the number of columns
* The number of rows must match the number of specified rows.
Example BoxWorld map file.
# This is a comment and is ignored
# First specify the width x height values
6 5
# Second specify the start and target box indexes
0 17
# Now specify each row of column values
. . . . . .
~ ~ X . . .
. ~ X ~ . .
. . X . . .
. m m m . .
# Note the number of rows and column values match
'''
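# Hedged usage sketch (added; not part of the original file). 'map1.txt' is
# a hypothetical file in the format above, and the search key and limit
# semantics come from the separate searches module:
#   world = BoxWorld.FromFile('map1.txt', pixels=(500, 500))
#   world.plan_path(list(SEARCHES.keys())[0], limit=0)
#   print(world.path.path)   # list of box indexes from start to target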
from graphics import egi
import pyglet
from pyglet.gl import *
from point2d import Point2D
from graph import SparseGraph, Node, Edge
from searches import SEARCHES
from math import hypot
box_kind = ['.','m','~','X']
box_kind_map = {
'clear': '.',
'mud': 'm',
'water': '~',
'wall': 'X',
}
no_edge = ['X'] # box kinds that don't have edges.
edge_cost_matrix = [
# '.' 'm' '~' 'X'
[ 1.0, 2.0, 5.0, None], # '.'
[ 2.0, 4.0, 9.0, None], # 'm'
[ 5.0, 9.0, 10.0, None], # '~'
[None, None, None, None], # 'X <- NO edges to walls.
]
min_edge_cost = 1.0 # must be min value for heuristic cost to work
def edge_cost(k1, k2):
k1 = box_kind.index(k1)
k2 = box_kind.index(k2)
return edge_cost_matrix[k1][k2]
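# Hedged example (added; not in the original): stepping from clear ground
# onto mud costs edge_cost('.', 'm') == 2.0; moves into walls ('X') never
# reach this lookup because _add_edge filters them out first.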
box_kind_color = {
'.': (1.0, 1.0, 1.0, 1.0), # clear, White
'm': (0.6, 0.6, 0.5, 1.0), # mud, Brown-ish
'~': (0.5, 0.5, 1.0, 1.0), # water, Light blue
'X': (0.2, 0.2, 0.2, 1.0), # walls, Dark grey
}
cfg = {
'LABELS_ON': False,
'EDGES_ON': False,
'CENTER_ON': False,
'BOXLINES_ON': False,
'BOXUSED_ON': False,
'TREE_ON': True,
'PATH_ON': True,
}
search_modes = list(SEARCHES.keys())
class Box(object):
'''A single box for boxworld. '''
def __init__(self, coords=(0,0,0,0), kind='.'):
# keep status
self.kind = kind
self.color = box_kind_color[kind]
self.marker = None
# nav graph node
self.node = None
self.idx = -1
# pretty labels...
self.idx_label = None
self.pos_label = None
self.marker_label = None
# position using coordinates
self.reposition(coords)
def reposition(self, coords):
# top, right, bottom, left
pts = self.coords = coords
# points for drawing
self._pts = (
Point2D(pts[3], pts[0]), # top left
Point2D(pts[1], pts[0]), # top right
Point2D(pts[1], pts[2]), # bottom right
Point2D(pts[3], pts[2]) # bottom left
)
# vector-centre point
self._vc = Point2D((pts[1]+pts[3])/2.0, (pts[0]+pts[2])/2.0)
# labels may need to be updated
self._reposition_labels()
def _reposition_labels(self):
# reposition labels if we have any
if self.idx_label:
self.idx_label.x = self._vc.x
self.idx_label.y = self._vc.y
self.pos_label.x = self._vc.x
self.pos_label.y = self._vc.y
if self.marker_label:
self.marker_label.x = self._vc.x
self.marker_label.y = self._vc.y
#self._vc.y - (self.marker_label.content_height // 2)
def set_kind(self, kind):
'Set the box kind (type) using string a value ("water","mud" etc)'
kind = box_kind_map.get(kind, kind)
try:
self.kind = kind
self.color = box_kind_color[kind]
except KeyError:
print('not a known tile kind "%s"' % kind)
def draw(self):
# draw filled box
egi.set_pen_color(self.color)
egi.closed_shape(self._pts, filled=True)
# draw box border
if cfg['BOXLINES_ON']:
egi.set_pen_color((.7,.7,.7,1))
egi.closed_shape(self._pts, filled=False)
# centre circle
if cfg['CENTER_ON']:
egi.set_pen_color((.3,.3,1,1))
egi.circle(self._vc, 5)
# box position (simple column,row) (or x,y actually)
if self.node:
if cfg['LABELS_ON']:
if not self.idx_label:
info = "%d" % self.idx
self.idx_label = pyglet.text.Label(info, color=(0,0,0,255),
anchor_x="center",
anchor_y="top")
info = "(%d,%d)" % (self.pos[0], self.pos[1])
self.pos_label = pyglet.text.Label(info, color=(0,0,0,255),
anchor_x="center",
anchor_y="bottom")
self._reposition_labels()
self.idx_label.draw()
#self.pos_label.draw()
if self.marker:
if not self.marker_label or self.marker_label.text != self.marker:
self.marker_label = pyglet.text.Label(self.marker,
color=(255,0,0,255),
bold=True,
anchor_x="center",
anchor_y="center")
self._reposition_labels()
self.marker_label.draw()
class BoxWorld(object):
'''A world made up of boxes. '''
def __init__(self, nx, ny, cx, cy):
self.boxes = [None]*nx*ny
self.nx, self.ny = nx, ny # number of box (squares)
for i in range(len(self.boxes)):
self.boxes[i] = Box()
self.boxes[i].idx = i
# use resize to set all the positions correctly
self.cx = self.cy = self.wx = self.wy = None
self.resize(cx, cy)
# create nav_graph
self.path = None
self.graph = None
self.reset_navgraph()
self.start = None
self.target = None
def get_box_by_index(self, ix, iy):
idx = (self.nx * iy) + ix
return self.boxes[idx] if idx < len(self.boxes) else None
def get_box_by_pos(self, x, y):
idx = (self.nx * (y // self.wy)) + (x // self.wx)
return self.boxes[idx] if idx < len(self.boxes) else None
def update(self, delta):
pass
def draw(self):
for box in self.boxes:
box.draw()
if cfg['EDGES_ON']:
egi.set_pen_color(name='LIGHT_BLUE')
for node, edges in self.graph.edgelist.items():
# print node, edges
for dest in edges:
egi.line_by_pos(self.boxes[node]._vc, self.boxes[dest]._vc)
if self.path:
# put a circle in the visited boxes?
if cfg['BOXUSED_ON']:
egi.set_pen_color(name="GREEN")
for i in self.path.closed:
egi.circle(self.boxes[i]._vc, 10)
if cfg['TREE_ON']:
egi.set_stroke(3)
# Show open edges
route = self.path.route
egi.set_pen_color(name='GREEN')
for i in self.path.open:
egi.circle(self.boxes[i]._vc, 10)
# show the partial paths considered
egi.set_pen_color(name='ORANGE')
for i,j in route.items():
egi.line_by_pos(self.boxes[i]._vc, self.boxes[j]._vc)
egi.set_stroke(1)
if cfg['PATH_ON']:
# show the final path delivered
egi.set_pen_color(name='RED')
egi.set_stroke(2)
path = self.path.path
for i in range(1,len(path)):
egi.line_by_pos(self.boxes[path[i-1]]._vc, self.boxes[path[i]]._vc)
egi.set_stroke(1)
def resize(self, cx, cy):
self.cx, self.cy = cx, cy # world size
self.wx = (cx-1) // self.nx
self.wy = (cy-1) // self.ny # int div - box width/height
for i in range(len(self.boxes)):
# basic positions (bottom left to top right)
x = (i % self.nx) * self.wx
y = (i // self.nx) * self.wy
# top, right, bottom, left
coords = (y + self.wy -1, x + self.wx -1, y, x)
self.boxes[i].reposition(coords)
def _add_edge(self, from_idx, to_idx, distance=1.0):
b = self.boxes
if b[to_idx].kind not in no_edge: # stone wall
cost = edge_cost(b[from_idx].kind, b[to_idx].kind)
self.graph.add_edge(Edge(from_idx, to_idx, cost*distance))
def _manhattan(self, idx1, idx2):
''' Manhattan distance between two nodes in boxworld, assuming the
minimal edge cost so that we don't overestimate the cost). '''
x1, y1 = self.boxes[idx1].pos
x2, y2 = self.boxes[idx2].pos
return (abs(x1-x2) + abs(y1-y2)) * min_edge_cost
def _hypot(self, idx1, idx2):
'''Return the straight line distance between two points on a 2-D
Cartesian plane. Argh, Pythagoras... trouble maker. '''
x1, y1 = self.boxes[idx1].pos
x2, y2 = self.boxes[idx2].pos
return hypot(x1-x2, y1-y2) * min_edge_cost
def _max(self, idx1, idx2):
'''Return the straight line distance between two points on a 2-D
Cartesian plane. Argh, Pythagoras... trouble maker. '''
x1, y1 = self.boxes[idx1].pos
x2, y2 = self.boxes[idx2].pos
return max(abs(x1-x2),abs(y1-y2)) * min_edge_cost
def reset_navgraph(self):
''' Create and store a new nav graph for this box world configuration.
The graph is build by adding NavNode to the graph for each of the
boxes in box world. Then edges are created (4-sided).
'''
self.path = None # invalid so remove if present
self.graph = SparseGraph()
# Set a heuristic cost function for the search to use
#self.graph.cost_h = self._manhattan
self.graph.cost_h = self._hypot
#self.graph.cost_h = self._max
nx, ny = self.nx, self.ny
# add all the nodes required
for i, box in enumerate(self.boxes):
box.pos = (i % nx, i // nx) #tuple position
box.node = self.graph.add_node(Node(idx=i))
# build all the edges required for this world
for i, box in enumerate(self.boxes):
# four sided N-S-E-W connections
if box.kind in no_edge:
continue
# UP (i + nx)
if (i+nx) < len(self.boxes):
self._add_edge(i, i+nx)
# DOWN (i - nx)
if (i-nx) >= 0:
self._add_edge(i, i-nx)
# RIGHT (i + 1)
if (i%nx + 1) < nx:
self._add_edge(i, i+1)
# LEFT (i - 1)
if (i%nx - 1) >= 0:
self._add_edge(i, i-1)
# Diagonal connections
# UP LEFT(i + nx - 1)
j = i + nx
if (j-1) < len(self.boxes) and (j%nx - 1) >= 0:
self._add_edge(i, j-1, 1.4142) # sqrt(1+1)
# UP RIGHT (i + nx + 1)
j = i + nx
if (j+1) < len(self.boxes) and (j%nx + 1) < nx:
self._add_edge(i, j+1, 1.4142)
# DOWN LEFT(i - nx - 1)
j = i - nx
if (j-1) >= 0 and (j%nx - 1) >= 0:
self._add_edge(i, j-1, 1.4142)
# DOWN RIGHT (i - nx + 1)
j = i - nx
if (j+1) >= 0 and (j%nx +1) < nx:
self._add_edge(i, j+1, 1.4142)
def set_start(self, idx):
'''Set the start box based on its index idx value. '''
# remove any existing start node, set new start node
if self.target == self.boxes[idx]:
print("Can't have the same start and end boxes!")
return
if self.start:
self.start.marker = None
self.start = self.boxes[idx]
self.start.marker = 'S'
def set_target(self, idx):
'''Set the target box based on its index idx value. '''
# remove any existing target node, set new target node
if self.start == self.boxes[idx]:
print("Can't have the same start and end boxes!")
return
if self.target is not None:
self.target.marker = None
self.target = self.boxes[idx]
self.target.marker = 'T'
def plan_path(self, search, limit):
'''Conduct a nav-graph search from the current world start node to the
current target node, using a search method that matches the string
specified in `search`.
'''
cls = SEARCHES[search]
self.path = cls(self.graph, self.start.idx, self.target.idx, limit)
@classmethod
def FromFile(cls, filename, pixels=(500,500) ):
'''Support a the construction of a BoxWorld map from a simple text file.
See the module doc details at the top of this file for format details.
'''
# open and read the file
f = open(filename)
lines = []
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
lines.append(line)
f.close()
# first line is the number of boxes width, height
nx, ny = [int(bit) for bit in lines.pop(0).split()]
# Create a new BoxWorld to store all the new boxes in...
cx, cy = pixels
world = BoxWorld(nx, ny, cx, cy)
# Get and set the Start and Target tiles
s_idx, t_idx = [int(bit) for bit in lines.pop(0).split()]
world.set_start(s_idx)
world.set_target(t_idx)
# Ready to process each line
assert len(lines) == ny, "Number of rows doesn't match data."
# read each line
idx = 0
for line in reversed(lines): # in reverse order
bits = line.split()
assert len(bits) == nx, "Number of columns doesn't match data."
for bit in bits:
bit = bit.strip()
assert bit in box_kind, "Not a known box type: "+bit
world.boxes[idx].set_kind(bit)
idx += 1
return world | [
"[email protected]"
] | |
feeaa15cae7e0454a3ad30921c8a3a80199f4399 | 910be469257538bcbbd15e894679856a1d311252 | /server/service/trade/migrations/0006_auto_20161219_0232.py | f61cdfb26a888fc0d4d146d69de0dfe9c9a9cfb8 | [] | no_license | bopo/bankeys2 | ece7e7faa93aab48bf5a336721bfa69b33a870d8 | 5a81f5f4cd6442aade444444ba768b9ffa9dcbd4 | refs/heads/master | 2023-08-19T04:16:12.063961 | 2023-08-04T09:09:00 | 2023-08-04T09:09:00 | 119,646,417 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-19 02:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trade', '0005_auto_20161219_0230'),
]
operations = [
migrations.AlterField(
model_name='transfer',
name='type',
field=models.CharField(choices=[('0', '\u626b\u7801\u652f\u4ed8'), ('1', '\u7b2c\u4e09\u65b9\u652f\u4ed8')], default=0, max_length=100, verbose_name='\u6d88\u8d39\u7c7b\u578b'),
),
]
| [
"[email protected]"
] | |
52b3f97fc61218cd38c7bea2e300e88231fcb7d5 | 25a4e5a76b06e6e677a2e08973e7d03f9b31300d | /Orile_tensoflow/first.py | f7c16104969646bc34b979a1260acf6d351665ca | [] | no_license | ducksfrogs/machine_learning_lessons | f4da81c7eb40fd1288a3e5614e111d0b5f780e06 | 5fe0ecffd45206cfd81e0a13069033f90317371e | refs/heads/master | 2022-12-12T02:24:28.769446 | 2020-08-03T02:13:10 | 2020-08-03T02:13:10 | 281,533,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
| [
"[email protected]"
] | |
a2e966e841bf6ddb259d95be44428c8325a429fc | 0bb36716a1edcdd38979201baf75230e769ba8b7 | /examples/trajectory.py | 32f20ca2eb0f2fb02bdb890c44f00c3dc388cca7 | [
"Unlicense"
] | permissive | NOAA-ORR-ERD/gridded | 6759df63af86e606bba9722132619a464aae4715 | e89345cc5a5889d20c182e2c194a44bb45dfc575 | refs/heads/master | 2023-08-16T16:28:56.838870 | 2023-08-14T18:28:38 | 2023-08-14T18:28:38 | 79,688,194 | 60 | 17 | Unlicense | 2023-04-04T17:55:55 | 2017-01-22T02:38:59 | Python | UTF-8 | Python | false | false | 1,308 | py |
# coding: utf-8
# # Trajectory Test
# In[1]:
get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
import numpy.ma as ma
import numpy as np
import netCDF4
import gridded
# In[2]:
url = 'http://geoport.whoi.edu/thredds/dodsC/examples/bora_feb.nc'
# In[3]:
nc = netCDF4.Dataset(url)
lon = nc['lon_rho'][:]
lat = nc['lat_rho'][:]
temp = nc['temp'][-1,-1,:,:]
# In[4]:
x = np.linspace(13., 15.)
y = np.linspace(45.3, 43.5)
len(x)
# In[5]:
plt.pcolormesh(lon,lat,ma.masked_invalid(temp),vmin=5,vmax=15,cmap='jet');
plt.plot(x,y,'-')
plt.grid()
plt.colorbar();
# In[6]:
temperature = gridded.Variable.from_netCDF(filename=url, name='Temperature', varname='temp')
salinity = gridded.Variable.from_netCDF(filename=url, name='Salinity', varname='salt', grid=temperature.grid)
points = np.column_stack((x,y))
t0 = temperature.time.max_time
# ## Interpolate values at array of lon,lat points at specific time
# In[7]:
salts = salinity.at(points, t0)
# In[8]:
temps = temperature.at(points, t0)
# In[9]:
plt.plot(temps)
# In[10]:
times = temperature.time.data
# ## Interpolate values at lon,lat points with changing time values
# In[11]:
over_time = [temperature.at((x[i],y[i]), val)[0] for i,val in enumerate(times)]
# In[12]:
plt.plot(over_time)
| [
"[email protected]"
] | |
c99f0e732f3e0dd93828549b9b6022010efa53d7 | 2ffdfe188859d5be1a427ce1c4457d41331b541c | /message/Count(Motifs) +Consensus(Motifs).py | e152c4cf7f124a1bd34f2be65e7e0181a8a61b2a | [] | no_license | Hydebutterfy/learn-python | 18504b0c7281c2b0f94dbc73e77da87c8ac3ff38 | 883614deaf0a3cdf46d8305197fe5659fd609d60 | refs/heads/master | 2021-01-24T11:59:44.883949 | 2016-12-02T07:48:40 | 2016-12-02T07:48:40 | 59,213,755 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py |
def Count(Motifs):
count = {}
k = len(Motifs[0])
for symbol in "ACGT":
count[symbol] = []
for j in range(k):
count[symbol].append(0)
t=len(Motifs)
for i in range(t):
for j in range(k):
symbol=Motifs[i][j]
count[symbol][j] += 1
return count
def Consensus(Motifs):
k = len(Motifs[0])
count = Count(Motifs)
consensus = ""
for j in range(k):
m=0
frequentSymbol = ""
for symbol in "ACGT":
if count[symbol][j] > m:
m = count[symbol][j]
frequentSymbol = symbol
consensus += frequentSymbol
return consensus
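# Hedged worked example (added; not in the original): for
#   Motifs = ["ACGT", "ACGA", "ACCT"]
# Count(Motifs) returns {'A': [3,0,0,1], 'C': [0,3,1,0],
#                        'G': [0,0,2,0], 'T': [0,0,0,2]}
# and Consensus(Motifs) returns "ACGT".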
Motifs=[]
filename = input("Enter file1 name: ")
fileread = open(filename,"r")
for i in fileread:
dna = i.strip()
Motifs.append(dna.upper())
print (Consensus(Motifs)) | [
"[email protected]"
] | |
f356bab78b052e54ab2b39f5b35a6e23804fa954 | 372128141eb3bbbcd40585aa42de63fb9c9f474a | /CS333/lab12.py | c6f9fce906e9deb6d2dd7a8345a06e7f6275fc86 | [] | no_license | CharlesGe129/DailyScripts | 6120b9a6937331106466f631953fe5e7890a5173 | ed71e476a312a4ca0b98da4b5eec76cb80624661 | refs/heads/master | 2021-08-07T00:01:03.126998 | 2018-11-28T17:51:42 | 2018-11-28T17:51:42 | 104,777,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | def iterate(times, tax, funcs):
(a, b, c) = (1, 1, 1)
fac = 1 - tax
for i in range(times):
a1 = fac*funcs[0](a, b, c) + tax
b1 = fac*funcs[1](a, b, c) + tax
c1 = fac*funcs[2](a, b, c) + tax
(a, b, c) = (a1, b1, c1)
print(f"#{i+1}: a={a}, b={b}, c={c}")
def s1(a, b, c):
return c
def s2(a, b, c):
return 0.5 * a
def s3(a, b, c):
return 0.5*a + b
if __name__ == '__main__':
iterate(5, 0.15, [s1, s2, s3])
| [
"[email protected]"
] |