blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 5 283 | content_id stringlengths 40 40 | detected_licenses sequencelengths 0 41 | license_type stringclasses 2 values | repo_name stringlengths 7 96 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 58 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 12.7k 662M ⌀ | star_events_count int64 0 35.5k | fork_events_count int64 0 20.6k | gha_license_id stringclasses 11 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 43 values | src_encoding stringclasses 9 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 5.88M | extension stringclasses 30 values | content stringlengths 7 5.88M | authors sequencelengths 1 1 | author stringlengths 0 73 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2427fc0867a45d210f2e8771af3dc7bfb635c603 | 8d2411813994b6a7b038253f5656b83c9a62033e | /examples/least_square_regression/main_least_square_regression.py | ffabb7f266d7487e745d7d057a6ef1edc1171f7c | [
"MIT"
] | permissive | wesenu/random-fourier-features | a8ee840e9e5e30eca012a9051cc2f087fde73c2a | 4a12185e44d1f9aba594f8a7569042e73675d6cd | refs/heads/main | 2023-08-23T01:32:41.518073 | 2021-10-25T01:17:01 | 2021-10-25T01:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | #!/usr/bin/env python3
#
# This Python script provides an example usage of the RFFRegression class, a class
# for least-squares regression using random Fourier features (RFF). The interface of
# RFFRegression is quite close to sklearn.linear_model.LinearRegression.
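#
# For comparison, a sketch of the scikit-learn analogue of the fit/predict
# calls made below (illustrative only, not part of this script):
#
#     from sklearn.linear_model import LinearRegression
#     reg = LinearRegression()
#     reg.fit(Xs_train, ys_train)
#     predict = reg.predict(Xs_test)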
#################################### SOURCE START ###################################
"""
Overview:
Train Random Fourier Feature least square regression and plot results.
Usage:
    main_least_square_regression.py [--rtype <str>] [--kdim <int>] [--std_kernel <float>]
                                    [--n_train <int>] [--n_test <int>] [--seed <int>]
    main_least_square_regression.py (-h | --help)
Options:
    --rtype <str>          Random matrix type (rff or orf).      [default: rff]
    --kdim <int>           Dimension of RFF/ORF.                 [default: 8]
--std_kernel <float> Standard deviation of RFF/ORF. [default: 0.5]
--n_train <int> Number of training data points. [default: 21]
--n_test <int> Number of test data points. [default: 101]
--seed <int> Random seed. [default: 111]
-h, --help Show this message.
"""
import os
import sys
import docopt
import numpy as np
import matplotlib.pyplot as mpl
### Main procedure
def main(args):
    ### Fix seed for random Fourier feature calculation
    rfflearn.seed(args["--seed"])
    ### Create regressor instance
if args["--rtype"] == "rff": reg = rfflearn.RFFRegression(dim_kernel = args["--kdim"], std_kernel = args["--std_kernel"])
elif args["--rtype"] == "orf": reg = rfflearn.ORFRegression(dim_kernel = args["--kdim"], std_kernel = args["--std_kernel"])
    else : raise RuntimeError("Error: '--rtype' must be 'rff' or 'orf'.")
### Prepare training data
with utils.Timer("Creating dataset: "):
Xs_train = np.linspace(0, 3, args["--n_train"]).reshape((args["--n_train"], 1))
ys_train = np.sin(Xs_train**2)
Xs_test = np.linspace(0, 3, args["--n_test"]).reshape((args["--n_test"], 1))
ys_test = np.sin(Xs_test**2)
### Train regression with random fourier features
with utils.Timer("Train regressor: "):
reg.fit(Xs_train, ys_train)
### Conduct prediction for the test data
with utils.Timer("Prediction: "):
predict = reg.predict(Xs_test)
### Plot regression results
mpl.figure(0)
mpl.title("Regression for function y = sin(x^2) with RFF")
mpl.xlabel("X")
mpl.ylabel("Y")
mpl.plot(Xs_train, ys_train, "o")
mpl.plot(Xs_test, ys_test, ".")
mpl.plot(Xs_test, predict, "-")
mpl.legend(["Training data", "Test data", "Prediction by RFF regression"])
mpl.grid()
mpl.show()
if __name__ == "__main__":
### Parse input arguments.
args = docopt.docopt(__doc__)
### Add path to 'rfflearn/' directory.
    ### The following lines are not necessary if you copied 'rfflearn/' to the
    ### current directory or to another directory included in the Python path.
current_dir = os.path.dirname(__file__)
module_path = os.path.join(current_dir, "../../")
sys.path.append(module_path)
import rfflearn.cpu as rfflearn
import rfflearn.utils as utils
### Convert all arguments to an appropriate type.
for k, v in args.items():
try : args[k] = eval(str(v))
except: args[k] = str(v)
### Run main procedure.
main(args)
#################################### SOURCE FINISH ##################################
# Author: Tetsuya Ishikawa <[email protected]>
# vim: expandtab tabstop=4 shiftwidth=4 fdm=marker
| [
"[email protected]"
] | |
109a815533df2da73af5995e637085223d4ca0d5 | c27c8d8fbf996722d78b6713dbd807610b8192d8 | /gridAPI/views.py | 4703a8c359993209e8472381a648efb0cc90efbc | [] | no_license | s1s1ty/ConwayGameOfLife | f81df5e54988ce5df60338890fe3c98904b3aeda | 89f3014755a1e56777ee236602edc71eb89751a6 | refs/heads/master | 2021-08-23T08:00:57.399149 | 2017-12-04T07:29:56 | 2017-12-04T07:29:56 | 112,950,753 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.response import Response
from .models import Grid
from .serializers import GridSerializer
from .conveygame import ConveyGameLife
class GridList(generics.ListCreateAPIView):
"""
    List and create grid instances.
"""
queryset = Grid.objects.all()
serializer_class = GridSerializer
def post(self, request):
data = request.data
serializer = GridSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=201)
return Response(serializer.errors, status=400)
class GridDetail(APIView):
"""
    Retrieve or update a grid instance.
"""
def get_object(self, pk):
try:
return Grid.objects.get(pk=pk)
except Grid.DoesNotExist:
raise Http404
def get(self, request, pk):
grid = self.get_object(pk)
        if request.GET.get('after'):
            all_age = request.GET.get('after').split(',')
            data_list = []
            # The starting grid is kept hard-coded here because we need to know
            # how the grid data are provided.
            re = [[1,1,1],[0,1,0],[1,0,0]]
            mod = [[1,1,1],[0,1,0],[1,0,0]]
            for index in all_age:
                if index:  # skip empty tokens, e.g. from a trailing comma
v=re
obj = ConveyGameLife(v,grid.x,grid.y,mod)
re = obj.state_check()
mod = list(re)
print mod
data_list.append({
'age': index,
'grid': list(mod)
})
re_data = {
'x': grid.x,
'y': grid.y,
'data': data_list
}
return Response(re_data)
serializer = GridSerializer(grid)
return Response(serializer.data)
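    # An illustrative request this endpoint serves (URL pattern assumed from
    # the surrounding project, ages are examples):
    #   GET /grids/<pk>/?after=1,2
    # returns the board evolved by 1 and 2 generations respectively.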
def patch(self, request, pk):
grid = self.get_object(pk)
serializer = GridSerializer(grid, data=request.data, partial=True) # set partial=True to update a data partially
if serializer.is_valid():
serializer.save()
            return Response(serializer.data, status=201)
        return Response("wrong parameters", status=400)
| [
"[email protected]"
] | |
e252352ce3f32544da3a2eef1671d17e5ed872af | 843c5fddab984554ef66cc1ba40de2b19aa32ebb | /csiro_workspace/__init__.py | e7b15050d8aa458d2dc3f1c75d7e802c22b2e334 | [
"MIT",
"BSD-3-Clause"
] | permissive | afcarl/workspace-python | fe094b1f9a35fb0f8a5532b9a761578e60fb679b | 78126e9e8af7adbcd47206f69f9b8b999f38a300 | refs/heads/master | 2020-07-23T14:42:37.813891 | 2019-08-07T06:49:51 | 2019-08-07T06:49:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | # ===========================================================================
#
# Revision: $Revision: 4 $
# Last changed: $Date: 2010-05-24 11:57:57 +1000 (Mon, 24 May 2010) $
#
# Copyright 2015 by:
#
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
#
# This file is licensed by CSIRO under the copy of the CSIRO Binary
# License Agreement included with the file when downloaded or obtained
# from CSIRO (including any Supplementary License). If no copy was
# included, you must obtain a new copy of the Software from CSIRO before
# any use is permitted.
#
# For further information, contact: [email protected]
#
# This copyright notice must be included with all copies of the source code.
#
# ===========================================================================
__all__ = ['workspace']
| [
"het018@f175a1c1-1187-0410-b721-d13dda9e15ce"
] | het018@f175a1c1-1187-0410-b721-d13dda9e15ce |
6eacf0ad7871a5a01c4b7178181798f3abaf6a48 | 1536a6cabaa9b6429603d57df6705a14063e016a | /src/networkx/algorithms/isomorphism/vf2weighted.py | a3142485aa277bd6563c933fd41e6917b9d77885 | [] | no_license | lorenzoriano/Graph-Evolve | ac7da40f7b27afdca7d929ae3e10308874929f1c | 11b6f43beaa88254776da959da7bcd9c4cd21eba | refs/heads/master | 2020-05-02T20:10:38.282396 | 2011-12-16T11:01:45 | 2011-12-16T11:01:45 | 1,967,115 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,794 | py | """
VF2 implementations for weighted graphs.
"""
import networkx as nx
from networkx.algorithms.isomorphism.isomorphvf2 \
import GraphMatcher,DiGraphMatcher,GMState,DiGMState
__all__ = ['WeightedGraphMatcher',
'WeightedDiGraphMatcher',
'WeightedMultiGraphMatcher',
'WeightedMultiDiGraphMatcher']
## VF2 is a recursive algorithm, so the call/lookup overhead is already high.
## Each implementation needs to be as fast as possible.
##
## Within the semantic feasibility function, we provide local variables
## Also, we don't want the function checking if the graph is a multigraph
## or if it is directed each time it is called. So we provide separate
## implementations.
def close(x, y, rtol, atol):
"""Returns True if x and y are sufficiently close.
Parameters
----------
    x, y : float
        The values to compare (assumed finite).
    rtol
The relative tolerance.
atol
The absolute tolerance.
"""
# assumes finite weights
return abs(x-y) <= atol + rtol * abs(y)
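# A quick illustrative check of the tolerance rule above (numbers chosen
# purely for demonstration):
#
#     >>> close(1.0, 1.0 + 5e-7, rtol=1e-6, atol=1e-9)
#     True
#     >>> close(1.0, 1.01, rtol=1e-6, atol=1e-9)
#     False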
class WeightedGraphMatcher(GraphMatcher):
"""Implementation of VF2 algorithm for undirected, weighted graphs."""
def __init__(self, G1, G2, rtol=1e-6, atol=1e-9):
"""Initialize WeightedGraphMatcher.
Parameters
----------
G1, G2 : nx.Graph instances
G1 and G2 must be weighted graphs.
rtol : float, optional
The relative tolerance used to compare weights.
atol : float, optional
The absolute tolerance used to compare weights.
"""
self.rtol = rtol
self.atol = atol
GraphMatcher.__init__(self, G1, G2)
def semantic_feasibility(self, G1_node, G2_node):
"""Returns True if mapping G1_node to G2_node is semantically feasible."""
G1_adj = self.G1.adj
G2_adj = self.G2.adj
core_1 = self.core_1
rtol, atol = self.rtol, self.atol
for neighbor in G1_adj[G1_node]:
if neighbor is G1_node:
if not close(G1_adj[G1_node][G1_node].get('weight',1),
G2_adj[G2_node][G2_node].get('weight',1),
rtol, atol):
return False
elif neighbor in core_1:
if not close(G1_adj[G1_node][neighbor].get('weight',1),
G2_adj[G2_node][core_1[neighbor]].get('weight',1),
rtol, atol):
return False
# syntactic check has already verified that neighbors are symmetric
return True
class WeightedDiGraphMatcher(DiGraphMatcher):
"""Implementation of VF2 algorithm for directed, weighted graphs."""
def __init__(self, G1, G2, rtol=1e-6, atol=1e-9):
"""Initialize WeightedGraphMatcher.
Parameters
----------
G1, G2 : nx.DiGraph instances
G1 and G2 must be weighted graphs.
rtol : float, optional
The relative tolerance used to compare weights.
atol : float, optional
The absolute tolerance used to compare weights.
"""
self.rtol = rtol
self.atol = atol
DiGraphMatcher.__init__(self, G1, G2)
def semantic_feasibility(self, G1_node, G2_node):
"""Returns True if mapping G1_node to G2_node is semantically feasible."""
G1_succ = self.G1.succ
G1_pred = self.G1.pred
G2_succ = self.G2.succ
G2_pred = self.G2.pred
core_1 = self.core_1
rtol, atol = self.rtol, self.atol
for successor in G1_succ[G1_node]:
if successor is G1_node:
if not close(G1_succ[G1_node][G1_node].get('weight',1),
G2_succ[G2_node][G2_node].get('weight',1),
rtol, atol):
return False
elif successor in core_1:
if not close(G1_succ[G1_node][successor].get('weight',1),
G2_succ[G2_node][core_1[successor]].get('weight',1),
rtol, atol):
return False
# syntactic check has already verified that successors are symmetric
for predecessor in G1_pred[G1_node]:
if predecessor is G1_node:
if not close(G1_pred[G1_node][G1_node].get('weight',1),
G2_pred[G2_node][G2_node].get('weight',1),
rtol, atol):
return False
elif predecessor in core_1:
if not close(G1_pred[G1_node][predecessor].get('weight',1),
G2_pred[G2_node][core_1[predecessor]].get('weight',1),
rtol, atol):
return False
# syntactic check has already verified that predecessors are symmetric
return True
class WeightedMultiGraphMatcher(GraphMatcher):
"""Implementation of VF2 algorithm for undirected, weighted multigraphs."""
def __init__(self, G1, G2, rtol=1e-6, atol=1e-9):
"""Initialize WeightedGraphMatcher.
Parameters
----------
G1, G2 : nx.MultiGraph instances
G1 and G2 must be weighted graphs.
rtol : float, optional
The relative tolerance used to compare weights.
atol : float, optional
The absolute tolerance used to compare weights.
"""
self.rtol = rtol
self.atol = atol
GraphMatcher.__init__(self, G1, G2)
def semantic_feasibility(self, G1_node, G2_node):
"""Returns True if mapping G1_node to G2_node is semantically feasible."""
G1_adj = self.G1.adj
G2_adj = self.G2.adj
core_1 = self.core_1
rtol, atol = self.rtol, self.atol
for neighbor in G1_adj[G1_node]:
if neighbor is G1_node:
data1 = [d.get('weight',1)
for k,d in G1_adj[G1_node][G1_node].items()]
data2 = [d.get('weight',1)
for k,d in G2_adj[G2_node][G2_node].items()]
data1.sort()
data2.sort()
for x,y in zip(data1,data2):
if not close(x,y,rtol,atol): return False
elif neighbor in core_1:
data1 = [d.get('weight',1)
for k,d in G1_adj[G1_node][neighbor].items()]
data2 = [d.get('weight',1)
for k,d in G2_adj[G2_node][core_1[neighbor]].items()]
data1.sort()
data2.sort()
for x,y in zip(data1,data2):
if not close(x,y,rtol,atol): return False
# syntactic check has already verified that neighbors are symmetric
return True
class WeightedMultiDiGraphMatcher(DiGraphMatcher):
"""Implementation of VF2 algorithm for directed, weighted multigraphs."""
def __init__(self, G1, G2, rtol=1e-6, atol=1e-9):
"""Initialize WeightedGraphMatcher.
Parameters
----------
G1, G2 : nx.MultiDiGraph instances
G1 and G2 must be weighted graphs.
rtol : float, optional
The relative tolerance used to compare weights.
atol : float, optional
The absolute tolerance used to compare weights.
"""
self.rtol = rtol
self.atol = atol
DiGraphMatcher.__init__(self, G1, G2)
def semantic_feasibility(self, G1_node, G2_node):
"""Returns True if mapping G1_node to G2_node is semantically feasible."""
G1_succ = self.G1.succ
G1_pred = self.G1.pred
G2_succ = self.G2.succ
G2_pred = self.G2.pred
core_1 = self.core_1
rtol, atol = self.rtol, self.atol
for successor in G1_succ[G1_node]:
if successor is G1_node:
data1 = [d.get('weight',1)
for k,d in G1_succ[G1_node][G1_node].items()]
data2 = [d.get('weight',1)
for k,d in G2_succ[G2_node][G2_node].items()]
data1.sort()
data2.sort()
for x,y in zip(data1,data2):
if not close(x,y,rtol,atol): return False
elif successor in core_1:
data1 = [d.get('weight',1)
for k,d in G1_succ[G1_node][successor].items()]
data2 = [d.get('weight',1)
for k,d in G2_succ[G2_node][core_1[successor]].items()]
data1.sort()
data2.sort()
for x,y in zip(data1,data2):
if not close(x,y,rtol,atol): return False
# syntactic check has already verified that successors are symmetric
for predecessor in G1_pred[G1_node]:
if predecessor is G1_node:
data1 = [d.get('weight',1)
for k,d in G1_pred[G1_node][G1_node].items()]
data2 = [d.get('weight',1)
for k,d in G2_pred[G2_node][G2_node].items()]
data1.sort()
data2.sort()
for x,y in zip(data1,data2):
if not close(x,y,rtol,atol): return False
elif predecessor in core_1:
data1 = [d.get('weight',1)
for k,d in G1_pred[G1_node][predecessor].items()]
data2 = [d.get('weight',1)
for k,d in G2_pred[G2_node][core_1[predecessor]].items()]
data1.sort()
data2.sort()
for x,y in zip(data1,data2):
if not close(x,y,rtol,atol): return False
# syntactic check has already verified that predecessors are symmetric
return True
| [
"[email protected]"
] | |
48fa7cf015ded5db313626bb690814ef8bce4466 | 3e897283fc2720dcb5d0860f4e38015d7f0c0ce0 | /db.py | b405120179e073fe4a867abe2da766cbd5b0073f | [] | no_license | theselfdrivingboat-com/heroku-selfdrivingboat | 3fa450f8cfade7e7233f4cf05212e02321b1bbd0 | 06630aa5eebff598c67872021416d9fd81fb733f | refs/heads/main | 2023-05-05T01:33:27.103829 | 2021-05-29T22:18:42 | 2021-05-29T22:18:42 | 348,523,745 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | import os
import psycopg2
try:
DATABASE_URL = os.environ['DATABASE_URL']
except KeyError:
with open('db.secret', 'r') as f:
DATABASE_URL = f.read()
#https://www.psycopg.org/docs/usage.html
"""
theselfdrivingboat::DATABASE=> \d boat_commands
Table "public.boat_commands"
Column | Type | Collation | Nullable | Default
---------------+-----------------------------+-----------+----------+---------------------------------------------------
command_id | integer | | not null | nextval('boat_commands_command_id_seq'::regclass)
command_name | character varying(100) | | |
created_on | timestamp without time zone | | | CURRENT_TIMESTAMP
has_been_read | boolean | | | false
read_by | character varying(100) | | |
Indexes:
"boat_commands_pkey" PRIMARY KEY, btree (command_id)
"""
def add_new_command(command):
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cur = conn.cursor()
cur.execute("INSERT INTO boat_commands (command_name) VALUES (%s)", (command, ))
conn.commit()
cur.close()
conn.close()
def get_unread_commands():
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cur = conn.cursor()
cur.execute("SELECT * FROM boat_commands WHERE has_been_read = FALSE ORDER BY command_id DESC ", (boat_name,));
commands = cur.fetchall()
conn.commit()
cur.close()
conn.close()
return commands
def read_last_command(boat_name):
"return None if last command was already read"
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cur = conn.cursor()
cur.execute("WITH last_command AS (SELECT * FROM boat_commands ORDER BY command_id DESC LIMIT 1) UPDATE boat_commands SET (has_been_read, read_by) = (TRUE, %s) FROM last_command WHERE last_command.has_been_read = FALSE RETURNING last_command.command_name", (boat_name,));
last_command = cur.fetchone()
conn.commit()
cur.close()
conn.close()
return last_command
| [
"[email protected]"
] | |
517ef682eaa2f91d28f6f04979cd561e42f77735 | 9522f191729ee115bac785721b4dd324ce65cf8a | /lesson_4/task_4_4.py | 8910dd0fae93b3f48e0b8726781ee2b2536af7c8 | [] | no_license | SvetlanaTsim/python_basics | 89ed9549c8a5d10138474e26229d4fb30657bb6f | 7ffd588b9c9d4c601d3d15a1ee52b2e355343a98 | refs/heads/main | 2023-08-21T07:52:55.721827 | 2021-10-18T15:04:27 | 2021-10-18T15:04:27 | 370,294,080 | 0 | 0 | null | 2021-10-18T14:33:54 | 2021-05-24T09:16:18 | Python | UTF-8 | Python | false | false | 537 | py | # Написать свой модуль utils и перенести в него функцию currency_rates()
# Write your own utils module and move the currency_rates() function from the
# previous task into it. Create a script that imports this module, make several
# calls to currency_rates(), and check that nothing unexpected happens.
from utils import currency_rate
currency_rate('hkd')
currency_rate('HUF')
currency_rate('wrongtext') #None
| [
"[email protected]"
] | |
2f6a85cebe53b633e4ca3f5350fa4a5fa0d56e29 | d5d3a69af574986e8c70751a84ee83cfd88fa0ef | /Distributions.py | c5ab6c149003081f7e628161b2d81d39fad8f28c | [] | no_license | people-robots/SafeNavigationHumanExperiments | 09e8d95a53e08da8338cd64612396df2b995c303 | da5dade7344a2f44d65058e350493f6456a422da | refs/heads/master | 2020-05-19T03:50:49.007702 | 2019-08-03T00:40:13 | 2019-08-03T00:40:13 | 184,810,582 | 2 | 1 | null | 2019-08-03T00:40:14 | 2019-05-03T19:34:18 | Python | UTF-8 | Python | false | false | 1,393 | py | import numpy as np
class BasicDistribution:
def __init__(self, degree_resolution = 1):
self.degree_resolution = degree_resolution
self.cached_distribution = None
def get_distribution(self, center = 0):
if self.cached_distribution is None:
			self.cached_distribution = self.generate_base_distribution()
return np.roll(self.cached_distribution, int(np.round(center)))
class Gaussian(BasicDistribution):
def __init__(self, sigma = 100, amplitude = 1):
super().__init__()
self.sigma = sigma
self.amplitude = amplitude
self.cached_distribution = None
def generate_base_distribution(self):
arr = np.zeros(int(360 / self.degree_resolution))
for degree in np.arange (-180, 540, self.degree_resolution):
gaussian_value = self.amplitude * np.exp((-1 * ((degree) ** 2)) / (2 * (self.sigma ** 2)))
if degree < 0:
degree = degree + 360
elif degree >= 360:
degree = degree - 360
if (arr[int(degree / self.degree_resolution)] < gaussian_value):
arr[int(degree / self.degree_resolution)] = gaussian_value
		return arr
class Rectangular(BasicDistribution):
	def __init__(self, width=40):
		super().__init__()  # sets degree_resolution and cached_distribution
		self.width = width
	def generate_base_distribution(self):
		width = self.width
		arr = np.zeros(int(360 / self.degree_resolution))  # np.zeros needs an int size
halfwidth = int(width / 2)
arr[:halfwidth] = 1
arr[-halfwidth:] = 1
return arr
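# A minimal usage sketch (values below are illustrative):
#
#     g = Gaussian(sigma=30, amplitude=1.0)
#     dist = g.get_distribution(center=90)  # peak rolled to 90 degrees
#     assert len(dist) == 360               # one bin per degree by default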
| [
"[email protected]"
] | |
c105a5762406b0a1c7513ccb0e5dd7f42d807e22 | 28013f06c5f480b7b9fc37185559a3d7b4949f04 | /venv/Scripts/django-admin.py | c63ad342d9a7545f90a72a61ecc5ae386f113875 | [] | no_license | zaricnikola/tvitko | fa5aa7b2c317bafcdb36f3bc96a3b8d3b9e748b9 | 377060a8dbca027236f80cf321944fa8f4562556 | refs/heads/master | 2022-12-22T04:23:27.959308 | 2018-03-11T17:50:48 | 2018-03-11T17:50:48 | 124,666,877 | 0 | 1 | null | 2022-12-11T17:26:08 | 2018-03-10T15:09:19 | Python | UTF-8 | Python | false | false | 176 | py | #!c:\users\korisnik\pycharmprojects\projekat1\venv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
f3a9e1daddda62c6e0533bdb6de5fc332a856c3e | 12b77f29511b6903cba9c931a791535ed438a538 | /abcd/asgi.py | 48d84d6042364e24b176acebeea87a4895083aa6 | [] | no_license | shabin75/abcd | db079379aaf8a0c85240594a8d2e32e43e8f9b17 | 19118b2c56045379bb9e4de451f0bff35471dece | refs/heads/main | 2023-04-25T09:14:04.089485 | 2021-05-14T07:12:30 | 2021-05-14T07:12:30 | 367,269,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for abcd project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'abcd.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
575e4cc071d0b136f9b85a000e788bd355f886ba | cc23a18b4e031a9c1f3082ad79f9c9e1bd0314bc | /restaurants/migrations/0002_auto_20160807_1257.py | d1fcddb906023f9b9c71332bbae54a1bdf23b36a | [] | no_license | shahsaumya/Restaurant-Picker | 1170354d5d11944b9c3fa76dc30044fa604a4c57 | 5821b4c9b0f0d8a577509422b8e374d15366c51a | refs/heads/master | 2020-09-21T21:10:55.528210 | 2018-01-04T20:07:59 | 2018-01-04T20:07:59 | 67,931,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-07 07:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restaurants', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='restaurant',
old_name='rest_type',
new_name='restaurant_type',
),
migrations.AlterField(
model_name='restaurant',
name='map',
field=models.ImageField(blank=True, null=True, upload_to='images/'),
),
migrations.AlterField(
model_name='restaurant',
name='menu',
field=models.ImageField(blank=True, null=True, upload_to='images/'),
),
migrations.AlterField(
model_name='restaurant',
name='picture',
field=models.ImageField(blank=True, null=True, upload_to='images/'),
),
]
| [
"Saumya Shah"
] | Saumya Shah |
7fd0208b9639daf5011460a29c186bc7c01996ad | 64159d10f8db2f6a0719de2493d970e1c2624efc | /split.py | 0ecbaeda6f61ee8d08a8074d9953d8736e4664e0 | [] | no_license | AlexisK/ws_dsua_2017 | 50521b3bb1122325a788a1b759499fab1cd1c5eb | a91017b42097561c25389be21b7250d5650ee65f | refs/heads/master | 2021-08-19T12:49:03.547930 | 2017-11-26T11:12:30 | 2017-11-26T11:12:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | import sys
import codecs
import math
class WrongFileLength(Exception):
pass
def file_len(data_path):
with codecs.open(data_path, encoding='utf-8', mode='r') as fname:
for i, l in enumerate(fname):
pass
print('File %s length: %s' % (data_path, str(i + 1)))
return i + 1
def run(data_path1, data_path2, train_percent):
input_file1 = codecs.open(data_path1, encoding='utf-8', mode='r')
input_file2 = codecs.open(data_path2, encoding='utf-8', mode='r')
output_file1 = codecs.open('train.inp', encoding='utf-8', mode='w')
output_file2 = codecs.open('devel.inp', encoding='utf-8', mode='w')
output_file3 = codecs.open('train.out', encoding='utf-8', mode='w')
output_file4 = codecs.open('devel.out', encoding='utf-8', mode='w')
a = file_len(data_path1)
b = file_len(data_path2)
if a != b:
raise WrongFileLength('Input and output files must have the same length!')
train_count = math.trunc(a * train_percent)
print('Number of items for training %s' % str(train_count))
sent_num = 0
print('Splitting input file.')
for line in input_file1:
if sent_num < train_count:
output_file1.write(line)
sys.stdout.write("\rSentence number %s." % sent_num)
sys.stdout.flush()
sent_num += 1
else:
output_file2.write(line)
sys.stdout.write("\rSentence number %s." % sent_num)
sys.stdout.flush()
sent_num += 1
sent_num = 0
print('Splitting output file.')
for line in input_file2:
if sent_num < train_count:
output_file3.write(line)
sys.stdout.write("\rSentence number %s." % sent_num)
sys.stdout.flush()
sent_num += 1
else:
output_file4.write(line)
sys.stdout.write("\rSentence number %s." % sent_num)
sys.stdout.flush()
sent_num += 1
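# Example invocation (file names are illustrative):
#   python split.py corpus.inp corpus.out 0.9
# writes train.inp / train.out with the first 90% of lines and
# devel.inp / devel.out with the remainder.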
if __name__ == '__main__':
path1 = str(sys.argv[1])
path2 = str(sys.argv[2])
train_percent = float(sys.argv[3])
run(path1, path2, train_percent) | [
"[email protected]"
] | |
b32e09a4a1743cf37ac6566c2894a9813e366fcf | 1b486327dacb6057e085fb855b936ad30d63a0e2 | /python/numbers/list-prime-numbers.py | c8b83e092a6f7e02a0fd29d138b1ba87db02a8f4 | [] | no_license | ritchiefitz/interview_questions | de16a6067be15e1b43239ef224acafe701afef94 | 0496044955d6d631ef9b8229d31fdd61990ccf83 | refs/heads/master | 2021-01-02T09:32:54.871115 | 2014-10-17T18:30:41 | 2014-10-17T18:30:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | import sys
import math
def list_primenumbers(num):
print "\nAll Prime Numbers Under", num
for n in range(1, num):
if all(n % i != 0 for i in range(2, int(math.sqrt(n)) + 1)):
print n
numbers = sys.argv
del numbers[0]
for i, num in enumerate(numbers):
list_primenumbers(int(num)) | [
"[email protected]"
] | |
131da4ef6887fa5704722436717046f8e50c0a34 | 2f0bde4d37b7ea1aad91ab44b5b4526d0bec30ce | /examples/strike-slip-example/okada_driver.py | b09fae0728d457d440530d09f1f90b57ca4f9062 | [
"MIT"
] | permissive | kmaterna/Elastic_stresses_py | 5c78a628136f610ec68e7ee38d8bc76515319e4f | 549a13c6c7fa3c80aac9d63548fdbf3b1ec7b082 | refs/heads/master | 2023-08-28T21:54:42.500337 | 2023-08-18T01:45:18 | 2023-08-18T01:45:18 | 141,371,162 | 42 | 11 | MIT | 2022-08-09T14:22:15 | 2018-07-18T02:37:59 | Python | UTF-8 | Python | false | false | 1,128 | py | #!/usr/bin/env python
import Elastic_stresses_py.PyCoulomb.fault_slip_object as fso
from Elastic_stresses_py.PyCoulomb import run_dc3d, configure_calc, output_manager, io_additionals
# Definitions
lon0_sys, lat0_sys = -120.5, 36;
bbox = (-121.5, -119.5, 35.2, 36.8);
lonlatfile = "Inputs/lon_lats.txt";
source_slip_dist = "Inputs/s2004PARKFI01CUST.fsp";
# Inputs
parkfield_faults = fso.file_io.io_srcmod.read_srcmod_distribution(source_slip_dist);
coulomb_fault_model = fso.fault_slip_object.fault_object_to_coulomb_fault(parkfield_faults, lon0_sys, lat0_sys);
disp_points = io_additionals.read_disp_points(lonlatfile);
# Configure, Compute, Output
params = configure_calc.configure_default_displacement_params();
inputs = configure_calc.configure_default_displacement_input(coulomb_fault_model, zerolon=lon0_sys,
zerolat=lat0_sys, bbox=bbox, domainsize=100);
outobj = run_dc3d.do_stress_computation(params, inputs, disp_points=disp_points, strain_points=[]);
output_manager.produce_outputs(params, inputs, disp_points, obs_strain_points=[], out_object=outobj);
| [
"[email protected]"
] | |
a386ccedf20d5d2d49c4c93a445d9977d09b5804 | 20c6cb5af100c0208ba66b995655cebc661d6916 | /options.py | baecbda8c6727ac4d3bc16c05218570675b17f57 | [] | no_license | Anonymous-2611/AdaRec | 35cb231cf8b1baf364aa3df9198cb7109dca2b71 | 501c0082e0090e3bb5831197b9c054592b513a7a | refs/heads/master | 2023-02-20T16:08:53.824420 | 2021-01-21T08:08:04 | 2021-01-21T08:08:04 | 331,169,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,859 | py | import argparse
"""
==========================
All options' parsers.
==========================
Except for global settings (e.g. name/gpu/seed/etc.), options are designed like `{scope}_{option}`.
Note: `scope` is not something related to real code, they just make
options easy to arrange and look pretty when printing.
"""
def add_common_args(parser):
parser.add_argument("--name", type=str, default="NoName")
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--preset", type=str, default=None, help="Use preset template, see presets.py for detail.")
parser.add_argument("--resume", type=str, default=None, help="path/to/FOLDER/from/where/you/want/to/resume")
parser.add_argument("--aux_store_root", type=str, help="/path/to/store", default=None)
parser.add_argument("--aux_console_output", action="store_true", help="Print logger info to console")
parser.add_argument("--aux_eval_ks", nargs="+", type=int, default=None, help="ks for Metric@k")
return parser
def add_dataset_args(parser):
parser.add_argument("--dataset", type=str, default=None, help="Preset for dataset and dataloader.")
# mask : masked language model data loader
# seq : normal next item data loader
parser.add_argument("--dataset_type", type=str, default="mask", choices=["mask", "seq"])
parser.add_argument("--data_folder", type=str, default=None)
parser.add_argument("--data_main", type=str, default=None)
parser.add_argument("--data_neg", type=str, default=None)
parser.add_argument("--loader_generate_sub_session", action="store_true", default=None)
parser.add_argument("--loader_train_batch_size", type=int, default=None)
parser.add_argument("--loader_val_batch_size", type=int, default=None)
parser.add_argument("--loader_test_batch_size", type=int, default=None)
parser.add_argument("--loader_mask_prob", type=float, default=None)
parser.add_argument("--loader_max_len", type=int, default=None)
parser.add_argument("--loader_num_items", type=int, default=None, help="Number of real items, without PAD and MASK")
parser.add_argument("--loader_num_aux_vocabs", type=int, default=None, help="+1 when seq, +2 when mask")
return parser
def add_bert_args(parser):
parser.add_argument("--bert_hidden_units", type=int, default=None, help="Size of hidden vectors (d_model)")
parser.add_argument("--bert_num_blocks", type=int, default=None, help="Number of transformer layers")
parser.add_argument("--bert_num_heads", type=int, default=None, help="Number of heads for multi-attention")
parser.add_argument("--bert_dropout", type=float, default=None, help="Dropout probability")
parser.add_argument("--bert_use_eps", action="store_true", default=None, help="Use x_{i+1} = x_{i} + eps*F(x_{i})")
return parser
def add_nin_args(parser):
parser.add_argument("--nin_num_blocks", type=int, default=None)
parser.add_argument("--nin_block_dilations", nargs="+", type=int, default=None)
parser.add_argument("--nin_hidden_units", type=int, default=None)
parser.add_argument("--nin_kernel_size", type=int, default=None)
parser.add_argument("--nin_use_eps", action="store_true", default=None)
return parser
def add_sas_args(parser):
parser.add_argument("--sas_num_blocks", type=int, default=None)
parser.add_argument("--sas_hidden_units", type=int, default=None)
parser.add_argument("--sas_num_heads", type=int, default=None)
parser.add_argument("--sas_dropout", type=float, default=None)
parser.add_argument("--sas_use_eps", action="store_true", default=None)
return parser
def add_student_model_args(parser):
parser.add_argument("--model_dropout", type=float, default=None, help="Dropout probability in cells.")
parser.add_argument("--model_num_hidden", type=int, default=None, help="Hidden in cells.")
parser.add_argument("--model_num_cell", type=int, default=None, help="Number of cells.")
parser.add_argument("--model_num_node", type=int, default=None, help="Number of intermediate node in a cell.")
return parser
def add_training_args(parser, is_search=False):
parser.add_argument("--train_iter", type=int, default=None, help="Number of epochs for training")
parser.add_argument("--train_log_every", type=int, default=None, help="Log every T*b.")
parser.add_argument("--train_grad_clip_norm", type=float, default=None, help="Clip gradient by norm.")
if not is_search: # single model training, maybe teacher model or finetune student model
parser.add_argument("--train_lr", type=float, default=None, help="Learning rate")
parser.add_argument("--train_lr_decay_step", type=int, default=None, help="Decay step for StepLR")
parser.add_argument("--train_lr_decay_gamma", type=float, default=None, help="Gamma for StepLR")
parser.add_argument("--train_wd", type=float, default=None, help="l2 regularization")
else:
parser.add_argument("--train_model_lr", type=float, default=None, help="Initial learning rate for model")
parser.add_argument("--train_model_lr_decay_step", type=int, default=None)
parser.add_argument("--train_model_lr_decay_gamma", type=float, default=None)
parser.add_argument("--train_model_wd", type=float, default=None, help="l2 regularization for model")
parser.add_argument("--train_alpha_lr", type=float, default=None, help="Initial learning rate for alpha")
parser.add_argument("--train_alpha_lr_decay_step", type=int, default=None)
parser.add_argument("--train_alpha_lr_decay_gamma", type=float, default=None)
parser.add_argument("--train_alpha_wd", type=float, default=None, help="l2 regularization for alpha")
return parser
def add_gru4rec_args(parser):
parser.add_argument("--gru_num_layers", type=int, default=None)
parser.add_argument("--gru_hidden_units", type=int, default=None)
parser.add_argument("--gru_dropout", type=float, default=None)
return parser
def add_caser_args(parser):
parser.add_argument("--caser_hidden_units", type=int, default=None)
parser.add_argument("--caser_dropout", type=float, default=None)
parser.add_argument("--caser_num_hf", type=int, default=None)
parser.add_argument("--caser_num_vf", type=int, default=None)
parser.add_argument("--caser_hf_size", type=int, nargs="+", default=None)
return parser
def gru4rec_parser():
# Baseline: GRU4Rec
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_gru4rec_args(parser)
parser = add_training_args(parser, is_search=False)
return parser
def caser_parser():
# Baseline: Caser
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_caser_args(parser)
parser = add_training_args(parser, is_search=False)
return parser
def bert4rec_parser():
# Train Teacher-BERT network
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_bert_args(parser)
parser = add_training_args(parser, is_search=False)
return parser
def nextitnet_parser():
# Train Teacher-NextItNet network
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_nin_args(parser)
parser = add_training_args(parser, is_search=False)
return parser
def nextitnet_distill_parser():
# Distill NextItNet into NextItNet
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--nin_student_hidden_units", type=int, default=None)
parser.add_argument("--nin_student_num_blocks", type=int, default=None)
parser.add_argument("--nin_student_block_dilations", nargs="+", type=int, default=None)
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_nin_args(parser)
parser = add_training_args(parser, is_search=False)
parser.add_argument("--distill_loss_gamma", type=float, default=None, help="Trade off between CE and KD.")
parser.add_argument("--distill_loss_gamma_decay", type=float, default=None, help="Gamma decay every.")
parser.add_argument("--distill_teacher_folder", type=str, default=None)
# use EMD distillation method
return parser
def sasrec_distill_parser():
# Distill SASRec into SASRec
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--sas_student_num_heads", type=int, default=None)
parser.add_argument("--sas_student_hidden_units", type=int, default=None)
parser.add_argument("--sas_student_num_blocks", type=int, default=None)
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_sas_args(parser)
parser = add_training_args(parser, is_search=False)
parser.add_argument("--distill_loss_gamma", type=float, default=None, help="Trade off between CE and KD.")
parser.add_argument("--distill_loss_gamma_decay", type=float, default=None, help="Gamma decay every.")
parser.add_argument("--distill_teacher_folder", type=str, default=None)
# use EMD distillation method
return parser
def bert4rec_distill_parser():
# Distill Bert4rec into Bert4rec
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--bert_student_num_heads", type=int, default=None)
parser.add_argument("--bert_student_hidden_units", type=int, default=None)
parser.add_argument("--bert_student_num_blocks", type=int, default=None)
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_bert_args(parser)
parser = add_training_args(parser, is_search=False)
parser.add_argument("--distill_loss_gamma", type=float, default=None, help="Trade off between CE and KD.")
parser.add_argument("--distill_loss_gamma_decay", type=float, default=None, help="Gamma decay every.")
parser.add_argument("--distill_teacher_folder", type=str, default=None)
# use EMD distillation method
return parser
def sasrec_parser():
# Train Teacher-SASRec network
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_sas_args(parser)
parser = add_training_args(parser, is_search=False)
return parser
def student_search_preset_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-T", type=str, required=True, help="teacher's type")
parser.add_argument("-D", type=str, required=True, help="dataset's name")
return parser
def student_search_parser(teacher_type):
# Search student network architecture using pretrained Teacher model
teacher_type = teacher_type.strip().lower()
assert teacher_type in ["bert", "nin", "sas"]
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_teacher", type=int, default=0)
parser.add_argument("--gpu_student", type=int, default=0)
# Student-Search
parser.add_argument("--search_temperature", type=float, default=None, help="Initial gumbel sampling temperature.")
parser.add_argument("--search_temperature_decay_rate", type=float, default=None, help="Temperature decay rate.")
parser.add_argument("--search_temperature_decay_epochs", type=float, default=None, help="Temperature decay every.")
parser.add_argument("--search_teacher_folder", type=str, default=None)
parser.add_argument("--search_teacher_layers", type=int, default=None, help="Number of layers in teacher network.")
parser.add_argument("--search_teacher_hidden", type=int, default=None, help="Hidden units in teacher network.")
parser.add_argument("--search_distill_loss", type=str, default=None, help="KD loss type.") # hierarchical|emd|ada
parser.add_argument("--search_hierarchical_select_method", type=str, default=None, help="Hierarchical KD method.")
parser.add_argument("--search_loss_gamma", type=float, default=None, help="Trade off between CE and KD.")
parser.add_argument("--search_loss_gamma_decay", type=float, default=None, help="Gamma decay every.")
parser.add_argument("--search_loss_beta", type=float, default=None, help="Loss factor for model efficiency.")
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_training_args(parser, is_search=True)
parser = add_student_model_args(parser)
if teacher_type == "bert":
parser = add_bert_args(parser)
elif teacher_type == "nin":
parser = add_nin_args(parser)
elif teacher_type == "sas":
parser = add_sas_args(parser)
return parser
def student_finetune_parser():
# Finetune student network architecture after architecture is generated
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--search_folder", type=str, default=None)
parser.add_argument("--search_teacher_type", type=str, default=None)
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_student_model_args(parser)
parser = add_training_args(parser, is_search=False)
return parser
def student_augment_parser():
# Augment student network
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
# e.g. Using 30music's alpha to train ml2k
parser.add_argument("--augment_source_folder", type=str, default=None) # get 30music's alpha (arch)
parser.add_argument("--augment_target_folder", type=str, default=None) # get ml2k's embedding and linear
parser = add_common_args(parser)
parser = add_dataset_args(parser)
parser = add_student_model_args(parser)
parser = add_training_args(parser, is_search=False)
return parser
def student_augment_preset_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-T", type=str, required=True, help="teacher's type")
parser.add_argument("-D_src", type=str, required=True, help="from which alpha file is searched")
return parser
def str2bool(v):
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Unsupported value encountered.")
def empty_parser():
return argparse.ArgumentParser()
| [
"[email protected]"
] | |
a849cbe8bf109f0432486551e1295f4c785d43ba | 3a32644df62877604b44593267d41d3c664c1867 | /5-selim_sef/create_folds.py | d29b9e239a24254db5e5f31c9062d3ac25b5146d | [
"BSD-3-Clause",
"Python-2.0",
"MIT",
"Apache-2.0"
] | permissive | avanetten/SpaceNet_SAR_Buildings_Solutions | f5872778baf7cd7f7b22f2a4b104a5f680a33a28 | 6a9c3962d987d985384d0d41a187f5fbfadac82c | refs/heads/master | 2023-01-21T01:43:04.626934 | 2020-12-04T00:15:21 | 2020-12-04T00:15:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import argparse
import pandas as pd
import numpy as np
from numpy.random.mtrand import RandomState
from sklearn.model_selection import KFold
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--csv',
default="/mnt/sota/datasets/spacenet/train/AOI_11_Rotterdam/SummaryData/SN6_Train_AOI_11_Rotterdam_Buildings.csv")
parser.add_argument("--folds", type=int, default=10)
parser.add_argument("--seed", type=int, default=777)
args = parser.parse_args()
df = pd.read_csv(args.csv)
tiles = np.unique(df["ImageId"].values)
kfold = KFold(n_splits=args.folds, shuffle=True, random_state=RandomState(args.seed))
data = []
for i, (train_idx, test_idx) in enumerate(kfold.split(tiles)):
for idx in test_idx:
data.append([tiles[idx], i])
pd.DataFrame(data, columns=["id", "fold"]).to_csv("folds.csv", index=False)
| [
"[email protected]"
] | |
720b17067175b8210714342c5cbfb9f099f13432 | fb990d3b9f87c4382b8f2398f5e379485f1a88df | /portfolio/views.py | d7856ad160df31bcefdf76b900ed31997d3a40ff | [] | no_license | mtaz337/Django3-Portfolio | cb8997894dacde379bfbaf33c9a4b4b6b70cdfc1 | 83787ec1150fc37814d50cdf9586123d896cdfac | refs/heads/master | 2023-08-30T12:23:25.759757 | 2021-11-01T22:25:33 | 2021-11-01T22:25:33 | 423,620,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from django.shortcuts import render
from .models import project
# Create your views here.
def home(request):
projects = project.objects.all()
return render(request, 'portfolio/home.html',{'projects':projects})
| [
"[email protected]"
] | |
be879a7d3d071b9d7054fd314b9dae4616b984a0 | c82c0f445b6cd4fa390f7b217605cd656432a256 | /counter/urls.py | d4f66f04433ba6d285f30f22e06f1ef1e3ec8917 | [] | no_license | zione/skip | 5b2b576ec40d2c909d40feeb43e5ec6eb1b88648 | d3eb27e7ef180ed4193653ced3acbce5b0dca002 | refs/heads/master | 2020-03-17T03:41:59.296290 | 2018-05-14T07:23:48 | 2018-05-14T07:23:48 | 133,246,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.urls import path
from django.conf.urls import include
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register(r'posters',views.PosterViewSet)
urlpatterns = [
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('',include(router.urls)),
] | [
"[email protected]"
] | |
762c40055a4f516994d4b1b12d541777f4ca027d | 2e0e49425ded98d0a70b44ec8e02d37ceb69392a | /wristlock/runs/lock_lock.py | 081e4342510fa89e7829a88b419d75d58914b5ea | [
"MIT"
] | permissive | laurentrivard/pykevoplus | c5819b9c6ce86b85db84f69113cd315a20a14716 | e0c8074c2cae39745eea98db46e072b1246f3583 | refs/heads/master | 2021-05-11T21:04:04.992763 | 2018-01-24T04:11:44 | 2018-01-24T04:11:44 | 117,460,853 | 1 | 0 | null | 2018-01-14T19:41:37 | 2018-01-14T19:41:36 | null | UTF-8 | Python | false | false | 470 | py | from wristlock.runs import BaseCommandRun
from wristlock.entities import LockState
class LockLock(BaseCommandRun):
def __init__(self, email: str, password: str, lock_id: str):
super(LockLock, self).__init__(email=email, password=password, lock_id=lock_id)
def run(self):
super(LockLock, self).run()
self._lock()
def _lock(self):
self.session.get(self.lock_command_url)
self.wait_for_state(state=LockState.LOCKED)
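# A minimal usage sketch (the credentials and lock id below are placeholders):
#
#     LockLock(email="user@example.com", password="hunter2", lock_id="abc123").run()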
| [
"[email protected]"
] | |
b2accf861b3793cad894dd1672b3102bd8f0ae3b | 188a114f7e7c336041b8b5d8db07bf929690a616 | /euler/problem78/euler_sol.py | a26d38cd45cf314b1563a1b0eb6eb256ebefa78e | [] | no_license | rhalstea/rhalstea_code_challenges | 3194eda68e9a1d138d1ff29ac6bce4e6519a3dbf | 78725a73bbaad12f9d3ec510b14901b7761dcd46 | refs/heads/master | 2021-01-10T20:13:52.578893 | 2012-02-12T21:05:34 | 2012-02-12T21:05:34 | 2,413,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | #!/usr/bin/env python
# Project Euler 78: least n whose partition number p(n) is divisible by d,
# computed with Euler's pentagonal number theorem.
d = 1000000
p = [1]  # p[0] = 1
n = 0
while p[n]:
    n += 1
    total, k, g = 0, 1, 1
    while g <= n:
        sign = 1 if k % 2 else -1
        total += sign * p[n - g]
        if g + k <= n:  # k*(3k+1)//2 == g + k is the other pentagonal number
            total += sign * p[n - g - k]
        k += 1
        g = k * (3 * k - 1) // 2
    p.append(total % d)  # only the residue mod d is needed
print n
| [
"[email protected]"
] | |
b421125d6784e475deb129da3e9ec285ef8d30e5 | 221052194dc74a31d2f60ff386429aa0dca691a3 | /options.py | 4a9f9b354439da91b627fb6f63d209020b0a1d6d | [] | no_license | pucrs-automated-planning/prob-plan-recognition | 7eda366567ffe33007ce119daa5c3015262e3aaf | 85df94d9d7b1aa7ae21b0c26ad4c63c9fed86129 | refs/heads/master | 2023-08-04T21:23:22.150192 | 2023-07-24T14:49:19 | 2023-07-24T14:49:19 | 111,575,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,264 | py | import getopt, os, sys
def usage():
print >> sys.stderr, "Parameters:"
print >> sys.stderr, "-e --experiment <file> Plan Recognition experiment files (tar'ed)"
print >> sys.stderr, "-h --help Get Help"
print >> sys.stderr, "-t --max-time <time> Maximum allowed execution time (defaults to 1800 secs)"
print >> sys.stderr, "-m --max-memory <time> Maximum allowed memory consumption (defaults to 1Gb)"
print >> sys.stderr, "-O --optimal Optimal Probabilistic PR"
print >> sys.stderr, "-G --greedy Greedy LAMA (takes first solution as best)"
print >> sys.stderr, "-P --hspr Use hspr for satisficing planning"
print >> sys.stderr, "-F --ff Use FF for satisficing planning"
print >> sys.stderr, "-S --simulation Simulation mode"
print >> sys.stderr, "-b --beta <value> Parameter strictly positive which penalizes non--optimal behavior"
print >> sys.stderr, "-D --simulate-from-obs Uses provided observations instead of generating them (Simulation mode)"
class Program_Options:
def __init__(self, args):
try:
opts, args = getopt.getopt(args,
"e:ht:m:OGSb:PFD",
["experiment=",
"help",
"max-time=",
"max-memory=",
"beta=",
"hspr",
"ff",
"optimal",
"greedy",
"simulation",
"simulate-from-obs"])
except getopt.GetoptError:
print >> sys.stderr, "Missing or incorrect parameters specified!"
usage()
sys.exit(1)
self.exp_file = None
self.domain_name = None
self.instance_names = []
self.max_time = 1800
self.max_memory = 1024
self.optimal = False
self.greedy = False
self.simulation = False
self.use_hspr = False
self.use_FF = False
self.beta = 1.0
self.simulate_from_obs = False
for opcode, oparg in opts:
if opcode in ('-h', '--help'):
print >> sys.stderr, "Help invoked!"
usage()
sys.exit(0)
if opcode in ('-e', '--experiment'):
self.exp_file = oparg
if not os.path.exists(self.exp_file):
print >> sys.stderr, "File", self.exp_file, "does not exist"
print >> sys.stderr, "Aborting"
sys.exit(1)
if opcode in ('-t', '--max-time'):
try:
self.max_time = int(oparg)
if self.max_time <= 0:
print >> sys.stderr, "Maximum time must be greater than zero"
sys.exit(1)
except ValueError:
print >> sys.stderr, "Time must be an integer"
sys.exit(1)
if opcode in ('-b', '--beta'):
try:
self.beta = float(oparg)
if self.beta <= 0.0:
print >> sys.stderr, "Beta must be a positive real number"
sys.exit(1)
except ValueError:
print >> sys.stderr, "Beta must be a (positive) real number, rather than", oparg
sys.exit(1)
if opcode in ('-m', '--max-memory'):
try:
self.max_memory = int(oparg)
if self.max_memory <= 0:
print >> sys.stderr, "Maximum memory must be greater than zero"
sys.exit(1)
except ValueError:
print >> sys.stderr, "Memory amount must be an integer"
sys.exit(1)
if opcode in ('-O', '--optimal'):
self.optimal = True
if opcode in ('-G', '--greedy'):
self.greedy = True
if opcode in ('-S', '--simulation'):
self.simulation = True
if opcode in ('-P', '--hspr'):
self.use_hspr = True
if opcode in ('-F', '--ff'):
self.use_FF = True
if opcode in ('-D', '--simulate-from-obs'):
self.simulate_from_obs = True
if self.exp_file is None:
print >> sys.stderr, "No experiment file was specified!!"
usage()
sys.exit(1)
os.system('tar jxvf %s' % self.exp_file)
if not os.path.exists('domain.pddl'):
print >> sys.stderr, "No 'domain.pddl' file found in experiment file!"
usage()
sys.exit(1)
if not os.path.exists('template.pddl'):
print >> sys.stderr, "No 'template.pddl' file found in experiment file!"
usage()
sys.exit(1)
if not os.path.exists('hyps.dat'):
print >> sys.stderr, "No 'hyps.dat' file found in experiment file!"
usage()
sys.exit(1)
if not self.simulation:
if not os.path.exists('obs.dat'):
print >> sys.stderr, "No 'obs.dat' file found in experiment file!"
usage()
sys.exit(1)
if not os.path.exists('real_hyp.dat'):
print >> sys.stderr, "No 'real_hyp.dat' file found in experiment file!"
usage()
sys.exit(1)
def print_options(self):
def print_yes(): print >> sys.stdout, "Yes"
def print_no(): print >> sys.stdout, "No"
print >> sys.stdout, "Options set"
print >> sys.stdout, "==========="
print >> sys.stdout, "Experiment File:", self.exp_file
print >> sys.stdout, "Max. Time Allowed", self.max_time
print >> sys.stdout, "Max. Memory Allowed", self.max_memory
| [
"[email protected]"
] | |
ffd30679e6f9e60e46a044798f3da994f2d94de4 | 33045abfa768b151dfbcf157960e951d7939ca4e | /script python/tableau_overlay5.py | db7a018d36eed68c0dc851b74df33ae998647e97 | [] | no_license | kedenn/portfolio | 42393d66f67b3d0e37c0e904bd3912747afe8cf3 | 07c036a6fa549396e860539ffb74dc695e50e26c | refs/heads/mainPortfolio | 2023-08-03T19:16:37.704559 | 2021-09-24T17:04:11 | 2021-09-24T17:04:11 | 410,043,841 | 0 | 0 | null | 2021-09-24T17:12:03 | 2021-09-24T17:12:02 | null | UTF-8 | Python | false | false | 5,761 | py | import tkinter
from tkinter import *
import tkinter.font
import turtle
#import mouse
#import keyboard
import time
#from pynput import keyboard
# window 1
win = Tk()
win.attributes("-topmost", True )
win.overrideredirect(1)
win.attributes("-alpha",0.4)
#win.after(5000, lambda: win.focus_force())
# screen info
L = win.winfo_screenwidth()
H = win.winfo_screenheight()
#print (L,H)
def setup():
t.hideturtle()
t.up()
def enable_button():
size_but = bouton1.winfo_height()
global u
#print(size_but)
x, y = canvas.winfo_pointerx(), canvas.winfo_pointery()
t.setpos(x - (win.winfo_screenwidth() / 2) ,(y * -1) + (win.winfo_screenheight() /2) + size_but + float(u))
t.down()
global i
while i == True:
#print(1)
x, y = canvas.winfo_pointerx(), canvas.winfo_pointery()
t.goto(x - (win.winfo_screenwidth() / 2) ,(y * -1) + (win.winfo_screenheight() /2) + size_but + float(u) )
setup()
def on_release(k):
#print(k.widget)
if str(k.widget) == ".!canvas":
global i
if i == True:
i = False
else:
i = True
enable_button()
def focus(event):
#print(str(event.widget))
wid = win.focus_get()
#print(wid, "has focus")
def clear_canvenas():
t.clear()
def enable_window():
global win_enable
if win_enable == True:
canvas.master.wm_attributes("-transparent", "white")
win_enable = False
bouton1.config(fg="#000000")
bouton2.config(text = "stylo OFF", bg="red" , fg="#000000")
else:
canvas.master.wm_attributes("-transparent", "black")
win_enable = True
bouton1.config(fg="#ffffff")
bouton2.config(text = "stylo ON", bg="green",fg="#ffffff")
def set_tortle_size(size):
t.pensize(size)
def bleu():
t.pencolor("blue")
fbouton1.config(text="✔")
fbouton2.config(text="")
fbouton3.config(text="")
fbouton4.config(text="")
def rouge():
t.pencolor("red")
fbouton1.config(text="")
fbouton2.config(text="✔")
fbouton3.config(text="")
fbouton4.config(text="")
def vert():
t.pencolor("green")
fbouton1.config(text="")
fbouton2.config(text="")
fbouton3.config(text="✔")
fbouton4.config(text="")
def noir():
t.pencolor("white")
fbouton1.config(text="")
fbouton2.config(text="")
fbouton3.config(text="")
fbouton4.config(text="✔")
def leave():
win.destroy()
def set_position_at_mouse(ecart):
global u
u = ecart
# window widgets
#frame1
Frame1 = Frame(win,borderwidth=2,relief=GROOVE, bg='bisque')
Frame1.pack()
#frame -> button1,2,3,4....
font_button = tkinter.font.Font(size=26, weight="bold")
bouton1 = tkinter.Button(Frame1, width= 20,height=2, fg="#ffffff", bg="#1586f3", text="effacer le tableau",command=clear_canvenas)
bouton1.grid(row=0,column=2)
#2
bouton2 = tkinter.Button(Frame1, width= 20,height=2, fg="#ffffff", bg="green", text="stylo ON",command=enable_window)
bouton2.grid(row=0,column=3)
#3
bouton3 = tkinter.Button(Frame1, width= 20,height=2, fg="#ffffff", bg="#c70039", text="QUITTER",command=leave)
bouton3.grid(row=0,column=4)
# color buttons
#62f11d
#d315f3
#frame2
Frame2 = Frame(Frame1,borderwidth=2,relief=GROOVE, bg='#62f11d')
Frame2.grid(row=0,column=1)
#frame2 --> button Color
fbouton1 = tkinter.Button(Frame2, width= 5,height=2, fg="#ffffff", bg="blue",command=bleu)
fbouton1.grid(row=0,column=1)
fbouton2 = tkinter.Button(Frame2, width= 5,height=2, fg="#ffffff", bg="red",command=rouge)
fbouton2.grid(row=0,column=2)
fbouton3 = tkinter.Button(Frame2, width= 5,height=2, fg="#ffffff", bg="green",command=vert)
fbouton3.grid(row=0,column=3)
fbouton4 = tkinter.Button(Frame2, width= 5,height=2, fg="#ffffff", bg="white",command=noir)
fbouton4.grid(row=0,column=4)
fbouton2.config(text="✔")
#frame3
Frame3 = Frame(Frame1,borderwidth=2,relief=GROOVE, bg='#B20BD4')
Frame3.grid(row=0,column=5)
#slider
slider = Scale(Frame3, from_=0, to=200,orient=HORIZONTAL,length=L/6, command=set_tortle_size,relief=GROOVE)
slider.grid(row=0,column=2)
slider.set(1)
#slider2
slider2 = Scale(Frame3, from_= 0, to=15,orient=HORIZONTAL,length=100, command=set_position_at_mouse,relief=GROOVE)
slider2.grid(row=0,column=4)
slider2.set(10)
#label
label = Label(Frame3, text= "taille :",bg='#B20BD4',fg="#ffffff")
label.grid(row=0,column=1)
#label2
label2 = Label(Frame3, text= "ecart souris:",bg='#B20BD4',fg="#ffffff", wraplength = 50)
label2.grid(row=0,column=3)
#canvas1
canvas = tkinter.Canvas(win, width=L, height=H ,bg="red")
canvas.master.overrideredirect(True)
canvas.pack()
#setting turtle
t = turtle.RawTurtle(canvas)
t.pencolor("red") # Red
t.speed(20)
t.pensize(1)
# variable declarations
win_enable = True
i = False
setup()
win.bind("<Button-1>", on_release)          # press on the canvas: start drawing
win.bind("<ButtonRelease-1>", on_release)   # release: stop drawing
canvas.mainloop()
'''
# window 2 (unused experiment, kept for reference)
win2 = Tk()
win2.attributes("-alpha",0.0)
#win2.attributes("-topmost", True )
# canvas 2
canvas2 = tkinter.Canvas(win2, width=L, height=H ,bg="white")
canvas2.master.overrideredirect(True)
#canvas.master.wm_attributes("-transparentcolor", "white")
canvas2.pack()
#win2.attributes("-topmost", True )
'''
# --- File: preprocess.py (repo: CSInfoMgmt/Topic-Lexicon) ---
# coding=utf-8
import unidecode
import inflection
import re
from nltk.corpus import stopwords
stop = stopwords.words('english')
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# NOTE: this module targets Python 2; the str.decode('utf-8') calls below
# would fail on Python 3 str objects.
def change_alphabet(sent):
    # transliterate accented characters to their closest ASCII equivalents
    return unidecode.unidecode(sent.decode('utf-8'))

def clean_sent(sent):
    # lowercase, then strip URLs and @-mentions
    sent = re.sub(r"http\S+", "", sent.lower()).decode('utf-8')
    sent = re.sub(r"@\S+", "", sent).decode('utf-8')
    words = tokenizer.tokenize(sent)
    # singularise and lemmatise each token
    words_refined = [lemmatizer.lemmatize(inflection.singularize(word)) for word in words]
    # keep transliterated tokens longer than 2 characters that are not pure digits
    words = [inflection.transliterate(word.decode('utf-8')) for word in words_refined if not word.isdigit() and len(word) > 2]
    p_stemmer = PorterStemmer()  # instantiated but never applied
    _digits = re.compile(r'\d')
    # finally drop tokens containing any digit, and stopwords
    words_refined = [str(word) for word in words if not bool(_digits.search(word)) and word not in stop]
    return words_refined
# Smoke test (Python 2):
# sentence = "HE !!!! is a https://jbfjerferfe @rimjhim apply ...hates ....rama123 kingis singh has have their Málaga Málaga 2312423534646"
# sentence = change_alphabet(sentence)
# sentence = clean_sent(sentence)
# print sentence
# --- File: day3/hacker_rank_problems/hackerRankRegexFindMethod.py (repo: Hamdi-Limam/PythonTraining) ---
# HackerRank problem for Regex find methods. Link: https://www.hackerrank.com/challenges/re-findall-re-finditer/problem
import re
# The expression re.findall() returns all the non overlapping matches of patterns in a string as a list of strings.
print("Result of re.findall: ", re.findall(r'\w','http://www.hackerrank.com/'))
# The expression re.finditer() returns an iterator yielding MatchObject instances over all non-overlapping matches for the re pattern in the string.
m = re.finditer(r'\w','http://www.hackerrank.com/')
print("Result of re.finditer: ", m)
items = list(m)
print([match.group() for match in items])
# Task: You are given a string . It consists of alphanumeric characters, spaces and symbols(+,-).
# Your task is to find all the substrings of that contains or more vowels.
# Also, these substrings must lie in between consonants and should contain vowels only.
consonants = "qwrtypsdfghjklzxcvbnm"
result = re.findall(r"(?<=[" + consonants + r"])([aeiou]{2,})(?=[" + consonants + r"])", input(), re.I)
print("\n".join(result) if result else "-1")
# --- File: apisapron/api/views.py (repo: Rapaix/sapron) ---
from .serializers import AgendaListSerializer, AgendaDetailSerializer
from rest_framework import generics, filters
from .models import Agenda
class AgendaListAPIView(generics.ListAPIView):
queryset = Agenda.objects.all()
serializer_class = AgendaListSerializer
filter_backends = [filters.SearchFilter, filters.OrderingFilter]
ordering_fields = ['checkin']
search_fields = ['hospede', 'id']
class AgendaRetrieveAPIView(generics.RetrieveAPIView):
lookup_field = "id"
queryset = Agenda.objects.all()
serializer_class = AgendaDetailSerializer
class AgendaCreateAPIView(generics.CreateAPIView):
queryset = Agenda.objects.all()
serializer_class = AgendaDetailSerializer
class AgendaUpdateAPIView(generics.RetrieveUpdateAPIView):
lookup_field = "id"
queryset = Agenda.objects.all()
serializer_class = AgendaDetailSerializer
class AgendaDeleteAPIView(generics.DestroyAPIView):
lookup_field = "id"
queryset = Agenda.objects.all()
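
# Hypothetical URLconf wiring for these views (route names assumed, not taken
# from this repo):
#   path('agendas/', AgendaListAPIView.as_view()),
#   path('agendas/<int:id>/', AgendaRetrieveAPIView.as_view()),
#   path('agendas/create/', AgendaCreateAPIView.as_view()),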
# --- File: random_tsv.py (repo: Docproc/opentargets_wip) ---
import random
import sys
import logging
from ontoma import OnToma
import argparse
import pandas as pd
# Generate a random sample from a TSV file, optionally mapping phenotypes to EFO
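# Example invocation (file and column names are hypothetical):
#   python random_tsv.py -input associations.tsv -samples 100 \
#       -columns gene phenotype -map_phenotypes phenotype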
parser = argparse.ArgumentParser(description='Build random data from actual values in a TSV file')
parser.add_argument('-input', help="TSV input file", required=True, action='store')
parser.add_argument('-samples', type=int, help='Number of samples to output', required=True)
parser.add_argument('-columns', help="Columns to include, space separated. If not specifed, include all.", nargs='+')
parser.add_argument('-no_header', help="If specified, do not print header row", action='store_true')
parser.add_argument('-map_phenotypes',
help="Map phenotypes to EFO terms using OnToma. Value of this argument is the name of the phenotype column.",
action='store')
args = parser.parse_args()
table = pd.read_table(args.input, dtype='unicode')
if args.columns:
columns = args.columns
else:
columns = table.columns
# Cache columns as lists to avoid lots of expensive operations
columns_cache = {}
for column in columns:
columns_cache[column] = table[column].unique().tolist()
if args.map_phenotypes:
if not args.map_phenotypes in table.columns:
print("Can't find %s column in %s" % (args.map_phenotypes, args.input))
sys.exit(1)
otmap = OnToma()
ontoma_logger = logging.getLogger("ontoma.downloaders")
ontoma_logger.setLevel(logging.WARNING)
ontoma_logger2 = logging.getLogger("ontoma.interface")
ontoma_logger2.setLevel(logging.WARNING)
# Header
if not args.no_header:
header = "\t".join(columns)
if args.map_phenotypes:
header += "\tEFO"
print(header)
# Body
for _ in range(args.samples):  # args.samples is already an int (type=int)
output_str = ""
efo = ""
for column in columns:
entries = columns_cache[column]
choice = random.choice(entries)
output_str += choice + "\t"
if args.map_phenotypes and column == args.map_phenotypes:
efo = otmap.find_term(choice)
if efo: # Always print as last column to match header order
output_str += efo + "\t"
print(output_str)
# --- File: Module 1/Chapter 14/prog4.py (repo: PacktPublishing/Raspberry-Pi-Making-Amazing-Projects-Right-from-Scratch-, license: MIT) ---
import socket  # for sockets
import sys #for exit
try:
#create an AF_INET, STREAM socket (TCP)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, msg:
print 'Failed to create socket. Error code: ' + str(msg[0]) + ' , Error message : ' + msg[1]
sys.exit();
print 'Socket Created'
host = 'www.google.com'
try:
remote_ip = socket.gethostbyname( host )
except socket.gaierror:
#could not resolve
print 'Hostname could not be resolved. Exiting'
sys.exit()
print 'Ip address of ' + host + ' is ' + remote_ip
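# A natural next step, not shown in this snippet, would be to connect and
# talk to the resolved host, e.g.:
#   s.connect((remote_ip, 80))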
# --- File: WXBackGround/urls.py (repo: colacanfly/WXBackGround) ---
"""WXBackGround URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from back import views, backdb, update_backdb
urlpatterns = [
path('admin/', admin.site.urls),
path('connect/', views.connection),
path('backdb/', backdb.backdb),
path('update', update_backdb.update)
]
# --- File: neat/ctrnn/__init__.py (repo: dlotten68/CI_-teamJLD, license: BSD-3-Clause) ---
"""Handles the continuous-time recurrent neural network implementation."""
from __future__ import division
from neat.graphs import required_for_output
from neat.six_util import itervalues, iteritems
class CTRNNNodeEval(object):
def __init__(self, time_constant, activation, aggregation, bias, response, links):
self.time_constant = time_constant
self.activation = activation
self.aggregation = aggregation
self.bias = bias
self.response = response
self.links = links
class CTRNN(object):
"""Sets up the ctrnn network itself."""
def __init__(self, inputs, outputs, node_evals):
self.input_nodes = inputs
self.output_nodes = outputs
self.node_evals = node_evals
self.values = [{}, {}]
for v in self.values:
for k in inputs + outputs:
v[k] = 0.0
for node, ne in iteritems(self.node_evals):
v[node] = 0.0
for i, w in ne.links:
v[i] = 0.0
self.active = 0
self.time_seconds = 0.0
def reset(self):
self.values = [dict((k, 0.0) for k in v) for v in self.values]
self.active = 0
self.time_seconds = 0.0
def set_node_value(self, node_key, value):
for v in self.values:
v[node_key] = value
def get_max_time_step(self): # pragma: no cover
# TODO: Compute max time step that is known to be numerically stable for
# the current network configuration.
# pylint: disable=no-self-use
raise NotImplementedError()
def advance(self, inputs, advance_time, time_step=None):
"""
Advance the simulation by the given amount of time, assuming that inputs are
constant at the given values during the simulated time.
"""
final_time_seconds = self.time_seconds + advance_time
# Use half of the max allowed time step if none is given.
if time_step is None: # pragma: no cover
time_step = 0.5 * self.get_max_time_step()
if len(self.input_nodes) != len(inputs):
raise RuntimeError("Expected {0} inputs, got {1}".format(len(self.input_nodes), len(inputs)))
while self.time_seconds < final_time_seconds:
dt = min(time_step, final_time_seconds - self.time_seconds)
ivalues = self.values[self.active]
ovalues = self.values[1 - self.active]
self.active = 1 - self.active
for i, v in zip(self.input_nodes, inputs):
ivalues[i] = v
ovalues[i] = v
for node_key, ne in iteritems(self.node_evals):
node_inputs = [ivalues[i] * w for i, w in ne.links]
s = ne.aggregation(node_inputs)
z = ne.activation(ne.bias + ne.response * s)
ovalues[node_key] += dt / ne.time_constant * (-ovalues[node_key] + z)
self.time_seconds += dt
ovalues = self.values[1 - self.active]
return [ovalues[i] for i in self.output_nodes]
@staticmethod
def create(genome, config, time_constant):
""" Receives a genome and returns its phenotype (a CTRNN). """
genome_config = config.genome_config
required = required_for_output(genome_config.input_keys, genome_config.output_keys, genome.connections)
# Gather inputs and expressed connections.
node_inputs = {}
for cg in itervalues(genome.connections):
if not cg.enabled:
continue
i, o = cg.key
if o not in required and i not in required:
continue
if o not in node_inputs:
node_inputs[o] = [(i, cg.weight)]
else:
node_inputs[o].append((i, cg.weight))
node_evals = {}
for node_key, inputs in iteritems(node_inputs):
node = genome.nodes[node_key]
activation_function = genome_config.activation_defs.get(node.activation)
aggregation_function = genome_config.aggregation_function_defs.get(node.aggregation)
node_evals[node_key] = CTRNNNodeEval(time_constant,
activation_function,
aggregation_function,
node.bias,
node.response,
inputs)
return CTRNN(genome_config.input_keys, genome_config.output_keys, node_evals)
# --- File: Gauss_v45r8/Gen/DecFiles/options/11146113.py (repo: Sally27/backup_cmtuser_full) ---
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/11146113.py generated: Fri, 27 Mar 2015 15:47:59
#
# Event Type: 11146113
#
# ASCII decay Descriptor: [B0 -> (J/psi(1S) -> mu+ mu-) (phi(1020) -> K+ K-) (K_S0 -> pi+ pi-)]cc
#
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/KKmumuInAcc.py" )
from Configurables import Generation
Generation().EventType = 11146113
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_JpsiphiKs,KKmumupipi=KKmumuInAcc.dec"
Generation().SignalRepeatedHadronization.CutTool = "ListOfDaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
# --- File: shared/codes (repo: millotpg/uDj) ---
#!/usr/bin/python3
import pygame
import time
pygame.init()
pygame.mixer.init()
pygame.mixer.music.load("test.mp3")
pygame.mixer.music.play()
# keep the script alive while the music plays on the mixer's background thread
while True:
    time.sleep(10)
# --- File: FPNN/imgaug/augmenters/arithmetic.py (repo: anasali90/code_samples) ---
"""
Augmenters that perform simple arithmetic changes.
Do not import directly from this file, as the categorization is not final.
Use instead::
from imgaug import augmenters as iaa
and then e.g.::
`seq = iaa.Sequential([iaa.Add((-5, 5)), iaa.Multiply((0.9, 1.1))])`
List of augmenters:
* Add
* AddElementwise
* AdditiveGaussianNoise
* Multiply
* MultiplyElementwise
* Dropout
* CoarseDropout
* ReplaceElementwise
* SaltAndPepper
* CoarseSaltAndPepper
* Salt
* CoarseSalt
* Pepper
* CoarsePepper
* Invert
* ContrastNormalization
* JpegCompression
"""
from __future__ import print_function, division, absolute_import
from .. import imgaug as ia
from .. import parameters as iap
from PIL import Image
import imageio
import tempfile
import numpy as np
import six.moves as sm
from . import meta
from .meta import Augmenter
# TODO tests
class Add(Augmenter):
"""
Add a value to all pixels in an image.
Parameters
----------
value : int or tuple of two ints or list of ints or StochasticParameter, optional(default=0)
Value to add to all
pixels.
* If an int, then that value will be used for all images.
* If a tuple (a, b), then a value from the discrete range [a .. b]
will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image
from that parameter.
per_channel : bool or float, optional(default=False)
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Add(10)
always adds a value of 10 to all pixels in the image.
>>> aug = iaa.Add((-10, 10))
adds a value from the discrete range [-10 .. 10] to all pixels of
the input images. The exact value is sampled per image.
>>> aug = iaa.Add((-10, 10), per_channel=True)
adds a value from the discrete range [-10 .. 10] to all pixels of
the input images. The exact value is sampled per image AND channel,
i.e. to a red-channel it might add 5 while subtracting 7 from the
blue channel of the same image.
>>> aug = iaa.Add((-10, 10), per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
def __init__(self, value=0, per_channel=False, name=None,
deterministic=False, random_state=None):
super(Add, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.value = iap.handle_discrete_param(value, "value", value_range=(-255, 255), tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
def _augment_images(self, images, random_state, parents, hooks):
input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
result = images
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i in sm.xrange(nb_images):
image = images[i].astype(np.int32)
rs_image = ia.new_random_state(seeds[i])
per_channel = self.per_channel.draw_sample(random_state=rs_image)
if per_channel == 1:
nb_channels = image.shape[2]
samples = self.value.draw_samples((nb_channels,), random_state=rs_image).astype(image.dtype)
for c, sample in enumerate(samples):
# TODO make value range more flexible
ia.do_assert(-255 <= sample <= 255)
image[..., c] += sample
else:
sample = self.value.draw_sample(random_state=rs_image).astype(image.dtype)
ia.do_assert(-255 <= sample <= 255) # TODO make value range more flexible
image += sample
image = meta.clip_augmented_image_(image, 0, 255) # TODO make value range more flexible
image = meta.restore_augmented_image_dtype_(image, input_dtypes[i])
result[i] = image
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return [self.value, self.per_channel]
# TODO tests
class AddElementwise(Augmenter):
"""
Add values to the pixels of images with possibly different values
for neighbouring pixels.
While the Add Augmenter adds a constant value per image, this one can
add different values (sampled per pixel).
Parameters
----------
value : int or tuple of two int or list of int or StochasticParameter, optional(default=0)
Value to add to the
pixels.
* If an int, then that value will be used for all images.
* If a tuple (a, b), then values from the discrete range [a .. b]
will be sampled.
* If a list of integers, a random value will be sampled from the list
per image.
* If a StochasticParameter, then values will be sampled per pixel
(and possibly channel) from that parameter.
per_channel : bool or float, optional(default=False)
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.AddElementwise(10)
always adds a value of 10 to all pixels in the image.
>>> aug = iaa.AddElementwise((-10, 10))
samples per pixel a value from the discrete range [-10 .. 10] and
adds that value to the pixel.
>>> aug = iaa.AddElementwise((-10, 10), per_channel=True)
samples per pixel *and channel* a value from the discrete
range [-10 .. 10] ands adds it to the pixel's value. Therefore,
added values may differ between channels of the same pixel.
>>> aug = iaa.AddElementwise((-10, 10), per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
def __init__(self, value=0, per_channel=False, name=None, deterministic=False, random_state=None):
super(AddElementwise, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.value = iap.handle_discrete_param(value, "value", value_range=(-255, 255), tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
def _augment_images(self, images, random_state, parents, hooks):
input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
result = images
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i in sm.xrange(nb_images):
seed = seeds[i]
image = images[i].astype(np.int32)
height, width, nb_channels = image.shape
rs_image = ia.new_random_state(seed)
per_channel = self.per_channel.draw_sample(random_state=rs_image)
if per_channel == 1:
samples = self.value.draw_samples((height, width, nb_channels), random_state=rs_image).astype(image.dtype)
else:
samples = self.value.draw_samples((height, width, 1), random_state=rs_image).astype(image.dtype)
samples = np.tile(samples, (1, 1, nb_channels))
after_add = image + samples
after_add = meta.clip_augmented_image_(after_add, 0, 255) # TODO make value range more flexible
after_add = meta.restore_augmented_image_dtype_(after_add, input_dtypes[i])
result[i] = after_add
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return [self.value, self.per_channel]
def AdditiveGaussianNoise(loc=0, scale=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Add gaussian noise (aka white noise) to images.
Parameters
----------
loc : number or tuple of two number or list of number or StochasticParameter, optional(default=0)
Mean of the normal distribution that generates the
noise.
* If a number, exactly that value will be used.
* If a tuple (a, b), a random value from the range a <= x <= b will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
scale : number or tuple of two number or list of number or StochasticParameter, optional(default=0)
Standard deviation of the normal distribution that generates the
noise. Must be >= 0. If 0 then only `loc` will be used.
* If an int or float, exactly that value will be used.
* If a tuple (a, b), a random value from the range a <= x <= b will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional(default=False)
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)
adds gaussian noise from the distribution N(0, 0.1*255) to images.
    >>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
adds gaussian noise from the distribution N(0, s) to images,
where s is sampled per image from the range 0 <= s <= 0.1*255.
    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)
adds gaussian noise from the distribution N(0, 0.1*255) to images,
where the noise value is different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)
adds gaussian noise from the distribution N(0, 0.1*255) to images,
where the noise value is sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent).
"""
loc2 = iap.handle_continuous_param(loc, "loc", value_range=None, tuple_to_uniform=True, list_to_choice=True)
scale2 = iap.handle_continuous_param(scale, "scale", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return AddElementwise(iap.Normal(loc=loc2, scale=scale2), per_channel=per_channel, name=name, deterministic=deterministic, random_state=random_state)
# TODO
#class MultiplicativeGaussianNoise(Augmenter):
# pass
# TODO
#class ReplacingGaussianNoise(Augmenter):
# pass
class Multiply(Augmenter):
"""
Multiply all pixels in an image with a specific value.
This augmenter can be used to make images lighter or darker.
Parameters
----------
mul : number or tuple of two number or list of number or StochasticParameter, optional(default=1.0)
The value with which to multiply the pixel values in each
image.
* If a number, then that value will always be used.
* If a tuple (a, b), then a value from the range a <= x <= b will
be sampled per image and used for all pixels.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, then that parameter will be used to
sample a new value per image.
per_channel : bool or float, optional(default=False)
Whether to use the same multiplier per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Multiply(2.0)
would multiply all images by a factor of 2, making the images
significantly brighter.
>>> aug = iaa.Multiply((0.5, 1.5))
would multiply images by a random value from the range 0.5 <= x <= 1.5,
making some images darker and others brighter.
"""
def __init__(self, mul=1.0, per_channel=False, name=None,
deterministic=False, random_state=None):
super(Multiply, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.mul = iap.handle_continuous_param(mul, "mul", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
def _augment_images(self, images, random_state, parents, hooks):
input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
result = images
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i in sm.xrange(nb_images):
image = images[i].astype(np.float32)
rs_image = ia.new_random_state(seeds[i])
per_channel = self.per_channel.draw_sample(random_state=rs_image)
if per_channel == 1:
nb_channels = image.shape[2]
samples = self.mul.draw_samples((nb_channels,), random_state=rs_image)
for c, sample in enumerate(samples):
ia.do_assert(sample >= 0)
image[..., c] *= sample
else:
sample = self.mul.draw_sample(random_state=rs_image)
ia.do_assert(sample >= 0)
image *= sample
image = meta.clip_augmented_image_(image, 0, 255) # TODO make value range more flexible
image = meta.restore_augmented_image_dtype_(image, input_dtypes[i])
result[i] = image
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return [self.mul, self.per_channel]
# TODO tests
class MultiplyElementwise(Augmenter):
"""
Multiply values of pixels with possibly different values
for neighbouring pixels.
While the Multiply Augmenter uses a constant multiplier per image,
this one can use different multipliers per pixel.
Parameters
----------
mul : number or tuple of two number or list of number or StochasticParameter, optional(default=1.0)
The value by which to multiply the pixel values in the
image.
* If a number, then that value will always be used.
* If a tuple (a, b), then a value from the range a <= x <= b will
be sampled per image and pixel.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then that parameter will be used to
sample a new value per image and pixel.
per_channel : bool or float, optional(default=False)
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.MultiplyElementwise(2.0)
    multiplies all images by a factor of 2.0, making them significantly
    brighter.
>>> aug = iaa.MultiplyElementwise((0.5, 1.5))
samples per pixel a value from the range 0.5 <= x <= 1.5 and
multiplies the pixel with that value.
>>> aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True)
samples per pixel *and channel* a value from the range
0.5 <= x <= 1.5 ands multiplies the pixel by that value. Therefore,
added multipliers may differ between channels of the same pixel.
    >>> aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
def __init__(self, mul=1.0, per_channel=False, name=None, deterministic=False, random_state=None):
super(MultiplyElementwise, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.mul = iap.handle_continuous_param(mul, "mul", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
def _augment_images(self, images, random_state, parents, hooks):
input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
result = images
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i in sm.xrange(nb_images):
seed = seeds[i]
image = images[i].astype(np.float32)
height, width, nb_channels = image.shape
rs_image = ia.new_random_state(seed)
per_channel = self.per_channel.draw_sample(random_state=rs_image)
if per_channel == 1:
samples = self.mul.draw_samples((height, width, nb_channels), random_state=rs_image)
else:
samples = self.mul.draw_samples((height, width, 1), random_state=rs_image)
samples = np.tile(samples, (1, 1, nb_channels))
image = image * samples
image = meta.clip_augmented_image_(image, 0, 255) # TODO make value range more flexible
image = meta.restore_augmented_image_dtype_(image, input_dtypes[i])
result[i] = image
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return [self.mul, self.per_channel]
def Dropout(p=0, per_channel=False, name=None, deterministic=False,
random_state=None):
"""
Augmenter that sets a certain fraction of pixels in images to zero.
Parameters
----------
p : float or tuple of two float or StochasticParameter, optional(default=0)
The probability of any pixel being dropped (i.e. set to
zero).
* If a float, then that value will be used for all images. A value
of 1.0 would mean that all pixels will be dropped and 0.0 that
no pixels would be dropped. A value of 0.05 corresponds to 5
percent of all pixels dropped.
* If a tuple (a, b), then a value p will be sampled from the
range a <= p <= b per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
If you instead want to provide the probability as a stochastic
parameter, you can usually do `Binomial(1-p)` to convert parameter
`p` to a 0/1 representation.
per_channel : bool or float, optional(default=False)
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Dropout(0.02)
drops 2 percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
drops in each image a random fraction of all pixels, where the fraction
is in the range 0.0 <= x <= 0.05.
>>> aug = iaa.Dropout(0.02, per_channel=True)
drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p2, per_channel=per_channel, name=name, deterministic=deterministic, random_state=random_state)
def CoarseDropout(p=0, size_px=None, size_percent=None,
per_channel=False, min_size=4, name=None, deterministic=False,
random_state=None):
"""
Augmenter that sets rectangular areas within images to zero.
In contrast to Dropout, these areas can have larger sizes.
(E.g. you might end up with three large black rectangles in an image.)
Note that the current implementation leads to correlated sizes,
so when there is one large area that is dropped, there is a high likelihood
that all other dropped areas are also large.
This method is implemented by generating the dropout mask at a
lower resolution (than the image has) and then upsampling the mask
before dropping the pixels.
Parameters
----------
p : float or tuple of two float or StochasticParameter, optional(default=0)
The probability of any pixel being dropped (i.e. set to
zero).
* If a float, then that value will be used for all pixels. A value
of 1.0 would mean, that all pixels will be dropped. A value of
0.0 would lead to no pixels being dropped.
* If a tuple (a, b), then a value p will be sampled from the
range a <= p <= b per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
size_px : int or tuple of two ints or StochasticParameter, optional(default=None)
The size of the lower resolution image from which to sample the dropout
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a 3x3 mask, which is then
          upsampled to HxW, where H is the image height and W the image width.
* If a tuple (a, b), then two values M, N will be sampled from the
range [a..b] and the mask will be generated at size MxN, then
upsampled to HxW.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of two floats or StochasticParameter, optional(default=None)
The size of the lower resolution image from which to sample the dropout
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from (p*H)x(p*W) and later upsampled
to HxW.
* If a tuple (a, b), then two values m, n will be sampled from the
          interval (a, b) and used as the percentages, i.e. the mask size
will be (m*H)x(n*W).
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional(default=False)
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional(default=4)
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being dropped.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
    >>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)
    drops 2 percent of all pixels on a lower-resolution image that has
50 percent of the original image's size, leading to dropped areas that
have roughly 2x2 pixels size.
    >>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))
generates a dropout mask at 5 to 50 percent of image's size. In that mask,
0 to 5 percent of all pixels are dropped (random per image).
    >>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))
same as previous example, but the lower resolution image has 2 to 16 pixels
size.
    >>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)
drops 2 percent of all pixels at 50 percent resolution (2x2 sizes)
in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
    >>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if size_px is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_px=size_px, min_size=min_size)
elif size_percent is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p3, per_channel=per_channel, name=name, deterministic=deterministic, random_state=random_state)
class ReplaceElementwise(Augmenter):
"""
Replace pixels in an image with new values.
Parameters
----------
mask : float or tuple of two float or list of float or StochasticParameter, optional(default=0)
Mask that indicates the pixels that are supposed to be replaced.
The mask will be thresholded with 0.5. A value of 1 then indicates a
pixel that is supposed to be replaced.
* If this is a float, then that value will be used as the
probability of being a 1 per pixel.
* If a tuple (a, b), then the probability will be sampled per image
from the range a <= x <= b.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used to
sample a mask.
replacement : number or tuple of two number or list of number or StochasticParameter
The replacement to use at all locations that are marked as `1` in
the mask.
* If this is a number, then that value will always be used as the
replacement.
* If a tuple (a, b), then the replacement will be sampled pixelwise
from the range a <= x <= b.
* If a list of number, then a random value will be picked from
that list as the replacement per pixel.
* If a StochasticParameter, then this parameter will be used sample
pixelwise replacement values.
per_channel : bool or float, optional(default=False)
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
    >>> aug = iaa.ReplaceElementwise(0.05, [0, 255])
Replace 5 percent of all pixels in each image by either 0 or 255.
"""
def __init__(self, mask, replacement, per_channel=False, name=None, deterministic=False, random_state=None):
super(ReplaceElementwise, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.mask = iap.handle_probability_param(mask, "mask", tuple_to_uniform=True, list_to_choice=True)
self.replacement = iap.handle_continuous_param(replacement, "replacement")
self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
def _augment_images(self, images, random_state, parents, hooks):
input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
result = images
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i in sm.xrange(nb_images):
seed = seeds[i]
image = images[i].astype(np.float32)
height, width, nb_channels = image.shape
per_channel = self.per_channel.draw_sample(random_state=ia.new_random_state(seed+1))
if per_channel == 1:
mask_samples = self.mask.draw_samples(
(height, width, nb_channels),
random_state=ia.new_random_state(seed+2)
)
replacement_samples = self.replacement.draw_samples(
(height, width, nb_channels),
random_state=ia.new_random_state(seed+3)
)
else:
mask_samples = self.mask.draw_samples(
(height, width, 1),
random_state=ia.new_random_state(seed+2)
)
mask_samples = np.tile(mask_samples, (1, 1, nb_channels))
replacement_samples = self.replacement.draw_samples(
(height, width, 1),
random_state=ia.new_random_state(seed+3)
)
replacement_samples = np.tile(replacement_samples, (1, 1, nb_channels))
mask_thresh = mask_samples > 0.5
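            # blend: keep original pixels where the mask is 0, take the sampled
            # replacement where the mask is 1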
image_repl = image * (~mask_thresh) + replacement_samples * mask_thresh
image_repl = meta.clip_augmented_image_(image_repl, 0, 255) # TODO make value range more flexible
image_repl = meta.restore_augmented_image_dtype_(image_repl, input_dtypes[i])
result[i] = image_repl
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return [self.mask, self.replacement, self.per_channel]
def SaltAndPepper(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Adds salt and pepper noise to an image, i.e. some white-ish and black-ish
pixels.
Parameters
----------
p : float or tuple of two float or list of float or StochasticParameter, optional(default=0)
Probability of changing a pixel to salt/pepper
noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple (a, b), then a probability will be sampled per image
from the range a <= x <= b.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that salt/pepper is to be added
at that location.
per_channel : bool or float, optional(default=False)
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.SaltAndPepper(0.05)
Replaces 5 percent of all pixels with salt/pepper.
"""
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=p,
replacement=iap.Beta(0.5, 0.5) * 255,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
)
def CoarseSaltAndPepper(p=0, size_px=None, size_percent=None,
per_channel=False, min_size=4, name=None,
deterministic=False, random_state=None):
"""
Adds coarse salt and pepper noise to an image, i.e. rectangles that
contain noisy white-ish and black-ish pixels.
Parameters
----------
p : float or tuple of two float or list of float or StochasticParameter, optional(default=0)
Probability of changing a pixel to salt/pepper
noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple (a, b), then a probability will be sampled per image
from the range a <= x <= b.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that salt/pepper is to be added
at that location.
size_px : int or tuple of two ints or StochasticParameter, optional(default=None)
The size of the lower resolution image from which to sample the noise
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a 3x3 mask, which is then
          upsampled to HxW, where H is the image height and W the image width.
* If a tuple (a, b), then two values M, N will be sampled from the
range [a..b] and the mask will be generated at size MxN, then
upsampled to HxW.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of two floats or StochasticParameter, optional(default=None)
The size of the lower resolution image from which to sample the noise
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from (p*H)x(p*W) and later upsampled
to HxW.
* If a tuple (a, b), then two values m, n will be sampled from the
          interval (a, b) and used as the percentages, i.e. the mask size
will be (m*H)x(n*W).
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional(default=False)
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional(default=4)
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being replaced.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.CoarseSaltAndPepper(0.05, size_percent=(0.01, 0.1))
Replaces 5 percent of all pixels with salt/pepper in an image that has
1 to 10 percent of the input image size, then upscales the results
to the input image size, leading to large rectangular areas being replaced.
"""
mask = iap.handle_probability_param(p, "p", tuple_to_uniform=True, list_to_choice=True)
if size_px is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_px=size_px, min_size=min_size)
elif size_percent is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
replacement = iap.Beta(0.5, 0.5) * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=mask_low,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
)
def Salt(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Adds salt noise to an image, i.e. white-ish pixels.
Parameters
----------
p : float or tuple of two float or list of float or StochasticParameter, optional(default=0)
Probability of changing a pixel to salt
noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple (a, b), then a probability will be sampled per image
from the range a <= x <= b.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that salt is to be added
at that location.
per_channel : bool or float, optional(default=False)
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Salt(0.05)
Replaces 5 percent of all pixels with salt.
"""
replacement01 = iap.ForceSign(
iap.Beta(0.5, 0.5) - 0.5,
positive=True,
mode="invert"
) + 0.5
replacement = replacement01 * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=p,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
)
def CoarseSalt(p=0, size_px=None, size_percent=None,
per_channel=False, min_size=4, name=None,
deterministic=False, random_state=None):
"""
Adds coarse salt noise to an image, i.e. rectangles containing noisy
white-ish pixels.
Parameters
----------
p : float or tuple of two float or list of float or StochasticParameter, optional(default=0)
Probability of changing a pixel to salt
noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple (a, b), then a probability will be sampled per image
from the range a <= x <= b.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that salt is to be added
at that location.
size_px : int or tuple of two ints or StochasticParameter, optional(default=None)
The size of the lower resolution image from which to sample the noise
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a 3x3 mask, which is then
          upsampled to HxW, where H is the image height and W the image width.
* If a tuple (a, b), then two values M, N will be sampled from the
range [a..b] and the mask will be generated at size MxN, then
upsampled to HxW.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of two floats or StochasticParameter, optional(default=None)
The size of the lower resolution image from which to sample the noise
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from (p*H)x(p*W) and later upsampled
to HxW.
* If a tuple (a, b), then two values m, n will be sampled from the
          interval (a, b) and used as the percentages, i.e. the mask size
will be (m*H)x(n*W).
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional(default=False)
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional(default=4)
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being replaced.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.CoarseSalt(0.05, size_percent=(0.01, 0.1))
    Replaces 5 percent of all pixels with salt, based on a noise mask that has
    1 to 10 percent of the input image's size and is then upscaled
    to the input image size, leading to large rectangular areas being replaced.
"""
mask = iap.handle_probability_param(p, "p", tuple_to_uniform=True, list_to_choice=True)
if size_px is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_px=size_px, min_size=min_size)
elif size_percent is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
replacement01 = iap.ForceSign(
iap.Beta(0.5, 0.5) - 0.5,
positive=True,
mode="invert"
) + 0.5
replacement = replacement01 * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=mask_low,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
)
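# Note (added for clarity; not in the original source): the rectangles arise
# because the mask is sampled at low resolution via iap.FromLowerResolution
# (mask_low above) and then upsampled to the full image size, so each
# low-resolution cell expands into a rectangle of identical mask values.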
def Pepper(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Adds pepper noise to an image, i.e. black-ish pixels.
This is similar to dropout, but slower and the black pixels are not
uniformly black.
Parameters
----------
p : float or tuple of two float or list of float or StochasticParameter, optional(default=0)
Probability of changing a pixel to pepper
noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple (a, b), then a probability will be sampled per image
          from the range a <= x <= b.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
per_channel : bool or float, optional(default=False)
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Pepper(0.05)
Replaces 5 percent of all pixels with pepper.
"""
replacement01 = iap.ForceSign(
iap.Beta(0.5, 0.5) - 0.5,
positive=False,
mode="invert"
) + 0.5
replacement = replacement01 * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=p,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
)
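# Note (added): Pepper mirrors Salt above. With positive=False the forced sign
# keeps the shifted Beta samples in [-0.5, 0], so replacement01 lies in
# [0.0, 0.5] and replacement in [0, 127.5], i.e. the dark half of the range.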
def CoarsePepper(p=0, size_px=None, size_percent=None,
per_channel=False, min_size=4, name=None,
deterministic=False, random_state=None):
"""
Adds coarse pepper noise to an image, i.e. rectangles that contain
noisy black-ish pixels.
Parameters
----------
p : float or tuple of two float or list of float or StochasticParameter, optional(default=0)
Probability of changing a pixel to pepper
noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple (a, b), then a probability will be sampled per image
          from the range a <= x <= b.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
size_px : int or tuple of two ints or StochasticParameter, optional(default=None)
The size of the lower resolution image from which to sample the noise
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a 3x3 mask, which is then
          upsampled to HxW, where H is the image height and W the image width.
* If a tuple (a, b), then two values M, N will be sampled from the
range [a..b] and the mask will be generated at size MxN, then
upsampled to HxW.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of two floats or StochasticParameter, optional(default=None)
The size of the lower resolution image from which to sample the noise
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from (p*H)x(p*W) and later upsampled
to HxW.
* If a tuple (a, b), then two values m, n will be sampled from the
          interval (a, b) and used as the percentages, i.e. the mask size
will be (m*H)x(n*W).
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional(default=False)
        Whether to use the same value (is replaced / is not replaced)
        for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional(default=4)
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being replaced.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
    Replaces 5 percent of all pixels with pepper, based on a noise mask that has
    1 to 10 percent of the input image's size and is then upscaled
    to the input image size, leading to large rectangular areas being replaced.
"""
mask = iap.handle_probability_param(p, "p", tuple_to_uniform=True, list_to_choice=True)
if size_px is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_px=size_px, min_size=min_size)
elif size_percent is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
replacement01 = iap.ForceSign(
iap.Beta(0.5, 0.5) - 0.5,
positive=False,
mode="invert"
) + 0.5
replacement = replacement01 * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=mask_low,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
)
# TODO tests
class Invert(Augmenter):
"""
Augmenter that inverts all values in images.
For the standard value range of 0-255 it converts 0 to 255, 255 to 0
and 10 to (255-10)=245.
Let M be the maximum value possible, m the minimum value possible,
v a value. Then the distance of v to m is d=abs(v-m) and the new value
is given by v'=M-d.
Parameters
----------
p : float or StochasticParameter, optional(default=0)
        The probability of an image being
        inverted.
        * If a float, then that probability will be used for all images.
        * If a StochasticParameter, then that parameter will be queried per
image and is expected to return values in the range [0.0, 1.0],
where values >0.5 mean that the image/channel is supposed to be
inverted. Recommended to be some form of Binomial(...).
per_channel : bool or float, optional(default=False)
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
min_value : int or float, optional(default=0)
Minimum of the range of possible pixel values. For uint8 (0-255)
images, this should be 0.
max_value : int or float, optional(default=255)
Maximum of the range of possible pixel values. For uint8 (0-255)
images, this should be 255.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Invert(0.1)
Inverts the colors in 10 percent of all images.
>>> aug = iaa.Invert(0.1, per_channel=0.5)
For 50 percent of all images, it inverts all channels with a probability of
10 percent (same as the first example). For the other 50 percent of all
images, it inverts each channel individually with a probability of 10
percent (so some channels of an image may end up inverted, others not).
"""
def __init__(self, p=0, per_channel=False, min_value=0, max_value=255, name=None,
deterministic=False, random_state=None):
super(Invert, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.p = iap.handle_probability_param(p, "p")
self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
self.min_value = min_value
self.max_value = max_value
def _augment_images(self, images, random_state, parents, hooks):
input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
result = images
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i in sm.xrange(nb_images):
image = images[i].astype(np.int32)
rs_image = ia.new_random_state(seeds[i])
per_channel = self.per_channel.draw_sample(random_state=rs_image)
if per_channel == 1:
nb_channels = image.shape[2]
p_samples = self.p.draw_samples((nb_channels,), random_state=rs_image)
for c, p_sample in enumerate(p_samples):
ia.do_assert(0 <= p_sample <= 1)
if p_sample > 0.5:
image_c = image[..., c]
distance_from_min = np.abs(image_c - self.min_value) # d=abs(v-m)
image[..., c] = -distance_from_min + self.max_value # v'=M-d
else:
p_sample = self.p.draw_sample(random_state=rs_image)
ia.do_assert(0 <= p_sample <= 1.0)
if p_sample > 0.5:
distance_from_min = np.abs(image - self.min_value) # d=abs(v-m)
image = -distance_from_min + self.max_value
image = meta.clip_augmented_image_(image, self.min_value, self.max_value)
image = meta.restore_augmented_image_dtype_(image, input_dtypes[i])
result[i] = image
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return [self.p, self.per_channel, self.min_value, self.max_value]
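# --- Hedged numeric sketch (added; not part of the original file) ------------
# For uint8 images (m=0, M=255) the docstring formula v' = M - abs(v - m)
# maps 0 -> 255, 10 -> 245 and 255 -> 0:
def _demo_invert_formula(min_value=0, max_value=255):
    v = np.array([0, 10, 255], dtype=np.int32)
    inverted = -np.abs(v - min_value) + max_value  # same form as used above
    assert inverted.tolist() == [255, 245, 0]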
# TODO tests
class ContrastNormalization(Augmenter):
"""
Augmenter that changes the contrast of images.
Parameters
----------
alpha : number or tuple of two number or list of number or StochasticParameter, optional(default=1.0)
        Strength of the contrast normalization. Values higher than 1.0
        lead to higher contrast; values lower than 1.0 decrease the contrast.
* If a number, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled per image from
the range a <= x <= b and be used as the alpha value.
* If a list, then a random value will be sampled per image from
that list.
* If a StochasticParameter, then this parameter will be used to
sample the alpha value per image.
per_channel : bool or float, optional(default=False)
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> iaa.ContrastNormalization((0.5, 1.5))
    Decreases or increases contrast per image by a random factor between
0.5 and 1.5. The factor 0.5 means that any difference from the center value
(i.e. 128) will be halved, leading to less contrast.
>>> iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)
Same as before, but for 50 percent of all images the normalization is done
independently per channel (i.e. factors can vary per channel for the same
image). In the other 50 percent of all images, the factor is the same for
all channels.
"""
def __init__(self, alpha=1.0, per_channel=False, name=None, deterministic=False, random_state=None):
super(ContrastNormalization, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.alpha = iap.handle_continuous_param(alpha, "alpha", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
def _augment_images(self, images, random_state, parents, hooks):
input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
result = images
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i in sm.xrange(nb_images):
image = images[i].astype(np.float32)
rs_image = ia.new_random_state(seeds[i])
per_channel = self.per_channel.draw_sample(random_state=rs_image)
if per_channel:
nb_channels = images[i].shape[2]
alphas = self.alpha.draw_samples((nb_channels,), random_state=rs_image)
for c, alpha in enumerate(alphas):
image[..., c] = alpha * (image[..., c] - 128) + 128
else:
alpha = self.alpha.draw_sample(random_state=rs_image)
image = alpha * (image - 128) + 128
image = meta.clip_augmented_image_(image, 0, 255) # TODO make value range more flexible
image = meta.restore_augmented_image_dtype_(image, input_dtypes[i])
result[i] = image
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return [self.alpha, self.per_channel]
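# --- Hedged numeric sketch (added; not part of the original file) ------------
# The augmentation above computes v' = alpha * (v - 128) + 128, so alpha=0.5
# halves every pixel's distance from the center value 128:
def _demo_contrast_formula(alpha=0.5):
    v = np.array([28, 128, 228], dtype=np.float32)
    out = alpha * (v - 128) + 128
    assert out.tolist() == [78.0, 128.0, 178.0]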
class JpegCompression(Augmenter):
"""
    Degrades an image by applying jpeg compression.
    When saving an image to `jpeg` format, the user can select the quality of the image to preserve. The lower the quality,
    the stronger the compression that is applied. After the image is loaded/decoded again, artifacts caused by the compression become visible.
For more details, see https://en.wikipedia.org/wiki/Compression_artifact.
Parameters
----------
compression : number or tuple of two number or list of number or StochasticParameter
        Degree of compression applied when saving to `jpeg` format, in the range [0, 100].
        High compression values cause more artifacts. Image processing software typically uses a default value
        between 50 and 80. At 100 the image becomes unreadable, while at 0 no compression is used and the image occupies much
        more memory.
* If a single number, then that value will be used for the compression degree.
* If a tuple of two number (a, b), then the compression will be a
value sampled from the interval [a..b].
* If a list, then a random value will be sampled and used as the
compression per image.
* If a StochasticParameter, then N samples will be drawn from
that parameter per N input images, each representing the compression
for the nth image. Expected to be discrete.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.JpegCompression(compression=(80, 95))
    Degrades all images using a jpeg compression algorithm, with the compression degree sampled per image from the range 80 to 95.
"""
def __init__(self, compression=50, name=None, deterministic=False, random_state=None):
super(JpegCompression, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
# will be converted to int during augmentation, which is why we allow floats here
self.compression = iap.handle_continuous_param(compression, "compression", value_range=(0, 100), tuple_to_uniform=True, list_to_choice=True)
# The value range 1 to 95 is suggested by PIL's save() documentation
# Values above 95 seem to not make sense (no improvement in visual quality, but large file size)
# A value of 100 would mostly deactivate jpeg compression
# A value of 0 would lead to no compression (instead of maximum compression)
# We use range 1 to 100 here, because this augmenter is about generating images for training
# and not for saving, hence we do not care about large file sizes
self.maximum_quality = 100
self.minimum_quality = 1
def _augment_images(self, images, random_state, parents, hooks):
result = images
nb_images = len(images)
samples = self.compression.draw_samples((nb_images,), random_state=random_state)
for i in sm.xrange(nb_images):
image = images[i].astype(np.float32)
nb_channels = image.shape[-1]
is_single_channel = (nb_channels == 1)
if is_single_channel:
image = image[..., 0]
sample = int(samples[i])
ia.do_assert(100 >= sample >= 0)
img = Image.fromarray(image.astype(np.uint8))
with tempfile.NamedTemporaryFile(mode="wb", suffix=".jpg") as f:
# Map from compression to quality used by PIL
# We have valid compressions from 0 to 100, i.e. 101 possible values
quality = int(np.clip(np.round(
self.minimum_quality + (self.maximum_quality - self.minimum_quality) * (1.0 - (sample / 101))
), self.minimum_quality, self.maximum_quality))
img.save(f, quality=quality)
if nb_channels == 1:
image = imageio.imread(f.name, pilmode="L")
else:
image = imageio.imread(f.name, pilmode="RGB")
if is_single_channel:
image = image[..., np.newaxis]
result[i] = image
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return [self.compression]
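# --- Hedged sketch (added; not part of the original file) --------------------
# Replicates the compression -> PIL quality mapping used in _augment_images:
def _demo_compression_to_quality(sample, minimum_quality=1, maximum_quality=100):
    return int(np.clip(np.round(
        minimum_quality + (maximum_quality - minimum_quality) * (1.0 - (sample / 101))
    ), minimum_quality, maximum_quality))
# e.g. _demo_compression_to_quality(0) -> 100 (best quality, weakest compression)
# and _demo_compression_to_quality(100) -> 2 (close to the strongest setting).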
| [
"[email protected]"
] | |
cf16ef3acd5037757589291f40cf352839ca0190 | 816ec70a15f7d4a4531bba90862ea50fda147237 | /mysite/urls.py | 58f6d206f77c6cf880eb1a8a3e0a96bc83987306 | [] | no_license | madhuprakash19/blogsite | 63d240f4301bfbe0eb25dcd20fe80e587d429434 | 757edba805dbc9bc9024f00e071f958354ecafd7 | refs/heads/main | 2023-07-07T22:24:50.657265 | 2021-08-10T13:03:45 | 2021-08-10T13:03:45 | 393,453,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth.views import LoginView,LogoutView
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('blog.urls')),
path('accounts/login/',LoginView.as_view(),name='login'),
path('accounts/logout/',LogoutView.as_view(),name='logout',kwargs={'next_page':'/'}),
]
| [
"[email protected]"
] | |
a15867a35dd920ebf76e728cf713b109c778aac1 | a11f098121fc5446f8dc7a9555034c51512a4e26 | /app03.py | 5e9a5b21f9738b072a606de1943a4357df144e6c | [] | no_license | anjoe/flask-study | 30b7f2adb1265790dee246dd97fee51f496b046b | 2b5639c9ef4ae77672ff8f4df1c5e1164af6b962 | refs/heads/master | 2020-08-06T21:03:42.926786 | 2019-10-06T15:28:58 | 2019-10-06T15:28:58 | 213,153,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from flask import Blueprint
app03=Blueprint('app03',__name__)
@app03.route('/t3/')
def show():
return 'app03.hello'
| [
"[email protected]"
] | |
53bf208a90bb2722736c1446143d90b5f46b53ec | 9e1a416b36a7ccd2d206ec9b12db616b8c52b851 | /reviews/viewsets.py | 62a36428037e0f6e44359c57334defdd1f9b14b1 | [] | no_license | hyywon/F4-Back | bfcecbe25d06edc3ffedb7f00779270785f83cd5 | d3a41202eadfe4ab12094ca9ef9d6a6a5cf17e5a | refs/heads/main | 2023-01-01T01:48:55.860762 | 2020-10-26T13:25:14 | 2020-10-26T13:25:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | from rest_framework import viewsets
from django_filters import rest_framework as filters
from django_filters import FilterSet
from .models import Review
from .serializers import ReviewListSerializer, ReviewCreateSerializer, ReviewUpdateSerializer
from rest_framework.generics import (
ListCreateAPIView,
UpdateAPIView,
)
class ReviewListViewSet(viewsets.ModelViewSet):
queryset = Review.objects.all().order_by('created_at')
serializer_class = ReviewListSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('id',)
http_method_names = ['get']
class ReviewCreateViewSet(ListCreateAPIView):
queryset = Review.objects.all()
serializer_class = ReviewCreateSerializer
http_method_names = ['post']
class ReviewUpdateViewSet(UpdateAPIView):
queryset = Review.objects.all()
serializer_class = ReviewUpdateSerializer
lookup_field = 'id'
# class ReviewDeleteViewSet(DestroyAPIView):
# queryset = Review.objects.all()
# serializer_class = ReviewDeleteSerializer
# lookup_field = 'id'
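# A minimal wiring sketch (added; hypothetical, not part of this app's code;
# the URL prefixes and route names below are assumptions):
#
# from django.urls import path
# from rest_framework.routers import DefaultRouter
#
# router = DefaultRouter()
# router.register(r'reviews', ReviewListViewSet)
# urlpatterns = router.urls + [
#     path('reviews/create/', ReviewCreateViewSet.as_view()),
#     path('reviews/<int:id>/update/', ReviewUpdateViewSet.as_view()),
# ]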
| [
"[email protected]"
] | |
d1d565660577bb10f246a4aefa4b65af807b75a3 | cbd4475c32b9658fd712b368dbbdfb6083071e29 | /baymax_ui_auto_test/cases/case_data_integration/case_resourceMan/case_operate_dir.py | bdc59723be38e05fc906b8bf169482b10abd1ec3 | [] | no_license | xu-hn/UIautotest | 1b04617dd4f64dc0037606695a3e355a34368d0f | 4fa2b74b46687adaf86175960e44fcdfae35b4d1 | refs/heads/master | 2020-07-03T10:53:27.456575 | 2020-07-02T08:51:11 | 2020-07-02T08:51:11 | 201,883,459 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,184 | py | # -*- coding: utf-8 -*-
from common.BaseRunner import ParametrizedTestCase
import unittest, os, sys, time
from PageObject.data_integration_page.resourceMan_page.resourceMan_page import ResourceManPage
from PageObject.home.home_page import HomePage
from PageObject.login.login_page import LoginTestPage
from common.ElementParam import ElementParam
from common.case_false_rerun import rerun
from common.login_who import who_login
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), p)
)
class OperateDirTest(ParametrizedTestCase):
resourceMan_url = ElementParam.RESOURCE_MEN_URL
def login(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH(who_login(self.who)),
"caseName": sys._getframe().f_code.co_name}
page = LoginTestPage(app)
page.operate()
def to_resource_dir(self):
self.login()
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/home/资源目录.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = HomePage(app)
page.operate()
#链接到某url装饰器
def get_url(to_url=""):
def decorator(func):
def wrapper(self, *args, **kwargs):
if to_url != "":
self.driver.get(to_url)
time.sleep(1)
rerun(self, to_url, func)
return wrapper
return decorator
    # Verify "open the data standard folder" !
@get_url()
def test_a017_open_dir(self):
self.to_resource_dir()
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/展开文件夹.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # # Verify "collapse the data standard folder" !
@get_url(resourceMan_url)
def test_a018_close_dir(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/闭合文件夹.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "create data standard folder"; non-admin users have no permission
# @get_url(resourceMan_url)
# def test_a019_create_dir(self):
# app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/新建文件夹.yaml"),
# "caseName": sys._getframe().f_code.co_name}
# page = ResourceManPage(app)
# page.operate()
# page.check_point()
#
    # # Verify "delete data standard folder"; non-admin users have no permission
# @get_url(resourceMan_url)
# def test_a020_delete_dir(self):
# app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/删除文件夹.yaml"),
# "caseName": sys._getframe().f_code.co_name}
# page = ResourceManPage(app)
# page.operate()
# page.check_point()
#
    # # Verify "move data standard folder"; non-admin users have no permission
# @get_url(resourceMan_url)
# def test_a021_move_dir(self):
# app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/移动文件夹.yaml"),
# "caseName": sys._getframe().f_code.co_name}
# page = ResourceManPage(app)
# page.operate()
# page.check_point()
#
    # Verify "create JDBC data source" !
@get_url(resourceMan_url)
def test_a022_create_dbsource_jdbc(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建JDBC数据源.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "create jdbc_oracle data source" !
@get_url(resourceMan_url)
def test_b002_create_dbsource_jdbc_oracle(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建JDBC_oracle数据源.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "create jdbc_hive data source"!
@get_url(resourceMan_url)
def test_b003_create_dbsource_jdbc_hive(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建JDBC_hive数据源.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
# #
    # # Verify "create _http_ data source" !
@get_url(resourceMan_url)
def test_b004_create_source_http(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建_http_数据源.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # # Verify "create _ftp_ data source" !
@get_url(resourceMan_url)
def test_b005_create_source_ftp(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建_ftp_数据源.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # # Verify "create _socket_ data source" !
@get_url(resourceMan_url)
def test_b006_create_source_socket(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建_SOCKET_数据源.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
# #
    # # Verify "create _mongodb_ data source"!
@get_url(resourceMan_url)
def test_b007_create_source_mongodb(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建_mongodb_数据源.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
# #
    # # Verify "create _ElasticSearch_ data source"!
@get_url(resourceMan_url)
def test_b008_create_source_ElasticSearch(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建_ElasticSearch_数据源.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # # Verify "create jdbc_oracle data source - connection test" !
@get_url(resourceMan_url)
def test_b009_create_dbsource_jdbc_oracle_connect(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建JDBC_oracle数据源_链接测试.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
#
    # Verify "create jdbc_hive data source - connection test" !
@get_url(resourceMan_url)
def test_b010_create_dbsource_jdbc_hive_connect(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建JDBC_hive数据源_链接测试.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "create JDBC data source - connection test" !
@get_url()
def test_a023_create_dbsource_jdbc_connect(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-新建JDBC数据源-链接测试.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # # Verify "delete JDBC data source" !
@get_url()
def test_a024_delete_dbsource_jdbc(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据源-删除JDBC数据源.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "metadata - create schema"!
@get_url()
def test_a025_create_schema(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/元数据-新建schema.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
    # Verify "metadata - move schema"!
@get_url()
def test_a026_move_schema(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/元数据-移动schema.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "metadata - copy schema"!
@get_url()
def test_a027_copy_schema(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/元数据-复制schema.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "metadata - delete schema"!
@get_url()
def test_a028_delete_schema(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/元数据-删除schema.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "metadata - analyze - create schema"!! has a bug
# @get_url()
# def test_a029_analysis_create_schema(self):
# app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/元数据-分析-新建schema.yaml"),
# "caseName": sys._getframe().f_code.co_name}
# page = ResourceManPage(app)
# page.operate()
# page.check_point()
#
    # Verify "dataset - create _mysql_ Dataset" !
@get_url(resourceMan_url)
def test_a030_create_Dataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-新建Dataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
    # # Verify "dataset - create _Oracle_ Dataset" !
@get_url(resourceMan_url)
def test_b001_create_Oracle_Dataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-新建-Oracle-Dataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "dataset - move Dataset"!
@get_url()
def test_a031_move_Dataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-移动Dataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "dataset - delete Dataset" !
@get_url()
def test_a032_delete_Dataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-删除Dataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "dataset - preview Dataset"!
@get_url()
def test_a033_preview_Dataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-预览Dataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "dataset - view Dataset"!
@get_url(resourceMan_url)
def test_a034_view_Dataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-查看Dataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "dataset - create HDFSDataset"!
@get_url(resourceMan_url)
def test_a035_create_HDFSDataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-新建HDFSDataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "dataset - move HDFSDataset"!
@get_url(resourceMan_url)
def test_a036_move_HDFSDDataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-移动HDFSDataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
#
    # Verify "dataset - preview HDFSDataset"!
@get_url()
def test_a037_preview_HDFSDataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-预览HDFSDataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
    # # Verify "dataset - delete HDFSDataset" !
@get_url(resourceMan_url)
def test_a038_delete_HDFSDataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-删除HDFSDataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "dataset - view HDFSDataset" ! ------------------------------------------------------------------------------------------------------------
@get_url(resourceMan_url)
def test_a039_view_HDFSDataset(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据集-查看HDFSDataset.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "data standard - create standard"!
@get_url(resourceMan_url)
def test_a040_create_standard(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据标准-新建standard.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "data standard - move standard" !
@get_url()
def test_a041_move_standard(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据标准-移动standard.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
#
    # Verify "data standard - delete standard"
@get_url()
def test_a042_delete_standard(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/数据标准-删除standard.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
@classmethod
def setUpClass(cls):
super(OperateDirTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(OperateDirTest, cls).tearDownClass()
'''
    --------------------------------------- Debugging --------------------------------------------------------------------------------------------
'''
class OperateDirTestSSSS(ParametrizedTestCase):
resourceMan_url = ElementParam.RESOURCE_MEN_URL
def login(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH(who_login(self.who)),
"caseName": sys._getframe().f_code.co_name}
page = LoginTestPage(app)
page.operate()
def to_resource_dir(self):
self.login()
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/home/资源目录.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = HomePage(app)
page.operate()
    # Decorator: navigate the driver to the given URL (if any) before running the test
def get_url(to_url=""):
def decorator(func):
def wrapper(self, *args, **kwargs):
if to_url != "":
self.driver.get(to_url)
func(self, *args, **kwargs)
return wrapper
return decorator
    # Verify "open the data standard folder"
def test_a017_open_dir(self):
self.to_resource_dir()
    # Verify "move data standard folder"
@get_url(resourceMan_url)
def test_a021_move_dir(self):
app = {"logTest": self.logTest, "driver": self.driver, "path": PATH("../YAML/data_integration_yaml/resourceMan_yaml/移动文件夹.yaml"),
"caseName": sys._getframe().f_code.co_name}
page = ResourceManPage(app)
page.operate()
page.check_point()
@classmethod
def setUpClass(cls):
super(OperateDirTestSSSS, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(OperateDirTestSSSS, cls).tearDownClass()
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
e15fbbc01d14573d8ad538799b6a7f1880f85372 | d03a31d080267010c7bbd5ac9cbaa94cffa23292 | /tests/test_utils.py | 19edf3ba3488d535380466b4472b639ffe03e69d | [] | no_license | chmouel/python-nonobot | 0d654f4e608f77bb85d0adb16b9d3639a2586f0b | 4e07ec1a4ba755a6f7070f5778fe734a3180ad70 | refs/heads/master | 2021-01-01T18:02:30.832406 | 2014-12-05T10:48:14 | 2014-12-05T10:48:14 | 17,373,952 | 0 | 4 | null | 2014-03-15T09:04:21 | 2014-03-03T17:34:52 | Python | UTF-8 | Python | false | false | 1,231 | py | # -*- coding: utf-8 -*-
# Author: Chmouel Boudjnah <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import urllib
import nonobot.utils as nutils
class UtilsTest(unittest.TestCase):
def test_clean_nick(self):
self.assertEqual(nutils.clean_nick("foo_____"),
'foo')
def test_clean_nick_nothing_on_empty(self):
self.assertIsNone(nutils.clean_nick(""))
def test_quoted(self):
self.assertEqual(nutils.clean_nick("foo***"),
urllib.quote("foo***"))
def test_clean_nick_with_space(self):
name = "foo bar"
self.assertEqual(nutils.clean_nick(name),
urllib.quote(name))
| [
"[email protected]"
] | |
2b11e396b21053b8fe884f17e6abd9bd5374a2a3 | 48e6a442f35cace8df3a87e1660e4539a084b39e | /revision/revision_5/sweepL1L2/generated_data/0.20_0.60/A2_pod100/traffic_matrix_1/A2_1.py | 731507a11c329685e372c3f32ebcd93b27b1f1c8 | [] | no_license | LiYan1988/kthjocn | 9056c05e5095a93b47a22c9e027458f410c0d8f3 | 7e5de3b3fb2a48c6dcaf6d7788ab823c5c743845 | refs/heads/master | 2021-01-01T03:56:47.251908 | 2018-06-08T07:27:33 | 2018-06-08T07:27:33 | 77,261,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
from sdm import *
from gurobipy import *
import pandas as pd
np.random.seed(2010)
num_cores=5
num_slots=80
mtridx = 1
time_limit_routing = 1800 # 1000
time_limit_sa = 108 # 10800
filename = 'traffic_matrix_1.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
row = [float(u) for u in row]
tm.append(row)
tm = np.array(tm)
betav = np.array([0,
1e-5, 2e-5, 4e-5, 8e-5,
1e-4, 2e-4, 4e-4, 8e-4,
1e-3, 2e-3, 4e-3, 8e-3,
1e-2, 2e-2, 4e-2, 8e-2,
1e-1, 2e-1, 4e-1, 1, 10])
#betav = np.array([1e-3, 2e-3, 4e-3, 8e-3])
results = {}
obj_results = {}
cnk_results = {}
thp_results = {}
obj_ub = {}
cnk_ub = {}
thp_ub = {}
for beta in betav:
m = Arch2_decompose(tm, num_slots=num_slots, num_cores=num_cores,
alpha=1,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.05, method=2)
m.multiple_heuristic()
results[beta] = pd.DataFrame(m.heuristics_results)
obj_results[beta] = results[beta].iloc[0, :]
cnk_results[beta] = results[beta].iloc[1, :]
thp_results[beta] = results[beta].iloc[2, :]
obj_ub[beta] = m.obj_ub_
cnk_ub[beta] = m.connection_ub_
thp_ub[beta] = m.throughput_ub_
# write results
m.write_result_csv('cnklist_heuristic_%d_%.2e.csv'%(mtridx,beta), m.cnklist_)
obj_results = pd.DataFrame(obj_results)
cnk_results = pd.DataFrame(cnk_results)
thp_results = pd.DataFrame(thp_results)
obj_ub = pd.Series(obj_ub)
cnk_ub = pd.Series(cnk_ub)
thp_ub = pd.Series(thp_ub)
argmax = {betav[i]:obj_results.iloc[:, i].argmax() for i in range(len(betav))}
objmax = {betav[i]:obj_results.iloc[:, i].max() for i in range(len(betav))}
cnk_bh = {betav[i]:cnk_results.loc[argmax[betav[i]], betav[i]]
for i in range(len(betav))}
thp_bh = {betav[i]:thp_results.loc[argmax[betav[i]], betav[i]]
for i in range(len(betav))}
obj_final = pd.DataFrame({'ub':obj_ub, 'best_heuristic':objmax,
'best_method':argmax, 'cnk_bh':cnk_bh,
'thp_bh':thp_bh, 'cnk_ub':cnk_ub, 'thp_ub':thp_ub})
obj_final['optimality'] = obj_final['best_heuristic']/obj_final['ub']
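# Added note (hedged; not in the original script): obj_final compares, per beta,
# the best heuristic objective against the model's reported upper bound
# (obj_ub_); 'optimality' = best_heuristic / ub, so values near 1.0 mean the
# heuristic is close to that bound.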
obj_results.to_csv('obj_results_{}.csv'.format(mtridx))
cnk_results.to_csv('cnk_results_{}.csv'.format(mtridx))
thp_results.to_csv('thp_results_{}.csv'.format(mtridx))
obj_final.to_csv('obj_final_{}.csv'.format(mtridx)) | [
"[email protected]"
] | |
835478f976795573d8355bbee93293234d4cb55f | c87ae09a0229b4b4620c511b0c51eb685ec22b99 | /Python全栈学习/第四模块 网络编程进阶&数据库开发/practise/基于多进程的socket通信/队列的使用.py | b186e6d31bb0a9672c52aa8b644e8f974705eaaf | [] | no_license | caideyang/python2018 | 050f4c29c37b5bec963e77e0724cd05a9350deed | b7a3a728ef36b43448dc5ff594fdba500b67ad53 | refs/heads/master | 2020-03-25T06:02:54.699941 | 2018-11-01T23:04:29 | 2018-11-01T23:04:29 | 143,480,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | #!/usr/bin/python3
#@Author:CaiDeyang
#@Time: 2018/9/26 10:41
from multiprocessing import Queue
import time
if __name__ == "__main__":
    q = Queue(3)  # create a queue with a maximum depth of 3
    q.put("hello")  # put a message on the queue
    q.put([1,2,3,4])
    q.put({"name": "caideyang"})
    # time.sleep(1)
    print(q.empty())  # check whether the queue is empty
    print(q.full())  # check whether the queue is full
    print(q.get())  # fetch data from the queue
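    # Added note (not in the original): after the final q.get() below, one
    # message ({"name": "caideyang"}) is still queued; a q.put() on a full
    # queue would block, while q.put_nowait() raises queue.Full immediately.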
print(q.get()) | [
"[email protected]"
] | |
635f7add7ac5d16f01eeef321294fbf88823c467 | 43f85daf9464e1bf8bdfaeb8988107e8b29555f6 | /시즌3 - 웹 개발 쉽게 하Django/3-6/catalog/migrations/0001_initial.py | 27fabd6bb1534fde67d961ecc0c6c9067360d5b8 | [] | no_license | min98-k/chocopy | 578914ff887078bcfbb97af0ffcc9332c7a37cf7 | bcafe77329966dc2bdd9d61ed19b1b855d59ce4b | refs/heads/master | 2023-04-21T02:14:47.786528 | 2020-03-22T23:36:49 | 2020-03-22T23:36:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # Generated by Django 2.1.5 on 2019-01-19 01:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Enter the genre of the book (e.g. novel)', max_length=200)),
],
),
]
| [
"[email protected]"
] | |
c8892cc719511184005cee0885fc673eb6eda1e9 | e39683d9726c304c73e770e251fc7d9890ddda8c | /dadata/__init__.py | 1fff4a8e83c8f3c1d58830e8760bdb1b69878e68 | [] | permissive | okfish/django-dadata | 2ce53f80d51427f7716d3b3c07436577ed872c46 | 79ddb72cd3f15754dcf4d3e010615c5124f60d1b | refs/heads/master | 2021-01-10T05:50:13.748345 | 2018-12-23T14:07:30 | 2018-12-23T14:07:30 | 43,523,851 | 3 | 4 | BSD-3-Clause | 2019-05-30T12:40:53 | 2015-10-01T22:08:55 | Python | UTF-8 | Python | false | false | 26 | py | __version__ = '0.0.1-dev'
| [
"[email protected]"
] | |
8883460618997c1a942e479a38717a23d5ddb63b | b183448083cd328034a19887910059bfe52945bc | /evn_gmail/bin/symilar | 390e7f9706effc65a684c5067a554e696a0c22ee | [] | no_license | asif-django/projects | d43b90a0ae2cbcd77467ca0e7c1401aaed8dcb97 | bec9b5e86c591065fb3bd3f737a6f5f14a2d099e | refs/heads/master | 2022-12-18T09:47:27.295998 | 2020-09-24T15:55:07 | 2020-09-24T15:55:07 | 298,251,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | #!/home/grktechnologies/classprojects/first_project/evn_gmail/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
| [
"[email protected]"
] | ||
bc3468411b2c792670f4d7186b1db0b21a8e59be | 4f54959f2142171104713c5a288bc1fc9b6b1c38 | /lists/migrations/0001_initial.py | 7d52212cbdbc7ad91d050e10098b189d52d83423 | [
"MIT"
] | permissive | votatdat/obeythetestinggoat | 87284ebf97b794106d96275fd3b217aeb37a42d0 | 5af0278ede75914ebce7f9ed8a63dd95fb5f576d | refs/heads/main | 2023-04-22T13:49:56.084873 | 2021-05-10T10:43:04 | 2021-05-10T10:43:04 | 361,966,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-04-29 09:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| [
"[email protected]"
] | |
65bb85decf0f3c01946c6d6c872c386dcd4b68c6 | 73d2d54a0b6763d867e2e75f6d111aa686f30c3e | /data/text_dataset.py | b42032e74dbc9cbc410faadd9375199769655956 | [
"MIT"
] | permissive | chickenfingerwu/ScrabbleGAN-ETL-dataset | caf82c88f77bc1fb24d2b2a9b0b8cdf878551a91 | 759035836510485ca8bda6af37d7dce498f2e211 | refs/heads/master | 2023-02-21T06:17:26.188360 | 2021-01-26T14:56:12 | 2021-01-26T14:56:12 | 311,996,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,290 | py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT
import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import lmdb
import torchvision.transforms as transforms
import six
import sys
from PIL import Image
import numpy as np
from data.base_dataset import BaseDataset, get_transform
import os
import sys
class TextDataset(BaseDataset):
def modify_commandline_options(parser, is_train):
parser.add_argument('--collate', action='store_false', default=True,
help='use regular collate function in data loader')
parser.add_argument('--aug_dataroot', type=str, default=None,
help='augmentation images file location, default is None (no augmentation)')
parser.add_argument('--aug', action='store_true', default=False,
help='use augmentation (currently relevant for OCR training)')
return parser
def __init__(self, opt, target_transform=None):
BaseDataset.__init__(self, opt)
self.env = lmdb.open(
opt.dataroot,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
            print('cannot create lmdb from %s' % (opt.dataroot))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get('num-samples'.encode('utf-8')).decode('utf-8'))
self.nSamples = nSamples
if opt.aug and opt.aug_dataroot is not None:
self.env_aug = lmdb.open(
os.path.abspath(opt.aug_dataroot),
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
with self.env_aug.begin(write=False) as txn:
nSamples = int(txn.get('num-samples'.encode('utf-8')).decode('utf-8'))
self.nSamples = self.nSamples + nSamples
self.nAugSamples = nSamples
self.transform = get_transform(opt, grayscale=(opt.input_nc == 1))
self.target_transform = target_transform
if opt.collate:
self.collate_fn = TextCollator(opt)
else:
self.collate_fn = RegularCollator(opt)
self.labeled = opt.labeled
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
envAug = False
if hasattr(self, 'env_aug'):
if index>=self.nAugSamples:
index = index-self.nAugSamples
else:
envAug = True
index += 1
with eval('self.env'+'_aug'*envAug+'.begin(write=False)') as txn:
img_key = 'image-%09d' % index
imgbuf = txn.get(img_key.encode('utf-8'))
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
img = Image.open(buf).convert('L')
except IOError:
print('Corrupted image for %d' % index)
return self[index + 1]
if self.transform is not None:
img = self.transform(img)
item = {'img': img, 'img_path': img_key, 'idx':index}
if self.labeled:
label_key = 'label-%09d' % index
label = txn.get(label_key.encode('utf-8'))
if self.target_transform is not None:
label = self.target_transform(label)
item['label'] = label
if hasattr(self,'Z'):
z = self.Z[index-1]
item['z'] = z
return item
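# --- Hedged writer sketch (added; not part of the original file) -------------
# Illustrates the LMDB layout that TextDataset reads above: a 'num-samples'
# entry plus 1-indexed 'image-%09d' / 'label-%09d' keys. The helper name and
# map_size are assumptions.
def _write_lmdb_sketch(path, samples):
    env = lmdb.open(path, map_size=1 << 30)
    with env.begin(write=True) as txn:
        for i, (img_bytes, label) in enumerate(samples, start=1):
            txn.put(('image-%09d' % i).encode('utf-8'), img_bytes)
            txn.put(('label-%09d' % i).encode('utf-8'), label.encode('utf-8'))
        txn.put('num-samples'.encode('utf-8'), str(len(samples)).encode('utf-8'))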
class TextCollator(object):
def __init__(self, opt):
self.resolution = opt.resolution
def __call__(self, batch):
img_path = [item['img_path'] for item in batch]
width = [item['img'].shape[2] for item in batch]
indexes = [item['idx'] for item in batch]
imgs = torch.ones([len(batch), batch[0]['img'].shape[0], batch[0]['img'].shape[1], max(width)], dtype=torch.float32)
for idx, item in enumerate(batch):
try:
imgs[idx, :, :, 0:item['img'].shape[2]] = item['img']
except:
print(imgs.shape)
item = {'img': imgs, 'img_path':img_path, 'idx':indexes}
if 'label' in batch[0].keys():
labels = [item['label'] for item in batch]
item['label'] = labels
if 'z' in batch[0].keys():
z = torch.stack([item['z'] for item in batch])
item['z'] = z
return item
class RegularCollator(object):
def __init__(self, opt):
self.resolution = opt.resolution
def __call__(self, batch):
img_path = [item['img_path'] for item in batch]
imgs = torch.stack([item['img'] for item in batch])
item = {'img': imgs, 'img_path':img_path}
if 'label' in batch[0].keys():
labels = [item['label'] for item in batch]
item['label'] = labels
if 'z' in batch[0].keys():
z = torch.stack([item['z'] for item in batch])
item['z'] = z
return item | [
"[email protected]"
] | |
09c4d75546bbe0a6617534eaf629bb3047355823 | 9e525820bd268caf63e14aee63a06cf810de677c | /project_summarizer/plugin_manager/bad_plugin_error.py | cc71c63da0ba7351ee928de279cecace2932fa58 | [
"MIT"
] | permissive | jackdewinter/pyscan | 99b72f6578a30c5d767cd8a51b6f93ada42753b7 | aff981ac403a0cd2ef1b5a96c8c7ddf5478615bc | refs/heads/master | 2023-08-05T04:34:20.252182 | 2022-08-26T02:06:14 | 2022-08-26T02:06:14 | 232,250,571 | 1 | 0 | MIT | 2023-03-06T10:02:47 | 2020-01-07T05:38:01 | Python | UTF-8 | Python | false | false | 2,326 | py | """
Module to allow for a critical error within a plugin to be encapsulated
and reported.
"""
from typing import Optional
class BadPluginError(Exception):
"""
Class to allow for a critical error within a plugin to be encapsulated
and reported.
"""
# pylint: disable=too-many-arguments
def __init__(
self,
file_name: Optional[str] = None,
class_name: Optional[str] = None,
field_name: Optional[str] = None,
is_constructor: bool = False,
is_empty: bool = False,
formatted_message: Optional[str] = None,
) -> None:
if not formatted_message and file_name:
formatted_message = BadPluginError.__create_file_name_message(
file_name, class_name, is_constructor
)
elif class_name:
formatted_message = BadPluginError.__create_class_name_message(
class_name, formatted_message, field_name, is_empty
)
super().__init__(formatted_message)
# pylint: enable=too-many-arguments
@staticmethod
def __create_file_name_message(
file_name: Optional[str], class_name: Optional[str], is_constructor: bool
) -> str:
if class_name:
return (
f"Plugin file named '{file_name}' threw an exception in the constructor for the class '{class_name}'."
if is_constructor
else f"Plugin file named '{file_name}' does not contain a class named '{class_name}'."
)
return f"Plugin file named '{file_name}' cannot be loaded."
@staticmethod
def __create_class_name_message(
class_name: Optional[str],
formatted_message: Optional[str],
field_name: Optional[str],
is_empty: bool,
) -> str:
if formatted_message:
return f"Plugin class '{class_name}' had a critical failure: {formatted_message}"
if field_name:
return (
f"Plugin class '{class_name}' returned an empty value for field name '{field_name}'."
if is_empty
else f"Plugin class '{class_name}' returned an improperly typed value for field name '{field_name}'."
)
return f"Plugin class '{class_name}' had a critical failure loading the plugin details."
| [
"[email protected]"
] | |
6a2d4f6dc8fd8104d61daf5bdc7ca47981cdf426 | b25050e2694af14ab7ee8b59f966231973b18caa | /PerceptronAlg.py | b75fe4d85decf79c3e0d13c25fa4c28c2615b213 | [] | no_license | dlee67/MLPy | c40ef9d5548061aac0323daa98b29884b38f64e9 | 6ecb54663716da3b1962409d4baff0e6cf73a204 | refs/heads/master | 2021-09-03T05:02:17.629821 | 2018-01-05T20:29:06 | 2018-01-05T20:29:06 | 114,918,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,839 | py | #When it comes to algorithms, whoever is teaching it will not tell us the significances within the algorithm,
#that makes it work.
from matplotlib.colors import ListedColormap
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/iris/iris.data',
header=None)
df.tail()
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
class Perceptron (object):
def __init__(self, eta=0.01, n_iter=50, random_state=1):
self.eta = eta
self.n_iter=n_iter
self.random_state=random_state
self.final_weight = None
def fit(self, X, y):
rgen = np.random.RandomState(self.random_state) # Random generator, that's really it.
self.w_ = rgen.normal(loc=0.0, scale =0.01, size=1+X.shape[1])
self.errors_ = []
print("weight vector in the beginning: ", self.w_)
#iterate according to the amount of n_iter.
for epoch in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
                update=self.eta * (target - self.predict(xi))  # note: the x^i factor enters below, in the weight update (update * xi)
if update != 0:
print("Miss fire on target: ", target, " ,with data: ", xi)
print("Weight now: ", self.w_[1:])
print("Biased unit at: ", self.w_[0])
print("Update's at: ", update)
print("net_input returns: ", self.net_input(xi))
print("Weight later: ", (self.w_[1:] + update*xi))
print("\n")
#if update == 0:
# print("No miss fire on target: ", target, " ,with data: ", xi)
# print("Weight now: ", self.w_[1:])
# print("Biased unit at: ", self.w_[0])
# print("Update's at: ", update)
# print("net_input returns: ", self.net_input(xi))
# print("Weight later: ", (self.w_[1:] + update*xi))
# print("\n")
self.w_[1:] += update*xi
self.w_[0] += update
errors += int(update != 0.0)
self.errors_.append(errors)
print("Weight now is: ", self.w_)
return self
# https://math.stackexchange.com/questions/1461038/how-exactly-does-the-sign-of-the-dot-product-determine-the-angle-between-two-vec
# Everything is explained in the above link.
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
X = df.iloc[0:100, [0, 2]].values
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
ppn = Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.8,
c=colors[idx],
marker=markers[idx],
label=cl,
edgecolor='black')
def graph(formula, slope, intercept):
x = np.arange(10)
y = formula(slope, x, intercept)
plt.plot(x, y)
plt.show()
def lin_eq(m, x, b):
return m*x + b
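# Hedged sketch (added; not in the original script): the learned boundary is
# w0 + w1*x + w2*y = 0, i.e. a line with slope -w1/w2 and intercept -w0/w2,
# which plugs straight into graph()/lin_eq() defined above:
# graph(lin_eq, -ppn.w_[1] / ppn.w_[2], -ppn.w_[0] / ppn.w_[2])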
#plot_decision_regions(X, y, classifier=ppn)
#plt.show() | [
"[email protected]"
] | |
dd7b95e500628e6940aa3810aa36052e0ea0e6aa | c83161b000e1ae352e7bfb4cc88d234367304fb8 | /python/wx/center.py | 2f69c9c98e916b7e8a0d2769f8c214d33fc4d6d1 | [] | no_license | opsahl/programming | 0b9545f1fe762913e34a9114d26cf48e03c670a0 | 721e8a1cbd6aa109095f4d57bb51315ab671305d | refs/heads/master | 2022-08-16T03:27:38.331905 | 2012-08-19T14:03:42 | 2012-08-19T14:03:42 | 5,289,479 | 0 | 0 | null | 2022-07-21T22:33:44 | 2012-08-03T19:28:59 | C | UTF-8 | Python | false | false | 371 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
#
# center.py
import wx
class Example(wx.Frame):
def __init__(self, parent, title):
super(Example, self).__init__(parent, title=title,
size=(300, 200))
self.Centre()
self.Show()
if __name__ == "__main__":
app = wx.App()
Example(None, title="Center")
app.MainLoop()
| [
"[email protected]"
] | |
8583238485b2660402d8b57cac48f5d116c6583c | 20f29a892a7f5c77045936b7c8ff8162e5d31997 | /time_system/clocker/migrations/0010_auto__add_field_timesheet_hourly_rate.py | 3b187a7598e54270e7d14e66b3bace6b0ba65195 | [] | no_license | orlandobuzana/timeclock | f1953bdd6fbd99de0952ba870da4324ad119cd36 | 146c24c51e006b09544e3311463e8639f870e1ef | refs/heads/master | 2020-12-28T21:18:22.201190 | 2014-08-03T22:04:52 | 2014-08-03T22:04:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,123 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Timesheet.hourly_rate'
db.add_column(u'clocker_timesheet', 'hourly_rate',
self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=5, decimal_places=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Timesheet.hourly_rate'
db.delete_column(u'clocker_timesheet', 'hourly_rate')
models = {
u'clocker.employee': {
'Meta': {'ordering': "['username']", 'object_name': 'Employee'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 6, 18, 0, 0)'}),
'first_name': ('django.db.models.fields.TextField', [], {}),
'has_salary': ('django.db.models.fields.BooleanField', [], {}),
'hire_date': ('django.db.models.fields.DateField', [], {}),
'hourly_rate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.TextField', [], {}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'salary': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'username': ('django.db.models.fields.TextField', [], {'unique': 'True'})
},
u'clocker.job': {
'Meta': {'ordering': "['-is_active', 'name']", 'object_name': 'Job', 'db_table': "'Job'"},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {}),
'name': ('django.db.models.fields.TextField', [], {})
},
u'clocker.shift': {
'Meta': {'ordering': "['-time_in', 'employee']", 'object_name': 'Shift', 'db_table': "'Shift'"},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'employee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clocker.Employee']"}),
'hours': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_in': ('django.db.models.fields.DateTimeField', [], {}),
'time_out': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'clocker.shiftsummary': {
'Meta': {'ordering': "['shift', 'employee', 'job']", 'unique_together': "(('job', 'shift'),)", 'object_name': 'ShiftSummary', 'db_table': "'Shift Summary'"},
'employee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clocker.Employee']"}),
'hours': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clocker.Job']"}),
'miles': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'shift': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clocker.Shift']"})
},
u'clocker.timesheet': {
'Meta': {'ordering': "['-end']", 'object_name': 'Timesheet'},
'employee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'timesheet_set'", 'to': u"orm['clocker.Employee']"}),
'end': ('django.db.models.fields.BigIntegerField', [], {}),
'hourly_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shifts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['clocker.Shift']", 'symmetrical': 'False'}),
'signature': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'start': ('django.db.models.fields.BigIntegerField', [], {})
}
}
complete_apps = ['clocker'] | [
"[email protected]"
] | |
35b145b54af2566ea269e246ccca0064a63940d7 | c5e0e3dd8b904d09bfa19761bdbf0f9fd4fdfc75 | /merge_data.py | 0c4c0f1d875d178569b7cbee0aed7f48c8b9d673 | [] | no_license | pauldiepold/MasterProjekt | a50a793305841cbcb2af8e65bd9deffcaf668895 | be48e3050970ae9379a06105edc92062edff686e | refs/heads/master | 2023-08-07T14:39:44.458492 | 2021-09-17T10:26:32 | 2021-09-17T10:26:32 | 370,984,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,003 | py | import pandas as pd
lookup = {
'ita': [
['5R04', '4R04', '3R04', '1Y01', '2Y01', '2R03', '5R05', '1R02', '2Y02', '2R04'],
['1R03', '3Y01', '1Y02', '3Y02', '2Y03', '4R05', '4Y01', '5Y01', '5R06', '3Y03'],
['1Y03', '3R05', '4Y02', '2Y04', '3Y04', '4Y03', '1R04', '5Y02', '5Y03', '1Y04'],
['2R05', '1R05', '5Y04', '4Y04', '5R07', '4R06', '3Y05', '3R06', '2Y05', '4Y05'],
['5Y05', '1Y05', '2R06', '1R06', '2Y06', '3Y06', '4R07', '1Y06', '4Y06', '3R07'],
['5Y06', '2R07', '2Y07', '1R07', '1Y07', '4R08', '3Y07', '4Y07', '3R08', '5Y07'],
['2R08', '2Y08', '4Y08', '3Y08', '1R08', '4R09', '5Y08', '1Y08', '3R09', '2R09'],
['1R09', '2Y09', '3Y09', '5Y09', '1Y09', '4Y09', '2Y10', '1R10', '3R10', '2R10'],
['4Y10', '3Y10', '1Y10', '2Y11', '4Y11', '2R11', '1R11', '3Y11', '3R11', '1Y11'],
['2Y12', '2R12', '1Y12', '3Y12', '1R12', '3R12', '2Y13', '1Y13', '1R13', '2R13'],
['3R13', '1Y14', '1R14', '3R14', '2R14', '4R14', '1R15', '4R15', '3R15', '2R15']
],
'helligkeit': [
['1Y01', '2Y01', '3Y01', '4Y01', '5Y01'],
['1R02', '1Y02', '2Y02', '3Y02', '4Y02', '5Y02'],
['2R03', '1R03', '1Y03', '2Y03', '3Y03', '4Y03', '5Y03'],
['5R04', '4R04', '3R04', '2R04', '1R04', '1Y04', '2Y04', '3Y04', '4Y04', '5Y04'],
['5R05', '4R05', '3R05', '2R05', '1R05', '1Y05', '2Y05', '3Y05', '4Y05', '5Y05'],
['5R06', '4R06', '3R06', '2R06', '1R06', '1Y06', '2Y06', '3Y06', '4Y06', '5Y06'],
['5R07', '4R07', '3R07', '2R07', '1R07', '1Y07', '2Y07', '3Y07', '4Y07', '5Y07'],
['4R08', '3R08', '2R08', '1R08', '1Y08', '2Y08', '3Y08', '4Y08', '5Y08'],
['4R09', '3R09', '2R09', '1R09', '1Y09', '2Y09', '3Y09', '4Y09', '5Y09'],
['3R10', '2R10', '1R10', '1Y10', '2Y10', '3Y10', '4Y10'],
['3R11', '2R11', '1R11', '1Y11', '2Y11', '3Y11', '4Y11'],
['3R12', '2R12', '1R12', '1Y12', '2Y12', '3Y12'],
['3R13', '2R13', '1R13', '1Y13', '2Y13'],
['4R14', '3R14', '2R14', '1R14', '1Y14'],
['4R15', '3R15', '2R15', '1R15'],
],
'farbton': [
['5R04', '5R05', '5R06', '5R07'],
['4R04', '4R05', '4R06', '4R07', '4R08', '4R09', '4R14', '4R15'],
['3R04', '3R05', '3R06', '3R07', '3R08', '3R09', '3R10', '3R11', '3R12', '3R13', '3R14', '3R15'],
['2R03', '2R04', '2R05', '2R06', '2R07', '2R08', '2R09', '2R10', '2R11', '2R12', '2R13', '2R14', '2R15'],
['1R02', '1R03', '1R04', '1R05', '1R06', '1R07', '1R08', '1R09', '1R10', '1R11', '1R12', '1R13', '1R14',
'1R15'],
['1Y01', '1Y02', '1Y03', '1Y04', '1Y05', '1Y06', '1Y07', '1Y08', '1Y09', '1Y10', '1Y11', '1Y12', '1Y13',
'1Y14'],
['2Y01', '2Y02', '2Y03', '2Y04', '2Y05', '2Y06', '2Y07', '2Y08', '2Y09', '2Y10', '2Y11', '2Y12', '2Y13'],
['3Y01', '3Y02', '3Y03', '3Y04', '3Y05', '3Y06', '3Y07', '3Y08', '3Y09', '3Y10', '3Y11', '3Y12'],
['4Y01', '4Y02', '4Y03', '4Y04', '4Y05', '4Y06', '4Y07', '4Y08', '4Y09', '4Y10', '4Y11'],
['5Y01', '5Y02', '5Y03', '5Y04', '5Y05', '5Y06', '5Y07', '5Y08', '5Y09']
]
}
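# A small robustness sketch: pandas does not create missing directories, so
# ensuring the grouped output folders exist up front avoids a
# FileNotFoundError in the to_csv calls below (exist_ok needs Python 3.2+).
import os
for group in lookup:
    os.makedirs("messdaten/csv/farbkarten_gruppiert_" + group, exist_ok=True)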
for key, value in lookup.items():
for index, step in enumerate(value):
df_step = pd.DataFrame(columns=['Temp', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'R', 'S',
'T', 'U', 'V', 'W'])
for karte in step:
currentFile30Grad = pd.read_csv("messdaten/csv/farbkarten_getrennt_30_grad/" + karte + ".csv")
currentFile35Grad = pd.read_csv("messdaten/csv/farbkarten_getrennt_35_grad/" + karte + ".csv")
currentFile40Grad = pd.read_csv("messdaten/csv/farbkarten_getrennt_40_grad/" + karte + ".csv")
df = pd.concat([currentFile30Grad, currentFile35Grad, currentFile40Grad])
df_step = pd.concat([df_step, df])
df_step.to_csv("messdaten/csv/farbkarten_gruppiert_" + key + "/" + str(index + 1).zfill(2) + '.csv',
index=False)
| [
"[email protected]"
] | |
8655e2676e99c9d122273c120717d9195ba1dbe8 | 745f6b82a6ae2108e5055210c00bced272992ccc | /lib/session.py | 679a6d455cedd3f2ae9d2347ac3091210dba9ab9 | [
"MIT"
] | permissive | kankje/blog | f594a512b7d598bc76e325c9c36b75edb2f5e4b4 | 67fa76b43a3fa2442ea90b025da3ccad9e5a41bf | refs/heads/master | 2020-05-19T03:42:45.309872 | 2014-05-08T11:02:47 | 2014-05-08T11:02:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,262 | py | import pickle
from datetime import timedelta
from uuid import uuid4
from redis import Redis
from werkzeug.datastructures import CallbackDict
from flask.sessions import SessionInterface, SessionMixin
class RedisSession(CallbackDict, SessionMixin):
def __init__(self, initial=None, sid=None, new=False):
def on_update(session):
session.modified = True
CallbackDict.__init__(self, initial, on_update)
self.sid = sid
self.new = new
self.modified = False
class RedisSessionInterface(SessionInterface):
serializer = pickle
session_class = RedisSession
def __init__(self, redis=None, prefix='session:'):
if redis is None:
redis = Redis()
self.redis = redis
self.prefix = prefix
def generate_sid(self):
return str(uuid4())
def get_redis_expiration_time(self, app, session):
if session.permanent:
return app.permanent_session_lifetime
return timedelta(days=1)
def open_session(self, app, request):
sid = request.cookies.get(app.session_cookie_name)
if not sid:
sid = self.generate_sid()
return self.session_class(sid=sid, new=True)
val = self.redis.get(self.prefix + sid)
if val is not None:
data = self.serializer.loads(val)
return self.session_class(data, sid=sid)
return self.session_class(sid=sid, new=True)
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
if not session:
self.redis.delete(self.prefix + session.sid)
if session.modified:
response.delete_cookie(app.session_cookie_name,
domain=domain)
return
redis_exp = self.get_redis_expiration_time(app, session)
cookie_exp = self.get_expiration_time(app, session)
val = self.serializer.dumps(dict(session))
self.redis.setex(self.prefix + session.sid, val,
int(redis_exp.total_seconds()))
response.set_cookie(app.session_cookie_name, session.sid,
expires=cookie_exp, httponly=True,
domain=domain)
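# A minimal wiring sketch (assumes Flask is installed and a local Redis is
# reachable on the default port): attach the interface to an app so that all
# session reads and writes go through Redis. Note that redis-py 3.x changed
# setex to (name, time, value); the call above uses the older
# (name, value, time) argument order.
if __name__ == '__main__':
    from flask import Flask, session

    app = Flask(__name__)
    app.session_interface = RedisSessionInterface()

    @app.route('/')
    def index():
        # Each visit increments a per-browser counter persisted in Redis.
        session['counter'] = session.get('counter', 0) + 1
        return 'counter: %d' % session['counter']

    app.run()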
| [
"[email protected]"
] | |
33d791569b336bc8debbe72e4037f2fe71c44308 | b98fc8f9d303ff7288478e8b4d59ecb9f2269032 | /redditscraper/tests/test_posts.py | e434956a0aceb766bc93aa66c03f375e25d92a30 | [
"MIT"
] | permissive | nsfyn55/redditabusescraper | a359df59ac4718b124cba5f603f80fda6f342a43 | 0c5d210279e873389bbcff722e553b650abfa5b0 | refs/heads/master | 2022-12-19T11:53:49.832617 | 2018-11-07T15:47:26 | 2018-11-07T15:47:26 | 153,163,930 | 0 | 0 | MIT | 2022-12-08T02:26:02 | 2018-10-15T18:40:28 | Python | UTF-8 | Python | false | false | 3,462 | py | from redditscraper.data import posts, users
from unittest import mock
import json
import datetime
post_file = open('redditscraper/tests/data/posts.json', 'r')
post_json = json.loads(post_file.read())
single_post = post_json['data']['children'][0]
def test_clean_title():
raw_title = "Obama and Biden ride again (as detectives!) in the fun mystery, 'Hope Never Dies'"
expected = 'obama and biden ride again as detectives in the fun mystery hope never dies'
    actual = posts._clean_title(raw_title)
    assert actual == expected
def test_clean_title_single_tick():
raw_title = "trump says ‘rogue killers’ may be involved in saudi journalist case"
expected = 'trump says rogue killers may be involved in saudi journalist case'
actual = posts._clean_title(raw_title)
assert actual == expected
def test_convert_post():
with mock.patch('redditscraper.data.posts._clean_title') as m:
with mock.patch('redditscraper.data.posts._get_user_by_name') as user:
user.return_value = users.User(
username="nsfyn55",
account_created_date=datetime.datetime.fromtimestamp(1326982593),
comment_karma=10436,
link_karma=352)
m.return_value = "clean title"
expected = posts.Post(
pid="92ntdm",
title="clean title",
up=0,
down=0,
domain='usatoday.com',
author_link_karma=352,
author_comment_karma=10436,
author='nsfyn55',
author_account_created_date=datetime.datetime.fromtimestamp(1326982593),
created=datetime.datetime(2018, 7, 28, 13, 27, 22))
actual = posts.convert_postjson_to_tuple(single_post)
assert expected == actual
def test_convert_posts():
with mock.patch('redditscraper.data.posts._clean_title') as m:
with mock.patch('redditscraper.data.posts._get_user_by_name') as user:
user.return_value = users.User(
username="nsfyn55",
account_created_date=datetime.datetime.fromtimestamp(1326982593),
comment_karma=10436,
link_karma=352)
m.return_value = "clean title"
expected1 = posts.Post(
pid="92ntdm",
title="clean title",
up=0,
down=0,
domain='usatoday.com',
author='nsfyn55',
author_link_karma=352,
author_comment_karma=10436,
author_account_created_date=datetime.datetime.fromtimestamp(1326982593),
created=datetime.datetime(2018, 7, 28, 13, 27, 22))
expected2 = posts.Post(
pid="92ntd1",
title="clean title",
up=1,
down=0,
domain='apnews.com',
author='nsfyn55',
author_link_karma=352,
author_comment_karma=10436,
author_account_created_date=datetime.datetime.fromtimestamp(1326982593),
created=datetime.datetime(2018, 7, 28, 13, 27, 19))
actual = posts.convert_fullpost_to_list(post_json)
expected = [expected1, expected2]
assert expected == actual
| [
"[email protected]"
] | |
42078de29267bd24bf06287ba6b1afc98394fac3 | 9b9427a415d118640f21ae277103d80ac59d814a | /MyLang/LangPi/views.py | fdf417f79cf95af9e5b2d21ed669de96e23ed8a8 | [] | no_license | MinGH0311/2017- | f57de7fb679301fbe5303dded3f1c5403b404e0b | 8e243917e274916677bef01b0a8df155c4660c71 | refs/heads/master | 2021-01-19T16:43:23.794692 | 2017-08-22T04:23:44 | 2017-08-22T04:23:44 | 101,022,912 | 0 | 0 | null | 2017-08-22T05:00:08 | 2017-08-22T05:00:08 | null | UTF-8 | Python | false | false | 2,359 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import Login
import Ajax
import Listen
import Read
import Voca
import Util
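# Each view below just delegates to a feature module. A urls.py sketch for a
# few of them (route names here are hypothetical, for illustration only):
#
#   from django.conf.urls import url
#   from LangPi import views
#
#   urlpatterns = [
#       url(r'^login/$', views.login),
#       url(r'^register/$', views.register),
#       url(r'^voca/exam/$', views.voca_exam),
#   ]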
def login(request):
return Login.login(request)
def register(request):
return Login.register(request)
def find_id(request):
return Login.find_id(request)
def change_id(request):
return Login.change_id(request)
def email_check_v2(request):
return Ajax.email_check_v2(request)
def email_check(request):
return Ajax.email_check(request)
def id_check(request):
return Ajax.id_check(request)
def get_voca_score(request):
return Ajax.get_voca_score(request)
def get_read_score(request):
return Ajax.get_read_score(request)
def get_listen_score(request):
return Ajax.get_listening_score(request)
def youtube_search(video_id=0, video_name='', max_results=5):
return Listen.youtube_search(video_id, video_name, max_results)
# Parameter
# -> url: the YouTube URL to add
def adder(request, url):
return Listen.adder(request, url)
def recommandation(url, num, cur):
return Util.recommandation(url, num, cur)
def home(request):
return Util.home(request)
def bullet_board(request):
return Util.bullet_board(request)
def show_memo(request):
return Util.show_memo(request)
def edit(request):
return Util.edit(request)
def write(request):
return Util.write(request)
def dictation(request, name):
return Listen.dictation(request, name)
def video_list(request):
return Listen.video_list(request)
def add_video(request):
return Listen.add_video(request)
def reading(request):
return Read.reading(request)
def search(request):
return Listen.search(request)
def mypage(request):
return Util.mypage(request)
def mypage_listening(request):
return Util.mypage_listening(request)
def mypage_reading(request):
return Util.mypage_reading(request)
def mypage_vocabulary(request):
return Util.mypage_vocabulary(request)
def mypage_message(request):
return Util.mypage_message(request)
def mypage_likedislike(request):
return Util.mypage_likedislike(request)
def mypage_board(request):
return Util.mypage_board(request)
def voca_exam(request):
return Voca.voca_exam(request)
def add_voca(request):
return Voca.add_voca(request)
def delete(request):
return Login.delete(request) | [
"[email protected]"
] | |
7aff18c1f4eb1588572df7d1e9b78d0096ea42c8 | 8dbe574f3b20308d79ef37643570d7dec15e67d9 | /cn.zero/py.ori.fmt/m1140.bin.py | d1675897ba84651c10bf4eed04b87afd190c32a7 | [] | no_license | xaeingking/ZeroAoVoiceScripts | 62526d004bd02e645970930ecd4b6053809092ab | 512c1fd544954a38c92fc097f5b0c006031ee87d | refs/heads/master | 2020-05-20T17:04:55.028776 | 2019-01-29T10:40:44 | 2019-01-29T10:40:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,701 | py | from ZeroScenarioHelper import *
def main():
CreateScenaFile(
"m1140.bin", # FileName
"m1140", # MapName
"m1140", # Location
0x006E, # MapIndex
"ed7304",
0x00080000, # Flags
("", "", "", "", "", ""), # include
0x00, # PlaceNameNumber
0x00, # PreInitFunctionIndex
b'\x00\xff\xff', # Unknown_51
# Information
[0, 0, -1000, 0, 0, 0, 34000, 262, 30, 45, 0, 360, 0, -28000, -30000, 0, 0, 1, 110, 0, 1, 0, 2],
)
BuildStringList((
"m1140", # 0
"钢铁完全体", # 1
"bm1040", # 2
"bm1040", # 3
"bm1040", # 4
))
ATBonus("ATBonus_24C", 100, 5, 1, 5, 1, 5, 1, 5, 5, 5, 5, 5, 5, 0, 0, 0)
ATBonus("ATBonus_23C", 100, 5, 1, 5, 1, 5, 1, 5, 5, 0, 0, 0, 1, 0, 0, 0)
Sepith("Sepith_851", 6, 6, 15, 9, 0, 0, 0)
Sepith("Sepith_859", 4, 4, 0, 0, 9, 9, 9)
MonsterBattlePostion("MonsterBattlePostion_29C", 8, 8, 180)
MonsterBattlePostion("MonsterBattlePostion_2A0", 5, 9, 180)
MonsterBattlePostion("MonsterBattlePostion_2A4", 11, 10, 180)
MonsterBattlePostion("MonsterBattlePostion_2A8", 6, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_2AC", 10, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_2B0", 13, 13, 180)
MonsterBattlePostion("MonsterBattlePostion_2B4", 4, 14, 180)
MonsterBattlePostion("MonsterBattlePostion_2B8", 8, 14, 180)
MonsterBattlePostion("MonsterBattlePostion_2FC", 7, 4, 0)
MonsterBattlePostion("MonsterBattlePostion_300", 10, 11, 225)
MonsterBattlePostion("MonsterBattlePostion_304", 4, 7, 90)
MonsterBattlePostion("MonsterBattlePostion_308", 12, 7, 270)
MonsterBattlePostion("MonsterBattlePostion_30C", 4, 11, 135)
MonsterBattlePostion("MonsterBattlePostion_310", 11, 4, 315)
MonsterBattlePostion("MonsterBattlePostion_314", 7, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_318", 5, 5, 45)
MonsterBattlePostion("MonsterBattlePostion_27C", 7, 9, 180)
MonsterBattlePostion("MonsterBattlePostion_280", 11, 10, 180)
MonsterBattlePostion("MonsterBattlePostion_284", 10, 13, 180)
MonsterBattlePostion("MonsterBattlePostion_288", 5, 11, 180)
MonsterBattlePostion("MonsterBattlePostion_28C", 12, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_290", 4, 14, 180)
MonsterBattlePostion("MonsterBattlePostion_294", 14, 14, 180)
MonsterBattlePostion("MonsterBattlePostion_298", 2, 13, 180)
MonsterBattlePostion("MonsterBattlePostion_31C", 8, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_320", 3, 8, 180)
MonsterBattlePostion("MonsterBattlePostion_324", 12, 8, 180)
MonsterBattlePostion("MonsterBattlePostion_328", 0, 0, 180)
MonsterBattlePostion("MonsterBattlePostion_32C", 0, 0, 180)
MonsterBattlePostion("MonsterBattlePostion_330", 0, 0, 180)
MonsterBattlePostion("MonsterBattlePostion_334", 0, 0, 180)
MonsterBattlePostion("MonsterBattlePostion_338", 0, 0, 180)
# monster count: 8
BattleInfo(
"BattleInfo_33C", 0x0000, 21, 6, 60, 8, 1, 25, 0, "bm1040", "Sepith_851", 60, 25, 10, 5,
(
("ms65000.dat", 0, 0, 0, 0, 0, 0, 0, "MonsterBattlePostion_29C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms65000.dat", "ms65000.dat", 0, 0, 0, 0, 0, 0, "MonsterBattlePostion_27C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms65000.dat", "ms62700.dat", "ms65000.dat", 0, 0, 0, 0, 0, "MonsterBattlePostion_29C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms65000.dat", "ms65000.dat", "ms62700.dat", "ms65000.dat", 0, 0, 0, 0, "MonsterBattlePostion_27C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
)
)
BattleInfo(
"BattleInfo_404", 0x0000, 21, 6, 60, 8, 1, 25, 0, "bm1040", "Sepith_859", 60, 25, 10, 5,
(
("ms62700.dat", 0, 0, 0, 0, 0, 0, 0, "MonsterBattlePostion_29C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms62700.dat", "ms62700.dat", 0, 0, 0, 0, 0, 0, "MonsterBattlePostion_27C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms62700.dat", "ms65000.dat", "ms62700.dat", 0, 0, 0, 0, 0, "MonsterBattlePostion_29C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms62700.dat", "ms62700.dat", "ms65000.dat", "ms62700.dat", 0, 0, 0, 0, "MonsterBattlePostion_27C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
)
)
# event battle count: 1
BattleInfo(
"BattleInfo_4CC", 0x0000, 40, 6, 0, 0, 1, 0, 0, "bm1040", 0x00000000, 100, 0, 0, 0,
(
("ms72900.dat", "ms72900.dat", "ms72900.dat", 0, 0, 0, 0, 0, "MonsterBattlePostion_31C", "MonsterBattlePostion_2FC", "ed7401", "ed7403", "ATBonus_23C"),
(),
(),
(),
)
)
AddCharChip((
"chr/ch00000.itc", # 00
"chr/ch00000.itc", # 01
"chr/ch00000.itc", # 02
"chr/ch00000.itc", # 03
"chr/ch00000.itc", # 04
"chr/ch00000.itc", # 05
"chr/ch00000.itc", # 06
"chr/ch00000.itc", # 07
"chr/ch00000.itc", # 08
"chr/ch00000.itc", # 09
"chr/ch00000.itc", # 0A
"chr/ch00000.itc", # 0B
"chr/ch00000.itc", # 0C
"chr/ch00000.itc", # 0D
"chr/ch00000.itc", # 0E
"chr/ch00000.itc", # 0F
"monster/ch65050.itc", # 10
"monster/ch65050.itc", # 11
"monster/ch62750.itc", # 12
"monster/ch62750.itc", # 13
"monster/ch72950.itc", # 14
"monster/ch72951.itc", # 15
))
DeclNpc(0, -27500, 23000, 0, 484, 0x0, 0, 20, 0, 0, 0, 255, 255, 255, 0)
DeclMonster(20770, 2670, -28000, 0x1010000, "BattleInfo_33C", 0, 16, 0xFFFF, 0, 1)
DeclMonster(7940, 18540, -29190, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclMonster(-1260, 21090, -29200, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclMonster(-12520, 16300, -29190, 0x1010000, "BattleInfo_33C", 0, 16, 0xFFFF, 0, 1)
DeclMonster(-21210, -550, -28000, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclMonster(4300, 4670, -27200, 0x1010000, "BattleInfo_33C", 0, 16, 0xFFFF, 0, 1)
DeclMonster(-7580, 4170, -27200, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclMonster(320, -6970, -27200, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclActor(0, -29000, 23000, 1200, 0, -28000, 23000, 0x007C, 0, 3, 0x0000)
ChipFrameInfo(1000, 0, [0, 1, 2, 3, 4, 5]) # 0
ChipFrameInfo(2000, 0, [0, 1, 2, 3, 4, 5]) # 1
ChipFrameInfo(1000, 0, [0, 1, 2, 3, 4, 5]) # 2
ChipFrameInfo(2000, 0, [0, 1, 2, 3, 4, 5]) # 3
ScpFunction((
"Function_0_550", # 00, 0
"Function_1_56F", # 01, 1
"Function_2_570", # 02, 2
"Function_3_588", # 03, 3
))
def Function_0_550(): pass
label("Function_0_550")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_56E")
OP_A1(0xFE, 0x3E8, 0x8, 0x0, 0x1, 0x2, 0x3, 0x4, 0x3, 0x2, 0x1)
Jump("Function_0_550")
label("loc_56E")
Return()
# Function_0_550 end
def Function_1_56F(): pass
label("Function_1_56F")
Return()
# Function_1_56F end
def Function_2_570(): pass
label("Function_2_570")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x11B, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_583")
OP_70(0x0, 0x0)
Jump("loc_587")
label("loc_583")
OP_70(0x0, 0x1E)
label("loc_587")
Return()
# Function_2_570 end
def Function_3_588(): pass
label("Function_3_588")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x72, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_617")
TalkBegin(0xFF)
SetMapFlags(0x8000000)
SetChrName("")
#A0001
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"从宝箱中感觉到了高级魔兽的气息。\x01",
"【推测魔兽等级40】\x01",
"要打开宝箱吗?\x02",
)
)
Menu(
0,
-1,
-1,
1,
(
"是\x01", # 0
"否\x01", # 1
)
)
MenuEnd(0x0)
OP_60(0x0)
OP_57(0x0)
OP_5A()
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_617")
ClearMapFlags(0x8000000)
TalkEnd(0xFF)
Return()
label("loc_617")
SetMapFlags(0x8000000)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x11B, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_7D3")
Sound(14, 0, 100, 0)
OP_71(0x0, 0x0, 0x1E, 0x0, 0x0)
Sleep(500)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x72, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_710")
OP_A7(0x8, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
TurnDirection(0x8, 0x0, 0)
OP_98(0x8, 0x0, 0x3E8, 0x0, 0x0, 0x0)
def lambda_670():
OP_98(0xFE, 0x0, 0xFFFFFC18, 0x0, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_670)
def lambda_68A():
OP_A7(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x3E8)
ExitThread()
QueueWorkItem(0x8, 2, lambda_68A)
ClearChrFlags(0x8, 0x80)
SetChrFlags(0x8, 0x8000)
#A0002
AnonymousTalk(
0x3E7,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"出现了魔兽!\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
WaitChrThread(0x8, 1)
Battle("BattleInfo_4CC", 0x0, 0x0, 0x0, 0x0, 0xFF)
SetChrFlags(0x8, 0x80)
ClearChrFlags(0x8, 0x8000)
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
(0, "loc_6F1"),
(2, "loc_700"),
(1, "loc_70D"),
(SWITCH_DEFAULT, "loc_710"),
)
label("loc_6F1")
SetScenarioFlags(0x72, 4)
OP_70(0x0, 0x1E)
Sleep(500)
Jump("loc_710")
label("loc_700")
OP_70(0x0, 0x0)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
label("loc_70D")
OP_B7(0x0)
Return()
label("loc_710")
Jc((scpexpr(EXPR_EXEC_OP, "AddItemNumber('暗之刃', 1)"), scpexpr(EXPR_END)), "loc_767")
FadeToDark(300, 0, 100)
Sound(17, 0, 100, 0)
SetMessageWindowPos(-1, -1, -1, -1)
#A0003
AnonymousTalk(
0x3E7,
(
scpstr(SCPSTR_CODE_ITEM, '暗之刃'),
scpstr(SCPSTR_CODE_COLOR, 0x0),
"获得了。\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
SetMessageWindowPos(14, 280, 60, 3)
FadeToBright(300, 0)
SetScenarioFlags(0x11B, 0)
OP_DE(0x6, 0x0)
Jump("loc_7CE")
label("loc_767")
FadeToDark(300, 0, 100)
#A0004
AnonymousTalk(
0x3E7,
(
"宝箱里装有",
scpstr(SCPSTR_CODE_ITEM, '暗之刃'),
scpstr(SCPSTR_CODE_COLOR, 0x0),
"。\x01",
"不过现有的数量太多,",
scpstr(SCPSTR_CODE_ITEM, '暗之刃'),
scpstr(SCPSTR_CODE_COLOR, 0x0),
"不能再拿更多了。\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
FadeToBright(300, 0)
Sound(15, 0, 100, 0)
OP_71(0x0, 0x1E, 0x0, 0x0, 0x0)
label("loc_7CE")
Jump("loc_805")
label("loc_7D3")
FadeToDark(300, 0, 100)
#A0005
AnonymousTalk(
0x3E7,
(
scpstr(0x6),
scpstr(SCPSTR_CODE_COLOR, 0x5),
"宝箱里什么都没有。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
FadeToBright(300, 0)
label("loc_805")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_3_588 end
SaveToFile()
Try(main)
| [
"[email protected]"
] | |
2d4343cd3084b61e1e48672feeb7de774d2d4833 | 6f3b3f29b0ed43f056526a7d96df27c623cf8a29 | /czsc/enum.py | ccd2fd8436e1ff3e6e6431b4b1183683bb279deb | [
"MIT"
] | permissive | dst1213/czsc | a163c362d162110557e64e8ea8b41350d4d90a00 | 939803a9b196c19db3d8498f63276a4fdb8a442b | refs/heads/master | 2023-04-22T04:17:22.703347 | 2021-04-30T13:53:40 | 2021-04-30T13:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,016 | py | # coding: utf-8
from enum import Enum
class Mark(Enum):
D = "底分型"
G = "顶分型"
class Direction(Enum):
Up = "向上"
Down = "向下"
class Freq(Enum):
F1 = "1分钟"
F5 = "5分钟"
F15 = "15分钟"
F30 = "30分钟"
F60 = "60分钟"
D = "日线"
W = "周线"
M = "月线"
class Signals(Enum):
Other = "Other~其他"
Y = "Y~是"
N = "N~否"
INB = "INB~向下笔买点区间"
INS = "INS~向上笔卖点区间"
FXB = "FXB~向下笔结束分型左侧高点升破"
FXS = "FXS~向上笔结束分型左侧低点跌破"
BU0 = "BU0~向上笔顶分完成"
BU1 = "BU1~向上笔走势延伸"
BD0 = "BD0~向下笔底分完成"
BD1 = "BD1~向下笔走势延伸"
# TK = Triple K
TK1 = "TK1~三K底分"
TK2 = "TK2~三K上涨"
TK3 = "TK3~三K顶分"
TK4 = "TK4~三K下跌"
# ==================================================================================================================
# 信号值编码规则:
# 多空:L - 多头信号;S - 空头信号;
# 编号:A0 - A类基础型;A1 - A类变种1 ... 以此类推;基础型有着特殊含义,用于因子组合,各种变种形态编号主要用于形态对比研究。
# 组合规则:笔数_多空_编号;如 LA0 表示多头信号A0
# ==================================================================================================================
LA0 = "LA0~aAb式底背驰"
LB0 = "LB0~aAbcd式底背驰"
LC0 = "LC0~aAbBc式底背驰"
LD0 = "LD0~abcAd式底背驰"
LE0 = "LE0~ABC式底背驰"
LF0 = "LF0~类趋势底背驰"
LG0 = "LG0~上颈线突破"
LH0 = "LH0~向上中枢完成"
LI0 = "LI0~三买"
LJ0 = "LJ0~向上三角扩张中枢"
LK0 = "LK0~向上三角收敛中枢"
LL0 = "LL0~向上平台型中枢"
# ------------------------------------------------------------------------------------------------------------------
LA1 = "LA1~aAb式底背驰特例一"
LA2 = "LA2~aAb式底背驰特例二"
LA3 = "LA3~aAb式底背驰特例三"
LB1 = "LB1~aAbcd式底背驰特例一"
LB2 = "LB2~aAbcd式底背驰特例二"
LB3 = "LB3~aAbcd式底背驰特例三"
LC1 = "LC1~aAbBc式底背驰特例一"
LC2 = "LC2~aAbBc式底背驰特例二"
LC3 = "LC3~aAbBc式底背驰特例三"
LD1 = "LD1~abcAd式底背驰特例一"
LD2 = "LD2~abcAd式底背驰特例二"
LD3 = "LD3~abcAd式底背驰特例三"
LE1 = "LE1~ABC式底背驰特例一"
LE2 = "LE2~ABC式底背驰特例二"
LE3 = "LE3~ABC式底背驰特例三"
LF1 = "LF1~类趋势底背驰特例一"
LF2 = "LF2~类趋势底背驰特例二"
LF3 = "LF3~类趋势底背驰特例三"
LG1 = "LG1~上颈线突破特例一"
LG2 = "LG2~上颈线突破特例二"
LG3 = "LG3~上颈线突破特例三"
LH1 = "LH1~向上中枢完成特例一"
LH2 = "LH2~向上中枢完成特例二"
LH3 = "LH3~向上中枢完成特例三"
LI1 = "LI1~三买特例一"
LI2 = "LI2~三买特例二"
LI3 = "LI3~三买特例三"
LJ1 = "LJ1~向上三角扩张中枢特例一"
LJ2 = "LJ2~向上三角扩张中枢特例二"
LJ3 = "LJ3~向上三角扩张中枢特例三"
LK1 = "LK1~向上三角收敛中枢特例一"
LK2 = "LK2~向上三角收敛中枢特例二"
LK3 = "LK3~向上三角收敛中枢特例三"
LL1 = "LL1~向上平台型中枢特例一"
LL2 = "LL2~向上平台型中枢特例二"
LL3 = "LL3~向上平台型中枢特例三"
# ------------------------------------------------------------------------------------------------------------------
SA0 = "SA0~aAb式顶背驰"
SB0 = "SB0~aAbcd式顶背驰"
SC0 = "SC0~aAbBc式顶背驰"
SD0 = "SD0~abcAd式顶背驰"
SE0 = "SE0~ABC式顶背驰"
SF0 = "SF0~类趋势顶背驰"
SG0 = "SG0~下颈线突破"
SH0 = "SH0~向下中枢完成"
SI0 = "SI0~三卖"
SJ0 = "SJ0~向下三角扩张中枢"
SK0 = "SK0~向下三角收敛中枢"
SL0 = "SL0~向下平台型中枢"
# ------------------------------------------------------------------------------------------------------------------
SA1 = "SA1~aAb式顶背驰特例一"
SA2 = "SA2~aAb式顶背驰特例二"
SA3 = "SA3~aAb式顶背驰特例三"
SB1 = "SB1~aAbcd式顶背驰特例一"
SB2 = "SB2~aAbcd式顶背驰特例二"
SB3 = "SB3~aAbcd式顶背驰特例三"
SC1 = "SC1~aAbBc式顶背驰特例一"
SC2 = "SC2~aAbBc式顶背驰特例二"
SC3 = "SC3~aAbBc式顶背驰特例三"
SD1 = "SD1~abcAd式顶背驰特例一"
SD2 = "SD2~abcAd式顶背驰特例二"
SD3 = "SD3~abcAd式顶背驰特例三"
SE1 = "SE1~ABC式顶背驰特例一"
SE2 = "SE2~ABC式顶背驰特例二"
SE3 = "SE3~ABC式顶背驰特例三"
SF1 = "SF1~类趋势顶背驰特例一"
SF2 = "SF2~类趋势顶背驰特例二"
SF3 = "SF3~类趋势顶背驰特例三"
SG1 = "SG1~下颈线突破特例一"
SG2 = "SG2~下颈线突破特例二"
SG3 = "SG3~下颈线突破特例三"
SH1 = "SH1~向下中枢完成特例一"
SH2 = "SH2~向下中枢完成特例二"
SH3 = "SH3~向下中枢完成特例三"
SI1 = "SI1~三卖特例一"
SI2 = "SI2~三卖特例二"
SI3 = "SI3~三卖特例三"
SJ1 = "SJ1~向下三角扩张中枢特例一"
SJ2 = "SJ2~向下三角扩张中枢特例二"
SJ3 = "SJ3~向下三角扩张中枢特例三"
SK1 = "SK1~向下三角收敛中枢特例一"
SK2 = "SK2~向下三角收敛中枢特例二"
SK3 = "SK3~向下三角收敛中枢特例三"
SL1 = "SL1~向下平台型中枢特例一"
SL2 = "SL2~向下平台型中枢特例二"
SL3 = "SL3~向下平台型中枢特例三"
# --------------------------------------------------------------------------------------------
# 信号值编码规则:
# 笔数:X3 - 三笔信号;X5 - 五笔信号;X7 - 七笔信号;X9 - 九笔信号;
# 多空:L - 多头信号;S - 空头信号;
# 编号:A0 - A类基础型;A1 - A类变种1 ... 以此类推
# 组合规则:笔数_多空_编号;如 X5LA0 表示五笔多头信号A0
# ============================================================================================
# 三笔形态信号
# 具体描述:
# --------------------------------------------------------------------------------------------
X3LA0 = "X3LA0~向下不重合"
X3LB0 = "X3LB0~向下奔走型中枢"
X3LC0 = "X3LC0~向下三角收敛中枢"
X3LD0 = "X3LD0~向下三角扩张中枢"
X3LE0 = "X3LE0~向下盘背中枢"
X3LF0 = "X3LF0~向下无背中枢"
X3SA0 = "X3SA0~向上不重合"
X3SB0 = "X3SB0~向上奔走型中枢"
X3SC0 = "X3SC0~向上三角收敛中枢"
X3SD0 = "X3SD0~向上三角扩张中枢"
X3SE0 = "X3SE0~向上盘背中枢"
X3SF0 = "X3SF0~向上无背中枢"
class Factors(Enum):
Other = "Other~其他"
Y = "Y~是"
N = "N~否"
# ==================================================================================================================
# 因子值编码规则:
# 类型:
# L1 - 一买/类一买;L2 - 二买/类二买;L3 - 三买/类三买;
# S1 - 一卖/类一卖;S2 - 二卖/类二卖;S3 - 三卖/类三卖;
# 编号:A0 - A类基础型;A1 - A类变种1 ... 以此类推
# 组合规则为 类型_编号
# ==================================================================================================================
L1A0 = "L1A0~一买"
L1A1 = "L1A1~一买特例一"
L1A2 = "L1A2~一买特例二"
L1A3 = "L1A3~一买特例三"
L1A4 = "L1A4~一买特例四"
L1A5 = "L1A5~一买特例五"
L2A0 = "L2A0~二买"
L2A1 = "L2A1~二买特例一"
L2A2 = "L2A2~二买特例二"
L2A3 = "L2A3~二买特例三"
L2A4 = "L2A4~二买特例四"
L2A5 = "L2A5~二买特例五"
L3A0 = "L3A0~三买"
L3A1 = "L3A1~三买特例一"
L3A2 = "L3A2~三买特例二"
L3A3 = "L3A3~三买特例三"
L3A4 = "L3A4~三买特例四"
L3A5 = "L3A5~三买特例五"
# ------------------------------------------------------------------------------------------------------------------
S1A0 = "S1A0~一卖"
S1A1 = "S1A1~一卖特例一"
S1A2 = "S1A2~一卖特例二"
S1A3 = "S1A3~一卖特例三"
S1A4 = "S1A4~一卖特例四"
S1A5 = "S1A5~一卖特例五"
S2A0 = "S2A0~二卖"
S2A1 = "S2A1~二卖特例一"
S2A2 = "S2A2~二卖特例二"
S2A3 = "S2A3~二卖特例三"
S2A4 = "S2A4~二卖特例四"
S2A5 = "S2A5~二卖特例五"
S3A0 = "S3A0~三卖"
S3A1 = "S3A1~三卖特例一"
S3A2 = "S3A2~三卖特例二"
S3A3 = "S3A3~三卖特例三"
S3A4 = "S3A4~三卖特例四"
S3A5 = "S3A5~三卖特例五"
# ==================================================================================================================
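# A minimal consumption sketch: every member value follows the
# "code~description" convention documented above, so both parts can be
# recovered with one split.
if __name__ == '__main__':
    code, desc = Signals.LA0.value.split("~")
    print(code, desc)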
| [
"[email protected]"
] | |
2e1e60216857e39ac4b0889424814102c219c482 | c06f60d7c96b0cf3c1d1d09c1583cec70e7eee83 | /알고리즘입문/implementation/find_representative_value/main.py | e1f447a597cc06cbe0153ef5a3e9cb73d8c5c1d3 | [] | no_license | hongki-jung/algorithm | 8b77418f8597287d390757c4c596cf2b8a12b186 | 5b50c1bdc7c598a2be98f60cd4b2d2663b7db7a0 | refs/heads/main | 2023-08-06T17:29:13.494305 | 2021-09-21T15:50:31 | 2021-09-21T15:50:31 | 381,289,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | # import sys
# sys.stdin=open("input.txt","r")
# n = int(input())
# a = list(map(int, input().split()))
# tmp = 0
# for i in range(len(a)):
# tmp+=a[i]
# average = int(tmp/len(a))
# initNumber = float('inf')
# for i in range(len(a)):
# if(abs(average-a[i]) < initNumber):
# initNumber = abs(average-a[i])
# if(abs(average-a[i]) == initNumber):
# print(a[i-1])
# print(initNumber)
import sys
sys.stdin = open("input.txt", "r")
n = int(input())
a = list(map(int, input().split()))
ave = (sum(a) / n) + 0.5
ave = int(ave)
# ave = round(sum(a)/n)
# round() rounds at the first decimal place, but note: it uses round-half-even (banker's rounding)
# sum(): adds up all values in the list
min = 2124999900
# idx: student number (index value)
# x: score (value)
for idx, x in enumerate(a):
    tmp = abs(x - ave)  # abs() returns the absolute value
if tmp < min:
min = tmp
        score = x  # the score value
res = idx + 1
# = else if
elif tmp == min:
        if x > score:  # x: the current student's score; score: the best answer so far
score = x
res = idx + 1
print(ave, res)
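# Why round() was avoided above: Python 3's round() is round-half-even
# (banker's rounding), e.g. round(2.5) == 2 while round(3.5) == 4, so adding
# 0.5 and truncating with int() reproduces the usual round-half-up for the
# non-negative averages used here.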
| [
"[email protected]"
] | |
9c3715620a5b25b37c3979ce308db7ca9c7a30c0 | c652bd024539f563ddabe17d2f88fcdc32a5fb64 | /artificial-intelligence/predicting_house_prices.py | 272485d365d1e94d420052fa97f33bbbde45832e | [] | no_license | luckeciano/hackerrank | b1c3f0471f65e0e46dcd38f7c92fb66691a6607d | e848e8dab9f94f5fa8fdb08d8657251a188f6faf | refs/heads/master | 2020-04-21T03:18:50.942721 | 2019-02-10T20:36:02 | 2019-02-10T20:36:02 | 169,281,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | import numpy as np
from sklearn.linear_model import LinearRegression
F_N = raw_input().split()
F = int(F_N[0])
N = int(F_N[1])
total_features = []
Y = []
for n in range(N):
x_y = map(float, raw_input().rstrip().split())
feature_list = []
for i in range(F):
feature_list.append(x_y[i])
total_features.append(feature_list)
Y.append(x_y[F])
tc = int(raw_input())
features_tc = []
for i in range(tc):
ft_tc = map(float, raw_input().rstrip().split())
features_tc.append(ft_tc)
reg = LinearRegression().fit(total_features, Y)
y_test = reg.predict(features_tc)
for y in y_test:
print(y) | [
"[email protected]"
] | |
a7f1d277305c8d83ed636dbd93ae990fb9151277 | 34652a47355a8dbe9200db229a1bbc62619de364 | /Maths/diff_eqns/electrogravRK45adaptive.py | c27cb2519beb5537ac886dfec50963acbdc8f86c | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,226 | py | from __future__ import division
from pylab import *
import os
#Evaluates the derivative functions
def derivs(time, last_values, properties, alpha):
particle_num = size(last_values, axis = 1)
    spatial_dims = size(last_values, axis = 0)//2    # integer division: the result is used as an array shape
    grad = zeros((spatial_dims * 2, particle_num))
for j in arange(particle_num):
#Calc position derivative
grad[0, j] = last_values[1, j]
grad[2, j] = last_values[3, j]
grad[4, j] = last_values[5, j]
#Calc velocity derivative
field_sum = calc_field(last_values, j, properties)
grad[1, j] = properties[1, j] / properties[0, j] * alpha * field_sum[0]
grad[3, j] = properties[1, j] / properties[0, j] * alpha * field_sum[1]
grad[5, j] = properties[1, j] / properties[0, j] * alpha * field_sum[2]
return grad
#Evaluate the summation to calculate the field at one particle due to all the others
def calc_field(last_values, at_particle, properties):
particle_num = size(last_values, axis = 1)
    spatial_dims = size(last_values, axis = 0)//2    # integer division: the result is used as an array shape
    #Calculate summation
    field_sum = zeros(spatial_dims)
denominator = zeros(1)
for i in arange(particle_num):
if i != at_particle:
delx1 = last_values[0, at_particle] - last_values[0, i]
delx2 = last_values[2, at_particle] - last_values[2, i]
delx3 = last_values[4, at_particle] - last_values[4, i]
denominator = ((delx1**2 + delx2**2 + delx3**2)**(3./2.))
field_sum[0] = field_sum[0] + delx1 * properties[1, i] / denominator
field_sum[1] = field_sum[1] + delx2 * properties[1, i] / denominator
field_sum[2] = field_sum[2] + delx3 * properties[1, i] / denominator
return field_sum
#Energy calculator
def energy_calc(last_values, properties, alpha):
#Potential energy
pot = zeros(1)
for i in range(size(last_values, 1)):
for j in range(i+1, size(last_values, 1)):
delx1 = last_values[0, i] - last_values[0, j]
delx2 = last_values[2, i] - last_values[2, j]
delx3 = last_values[4, i] - last_values[4, j]
denominator = (delx1**2 + delx2**2 + delx3**2)**(1/2)
pot = pot + alpha * properties[1, i] * properties[1, j] / denominator
#Kinetic energy
kin = zeros(1)
for i in range(size(last_values, 1)):
speed = (last_values[1, i]**2 + last_values[3, i]**2 + last_values[5, i]**2)**(1/2)
kin = kin + 0.5 * properties[0, i] * speed**2
#Total energy
tot_energy = pot + kin
return [pot, kin, tot_energy]
#Coefficients used in the Runge-Kutta loop
def solver_coef():
a = zeros(7)
b = zeros((7, 7))
c = zeros(7)
cstar = zeros(7)
a[0], a[1], a[2], a[3], a[4], a[5], a[6] = 0, 1/5, 3/10, 4/5, 8/9, 1, 1
b[1, 0] = 1/5
b[2, 0], b[2, 1] = 3/40, 9/40
b[3, 0], b[3, 1], b[3, 2] = 44/45, -56/15, 32/9
b[4, 0], b[4, 1], b[4, 2], b[4, 3] = 19372/6561, -25360/2187, 64448/6561, -212/729
b[5, 0], b[5, 1], b[5, 2], b[5, 3], b[5, 4] = 9017/3168, -355/33, 46732/5247, 49/176, -5103/18656
b[6, 0], b[6, 1], b[6, 2], b[6, 3], b[6, 4], b[6, 5] = 35/384, 0, 500/1113, 125/192, -2187/6784, 11/84
c[0], c[1], c[2], c[3], c[4], c[5], c[6] = 5179/57600, 0, 7571/16695, 393/640, -92097/339200, 187/2100, 1/40
cstar[0], cstar[1], cstar[2], cstar[3], cstar[4], cstar[5], cstar[6] = 35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0
return [a, b, c, cstar]
#The Runge-Kutta code
def rkstep(sol, time, h, k7):
k1 = k7
k2 = h * derivs(time + a[1] * h, sol + b[1, 0] * k1, properties, alpha)
k3 = h * derivs(time + a[2] * h, sol + b[2, 0] * k1 + b[2, 1] * k2, properties, alpha)
k4 = h * derivs(time + a[3] * h, sol + b[3, 0] * k1 + b[3, 1] * k2 + b[3, 2] * k3, properties, alpha)
k5 = h * derivs(time + a[4] * h, sol + b[4, 0] * k1 + b[4, 1] * k2 + b[4, 2] * k3 + b[4, 3] * k4, properties, alpha)
k6 = h * derivs(time + a[5] * h, sol + b[5, 0] * k1 + b[5, 1] * k2 + b[5, 2] * k3 + b[5, 3] * k4 + b[5, 4] * k5, properties, alpha)
k7 = h * derivs(time + a[6] * h, sol + b[6, 0] * k1 + b[6, 1] * k2 + b[6, 2] * k3 + b[6, 3] * k4 + b[6, 4] * k5 + b[6, 5] * k6, properties, alpha)
    #Both embedded solutions must be built from the same base state, so that
    #their difference measures the local truncation error of the step.
    sol_new = sol + c[0] * k1 + c[1] * k2 + c[2] * k3 + c[3] * k4 + c[4] * k5 + c[5] * k6 + c[6] * k7
    solstar = sol + cstar[0] * k1 + cstar[1] * k2 + cstar[2] * k3 + cstar[3] * k4 + cstar[4] * k5 + cstar[5] * k6 + cstar[6] * k7
    return [sol_new, solstar, k7]
#Make the colour & size array for the particles
def plot_props(properties):
particle_colours = zeros((size(properties, 1), 4))
colour_strength = -1/(abs(properties[1,:]) + 2) + 1
particle_colours[:, 3] = colour_strength[:]
for idx in arange(size(properties, 1)):
#Make -ve charge blue, +ve charge red
if properties[1, idx] >= 0:
particle_colours[idx, 0] = 1
else:
particle_colours[idx, 2] = 1
particle_size = properties[0, :]**(2/3) * 4
return [particle_colours, particle_size]
#Calculate net momentum of the system
def momentum_calc(sol, properties):
px = 0.0
py = 0.0
pz = 0.0
for i in range(size(sol, 1)):
px = px + sol[1, i] * properties[0, i]
py = py + sol[3, i] * properties[0, i]
pz = pz + sol[5, i] * properties[0, i]
return [px, py, pz]
#Initial conditions
def initial_conditions(spatial_dims, particle_num, properties):
sol = zeros((spatial_dims * 2, particle_num))
v1bias = 0.1
v2bias = -0.70
    #Particle 0
sol[0, 0] = 0.0 #x1
sol[1, 0] = 0.0 + v1bias #v1
sol[2, 0] = 0.0 #x2
sol[3, 0] = 0.0 + v2bias #v2
sol[4, 0] = 0.0 #x3
sol[5, 0] = 0.0 #v3
properties[1, 0] = 50 #charge
properties[0, 0] = 50 #mass
#Particle 1
sol[0, 1] = 2.0 #x1
sol[1, 1] = 0.0 + v1bias #v1
sol[2, 1] = 0.0 #x2
sol[3, 1] = 6.0 + v2bias #v2
sol[4, 1] = 0.0 #x3
sol[5, 1] = 0.0 #v3
properties[1, 1] = 20 #charge
properties[0, 1] = 20 #mass
#Particle 2
sol[0, 2] = 1.7 #x1
sol[1, 2] = 0.0 + v1bias #v1
sol[2, 2] = 0.0 #x2
sol[3, 2] = -2.5 + v2bias #v2
sol[4, 2] = 0.0 #x3
sol[5, 2] = 0.0 #v3
properties[1, 2] = 2 #charge
properties[0, 2] = 2 #mass
#Particle 3
sol[0, 3] = -3.0 #x1
sol[1, 3] = 0.0 + v1bias #v1
sol[2, 3] = 0.0 #x2
sol[3, 3] = -4.5 + v2bias #v2
sol[4, 3] = 0.0 #x3
sol[5, 3] = 0.0 #v3
properties[1, 3] = 10 #charge
properties[0, 3] = 10 #mass
#Particle 4
sol[0, 4] = -2.7 #x1
sol[1, 4] = 0.0 + v1bias #v1
sol[2, 4] = 0.0 #x2
sol[3, 4] = 0.5 + v2bias #v2
sol[4, 4] = 0.0 #x3
sol[5, 4] = 0.0 #v3
properties[1, 4] = 1 #charge
properties[0, 4] = 1 #mass
#Particle 5
sol[0, 5] = -1.0 #x1
sol[1, 5] = 0.0 + v1bias #v1
sol[2, 5] = 0.0 #x2
sol[3, 5] = -7.0 + v2bias #v2
sol[4, 5] = 0.0 #x3
sol[5, 5] = 0.0 #v3
properties[1, 5] = 2 #charge
properties[0, 5] = 2 #mass
#Particle 6
sol[0, 6] = 0.0 #x1
sol[1, 6] = -5.0 + v1bias #v1
sol[2, 6] = 2.5 #x2
sol[3, 6] = 0.0 + v2bias #v2
sol[4, 6] = 0.0 #x3
sol[5, 6] = 0.0 #v3
properties[1, 6] = 2 #charge
properties[0, 6] = 2 #mass
return sol
###############################################################################
#Start of script. The script calculates in 3 spatial dims, but only plots 2.
t_min = 0.0
t_max = 1.0
epsilon = 1.0e-1
vdivx_errscale = 1
safety = 0.90
particle_num = 7
spatial_dims = 3
slice_interval = 0.01
slices_per_plot = 1
total_slices = int(floor((t_max - t_min) / slice_interval)) + 1    # int: used as an array dimension below
#sol and associated arrays store things as [x1,v1,x2,v2,x3,v3]
data = zeros((total_slices, spatial_dims * 2, particle_num))
time_array = zeros(total_slices)
#Initial conditions
#Properties[0, :] is mass
#properties[1, :] is charge
properties = ones((2, particle_num))
sol = initial_conditions(spatial_dims, particle_num, properties)
data[0, :, :] = sol[:, :]
#Calc net momentum and print
pnet = momentum_calc(sol, properties)
print ('Net momentum = ' + str(pnet))
#The field constant. -ve for gravity, +ve for electric charges
alpha = -1.
#System energy
energy = ones((3, total_slices))
energy[:, 0] = energy_calc(sol, properties, alpha)
#Get solver coefficients
[a, b, c, cstar] = solver_coef()
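# A quick self-check sketch of the Butcher tableau just loaded: each row of b
# should sum to its node a[i], and both weight vectors should sum to one.
for i in range(1, 7):
    assert abs(b[i, :].sum() - a[i]) < 1e-12
assert abs(c.sum() - 1.0) < 1e-12 and abs(cstar.sum() - 1.0) < 1e-12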
t = t_min
cnt = 0
cnt1 = 0
slice_num_temp = -1
#Allows you to scale the error for x and v differently
err_scale = ones((spatial_dims * 2, particle_num))
err_scale[1, :] = vdivx_errscale * err_scale[1, :]
err_scale[3, :] = vdivx_errscale * err_scale[3, :]
err_scale[5, :] = vdivx_errscale * err_scale[5, :]
#Dormand - Prince RK loop
h_try = 0.001
h = h_try
#Dormand - Prince has "first same as last" property.
k7 = h * derivs(t, sol, properties, alpha)
while t < t_max:
    [soltemp, solstar, k7temp] = rkstep(sol, t, h, k7)
    delta = soltemp - solstar
    max_delta = epsilon * err_scale
    err_prop = abs(delta) / abs(max_delta)
    maxprop = err_prop.max()
    maxprop_idx = err_prop.argmax()
    if maxprop > 1:
        #Step rejected: shrink the step and retry from the same state. The
        #FSAL stage k7 is premultiplied by h, so rescale it to the new step.
        htemp = safety * h * abs(max_delta.ravel()[maxprop_idx] / delta.ravel()[maxprop_idx])**0.2
        hnew = max(0.1 * h, htemp)
        k7 = k7 * (hnew / h)
        h = hnew
        continue
    #Step accepted: advance time by the step actually taken, then grow h.
    #solstar is propagated because the final stage k7 was evaluated there
    #("first same as last"); k7 is rescaled to the next step size.
    sol = solstar
    t = t + h
    htemp = safety * h * abs(max_delta.ravel()[maxprop_idx] / delta.ravel()[maxprop_idx])**0.25
    hnew = min(10 * h, htemp, slice_interval)
    k7 = k7temp * (hnew / h)
    h = hnew
#Print update to the terminal every so often
slice_num = floor(t / slice_interval)
if cnt1 % 500 == 0:
print ('Slice:' + str(int(slice_num)) + ', t = ' + str(t) + ', h = ' +str(h))
cnt1 = cnt1 + 1
#Record data after defined time interval
if slice_num != slice_num_temp:
data[cnt, :, :] = sol[:, :]
energy[:, cnt] = energy_calc(sol, properties, alpha)
time_array[cnt] = t
cnt = cnt + 1
slice_num_temp = slice_num
#Plot energy
plot(time_array[:cnt], energy[0, :cnt], '--', time_array[:cnt], energy[1, :cnt], '.', time_array[:cnt], energy[2, :cnt])
xlabel('t')
ylabel('energy')
legend(('potential', 'kinetic', 'total'), loc = 'lower left')
title('Total energy of the system')
savefig('system_energy.png', dpi = 300)
show()
clf()
#Make colour array
[particle_colours, particle_size] = plot_props(properties)
#Plot pictures
destpath = 'Images/'
filename = '%03d'
filetype = '.png'
res_dpi = 150
for frame in arange(int(cnt // slices_per_plot)):    # integer frame index; leaves the RK node array 'a' untouched
    x_vals = data[frame * slices_per_plot, 0, :]
    y_vals = data[frame * slices_per_plot, 2, :]
scatter(x_vals, y_vals, s = particle_size, c = particle_colours, edgecolors = particle_colours)
xlabel('x')
ylabel('y')
axis([-5.0, 5.0, -5.0, 5.0])
axes().set_aspect('equal')
title('Interaction of "charged" particles')
    fullfilename = destpath + str(filename % frame) + filetype
savefig(fullfilename, dpi = res_dpi)
clf()
#Make a movie
movie_name = ' gravo_movie.mp4'
savedPath = os.getcwd()
os.chdir(destpath)
movie_command = 'ffmpeg -qscale 1 -r 25 -i ' + filename + filetype + movie_name
os.system(movie_command)
os.chdir(savedPath)
| [
"[email protected]"
] | |
de30a73a553193306c802bfaa033aeddd3682f2e | e5e2f917c3e116cf0cc9ca848ee9a9e2426c3576 | /30 Days Coding Challenge/day16_GCD.py | 77916ab568d26a8a5781342986e1426109780528 | [] | no_license | sikander27/Techgig-python | ca33b0d7e04a76408e5282a2f135c724b6087f2e | 61e753666227f19efc580e515b090e22999803c4 | refs/heads/master | 2023-04-02T06:13:50.677559 | 2021-04-11T07:14:37 | 2021-04-11T07:14:37 | 331,856,061 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | '''
For this challenge, you need to take input of two numbers ,
calculate their greatest common divisor (GCD) and print it to the stdout
'''
''' Read input from STDIN. Print your output to STDOUT '''
#Use input() to read input from STDIN and use print to write your output to STDOUT
def gcd(a, b):
# everything divides zero
if (b == 0):
return a
return gcd(b, a%b)
def main():
x, y = map(int, input().split())
print(gcd(x, y))
main()
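# An equivalent iterative sketch of the same Euclidean algorithm, useful when
# recursion depth is a concern; it matches gcd above for non-negative inputs.
def gcd_iter(a, b):
    while b:
        a, b = b, a % b
    return a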
| [
"[email protected]"
] | |
2767966ce0528f6e331be37edbe3ed2e1b8f78e2 | 93bd24e7508ed6b8a02c863c73c3f16adb5370ba | /src/interact.py | c81e252d0285a1394584e82f754f63fdf52d773c | [] | no_license | reloadbrain/flixify | d06d56ea4c5ffc9bdb50caa052790938a4153c8d | 390c169aa46ee70929cb50fd980336cb151b14d1 | refs/heads/master | 2020-03-28T17:21:36.658727 | 2015-12-24T19:28:04 | 2015-12-24T19:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | import json
lis = []
with open("output/outFinal.json") as f:
c = json.load(f)
for pair in c:
lis.append((c[pair][0]+c[pair][1], pair))
ans = reversed(sorted(lis))
for item in ans:
print item
| [
"[email protected]"
] | |
8276a8ebb69d15ec5527431093c0e68b429d4370 | 92cac54e6b901ae5957bcd2aa901331edf613fc2 | /venv/FaceRecognition/Lib/site-packages/matplotlib/testing/compare.py | 841c3c50422b42a6371e76c6f463e67117251de9 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.1-only"
] | permissive | faiyazsamin/FaceRecognition | 833e7221422ea4a6ca75fa8b41f76b4f9ba8a722 | 9c0bd65f300784910a923f446cf33bacfc502b52 | refs/heads/master | 2022-11-29T21:36:33.590946 | 2018-05-24T19:52:21 | 2018-05-24T19:52:21 | 134,751,215 | 1 | 1 | MIT | 2022-11-17T20:05:03 | 2018-05-24T17:58:30 | Python | UTF-8 | Python | false | false | 18,106 | py | """
Provides a collection of utilities for comparing (image) results.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import atexit
import functools
import hashlib
import itertools
import os
import re
import shutil
import sys
from tempfile import TemporaryFile
import numpy as np
import matplotlib
from matplotlib.compat import subprocess
from matplotlib.testing.exceptions import ImageComparisonFailure
from matplotlib import _png
from matplotlib import _get_cachedir
from matplotlib import cbook
__all__ = ['compare_float', 'compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
def compare_float(expected, actual, relTol=None, absTol=None):
"""
Fail if the floating point values are not close enough, with
the given message.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
raise ValueError("You haven't specified a 'relTol' relative "
"tolerance or a 'absTol' absolute tolerance "
"function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
def get_cache_dir():
cachedir = _get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
if not os.path.exists(cache_dir):
try:
cbook.mkdirs(cache_dir)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2 ** 20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
if path.endswith('.pdf'):
from matplotlib import checkdep_ghostscript
md5.update(checkdep_ghostscript()[1].encode('utf-8'))
elif path.endswith('.svg'):
from matplotlib import checkdep_inkscape
md5.update(checkdep_inkscape().encode('utf-8'))
return md5.hexdigest()
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(cmdline, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
# Modified from https://bugs.python.org/issue25567.
_find_unsafe_bytes = re.compile(br'[^a-zA-Z0-9_@%+=:,./-]').search
def _shlex_quote_bytes(b):
return (b if _find_unsafe_bytes(b) is None
else b"'" + b.replace(b"'", b"'\"'\"'") + b"'")
class _SVGConverter(object):
def __init__(self):
self._proc = None
# We cannot rely on the GC to trigger `__del__` at exit because
# other modules (e.g. `subprocess`) may already have their globals
# set to `None`, which make `proc.communicate` or `proc.terminate`
# fail. By relying on `atexit` we ensure the destructor runs before
# `None`-setting occurs.
atexit.register(self.__del__)
def _read_to_prompt(self):
"""Did Inkscape reach the prompt without crashing?
"""
stream = iter(functools.partial(self._proc.stdout.read, 1), b"")
prompt = (b"\n", b">")
n = len(prompt)
its = itertools.tee(stream, n)
for i, it in enumerate(its):
next(itertools.islice(it, i, i), None) # Advance `it` by `i`.
while True:
window = tuple(map(next, its))
if len(window) != n:
# Ran out of data -- one of the `next(it)` raised
# StopIteration, so the tuple is shorter.
return False
if self._proc.poll() is not None:
# Inkscape exited.
return False
if window == prompt:
# Successfully read until prompt.
return True
def __call__(self, orig, dest):
if (not self._proc # First run.
or self._proc.poll() is not None): # Inkscape terminated.
env = os.environ.copy()
# If one passes e.g. a png file to Inkscape, it will try to
# query the user for conversion options via a GUI (even with
# `--without-gui`). Unsetting `DISPLAY` prevents this (and causes
# GTK to crash and Inkscape to terminate, but that'll just be
# reported as a regular exception below).
env.pop("DISPLAY", None) # May already be unset.
# Do not load any user options.
# `os.environ` needs native strings on Py2+Windows.
env[str("INKSCAPE_PROFILE_DIR")] = os.devnull
# Old versions of Inkscape (0.48.3.1, used on Travis as of now)
# seem to sometimes deadlock when stderr is redirected to a pipe,
# so we redirect it to a temporary file instead. This is not
# necessary anymore as of Inkscape 0.92.1.
self._stderr = TemporaryFile()
self._proc = subprocess.Popen(
[str("inkscape"), "--without-gui", "--shell"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=self._stderr, env=env)
if not self._read_to_prompt():
raise OSError("Failed to start Inkscape")
try:
fsencode = os.fsencode
except AttributeError: # Py2.
def fsencode(s):
return s.encode(sys.getfilesystemencoding())
# Inkscape uses glib's `g_shell_parse_argv`, which has a consistent
# behavior across platforms, so we can just use `shlex.quote`.
orig_b, dest_b = map(_shlex_quote_bytes, map(fsencode, [orig, dest]))
if b"\n" in orig_b or b"\n" in dest_b:
# Who knows whether the current folder name has a newline, or if
# our encoding is even ASCII compatible... Just fall back on the
# slow solution (Inkscape uses `fgets` so it will always stop at a
# newline).
return make_external_conversion_command(lambda old, new: [
str('inkscape'), '-z', old, '--export-png', new])(orig, dest)
self._proc.stdin.write(orig_b + b" --export-png=" + dest_b + b"\n")
self._proc.stdin.flush()
if not self._read_to_prompt():
# Inkscape's output is not localized but gtk's is, so the
# output stream probably has a mixed encoding. Using
# `getfilesystemencoding` should at least get the filenames
# right...
self._stderr.seek(0)
raise ImageComparisonFailure(
self._stderr.read().decode(
sys.getfilesystemencoding(), "replace"))
def __del__(self):
if self._proc:
if self._proc.poll() is None: # Not exited yet.
self._proc.communicate(b"quit\n")
self._proc.wait()
self._proc.stdin.close()
self._proc.stdout.close()
self._stderr.close()
def _update_converter():
gs, gs_v = matplotlib.checkdep_ghostscript()
if gs_v is not None:
def cmd(old, new):
return [str(gs), '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
'-sOutputFile=' + new, old]
converter['pdf'] = make_external_conversion_command(cmd)
converter['eps'] = make_external_conversion_command(cmd)
if matplotlib.checkdep_inkscape() is not None:
converter['svg'] = _SVGConverter()
#: A dictionary that maps filename extensions to functions which
#: themselves map arguments `old` and `new` (filenames) to a list of strings.
#: The list can then be passed to Popen to convert files with that
#: extension to png format.
converter = {}
_update_converter()
def comparable_formats():
"""
Returns the list of file formats that compare_images can compare
on this system.
"""
return ['png'] + list(converter)
def convert(filename, cache):
"""
Convert the named file into a png file. Returns the name of the
created file.
If *cache* is True, the result of the conversion is cached in
`matplotlib._get_cachedir() + '/test_cache/'`. The caching is based
    on a hash of the exact contents of the input file. There is no limit
on the size of the cache, so it may need to be manually cleared
periodically.
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
reason = "Don't know how to convert %s files to png" % extension
from . import is_called_from_pytest
if is_called_from_pytest():
import pytest
pytest.skip(reason)
else:
from nose import SkipTest
raise SkipTest(reason)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash_value = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash_value + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
#: Maps file extensions to a function which takes a filename as its
#: only argument to return a list suitable for execution with Popen.
#: The purpose of this is so that the result file (with the given
#: extension) can be verified with tools such as xmllint for svg.
verifiers = {}
# Turning this off, because it seems to cause multiprocessing issues
if False and matplotlib.checkdep_xmllint():
verifiers['svg'] = lambda filename: [
'xmllint', '--valid', '--nowarning', '--noout', filename]
@cbook.deprecated("2.1")
def verify(filename):
"""Verify the file through some sort of verification tool."""
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
base, extension = filename.rsplit('.', 1)
verifier = verifiers.get(extension, None)
if verifier is not None:
cmd = verifier(filename)
pipe = subprocess.Popen(cmd, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if errcode != 0:
msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
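    # Converted files are named "<base>_<ext>.png" by `convert`, so the slice
    # path[-7:-4] recovers the original extension, e.g. "plot_eps.png" -> "eps".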
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah, ad = actual_image.shape
ew, eh, ed = expected_image.shape
actual_image = actual_image[int(aw / 2 - ew / 2):int(
aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
return actual_image, expected_image
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {0} "
"actual size {1}".format(expectedImage.shape, actualImage.shape))
num_values = expectedImage.size
abs_diff_image = abs(expectedImage - actualImage)
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
sum_of_squares = np.sum(histogram * np.arange(len(histogram)) ** 2)
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
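# Worked example (illustrative): for two 2x2 RGB images where every expected
# value is 0 and every actual value is 2, abs_diff_image is 2 everywhere, so
# histogram[2] == 12, sum_of_squares == 12 * 4, and rms == sqrt(48 / 12) == 2.0.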
def compare_images(expected, actual, tol, in_decorator=False):
"""
Compare two "image" files checking differences within a tolerance.
The two given filenames may point to files which are convertible to
PNG via the `.converter` dictionary. The underlying RMS is calculated
with the `.calculate_rms` function.
Parameters
----------
expected : str
The filename of the expected image.
    actual : str
The filename of the actual image.
tol : float
The tolerance (a color value difference, where 255 is the
maximal difference). The test fails if the average pixel
difference is greater than this value.
in_decorator : bool
If called from image_comparison decorator, this should be
True. (default=False)
Examples
--------
img1 = "./baseline/plot.png"
img2 = "./output/plot.png"
    compare_images(img1, img2, 0.001)
"""
if not os.path.exists(actual):
raise Exception("Output image %s does not exist." % actual)
if os.stat(actual).st_size == 0:
raise Exception("Output image file %s is empty." % actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expectedImage = _png.read_png_int(expected)
actualImage = _png.read_png_int(actual)
expectedImage = expectedImage[:, :, :3]
actualImage = actualImage[:, :, :3]
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
diff_image = make_test_filename(actual, 'failed-diff')
if tol <= 0.0:
if np.array_equal(expectedImage, actualImage):
return None
# convert to signed integers, so that the images can be subtracted without
# overflow
expectedImage = expectedImage.astype(np.int16)
actualImage = actualImage.astype(np.int16)
rms = calculate_rms(expectedImage, actualImage)
if rms <= tol:
return None
save_diff_image(expected, actual, diff_image)
results = dict(rms=rms, expected=str(expected),
actual=str(actual), diff=str(diff_image), tol=tol)
if not in_decorator:
# Then the results should be a string suitable for stdout.
template = ['Error: Image files did not match.',
'RMS Value: {rms}',
'Expected: \n {expected}',
'Actual: \n {actual}',
'Difference:\n {diff}',
'Tolerance: \n {tol}', ]
results = '\n '.join([line.format(**results) for line in template])
return results
def save_diff_image(expected, actual, output):
expectedImage = _png.read_png(expected)
actualImage = _png.read_png(actual)
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
expectedImage = np.array(expectedImage).astype(float)
actualImage = np.array(actualImage).astype(float)
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {0} "
"actual size {1}".format(expectedImage.shape, actualImage.shape))
absDiffImage = np.abs(expectedImage - actualImage)
# expand differences in luminance domain
absDiffImage *= 255 * 10
save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np, output)
| [
"[email protected]"
] | |
def2f5d76a06abfa75bee8540d5a5982b97fa204 | 9fb1c85a6d39c08e2a3cc235335bc482ad909b71 | /prowler/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.py | d49160981b34ad878af72233ab6ebf3869bfae89 | [
"Apache-2.0"
] | permissive | muharihar/prowler | 06dbdeaa0696dd65d72c33ff3c9f957b97f83d7a | 25c9bc07b219cc02004cc0b84adcfdcf18d5ad2b | refs/heads/master | 2023-02-18T01:26:54.161003 | 2023-02-10T11:38:13 | 2023-02-10T11:38:13 | 238,623,868 | 0 | 0 | null | 2020-02-06T06:36:36 | 2020-02-06T06:36:35 | null | UTF-8 | Python | false | false | 1,012 | py | from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.route53.route53domains_client import (
route53domains_client,
)
class route53_domains_transferlock_enabled(Check):
    def execute(self) -> list:
findings = []
for domain in route53domains_client.domains.values():
report = Check_Report_AWS(self.metadata())
report.resource_id = domain.name
report.region = domain.region
if domain.status_list and "clientTransferProhibited" in domain.status_list:
report.status = "PASS"
report.status_extended = (
f"Transfer Lock is enabled for the {domain.name} domain"
)
else:
report.status = "FAIL"
report.status_extended = (
f"Transfer Lock is disabled for the {domain.name} domain"
)
findings.append(report)
return findings
| [
"[email protected]"
] | |
515436b4d5fe3ddd0030470fde74f0965147a76f | 96cfaaa771c2d83fc0729d8c65c4d4707235531a | /Configuration/Spring08Production/python/Spring08_PhotonJetpt30-50_GEN_cfg.py | 8cf29fde0fbca5f06b831fcb9e3f0f9fe8054a8d | [] | no_license | khotilov/cmssw | a22a160023c7ce0e4d59d15ef1f1532d7227a586 | 7636f72278ee0796d0203ac113b492b39da33528 | refs/heads/master | 2021-01-15T18:51:30.061124 | 2013-04-20T17:18:07 | 2013-04-20T17:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Gen")
process.load("FWCore.MessageService.MessageLogger_cfi")
# control point for all seeds
process.load("Configuration.StandardSequences.SimulationRandomNumberGeneratorSeeds_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("Configuration.Spring08Production.Spring08_PhotonJetpt30_50_cfi")
process.load("Configuration.EventContent.EventContent_cff")
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/Spring08Production/data/Spring08_PhotonJetpt30-50_GEN.cfg,v $'),
annotation = cms.untracked.string('FastSim PhotonJet Pthat 30-50 for Spring08')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.GEN = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN')
),
fileName = cms.untracked.string('PhotonJetpt30-50.root')
)
process.e = cms.EndPath(process.GEN)
process.schedule = cms.Schedule(process.e)
| [
"[email protected]"
] | |
c0784615677646eda804d4ea5f9351061c8c2357 | b4aaf7eaab9781c394457e9c5ce41897dc2acb29 | /app/monitor/routes_cpu.py | 448444f752e5018f257c72cd495052ab32003d19 | [
"MIT"
] | permissive | bcarroll/inmoov_mini | 17ba0505faf3317916c001c59a2cddde2b87fe10 | d0bddcfdd5ec5f434d70c97066c8d6fb62a6f933 | refs/heads/master | 2023-02-23T04:29:33.936277 | 2022-07-07T15:31:43 | 2022-07-07T15:31:43 | 129,016,192 | 0 | 0 | MIT | 2023-02-15T21:50:59 | 2018-04-11T01:29:55 | Python | UTF-8 | Python | false | false | 492 | py | import psutil
from app.monitor import bp
from flask import jsonify
from flask_login import login_required
@bp.route('/cpu/percent')
@login_required
def cpu_percent():
return( jsonify( psutil.cpu_percent(interval=1) ) )
@bp.route('/cpu/frequency')
@login_required
def cpu_frequency():
cpu_freq = {}
cpu_freq['current'] = psutil.cpu_freq().current
cpu_freq['min'] = psutil.cpu_freq().min
cpu_freq['max'] = psutil.cpu_freq().max
return( jsonify(cpu_freq) )
| [
"[email protected]"
] | |
e5d247bf7cc030a77b8a76348eccb8ccd9311a7f | f91f9330179236025528682df90c4ba9baaae49d | /backend/manage.py | f4749adf415cbcfc37b8618e105dd6561a310b4a | [] | no_license | crowdbotics-apps/test-4316-dev-9267 | 96e7b04626045a40333ef81f161bf7e096c68931 | 3a0ae32df3ea8175d4aa37a20dec262086e00eb4 | refs/heads/master | 2022-12-04T10:11:57.679231 | 2020-08-24T08:22:33 | 2020-08-24T08:22:33 | 289,866,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_4316_dev_9267.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ee819a6e8372d9aa07f36cdf730a81eaea0c1055 | 18b977dccd70e9e5a1b553b28ab0413fb3f54f4b | /SoftUni/Python Developmen/Python-Fundamentals/04_Lists/the_office.py | 12c13b6f4c2e6e1f5e8584f7e661696c2d418881 | [] | no_license | stevalang/Coding-Lessons | 7203e3a18b20e33e8d596e3dfb58d26c50b74530 | 2d0060c2268ad966efdcae4e6e994ac15e57243a | refs/heads/master | 2023-06-05T08:28:33.290530 | 2021-06-16T19:37:29 | 2021-06-16T19:37:29 | 284,852,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | employees = input().split()
factor = int(input())
employee_happiness = list(map(lambda x: int(x) * factor, employees))
avg_happiness = sum(employee_happiness) / len(employee_happiness)
# above_avg_happy = [employee for employee in employee_happiness if employee >= avg_happiness]
above_avg_happy = list(filter(lambda employee: employee >= avg_happiness, employee_happiness))
if len(above_avg_happy) >= len(employee_happiness) / 2:
print(f'Score: {len(above_avg_happy)}/{len(employee_happiness)}. Employees are happy!')
else:
print(f'Score: {len(above_avg_happy)}/{len(employee_happiness)}. Employees are not happy!')
| [
"[email protected]"
] | |
cc8b434ce82b6e1625a617bbbd89b70bd16b8524 | f225b35d49562e7a1114968bdf9128dbc4cd91ab | /myspider/items.py | fa8e60c784f6743287563124add7390aebc383f6 | [] | no_license | 15032373556/scrapy_exercise | 1948ce42102f99e414ae214b27163eb1d9e3b338 | 7a6e8b7a395044bda3acb649ab8f5a74bc854d82 | refs/heads/master | 2022-11-25T13:29:28.726984 | 2020-07-25T03:09:41 | 2020-07-25T03:09:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ItcastItem(scrapy.Item):
# 抓取 1.讲师姓名 2.讲师职称 3.讲师个人信息
# 测试提交代码
name = scrapy.Field()
title = scrapy.Field()
info = scrapy.Field()
| [
"[email protected]"
] | |
57a18e4cb2aa5d0c6caec11aa70fd109c721f42d | 954de20148e5cc14a79439e532b51d790b4ef15a | /Chapter06/Tasks/rivers.py | e98a51312a5fd49ddd59dc33d3b54ae611bcaff0 | [] | no_license | d1rtyst4r/archivetempLearningPythonGPDVWA | 8803114f0bf9a3bbce55610bed477533c58b3630 | 794cab59c3f2cfabb677d7add5f3ecf3daf71e44 | refs/heads/master | 2023-04-10T01:42:51.505569 | 2023-04-02T10:38:04 | 2023-04-02T10:38:04 | 138,033,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | rivers = {
'nile': 'egypt',
'daugava': 'latvia',
'thames': 'england'
}
for river, country in rivers.items():
print("The " + river.title() + " runs through " + country.title() + ".")
| [
"[email protected]"
] | |
c44508356d98bca92ede322dff36b8fe69233790 | 316a07bd7ab47d447606d341c5d221d8318f65b9 | /python-quantumclient/quantumclient/__init__.py | d7a6aab476a7fd877ee47d88223005f3700109e1 | [] | no_license | kumarcv/openstack-nf | 791d16a4844df4666fb2b82a548add98f4832628 | ad2d8c5d49f510292b1fe373c7c10e53be52ba23 | refs/heads/master | 2020-05-20T03:10:54.495411 | 2013-06-16T23:44:11 | 2013-06-16T23:44:11 | 7,497,218 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Citrix Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Tyler Smith, Cisco Systems
import gettext
# gettext must be initialized before any quantumclient imports
gettext.install('quantumclient', unicode=1)
| [
"[email protected]"
] | |
1caf0bb73a348c9474abe29b5774ab1b7560fa59 | 1789431658d7f5594a6bdae23d0d74bcb7c32020 | /main.py | 60e2979f764fc36895a52f45b1593a26ac31f2f9 | [] | no_license | Abhishekparmar123/Dictionary-app-using-SQL | 14783912c20f4bf36bcd2393563f915480e8ee38 | 37af5cf58cae3ed70ee6d0b0f200c4eb7e004f8b | refs/heads/master | 2022-12-05T13:22:18.002949 | 2020-08-23T11:30:53 | 2020-08-23T11:30:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | import sqlite3
import json
data = json.load(open("data.json"))
def create():
conn = sqlite3.connect("Dictionary.db")
cur = conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS dictionary (word VARCHAR(30), meaning VARCHAR(1000))")
conn.commit()
conn.close()
def insert(word, meaning):
conn = sqlite3.connect("Dictionary.db")
cur = conn.cursor()
cur.execute("INSERT INTO dictionary VALUES (?, ?)", (word, meaning))
conn.commit()
conn.close()
def delete():
conn = sqlite3.connect("Dictionary.db")
cur = conn.cursor()
cur.execute("DELETE FROM dictionary ")
conn.commit()
conn.close()
def view():
conn = sqlite3.connect("Dictionary.db")
cur = conn.cursor()
cur.execute("SELECT * FROM dictionary ")
rows = cur.fetchall()
conn.close()
return rows
def search(item):
conn = sqlite3.connect("Dictionary.db")
cur = conn.cursor()
cur.execute("SELECT meaning FROM dictionary WHERE word like ?", (item,))
row = cur.fetchall()
conn.close()
return row
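# Note: with no % wildcards, `LIKE ?` behaves as an exact match (ASCII
# case-insensitive in SQLite); pass e.g. "%word%" for a substring search.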
create()
key = list(data.keys())
# for i in key:
# insert(i, str(data[i]))
# print(view())
# delete()
word = input("Enter your word : ")
result = search(word)
if result:
    # search() returns a list of (meaning,) tuples; print the first match.
    print(result[0][0])
else:
    print("Word not found.")
| [
"[email protected]"
] | |
99cbb1c2c9693fe423a01b59ef5289715abab28f | 396ee8958eb753d96a62b1199103c2c1194c08e0 | /creme/ensemble/bagging.py | a8509f03c6ffe22b2ed05d0f2a2d8f770954a48a | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ZuoMatthew/creme | fe1499a9071a994587172f908a530522be6b915b | 27d40fa7a5014c94d7f95dee259368c0adc7115c | refs/heads/master | 2020-04-22T20:46:58.100005 | 2019-02-12T17:13:15 | 2019-02-12T17:13:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,843 | py | import collections
import copy
from sklearn import utils
from .. import base
__all__ = ['BaggingClassifier']
class BaggingClassifier(base.BinaryClassifier):
"""Bagging for classification.
For each incoming observation, each model's `fit_one` method is called `k` times where `k`
is sampled from a Poisson distribution of parameter 1. `k` thus has a 36% chance of being equal
to 0, a 36% chance of being equal to 1, an 18% chance of being equal to 2, a 6% chance of being
equal to 3, a 1% chance of being equal to 4, etc. You can do `scipy.stats.poisson(1).pmf(k)`
for more detailed values.
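    As a quick illustration (not part of the original docs),
    ``scipy.stats.poisson(1).pmf([0, 1, 2, 3])`` returns roughly
    ``[0.368, 0.368, 0.184, 0.061]``, matching the figures above.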
Parameters:
base_estimator (creme.base.Classifier): The estimator to bag.
Example:
In the following example three logistic regressions are bagged together. The performance is
slightly better than when using a single logistic regression.
::
>>> import creme.compose
>>> import creme.ensemble
>>> import creme.linear_model
>>> import creme.model_selection
>>> import creme.optim
>>> import creme.preprocessing
>>> import creme.stream
>>> from sklearn import datasets
>>> from sklearn import metrics
>>> X_y = creme.stream.iter_sklearn_dataset(
... load_dataset=datasets.load_breast_cancer,
... shuffle=True,
... random_state=42
... )
>>> optimiser = creme.optim.VanillaSGD()
>>> model = creme.compose.Pipeline([
... ('scale', creme.preprocessing.StandardScaler()),
... ('learn', creme.linear_model.LogisticRegression(optimiser))
... ])
>>> model = creme.ensemble.BaggingClassifier(model, n_estimators=3)
>>> metric = metrics.roc_auc_score
>>> creme.model_selection.online_score(X_y, model, metric)
0.991497...
References:
- `Online Bagging and Boosting <https://ti.arc.nasa.gov/m/profile/oza/files/ozru01a.pdf>`_
"""
def __init__(self, base_estimator=None, n_estimators=10, random_state=42):
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.estimators = [copy.deepcopy(base_estimator) for _ in range(n_estimators)]
self.rng = utils.check_random_state(random_state)
def fit_one(self, x, y):
y_pred = self.predict_proba_one(x)
for estimator in self.estimators:
for _ in range(self.rng.poisson(1)):
estimator.fit_one(x, y)
return y_pred
def predict_one(self, x):
votes = collections.Counter((estimator.predict_one(x) for estimator in self.estimators))
return max(votes, key=votes.get)
def predict_proba_one(self, x):
return sum(estimator.predict_proba_one(x) for estimator in self.estimators) / len(self.estimators)
| [
"[email protected]"
] | |
25da0744ea358f58944383d0fa56dbde72f8ca7e | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/rbd.py | 4548339f733e7c118189a771baa2ea2fcc9e9962 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
            # data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'rBD':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
e0315093c2111b0b43b0c96efd9f3b6ae0dd7d10 | 1a639d185f9c883b7bebf33c577c58b22ac93c7e | /other/sound.py | d69bb732b93754a0a9bbad2d5b75c7350984b2d5 | [] | no_license | gofr1/python-learning | bd09da5b5850b1533a88b858690ed4380b55d33e | 19343c985f368770dc01ce415506506d62a23285 | refs/heads/master | 2023-09-02T15:42:27.442735 | 2021-11-12T10:17:13 | 2021-11-12T10:17:13 | 237,828,887 | 0 | 0 | null | 2021-11-12T10:17:14 | 2020-02-02T20:03:42 | Python | UTF-8 | Python | false | false | 568 | py | #!/usr/bin/env python3
# gTTS (Google Text-to-Speech), a Python library and CLI tool to interface with Google Translate text-to-speech API
# sudo pip3 install gtts
from io import BytesIO
from pygame import mixer
from gtts import gTTS
def speak(text):
with BytesIO() as f:
gTTS(text=text, lang="en").write_to_fp(f)
f.seek(0)
mixer.init()
mixer.music.load(f)
mixer.music.play()
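        # Block until playback finishes; get_busy() polls the mixer state.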
while mixer.music.get_busy():
continue
if __name__ == '__main__':
text = input("What should I say? >>")
speak(text) | [
"[email protected]"
] | |
80d6314141d4f24833a5ea2410e5ce6f0c2c9472 | 074afd26d00bb742b03c12891b057ab263e640bf | /LeetCode 30 days/week1.2.py | ffcfa748b1935152b9419bb6cf112f940f619277 | [] | no_license | IsmailTitas1815/Data-Structure | 7a898800b1e53c778b1f2f11b0df259e52c20140 | fece8dd97d3e162e39fc31d5f3498a6dac49b0f0 | refs/heads/master | 2023-02-05T10:39:49.349484 | 2020-12-21T13:37:22 | 2020-12-21T13:37:22 | 296,343,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | # import re
# s = '123 456-7890'
# new_s = [int(i) for i in re.findall('\d', s)]
# unformattedPhone = "1239084590348509 456-7890"
# numbersList = [int(s) for s in unformattedPhone if s.isdigit()]
# print(numbersList)
class Solution:
    def isHappy(self, num):
setofvalue = set()
while num!=1:
num = sum(int(i)**2 for i in str(num))
if num in setofvalue:
return False
setofvalue.add(num)
return True
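# Illustrative traces: 19 -> 82 -> 68 -> 100 -> 1, so 19 is happy; 2 eventually
# revisits 4 (4 -> 16 -> 37 -> 58 -> 89 -> 145 -> 42 -> 20 -> 4), so it is not.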
num = int(input())
obj = Solution()
boo = obj.isHappy(num)
print(boo)
#
# def happy_numbers(n):
# past = set()
# while n != 1:
# n = sum(int(i)**2 for i in str(n))
# if n in past:
# return False
# past.add(n)
# return True
# print([x for x in range(500) if happy_numbers(x)][:10]) | [
"[email protected]"
] | |
a277810779992737e996b5a33165c4c02f6bb8b9 | 8afd826d1a1073ed07379ba83ea3d8c229bceee3 | /market_unitest.py | bae2d5e711ed27ea0c29c4b1bac500d39aab0ffb | [] | no_license | irinarozalio/market | 47249ae789a52a25b40083aed2619ac374e650d2 | 37e6ddae3acbca76eb7682aa25e36f1d98ad0776 | refs/heads/master | 2022-12-10T18:31:59.058596 | 2019-03-27T15:57:11 | 2019-03-27T15:57:11 | 174,009,244 | 0 | 0 | null | 2022-12-08T04:51:49 | 2019-03-05T19:38:59 | Python | UTF-8 | Python | false | false | 2,431 | py | import unittest
import requests
from market import f1
import psycopg2
import psycopg2.extras
import os
class Product_name_exist(unittest.TestCase):
def test_product_exists(self):
url = r"http://ec2-3-17-162-33.us-east-2.compute.amazonaws.com:5000/amount_product?product_id=1"
r = requests.get(url)
self.assertEqual(r.json()['name'], 'Shoulder Knot Leopard Print Dress')
class Product_name_not_exists(unittest.TestCase):
def test_product_not_exists(self):
url = r"http://ec2-3-17-162-33.us-east-2.compute.amazonaws.com:5000/amount_product?product_id='a'"
r = requests.get(url)
self.assertEqual(r.json()['error'], 'No Product Found')
# def test_f1(self):
# self.assertGreaterEqual(1, f1())
class Not_in_stock(unittest.TestCase):
def test_not_in_stock(self):
product_id = 1
url = r"http://ec2-3-17-162-33.us-east-2.compute.amazonaws.com:5000/reduce_amount"
r = requests.put(url, data = {'amount':1, 'product_id':product_id, 'location_id':1 })
data = r.json()
#if str(product_id) in data.keys():
if 'Not in stock' in data.values():
val = data[str(product_id)]
else:
val ='123'
self.assertEqual(str(val), 'Not in stock')
class ConnectPG(unittest.TestCase):
pg_con = None
def test_connectPG(self):
pg_con = psycopg2.connect(database='dev',
user='iradba',
password=os.getenv('MARKET_DB_PASS'),
host = 'ip-172-31-12-93',
port = '5432')
pg_con.autocommit = True
pg_cur = pg_con.cursor()
pg_cur.execute('SELECT version()')
db_version = pg_cur.fetchone()
a = 'PostgreSQL 11.2 (Debian 11.2-1.pgdg90+1) on x86_64-pc-linux-gnu, compiled by gcc (Debian 6.3.0-18+deb9u1) 6.3.0 20170516, 64-bit'
if a in db_version:
val = 'PostgreSQL 11.2'
else:
            val = 'Database connection closed'
print('Database connection closed')
self.assertEqual(val,'PostgreSQL 11.2')
class Set_amount(unittest.TestCase):
def test_set_amount(self):
product_id = 1
amount = 5
url = r"http://ec2-3-17-162-33.us-east-2.compute.amazonaws.com:5000/set_amount"
r = requests.put(url, data = {'amount':amount, 'product_id':product_id, 'location_id':1 })
data = r.json()
if amount in data.values():
val = amount
else:
print('Test_set_amount is Failed')
self.assertEqual(val, 5)
# class TestStringMethods(unittest.TestCase):
# def test_upper(self):
# self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
15d8600fcc62bae425faaf90085a8f09360ab77e | df038c9a84ca9b11bbef86d84d2e6feb6fd733bf | /setup.py | d7698c8b3fa3d0b9da3a92af8fb21d3751e3cf58 | [
"BSD-2-Clause"
] | permissive | wybaby/PSpider | d31ff8cbde1a3f23d05c1684c455beea2b48c915 | 5087fc20589878fa123daa113213fbf17282a35b | refs/heads/master | 2021-01-22T01:55:16.258596 | 2017-06-23T03:35:04 | 2017-06-23T07:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | # _*_ coding: utf-8 _*_
"""
install script: python3 setup.py install
"""
from setuptools import setup, find_packages
setup(
name="spider",
version="2.4.5",
author="xianhu",
keywords=["spider", "crawler", "multi-threads", "asyncio", "distributed"],
packages=find_packages(exclude=("otherfiles", "test.*")),
package_data={
"": ["*.conf"], # include all *.conf files
},
install_requires=[
"aiohttp>=2.0.0", # aiohttp, http for asyncio
"pybloom_live>=2.0.0", # pybloom-live, fork from pybloom
"redis>=2.10.0", # redis, python client for redis
"requests>=2.10.0", # requests, http for humans
]
)
| [
"[email protected]"
] | |
35068272afd8f83f2cd64208d58014d0fd301ea0 | 1a75117532cbf8c72bfa6886afdb11262c01ccf3 | /src/bert/run_squad.py | 823decc5d89adb98097d239a7b44cfc527d1806d | [
"MIT"
] | permissive | yyHaker/MachineComprehension | 627e90eacc0afc3e519df95dec4d09e886fcbfd6 | d92d39f43f72df8bac29e8947d7457b19e088b43 | refs/heads/master | 2020-05-01T15:21:23.964854 | 2019-06-03T11:43:06 | 2019-06-03T11:43:06 | 177,544,370 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 50,948 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD."""
from __future__ import absolute_import, division, print_function
import argparse
import collections
import json
import logging
import math
import os
import random
import sys
from io import open
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForQuestionAnswering, BertConfig
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from pytorch_pretrained_bert.tokenization import (BasicTokenizer,
BertTokenizer,
whitespace_tokenize)
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
class SquadExample(object):
"""
A single training/test example for the Squad dataset.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (self.qas_id)
s += ", question_text: %s" % (
self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
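            # Illustrative mapping (an assumption, not from the original code):
            # for paragraph_text "a bc", doc_tokens is ["a", "bc"] and
            # char_to_word_offset is [0, 0, 1, 1], so any character offset can
            # be projected onto its whitespace-delimited token.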
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
                        # Only add answers where the text can be exactly recovered from the
                        # document. If it can't be, that's likely due to weird Unicode
                        # stuff, so we just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
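        # E.g. (illustrative): with 800 doc tokens, max_tokens_for_doc=384 and
        # doc_stride=128, spans start at 0, 128, 256, 384 and 512, the last one
        # shortened so that start + length lands exactly on the final token.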
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
logger.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible))
unique_id += 1
return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
    # token can appear in multiple document spans. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, verbose_logging,
version_2_with_negative, null_score_diff_threshold):
"""Write final predictions to the json file and log-odds of null if needed."""
logger.info("Writing predictions to: %s" % (output_prediction_file))
logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="",
start_logit=null_start_logit,
end_logit=null_end_logit))
            # In very rare edge cases we could only have a single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0,
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints and predictions will be written.")
## Other parameters
parser.add_argument("--train_file", default=None, type=str, help="SQuAD json for training. E.g., train-v1.1.json")
parser.add_argument("--predict_file", default=None, type=str,
help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
parser.add_argument("--max_seq_length", default=384, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--doc_stride", default=128, type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.")
parser.add_argument("--max_query_length", default=64, type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.")
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_predict", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% "
"of training.")
parser.add_argument("--n_best_size", default=20, type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json "
"output file.")
parser.add_argument("--max_answer_length", default=30, type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.")
parser.add_argument("--verbose_logging", action='store_true',
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.')
parser.add_argument('--null_score_diff_threshold',
type=float, default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.do_train:
if not args.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if args.do_predict:
if not args.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory () already exists and is not empty.")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_optimization_steps = None
if args.do_train:
train_examples = read_squad_examples(
input_file=args.train_file, is_training=True, version_2_with_negative=args.version_2_with_negative)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
model = BertForQuestionAnswering.from_pretrained(args.bert_model,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE),
'distributed_{}'.format(args.local_rank)))
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
# hack to remove pooler, which is not used
    # thus it produces a None grad that breaks apex
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
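        # BertAdam applies the linear warmup/decay schedule internally (via warmup
        # and t_total), so no manual learning-rate adjustment is needed on this path.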
global_step = 0
if args.do_train:
cached_train_features_file = args.train_file + '_{0}_{1}_{2}_{3}'.format(
list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride),
str(args.max_query_length))
train_features = None
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
        except Exception:  # cache missing or unreadable; rebuild the features below
train_features = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=True)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving train features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
for step, batch in enumerate(
tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
if n_gpu == 1:
                    batch = tuple(t.to(device) for t in batch)  # multi-GPU does the scattering itself
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
                        # modify the learning rate with the special warmup schedule BERT uses;
                        # if args.fp16 is False, BertAdam is used and handles this automatically
lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_optimization_steps,
args.warmup_proportion)
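                        # warmup_linear is defined earlier in this script; as a sketch
                        # (assuming the standard pytorch-pretrained-bert helper) it returns
                        #   x / warmup   while x < warmup   (linear ramp up from 0)
                        #   1.0 - x      afterwards         (linear decay towards 0)
                        # where x is the fraction of optimization steps completed.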
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
model = BertForQuestionAnswering.from_pretrained(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
else:
model = BertForQuestionAnswering.from_pretrained(args.bert_model)
model.to(device)
if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = read_squad_examples(
input_file=args.predict_file, is_training=False, version_2_with_negative=args.version_2_with_negative)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=False)
logger.info("***** Running predictions *****")
logger.info(" Num orig examples = %d", len(eval_examples))
logger.info(" Num split examples = %d", len(eval_features))
logger.info(" Batch size = %d", args.predict_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
model.eval()
all_results = []
logger.info("Start evaluating")
for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating",
disable=args.local_rank not in [-1, 0]):
if len(all_results) % 1000 == 0:
logger.info("Processing example: %d" % (len(all_results)))
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
for i, example_index in enumerate(example_indices):
start_logits = batch_start_logits[i].detach().cpu().tolist()
end_logits = batch_end_logits[i].detach().cpu().tolist()
eval_feature = eval_features[example_index.item()]
unique_id = int(eval_feature.unique_id)
all_results.append(RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_prediction_file = os.path.join(args.output_dir, "predictions.json")
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
output_null_log_odds_file = os.path.join(args.output_dir, "null_odds.json")
write_predictions(eval_examples, eval_features, all_results,
args.n_best_size, args.max_answer_length,
args.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, args.verbose_logging,
args.version_2_with_negative, args.null_score_diff_threshold)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
98cf87b58d43e76ff154e27adfa6fd3769b34aff | 5f852dcf10bbf8ccf397242825a8dd9d26c09ba9 | /18_db/task_18_1/create_db.py | b98ea378ccd49a23fbe041df9599047c205c6d69 | [] | no_license | deiliet/pyneng-answers | 4541da2c5830a57562538db2aec45fe46a95d475 | f45e22081a3a15ced54be74af520ed99bdc69810 | refs/heads/master | 2020-12-23T18:49:40.704009 | 2020-02-19T20:37:24 | 2020-02-19T20:37:24 | 237,238,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,544 | py | import sys
import re
import glob
import sqlite3
def db_file_checker(db_file_list, db_filename, regexp_db):
    if not db_file_list and 'default.db' in db_filename:
        print('Database file does not exist. An empty default database file will be created')
    elif not db_file_list:
        print('Database file does not exist. An empty database file will be created')
    else:
        print('Database file already exists')
        while True:
            decision = input('Proceed with the existing database file? [yes]: ') or 'yes'
            if decision == 'yes' or decision == 'y':
                break
            if decision == 'no' or decision == 'n':
                sys.exit('Change the database filename')
            else:
                print('Type yes/y to submit or no/n to decline')
    if not re.match(regexp_db, db_filename):
        print('File is not a database file, or its name contains whitespace')
        sys.exit('Change the database filename')
def sql_create_file_checker(sql_create_file_list, sql_create_filename, regexp_sql, regexp_sql_create):
    if not sql_create_file_list and 'default.sql' in sql_create_filename:
        default_sql_create_file = open(sql_create_filename, 'w')
        default_sql_create_file.close()
        print('SQL create-script file does not exist. An empty default SQL create-script file has been created')
    elif not sql_create_file_list:
        custom_sql_create_file = open(sql_create_filename, 'w')
        custom_sql_create_file.close()
        print('SQL create-script file does not exist. An empty SQL create-script file has been created')
    else:
        print('SQL create-script file already exists')
        while True:
            decision = input('Proceed with the SQL create-script file? [yes]: ') or 'yes'
            if decision == 'yes' or decision == 'y':
                break
            if decision == 'no' or decision == 'n':
                sys.exit('Change the SQL create-script filename')
            else:
                print('Type yes/y to submit or no/n to decline')
    if not re.match(regexp_sql, sql_create_filename):
        print('File is not an SQL create-script, or its name contains whitespace')
        sys.exit('Change the SQL create-script filename')
def execute_sql_create(sql_create_filename, regexp_sql_create, cursor):
with open(sql_create_filename, 'r') as file:
for match_index in re.finditer(regexp_sql_create, file.read(), re.DOTALL):
print(match_index.group())
if match_index:
try:
cursor.executescript(match_index.group())
print(f'Table "{match_index.group("table_name")}" is sucsessfully created')
except sqlite3.OperationalError:
print(f'Table "{match_index.group("table_name")}" in DataBase is already existed')
def db_file_create(db_filename='default.db', sql_create_filename='default.sql'):
db_file_list = sorted(glob.glob(db_filename))
sql_create_file_list = sorted(glob.glob(sql_create_filename))
regexp_db = r'\S+(\.db)'
regexp_sql = r'\S+(\.sql)'
regexp_sql_create = r'create table (?P<table_name>.*?) \(.*?\);'
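    # e.g. regexp_sql_create matches "create table dhcp (mac text, ip text);"
    # and captures "dhcp" in the named group "table_name".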
db_file_checker(db_file_list, db_filename, regexp_db)
sql_create_file_checker(sql_create_file_list, sql_create_filename, regexp_sql, regexp_sql_create)
connection = sqlite3.connect(db_filename)
cursor = connection.cursor()
execute_sql_create(sql_create_filename, regexp_sql_create, cursor)
return None
if __name__ == "__main__":
db_file_create('dhcp_snooping.db', 'dhcp_snooping_schema.sql')
| [
"[email protected]"
] | |
32c3db21179f71d7d3b81b90f71abe18b6ebcc50 | a757953b0fab1c4d05ce6b70b68b411df42bb887 | /raredecay/run_config/config.py | bf95bbd8a144a333eb7c63e6c9faeb67b38b1b7f | [
"Apache-2.0"
] | permissive | efueger/raredecay | 4e6ae0cff4bde925e2985335793dc7a138a5c772 | 4a92742016b2aea27e5156fee168d69d2c0361d0 | refs/heads/master | 2021-01-01T20:37:16.970625 | 2017-07-31T14:48:35 | 2017-07-31T14:48:35 | 98,898,305 | 0 | 0 | null | 2017-07-31T14:31:54 | 2017-07-31T14:31:54 | null | UTF-8 | Python | false | false | 1,379 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 16 13:44:43 2016
The configuration file for external operations.
@author: Jonas Eschle "Mayou36"
"""
RUN_NAME = 'Classifier optimization'
run_message = str("This could be your advertisement" +
" ")
OUTPUT_CFG = dict(
run_name=RUN_NAME,
output_path=None,
del_existing_folders=False,
output_folders=dict(
log="log",
plots="plots",
results="results",
config="config"
)
)
save_fig_cfg = dict(
file_format=['png', 'pdf'],
to_pickle=True,
dpi=150,
figsize=(2,10)
)
# ==============================================================================
# LOGGER CONFIGURATION BEGIN
# ==============================================================================
logger_cfg = dict(
logging_mode='both', # define where the logger is written to
# take 'both', 'file', 'console' or 'no'
log_level_file='debug',
# specifies the level to be logged to the file
    log_level_console='warning',
    # specifies the level to be logged to the console
overwrite_file=True,
# specifies whether it should overwrite the log file each time
# or instead make a new one each run
log_file_name='logfile_',
    # the beginning of the logfile name, like 'project1'
log_file_dir=None # will be set automatically
)
| [
"[email protected]"
] | |
2a56ad3e779b445de933f69ba162ea411564f49d | 3de6b4bbaf86afe1ff87b0d829bcba014a8a9696 | /.history/bshare/settings_20200511214108.py | 60ecb0dc2ccdb72f5774bfa11a473c0e11e25761 | [] | no_license | codermythbuster/bs | f0b758ab396e50f744aa29c7ecd58354b7df06df | 3687302da8f9fe5a8f75d52ba429f14e2f09c67e | refs/heads/master | 2022-08-04T22:27:59.987215 | 2020-05-26T11:24:01 | 2020-05-26T11:24:01 | 263,302,705 | 0 | 0 | null | 2020-05-26T09:58:03 | 2020-05-12T10:16:30 | HTML | UTF-8 | Python | false | false | 3,088 | py | """
Django settings for bshare project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!gb_xpk*t)1xyzhykn!jpc+^3$&mxon$neoto-omg8+44s0l8='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bshare.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bshare.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
b05404ce46ceca3242895585057d1a8c0e42e276 | 3d5fecd85726e1d44ce903de031871002f16a7f3 | /60.py | 6a11811bc200d642cc5233e4aee66c41fe2876a6 | [] | no_license | SereDim/Trash | 886d5729e67b187686afd4de03fc6bcd0c21d1d1 | 608ec4231e38ab43c08c5ec4ad5ef78d1a1975f4 | refs/heads/master | 2022-04-19T18:09:04.465814 | 2020-04-21T08:46:30 | 2020-04-21T08:46:30 | 240,449,188 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | '''
Given a one-dimensional array of 10 integers, count the longest run of
identical numbers appearing consecutively in it.
Серебренніков Дмитро
'''
a = [1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 6, 7, 8, 8]
longest = 1  # longest run of identical consecutive numbers found so far
current = 1  # length of the current run
for i in range(1, len(a)):
    if a[i] == a[i - 1]:
        current += 1
    else:
        current = 1
    if current > longest:
        longest = current
print(longest)
'''
I thought the task asked to count how many of the largest numbers there were,
though I have no idea why I read it that way.
'''
| [
"[email protected]"
] | |
7f21a3c4b4eab2603971a2c036ccf0062bc692a0 | 92d5c15b92356de9f66d2d4738f3c6f00ef2796f | /alembic/versions/11a00705ac61_added_a_bunch_of_gra.py | 125da279586118ae1e213bc3e51a31aadf58a062 | [] | no_license | colinmorris/moz-graphs | 2f88472b7ad23ee0c63977c2151ac102af475769 | f412c0564fb210327436da0468f78932bd21dca0 | refs/heads/master | 2016-09-06T04:36:39.322822 | 2013-07-27T22:00:14 | 2013-07-27T22:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | """added a bunch of graph vars for assignee
Revision ID: 11a00705ac61
Revises: 48044ce97c4f
Create Date: 2013-04-08 10:21:12.247290
"""
# revision identifiers, used by Alembic.
revision = '11a00705ac61'
down_revision = '48044ce97c4f'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('bugmonths', sa.Column('assignee_constraint_prior_month', sa.Float(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('bugmonths', 'assignee_constraint_prior_month')
### end Alembic commands ###
| [
"[email protected]"
] | |
9bb977254cc1d833a7b9b9ab6e3e06da9b351b2c | 08d994afb95bd2c197bf95ee897e329f1ab6efe3 | /exceptions.py | f8a043c2763bd05eed42d36ba942409769a73576 | [] | no_license | hiki0505/Avito_task | 239ca863f87fffc1648bfe2beeaeb40b5bb34317 | 23d029544887d874dbfc6ba0c10cddb0d18da3a5 | refs/heads/master | 2023-08-08T15:52:25.032076 | 2021-09-08T09:46:35 | 2021-09-08T09:46:35 | 404,261,365 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | from fastapi import HTTPException
class InvalidDimensionException(Exception):
pass
class EmptyArrayException(Exception):
pass
def handle_server_exceptions(func):
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPException as e:
            print(e)
return wrapper
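# Minimal usage sketch (hypothetical handler, not from the original project):
# @handle_server_exceptions
# def read_item():
#     raise HTTPException(status_code=404, detail="item not found")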
| [
"[email protected]"
] | |
1c356a0590a1ea7f7ecb9aeb128b9e9854057920 | cad926eefb51e70265ad116dff7b94fecc74ca23 | /main.py | cff450724a50f641349bbcf0e93346ecba9c9858 | [] | no_license | noahfoe/Machine-Learning-Flappy-Bird-AI | 331a58a00455ef58fe4996d15488ef5bbb998fb7 | 81556dc468f4f1b63eae967668a7b819f8bdbd94 | refs/heads/master | 2023-02-06T07:20:14.525591 | 2020-12-21T15:10:46 | 2020-12-21T15:10:46 | 287,111,460 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,987 | py | import pygame
import neat
import time
import os
import random
pygame.font.init()
WIDTH = 600
HEIGHT = 800
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Flappy Bird")
PIPE_IMG = pygame.transform.scale2x(pygame.image.load(
os.path.join("imgs", "pipe.png")).convert_alpha())
BG_IMG = pygame.transform.scale(pygame.image.load(
os.path.join("imgs", "bg.png")).convert_alpha(), (600, 900))
BIRD_IMGS = [pygame.transform.scale2x(pygame.image.load(
os.path.join("imgs", "bird" + str(x) + ".png"))) for x in range(1, 4)]
BASE_IMG = pygame.transform.scale2x(pygame.image.load(
os.path.join("imgs", "base.png")).convert_alpha())
STAT_FONT = pygame.font.SysFont("comicsans", 50)
class Bird:
IMGS = BIRD_IMGS
MAX_ROTATION = 25
ROT_VEL = 20
ANIMATION_TIME = 5
def __init__(self, x, y):
self.x = x
self.y = y
self.tilt = 0
self.tick_count = 0
self.vel = 0
self.height = self.y
self.img_count = 0
self.img = self.IMGS[0]
def jump(self):
self.vel = -10.5
self.tick_count = 0
self.height = self.y
def move(self):
self.tick_count += 1
        # displacement = (v * t) + (1.5 * t^2); gives us the arc of the bird's motion
d = self.vel*self.tick_count + 1.5 * self.tick_count**2
        if d >= 16:  # cap the downward speed (a terminal velocity)
            d = 16
        if d < 0:  # moving upwards: jump a little higher
            d -= 2
self.y = self.y + d
# tilt the bird
if d < 0 or self.y < self.height + 50:
if self.tilt < self.MAX_ROTATION:
self.tilt = self.MAX_ROTATION
else:
if self.tilt > -90:
self.tilt -= self.ROT_VEL
def draw(self, win):
self.img_count += 1
if self.img_count < self.ANIMATION_TIME:
self.img = self.IMGS[0]
elif self.img_count < self.ANIMATION_TIME * 2:
self.img = self.IMGS[1]
elif self.img_count < self.ANIMATION_TIME * 3:
self.img = self.IMGS[2]
elif self.img_count < self.ANIMATION_TIME * 4:
self.img = self.IMGS[1]
elif self.img_count == self.ANIMATION_TIME * 4 + 1:
self.img = self.IMGS[0]
self.img_count = 0
if self.tilt <= -80:
self.img = self.IMGS[1]
self.img_count = self.ANIMATION_TIME * 2
rotated_image = pygame.transform.rotate(self.img, self.tilt)
new_rect = rotated_image.get_rect(
center=self.img.get_rect(topleft=(self.x, self.y)).center)
win.blit(rotated_image, new_rect.topleft)
def get_mask(self):
return pygame.mask.from_surface(self.img)
class Pipe():
GAP = 200
VEL = 5
def __init__(self, x):
self.x = x
self.height = 0
self.top = 0
self.bottom = 0
self.PIPE_TOP = pygame.transform.flip(PIPE_IMG, False, True)
self.PIPE_BOTTOM = PIPE_IMG
self.passed = False
self.set_height()
def set_height(self):
self.height = random.randrange(50, 450)
self.top = self.height - self.PIPE_TOP.get_height()
self.bottom = self.height + self.GAP
def move(self):
self.x -= self.VEL
def draw(self, win):
win.blit(self.PIPE_TOP, (self.x, self.top))
win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))
def collide(self, bird):
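        # Pixel-perfect collision: overlap the bird's mask with each pipe's mask,
        # offset by their relative positions; any shared opaque pixel counts as a hit.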
bird_mask = bird.get_mask()
top_mask = pygame.mask.from_surface(self.PIPE_TOP)
bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
top_offset = (self.x - bird.x, self.top - round(bird.y))
bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))
b_point = bird_mask.overlap(bottom_mask, bottom_offset)
t_point = bird_mask.overlap(top_mask, top_offset)
if t_point or b_point:
return True
return False
class Base:
VEL = 5
WIDTH = BASE_IMG.get_width()
IMG = BASE_IMG
def __init__(self, y):
self.y = y
self.x1 = 0
self.x2 = self.WIDTH
def move(self):
self.x1 -= self.VEL
self.x2 -= self.VEL
        # cycle the two identical base images so the ground scrolls in one continuous motion
if self.x1 + self.WIDTH < 0:
self.x1 = self.x2 + self.WIDTH
if self.x2 + self.WIDTH < 0:
self.x2 = self.x1 + self.WIDTH
def draw(self, win):
win.blit(self.IMG, (self.x1, self.y))
win.blit(self.IMG, (self.x2, self.y))
def draw_window(win, birds, pipes, base, score):
win.blit(BG_IMG, (0, 0))
for pipe in pipes:
pipe.draw(win)
text = STAT_FONT.render("Score: " + str(score), 1, (255, 255, 255))
win.blit(text, (WIDTH - 10 - text.get_width(), 10))
base.draw(win)
for bird in birds:
bird.draw(win)
pygame.display.update()
def eval_genomes(genomes, config):
nets = [] # neural networds
ge = [] # genomes
birds = []
for _, g in genomes:
net = neat.nn.FeedForwardNetwork.create(g, config)
nets.append(net)
birds.append(Bird(230, 350))
g.fitness = 0
ge.append(g)
base = Base(730)
pipes = [Pipe(700)]
clock = pygame.time.Clock()
score = 0
run = True
while run:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.quit()
quit()
pipe_ind = 0
if len(birds) > 0:
if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
pipe_ind = 1
else:
run = False
break
for x, bird in enumerate(birds):
bird.move()
            ge[x].fitness += 0.1  # this loop runs 30 times a second, hence only a small reward
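            # Network inputs: the bird's height plus its vertical distances to the
            # next pipe's top and bottom openings; one output decides whether to jump.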
output = nets[x].activate((bird.y, abs(
bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))
if output[0] > 0.5:
bird.jump()
add_pipe = False
rem = [] # list of removed pipes
for pipe in pipes:
for x, bird in enumerate(birds):
if pipe.collide(bird):
ge[x].fitness -= 1
birds.remove(bird)
nets.pop(x)
ge.pop(x)
if not pipe.passed and pipe.x < bird.x:
pipe.passed = True
add_pipe = True
# if pipe is off the screen, remove it
if pipe.x + pipe.PIPE_TOP.get_width() < 0:
rem.append(pipe)
pipe.move()
# add pipe
if add_pipe:
score += 1
for g in ge:
g.fitness += 5
pipes.append(Pipe(730))
# remove pipes
for r in rem:
pipes.remove(r)
for x, bird in enumerate(birds):
if bird.y + bird.img.get_height() >= 730 or bird.y < 0:
birds.pop(x)
nets.pop(x)
ge.pop(x)
base.move()
draw_window(WIN, birds, pipes, base, score)
def run(config_path):
config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
p = neat.Population(config)
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
winner = p.run(eval_genomes, 50)
if __name__ == "__main__":
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, "config-feedforward.txt")
run(config_path)
| [
"[email protected]"
] | |
ac0452c4ee1cba70bc0b8a8ea20a040ea0865b34 | 4e998c1e66d6be88477746b19a68491ab4677ded | /reinforcement/valueIterationAgents.py | 54157c9492e24710e8704b95d05ce454f53f0124 | [] | no_license | Sinaeskandari/CS188-Projects | 18ee4c9dd6dd930ea8d4ee4ed90bc278283d9b97 | 8620820fe011fa727f6a13391ab4bde1b2077dad | refs/heads/master | 2023-04-12T21:02:47.647192 | 2021-05-12T12:46:36 | 2021-05-12T12:46:36 | 366,712,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,905 | py | # valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
import mdp, util
from learningAgents import ValueEstimationAgent
import collections
import gridworld
class ValueIterationAgent(ValueEstimationAgent):
"""
* Please read learningAgents.py before reading this.*
A ValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs value iteration
for a given number of iterations using the supplied
discount factor.
"""
def __init__(self, mdp: gridworld.Gridworld, discount=0.9, iterations=100):
"""
Your value iteration agent should take an mdp on
construction, run the indicated number of iterations
and then act according to the resulting policy.
Some useful mdp methods you will use:
mdp.getStates()
mdp.getPossibleActions(state)
mdp.getTransitionStatesAndProbs(state, action)
mdp.getReward(state, action, nextState)
mdp.isTerminal(state)
"""
self.mdp = mdp
self.discount = discount
self.iterations = iterations
self.values = util.Counter() # A Counter is a dict with default 0
self.runValueIteration()
def runValueIteration(self):
# Write value iteration code here
"*** YOUR CODE HERE ***"
for _ in range(self.iterations):
values = self.values.copy()
for state in self.mdp.getStates():
if self.mdp.isTerminal(state):
# first state is 'TERMINAL_STATE'
continue
q_values = []
for action in self.mdp.getPossibleActions(state):
q_value = 0
for t in self.mdp.getTransitionStatesAndProbs(state, action):
q_value += (t[1] * (
self.mdp.getReward(state, action, t[0]) + (self.discount * self.values[t[0]])))
q_values.append(q_value)
values[state] = max(q_values)
self.values = values
def getValue(self, state):
"""
Return the value of the state (computed in __init__).
"""
return self.values[state]
def computeQValueFromValues(self, state, action):
"""
Compute the Q-value of action in state from the
value function stored in self.values.
"""
"*** YOUR CODE HERE ***"
q = 0
for t in self.mdp.getTransitionStatesAndProbs(state, action):
q += (t[1] * (self.mdp.getReward(state, action, t[0]) + (self.discount * self.values[t[0]])))
return q
def computeActionFromValues(self, state):
"""
The policy is the best action in the given state
according to the values currently stored in self.values.
You may break ties any way you see fit. Note that if
there are no legal actions, which is the case at the
terminal state, you should return None.
"""
"*** YOUR CODE HERE ***"
if self.mdp.isTerminal(state):
return
q_vals = {}
for action in self.mdp.getPossibleActions(state):
q_vals[self.computeQValueFromValues(state, action)] = action
return q_vals[max(q_vals)]
def getPolicy(self, state):
return self.computeActionFromValues(state)
def getAction(self, state):
"Returns the policy at the state (no exploration)."
return self.computeActionFromValues(state)
def getQValue(self, state, action):
return self.computeQValueFromValues(state, action)
class AsynchronousValueIterationAgent(ValueIterationAgent):
"""
* Please read learningAgents.py before reading this.*
An AsynchronousValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs cyclic value iteration
for a given number of iterations using the supplied
discount factor.
"""
def __init__(self, mdp, discount=0.9, iterations=1000):
"""
Your cyclic value iteration agent should take an mdp on
construction, run the indicated number of iterations,
and then act according to the resulting policy. Each iteration
updates the value of only one state, which cycles through
the states list. If the chosen state is terminal, nothing
happens in that iteration.
Some useful mdp methods you will use:
mdp.getStates()
mdp.getPossibleActions(state)
mdp.getTransitionStatesAndProbs(state, action)
mdp.getReward(state)
mdp.isTerminal(state)
"""
ValueIterationAgent.__init__(self, mdp, discount, iterations)
def runValueIteration(self):
"*** YOUR CODE HERE ***"
state_count = len(self.mdp.getStates())
for i in range(self.iterations):
values = self.values.copy()
state_index = i % state_count
state = self.mdp.getStates()[state_index]
if self.mdp.isTerminal(state):
continue
q_values = []
for action in self.mdp.getPossibleActions(state):
q_value = 0
for t in self.mdp.getTransitionStatesAndProbs(state, action):
q_value += (t[1] * (
self.mdp.getReward(state, action, t[0]) + (self.discount * self.values[t[0]])))
q_values.append(q_value)
values[state] = max(q_values)
self.values = values
class PrioritizedSweepingValueIterationAgent(AsynchronousValueIterationAgent):
"""
* Please read learningAgents.py before reading this.*
A PrioritizedSweepingValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs prioritized sweeping value iteration
for a given number of iterations using the supplied parameters.
"""
def __init__(self, mdp, discount=0.9, iterations=100, theta=1e-5):
"""
Your prioritized sweeping value iteration agent should take an mdp on
construction, run the indicated number of iterations,
and then act according to the resulting policy.
"""
self.theta = theta
ValueIterationAgent.__init__(self, mdp, discount, iterations)
def runValueIteration(self):
"*** YOUR CODE HERE ***"
predecessors = {}
for state in self.mdp.getStates():
if self.mdp.isTerminal(state):
continue
for action in self.mdp.getPossibleActions(state):
for t in self.mdp.getTransitionStatesAndProbs(state, action):
try:
predecessors[t[0]].add(state)
except KeyError:
predecessors[t[0]] = set()
predecessors[t[0]].add(state)
priority_queue = util.PriorityQueue()
for state in self.mdp.getStates():
if self.mdp.isTerminal(state):
continue
max_q_value = 0
q_values = []
actions = self.mdp.getPossibleActions(state)
if len(actions) != 0:
for action in actions:
q_values.append(self.computeQValueFromValues(state, action))
max_q_value = max(q_values)
diff = abs(self.values[state] - max_q_value)
priority_queue.push(state, -diff)
for i in range(self.iterations):
if priority_queue.isEmpty():
return
state = priority_queue.pop()
if not self.mdp.isTerminal(state):
q_values = []
for action in self.mdp.getPossibleActions(state):
q_values.append(self.computeQValueFromValues(state, action))
self.values[state] = max(q_values)
for p in predecessors[state]:
if not self.mdp.isTerminal(p):
max_q_value = 0
q_values = []
actions = self.mdp.getPossibleActions(p)
if len(actions) != 0:
for action in actions:
q_values.append(self.computeQValueFromValues(p, action))
max_q_value = max(q_values)
diff = abs(self.values[p] - max_q_value)
if diff > self.theta:
priority_queue.update(p, -diff)
| [
"[email protected]"
] | |
112219d7dc6e8bfc69df3e1a6997b9da7b62b6f0 | 73ed957adadd6e1a274ddf43f8fc44a7687c5fdf | /templatetags/generator.py | bab03337b3d64b93823520c849653e978a879442 | [] | no_license | HubSpotArchive/Freemarker2Django | 8caa733b1b0a8d0561c128daf6d91b45d71f5732 | a471fefc89fb42d772a85cf64902b09a21493b62 | refs/heads/master | 2021-01-17T05:03:55.691791 | 2011-06-21T19:55:13 | 2011-06-21T19:55:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | """
Generator for templatetags for a given macro.
"""
TEMPLATE = """from django import template
from base import matched_inclusion_tag
register = template.Library()
@matched_inclusion_tag(register, '%(macro_name)s.html')
def %(macro_name)s(%(formatted_kwargs)s):
return {
%(formatted_map)s
}
"""
MAP_INDENT = 8
class Generator(object):
"""
Wrapper object for info about the template tag that we are generating.
"""
def __init__(self, macro_name, parameters_map):
"""
@macro_name - The name of the macro the template tag is being formatted for.
@parameters_map - Map of parameter names to default values.
(Actually a list of tuples so that we can emulate an ordered dict,
since Jython is at Python 2.5)
"""
self.macro_name = macro_name
self.parameters_map = parameters_map
def _format_kwargs(self):
values_map = []
for key, _value in self.parameters_map:
if _value == False:
value = 'False'
elif _value == True:
value = 'True'
else:
value = "'%s'" % _value
values_map.append((key, value))
return ", ".join(["%s=%s" % (key, value) for key, value in values_map])
def _format_map(self):
return "\n".join(["%(indent)s'%(key)s': %(key)s," % {
'indent': MAP_INDENT * ' ',
'key': key,
} for key, value in self.parameters_map])
def render(self):
return TEMPLATE % {
'macro_name': self.macro_name,
'formatted_kwargs': self._format_kwargs(),
'formatted_map': self._format_map(),
}
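if __name__ == "__main__":
    # Minimal usage sketch; the macro name and parameters here are hypothetical,
    # not taken from any real template in this project.
    demo = Generator("example_macro", [("title", "Hello"), ("visible", True)])
    print(demo.render())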
| [
"[email protected]"
] | |
59602afbce466d3a9113e0c7e330db6597cd02fa | cb242b1fdf3889d4df347f3102daf6584a0c40a4 | /threeandthrees/words.py | d562ed188fda92eba7607308a68f44f8b4960f5d | [
"MIT"
] | permissive | bwarren2/threeandthrees | d927711f0927a8e3493cd201ffdd8d930e5586f2 | 2a09a398ab332c27e2e7722e612fa18318b50e60 | refs/heads/master | 2023-05-25T23:28:11.706181 | 2021-04-30T15:12:45 | 2021-04-30T15:12:45 | 68,949,353 | 0 | 0 | MIT | 2021-04-30T15:12:46 | 2016-09-22T18:18:05 | Python | UTF-8 | Python | false | false | 2,546 | py | from collections import defaultdict, OrderedDict
import random
from colorama import init, Fore
import os
import re
init(autoreset=True)
safe_pattern = re.compile('^[a-z]{9}$')
def extract_words():
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + '/american-english.txt', 'r') as f:
raw_data = f.read().split('\n')
data = list(filter(is_clean, raw_data))
return data
def is_clean(word):
return re.search(safe_pattern, word) is not None
def extract_cores(wordlist):
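    # Group each nine-letter word by its middle three letters (indices 3..5),
    # the "core" that a player must complete on both sides.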
coremap = defaultdict(list)
for word in wordlist:
coremap[word[3:6]].append(word)
return coremap
all_words = extract_words()
coremap = extract_cores(all_words)
class Wordmonger(object):
def __init__(self, all_words, coremap):
self.words = all_words
self.coremap = coremap
self.challenge = OrderedDict()
def answer_count(self, candidate):
value = self.coremap.get(candidate, None)
if value is None:
return 0
else:
return len(value)
def answers(self, candidate):
return self.coremap.get(candidate, None)
def generate(self):
key = random.choice(list(self.coremap.keys()))
return key
# return self.coremap[key]
def check(self, arg):
return arg in self.coremap[arg[3:6]]
def show_challenge(self):
        for idx, (key, value) in enumerate(self.challenge.items(), 1):
if value is not None:
print(
"{idx}:\t {color}{word}".format(
**{
'idx': idx, 'word': value, 'color': Fore.GREEN
}
)
)
else:
print(
"{idx}:\t ___{core}___".format(
**{'idx': idx, 'core': key}
)
)
def formulate_challenge(self, n=10):
self.challenge = OrderedDict()
while n > 0:
new_core = random.choice(list(self.coremap.keys()))
if new_core not in list(self.challenge.keys()):
self.challenge[new_core] = None
n -= 1
def claim(self, answer):
key = answer[3:6]
if (
answer in self.coremap[key]
and key in list(self.challenge.keys())
):
self.challenge[key] = answer
return True
else:
return False
monger = Wordmonger(all_words, coremap)
| [
"[email protected]"
] | |
b3d4c02a6bc7af5a7905dee573b6da6451fe1ace | f425b3f11863f2a69e5e8d0a2798bc5cf0328267 | /NLP_Q2_Q3.py | 1ca220b6efa660a963fbb05b2ba1f0389dbacf27 | [] | no_license | yangliu1994/Qishi | 66df4f89c2908cdabaaac2ee36174b7062726feb | 50f59d7a8c4ccb982bf59c0ed4637c6c83cfafa2 | refs/heads/master | 2020-04-20T10:38:22.705789 | 2020-02-12T02:29:07 | 2020-02-12T02:29:07 | 168,794,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,600 | py | import numpy as np
class Dictionary:
def __init__(self, lst):
self.conn = {word: set() for word in lst}
for i, word1 in enumerate(lst):
for word2 in lst[i+1:]:
if self.check(word1, word2):
self.conn[word1].add(word2)
self.conn[word2].add(word1)
@staticmethod
def check(word1, word2):
if len(word1) == len(word2):
check = 0
for k, char in enumerate(word1):
if char == word2[k]:
if k == len(word1) - 1:
return True
continue
if abs(ord(char) - ord(word2[k])) >= 10:
break
check += 1
if check > 1:
break
if k == len(word1) - 1:
return True
return False
def distance(self, start, end):
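        # Breadth-first search over the connection map: grow the frontier one
        # substitution at a time and count how many steps it takes to reach `end`.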
start_set = set()
end_set = {start}
result = 0
while start_set != end_set:
third_set = end_set.copy()
for word in third_set - start_set:
if self.check(word, end):
return result + 1
if word == start:
for w in self.conn:
if self.check(word, w):
end_set.add(w)
else:
end_set = end_set.union(self.conn[word])
start_set = third_set
result += 1
return np.inf
def __str__(self):
return str(self.conn)
class SparseMatrixException(Exception):
pass
class SparseMatrix:
def __init__(self, num_rows, num_cols, values):
self.num_rows = num_rows
self.num_cols = num_cols
self.values = values
def __eq__(self, other):
return self.num_rows == other.num_rows and self.num_cols == other.num_cols and self.values == other.values
def __add__(self, other):
if not (self.num_rows == other.num_rows and self.num_cols == other.num_cols):
            raise SparseMatrixException('two matrices cannot be summed')
for key in other.values:
self.values[key] = self.values.get(key, 0) + other.values[key]
for key in self.values.copy():
if self.values[key] == 0:
self.values.pop(key)
return self
def __sub__(self, other):
if not (self.num_rows == other.num_rows and self.num_cols == other.num_cols):
            raise SparseMatrixException('two matrices cannot be subtracted')
for key in other.values:
self.values[key] = self.values.get(key, 0) - other.values[key]
for key in self.values.copy():
if self.values[key] == 0:
self.values.pop(key)
return self
def __mul__(self, other):
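        # Matrix product over stored entries only: (AB)[i, j] = sum_k A[i, k] * B[k, j],
        # so a pair of keys contributes exactly when key1's column equals key2's row.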
if isinstance(other, SparseMatrix):
if self.num_cols != other.num_rows:
                raise SparseMatrixException('two matrices cannot be multiplied')
new = SparseMatrix(self.num_rows, other.num_cols, {})
for key1 in self.values:
for key2 in other.values:
if key1[1] == key2[0]:
new.values[(key1[0], key2[1])] = new.values.get((key1[0], key2[1]), 0) + \
self.values[key1] * other.values[key2]
for key in new.values.copy():
if new.values[key] == 0:
new.values.pop(key)
return new
else:
for key in self.values:
self.values[key] *= other
return self
def __str__(self):
return str(self.values)
# Q2
print(Dictionary(['hot', 'dot', 'dog', 'lot', 'log']).distance('hit', 'cog'))
print(Dictionary(['hot', 'dot', 'don', 'dog', 'lot', 'log']).distance('hit', 'cog'))
print(Dictionary(['hot', 'cot', 'con', 'lot', 'log']).distance('hit', 'cog'))
# Q3
print(SparseMatrix(2, 2, {(0, 0): 1}) + SparseMatrix(2, 2, {(0, 0): 1, (1, 1): 1}) == \
SparseMatrix(2, 2, {(0, 0): 2, (1, 1): 1}))
print(SparseMatrix(2, 2, {(0, 0): 1}) - SparseMatrix(2, 2, {(0, 0): 1, (1, 1): 1}) == \
SparseMatrix(2, 2, {(1, 1): -1}))
print(SparseMatrix(2, 3, {(0, 0): 1}) * SparseMatrix(3, 2, {(0, 0): 1, (0, 1): 1}) == \
SparseMatrix(2, 2, {(0, 0): 1, (0, 1): 1}))
print(SparseMatrix(2, 2, {(0, 0): 1}) * 2 == SparseMatrix(2, 2, {(0, 0): 2}))
try:
    print(SparseMatrix(2, 3, {(0, 0): 1}) * SparseMatrix(4, 2, {(0, 0): 1, (0, 1): 1}))
except SparseMatrixException as e:
    print(e)  # a dimension mismatch now raises instead of returning the exception
### test commit
| [
"[email protected]"
] | |
04b77db5da018700a7d4c2a91e8ea2f351602677 | 44e6c62fa676c74ab44d57da2530ae039edf5a47 | /GUI.py | 8a24370f29a9b123bab4040a44cdb4cf010b23e4 | [] | no_license | DylanBennette/SUVAT-Education-Tool | a2ec9b6dfbe9f1b51c37a87e03ecb72d514644fe | d1f798009d3980a7eb170f5a7a58037f97d7a099 | refs/heads/master | 2021-01-23T03:28:07.484098 | 2014-11-11T20:18:26 | 2014-11-11T20:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128,560 | py | import Base
import sys
from PyQt4.QtGui import *
from A2_Physics_TestingClass import *
from A2_Physics_Test_Tester import *
from A2_Physics_Type_Controller import *
from A2_Physics_Student_Controller import *
from A2_Physics_Answers_Controller import *
from A2_Physics_Attempt_Controller import *
from A2_Physics_TestQuestions_Controller import *
from A2_Physics_Response_Controller import *
from A2_Physics_Propulsion_redone import *
from A2_Physics_Projectile import *
from PyQt4 import QtGui, QtCore
#MainWindow
class MainWindow(QMainWindow):
#constructor
def __init__(self):
#parentconstructor
super().__init__()
self.setWindowTitle('Physics Simulation')
self.setWindowIcon(QtGui.QIcon('physics.png'))
self.make_menubar()
def make_menubar(self):
"""This creates a menu bar on which i can run the entire program """
#this creates the exit action
exitAction = QtGui.QAction(QtGui.QIcon("exit.png"),"&exit", self)
exitAction.setShortcut('Ctrl+E+X')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.close)
#this runs the simulation menu
        DisplaySimulation = QtGui.QAction(QtGui.QIcon("Run Simulation.png"),"&Run Simulation", self)
DisplaySimulation.setShortcut("Ctrl+S")
        DisplaySimulation.setStatusTip("Displaying Simulation menu")
DisplaySimulation.triggered.connect(self.RunSimulation)
#this runs the Questions Menu
DisplayQuestions = QtGui.QAction(QtGui.QIcon("Run Test.png"),"&Select Test", self)
DisplayQuestions.setShortcut("Ctrl+T")
        DisplayQuestions.setStatusTip("Displaying Test Selection Menu")
DisplayQuestions.triggered.connect(self.TestSelection)
#this allows you to logout
DisplayPlayerName = QtGui.QAction(QtGui.QIcon("Display Questions.png"),"&Hello", self)
DisplayPlayerName.setShortcut("Ctrl+P")
DisplayPlayerName.setStatusTip("Dialog box for logging out")
DisplayPlayerName.triggered.connect(self.StudentLogout)
#This allows you to View the database editor
TestEditor = QtGui.QAction(QtGui.QIcon("Test Editor.png"),"&Test Editor", self)
TestEditor.setShortcut("Ctrl+T+E")
TestEditor.setStatusTip("Test Editor")
TestEditor.triggered.connect(self.TestViewWindow)
StudentEditor = QtGui.QAction(QtGui.QIcon("Student Editor.png"),"&Student Editor",self)
StudentEditor.setShortcut("Ctrl+S")
StudentEditor.setStatusTip("Student Editor")
StudentEditor.triggered.connect(self.StudentViewWindow)
QuestionEditor = QtGui.QAction(QtGui.QIcon("Question Editor.png"),"&Question Editor",self)
QuestionEditor.setShortcut("Ctrl+Q")
QuestionEditor.setStatusTip("Question Editor")
QuestionEditor.triggered.connect(self.QuestionViewWindow)
AttemptEditor = QtGui.QAction(QtGui.QIcon("Attempt Editor.png"),"&Attempt Editor",self)
AttemptEditor.setShortcut("Ctrl+A+T")
AttemptEditor.setStatusTip("Attempt Editor")
AttemptEditor.triggered.connect(self.AttemptViewWindow)
ResponseEditor = QtGui.QAction(QtGui.QIcon("Response Editor.png"),"&Response Editor",self)
ResponseEditor.setShortcut("Ctrl+R")
ResponseEditor.setStatusTip("Response Editor")
ResponseEditor.triggered.connect(self.ResponseViewWindow)
TestQuestionsEditor = QtGui.QAction(QtGui.QIcon("Test Questions Editor.png"),"&Test Questions Editor",self)
TestQuestionsEditor.setShortcut("Ctrl+T+Q")
TestQuestionsEditor.setStatusTip("Test Questions Editor")
TestQuestionsEditor.triggered.connect(self.TestQuestionsViewWindow)
TypeEditor = QtGui.QAction(QtGui.QIcon("Type Editor.png"),"&Type Editor",self)
TypeEditor.setShortcut("Ctrl+T+Y")
TypeEditor.setStatusTip("Type Editor")
TypeEditor.triggered.connect(self.TypeViewWindow)
AnswersEditor = QtGui.QAction(QtGui.QIcon("Answers Editor.png"),"&Answers Editor",self)
AnswersEditor.setShortcut("Ctrl+A+N")
AnswersEditor.setStatusTip("Answers Editor")
AnswersEditor.triggered.connect(self.AnswersViewWindow)
self.statusBar()
menubar = self.menuBar()
fileMenu = menubar.addMenu("&File")
SimulationMenu = menubar.addMenu("&Simulation")
QuestionsMenu = menubar.addMenu("&Testing")
DatabaseMenu = menubar.addMenu("&Editor")
DatabaseMenu.addAction(TestEditor)
DatabaseMenu.addAction(QuestionEditor)
DatabaseMenu.addAction(TypeEditor)
DatabaseMenu.addAction(StudentEditor)
DatabaseMenu.addAction(AnswersEditor)
DatabaseMenu.addAction(AttemptEditor)
DatabaseMenu.addAction(ResponseEditor)
DatabaseMenu.addAction(TestQuestionsEditor)
SimulationMenu.addAction(DisplaySimulation)
QuestionsMenu.addAction(DisplayQuestions)
fileMenu.addAction(exitAction)
self.setGeometry(300, 300, 300, 200)
self.show()
def TestViewWindow(self):
test = Test_Controller()
results = test.Return_TestsNoInput()
Maxrows = len(results)
self.setWindowTitle('Test Editor')
self.RecordTable = QTableWidget(Maxrows,2)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Test Name'])
row = 0
column = 0
for i in range(0,2):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
                self.RecordTable.setItem(row, column, item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
#create table and push buttons
        #Search for Test
self.SearchItem = QLineEdit('Test_Name')
self.SearchSubmit = QPushButton('Start Search')
self.AddTestButton = QPushButton('Add')
self.EditTestButton = QPushButton('Edit')
self.DeleteTestButton = QPushButton('Delete')
#create layout
self.TableBox = QVBoxLayout()
self.TableBox.addWidget(self.RecordTable)
#create vertical layout
self.ButtonLayout = QHBoxLayout()
self.ButtonLayout.addWidget(self.AddTestButton)
self.ButtonLayout.addWidget(self.EditTestButton)
self.ButtonLayout.addWidget(self.DeleteTestButton)
        #second-to-last layout
self.LastLayout = QVBoxLayout()
self.LastLayout.addLayout(self.TableBox)
self.LastLayout.addWidget(self.SearchItem)
self.LastLayout.addWidget(self.SearchSubmit)
self.LastLayout.addLayout(self.ButtonLayout)
#Final Layout
self.TestLayout = QWidget()
self.TestLayout.setLayout(self.LastLayout)
self.setCentralWidget(self.TestLayout)
#connect
self.AddTestButton.clicked.connect(self.AddTest)
self.EditTestButton.clicked.connect(self.EditTest)
self.DeleteTestButton.clicked.connect(self.DeleteTest)
self.SearchSubmit.clicked.connect(self.SearchTestLayout)
def SearchTestLayout(self):
#Items
Search = self.SearchItem.text()
test = TestTestingClass()
results = test.SearchFortest(Search)
self.setWindowTitle('Test Editor')
#create table and push buttons
        if results == [] or results is None:
print('The Search failed')
Maxrows = len(results)
self.RecordTableSearchresults = QTableWidget(Maxrows,2)
self.RecordTableSearchresults.setHorizontalHeaderLabels(['ID Number','Test Name'])
row = 0
column = 0
for i in range(0,2):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
Item = QTableWidgetItem(additem)
self.RecordTableSearchresults.setItem(row,column,Item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #Search for Test
self.SearchItem = QLineEdit('Test_Name')
self.SearchSubmit = QPushButton('Start Search')
self.AddTestButton = QPushButton('Add')
self.EditTestButton = QPushButton('Edit')
self.DeleteTestButton = QPushButton('Delete')
#create layout
self.TableBox = QVBoxLayout()
self.TableBox.addWidget(self.RecordTableSearchresults)
#create vertical layout
self.ButtonLayout = QHBoxLayout()
self.ButtonLayout.addWidget(self.AddTestButton)
self.ButtonLayout.addWidget(self.EditTestButton)
self.ButtonLayout.addWidget(self.DeleteTestButton)
        #second-to-last layout
self.LastLayout = QVBoxLayout()
self.LastLayout.addLayout(self.TableBox)
self.LastLayout.addWidget(self.SearchItem)
self.LastLayout.addWidget(self.SearchSubmit)
self.LastLayout.addLayout(self.ButtonLayout)
#Final Layout
self.TestLayout = QWidget()
self.TestLayout.setLayout(self.LastLayout)
self.setCentralWidget(self.TestLayout)
#connect
self.AddTestButton.clicked.connect(self.AddTest)
self.EditTestButton.clicked.connect(self.EditTest)
self.DeleteTestButton.clicked.connect(self.DeleteTest)
def AddTest(self):
test = Test_Controller()
results = test.Return_TestsNoInput()
self.setWindowTitle('Add Test')
#create table and push buttons
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,2)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Test Name'])
row = 0
column = 0
for i in range(0,2):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.TestLabel = QLabel('Enter an appropriate Test here!')
self.TestName = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create table layout
self.TableLayout = QVBoxLayout()
self.TableLayout.addWidget(self.RecordTable)
#create layout
self.AddTestLayout = QGridLayout()
self.AddTestLayout.addWidget(self.TestLabel,0,0)
self.AddTestLayout.addWidget(self.TestName,1,0)
self.AddTestLayout.addWidget(self.SubmitChanges,2,0)
self.FinalLayout = QVBoxLayout()
self.FinalLayout.addLayout(self.TableLayout)
self.FinalLayout.addLayout(self.AddTestLayout)
self.AddTestLayout = QWidget()
self.AddTestLayout.setLayout(self.FinalLayout)
self.setCentralWidget(self.AddTestLayout)
        #connect the submit button to the handler that reads the line edit
self.SubmitChanges.clicked.connect(self.AddTestchanges)
def AddTestchanges(self):
self.Test_Name = self.TestName.text()
test = Test_Controller()
test.Add_Test(self.Test_Name)
def EditTest(self):
test = Test_Controller()
results = test.Return_TestsNoInput()
self.setWindowTitle('Test Editor')
#create table and push buttons
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,2)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Test Name'])
row = 0
column = 0
for i in range(0,2):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        self.Test_IDLabel = QLabel('Enter an appropriate Test ID')
self.Test_NameLabel = QLabel('Enter an appropriate Test')
self.Test_ID = QLineEdit('Test_ID')
self.TestName = QLineEdit('Test_Name')
self.SubmitChanges = QPushButton('Submit Results')
#create layout for labels and line edits
self.InputLayout = QGridLayout()
self.InputLayout.addWidget(self.Test_IDLabel,0,0)
self.InputLayout.addWidget(self.Test_NameLabel,0,1)
self.InputLayout.addWidget(self.Test_ID,1,0)
self.InputLayout.addWidget(self.TestName,1,1)
#mainWidget
self.FinalLayout =QVBoxLayout()
self.FinalLayout.addWidget(self.RecordTable)
self.FinalLayout.addLayout(self.InputLayout)
self.FinalLayout.addWidget(self.SubmitChanges)
self.EditQuestion = QWidget()
self.EditQuestion.setLayout(self.FinalLayout)
self.setCentralWidget(self.EditQuestion)
#connections
self.SubmitChanges.clicked.connect(self.UpdateTestchanges)
def UpdateTestchanges(self):
Test_ID = self.Test_ID.text()
Test_Name = self.TestName.text()
test = Test_Controller()
print(Test_ID,Test_Name)
test.Update_Test(Test_ID,Test_Name)
def DeleteTest(self):
test = Test_Controller()
results = test.Return_TestsNoInput()
        self.setWindowTitle('Test Editor')
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,2)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Test Name'])
row = 0
column = 0
for i in range(0,2):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #input widgets for choosing the test to delete
        self.TestIDLabel = QLabel('Enter an appropriate Test ID')
self.TestIDinfo = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create layout for labels and line edits
self.InputLayout = QGridLayout()
self.InputLayout.addWidget(self.TestIDLabel,0,0)
self.InputLayout.addWidget(self.TestIDinfo,0,1)
#mainWidget
self.TestLayout =QVBoxLayout()
self.TestLayout.addWidget(self.RecordTable)
self.TestLayout.addLayout(self.InputLayout)
self.TestLayout.addWidget(self.SubmitChanges)
        #distinct attribute name so the DeleteTest method is not shadowed
        self.DeleteTestWidget = QWidget()
        self.DeleteTestWidget.setLayout(self.TestLayout)
        self.setCentralWidget(self.DeleteTestWidget)
#connections
self.SubmitChanges.clicked.connect(self.DeleteTestchanges)
def DeleteTestchanges(self):
Test_ID = self.TestIDinfo.text()
test = Test_Controller()
test.Delete_Test(Test_ID)
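    #Hedged sketch: the handlers above pass raw line-edit text straight to the
    #controllers. A check like this (illustrative only, not wired in) would
    #catch empty or non-numeric IDs before they reach the database:
    def IsValidID(self, text):
        return text.strip().isdigit()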
    def StudentLogout(self):
        #Actions
        self.CancelAction = QPushButton("Cancel")
        self.ConfirmAction = QPushButton("Confirm")
        self.DialogLabel = QLabel("Are you sure you would like to Logout?")
        #button layout
        self.ActionBox = QGridLayout()
        self.ActionBox.addWidget(self.ConfirmAction,0,0)
        self.ActionBox.addWidget(self.CancelAction,0,1)
        #main layout
        self.LogoutBox = QVBoxLayout()
        self.LogoutBox.addWidget(self.DialogLabel)
        self.LogoutBox.addLayout(self.ActionBox)
        self.LogoutWidget = QWidget()
        self.LogoutWidget.setLayout(self.LogoutBox)
        self.setCentralWidget(self.LogoutWidget)
        #connections: there is no login screen to return to yet, so Confirm
        #just reports that; Cancel closes the window
        self.ConfirmAction.clicked.connect(lambda: print("The login screen does not exist yet!"))
        self.CancelAction.clicked.connect(self.close)
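    #QuestionViewWindow: lists all questions (ID, text, type) from
    #Questions_Controller, with a search box and Add/Edit/Delete buttons.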
def QuestionViewWindow(self):
Question = Questions_Controller()
results = Question.Search_QuestionsNoInput()
Maxrows = len(results)
self.setWindowTitle('Question Editor')
self.RecordTable = QTableWidget(Maxrows,3)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question','Question Type'])
row = 0
column = 0
for i in range(0,3):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
                self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.SearchItem = QLineEdit()
        self.SearchItem.setPlaceholderText('Question_Name')
self.SearchSubmit = QPushButton('Start Search')
self.AddQuestionButton = QPushButton('Add')
self.EditQuestionButton = QPushButton('Edit')
self.DeleteQuestionButton = QPushButton('Delete')
#create layout
self.TableBox = QVBoxLayout()
self.TableBox.addWidget(self.RecordTable)
        #create horizontal button layout
        self.ButtonLayout = QHBoxLayout()
        self.ButtonLayout.addWidget(self.AddQuestionButton)
        self.ButtonLayout.addWidget(self.EditQuestionButton)
        self.ButtonLayout.addWidget(self.DeleteQuestionButton)
        #second to last layout
self.LastLayout = QVBoxLayout()
self.LastLayout.addLayout(self.TableBox)
self.LastLayout.addWidget(self.SearchItem)
self.LastLayout.addWidget(self.SearchSubmit)
self.LastLayout.addLayout(self.ButtonLayout)
#Final Layout
self.QuestionLayout = QWidget()
self.QuestionLayout.setLayout(self.LastLayout)
self.setCentralWidget(self.QuestionLayout)
#connect
self.AddQuestionButton.clicked.connect(self.AddQuestion)
self.EditQuestionButton.clicked.connect(self.EditQuestion)
self.DeleteQuestionButton.clicked.connect(self.DeleteQuestion)
self.SearchSubmit.clicked.connect(self.SearchQuestionLayout)
def SearchQuestionLayout(self):
#Items
Search = self.SearchItem.text()
Question = Questions_Controller()
results = Question.SearchForQuestion(Search)
self.setWindowTitle('Question Editor')
#create table and push buttons
        if not results:
            print('The Search failed')
            results = []
Maxrows = len(results)
self.RecordTableSearchresults = QTableWidget(Maxrows,3)
self.RecordTableSearchresults.setHorizontalHeaderLabels(['ID Number','Question Name','Question Type'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
Item = QTableWidgetItem(additem)
self.RecordTableSearchresults.setItem(row,column,Item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.SearchItem = QLineEdit()
        self.SearchItem.setPlaceholderText('Question_Name')
self.SearchSubmit = QPushButton('Start Search')
self.AddQuestionButton = QPushButton('Add')
self.EditQuestionButton = QPushButton('Edit')
self.DeleteQuestionButton = QPushButton('Delete')
#create layout
self.TableBox = QVBoxLayout()
self.TableBox.addWidget(self.RecordTableSearchresults)
        #create horizontal button layout
        self.ButtonLayout = QHBoxLayout()
        self.ButtonLayout.addWidget(self.AddQuestionButton)
        self.ButtonLayout.addWidget(self.EditQuestionButton)
        self.ButtonLayout.addWidget(self.DeleteQuestionButton)
        #second to last layout
self.LastLayout = QVBoxLayout()
self.LastLayout.addLayout(self.TableBox)
self.LastLayout.addWidget(self.SearchItem)
self.LastLayout.addWidget(self.SearchSubmit)
self.LastLayout.addLayout(self.ButtonLayout)
#Final Layout
self.QuestionLayout = QWidget()
self.QuestionLayout.setLayout(self.LastLayout)
self.setCentralWidget(self.QuestionLayout)
#connect
self.AddQuestionButton.clicked.connect(self.AddQuestion)
self.EditQuestionButton.clicked.connect(self.EditQuestion)
self.DeleteQuestionButton.clicked.connect(self.DeleteQuestion)
def AddQuestion(self):
Question = Questions_Controller()
results = Question.Search_QuestionsNoInput()
        self.setWindowTitle('Add Question')
        #create table of existing questions plus input widgets
        Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,3)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question Name','Question Type'])
row = 0
column = 0
for i in range(0,3):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.QuestionLabel = QLabel('Enter an appropriate Question here!')
self.Question = QLineEdit()
self.QuestionTypeLabel = QLabel('Enter an appropriate Question Type here!')
self.QuestionType= QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create table layout
self.TableLayout = QVBoxLayout()
self.TableLayout.addWidget(self.RecordTable)
#create layout
self.AddQuestionLayout = QGridLayout()
self.AddQuestionLayout.addWidget(self.QuestionLabel,0,0)
self.AddQuestionLayout.addWidget(self.Question,1,0)
self.AddQuestionLayout.addWidget(self.QuestionTypeLabel,0,1)
self.AddQuestionLayout.addWidget(self.QuestionType,1,1)
self.AddQuestionLayout.addWidget(self.SubmitChanges,2,0)
self.FinalLayout = QVBoxLayout()
self.FinalLayout.addLayout(self.TableLayout)
self.FinalLayout.addLayout(self.AddQuestionLayout)
        self.AddQuestionWidget = QWidget()
        self.AddQuestionWidget.setLayout(self.FinalLayout)
        self.setCentralWidget(self.AddQuestionWidget)
#something about result of line edit etc
self.SubmitChanges.clicked.connect(self.AddQuestionchanges)
def AddQuestionchanges(self):
self.Question_Name = self.Question.text()
self.Question_Type =self.QuestionType.text()
Question = Questions_Controller()
Question.Add_Question(self.Question_Name,self.Question_Type)
def EditQuestion(self):
Question = Questions_Controller()
results = Question.Search_QuestionsNoInput()
self.setWindowTitle('Question Editor')
#create table and push buttons
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,3)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question','Question Type'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        self.Question_IDLabel = QLabel('Enter an appropriate Question ID')
        self.Question_NameLabel = QLabel('Enter an appropriate Question')
        self.QuestionTypeLabel = QLabel('Enter an appropriate Question Type here!')
        self.QuestionType = QLineEdit()
        self.QuestionType.setPlaceholderText('Question_Type')
        self.Question_ID = QLineEdit()
        self.Question_ID.setPlaceholderText('Question_ID')
        self.QuestionName = QLineEdit()
        self.QuestionName.setPlaceholderText('Question_Name')
self.SubmitChanges = QPushButton('Submit Results')
#create layout for labels and line edits
self.EditQuestionLayout = QGridLayout()
self.EditQuestionLayout.addWidget(self.Question_IDLabel,0,0)
self.EditQuestionLayout.addWidget(self.Question_NameLabel,0,2)
self.EditQuestionLayout.addWidget(self.QuestionTypeLabel,0,1)
self.EditQuestionLayout.addWidget(self.QuestionType,1,1)
self.EditQuestionLayout.addWidget(self.Question_ID,1,0)
self.EditQuestionLayout.addWidget(self.QuestionName,1,2)
#mainWidget
self.FinalLayout =QVBoxLayout()
self.FinalLayout.addWidget(self.RecordTable)
self.FinalLayout.addLayout(self.EditQuestionLayout)
self.FinalLayout.addWidget(self.SubmitChanges)
        #distinct attribute name so the EditQuestion method is not shadowed
        self.EditQuestionWidget = QWidget()
        self.EditQuestionWidget.setLayout(self.FinalLayout)
        self.setCentralWidget(self.EditQuestionWidget)
#connections
self.SubmitChanges.clicked.connect(self.UpdateQuestionchanges)
def UpdateQuestionchanges(self):
Question_ID = self.Question_ID.text()
Question = self.QuestionName.text()
Question_Type = self.QuestionType.text()
Question1 = Questions_Controller()
Question1.Update_Question(Question,Question_Type,Question_ID)
def DeleteQuestion(self):
Question = Questions_Controller()
results = Question.Search_QuestionsNoInput()
        self.setWindowTitle('Question Editor')
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,3)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question','Question Type'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #input widgets for choosing the question to delete
        self.QuestionIDLabel = QLabel('Enter an appropriate Question ID')
self.QuestionIDinfo = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create layout for labels and line edits
self.InputLayout = QGridLayout()
self.InputLayout.addWidget(self.QuestionIDLabel,0,0)
self.InputLayout.addWidget(self.QuestionIDinfo,0,1)
#mainWidget
self.QuestionLayout =QVBoxLayout()
self.QuestionLayout.addWidget(self.RecordTable)
self.QuestionLayout.addLayout(self.InputLayout)
self.QuestionLayout.addWidget(self.SubmitChanges)
        self.DeleteQuestionWidget = QWidget()
        self.DeleteQuestionWidget.setLayout(self.QuestionLayout)
        self.setCentralWidget(self.DeleteQuestionWidget)
#connections
self.SubmitChanges.clicked.connect(self.DeleteQuestionchanges)
def DeleteQuestionchanges(self):
Question_ID = self.QuestionIDinfo.text()
Question = Questions_Controller()
Question.Delete_Question(Question_ID)
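    #TypeViewWindow: the same screen pattern for question types, backed by
    #Type_Controller (two columns: ID Number and Question_Type).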
def TypeViewWindow(self):
Type = Type_Controller()
results = Type.Return_TypeNoInput()
Maxrows = len(results)
self.setWindowTitle('Type Editor')
self.TypeRecordTable = QTableWidget(Maxrows,2)
self.TypeRecordTable.setHorizontalHeaderLabels(['ID Number','Question_Type',])
row = 0
column = 0
for i in range(0,2):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
                self.TypeRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.TypeSearchItem = QLineEdit()
        self.TypeSearchItem.setPlaceholderText('Type_Name')
self.TypeSearchSubmit = QPushButton('Start Search')
self.AddTypeButton = QPushButton('Add')
self.EditTypeButton = QPushButton('Edit')
self.DeleteTypeButton = QPushButton('Delete')
#create layout
self.TypeTableBox = QVBoxLayout()
self.TypeTableBox.addWidget(self.TypeRecordTable)
        #create horizontal button layout
        self.TypeButtonLayout = QHBoxLayout()
        self.TypeButtonLayout.addWidget(self.AddTypeButton)
        self.TypeButtonLayout.addWidget(self.EditTypeButton)
        self.TypeButtonLayout.addWidget(self.DeleteTypeButton)
        #second to last layout
self.TypeLastLayout = QVBoxLayout()
self.TypeLastLayout.addLayout(self.TypeTableBox)
self.TypeLastLayout.addWidget(self.TypeSearchItem)
self.TypeLastLayout.addWidget(self.TypeSearchSubmit)
self.TypeLastLayout.addLayout(self.TypeButtonLayout)
#Final Layout
self.TypeLayout = QWidget()
self.TypeLayout.setLayout(self.TypeLastLayout)
self.setCentralWidget(self.TypeLayout)
#connect
self.AddTypeButton.clicked.connect(self.AddType)
self.EditTypeButton.clicked.connect(self.EditType)
self.DeleteTypeButton.clicked.connect(self.DeleteType)
self.TypeSearchSubmit.clicked.connect(self.TypeSearchLayout)
def TypeSearchLayout(self):
#Items
Search = self.TypeSearchItem.text()
Type = Type_Controller()
results = Type.SearchForType(Search)
        self.setWindowTitle('Type Editor')
        #create table of search results and push buttons
        if not results:
            print('The Search failed')
            results = []
Maxrows = len(results)
self.TypeRecordTableSearchresults = QTableWidget(Maxrows,2)
self.TypeRecordTableSearchresults.setHorizontalHeaderLabels(['ID Number','Question_Type',])
row = 0
column = 0
for i in range(0,2):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.TypeRecordTableSearchresults.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.TypeSearchItem = QLineEdit()
        self.TypeSearchItem.setPlaceholderText('Type_Name')
self.TypeSearchSubmit = QPushButton('Start Search')
self.AddTypeButton = QPushButton('Add')
self.EditTypeButton = QPushButton('Edit')
self.DeleteTypeButton = QPushButton('Delete')
#create layout
self.TypeTableBox = QVBoxLayout()
self.TypeTableBox.addWidget(self.TypeRecordTableSearchresults)
        #create horizontal button layout
        self.TypeButtonLayout = QHBoxLayout()
        self.TypeButtonLayout.addWidget(self.AddTypeButton)
        self.TypeButtonLayout.addWidget(self.EditTypeButton)
        self.TypeButtonLayout.addWidget(self.DeleteTypeButton)
        #second to last layout
self.TypeLastLayout = QVBoxLayout()
self.TypeLastLayout.addLayout(self.TypeTableBox)
self.TypeLastLayout.addWidget(self.TypeSearchItem)
self.TypeLastLayout.addWidget(self.TypeSearchSubmit)
self.TypeLastLayout.addLayout(self.TypeButtonLayout)
#Final Layout
self.TypeLayout = QWidget()
self.TypeLayout.setLayout(self.TypeLastLayout)
self.setCentralWidget(self.TypeLayout)
#connect
self.AddTypeButton.clicked.connect(self.AddType)
self.EditTypeButton.clicked.connect(self.EditType)
self.DeleteTypeButton.clicked.connect(self.DeleteType)
def AddType(self):
Type = Type_Controller()
results = Type.Return_TypeNoInput()
        self.setWindowTitle('Add Type')
        #create table of existing types plus input widgets
        Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,2)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question_Type'])
row = 0
column = 0
for i in range(0,2):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.Question_TypeLabel = QLabel('Enter an appropriate Question Type here!')
self.Question_Type= QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create table layout
self.TableLayout = QVBoxLayout()
self.TableLayout.addWidget(self.RecordTable)
#create layout
self.AddTypeLayout = QGridLayout()
self.AddTypeLayout.addWidget(self.Question_TypeLabel,0,0)
self.AddTypeLayout.addWidget(self.Question_Type,0,1)
self.FinalLayout = QVBoxLayout()
self.FinalLayout.addLayout(self.TableLayout)
self.FinalLayout.addLayout(self.AddTypeLayout)
self.FinalLayout.addWidget(self.SubmitChanges)
        self.AddTypeWidget = QWidget()
        self.AddTypeWidget.setLayout(self.FinalLayout)
        self.setCentralWidget(self.AddTypeWidget)
#something about result of line edit etc
self.SubmitChanges.clicked.connect(self.AddTypechanges)
def AddTypechanges(self):
self.Type_Name = self.Question_Type.text()
Type = Type_Controller()
Type.Add_Type(self.Type_Name)
def EditType(self):
Type = Type_Controller()
results = Type.Return_TypeNoInput()
self.setWindowTitle('Type Editor')
#create table and push buttons
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,2)
        self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question_Type'])
row = 0
column = 0
for i in range(0,2):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        self.Type_IDLabel = QLabel('Enter an appropriate Type ID')
        self.Question_TypeLabel = QLabel('Enter an appropriate Question_Type')
        self.Question_Type = QLineEdit()
        self.Question_Type.setPlaceholderText('Question_Type')
        self.Type_ID = QLineEdit()
        self.Type_ID.setPlaceholderText('Type_ID')
self.SubmitChanges = QPushButton('Submit Results')
#create layout for labels and line edits
self.EditTypeLayout = QGridLayout()
self.EditTypeLayout.addWidget(self.Type_IDLabel,0,0)
self.EditTypeLayout.addWidget(self.Question_TypeLabel,0,1)
self.EditTypeLayout.addWidget(self.Question_Type,1,1)
self.EditTypeLayout.addWidget(self.Type_ID,1,0)
#mainWidget
self.FinalLayout =QVBoxLayout()
self.FinalLayout.addWidget(self.RecordTable)
self.FinalLayout.addLayout(self.EditTypeLayout)
self.FinalLayout.addWidget(self.SubmitChanges)
        self.EditTypeWidget = QWidget()
        self.EditTypeWidget.setLayout(self.FinalLayout)
        self.setCentralWidget(self.EditTypeWidget)
#connections
self.SubmitChanges.clicked.connect(self.UpdateTypechanges)
def UpdateTypechanges(self):
Type_ID = self.Type_ID.text()
Question_Type = self.Question_Type.text()
Type = Type_Controller()
Type.Update_Type(Type_ID,Question_Type)
def DeleteType(self):
Type = Type_Controller()
results = Type.Return_TypeNoInput()
        self.setWindowTitle('Type Editor')
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,2)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question_Type'])
row = 0
column = 0
for i in range(0,2):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #input widgets for choosing the type to delete
        self.TypeIDLabel = QLabel('Enter an appropriate Type ID')
self.TypeIDinfo = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create layout for labels and line edits
self.InputLayout = QGridLayout()
self.InputLayout.addWidget(self.TypeIDLabel,0,0)
self.InputLayout.addWidget(self.TypeIDinfo,0,1)
#mainWidget
self.TypeLayout =QVBoxLayout()
self.TypeLayout.addWidget(self.RecordTable)
self.TypeLayout.addLayout(self.InputLayout)
self.TypeLayout.addWidget(self.SubmitChanges)
        self.DeleteTypeWidget = QWidget()
        self.DeleteTypeWidget.setLayout(self.TypeLayout)
        self.setCentralWidget(self.DeleteTypeWidget)
#connections
self.SubmitChanges.clicked.connect(self.DeleteTypechanges)
def DeleteTypechanges(self):
Type_ID = self.TypeIDinfo.text()
Type = Type_Controller()
Type.Delete_Type(Type_ID)
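    #StudentViewWindow: the same screen pattern for students, backed by
    #Student_Controller (Student ID, First Name, Last Name).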
def StudentViewWindow(self):
Student = Student_Controller()
results = Student.Return_StudentNoInput()
Maxrows = len(results)
self.setWindowTitle('Student Editor')
self.StudentViewRecordTable = QTableWidget(Maxrows,3)
self.StudentViewRecordTable.setHorizontalHeaderLabels(['Student ID','First Name','Last Name'])
row = 0
column = 0
for i in range(0,3):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
                self.StudentViewRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.SearchItem = QLineEdit()
        self.SearchItem.setPlaceholderText('Student_Name')
self.StudentSearchSubmit = QPushButton('Start Search')
self.AddStudentButton = QPushButton('Add')
self.EditStudentButton = QPushButton('Edit')
self.DeleteStudentButton = QPushButton('Delete')
#create layout
self.StudentTableBox = QVBoxLayout()
self.StudentTableBox.addWidget(self.StudentViewRecordTable)
        #create horizontal button layout
        self.StudentButtonLayout = QHBoxLayout()
        self.StudentButtonLayout.addWidget(self.AddStudentButton)
        self.StudentButtonLayout.addWidget(self.EditStudentButton)
        self.StudentButtonLayout.addWidget(self.DeleteStudentButton)
        #second to last layout
self.StudentSearchLastLayout = QVBoxLayout()
self.StudentSearchLastLayout.addLayout(self.StudentTableBox)
self.StudentSearchLastLayout.addWidget(self.SearchItem)
self.StudentSearchLastLayout.addWidget(self.StudentSearchSubmit)
self.StudentSearchLastLayout.addLayout(self.StudentButtonLayout)
#Final Layout
self.StudentLayout = QWidget()
self.StudentLayout.setLayout(self.StudentSearchLastLayout)
self.setCentralWidget(self.StudentLayout)
#connect
self.AddStudentButton.clicked.connect(self.AddStudent)
self.EditStudentButton.clicked.connect(self.EditStudent)
self.DeleteStudentButton.clicked.connect(self.DeleteStudent)
self.StudentSearchSubmit.clicked.connect(self.StudentSearchLayout)
def StudentSearchLayout(self):
#Items
Search = self.SearchItem.text()
Student = Student_Controller()
results = Student.SearchForStudent(Search)
        self.setWindowTitle('Student Editor')
        #create table of search results and push buttons
        if not results:
            print('The Search failed')
            results = []
Maxrows = len(results)
self.StudentRecordTableSearchresults = QTableWidget(Maxrows,3)
self.StudentRecordTableSearchresults.setHorizontalHeaderLabels(['ID Number','First Name','Last Name'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.StudentRecordTableSearchresults.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.SearchItem = QLineEdit()
        self.SearchItem.setPlaceholderText('Student_Name')
self.StudentSearchSubmit = QPushButton('Start Search')
self.AddStudentButton = QPushButton('Add')
self.EditStudentButton = QPushButton('Edit')
self.DeleteStudentButton = QPushButton('Delete')
#create layout
self.StudentTableBox = QVBoxLayout()
self.StudentTableBox.addWidget(self.StudentRecordTableSearchresults)
        #create horizontal button layout
        self.StudentButtonLayout = QHBoxLayout()
        self.StudentButtonLayout.addWidget(self.AddStudentButton)
        self.StudentButtonLayout.addWidget(self.EditStudentButton)
        self.StudentButtonLayout.addWidget(self.DeleteStudentButton)
        #second to last layout
self.StudentSearchLastLayout = QVBoxLayout()
self.StudentSearchLastLayout.addLayout(self.StudentTableBox)
self.StudentSearchLastLayout.addWidget(self.SearchItem)
self.StudentSearchLastLayout.addWidget(self.StudentSearchSubmit)
self.StudentSearchLastLayout.addLayout(self.StudentButtonLayout)
#Final Layout
self.StudentLayout = QWidget()
self.StudentLayout.setLayout(self.StudentSearchLastLayout)
self.setCentralWidget(self.StudentLayout)
#connect
self.AddStudentButton.clicked.connect(self.AddStudent)
self.EditStudentButton.clicked.connect(self.EditStudent)
self.DeleteStudentButton.clicked.connect(self.DeleteStudent)
def AddStudent(self):
Student = Student_Controller()
results = Student.Return_StudentNoInput()
        self.setWindowTitle('Add Student')
        #create table of existing students plus input widgets
        Maxrows = len(results)
self.AddStudentRecordTable = QTableWidget(Maxrows,3)
self.AddStudentRecordTable.setHorizontalHeaderLabels(['ID Number','First Name','Last Name'])
row = 0
column = 0
for i in range(0,3):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.AddStudentRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.StudentFirstNameLabel = QLabel('Enter an appropriate First Name here!')
self.StudentLastNameLabel = QLabel('Enter an appropriate Last Name here!')
self.First_Name = QLineEdit()
self.Last_Name = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create table layout
self.AddStudentTableLayout = QVBoxLayout()
self.AddStudentTableLayout.addWidget(self.AddStudentRecordTable)
#create layout
self.AddStudentLayout = QGridLayout()
self.AddStudentLayout.addWidget(self.StudentFirstNameLabel,0,0)
self.AddStudentLayout.addWidget(self.StudentLastNameLabel,0,1)
self.AddStudentLayout.addWidget(self.First_Name,1,0)
self.AddStudentLayout.addWidget(self.Last_Name,1,1)
self.AddStudentFinalLayout = QVBoxLayout()
self.AddStudentFinalLayout.addLayout(self.AddStudentTableLayout)
self.AddStudentFinalLayout.addLayout(self.AddStudentLayout)
self.AddStudentFinalLayout.addWidget(self.SubmitChanges)
        self.AddStudentWidget = QWidget()
        self.AddStudentWidget.setLayout(self.AddStudentFinalLayout)
        self.setCentralWidget(self.AddStudentWidget)
#something about result of line edit etc
self.SubmitChanges.clicked.connect(self.AddStudentchanges)
def AddStudentchanges(self):
self.StudentFirst_Name = self.First_Name.text()
self.StudentLast_Name = self.Last_Name.text()
Student = Student_Controller()
Student.Add_Student(self.StudentFirst_Name,self.StudentLast_Name)
def EditStudent(self):
Student = Student_Controller()
results = Student.Return_StudentNoInput()
self.setWindowTitle('Student Editor')
#create table and push buttons
Maxrows = len(results)
self.EditStudentRecordTable = QTableWidget(Maxrows,3)
self.EditStudentRecordTable.setHorizontalHeaderLabels(['ID Number','First Name','Last Name'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.EditStudentRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        self.Student_IDLabel = QLabel('Enter an appropriate Student ID')
        self.StudentFirstNameLabel = QLabel('Enter an appropriate First Name')
        self.StudentLastNameLabel = QLabel('Enter an appropriate Last Name')
        self.Student_ID = QLineEdit()
        self.Student_ID.setPlaceholderText('Student_ID')
        self.StudentFirstName = QLineEdit()
        self.StudentFirstName.setPlaceholderText('First_Name')
        self.StudentLastName = QLineEdit()
        self.StudentLastName.setPlaceholderText('Last_Name')
self.SubmitChanges = QPushButton('Submit Results')
#create layout for labels and line edits
self.EditStudentInputLayout = QGridLayout()
self.EditStudentInputLayout.addWidget(self.Student_IDLabel,0,0)
self.EditStudentInputLayout.addWidget(self.StudentFirstNameLabel,1,0)
self.EditStudentInputLayout.addWidget(self.StudentLastNameLabel,2,0)
self.EditStudentInputLayout.addWidget(self.Student_ID,0,1)
self.EditStudentInputLayout.addWidget(self.StudentFirstName,1,1)
self.EditStudentInputLayout.addWidget(self.StudentLastName,2,1)
#mainWidget
self.EditStudentFinalLayout =QVBoxLayout()
self.EditStudentFinalLayout.addWidget(self.EditStudentRecordTable)
self.EditStudentFinalLayout.addLayout(self.EditStudentInputLayout)
self.EditStudentFinalLayout.addWidget(self.SubmitChanges)
        self.EditStudentWidget = QWidget()
        self.EditStudentWidget.setLayout(self.EditStudentFinalLayout)
        self.setCentralWidget(self.EditStudentWidget)
#connections
self.SubmitChanges.clicked.connect(self.UpdateStudentchanges)
def UpdateStudentchanges(self):
Student_ID = self.Student_ID.text()
First_Name = self.StudentFirstName.text()
Last_Name = self.StudentLastName.text()
Student = Student_Controller()
Student.Update_Student(First_Name,Last_Name,Student_ID)
def DeleteStudent(self):
Student = Student_Controller()
results = Student.Return_StudentNoInput()
        self.setWindowTitle('Student Editor')
Maxrows = len(results)
self.DeleteStudentRecordTable = QTableWidget(Maxrows,3)
self.DeleteStudentRecordTable.setHorizontalHeaderLabels(['ID Number','First Name','Last Name'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.DeleteStudentRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #input widgets for choosing the student to delete
        self.StudentIDLabel = QLabel('Enter an appropriate Student ID')
self.StudentIDinfo = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create layout for labels and line edits
self.DeleteStudentInputLayout = QGridLayout()
self.DeleteStudentInputLayout.addWidget(self.StudentIDLabel,0,0)
self.DeleteStudentInputLayout.addWidget(self.StudentIDinfo,0,1)
#mainWidget
self.DeleteStudentLayout =QVBoxLayout()
self.DeleteStudentLayout.addWidget(self.DeleteStudentRecordTable)
self.DeleteStudentLayout.addLayout(self.DeleteStudentInputLayout)
self.DeleteStudentLayout.addWidget(self.SubmitChanges)
        self.DeleteStudentWidget = QWidget()
        self.DeleteStudentWidget.setLayout(self.DeleteStudentLayout)
        self.setCentralWidget(self.DeleteStudentWidget)
#connections
self.SubmitChanges.clicked.connect(self.DeleteStudentchanges)
def DeleteStudentchanges(self):
Student_ID = self.StudentIDinfo.text()
Student = Student_Controller()
Student.Delete_Student(Student_ID)
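    #AnswersViewWindow: the same screen pattern for answers, backed by
    #Answers_Controller (Answers ID, Correct Answer, Questions ID).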
def AnswersViewWindow(self):
Answers = Answers_Controller()
results = Answers.Return_AnswersNoInput()
Maxrows = len(results)
self.setWindowTitle('Answers Editor')
self.AnswersViewRecordTable = QTableWidget(Maxrows,3)
self.AnswersViewRecordTable.setHorizontalHeaderLabels(['Answers ID','Correct Answer','Questions ID'])
row = 0
column = 0
for i in range(0,3):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
                self.AnswersViewRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.SearchItem = QLineEdit()
        self.SearchItem.setPlaceholderText('Answers_Name')
self.AnswersSearchSubmit = QPushButton('Start Search')
self.AddAnswersButton = QPushButton('Add')
self.EditAnswersButton = QPushButton('Edit')
self.DeleteAnswersButton = QPushButton('Delete')
#create layout
self.AnswersTableBox = QVBoxLayout()
self.AnswersTableBox.addWidget(self.AnswersViewRecordTable)
        #create horizontal button layout
        self.AnswersButtonLayout = QHBoxLayout()
        self.AnswersButtonLayout.addWidget(self.AddAnswersButton)
        self.AnswersButtonLayout.addWidget(self.EditAnswersButton)
        self.AnswersButtonLayout.addWidget(self.DeleteAnswersButton)
        #second to last layout
self.AnswersSearchLastLayout = QVBoxLayout()
self.AnswersSearchLastLayout.addLayout(self.AnswersTableBox)
self.AnswersSearchLastLayout.addWidget(self.SearchItem)
self.AnswersSearchLastLayout.addWidget(self.AnswersSearchSubmit)
self.AnswersSearchLastLayout.addLayout(self.AnswersButtonLayout)
#Final Layout
self.AnswersLayout = QWidget()
self.AnswersLayout.setLayout(self.AnswersSearchLastLayout)
self.setCentralWidget(self.AnswersLayout)
#connect
self.AddAnswersButton.clicked.connect(self.AddAnswers)
self.EditAnswersButton.clicked.connect(self.EditAnswers)
self.DeleteAnswersButton.clicked.connect(self.DeleteAnswers)
self.AnswersSearchSubmit.clicked.connect(self.AnswersSearchLayout)
def AnswersSearchLayout(self):
#Items
Search = self.SearchItem.text()
Answers = Answers_Controller()
results = Answers.SearchForAnswers(Search)
        self.setWindowTitle('Answers Editor')
        #create table of search results and push buttons
        if not results:
            print('The Search failed')
            results = []
Maxrows = len(results)
self.AnswersRecordTableSearchresults = QTableWidget(Maxrows,3)
self.AnswersRecordTableSearchresults.setHorizontalHeaderLabels(['ID Number','Correct Answer','Questions ID'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
Item = QTableWidgetItem(additem)
self.AnswersRecordTableSearchresults.setItem(row,column,Item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.SearchItem = QLineEdit()
        self.SearchItem.setPlaceholderText('Answers_Name')
self.AnswersSearchSubmit = QPushButton('Start Search')
self.AddAnswersButton = QPushButton('Add')
self.EditAnswersButton = QPushButton('Edit')
self.DeleteAnswersButton = QPushButton('Delete')
#create layout
self.AnswersTableBox = QVBoxLayout()
self.AnswersTableBox.addWidget(self.AnswersRecordTableSearchresults)
        #create horizontal button layout
        self.AnswersButtonLayout = QHBoxLayout()
        self.AnswersButtonLayout.addWidget(self.AddAnswersButton)
        self.AnswersButtonLayout.addWidget(self.EditAnswersButton)
        self.AnswersButtonLayout.addWidget(self.DeleteAnswersButton)
        #second to last layout
self.AnswersSearchLastLayout = QVBoxLayout()
self.AnswersSearchLastLayout.addLayout(self.AnswersTableBox)
self.AnswersSearchLastLayout.addWidget(self.SearchItem)
self.AnswersSearchLastLayout.addWidget(self.AnswersSearchSubmit)
self.AnswersSearchLastLayout.addLayout(self.AnswersButtonLayout)
#Final Layout
self.AnswersLayout = QWidget()
self.AnswersLayout.setLayout(self.AnswersSearchLastLayout)
self.setCentralWidget(self.AnswersLayout)
#connect
self.AddAnswersButton.clicked.connect(self.AddAnswers)
self.EditAnswersButton.clicked.connect(self.EditAnswers)
self.DeleteAnswersButton.clicked.connect(self.DeleteAnswers)
def AddAnswers(self):
Answers = Answers_Controller()
results = Answers.Return_AnswersNoInput()
        self.setWindowTitle('Add Answers')
        #create table of existing answers plus input widgets
        Maxrows = len(results)
self.AddAnswersRecordTable = QTableWidget(Maxrows,3)
self.AddAnswersRecordTable.setHorizontalHeaderLabels(['ID Number','Correct Answer','Questions ID'])
row = 0
column = 0
for i in range(0,3):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.AddAnswersRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.AnswersLabel = QLabel('Enter an appropriate Answer here!')
self.AnswersQuestionIDLabel = QLabel('Enter an appropriate Question_ID here!')
self.CorrectAnswer= QLineEdit()
self.AnswersQuestionID = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create table layout
self.AddAnswersTableLayout = QVBoxLayout()
self.AddAnswersTableLayout.addWidget(self.AddAnswersRecordTable)
#create layout
self.AddAnswersLayout = QGridLayout()
self.AddAnswersLayout.addWidget(self.AnswersLabel,0,0)
self.AddAnswersLayout.addWidget(self.AnswersQuestionIDLabel,0,1)
self.AddAnswersLayout.addWidget(self.CorrectAnswer,1,0)
self.AddAnswersLayout.addWidget(self.AnswersQuestionID,1,1)
self.AddAnswersFinalLayout = QVBoxLayout()
self.AddAnswersFinalLayout.addLayout(self.AddAnswersTableLayout)
self.AddAnswersFinalLayout.addLayout(self.AddAnswersLayout)
self.AddAnswersFinalLayout.addWidget(self.SubmitChanges)
        self.AddAnswersWidget = QWidget()
        self.AddAnswersWidget.setLayout(self.AddAnswersFinalLayout)
        self.setCentralWidget(self.AddAnswersWidget)
#something about result of line edit etc
self.SubmitChanges.clicked.connect(self.AddAnswerschanges)
def AddAnswerschanges(self):
self.Answers_Name = self.CorrectAnswer.text()
self.AnswersQuestion = self.AnswersQuestionID.text()
Answers = Answers_Controller()
Answers.Add_Answer(self.Answers_Name,self.AnswersQuestion)
def EditAnswers(self):
Answers = Answers_Controller()
results = Answers.Return_AnswersNoInput()
self.setWindowTitle('Answers Editor')
#create table and push buttons
Maxrows = len(results)
self.EditAnswersRecordTable = QTableWidget(Maxrows,3)
self.EditAnswersRecordTable.setHorizontalHeaderLabels(['ID Number','Correct Answer','Questions ID'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.EditAnswersRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.Answers_IDLabel = QLabel('Enter an appropriate Answers ID')
self.CorrectAnswersLabel = QLabel('Enter an appropriate Correct Answer')
self.AnswersQuestionIDLabel= QLabel('Enter an appropriate QuestionID')
        self.Answers_ID = QLineEdit()
        self.Answers_ID.setPlaceholderText('Answers_ID')
        self.AnswersQuestionID = QLineEdit()
        self.AnswersQuestionID.setPlaceholderText('Questions_ID')
        self.CorrectAnswer = QLineEdit()
        self.CorrectAnswer.setPlaceholderText('Correct_Answer')
self.SubmitChanges = QPushButton('Submit Results')
#create layout for labels and line edits
self.EditAnswersInputLayout = QGridLayout()
self.EditAnswersInputLayout.addWidget(self.Answers_IDLabel,0,0)
self.EditAnswersInputLayout.addWidget(self.CorrectAnswersLabel,1,0)
self.EditAnswersInputLayout.addWidget(self.AnswersQuestionIDLabel,2,0)
self.EditAnswersInputLayout.addWidget(self.Answers_ID,0,1)
self.EditAnswersInputLayout.addWidget(self.CorrectAnswer,1,1)
self.EditAnswersInputLayout.addWidget(self.AnswersQuestionID,2,1)
#mainWidget
self.EditAnswersFinalLayout =QVBoxLayout()
self.EditAnswersFinalLayout.addWidget(self.EditAnswersRecordTable)
self.EditAnswersFinalLayout.addLayout(self.EditAnswersInputLayout)
self.EditAnswersFinalLayout.addWidget(self.SubmitChanges)
        self.EditAnswersWidget = QWidget()
        self.EditAnswersWidget.setLayout(self.EditAnswersFinalLayout)
        self.setCentralWidget(self.EditAnswersWidget)
#connections
self.SubmitChanges.clicked.connect(self.UpdateAnswerschanges)
def UpdateAnswerschanges(self):
Answers_ID = self.Answers_ID.text()
CorrectAnswer = self.CorrectAnswer.text()
Question_ID = self.AnswersQuestionID.text()
Answers = Answers_Controller()
Answers.Update_Answer(CorrectAnswer,Question_ID,Answers_ID)
def DeleteAnswers(self):
Answers = Answers_Controller()
results = Answers.Return_AnswersNoInput()
        self.setWindowTitle('Answers Editor')
Maxrows = len(results)
self.DeleteAnswersRecordTable = QTableWidget(Maxrows,3)
self.DeleteAnswersRecordTable.setHorizontalHeaderLabels(['ID Number','Correct Answer','Questions ID'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.DeleteAnswersRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #input widgets for choosing the answer to delete
        self.AnswersIDLabel = QLabel('Enter an appropriate Answers ID')
self.AnswersIDinfo = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create layout for labels and line edits
self.DeleteAnswersInputLayout = QGridLayout()
self.DeleteAnswersInputLayout.addWidget(self.AnswersIDLabel,0,0)
self.DeleteAnswersInputLayout.addWidget(self.AnswersIDinfo,0,1)
#mainWidget
self.DeleteAnswersLayout =QVBoxLayout()
self.DeleteAnswersLayout.addWidget(self.DeleteAnswersRecordTable)
self.DeleteAnswersLayout.addLayout(self.DeleteAnswersInputLayout)
self.DeleteAnswersLayout.addWidget(self.SubmitChanges)
        self.DeleteAnswersWidget = QWidget()
        self.DeleteAnswersWidget.setLayout(self.DeleteAnswersLayout)
        self.setCentralWidget(self.DeleteAnswersWidget)
#connections
self.SubmitChanges.clicked.connect(self.DeleteAnswerschanges)
def DeleteAnswerschanges(self):
Answers_ID = self.AnswersIDinfo.text()
Answers = Answers_Controller()
Answers.Delete_Answer(Answers_ID)
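    #AttemptViewWindow: the same screen pattern for test attempts, backed by
    #Attempts_Controller (score, time in session, date, test and student IDs).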
def AttemptViewWindow(self):
Attempt = Attempts_Controller()
results = Attempt.Return_AttemptNoInput()
Maxrows = len(results)
self.setWindowTitle('Attempt Editor')
        self.AttemptViewRecordTable = QTableWidget(Maxrows,6)
        self.AttemptViewRecordTable.setHorizontalHeaderLabels(['Attempt ID','Attempt_Score','Time_in_session','Date_of_Use','Test_ID','Student_ID'])
        row = 0
        column = 0
        for i in range(0,6):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.AttemptViewRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.SearchItem = QLineEdit()
        self.SearchItem.setPlaceholderText('Attempt_Name')
self.AttemptSearchSubmit = QPushButton('Start Search')
self.AddAttemptButton = QPushButton('Add')
self.EditAttemptButton = QPushButton('Edit')
self.DeleteAttemptButton = QPushButton('Delete')
#create layout
self.AttemptTableBox = QVBoxLayout()
self.AttemptTableBox.addWidget(self.AttemptViewRecordTable)
        #create horizontal button layout
        self.AttemptButtonLayout = QHBoxLayout()
        self.AttemptButtonLayout.addWidget(self.AddAttemptButton)
        self.AttemptButtonLayout.addWidget(self.EditAttemptButton)
        self.AttemptButtonLayout.addWidget(self.DeleteAttemptButton)
        #second to last layout
self.AttemptSearchLastLayout = QVBoxLayout()
self.AttemptSearchLastLayout.addLayout(self.AttemptTableBox)
self.AttemptSearchLastLayout.addWidget(self.SearchItem)
self.AttemptSearchLastLayout.addWidget(self.AttemptSearchSubmit)
self.AttemptSearchLastLayout.addLayout(self.AttemptButtonLayout)
#Final Layout
self.AttemptLayout = QWidget()
self.AttemptLayout.setLayout(self.AttemptSearchLastLayout)
self.setCentralWidget(self.AttemptLayout)
#connect
self.AddAttemptButton.clicked.connect(self.AddAttempt)
self.EditAttemptButton.clicked.connect(self.EditAttempt)
self.DeleteAttemptButton.clicked.connect(self.DeleteAttempt)
self.AttemptSearchSubmit.clicked.connect(self.AttemptSearchLayout)
def AttemptSearchLayout(self):
#Items
Search = self.SearchItem.text()
Attempt = Attempts_Controller()
results = Attempt.SearchForAttempt(Search)
        self.setWindowTitle('Attempt Editor')
        #create table of search results and push buttons
        if not results:
            print('The Search failed')
            results = []
Maxrows = len(results)
        self.AttemptRecordTableSearchresults = QTableWidget(Maxrows,6)
        self.AttemptRecordTableSearchresults.setHorizontalHeaderLabels(['Attempt ID','Attempt_Score','Time_in_session','Date_of_Use','Test_ID','Student_ID'])
        row = 0
        column = 0
        for i in range(0,6):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
Item = QTableWidgetItem(additem)
self.AttemptRecordTableSearchresults.setItem(row,column,Item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.SearchItem = QLineEdit()
        self.SearchItem.setPlaceholderText('Attempt_Name')
self.AttemptSearchSubmit = QPushButton('Start Search')
self.AddAttemptButton = QPushButton('Add')
self.EditAttemptButton = QPushButton('Edit')
self.DeleteAttemptButton = QPushButton('Delete')
#create layout
self.AttemptTableBox = QVBoxLayout()
self.AttemptTableBox.addWidget(self.AttemptRecordTableSearchresults)
        #create horizontal button layout
        self.AttemptButtonLayout = QHBoxLayout()
        self.AttemptButtonLayout.addWidget(self.AddAttemptButton)
        self.AttemptButtonLayout.addWidget(self.EditAttemptButton)
        self.AttemptButtonLayout.addWidget(self.DeleteAttemptButton)
        #second to last layout
self.AttemptSearchLastLayout = QVBoxLayout()
self.AttemptSearchLastLayout.addLayout(self.AttemptTableBox)
self.AttemptSearchLastLayout.addWidget(self.SearchItem)
self.AttemptSearchLastLayout.addWidget(self.AttemptSearchSubmit)
self.AttemptSearchLastLayout.addLayout(self.AttemptButtonLayout)
#Final Layout
self.AttemptLayout = QWidget()
self.AttemptLayout.setLayout(self.AttemptSearchLastLayout)
self.setCentralWidget(self.AttemptLayout)
#connect
self.AddAttemptButton.clicked.connect(self.AddAttempt)
self.EditAttemptButton.clicked.connect(self.EditAttempt)
self.DeleteAttemptButton.clicked.connect(self.DeleteAttempt)
def AddAttempt(self):
Attempt = Attempts_Controller()
results = Attempt.Return_AttemptNoInput()
        self.setWindowTitle('Add Attempt')
        #create table of existing attempts plus input widgets
        Maxrows = len(results)
        self.AddAttemptRecordTable = QTableWidget(Maxrows,6)
        self.AddAttemptRecordTable.setHorizontalHeaderLabels(['Attempt ID','Attempt_Score','Time_in_session','Date_of_Use','Test_ID','Student_ID'])
        row = 0
        column = 0
        for i in range(0,6):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.AddAttemptRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.AttemptScoreLabel = QLabel('Enter an appropriate Attempt Score here!')
self.TimeinSessionLabel = QLabel('Enter an appropriate Time in Session here!')
self.DateOfUseLabel = QLabel('Enter an appropriate DateOfUse here!')
self.TestIDLabel = QLabel('Enter an appropriate TestID here!')
self.StudentIDLabel = QLabel('Enter an appropriate Student ID here!')
        self.AttemptScore = QLineEdit()
        self.AttemptScore.setPlaceholderText('AttemptScore')
        self.TimeinSession = QLineEdit()
        self.TimeinSession.setPlaceholderText('TimeinSession')
        self.DateOfUse = QLineEdit()
        self.DateOfUse.setPlaceholderText('DateOfUse')
        self.TestID = QLineEdit()
        self.TestID.setPlaceholderText('TestID')
        self.StudentID = QLineEdit()
        self.StudentID.setPlaceholderText('StudentID')
self.SubmitChanges = QPushButton('Submit Changes')
#create table layout
self.AddAttemptTableLayout = QVBoxLayout()
self.AddAttemptTableLayout.addWidget(self.AddAttemptRecordTable)
self.AddAttemptLayout = QGridLayout()
self.AddAttemptLayout.addWidget(self.AttemptScoreLabel,0,0)
self.AddAttemptLayout.addWidget(self.TimeinSessionLabel,1,0)
self.AddAttemptLayout.addWidget(self.DateOfUseLabel,2,0)
self.AddAttemptLayout.addWidget(self.TestIDLabel,3,0)
self.AddAttemptLayout.addWidget(self.StudentIDLabel,4,0)
        self.AddAttemptLayout.addWidget(self.AttemptScore,0,1)
self.AddAttemptLayout.addWidget(self.TimeinSession,1,1)
self.AddAttemptLayout.addWidget(self.DateOfUse,2,1)
self.AddAttemptLayout.addWidget(self.TestID,3,1)
self.AddAttemptLayout.addWidget(self.StudentID,4,1)
#create layout
self.AddAttemptFinalLayout = QVBoxLayout()
self.AddAttemptFinalLayout.addLayout(self.AddAttemptTableLayout)
self.AddAttemptFinalLayout.addLayout(self.AddAttemptLayout)
self.AddAttemptFinalLayout.addWidget(self.SubmitChanges)
        self.AddAttemptWidget = QWidget()
        self.AddAttemptWidget.setLayout(self.AddAttemptFinalLayout)
        self.setCentralWidget(self.AddAttemptWidget)
#something about result of line edit etc
self.SubmitChanges.clicked.connect(self.AddAttemptchanges)
def AddAttemptchanges(self):
        AttemptScore1 = self.AttemptScore.text()
TimeinSession1 = self.TimeinSession.text()
DateOfUse1 = self.DateOfUse.text()
TestID1 = self.TestID.text()
StudentID1 = self.StudentID.text()
Attempt = Attempts_Controller()
Attempt.Add_Attempts(AttemptScore1,TimeinSession1,DateOfUse1,TestID1,StudentID1)
def EditAttempt(self):
Attempt = Attempts_Controller()
results = Attempt.Return_AttemptNoInput()
self.setWindowTitle('Attempt Editor')
#create table and push buttons
Maxrows = len(results)
        self.EditAttemptRecordTable = QTableWidget(Maxrows,6)
        self.EditAttemptRecordTable.setHorizontalHeaderLabels(['Attempt ID','Attempt_Score','Time_in_session','Date_of_Use','Test_ID','Student_ID'])
        row = 0
        column = 0
        for i in range(0,6):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.EditAttemptRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.AttemptIDLabel = QLabel('Enter an appropriate Attempt ID here!')
self.AttemptScoreLabel = QLabel('Enter an appropriate Score here!')
self.TimeinSessionLabel = QLabel('Enter an appropriate Time in session here!')
self.DateOfUseLabel = QLabel('Enter an appropriate Date of Use here!')
self.TestIDLabel = QLabel('Enter an appropriate Test ID here!')
self.StudentIDLabel = QLabel('Enter an appropriate Student ID here!')
        self.AttemptID = QLineEdit()
        self.AttemptID.setPlaceholderText('Attempt ID')
        self.AttemptScore = QLineEdit()
        self.AttemptScore.setPlaceholderText('AttemptScore')
        self.TimeinSession = QLineEdit()
        self.TimeinSession.setPlaceholderText('TimeinSession')
        self.DateOfUse = QLineEdit()
        self.DateOfUse.setPlaceholderText('DateOfUse')
        self.TestID = QLineEdit()
        self.TestID.setPlaceholderText('TestID')
        self.StudentID = QLineEdit()
        self.StudentID.setPlaceholderText('StudentID')
        self.SubmitChanges = QPushButton('Submit Changes')
#create layout for labels and line edits
self.EditAttemptLayout = QGridLayout()
self.EditAttemptLayout.addWidget(self.AttemptIDLabel,0,0)
self.EditAttemptLayout.addWidget(self.AttemptScoreLabel,1,0)
self.EditAttemptLayout.addWidget(self.TimeinSessionLabel,2,0)
self.EditAttemptLayout.addWidget(self.DateOfUseLabel,3,0)
self.EditAttemptLayout.addWidget(self.TestIDLabel,4,0)
self.EditAttemptLayout.addWidget(self.StudentIDLabel,5,0)
self.EditAttemptLayout.addWidget(self.AttemptID,0,1)
        self.EditAttemptLayout.addWidget(self.AttemptScore,1,1)
self.EditAttemptLayout.addWidget(self.TimeinSession,2,1)
self.EditAttemptLayout.addWidget(self.DateOfUse,3,1)
self.EditAttemptLayout.addWidget(self.TestID,4,1)
self.EditAttemptLayout.addWidget(self.StudentID,5,1)
#mainWidget
self.EditAttemptFinalLayout =QVBoxLayout()
self.EditAttemptFinalLayout.addWidget(self.EditAttemptRecordTable)
self.EditAttemptFinalLayout.addLayout(self.EditAttemptLayout)
self.EditAttemptFinalLayout.addWidget(self.SubmitChanges)
        self.EditAttemptWidget = QWidget()
        self.EditAttemptWidget.setLayout(self.EditAttemptFinalLayout)
        self.setCentralWidget(self.EditAttemptWidget)
#connections
self.SubmitChanges.clicked.connect(self.UpdateAttemptchanges)
def UpdateAttemptchanges(self):
Attempt_ID = self.AttemptID.text()
        AttemptScore = self.AttemptScore.text()
TimeinSession = self.TimeinSession.text()
DateOfUse = self.DateOfUse.text()
TestID = self.TestID.text()
StudentID = self.StudentID.text()
Attempt = Attempts_Controller()
        Attempt.Update_Attempts(Attempt_ID,AttemptScore,TimeinSession,DateOfUse,TestID,StudentID)
def DeleteAttempt(self):
Attempt = Attempts_Controller()
results = Attempt.Return_AttemptNoInput()
        self.setWindowTitle('Attempt Editor')
Maxrows = len(results)
        self.DeleteAttemptRecordTable = QTableWidget(Maxrows,6)
        self.DeleteAttemptRecordTable.setHorizontalHeaderLabels(['Attempt ID','Attempt_Score','Time_in_session','Date_of_Use','Test_ID','Student_ID'])
        row = 0
        column = 0
        for i in range(0,6):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.DeleteAttemptRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #input widgets for choosing the attempt to delete
        self.AttemptIDLabel = QLabel('Enter an appropriate Attempt ID')
self.AttemptIDinfo = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create layout for labels and line edits
self.DeleteAttemptInputLayout = QGridLayout()
self.DeleteAttemptInputLayout.addWidget(self.AttemptIDLabel,0,0)
self.DeleteAttemptInputLayout.addWidget(self.AttemptIDinfo,0,1)
#mainWidget
self.DeleteAttemptLayout =QVBoxLayout()
self.DeleteAttemptLayout.addWidget(self.DeleteAttemptRecordTable)
self.DeleteAttemptLayout.addLayout(self.DeleteAttemptInputLayout)
self.DeleteAttemptLayout.addWidget(self.SubmitChanges)
        self.DeleteAttemptWidget = QWidget()
        self.DeleteAttemptWidget.setLayout(self.DeleteAttemptLayout)
        self.setCentralWidget(self.DeleteAttemptWidget)
#connections
self.SubmitChanges.clicked.connect(self.DeleteAttemptchanges)
def DeleteAttemptchanges(self):
Attempt_ID = self.AttemptIDinfo.text()
Attempt = Attempts_Controller()
Attempt.Delete_Attempts(Attempt_ID)
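    #TestQuestionsViewWindow: the same screen pattern for question-to-test
    #links, backed by TestQuestions_Controller (ID, Question_ID, Test_ID).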
def TestQuestionsViewWindow(self):
TestQuestions = TestQuestions_Controller()
results = TestQuestions.Return_TestQuestionsNoInput()
Maxrows = len(results)
self.setWindowTitle('TestQuestions Editor')
self.TestQuestionsRecordTable = QTableWidget(Maxrows,3)
self.TestQuestionsRecordTable.setHorizontalHeaderLabels(['ID Number','Question_ID','Test_ID'])
row = 0
column = 0
for i in range(0,3):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.TestQuestionsRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
        #search bar and CRUD push buttons
        self.TestQuestionsSearchItem = QLineEdit()
        self.TestQuestionsSearchItem.setPlaceholderText('TestQuestions_Name')
self.TestQuestionsSearchSubmit = QPushButton('Start Search')
self.AddTestQuestionsButton = QPushButton('Add')
self.EditTestQuestionsButton = QPushButton('Edit')
self.DeleteTestQuestionsButton = QPushButton('Delete')
#create layout
self.TestQuestionsTableBox = QVBoxLayout()
self.TestQuestionsTableBox.addWidget(self.TestQuestionsRecordTable)
        #create horizontal button layout
        self.TestQuestionsButtonLayout = QHBoxLayout()
        self.TestQuestionsButtonLayout.addWidget(self.AddTestQuestionsButton)
        self.TestQuestionsButtonLayout.addWidget(self.EditTestQuestionsButton)
        self.TestQuestionsButtonLayout.addWidget(self.DeleteTestQuestionsButton)
        #second to last layout
self.TestQuestionsLastLayout = QVBoxLayout()
self.TestQuestionsLastLayout.addLayout(self.TestQuestionsTableBox)
self.TestQuestionsLastLayout.addWidget(self.TestQuestionsSearchItem)
self.TestQuestionsLastLayout.addWidget(self.TestQuestionsSearchSubmit)
self.TestQuestionsLastLayout.addLayout(self.TestQuestionsButtonLayout)
#Final Layout
self.TestQuestionsLayout = QWidget()
self.TestQuestionsLayout.setLayout(self.TestQuestionsLastLayout)
self.setCentralWidget(self.TestQuestionsLayout)
#connect
self.AddTestQuestionsButton.clicked.connect(self.AddTestQuestions)
self.EditTestQuestionsButton.clicked.connect(self.EditTestQuestions)
self.DeleteTestQuestionsButton.clicked.connect(self.DeleteTestQuestions)
self.TestQuestionsSearchSubmit.clicked.connect(self.TestQuestionsSearchLayout)
def TestQuestionsSearchLayout(self):
#Items
Search = self.TestQuestionsSearchItem.text()
TestQuestions = TestQuestions_Controller()
results = TestQuestions.SearchForTestQuestions(Search)
        self.setWindowTitle('TestQuestions Editor')
        #create table of search results and push buttons
        if not results:
            print('The Search failed')
            results = []
Maxrows = len(results)
self.TestQuestionsRecordTableSearchresults = QTableWidget(Maxrows,3)
self.TestQuestionsRecordTableSearchresults.setHorizontalHeaderLabels(['ID Number','Question_ID','Test_ID'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.TestQuestionsRecordTableSearchresults.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
#Searchfor TestQuestions
self.TestQuestionsSearchItem = QLineEdit('TestQuestions_Name')
self.TestQuestionsSearchSubmit = QPushButton('Start Search')
self.AddTestQuestionsButton = QPushButton('Add')
self.EditTestQuestionsButton = QPushButton('Edit')
self.DeleteTestQuestionsButton = QPushButton('Delete')
#create layout
self.TestQuestionsTableBox = QVBoxLayout()
self.TestQuestionsTableBox.addWidget(self.TestQuestionsRecordTableSearchresults)
#create vertical layout
self.TestQuestionsButtonLayout = QHBoxLayout()
self.TestQuestionsButtonLayout.addWidget(self.AddTestQuestionsButton)
self.TestQuestionsButtonLayout.addWidget(self.EditTestQuestionsButton)
self.TestQuestionsButtonLayout.addWidget(self.DeleteTestQuestionsButton)
        #second to last layout
self.TestQuestionsLastLayout = QVBoxLayout()
self.TestQuestionsLastLayout.addLayout(self.TestQuestionsTableBox)
self.TestQuestionsLastLayout.addWidget(self.TestQuestionsSearchItem)
self.TestQuestionsLastLayout.addWidget(self.TestQuestionsSearchSubmit)
self.TestQuestionsLastLayout.addLayout(self.TestQuestionsButtonLayout)
#Final Layout
self.TestQuestionsLayout = QWidget()
self.TestQuestionsLayout.setLayout(self.TestQuestionsLastLayout)
self.setCentralWidget(self.TestQuestionsLayout)
#connect
self.AddTestQuestionsButton.clicked.connect(self.AddTestQuestions)
self.EditTestQuestionsButton.clicked.connect(self.EditTestQuestions)
self.DeleteTestQuestionsButton.clicked.connect(self.DeleteTestQuestions)
def AddTestQuestions(self):
TestQuestions = TestQuestions_Controller()
results = TestQuestions.Return_TestQuestionsNoInput()
        self.setWindowTitle('Add TestQuestions')
        #create table and push buttons
        Maxrows = len(results)
        self.RecordTable = QTableWidget(Maxrows,3)
        self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question_ID','Test_ID'])
        row = 0
        column = 0
        for i in range(0,3):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.TQQuestion_IDLabel = QLabel('Enter an appropriate Question ID here!')
self.TQQuestion_ID= QLineEdit()
self.TQTest_IDLabel = QLabel('Enter an appropriate Test ID here!')
self.TQTest_ID= QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create table layout
self.TableLayout = QVBoxLayout()
self.TableLayout.addWidget(self.RecordTable)
#create layout
self.AddTestQuestionsLayout = QGridLayout()
self.AddTestQuestionsLayout.addWidget(self.TQQuestion_IDLabel,0,0)
self.AddTestQuestionsLayout.addWidget(self.TQQuestion_ID,0,1)
self.AddTestQuestionsLayout.addWidget(self.TQTest_IDLabel,1,0)
self.AddTestQuestionsLayout.addWidget(self.TQTest_ID,1,1)
self.FinalLayout = QVBoxLayout()
self.FinalLayout.addLayout(self.TableLayout)
self.FinalLayout.addLayout(self.AddTestQuestionsLayout)
self.FinalLayout.addWidget(self.SubmitChanges)
self.AddTestQuestionsLayout = QWidget()
self.AddTestQuestionsLayout.setLayout(self.FinalLayout)
self.setCentralWidget(self.AddTestQuestionsLayout)
#something about result of line edit etc
self.SubmitChanges.clicked.connect(self.AddTestQuestionschanges)
def AddTestQuestionschanges(self):
Questions_ID = self.TQQuestion_ID.text()
Test_ID = self.TQTest_ID.text()
TestQuestions = TestQuestions_Controller()
TestQuestions.Add_TestQuestions(Questions_ID,Test_ID)
def EditTestQuestions(self):
TestQuestions = TestQuestions_Controller()
results = TestQuestions.Return_TestQuestionsNoInput()
self.setWindowTitle('TestQuestions Editor')
#create table and push buttons
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,3)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question_ID','Test_ID'])
row = 0
column = 0
for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.TQQuestion_IDLabel = QLabel('Enter an appropriate Question ID here!')
self.TQQuestion_ID= QLineEdit()
self.TQTest_IDLabel = QLabel('Enter an appropriate Test ID here!')
self.TQTest_ID= QLineEdit()
self.TQLabel = QLabel('Enter an appropriate Test Questions ID here!')
self.TQID= QLineEdit()
self.SubmitChanges = QPushButton('Submit Results')
#create layout for labels and line edits
self.EditTestQuestionsLayout = QGridLayout()
        self.EditTestQuestionsLayout.addWidget(self.TQQuestion_IDLabel,0,0)
        self.EditTestQuestionsLayout.addWidget(self.TQQuestion_ID,0,1)
        self.EditTestQuestionsLayout.addWidget(self.TQTest_IDLabel,1,0)
        self.EditTestQuestionsLayout.addWidget(self.TQTest_ID,1,1)
        self.EditTestQuestionsLayout.addWidget(self.TQLabel,2,0)
        self.EditTestQuestionsLayout.addWidget(self.TQID,2,1)
#mainWidget
self.FinalLayout =QVBoxLayout()
self.FinalLayout.addWidget(self.RecordTable)
self.FinalLayout.addLayout(self.EditTestQuestionsLayout)
self.FinalLayout.addWidget(self.SubmitChanges)
self.EditTestQuestions = QWidget()
self.EditTestQuestions.setLayout(self.FinalLayout)
self.setCentralWidget(self.EditTestQuestions)
#connections
self.SubmitChanges.clicked.connect(self.UpdateTestQuestionschanges)
def UpdateTestQuestionschanges(self):
Questions_ID = self.TQQuestion_ID.text()
Test_ID = self.TQTest_ID.text()
TestQuestions_ID = self.TQID.text()
TestQuestions = TestQuestions_Controller()
TestQuestions.Update_TestQuestions(Questions_ID,Test_ID,TestQuestions_ID)
def DeleteTestQuestions(self):
TestQuestions = TestQuestions_Controller()
results = TestQuestions.Return_TestQuestionsNoInput()
        self.setWindowTitle('TestQuestions Editor')
Maxrows = len(results)
        self.RecordTable = QTableWidget(Maxrows,3)
        self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question_ID','Test_ID'])
        row = 0
        column = 0
        for i in range(0,3):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
#create table and push buttons
self.TestQuestionsIDLabel = QLabel('enter an appropriate TestQuestions ID')
self.TestQuestionsIDinfo = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create layout for labels and line edits
self.InputLayout = QGridLayout()
self.InputLayout.addWidget(self.TestQuestionsIDLabel,0,0)
self.InputLayout.addWidget(self.TestQuestionsIDinfo,0,1)
#mainWidget
self.TestQuestionsLayout =QVBoxLayout()
self.TestQuestionsLayout.addWidget(self.RecordTable)
self.TestQuestionsLayout.addLayout(self.InputLayout)
self.TestQuestionsLayout.addWidget(self.SubmitChanges)
self.DeleteTestQuestions = QWidget()
self.DeleteTestQuestions.setLayout(self.TestQuestionsLayout)
self.setCentralWidget(self.DeleteTestQuestions)
#connections
self.SubmitChanges.clicked.connect(self.DeleteTestQuestionschanges)
def DeleteTestQuestionschanges(self):
TestQuestions_ID = self.TestQuestionsIDinfo.text()
TestQuestions = TestQuestions_Controller()
TestQuestions.Delete_TestQuestions(TestQuestions_ID)
def ResponseViewWindow(self):
Response = Response_Controller()
results = Response.Return_ResponseNoInput()
Maxrows = len(results)
self.setWindowTitle('Response Editor')
self.ResponseRecordTable = QTableWidget(Maxrows,4)
self.ResponseRecordTable.setHorizontalHeaderLabels(['ID Number','Question_ID','Attempt_ID','Student Response'])
row = 0
column = 0
for i in range(0,4):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.ResponseRecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
#create table and push buttons
#Searchfor Response
self.ResponseSearchItem = QLineEdit('Response_Name')
self.ResponseSearchSubmit = QPushButton('Start Search')
self.AddResponseButton = QPushButton('Add')
self.EditResponseButton = QPushButton('Edit')
self.DeleteResponseButton = QPushButton('Delete')
#create layout
self.ResponseTableBox = QVBoxLayout()
self.ResponseTableBox.addWidget(self.ResponseRecordTable)
#create vertical layout
self.ResponseButtonLayout = QHBoxLayout()
self.ResponseButtonLayout.addWidget(self.AddResponseButton)
self.ResponseButtonLayout.addWidget(self.EditResponseButton)
self.ResponseButtonLayout.addWidget(self.DeleteResponseButton)
        #second to last layout
self.ResponseLastLayout = QVBoxLayout()
self.ResponseLastLayout.addLayout(self.ResponseTableBox)
self.ResponseLastLayout.addWidget(self.ResponseSearchItem)
self.ResponseLastLayout.addWidget(self.ResponseSearchSubmit)
self.ResponseLastLayout.addLayout(self.ResponseButtonLayout)
#Final Layout
self.ResponseLayout = QWidget()
self.ResponseLayout.setLayout(self.ResponseLastLayout)
self.setCentralWidget(self.ResponseLayout)
#connect
self.AddResponseButton.clicked.connect(self.AddResponse)
self.EditResponseButton.clicked.connect(self.EditResponse)
self.DeleteResponseButton.clicked.connect(self.DeleteResponse)
self.ResponseSearchSubmit.clicked.connect(self.ResponseSearchLayout)
def ResponseSearchLayout(self):
#Items
Search = self.ResponseSearchItem.text()
Response = Response_Controller()
results = Response.SearchForResponse(Search)
print(results)
self.setWindowTitle('Response Editor')
#create table and push buttons
        if results is None or results == []:
            print('The Search failed')
Maxrows = len(results)
self.ResponseRecordTableSearchresults = QTableWidget(Maxrows,4)
self.ResponseRecordTableSearchresults.setHorizontalHeaderLabels(['ID Number','Question_ID','Attempt_ID','Student Response'])
row = 0
column = 0
for i in range(0,4):
for each in results:
print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.ResponseRecordTableSearchresults.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
#Searchfor Response
self.ResponseSearchItem = QLineEdit('Response_Name')
self.ResponseSearchSubmit = QPushButton('Start Search')
self.AddResponseButton = QPushButton('Add')
self.EditResponseButton = QPushButton('Edit')
self.DeleteResponseButton = QPushButton('Delete')
#create layout
self.ResponseTableBox = QVBoxLayout()
self.ResponseTableBox.addWidget(self.ResponseRecordTableSearchresults)
#create vertical layout
self.ResponseButtonLayout = QHBoxLayout()
self.ResponseButtonLayout.addWidget(self.AddResponseButton)
self.ResponseButtonLayout.addWidget(self.EditResponseButton)
self.ResponseButtonLayout.addWidget(self.DeleteResponseButton)
        #second to last layout
self.ResponseLastLayout = QVBoxLayout()
self.ResponseLastLayout.addLayout(self.ResponseTableBox)
self.ResponseLastLayout.addWidget(self.ResponseSearchItem)
self.ResponseLastLayout.addWidget(self.ResponseSearchSubmit)
self.ResponseLastLayout.addLayout(self.ResponseButtonLayout)
#Final Layout
self.ResponseLayout = QWidget()
self.ResponseLayout.setLayout(self.ResponseLastLayout)
self.setCentralWidget(self.ResponseLayout)
#connect
self.AddResponseButton.clicked.connect(self.AddResponse)
self.EditResponseButton.clicked.connect(self.EditResponse)
self.DeleteResponseButton.clicked.connect(self.DeleteResponse)
def AddResponse(self):
Response = Response_Controller()
results = Response.Return_ResponseNoInput()
        self.setWindowTitle('Add Response')
        #create table and push buttons
        Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,4)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question_ID','Attempt_ID','Student Response'])
row = 0
column = 0
for i in range(0,4):
for each in results:
#print(each)
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.RQuestion_IDLabel = QLabel('Enter an appropriate Question ID here!')
self.RQuestion_ID= QLineEdit()
self.RAttempt_IDLabel = QLabel('Enter an appropriate Attempt ID here!')
self.RAttempt_ID= QLineEdit()
self.StudentResponseLabel = QLabel('Enter an appropriate Student Response here!')
self.StudentResponse= QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create table layout
self.TableLayout = QVBoxLayout()
self.TableLayout.addWidget(self.RecordTable)
#create layout
self.AddResponseLayout = QGridLayout()
self.AddResponseLayout.addWidget(self.RQuestion_IDLabel,0,0)
self.AddResponseLayout.addWidget(self.RQuestion_ID,0,1)
self.AddResponseLayout.addWidget(self.RAttempt_IDLabel,1,0)
self.AddResponseLayout.addWidget(self.RAttempt_ID,1,1)
self.AddResponseLayout.addWidget(self.StudentResponseLabel,2,0)
self.AddResponseLayout.addWidget(self.StudentResponse,2,1)
self.FinalLayout = QVBoxLayout()
self.FinalLayout.addLayout(self.TableLayout)
self.FinalLayout.addLayout(self.AddResponseLayout)
self.FinalLayout.addWidget(self.SubmitChanges)
self.AddResponseLayout = QWidget()
self.AddResponseLayout.setLayout(self.FinalLayout)
self.setCentralWidget(self.AddResponseLayout)
#something about result of line edit etc
self.SubmitChanges.clicked.connect(self.AddResponsechanges)
def AddResponsechanges(self):
Questions_ID = self.RQuestion_ID.text()
Attempt_ID = self.RAttempt_ID.text()
StudentResponse = self.StudentResponse.text()
Response = Response_Controller()
Response.Add_Response(Questions_ID,Attempt_ID,StudentResponse)
def EditResponse(self):
Response = Response_Controller()
results = Response.Return_ResponseNoInput()
self.setWindowTitle('Response Editor')
#create table and push buttons
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,4)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question_ID','Attempt_ID','Student Response'])
row = 0
column = 0
for i in range(0,4):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
self.RQuestion_IDLabel = QLabel('Enter an appropriate Question ID here!')
self.RQuestion_ID= QLineEdit()
self.RAttempt_IDLabel = QLabel('Enter an appropriate Attempt ID here!')
self.RAttempt_ID= QLineEdit()
self.ResponseLabel = QLabel('Enter an appropriate Test Response ID here!')
self.ResponseID= QLineEdit()
self.StudentResponseLabel = QLabel('Enter an appropriate Student Response here!')
self.StudentResponse= QLineEdit()
self.SubmitChanges = QPushButton('Submit Results')
#create layout for labels and line edits
self.EditResponseLayout = QGridLayout()
        self.EditResponseLayout.addWidget(self.RQuestion_IDLabel,0,0)
        self.EditResponseLayout.addWidget(self.RQuestion_ID,0,1)
        self.EditResponseLayout.addWidget(self.RAttempt_IDLabel,1,0)
        self.EditResponseLayout.addWidget(self.RAttempt_ID,1,1)
        self.EditResponseLayout.addWidget(self.ResponseLabel,2,0)
        self.EditResponseLayout.addWidget(self.ResponseID,2,1)
        self.EditResponseLayout.addWidget(self.StudentResponseLabel,3,0)
        self.EditResponseLayout.addWidget(self.StudentResponse,3,1)
#mainWidget
self.FinalLayout =QVBoxLayout()
self.FinalLayout.addWidget(self.RecordTable)
self.FinalLayout.addLayout(self.EditResponseLayout)
self.FinalLayout.addWidget(self.SubmitChanges)
self.EditResponse = QWidget()
self.EditResponse.setLayout(self.FinalLayout)
self.setCentralWidget(self.EditResponse)
#connections
self.SubmitChanges.clicked.connect(self.UpdateResponsechanges)
def UpdateResponsechanges(self):
Questions_ID = self.RQuestion_ID.text()
Attempt_ID = self.RAttempt_ID.text()
Response_ID = self.ResponseID.text()
StudentResponse = self.StudentResponse.text()
Response = Response_Controller()
Response.Update_Response(Questions_ID,Attempt_ID,Response_ID,StudentResponse)
def DeleteResponse(self):
Response = Response_Controller()
results = Response.Return_ResponseNoInput()
self.setWindowTitle('Responses Editor')
Maxrows = len(results)
self.RecordTable = QTableWidget(Maxrows,4)
self.RecordTable.setHorizontalHeaderLabels(['ID Number','Question_ID','Attempt_ID','Student Response'])
row = 0
column = 0
for i in range(0,4):
for each in results:
additem = results[row][column]
if type(additem) == int:
additem = str(additem)
item = QTableWidgetItem(additem)
self.RecordTable.setItem(row,column,item)
if row <= Maxrows:
row +=1
if row == Maxrows:
column +=1
row = 0
#create table and push buttons
self.ResponseIDLabel = QLabel('enter an appropriate Response ID')
self.ResponseIDinfo = QLineEdit()
self.SubmitChanges = QPushButton('Submit Changes')
#create layout for labels and line edits
self.InputLayout = QGridLayout()
self.InputLayout.addWidget(self.ResponseIDLabel,0,0)
self.InputLayout.addWidget(self.ResponseIDinfo,0,1)
#mainWidget
self.ResponseLayout =QVBoxLayout()
self.ResponseLayout.addWidget(self.RecordTable)
self.ResponseLayout.addLayout(self.InputLayout)
self.ResponseLayout.addWidget(self.SubmitChanges)
self.DeleteResponse = QWidget()
self.DeleteResponse.setLayout(self.ResponseLayout)
self.setCentralWidget(self.DeleteResponse)
#connections
self.SubmitChanges.clicked.connect(self.DeleteResponsechanges)
def DeleteResponsechanges(self):
Response_ID = self.ResponseIDinfo.text()
Response = Response_Controller()
Response.Delete_Response(Response_ID)
def SUVATInputLayout(self):
self.setWindowTitle('SimulationWindow')
#create line edits and labels
self.SimulationMessage = QLabel("Welcome to SUVAT Equation Simulation")
self.DisplacementLabel = QLabel("Displacement")
self.DisplacementLineEdit = QLineEdit()
self.InitialvelocityLabel = QLabel("Initial Velocity")
self.InitialvelocityLineEdit = QLineEdit()
self.FinalLabel = QLabel("Final Velocity")
self.FinalLineEdit = QLineEdit()
self.AccelerationLabel = QLabel("Acceleration")
self.AccelerationLineEdit = QLineEdit()
self.TimeLabel = QLabel("Time")
self.TimeLineEdit = QLineEdit()
self.SubmitButton = QPushButton('SubmitItems')
#SUVAT inputs
self.InputGridLayout = QGridLayout()
self.InputGridLayout.addWidget(self.DisplacementLabel,0,0)
self.InputGridLayout.addWidget(self.DisplacementLineEdit,0,1)
self.InputGridLayout.addWidget(self.InitialvelocityLabel,1,0)
self.InputGridLayout.addWidget(self.InitialvelocityLineEdit,1,1)
self.InputGridLayout.addWidget(self.FinalLabel,2,0)
self.InputGridLayout.addWidget(self.FinalLineEdit,2,1)
self.InputGridLayout.addWidget(self.AccelerationLabel,3,0)
self.InputGridLayout.addWidget(self.AccelerationLineEdit,3,1)
self.InputGridLayout.addWidget(self.TimeLabel,4,0)
self.InputGridLayout.addWidget(self.TimeLineEdit,4,1)
self.InputsLayout = QVBoxLayout()
self.InputsLayout.addWidget(self.SimulationMessage)
self.InputsLayout.addLayout(self.InputGridLayout)
self.InputsLayout.addWidget(self.SubmitButton)
#QWidget
self.SUVATInputsWidget = QWidget()
self.SUVATInputsWidget.setLayout(self.InputsLayout)
self.setCentralWidget(self.SUVATInputsWidget)
#Connect
self.SubmitButton.clicked.connect(self.SimulationPrep)
def SimulationPrep(self):
Displacement = self.DisplacementLineEdit.text()
try:
Displacement = int(Displacement)
except:
if Displacement == '':
Displacement = 0
InitialVelocity = self.InitialvelocityLineEdit.text()
try:
InitialVelocity = int(InitialVelocity)
except:
if InitialVelocity == '':
InitialVelocity = 0
FinalVelocity = self.FinalLineEdit.text()
try:
FinalVelocity = int(FinalVelocity)
except:
if FinalVelocity == '':
FinalVelocity = 0
Acceleration = self.AccelerationLineEdit.text()
try:
Acceleration = int(Acceleration)
except:
if Acceleration == '':
Acceleration = 0
Time = self.TimeLineEdit.text()
try:
Time = int(Time)
except:
if Time == '':
Time = 0
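        # the five try/except blocks above all perform the same "int or 0"
        # parse; a hypothetical helper could replace them, e.g. (sketch only,
        # not wired in):
        #   def to_int(text):
        #       try:
        #           return int(text)
        #       except ValueError:
        #           return 0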
InputList = [Displacement,InitialVelocity,FinalVelocity,Acceleration,Time]
Propulsion1 = Propulsion('Spaceship',0,0,0,45,InitialVelocity,FinalVelocity,Acceleration,Displacement,Time,-9.81)
PreSimulationResults = Propulsion1.SUVATInputs(InputList)
Propulsion1.SUVATLink(PreSimulationResults)
SUVAT = Propulsion1.StartSimulation()
SUVAT2 = Propulsion1.getSuvat()
self.SimulationWindow(SUVAT,SUVAT2)
def SimulationWindow(self,SUVAT,SUVAT2):
print(SUVAT)
FormattedTime = SUVAT2[4]
FormattedTime = FormattedTime*1000
Distance = SUVAT2[0]
Distance = ((Distance*10)/10)
DistanceDec = Distance
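        # DistanceDec is the fixed horizontal step added between keyframes, so
        # the projectile advances by one distance unit per tenth of the
        # timeline while the y offsets trace a rise-and-fall arc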
#Image container
ProjectileScenes = MyView()
# graphics view
#Graphics Items
Image = QPixmap('ball-6x6.png')
scene = QtGui.QGraphicsScene(self)
item = QtGui.QGraphicsPixmapItem(Image)
#first item is
scene.addItem(item)
# Remember to hold the references to QTimeLine and QGraphicsItemAnimation instances.
# They are not kept anywhere, even if you invoke QTimeLine.start().
self.TimeLine = QtCore.QTimeLine(FormattedTime)
self.TimeLine.setFrameRange(0,1)
self.Animate = QtGui.QGraphicsItemAnimation()
self.Animate.setItem(item)
self.Animate.setTimeLine(self.TimeLine)
# Each method determining an animation state (e.g. setPosAt, setRotationAt etc.)
# takes as a first argument a step which is a value between 0 (the beginning of the
# animation) and 1 (the end of the animation)
self.Animate.setPosAt(0, QtCore.QPointF(0,0))
self.Animate.setPosAt(0.1, QtCore.QPointF(Distance,-20))
Distance = Distance+DistanceDec
self.Animate.setPosAt(0.2, QtCore.QPointF(Distance,-40))
Distance = Distance+DistanceDec
self.Animate.setPosAt(0.3, QtCore.QPointF(Distance,-60))
Distance = Distance+DistanceDec
self.Animate.setPosAt(0.4, QtCore.QPointF(Distance,-80))
Distance = Distance+DistanceDec
        self.Animate.setPosAt(0.5, QtCore.QPointF(Distance,-90))
Distance = Distance+DistanceDec
self.Animate.setPosAt(0.6, QtCore.QPointF(Distance,-80))
Distance = Distance+DistanceDec
self.Animate.setPosAt(0.7, QtCore.QPointF(Distance,-60))
Distance = Distance+DistanceDec
self.Animate.setPosAt(0.8, QtCore.QPointF(Distance,-40))
Distance = Distance+DistanceDec
self.Animate.setPosAt(0.9, QtCore.QPointF(Distance,-20))
Distance = Distance+DistanceDec
self.Animate.setPosAt(1, QtCore.QPointF(Distance,0))
self.ProjectileView = QtGui.QGraphicsView(scene)
#Push Buttons
self.MakeProjectile = QPushButton("Make a projectile")
self.StartSim = QPushButton("Start Simulating")
#create grid layout
self.BottomGridLayout = QGridLayout()
#add items to grid
self.BottomGridLayout.addWidget(self.StartSim,1,5)
#create box layout
self.SimulationBoxLayout = QVBoxLayout()
#add grid layout and Label to Box Layout
self.SimulationBoxLayout.addWidget(self.ProjectileView)
self.SimulationBoxLayout.addLayout(self.BottomGridLayout)
#create main Widget
self.SimulationWidget = QWidget()
self.SimulationWidget.setLayout(self.SimulationBoxLayout)
self.setCentralWidget(self.SimulationWidget)
#connect
self.StartSim.clicked.connect(self.RunGraphics)
def RunGraphics(self):
self.TimeLine.start()
def RunSimulation(self):
self.SUVATInputLayout()
def TestingClassWindow(self):
self.setWindowTitle('Tests')
#create line edits and labels
        self.TestInput = QLineEdit()  # assumed widget; the original statement was left incomplete ('Q')
    def SimulationReset(self):
        # a bound signal's `.connect` attribute is always truthy, so this
        # check can never take the else branch; the method remains a stub
        choice = None
        if self.ResetButton.clicked.connect:
            choice = 1
            print("the button works")
        else:
            choice = 2
def Retry(self):
print('1')
def TestSelection(self):
self.setWindowTitle("Test Selection")
#createMenu and Labels
TestList = StudentTest.getTests(StudentTest)
#ComboCox
self.ComboBoxInput = QComboBox()
for each in TestList:
self.ComboBoxInput.addItem(each[0])
self.DialogLabel = QLabel("Please Select a Test!")
# GridLayout
self.GridLayout = QGridLayout()
self.GridLayout.addWidget(self.DialogLabel,0,0)
self.GridLayout.addWidget(self.ComboBoxInput,0,1)
#setMain Widget
self.TestWidget = QWidget()
self.TestWidget.setLayout(self.GridLayout)
self.setCentralWidget(self.TestWidget)
#return TestList
self.ComboBoxInput.currentIndexChanged.connect(self.TestComboBox)
def TestComboBox(self,string):
Test_Name = ''
TestList = StudentTest.getTests(StudentTest)
TestValue = 0
for i in range(len(TestList)):
TestList[i]=i
for each in TestList:
if each == string:
TestValue = each
for i in range(len(TestList)):
if i == TestValue:
TestList = StudentTest.getTests(StudentTest)
Test_Name = TestList[-i][0]
Test_Name = str(Test_Name)
        print(Test_Name)
Test_ID = StudentTest.getTestID(StudentTest,Test_Name)
for i in range(len(Test_ID)):
Test_ID = Test_ID[-i][0]
print(Test_ID,'1')
Questions_ID = StudentTest.RetrieveQuestionsID(self,Test_ID)
print(Questions_ID)
QuestionsOriginal = StudentTest.RetrieveQuestions(self,Questions_ID)
Questions = QuestionsOriginal
print(Questions)
        for i in range(len(Questions)):
            Questions = Questions[-1][-1]
        Answers = StudentTest.RetrieveAnswers(self,Questions_ID)
        for i in range(len(Answers)):
            Answers = Answers[-1][0]
CorrectAnswer = Answers
self.TestResponses(Test_Name,Questions_ID,QuestionsOriginal,Answers,CorrectAnswer)
def TestResponses(self,Test_Name,Questions_ID,QuestionsOriginal,Answers,CorrectAnswer):
print(Test_Name,Questions_ID,QuestionsOriginal,Answers)
self.setWindowTitle("TestQuestions")
# Push Buttons
self.TestRetryButton= QPushButton("Retry?")
self.SubmitResults = QPushButton("Submit Results")
#create layouts
self.BottomGridLayout = QGridLayout()
self.ButtonBoxLayout = QVBoxLayout()
#create Line Edits and Labels
#append items to layout
self.Question1 = QLabel(QuestionsOriginal[-1][-1])
self.QuestionResponse1 = QLineEdit()
self.BottomGridLayout.addWidget(self.Question1,0,0)
self.BottomGridLayout.addWidget(self.QuestionResponse1,0,1)
if len(QuestionsOriginal) >1:
self.Question2 = QLabel(QuestionsOriginal[1])
self.QuestionResponse2 = QLineEdit()
self.BottomGridLayout.addWidget(self.Question2,1,0)
self.BottomGridLayout.addWidget(self.QuestionResponse2,1,1)
if len(QuestionsOriginal) >2:
self.Question3 = QLabel(QuestionsOriginal[2])
self.QuestionResponse3 = QLineEdit()
self.BottomGridLayout.addWidget(self.Question3,2,0)
self.BottomGridLayout.addWidget(self.QuestionResponse3,2,1)
if len(QuestionsOriginal) >3:
self.Question4 = QLabel(QuestionsOriginal[3])
self.QuestionResponse4 = QLineEdit()
self.BottomGridLayout.addWidget(self.Question4,3,0)
self.BottomGridLayout.addWidget(self.QuestionResponse4,3,1)
        # questions 5-10: create each label/line-edit pair and add it to the
        # grid within the same branch
        if len(QuestionsOriginal) >4:
            self.Question5 = QLabel(QuestionsOriginal[4])
            self.QuestionResponse5 = QLineEdit()
            self.BottomGridLayout.addWidget(self.Question5,4,0)
            self.BottomGridLayout.addWidget(self.QuestionResponse5,4,1)
        if len(QuestionsOriginal) >5:
            self.Question6 = QLabel(QuestionsOriginal[5])
            self.QuestionResponse6 = QLineEdit()
            self.BottomGridLayout.addWidget(self.Question6,5,0)
            self.BottomGridLayout.addWidget(self.QuestionResponse6,5,1)
        if len(QuestionsOriginal) >6:
            self.Question7 = QLabel(QuestionsOriginal[6])
            self.QuestionResponse7 = QLineEdit()
            self.BottomGridLayout.addWidget(self.Question7,6,0)
            self.BottomGridLayout.addWidget(self.QuestionResponse7,6,1)
        if len(QuestionsOriginal) >7:
            self.Question8 = QLabel(QuestionsOriginal[7])
            self.QuestionResponse8 = QLineEdit()
            self.BottomGridLayout.addWidget(self.Question8,7,0)
            self.BottomGridLayout.addWidget(self.QuestionResponse8,7,1)
        if len(QuestionsOriginal) >8:
            self.Question9 = QLabel(QuestionsOriginal[8])
            self.QuestionResponse9 = QLineEdit()
            self.BottomGridLayout.addWidget(self.Question9,8,0)
            self.BottomGridLayout.addWidget(self.QuestionResponse9,8,1)
        if len(QuestionsOriginal) >9:
            self.Question10 = QLabel(QuestionsOriginal[9])
            self.QuestionResponse10 = QLineEdit()
            self.BottomGridLayout.addWidget(self.Question10,9,0)
            self.BottomGridLayout.addWidget(self.QuestionResponse10,9,1)
self.QuestionMessage = QLabel("Answer all of the questions correctly!")
#QVBoxlayout
self.ButtonBoxLayout.addWidget(self.TestRetryButton)
self.ButtonBoxLayout.addWidget(self.SubmitResults)
#create box layout
self.QuestionLayout = QVBoxLayout()
        #add grid and label to box layout
self.QuestionLayout.addWidget(self.QuestionMessage)
self.QuestionLayout.addLayout(self.BottomGridLayout)
self.QuestionLayout.addLayout(self.ButtonBoxLayout)
#create main widget
self.QuestionWidget = QWidget()
self.QuestionWidget.setLayout(self.QuestionLayout)
self.setCentralWidget(self.QuestionWidget)
#add functions to buttons
self.TestRetryButton.clicked.connect(self.Retry)
        self.SubmitResults.clicked.connect(lambda: self.MarkQuestions(CorrectAnswer))
def MarkResponses(self):
self.ResponseList = []
try:
self.QuestionResult = self.QuestionResponse1.text()
self.ResponseList.append(self.QuestionResult)
except:
print()
try:
self.QuestionResult1 = self.QuestionResponse2.text()
self.ResponseList.append(self.QuestionResult1)
except:
print()
try:
self.QuestionResult2 = self.QuestionResponse3.text()
self.ResponseList.append(self.QuestionResult2)
except:
print()
try:
self.QuestionResult3 = self.QuestionResponse4.text()
self.ResponseList.append(self.QuestionResult3)
except:
print()
try:
self.QuestionResult4 = self.QuestionResponse5.text()
self.ResponseList.append(self.QuestionResult4)
except:
print()
try:
self.QuestionResult5 = self.QuestionResponse6.text()
self.ResponseList.append(self.QuestionResult5)
except:
print()
try:
self.QuestionResult6 = self.QuestionResponse7.text()
self.ResponseList.append(self.QuestionResult6)
except:
print()
try:
self.QuestionResult7 = self.QuestionResponse8.text()
            self.ResponseList.append(self.QuestionResult7)
except:
print()
try:
self.QuestionResult8 = self.QuestionResponse9.text()
self.ResponseList.append(self.QuestionResult8)
except:
print()
try:
self.QuestionResult9 = self.QuestionResponse10.text()
self.ResponseList.append(self.QuestionResult9)
except:
print()
print(self.ResponseList)
return self.ResponseList
def MarkQuestions(self,CorrectAnswer):
Answers = self.MarkResponses()
Answered = False
Score = 1
TrueScore = 0
print(CorrectAnswer)
MaxScore = len(CorrectAnswer)
while Answered != True:
for each in Answers:
for i in range(MaxScore):
if each == (CorrectAnswer[i]):
TrueScore += 1
Score +=1
print('Question',i+1,'. was correct')
Answered = True
i = 0
elif each != (CorrectAnswer[i]):
print('Question',i+1,'. was wrong')
Answered = False
if Answered == False:
Retry = 0
Retry = input('would you like to retry enter 1 if you would like too!')
if Retry == '1':
for each in Answers:
Answers.pop()
TrueScore = 0
Score = 1
Retry = 0
i = 0
                    self.Retry()
else:
if Answers[0] == Answers:
TrueScore += 1
Score +=1
Answered = True
if TrueScore == MaxScore:
Answered = True
print('You Scored',TrueScore,'out of a total of',MaxScore)
self.AnswerReview(TrueScore,MaxScore)
def AnswerReview(self,TrueScore,MaxScore):
self.setWindowTitle("Answers Review!")
TrueScore = str(TrueScore)
MaxScore = str(MaxScore)
#createMenu and Labels
self.AnswersTable = QGridLayout()
self.AnswerLabel1 = QLabel('You Got:')
self.AnswerLabel2 = QLabel(TrueScore)
self.AnswerLabel3 = QLabel('out of:')
self.AnswerLabel4 = QLabel(MaxScore)
# GridLayout
self.AnswersTable.addWidget(self.AnswerLabel1)
self.AnswersTable.addWidget(self.AnswerLabel2)
self.AnswersTable.addWidget(self.AnswerLabel3)
self.AnswersTable.addWidget(self.AnswerLabel4)
self.AnswersReviewGridLayout = QVBoxLayout()
        self.AnswersReviewGridLayout.addLayout(self.AnswersTable)
#setMain Widget
self.AnswersReviewWidget = QWidget()
self.AnswersReviewWidget.setLayout(self.AnswersReviewGridLayout)
self.setCentralWidget(self.AnswersReviewWidget)
class MyView(QtGui.QGraphicsView):
def __init__(self):
QtGui.QGraphicsView.__init__(self)
self.Image = QPixmap('rocket.jpg')
self.scene = QtGui.QGraphicsScene(self)
self.item = QtGui.QGraphicsPixmapItem(self.Image)
#first item is
self.scene.addItem(self.item)
self.setScene(self.scene)
# Remember to hold the references to QTimeLine and QGraphicsItemAnimation instances.
# They are not kept anywhere, even if you invoke QTimeLine.start().
self.tl = QtCore.QTimeLine(14000)
self.tl.setFrameRange(0, 100)
self.a = QtGui.QGraphicsItemAnimation()
self.a.setItem(self.item)
self.a.setTimeLine(self.tl)
# Each method determining an animation state (e.g. setPosAt, setRotationAt etc.)
# takes as a first argument a step which is a value between 0 (the beginning of the
# animation) and 1 (the end of the animation)
self.a.setPosAt(0, QtCore.QPointF(0, -10))
self.a.setRotationAt(0.5, 36000)
self.a.setPosAt(0.9, QtCore.QPointF(-10,100))
self.a.setPosAt(1, QtCore.QPointF(1000,100))
self.tl.start()
if __name__ == "__main__":
application = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    main_window.raise_()
application.exec_()
| [
"[email protected]"
] | |
1ce4e956fe58872f2719ab4f3c67b8c279caf0a8 | 8cf5c91fa744f49b40264061d4fd510ea761cf8f | /build/lib/dragonn/visualize_util.py | f672496759111da32eecdc588fa5a4607cc0eb20 | [
"MIT"
] | permissive | AvantiShri/dragonn | 0f38371ac7734099279f3b3e204565d9a663102f | aeb9674f39b71d07ff62d2c3745bef4a2e55b95f | refs/heads/master | 2020-04-23T18:01:34.394772 | 2019-02-18T06:07:40 | 2019-02-18T06:07:40 | 171,352,688 | 0 | 0 | MIT | 2019-02-18T20:37:59 | 2019-02-18T20:37:59 | null | UTF-8 | Python | false | false | 5,953 | py | # Adapted from Keras source code
# License: https://github.com/fchollet/keras/blob/master/LICENSE
import itertools
from keras.layers.containers import Graph, Sequential
from keras.layers.core import Merge
try:
# pydot-ng is a fork of pydot that is better maintained
import pydot_ng as pydot
except ImportError:
# fall back on pydot if necessary
import pydot
if not pydot.find_graphviz():
raise RuntimeError("Failed to import pydot. You must install pydot"
" and graphviz for `pydotprint` to work.")
def layer_typename(layer):
return type(layer).__module__ + "." + type(layer).__name__
def get_layer_to_name(model):
"""Returns a dict mapping layer to their name in the model"""
if not isinstance(model, Graph):
return {}
else:
node_to_name = itertools.chain(
model.nodes.items(), model.inputs.items(), model.outputs.items()
)
return {v: k for k, v in node_to_name}
class ModelToDot(object):
"""
This is a helper class which visits a keras model (Sequential or Graph) and
returns a pydot.Graph representation.
This is implemented as a class because we need to maintain various states.
Use it as ```ModelToDot()(model)```
Keras models can have an arbitrary number of inputs and outputs. A given
layer can have multiple inputs but has a single output. We therefore
explore the model by starting at its output and crawling "up" the tree.
"""
def _pydot_node_for_layer(self, layer, label):
"""
Returns the pydot.Node corresponding to the given layer.
`label` specify the name of the layer (only used if the layer isn't yet
associated with a pydot.Node)
"""
# Check if this already exists (will be the case for nodes that
# serve as input to more than one layer)
if layer in self.layer_to_pydotnode:
node = self.layer_to_pydotnode[layer]
else:
layer_id = 'layer%d' % self.idgen
self.idgen += 1
label = label + " (" + layer_typename(layer) + ")"
if self.show_shape:
# Build the label that will actually contain a table with the
# input/output
outputlabels = str(layer.output_shape)
if hasattr(layer, 'input_shape'):
inputlabels = str(layer.input_shape)
elif hasattr(layer, 'input_shapes'):
inputlabels = ', '.join(
[str(ishape) for ishape in layer.input_shapes])
else:
inputlabels = ''
label = "%s\n|{input:|output:}|{{%s}|{%s}}" % (
label, inputlabels, outputlabels)
node = pydot.Node(layer_id, label=label)
self.g.add_node(node)
self.layer_to_pydotnode[layer] = node
return node
def _process_layer(self, layer, layer_to_name=None, connect_to=None):
"""
Process a layer, adding its node to the graph and creating edges to its
outputs.
`connect_to` specify where the output of the current layer will be
connected
`layer_to_name` is a dict mapping layer to their name in the Graph
model. Should be {} when processing a Sequential model
"""
# The layer can be a container layer, in which case we can recurse
is_graph = isinstance(layer, Graph)
is_seq = isinstance(layer, Sequential)
if self.recursive and (is_graph or is_seq):
# We got a container layer, recursively transform it
if is_graph:
child_layers = layer.outputs.values()
else:
child_layers = [layer.layers[-1]]
for l in child_layers:
self._process_layer(l, layer_to_name=get_layer_to_name(layer),
connect_to=connect_to)
else:
# This is a simple layer.
label = layer_to_name.get(layer, '')
layer_node = self._pydot_node_for_layer(layer, label=label)
if connect_to is not None:
self.g.add_edge(pydot.Edge(layer_node, connect_to))
# Proceed upwards to the parent(s). Only Merge layers have more
# than one parent
if isinstance(layer, Merge): # Merge layer
for l in layer.layers:
self._process_layer(l, layer_to_name,
connect_to=layer_node)
elif hasattr(layer, 'previous') and layer.previous is not None:
self._process_layer(layer.previous, layer_to_name,
connect_to=layer_node)
def __call__(self, model, recursive=True, show_shape=False,
connect_to=None):
self.idgen = 0
# Maps keras layer to the pydot.Node representing them
self.layer_to_pydotnode = {}
self.recursive = recursive
self.show_shape = show_shape
self.g = pydot.Dot()
self.g.set('rankdir', 'TB')
self.g.set('concentrate', True)
self.g.set_node_defaults(shape='record')
if hasattr(model, 'outputs'):
# Graph
for name, l in model.outputs.items():
self._process_layer(l, get_layer_to_name(model),
connect_to=connect_to)
else:
# Sequential container
self._process_layer(model.layers[-1], {}, connect_to=connect_to)
return self.g
def to_graph(model, **kwargs):
"""
`recursive` controls whether we recursively explore container layers
`show_shape` controls whether the shape is shown in the graph
"""
return ModelToDot()(model, **kwargs)
def plot(model, to_file='model.png', **kwargs):
graph = to_graph(model, **kwargs)
graph.write_png(to_file)
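# Example usage (a sketch, not part of the original module; assumes `model`
# is a keras 0.x Sequential or Graph instance, matching the containers API
# imported above):
#   from dragonn.visualize_util import plot
#   plot(model, to_file="model.png", show_shape=True)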
| [
"[email protected]"
] | |
84384e7ac129c854281286bde8c8fa39109edf50 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/409/usersdata/308/79040/submittedfiles/av1_programa1.py | cf9b07d9090afe0bfb3912b3b86c0d8a9fbf8726 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | # -*- coding: utf-8 -*-
#Reading the value from the user
x = int(input('Enter the value: '))
#testing whether it is even
if (x%2==0):
    print('EVEN')
else:
    print('ODD')
| [
"[email protected]"
] | |
7ce9bf77dfec1cb38f3c07dd07663289deb8f780 | eb382e151b1d6718a0c1b77a8b9c27798c3cbc64 | /couchapp/Lib/restkit/resource.py | bc171758a63e45a5257412b7ac973cf0fff4f6ba | [] | no_license | sellis/com.pittypanda.plugins.couch | 05bdb3a10723bd975a8e899a6f2f43abf5e0f595 | b9421d10f3aa20c0a5448bfcafe4ea7bd67a2d45 | refs/heads/master | 2021-01-24T05:48:20.204623 | 2010-10-26T20:28:51 | 2010-10-26T20:28:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,015 | py | # -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
"""
restkit.resource
~~~~~~~~~~~~~~~~
This module provides a common interface for all HTTP requests.
"""
from copy import copy
import urlparse
from restkit.errors import ResourceNotFound, Unauthorized, RequestFailed,\
ParserError, RequestError
from restkit.client import HttpRequest, HttpResponse
from restkit.filters import BasicAuth
from restkit import util
from restkit.pool.simple import SimplePool
_default_pool = None
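# lazily build one module-level SimplePool that is shared by every Resource
# created with keepalive enabled; it is created on first use, then reused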
def default_pool(keepalive, timeout):
global _default_pool
if _default_pool is None:
_default_pool = SimplePool(keepalive=keepalive, timeout=timeout)
return _default_pool
class Resource(object):
"""A class that can be instantiated for access to a RESTful resource,
including authentication.
"""
charset = 'utf-8'
encode_keys = True
safe = "/:"
keepalive = True
basic_auth_url = True
response_class = HttpResponse
def __init__(self, uri, **client_opts):
"""Constructor for a `Resource` object.
        Resource represents an HTTP resource.
:param uri: str, full uri to the server.
:param client_opts: `restkit.client.HttpRequest` Options
"""
client_opts = client_opts or {}
self.initial = dict(
uri = uri,
client_opts = client_opts.copy()
)
# set default response_class
if self.response_class is not None and \
not 'response_class' in client_opts:
client_opts['response_class'] = self.response_class
# set default pool if needed
if not 'pool_instance' in client_opts and self.keepalive:
timeout = client_opts.get('timeout') or 300
keepalive = client_opts.get('keepalive') or 10
client_opts['pool_instance'] = default_pool(keepalive, timeout)
self.filters = client_opts.get('filters') or []
if self.basic_auth_url:
# detect credentials from url
u = urlparse.urlparse(uri)
if u.username:
password = u.password or ""
# add filters
filters = copy(self.filters)
filters.append(BasicAuth(u.username, password))
client_opts['filters'] = filters
# update uri
uri = urlparse.urlunparse((u.scheme, u.netloc.split("@")[-1],
u.path, u.params, u.query, u.fragment))
self.uri = uri
self.client_opts = client_opts
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.uri)
def clone(self):
"""if you want to add a path to resource uri, you can do:
.. code-block:: python
            res2 = res.clone()
"""
obj = self.__class__(self.initial['uri'],
**self.initial['client_opts'])
return obj
def __call__(self, path):
"""if you want to add a path to resource uri, you can do:
.. code-block:: python
Resource("/path").get()
"""
uri = self.initial['uri']
new_uri = util.make_uri(uri, path, charset=self.charset,
safe=self.safe, encode_keys=self.encode_keys)
obj = type(self)(new_uri, **self.initial['client_opts'])
return obj
def close(self):
""" Close all the connections related to the resource """
pool = self.client_opts.get('pool_instance')
if not pool:
return
parsed_url = urlparse.urlparse(self.uri)
pool.clear_host(util.parse_netloc(parsed_url))
def get(self, path=None, headers=None, **params):
""" HTTP GET
        :param path: string additional path to the uri
        :param headers: dict, optional headers that will
        be added to HTTP request.
        :param params: Optional parameters added to the request.
"""
return self.request("GET", path=path, headers=headers, **params)
def head(self, path=None, headers=None, **params):
""" HTTP HEAD
see GET for params description.
"""
return self.request("HEAD", path=path, headers=headers, **params)
def delete(self, path=None, headers=None, **params):
""" HTTP DELETE
see GET for params description.
"""
return self.request("DELETE", path=path, headers=headers, **params)
def post(self, path=None, payload=None, headers=None, **params):
""" HTTP POST
:param payload: string passed to the body of the request
        :param path: string additional path to the uri
        :param headers: dict, optional headers that will
        be added to HTTP request.
        :param params: Optional parameters added to the request
"""
return self.request("POST", path=path, payload=payload,
headers=headers, **params)
def put(self, path=None, payload=None, headers=None, **params):
""" HTTP PUT
see POST for params description.
"""
return self.request("PUT", path=path, payload=payload,
headers=headers, **params)
def make_params(self, params):
return params or {}
def make_headers(self, headers):
return headers or []
def unauthorized(self, response):
return True
def request(self, method, path=None, payload=None, headers=None,
**params):
""" HTTP request
This method may be the only one you want to override when
subclassing `restkit.rest.Resource`.
:param payload: string or File object passed to the body of the request
        :param path: string additional path to the uri
        :param headers: dict, optional headers that will
        be added to HTTP request.
        :param params: Optional parameters added to the request
"""
while True:
uri = util.make_uri(self.uri, path, charset=self.charset,
safe=self.safe, encode_keys=self.encode_keys,
**self.make_params(params))
# make request
http = HttpRequest(**self.client_opts)
resp = http.request(uri, method=method, body=payload,
headers=self.make_headers(headers))
if resp is None:
# race condition
raise ValueError("Unkown error: response object is None")
if resp.status_int >= 400:
if resp.status_int == 404:
raise ResourceNotFound(resp.body_string(),
response=resp)
elif resp.status_int in (401, 403):
if self.unauthorized(resp):
raise Unauthorized(resp.body_string(),
http_code=resp.status_int,
response=resp)
else:
raise RequestFailed(resp.body_string(),
http_code=resp.status_int,
response=resp)
else:
break
return resp
def update_uri(self, path):
"""
to set a new uri absolute path
"""
self.uri = util.make_uri(self.uri, path, charset=self.charset,
safe=self.safe, encode_keys=self.encode_keys)
        self.initial['uri'] = util.make_uri(self.initial['uri'], path,
                                charset=self.charset,
                                safe=self.safe,
                                encode_keys=self.encode_keys)
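# Example usage (a sketch, not part of the original module; the host URL and
# path are hypothetical):
#   res = Resource("http://localhost:5984")
#   resp = res.get("/_all_dbs")
#   print resp.body_string()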
| [
"[email protected]"
] | |
c01239ec19657a3b971198fb74b565f9bb6abe61 | 7109daa5820683340c1e393b57ba5242d624d952 | /core/dataset.py | 852114105aa754accc2c9e7286202d80713449c0 | [] | no_license | bysen32/CODE0 | 9a7f15fb6dc5e4234ca5e432f097727cacd3f538 | e32535797dbfd24389d7754bc3e57cf24c284b11 | refs/heads/master | 2022-12-24T05:40:02.360762 | 2020-09-24T14:02:37 | 2020-09-24T14:02:37 | 298,296,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | import numpy as np
import scipy.misc
import os
from PIL import Image
from torchvision import transforms
from config import INPUT_SIZE
class CUB:
def __init__(self, root, is_train=True, data_len=None):
self.root = root
self.is_train = is_train
img_name_list = []
img_txt_file = open(os.path.join(self.root, "images.txt"))
for line in img_txt_file:
img_name_list.append(line[:-1].split(" ")[-1])
label_list = []
label_txt_file = open(os.path.join(self.root, "image_class_labels.txt"))
for line in label_txt_file:
label_list.append(int(line[:-1].split(" ")[-1]) - 1)
train_test_list = []
train_val_file = open(os.path.join(self.root, "train_test_split.txt"))
for line in train_val_file:
train_test_list.append(int(line[:-1].split(" ")[-1]))
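        # train_test_split.txt marks each image with 1 (train) or 0 (test);
        # zipping the flags with the file names partitions the dataset below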
train_file_list = [x for i, x in zip(train_test_list, img_name_list) if i]
test_file_list = [x for i, x in zip(train_test_list, img_name_list) if not i]
if self.is_train:
self.train_img = [scipy.misc.imread(os.path.join(self.root, "images", train_file)) for train_file in train_file_list[:data_len]]
self.train_label = [x for i, x in zip(train_test_list, label_list) if i][:data_len]
else:
self.test_img = [scipy.misc.imread(os.path.join(self.root, "images", test_file)) for test_file in test_file_list[:data_len]]
self.test_label = [x for i, x in zip(train_test_list, label_list) if not i][:data_len]
def __getitem__(self, index):
if self.is_train:
img, target = self.train_img[index], self.train_label[index]
if len(img.shape) == 2:
img = np.stack([img] * 3, 2)
img = Image.fromarray(img, mode="RGB")
img = transforms.Resize((600, 600), Image.BILINEAR)(img)
img = transforms.RandomCrop(INPUT_SIZE)(img)
img = transforms.RandomHorizontalFlip()(img)
img = transforms.ToTensor()(img)
img = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
else:
img, target = self.test_img[index], self.test_label[index]
if len(img.shape) == 2:
img = np.stack([img] * 3, 2)
img = Image.fromarray(img, mode="RGB")
img = transforms.Resize((600, 600), Image.BILINEAR)(img)
img = transforms.CenterCrop(INPUT_SIZE)(img)
img = transforms.ToTensor()(img)
img = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
return img, target
def __len__(self):
if self.is_train:
return len(self.train_label)
else:
return len(self.test_label)
if __name__ == "__main__":
import torch
INPUT_SIZE = (448, 448)
trainset = CUB(root="./CUB_200_2011", is_train=True)
print(len(trainset.train_img))
print(len(trainset.train_label))
for data in trainset:
print(data[0].size(), data[1])
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=4, drop_last=False)
for i, data in enumerate(trainloader):
img, label= data[0].cuda(), data[1].cuda()
print(i, label)
# dataset = CUB(root="./CUB_200_2011", is_train=False)
# print(len(dataset.test_img))
# print(len(dataset.test_label))
# for data in dataset:
# print(data[0].size(), data[1])
| [
"[email protected]"
] | |
4b43e2b427a43ba07dfc176dcf8a63cf10d902c4 | 1a9e6b7105cef0aeac6d3c4ae74087c578e3103f | /Django Homeworks/mysite/blog/forms.py | 09f8d88b919888b97638cd57d7610d319d28e7f7 | [] | no_license | NarekID/Basic-IT-Center_Python | cb0c935bf61d371407ae2e7620c38ed1e25fb4a2 | b842519db43cfc19b042d1c2eeac21592a7596ff | refs/heads/master | 2020-08-03T09:15:02.455482 | 2019-12-02T11:53:23 | 2019-12-02T11:53:23 | 211,696,769 | 0 | 0 | null | 2020-04-30T13:50:14 | 2019-09-29T17:04:12 | Python | UTF-8 | Python | false | false | 354 | py | from django import forms
class EmailPostFrom(forms.Form):
name = forms.CharField(max_length=30)
email = forms.EmailField()
to = forms.EmailField()
comments = forms.CharField(required=False, widget=forms.Textarea)
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
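# Usage sketch (hypothetical view code, not part of this module):
#   form = LoginForm(request.POST)
#   if form.is_valid():
#       cd = form.cleaned_data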
| [
"[email protected]"
] | |
0afdc85c81462cefc1a808107b7c37374cc5d2cd | a39be2eeb4d98a1171fa3da03654978d420babdd | /exportIllegalShopNameAndAddressForMyUCloud.py | f6a8a2077d26a281b13d7e501e491c21bf123046 | [] | no_license | yhqairqq/data_analyser | bbf153bccbcf57031aa90ceaa78abd4e329d6de9 | 7bd0996243c5a78cb99efc91d17e168d2184d17f | refs/heads/master | 2020-06-11T21:56:24.353103 | 2016-12-14T09:59:13 | 2016-12-14T09:59:13 | 75,620,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,962 | py | #!/usr/bin/env python
# coding:utf-8
import datetime
import pymongo
from bson.objectid import ObjectId
from utils import *
import re
import codecs
def check_contain_chinese(check_str):
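    # CJK Unified Ideographs occupy the code point range U+4E00..U+9FFF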
try:
for ch in check_str:
ch = unicode(ch)
if u'\u4e00' <= ch <= u'\u9fff':
return True
return False
except Exception, e:
print 'check_contain_chinese in', e
def check_contain_english(uchar_str):
try:
for uchar in uchar_str:
uchar = unicode(uchar)
if (uchar >= u'\u0041' and uchar <= u'\u005a') or (uchar >= u'\u0061' and uchar <= u'\u007a'):
return True
return False
except Exception, e:
print 'check_contain_english in', e
def check_contain_digital(uchar_str):
try:
for uchar in uchar_str:
uchar = unicode(uchar)
if uchar >= u'\u0030' and uchar <= u'\u0039':
return True
return False
except Exception, e:
print 'check_contain_digital in', e
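# remove_ch strips a fixed set of punctuation / special characters one
# re.sub call at a time; regex metacharacters must be escaped (see below)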
def remove_ch(str):
str = re.sub(u' ','',str)
str = re.sub(u'…','',str)
    str = re.sub(u'\.','',str)  # '.' must be escaped or it matches every character
str = re.sub(u'。','',str)
str = re.sub(u',','',str)
str = re.sub(u'…','',str)
str = re.sub(u'>','',str)
str = re.sub(u'@','',str)
str = re.sub(u'/','',str)
str = re.sub(u'-','',str)
str = re.sub(u'_','',str)
str = re.sub(u'#','',str)
str = re.sub(u'﹉','',str)
str = re.sub(u';','',str)
str = re.sub(u'、','',str)
str = re.sub(u'】','',str)
str = re.sub(u'%','',str)
str = re.sub(u'\+','',str)
str = re.sub(u'`','',str)
str = re.sub(u'、','',str)
str = re.sub(u'\?','',str)
    str = re.sub(u'\|','',str)  # '|' must be escaped (alternation metacharacter)
str = re.sub(u'!','',str)
str = re.sub(u',','',str)
str = re.sub(u'~','',str)
str = re.sub(u'—','',str)
str = re.sub(u'。','',str)
str = re.sub(u'’','',str)
str = re.sub(u'‘','',str)
str = re.sub(u'\*','',str)
    str = re.sub(u'\(','',str)  # '(' must be escaped or re raises an error
str = re.sub(u'&','',str)
str = re.sub(u'》','',str)
str = re.sub(u'《','',str)
return str
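# Each update*/export* function below pages through a collection in _id order:
# fetch `page_size` documents, remember the last _id of a full page, then
# resume the query with {'_id': {'$gt': last_row_id}} until a short page ends
# the scan.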
def updateCategory(db):
page_size = 10000
i = 1.0
last_row_id = ''
content = db.WithErrorMainShop5.find(
filter={
'tag':'subCategory'
}
).sort('_id', pymongo.ASCENDING).limit(page_size)
count = db.WithErrorMainShop5.find(
filter={
'tag':'subCategory'
}
).sort('_id', pymongo.ASCENDING).count()
print 'totalSize:', count
if count == 0:
exit(1)
while True:
row = 0
for json in content:
row = row + 1
if row == page_size - 1:
last_row_id = json[u'_id']
i = i + 1
if i % 10000 == 0:
print "完成>>>>%.2f" % (i / count * 100), "%"
try:
one=db.MainShop5.find(
filter={
'_id':json[u'_id2']
}
)
for curcor in one:
date_time = datetime.datetime.utcnow()
                    # normalize the main category and the sub category
                    category = curcor[u'category']
                    subCategory = curcor[u'subCategory']
                    if len(category)==0 or len(category[0])>60 or len(category[0])<1:
                        category=[u'其他']  # u'其他' == "Other" (kept as the stored data value)
                    if len(subCategory)==0 or len(subCategory[0])>60 or len(subCategory[0])<1:
                        subCategory=[u'其他']
                    # apply the update
                    result = db.MainShop5.update({'_id':json[u'_id2']},{'$set':{'category':category,'subCategory':subCategory,"updateAt":date_time}})
                    if result[u'nModified']==1 and i%1000==0 :
                        print 'update succeeded>>>>>>',curcor
except Exception, e:
print e, "Exception"
print "last_row_id>>>>>" + str(last_row_id)
if last_row_id != '':
content = db.WithErrorMainShop5.find(
filter={
'_id': {'$gt': ObjectId(last_row_id)},
'tag':'subCategory'
                    # any element matches all conditions
}
).sort('_id', pymongo.ASCENDING).limit(page_size)
last_row_id = ''
else:
print "完成>>>>%.2f" % (i / count * 100), "%"
exit(1);
def updateEmptyBrand(db):
page_size = 10000
i = 1.0
last_row_id = ''
content = db.MainShop5.find(
filter={
'brand':''
}
).sort('_id', pymongo.ASCENDING).limit(page_size)
count = db.MainShop5.find(
filter={
'brand':''
}
).sort('_id', pymongo.ASCENDING).count()
print 'totalSize:', count
if count == 0:
exit(1)
while True:
row = 0
for json in content:
row = row + 1
if row == page_size - 1:
last_row_id = json[u'_id']
i = i + 1
if i % 10000 == 0:
print "完成>>>>%.2f" % (i / count * 100), "%"
try:
                # print i
                # apply the update
                result = db.MainShop6.update_one({'_id':json[u'_id']},{'$set':json},True)
                if result.upserted_id!=None :
                    result1 = db.MainShop5.delete_one({'_id':json[u'_id']})
                    if result1.deleted_count==1 and i%1000==0:
                        print 'deleted-->',json[u'_id']
                    if i%1000==0:
                        print 'inserted>>>>>>',json[u'_id']
except Exception, e:
print e, "Exception"
if last_row_id!='':
print "last_row_id>>>>>" + str(last_row_id)
if last_row_id != '':
content = db.MainShop5.find(
filter={
'_id': {'$gt': ObjectId(last_row_id)},
'brand':''
                    # any element matches all conditions
}
).sort('_id', pymongo.ASCENDING).limit(page_size)
last_row_id = ''
else:
print "完成>>>>%.2f" % (i / count * 100), "%"
exit(1);
def updateEmptyAddress(db):
page_size = 100
i = 1.0
last_row_id = ''
content = db.MainShop5.find(
filter={
'address':''
}
).sort('_id', pymongo.ASCENDING).limit(page_size)
count = db.MainShop5.find(
filter={
'address':''
}
).sort('_id', pymongo.ASCENDING).count()
print 'totalSize:', count
if count == 0:
exit(1)
while True:
row = 0
for json in content:
row = row + 1
if row == page_size - 1:
last_row_id = json[u'_id']
i = i + 1
if i % 10000 == 0:
print "完成>>>>%.2f" % (i / count * 100), "%"
try:
if i%100==0:
print i
                # apply the update
                result = db.MainShop6.update_one({'_id':json[u'_id']},{'$set':json},True)
                if result.upserted_id!=None :
                    result1 = db.MainShop5.delete_one({'_id':json[u'_id']})
                    if result1.deleted_count==1 and i%1000==0:
                        print 'deleted-->',json[u'_id']
                    if i%1000==0:
                        print 'inserted>>>>>>',json[u'_id']
except Exception, e:
print e, "Exception"
if last_row_id!='':
print "last_row_id>>>>>" + str(last_row_id)
if last_row_id != '':
content = db.MainShop5.find(
filter={
'_id': {'$gt': ObjectId(last_row_id)},
'address':''
                    # any element matches all conditions
}
).sort('_id', pymongo.ASCENDING).limit(page_size)
last_row_id = ''
else:
print "完成>>>>%.2f" % (i / count * 100), "%"
exit(1);
def updateIlligalBrand(db):
page_size = 10000
i = 1.0
last_row_id = ''
content = db.MainShop5.find(
).sort('_id', pymongo.ASCENDING).limit(page_size)
count = db.MainShop5.find(
).sort('_id', pymongo.ASCENDING).count()
print 'totalSize:', count
if count == 0:
exit(1)
while True:
row = 0
for json in content:
row = row + 1
if row == page_size - 1:
last_row_id = json[u'_id']
i = i + 1
if i % 10000 == 0:
print "完成>>>>%.2f" % (i / count * 100), "%"
try:
brandname = json[u'brand']
if check_contain_english(brandname) ==False and check_contain_chinese(brandname)==False and check_contain_digital(brandname)==False:
brandname = remove_ch(brandname)
if len(brandname)==0:
result = db.MainShop6.update_one({'_id':json[u'_id']},{'$set':json},True)
if result.upserted_id!=None :
result1 = db.MainShop5.delete_one({'_id':json[u'_id']})
                            if result1.deleted_count==1 and i%1000==0:
                                print 'deleted-->',json[u'_id']
                            if i%1000==0:
                                print 'inserted>>>>>>',json[u'_id']
except Exception, e:
print e, "Exception"
if last_row_id!='':
print "last_row_id>>>>>" + str(last_row_id)
if last_row_id != '':
content = db.MainShop5.find(
filter={
'_id': {'$gt': ObjectId(last_row_id)}
                    # any element matches all conditions
}
).sort('_id', pymongo.ASCENDING).limit(page_size)
last_row_id = ''
else:
print "完成>>>>%.2f" % (i / count * 100), "%"
exit(1);
def remove_other(str):
new =u''
for ch in str:
if is_other(ch)==False:
new = new+ch
return new
def updateShopName(db):
page_size = 100
i = 1.0
last_row_id = ''
content = db.MainShop5.find(
).sort('_id', pymongo.ASCENDING).limit(page_size)
count = db.MainShop5.find(
).sort('_id', pymongo.ASCENDING).count()
print 'totalSize:', count
if count == 0:
exit(1)
while True:
row = 0
for json in content:
row = row + 1
if row == page_size - 1:
last_row_id = json[u'_id']
i = i + 1
if i % 10000 == 0:
print "完成>>>>%.2f" % (i / count * 100), "%"
try:
shopName = json[u'shopName']
shopName1 = remove_other(shopName)
if len(shopName) == len(shopName1):
continue
                # the actual processing step
                result = db.MainShop5.update_one({'_id': json[u'_id']}, {'$set': {"shopName": shopName1}}, True)
                if result.upserted_id is not None:
                    print shopName1, "updated successfully"
except Exception, e:
print e, "Exception"
if last_row_id != '':
content = db.MainShop5.find(
filter={
'_id': {'$gt': ObjectId(last_row_id)}
                    # any element must match all of these conditions
}
).sort('_id', pymongo.ASCENDING).limit(page_size)
last_row_id = ''
else:
print "完成>>>>%.2f" % (i / count * 100), "%"
exit(1);
def exportIlligalShopNameAndAddressInMainShopFromCouponShopList(db):
outf = "illegalShopNameAndAddress169_.txt"
fo = open(outf, 'wb')
page_size = 100
i = 1.0
last_row_id = ''
content = db.BankCouponInfo.find(
).sort('_id', pymongo.ASCENDING).limit(page_size)
count = db.BankCouponInfo.find(
).sort('_id', pymongo.ASCENDING).count()
print 'totalSize:', count
if count == 0:
exit(1)
while True:
row = 0
for json in content:
row = row + 1
if row == page_size - 1:
last_row_id = json[u'_id']
i = i + 1
if i % 10000 == 0:
print "完成>>>>%.2f" % (i / count * 100), "%"
try:
shopIdList = json[u'shopIdList']
for mainshopId in shopIdList:
cursor = db.MainShop5.find(
filter={
"_id":ObjectId(mainshopId)
}
)
for raw in cursor:
id = str(raw[u'_id'])
shopName = raw[u'shopName']
address = raw[u'address']
if len(shopName)<4 or len(address)<4:
                            line = (id + "," + shopName + "," + address + '\n').encode('utf-8')
fo.writelines(line)
except Exception, e:
print e, "Exception"
print last_row_id
if last_row_id != '':
content = db.BankCouponInfo.find(
filter={
'_id': {'$gt': ObjectId(last_row_id)}
                    # any element must match all of these conditions
}
).sort('_id', pymongo.ASCENDING).limit(page_size)
last_row_id = ''
else:
print "完成>>>>%.2f" % (i / count * 100), "%"
fo.close()
exit(1);
if __name__ == '__main__':
conn = pymongo.MongoClient('127.0.0.1', 33332)
# conn = pymongo.MongoClient('10.15.159.169', 30000)
# conn = pymongo.MongoClient('10.15.86.90', 30000)
    # connect to the database
db = conn.crawl_hz
exportIlligalShopNameAndAddressInMainShopFromCouponShopList(db) | [
"[email protected]"
] | |
cd7f7d399fa00905604dce431cb7de0c129bef0f | 5904c88885e276cb8d008437cbe650351d68284e | /iplookup/__init__.py | f3ddd6092bd9bbd07e8d30730181ec553be46b24 | [
"BSD-2-Clause"
] | permissive | Ankirama/iplookup | eaf615ec72c904cb4c9e940ede7ea47e8177dad5 | e63050fb47c559f79120aea312bd9721ff4e6f94 | refs/heads/master | 2021-06-23T02:10:24.400456 | 2017-08-18T12:53:26 | 2017-08-18T12:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | __all__ = ["iplookup"]
| [
"[email protected]"
] | |
64a254c52c248efb0cf5c010ebe8b732d1a8f71e | 7eb9b92ee295e3218a494ca7ea7572d4b5090392 | /exerc083.py | 790da16988d2439ce3f0e36ef1a4fc95e9f9fba2 | [] | no_license | andersonpereiragithub/CursoEmVideoPython | c7257b5d5cbdee6ea81b32fc993e09ad4e69cc9e | 7390a378cf80a22d85a628398c92148b01fee9cb | refs/heads/master | 2023-07-03T16:03:28.874668 | 2021-08-16T01:20:36 | 2021-08-16T01:20:36 | 383,771,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Create a program where the user types in any expression
# that uses parentheses. Your application must check whether the
# given expression has its parentheses opened and closed in the
# correct order.
pilha = []
expressao = str(input('Enter an expression with parentheses: '))
for parentese in expressao:
if parentese == '(':
pilha.append('(')
elif parentese == ')':
if len(pilha) > 0:
pilha.pop()
else:
pilha.append(')')
break
if len(pilha) == 0:
    print('Your expression is valid!')
else:
    print('Your expression is NOT valid!')
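# Note: a hypothetical function-wrapped version of the same stack logic,
# convenient for spot checks (not part of the original exercise):
#
#     def balanced(expr):
#         stack = []
#         for ch in expr:
#             if ch == '(':
#                 stack.append(ch)
#             elif ch == ')':
#                 if not stack:
#                     return False
#                 stack.pop()
#         return not stack
#
#     balanced('(a+b)*(c-d)')  # True
#     balanced('(()')          # False
#     balanced(')(')           # False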
| [
"[email protected]"
] | |
12c5e8a6058dd9ddb4067dc52b21882958b3ddac | e152859d9eb6d588131ffcefc31b6f6229177a60 | /executor-machine-learning-ms/app/listener/listener_settings.py | 0a6080371c4722640e3590dae33d3575cf7ad92e | [] | no_license | marioczpn/microservice-machine-learning-challenge-proposal | 7c856c57c28cccffe2f89b1992f5f0e2d8f8dae9 | 48c7c06462844165f33812e9a52df406f9895795 | refs/heads/main | 2023-09-05T10:58:15.235975 | 2021-11-24T14:38:58 | 2021-11-24T14:38:58 | 431,515,413 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import os
from face_detection.face_detection_executor import FaceDetectionMachineLearningExecutor
from services.mocksdk_service.MockSDKModule import MockSDK
# The purpose of this configuration file is to give more flexibility, allowing to add more modules into the application.
# Configuration keys
EXECUTOR_KEY = 'executor'
# API's keys
FACE_DETECTION_API_KEY = os.environ['FACE_DETECTION_API_NAME']
LISTENER_CONFIG = {
FACE_DETECTION_API_KEY: {
EXECUTOR_KEY: FaceDetectionMachineLearningExecutor(MockSDK()),
}
}
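# Note: a consumer of this config would typically dispatch on the API key; a
# minimal hypothetical lookup (names assumed, not part of this module):
#
#     def get_executor(api_name):
#         return LISTENER_CONFIG[api_name][EXECUTOR_KEY]
#
#     executor = get_executor(FACE_DETECTION_API_KEY)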
| [
"[email protected]"
] | |
2b5c7daf4e443721c7aaea517f907d64a4885998 | 580f06758cd81f45f19553e273d873f40430aef9 | /Use Python/MergeIntervals.py | 0a0a5eb200bfe885d38ab50d1fe3a12efcf2c18c | [] | no_license | yiqin/HH-Coding-Interview-Prep | 026faa39bf11e5a404460bc6f315883b3fc2eedc | 5f40d62d3bd7a937ea21f92bf737329797c87bcd | refs/heads/master | 2021-03-12T20:18:10.562406 | 2015-11-06T18:21:11 | 2015-11-06T18:21:11 | 42,781,871 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | class Interval(object):
"""docstring for Interval"""
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution(object):
"""docstring for Solution"""
def merge(self, intervals):
intervals.sort(key = lambda x:x.start)
results = []
if len(intervals) == 0:
return []
tmp = Interval(intervals[0].start, intervals[0].end)
for i in range(1, len(intervals)):
next = intervals[i]
if tmp.end >= next.end:
pass
elif tmp.end >= next.start:
tmp.end = next.end
elif tmp.end < next.start:
results.append(Interval(tmp.start, tmp.end))
# update
tmp.start = next.start
tmp.end = next.end
results.append(tmp)
print(results[0].start)
return results
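# Note: the test below uses non-overlapping intervals, so merge() returns both
# unchanged. A hypothetical overlapping case:
#
#     merged = Solution().merge([Interval(1, 4), Interval(3, 6)])
#     # -> a single Interval(1, 6), via the `tmp.end = next.end` branch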
interval1 = Interval(1, 4)
interval2 = Interval(5, 6)
interval3 = Interval(8, 10)
interval4 = Interval(15, 18)
solution = Solution()
results = solution.merge([interval1, interval2])
print(len(results)) | [
"[email protected]"
] | |
52cc99ba003663596c2f4e9b4a593123b794f3da | e6378e26e05ddad794f770b83bc0471c13da6452 | /myproject/webapp/serializers.py | 132f883d812c54a3b6173ebe67b1430d88433c23 | [] | no_license | dheerajgadhamsetty/REST_API | 122954bfc3dae2de0d218b8909792d3f75480903 | d370c7cce5ed6b325755ab243c20f58335cafbba | refs/heads/master | 2023-07-18T08:12:15.463796 | 2021-08-29T06:53:28 | 2021-08-29T06:53:28 | 400,971,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from django.db.models import fields
from django.db.models.base import Model
from rest_framework import serializers
from .models import Employee
class EmployeeSerializer(serializers.ModelSerializer):
class Meta:
model = Employee
#fields = ('firstname', 'lastname')
fields = '__all__'
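# Note: a quick, hypothetical usage sketch (assumes at least one Employee row
# exists; not part of the original module):
#
#     serializer = EmployeeSerializer(Employee.objects.first())
#     serializer.data  # dict with every Employee field, per fields = '__all__'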
| [
"dheerajgadhamsetty"
] | dheerajgadhamsetty |
d25ac17866c59602f3a74c473525baa8b1525ecc | 42186fa6507999ce60d334a0f04d9ae2127579cd | /shanghai/crypto/aessss/aes.py | 28f4167b8afbc6cd5a1d96fa31eac1158ba77de1 | [] | no_license | Imtinmin/CTF_Challenge | ef8b62b3a4a1741d814d989f795a243257ff6f2b | ea276596f9effdbe0cf9ef4457e2e676e652bb74 | refs/heads/master | 2022-12-21T12:40:40.625562 | 2020-04-30T03:27:56 | 2020-04-30T03:27:56 | 158,999,004 | 18 | 3 | null | 2022-12-10T04:34:27 | 2018-11-25T04:53:04 | PHP | UTF-8 | Python | false | false | 6,458 | py | # -*- coding:utf-8 -*-
import random
import sys
import string
from hashlib import sha256
import SocketServer
from Crypto.Cipher import AES
from secret import FLAG, IV, KEY
class Task(SocketServer.BaseRequestHandler):
def proof_of_work(self):
proof = ''.join(
[random.choice(string.ascii_letters+string.digits) for _ in xrange(20)])
# print proof
digest = sha256(proof).hexdigest()
self.request.send("sha256(XXXX+%s) == %s\n" % (proof[4:], digest))
self.request.send('Give me XXXX:')
x = self.request.recv(10)
x = x.strip()
if len(x) != 4 or sha256(x+proof[4:]).hexdigest() != digest:
return False
return True
def pad(self, s):
s += (256 - len(s)) * chr(256 - len(s))
ret = ['\x00' for _ in range(256)]
for index, pos in enumerate(self.s_box):
ret[pos] = s[index]
return ''.join(ret)
def unpad(self, s):
ret = ['\x00' for _ in range(256)]
for index, pos in enumerate(self.invs_box):
ret[pos] = s[index]
return ''.join(ret[0:-ord(ret[-1])])
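    # Note: pad() right-pads to 256 bytes (a 256-byte analogue of PKCS#7) and
    # then scatters the buffer through the s_box permutation below; unpad()
    # inverts it with invs_box. For example, input byte 0 lands at position
    # s_box[0] == 0x63 of the padded output.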
s_box = [
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
]
invs_box = [
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
]
def encrypt(self, msg):
cipher = AES.new(KEY, AES.MODE_CBC, IV)
return cipher.encrypt(msg).encode('hex')
def handle(self):
if not self.proof_of_work():
return
self.request.settimeout(15)
req = self.request
flag_len = len(FLAG)
assert(flag_len == 33)
self.flag = self.pad(FLAG)
assert(len(self.flag) == 256)
while True:
req.sendall(
'Welcome to AES(WXH) encrypt system.\n1. get encrypted flag.\n2. pad flag.\n3.Do some encrypt.\nYour choice:')
cmd = req.recv(2).strip()
try:
cmd = int(cmd)
except ValueError:
cmd = 0
if cmd == 1:
enc = self.encrypt(self.flag)
req.sendall('Here is the encrypted flag: 0x%s\n' % enc)
elif cmd == 2:
req.sendall('Pad me something:')
self.flag = self.unpad(self.flag)[
:flag_len] + req.recv(1024).strip()
assert(len(self.flag) <= 256)
self.flag = self.pad(self.flag)
req.sendall('Done.\n')
elif cmd == 3:
req.sendall('What do you want to encrypt:')
msg = self.pad(req.recv(1024).strip())
assert(len(msg) <= 256)
enc = self.encrypt(msg)
req.sendall('Here is the encrypted message: 0x%s\n' % enc)
else:
req.sendall('Do not lose heart~ !% Once WXH AK IOI 2019 can Solved! WXH is the first in the tianxia!')
req.close()
return
class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
HOST, PORT = '0.0.0.0', 23333
print 'Run in port:23333'
server = ThreadedServer((HOST, PORT), Task)
server.allow_reuse_address = True
server.serve_forever()
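# Note: a minimal client-side solver for the proof-of-work above (brute-force
# sketch; assumes the server's alphabet of letters and digits):
#
#     import itertools
#     from hashlib import sha256
#     import string
#
#     def solve_pow(suffix, digest):
#         alphabet = string.ascii_letters + string.digits
#         for xs in itertools.product(alphabet, repeat=4):
#             guess = ''.join(xs)
#             if sha256(guess + suffix).hexdigest() == digest:
#                 return guess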
| [
"[email protected]"
] | |
4eeccaf83120bc0894347b8916c466c18737500d | 4ecbc07cdc980f899510e0db2971ba754c474670 | /timm/models/layers/mlp.py | 05d076527cfb6f15bcf5f2830fa36777abbc5a1e | [
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-4.0",
"Apache-2.0"
] | permissive | shrikumaran/pytorch-image-models | 8c74ec7d705b6b2fb223d519afdd61f33c108cec | 6d8272e92c3d5f13a9fdd91dfe1eb7fae6784589 | refs/heads/master | 2023-06-16T06:41:30.088230 | 2021-07-08T18:23:55 | 2021-07-08T18:51:12 | 384,009,847 | 0 | 0 | Apache-2.0 | 2021-07-08T05:22:35 | 2021-07-08T05:22:34 | null | UTF-8 | Python | false | false | 3,774 | py | """ MLP module w/ dropout and configurable activation layer
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GluMlp(nn.Module):
""" MLP w/ GLU style gating
See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
assert hidden_features % 2 == 0
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features // 2, out_features)
self.drop = nn.Dropout(drop)
def init_weights(self):
# override init of fc1 w/ gate portion set to weight near zero, bias=1
fc1_mid = self.fc1.bias.shape[0] // 2
nn.init.ones_(self.fc1.bias[fc1_mid:])
nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6)
def forward(self, x):
x = self.fc1(x)
x, gates = x.chunk(2, dim=-1)
x = x * self.act(gates)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
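# Note: GluMlp halves the hidden width at the gate: fc1 emits hidden_features
# channels, chunk() splits them into value/gate halves, and fc2 consumes
# hidden_features // 2. E.g. hidden_features=128 yields two 64-wide halves
# combined as x * act(gates).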
class GatedMlp(nn.Module):
""" MLP as used in gMLP
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
gate_layer=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
if gate_layer is not None:
assert hidden_features % 2 == 0
self.gate = gate_layer(hidden_features)
hidden_features = hidden_features // 2 # FIXME base reduction on gate property?
else:
self.gate = nn.Identity()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.gate(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ConvMlp(nn.Module):
""" MLP using 1x1 convs that keeps spatial dims
"""
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.norm(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return x
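# Note: a quick shape check for the modules above (illustrative only):
#
#     import torch
#     mlp = Mlp(in_features=64, hidden_features=128)
#     assert mlp(torch.randn(2, 16, 64)).shape == (2, 16, 64)
#     conv_mlp = ConvMlp(in_features=64)
#     assert conv_mlp(torch.randn(2, 64, 7, 7)).shape == (2, 64, 7, 7)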
| [
"[email protected]"
] |