blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
be17e52f4d57cde3138c921d58f6b8edcd9a372e | d2318b949a3503909ff9276568f2cd8b6089c009 | /djangonautic/djangonautic/urls.py | 2c5ec90c51b0748ce3f778e21500a1cef2dade9f | []
| no_license | AdrianValdes/django-blog | 1f5d1a198704ba8a3730cdb6e4975a372ab96298 | 63485e72b5ea4b0edefce5d61073462ca02a4606 | refs/heads/main | 2023-01-05T05:18:19.824663 | 2020-10-28T10:11:55 | 2020-10-28T10:11:55 | 307,976,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py |
# URL configuration for the djangonautic project.
from django.contrib import admin
from django.urls import path, include
from . import views

urlpatterns = [
    path('admin/', admin.site.urls),              # Django admin site
    path("articles/", include("articles.urls")),  # delegate to the articles app's URLconf
    path("about/", views.about),                  # "about" page view
    path("", views.index)                         # site index view
]
| [
"[email protected]"
]
| |
0e89ff008a7a7e5cdc2d5f004fae605af78a9091 | 56c9e8f4f28400fc42bb1ee9ad1615a1520854c3 | /Area.py | bb6893e43a3ece38a19467b9040e979187465b5a | []
| no_license | IbnAzeez/Python-Area | 157a9bf333dfc27ed409d4ec123056eb7b735e7a | a60d046dbdd821c4ef76a2391588a15651bf223e | refs/heads/master | 2021-04-24T05:59:30.872779 | 2020-03-25T20:53:38 | 2020-03-25T20:53:38 | 250,088,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | import math
# Interactive console tool: prompts for a circle's radius and prints its area.
# (Relies on `import math` at the top of the file.)
divider = '--------------------------------'

print('Welcome User')
print('This program accepts the radius of a circle and returns the area as an output')
print(divider)
print('Please input radius')

circle_radius = float(input())
circle_area = math.pi * circle_radius * circle_radius

print('Calculating Area of the Circle, wait a minute!')
print(divider)
print('Area of Circle is: ')
print(circle_area)
print(divider)
print('No need to try using a calculator, the answer is spot on')
print('Thank you')
| [
"[email protected]"
]
| |
ee1cc126f996d7479b571b66119ece007be82d74 | 21511fdabe3f0f76bba25d4d6f62fd964d090d15 | /TD02_Bitcoin_Today_practice.py | 3cffde3080ab5c72fc7e705f0f5c8e3757e3a401 | []
| no_license | lilyanB/BP-TD2-BitcoinSeed | efbc2fdb8adc67c60581bce36468327827fd9101 | 9a32f8da6e228ec8ef624054014889380c1c8782 | refs/heads/main | 2023-08-16T00:55:48.972306 | 2021-10-10T20:03:40 | 2021-10-10T20:03:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,339 | py | import secrets
import hashlib
import binascii
import unicodedata
import hmac
import ecdsa
import struct
import base58
from ecdsa.curves import SECP256k1
from ecdsa.ecdsa import int_to_string, string_to_int
from mnemonic import Mnemonic
import bip32utils
from bip32utils import BIP32Key
from bip32utils import BIP32_HARDEN
##############
# Create a random integer that can securely serve as a wallet seed
##############
bits = secrets.randbits(128)  # 128 bits of CSPRNG entropy (-> 12-word mnemonic)
bits_hex = hex(bits)
# NOTE(review): hex() does not zero-pad, so private_key can have an odd number
# of hex digits; binascii.unhexlify below would then raise. TODO confirm/pad.
private_key = bits_hex[2:]  # strip the "0x" prefix
##############
# Represent this seed in binary and split it into 11-bit groups
##############
bits_bin = bin(bits)
bits_bin = bits_bin[2:]  # NOTE: computed but never used below
data = binascii.unhexlify(private_key)
h = hashlib.sha256(data).hexdigest()  # SHA-256 of the entropy, used for the BIP-39 checksum
b = bin(int(binascii.hexlify(data),16))[2:].zfill(len(data)*8)  # entropy as a zero-padded bit string
checksum = bin(int(h,16))[2:].zfill(256)[: len(data)* 8//32]  # first ENT/32 bits = checksum
tab=[]   # accumulates the 11-bit groups
word=""  # current group being built
cpt=0    # bit counter within the current group
# Left-pad to 128 bits if leading zeros were lost.
# NOTE: zfill(len(data)*8) above already pads to 128, so this branch is effectively dead.
if(len(str(b))<128):
    for i in range(0, 128-len(str(b))):
        word+="0"
        cpt+=1
# Cut the entropy bit string into 11-bit groups.
for j in b:
    word=str(word)+str(j)
    cpt+=1
    if cpt==11:
        cpt=0
        tab.append(word)
        word=""
# The leftover bits plus the checksum form the final group.
word+=str(checksum)
tab.append(word)
##############
# Map each 11-bit group to a word from the BIP-39 list; build the mnemonic seed
##############
with open("english.txt", "r") as f:
    wordlist = [w.strip() for w in f.readlines()]
seed = []
for k in range(len(tab)):
    for i in range(len(tab[k])//11):
        indx = int(tab[k][11*i:11*(i+1)],2)  # 11-bit index into the 2048-word list
        seed.append(wordlist[indx])
phrase = " ".join(seed)
##############
# Allow the user to import their own mnemonic seed instead
##############
seed_temp = str(input("\nVoulez vous importer votre propre seed ? (y/n)"))
if(seed_temp=="y"):
    phrase = str(input("\nEntrez votre propre seed : "))
print(phrase)
# BIP-39: stretch the mnemonic into a 512-bit seed with PBKDF2-HMAC-SHA512,
# 2048 rounds, salt "mnemonic" + passphrase (empty passphrase here).
normalized_mnemonic = unicodedata.normalize("NFKD", phrase)
password = ""  # optional BIP-39 passphrase, unused
normalized_passphrase = unicodedata.normalize("NFKD", password)
passphrase = "mnemonic" + normalized_passphrase
mnemonic = normalized_mnemonic.encode("utf-8")
passphrase = passphrase.encode("utf-8")
bin_seed = hashlib.pbkdf2_hmac("sha512", mnemonic, passphrase, 2048)
hex_bin = binascii.hexlify(bin_seed[:64])
mnemon = Mnemonic('english')
seed_mnemonic = mnemon.to_seed(mnemonic)
##############
# Extract the master private key and the chain code (BIP-32)
##############
seed_bytes = binascii.unhexlify(hex_bin)
I = hmac.new(b"Bitcoin seed", seed_bytes, hashlib.sha512).digest()
L, R = I[:32], I[32:]  # left half -> key material, right half -> chain code
master_private_key = int.from_bytes(L, 'big')
master_chain_code = R
##############
# Build the serialized master public and private keys (xpub / xprv)
##############
seed = binascii.unhexlify(hex_bin)  # NOTE: rebinds "seed" (previously the mnemonic word list)
I = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
Il, Ir = I[:32], I[32:]
secret = Il
chain = Ir
xprv = binascii.unhexlify("0488ade4")  # mainnet xprv version bytes
xpub = binascii.unhexlify("0488b21e")  # mainnet xpub version bytes
depth = b"\x00"      # master key is at depth 0
fpr = b'\0\0\0\0'    # parent fingerprint: none for the master key
index = 0
child = struct.pack('>L', index)
k_priv = ecdsa.SigningKey.from_string(secret, curve=SECP256k1)
K_priv = k_priv.get_verifying_key()
data_priv = b'\x00' + (k_priv.to_string())  # private key payload is 0x00-padded to 33 bytes
# Compressed public key: prefix 0x02/0x03 depending on the parity of y.
if K_priv.pubkey.point.y() & 1:
    data_pub= b'\3'+int_to_string(K_priv.pubkey.point.x())
else:
    data_pub = b'\2'+int_to_string(K_priv.pubkey.point.x())
raw_priv = xprv + depth + fpr + child + chain + data_priv
raw_pub = xpub + depth + fpr + child + chain + data_pub
# Base58Check: append the first 4 bytes of double-SHA256 as a checksum.
hashed_xprv = hashlib.sha256(raw_priv).digest()
hashed_xprv = hashlib.sha256(hashed_xprv).digest()
hashed_xpub = hashlib.sha256(raw_pub).digest()
hashed_xpub = hashlib.sha256(hashed_xpub).digest()
raw_priv += hashed_xprv[:4]
raw_pub += hashed_xpub[:4]
#######################
# Full information root key (master public key, master private key...)
######################
root_key = bip32utils.BIP32Key.fromEntropy(seed)
root_address = root_key.Address()
root_public_hex = root_key.PublicKey().hex()
root_private_wif = root_key.WalletImportFormat()
print("\n--------------------------------")
print('Root key:')
print(f'\t{root_key.dump()}')
#######################
# Derive a first child key (path m/0/0)
######################
child_key = root_key.ChildKey(0).ChildKey(0)
child_address = child_key.Address()
child_public_hex = child_key.PublicKey().hex()
child_private_wif = child_key.WalletImportFormat()
print("\n--------------------------------")
print('Child key m/0/0:')
print(f'\t{child_key.dump()}')
#######################
# Derive child keys up to index N (default derivation level 0)
######################
t = str(input("\nVoulez vous utiliser un index (sans niveau d'indexation) ? (y/n)"))
if (t=="y"):
    n = int(input("\nVeuillez choisir le niveau d'indexation ? "))
    print("Index choisi : ",n)
    i = 0
    for x in range(n):
        i=i+1
        child_key_son = root_key.ChildKey(0).ChildKey(i)
        child_address_son = child_key_son.Address()
        child_public_hex_son = child_key_son.PublicKey().hex()
        child_private_wif_son = child_key_son.WalletImportFormat()
        print("--------------------------------")
        print('Child key m/0/',i)
        print(f'\tAddress: {child_address_son}')
        print(f'\tPublic : {child_public_hex_son}')
        print(f'\tPrivate: {child_private_wif_son}\n')
    print(i)
#######################
# Derive child keys up to index N at derivation level M
######################
else:
    n = int(input("\nVeuillez choisir le niveau d'indexation ? "))
    print("Index choisi : ",n)
    m = int(input("\nVeuillez choisir le niveau de dérivation ? "))
    print("Dérivation choisi : ",m)
    i = 0
    for x in range(n):
        i=i+1
        child_key_son = root_key.ChildKey(m).ChildKey(i)
        child_address_son = child_key_son.Address()
        child_public_hex_son = child_key_son.PublicKey().hex()
        child_private_wif_son = child_key_son.WalletImportFormat()
        print("--------------------------------")
        print('Child key m/',m,'/',i)
        print(f'\tAddress: {child_address_son}')
        print(f'\tPublic : {child_public_hex_son}')
        print(f'\tPrivate: {child_private_wif_son}\n')
    print(i)
#######################
# Interactive report: let the user pick which pieces of information to display
######################
print("-------------------------------------")
print("Vous allez choisir toutes les informations que vous souhaitez récupérer.")
step1 = str(input("\nVoulez vous récupérer la private key? (y/n)"))
if(step1=="y"):
    print("private key : ",private_key)
print("-------------------------------------")
step2 = str(input("\nVoulez vous afficher la seed en lot de 11 bites? (y/n)"))
if(step2=="y"):
    print("Seed en lot : ",tab)
print("-------------------------------------")
step3 = str(input("\nVoulez vous afficher la phrase en mnémonique? (y/n)"))
if(step3=="y"):
    print("Phrase : ",phrase)
print("-------------------------------------")
step4 = str(input("\nVoulez vous afficher la seed BIP39? (y/n)"))
if(step4=="y"):
    print(f'BIP39 Seed: {seed_mnemonic.hex()}\n')
print("-------------------------------------")
step5 = str(input("\nVoulez vous afficher la master publique key et la master private key? (y/n)"))
if(step5=="y"):
    print("\nOnly public and private root keys:")
    print(f'\tPrivate : ,{base58.b58encode(raw_priv)}')
    print(f'\tPublic : ,{base58.b58encode(raw_pub)}')
    print(f'master chain code (bytes): {master_chain_code}')
print("-------------------------------------")
print("Merci pour votre confiance.")
| [
"[email protected]"
]
| |
875b61123dd93a948ac250e15ea5b998a1923731 | 68947387d77e0c869494dd8e7539594a3d1a2ed9 | /Sean_Mitchell_CS_317_Extra_Credit.py | 75af29007b6e66934a52fb1e71785e43f3a3b07e | []
| no_license | SeanMitchell1994/CS317_ExtraCredit | 375702aaefe58a7baf6005b71c9feee6fb19b640 | 526ed1b173fb6490944f05cb2d1d459560057f43 | refs/heads/master | 2020-05-16T19:12:35.240076 | 2019-04-29T01:24:28 | 2019-04-29T01:24:28 | 183,251,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | # ====================================================
# Sean Mitchell
# CS 317-20 Spring 2019
# Extra Credit
#
# Creates a series of petals and rings using turtle
# The total shape as a color gradient that starts low
# and goes high as the shape size increases
# This base color is randomly chosen at runtime
#
# This version include tail_recursion.py
# ====================================================
from turtle import *
import colorsys
import time
from random import randint
from tail_recursion import tail_recursive, recurse # tail_recursion.py is not mine, it's just an
# interesting trick to speed up the program because
# of sequential recurisive calls
# Full credit is provided in the tail_recursion.py
# The program runs fine without, it's just slower
# With this added, it ran roughly 40% faster
color_lut = [] # color lookup table
@tail_recursive
def quarter_circle(steps,length,side,base_color):
    """Draw a quarter circle, one 1-degree turn per recursive step.

    steps      -- number of segments left to draw (90 for a quarter circle)
    length     -- distance to move forward per segment
    side       -- 1 if the petal arcs away from the origin, -1 if toward it
    base_color -- index of the randomly chosen base color in color_lut
    """
    # exit condition
    if (steps <= 0):
        return
    # Pick a hue from the gradient table; the two sides walk the gradient
    # in opposite directions so the petal shades smoothly end to end.
    if (side == 1):
        color(color_lut[base_color - (steps) + 90])
    elif (side == -1):
        color(color_lut[base_color + (steps)])
    # shifts by the value of the length, curving left by the same amount
    forward(length)
    right(-length)
    # recursive call; @tail_recursive (project helper, tail_recursion.py)
    # presumably flattens the recursion to avoid stack growth — see that file
    quarter_circle(steps-1,length,side,base_color)
@tail_recursive
def inner_circle(steps,base_color):
    """Draw `steps` petals fanned around the origin (the inner flower).

    steps      -- number of petals left to draw
    base_color -- index of the randomly chosen base color in color_lut
    """
    # exit condition
    if (steps <= 0):
        return
    # Draws a full petal: two mirrored quarter circles
    quarter_circle(90,1,1,base_color)
    right(270)
    quarter_circle(90,1,-1,base_color)
    # turns right by 5 degrees before starting the next petal
    right(5)
    # recursive call
    inner_circle(steps-1,base_color)
@tail_recursive
def petal_ring(steps,base_color):
    """Draw `steps` petals spaced along the outer ring.

    steps      -- number of petals left to draw
    base_color -- index of the randomly chosen base color in color_lut
    """
    # exit condition
    if (steps <= 0):
        return
    # Draws a full petal, offset 90 entries into the gradient relative
    # to the inner ring so the two rings have distinct shades
    quarter_circle(90,1,1,base_color+90)
    right(270)
    quarter_circle(90,1,-1,base_color+90)
    # shifts the position to follow the outline of the circle
    forward(9)
    right(-84)
    # recursive call
    petal_ring(steps-1,base_color)
def Main():
    """Set up the canvas, draw the full figure, and print the elapsed time."""
    start = time.time()
    # populates the color lookup table: 1000 hues swept through HSV -> RGB
    for i in range(1000):
        color_lut.append(colorsys.hsv_to_rgb(i/1000, 1.0, 1.0))
    # generates the random base color
    # (0..800 keeps every index used below, up to base_color+180, inside the table)
    base_color = randint(0, 800)
    # run settings
    pensize(2)
    bgcolor('black')
    speed(0)
    hideturtle()
    # draws the first (inner) circle
    color(color_lut[base_color + 90])
    circle(85)
    up()
    setpos(0, 85)
    down()
    # draws the inner petals
    inner_circle(19,base_color)
    # draws the outer circle
    color(color_lut[base_color+180])
    up()
    setpos(-15,-75)
    down()
    circle(160)
    # draws the outer petals
    up()
    setheading(0)
    setpos(85,90)
    down()
    petal_ring(60,base_color)
    end = time.time()
    # wall-clock drawing time, used for the author's benchmark note in the header
    print(end - start)
    done()
Main()
| [
"[email protected]"
]
| |
51dd65811d72d74966faf28d8b397f1eb74579b0 | ddda55fcfc84ac5cd78cfc5c336a3df0b9096157 | /components/ble/mynewt-nimble/docs/conf.py | 629b8a4f14b0e686d9f12357cc72d9f04ee83c5c | [
"LicenseRef-scancode-gary-s-brown",
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | liu-delong/lu_xing_xiang_one_os | 701b74fceb82dbb2806518bfb07eb85415fab43a | 0c659cb811792f2e190d5a004a531bab4a9427ad | refs/heads/master | 2023-06-17T03:02:13.426431 | 2021-06-28T08:12:41 | 2021-06-28T08:12:41 | 379,661,507 | 2 | 2 | Apache-2.0 | 2021-06-28T10:08:10 | 2021-06-23T16:11:54 | C | UTF-8 | Python | false | false | 5,476 | py | # -*- coding: utf-8 -*-
#
# Mynewt documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 10 11:33:44 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('_ext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'breathe', 'sphinx.ext.todo',
'sphinx.ext.extlinks'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NimBLE Bluetooth Stack'
copyright = u'Copyright © 2018 The Apache Software Foundation, Licensed under the Apache License, Version 2.0 Apache and the Apache feather logo are trademarks of The Apache Software Foundation.'
author = u'The Apache Software Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0-b1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'README.rst', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'none'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme_path = []
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mynewtdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Mynewt.tex', u'NimBLE Bluetooth Stack',
u'The Apache Software Foundation', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mynewt', u'Mynewt Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Mynewt', u'NimBLE Bluetooth Stack',
author, 'Mynewt', 'One line description of project.',
'Miscellaneous'),
]
breathe_projects = {
"mynewt": "_build/xml"
}
breathe_default_project = "mynewt"
breathe_domain_by_extension = {
"h" : "c",
}
| [
"[email protected]"
]
| |
42aaccadf80def0301c5a1a59bb6af0850cce386 | a2bdc6d7b3a1290ee7ec69244ddf9d445cb7b595 | /preprocessing/create_sentence_boundary_rstdt.py | 9e98aae2231e209e7d14f2f790f86d4f49386950 | []
| no_license | arne-cl/DiscourseConstituencyInduction-ViterbiEM | 5b7f303f781c67ee1a193ffe9a25355022c48e6b | 49ac7d9af9fa31e0796125576de815f2b8f3e833 | refs/heads/master | 2022-04-17T14:19:57.251851 | 2020-04-06T07:34:19 | 2020-04-06T07:34:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,787 | py | import os
import utils
def get_sentence_boundaries(path_tok, path_conll):
    """
    :type path_tok: str
    :type path_conll: str
    :rtype: list of (int, int)

    Compute sentence boundaries (as inclusive EDU-ID spans) by combining the
    tokenized EDU file with the sentence-splitted CoNLL file.
    """
    edus = read_edus(path_tok)            # list of list of int
    sentences = read_sentences(path_conll)  # list of list of str
    n_edus = len(edus)

    # One EDU ID per token, in document order.
    token_edu_ids = utils.flatten_lists(edus)
    assert len(token_edu_ids) == len(utils.flatten_lists(sentences))

    # Project per-token EDU IDs onto the sentence segmentation, then force
    # each EDU to belong to exactly one sentence (others masked with -1).
    labeled = adjust(assign_edu_ids_to_sentences(sentences, token_edu_ids),
                     n_edus=n_edus)
    assert len(token_edu_ids) == len(utils.flatten_lists(labeled))

    # Derive the spans and sanity-check that every EDU is covered.
    boundaries = compute_boundaries(labeled)
    test_boundaries(boundaries, n_edus=n_edus)
    return boundaries
def read_edus(path):
    """
    :type path: str
    :rtype: list of list of int

    Each line of the file is one EDU; each EDU is returned as a list of
    integers, one per token, all equal to the EDU's (0-based) line index.
    """
    edus = []
    # "with" guarantees the file handle is closed (the original left it
    # open); enumerate replaces the hand-maintained counter.
    with open(path) as f:
        for edu_id, line in enumerate(f):
            n_tokens = len(line.strip().split())
            edus.append([edu_id] * n_tokens)
    return edus
def read_sentences(path):
    """
    :type path: str
    :rtype: list of list of str

    Sentences are separated by blank lines in the CoNLL file.  Only the
    token count per sentence matters downstream, so every token is
    represented by the placeholder "*".
    """
    sentences = []
    tokens = []
    # "with" guarantees the file handle is closed (the original left it open).
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line == "":
                # Blank line = sentence boundary; runs of blanks are skipped.
                if tokens:
                    sentences.append(["*"] * len(tokens))
                    tokens = []
            else:
                # CoNLL format: the token is the second tab-separated field.
                tokens.append(line.split("\t")[1])
    if tokens:
        # Flush a final sentence that has no trailing blank line.
        sentences.append(["*"] * len(tokens))
    return sentences
def assign_edu_ids_to_sentences(sentences, tokens_with_edu_ids):
    """
    :type sentences: list of list of str
    :type tokens_with_edu_ids: list of int
    :rtype: list of list of int

    Slice the flat per-token EDU-ID sequence into chunks whose lengths
    match the sentence segmentation.
    """
    chunks = []
    offset = 0
    for sentence in sentences:
        end = offset + len(sentence)
        chunks.append(tokens_with_edu_ids[offset:end])
        offset = end
    return chunks
def adjust(sentences_with_edu_ids, n_edus):
    """
    :type sentences_with_edu_ids: list of list of int
    :type n_edus: int
    :rtype: list of list of int

    After using this function, each EDU belongs to only one sentence.

    e.g., [[i,i,i,i,i+1,i+1], [i+1,i+1,i+1,i+1,i+2], [i+3,i+3]]
       -> [[i,i,i,i,-1,-1], [i+1,i+1,i+1,i+1,i+2], [i+3,i+3]]
    """
    from collections import Counter

    # Count each sentence once instead of re-scanning every sentence for
    # every EDU (the original was O(n_edus * total_tokens)).
    counts = [Counter(sentence) for sentence in sentences_with_edu_ids]

    # home[edu_id] = ID of the sentence where the EDU's tokens appear most
    # frequently.  Ties (and all-zero counts) go to the later sentence
    # (">="), matching the original "<=" update rule.
    home = {}
    for edu_id in range(n_edus):
        best_sentence_id, best_count = None, -1
        for sentence_id, counter in enumerate(counts):
            count = counter.get(edu_id, 0)
            if count >= best_count:
                best_sentence_id, best_count = sentence_id, count
        home[edu_id] = best_sentence_id

    # Mask (with -1) every token whose EDU "lives" in a different sentence.
    return [[edu_id if home[edu_id] == sentence_id else -1
             for edu_id in sentence]
            for sentence_id, sentence in enumerate(sentences_with_edu_ids)]
def compute_boundaries(sentences_with_edu_ids):
    """
    :type sentences_with_edu_ids: list of list of int
    :rtype: list of (int, int)

    Return one inclusive (first EDU ID, last EDU ID) span per sentence,
    skipping sentences whose tokens were all masked out with -1.
    """
    boundaries = []
    for sentence in sentences_with_edu_ids:
        highest = max(sentence)
        if highest == -1:
            # Every token was masked: this sentence owns no EDU.
            continue
        lowest = min(v for v in sentence if v != -1)
        boundaries.append((lowest, highest))
    return boundaries
def test_boundaries(bnds, n_edus):
    """
    :type bnds: list of (int, int)
    :type n_edus: int

    Sanity check: every EDU ID in [0, n_edus) must be covered by at least
    one span; raises AssertionError otherwise (returns None on success).
    """
    for edu_id in range(n_edus):
        covered = any(begin_i <= edu_id <= end_i for begin_i, end_i in bnds)
        assert covered
def write_boundaries(bnds, path):
    """
    :type bnds: list of (int, int)
    :type path: str

    Write one "begin end" pair per line to *path*.
    """
    lines = ["%d %d\n" % span for span in bnds]
    with open(path, "w") as f:
        f.writelines(lines)
def main():
    """Compute and write sentence boundaries for every RST-DT EDU file."""
    config = utils.Config()
    renamed_dir = os.path.join(config.getpath("data"), "rstdt", "renamed")
    tmp_dir = os.path.join(config.getpath("data"), "rstdt", "tmp.preprocessing")

    # Process every .edus file in a deterministic (sorted) order.
    filenames = sorted(n for n in os.listdir(renamed_dir) if n.endswith(".edus"))
    for filename in filenames:
        path_tok = os.path.join(tmp_dir, filename + ".tokenized")
        path_conll = os.path.join(tmp_dir, filename + ".tokenized.conll")
        path_out = os.path.join(
            tmp_dir, filename.replace(".edus", ".sentence.boundaries"))
        bnds = get_sentence_boundaries(path_tok, path_conll)
        write_boundaries(bnds, path_out)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
bf2a5ffd97cccaa92d2fe656cb94dd123b0325cf | 020ddf79c83d24cbf8a3af9c51278d1239db6f8d | /gyp/art-dalvikvm.gyp | 5ab319a59a6cd83fb378a8ac59b487bd0f7d8a71 | []
| no_license | DmitrySkiba/ARTPart | cbf7a12d50823064f8071b9e7e644bfedb846c3d | 1c16aa6bb6519a37286d6c72d23ab7d4787f5b6b | refs/heads/master | 2016-09-06T02:56:20.756704 | 2015-03-28T05:54:07 | 2015-03-28T05:54:07 | 30,858,852 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | gyp | # Copyright (C) 2015 Dmitry Skiba
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'common.gypi',
'art-common.gypi',
],
'targets': [
{
'target_name': 'art-dalvikvm',
'product_name': 'dalvikvm',
'type': 'executable',
'dependencies': [
'<!(<(dependency) system-libcutils)',
'<!(<(dependency) art-compiler)',
'<!(<(dependency) art-runtime)',
'<!(<(dependency) libnativehelper)',
],
'sources': [
'<(art_root)/dalvikvm/dalvikvm.cc'
],
},
],
}
| [
"[email protected]"
]
| |
2588b58c60d47bb584362ec1a073150105b82209 | 8b71e73e4776aeef30a5fd23b185510f2ce8d0a8 | /pythonic_ocr-master/flask/bin/coverage3 | a2c4654b38caa41ba41c6aedc2c5c7dc5dca4d9d | []
| no_license | angelaaaateng/LinAlg_Project | d5e428dc1bae34f74850833d8d616f300658531c | eb5847ff327deb781cb409b933da00f455149373 | refs/heads/master | 2022-12-07T11:31:54.286539 | 2019-06-07T22:15:40 | 2019-06-07T22:15:40 | 186,526,704 | 2 | 1 | null | 2022-11-22T00:57:00 | 2019-05-14T02:06:10 | Python | UTF-8 | Python | false | false | 330 | #!/Users/Sam/ml/webml/flask/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'coverage==4.0.1','console_scripts','coverage3'
__requires__ = 'coverage==4.0.1'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('coverage==4.0.1', 'console_scripts', 'coverage3')()
)
| [
"[email protected]"
]
| ||
0aa51c85f1acc32bca1b8efbc5819d5ae89a7c73 | a35e499ac96ccba9cf3c991c007754a48cf48b65 | /spark-on-eks/source/lib/spark_on_eks_stack.py | 4051b5ff572fe0a7928fd9b4d0cae2422914de96 | [
"MIT-0"
]
| permissive | QPC-database/sql-based-etl-on-amazon-eks | df433bd7035a26ac65e8987965d420d4a3e61534 | 9bec6879666cf637b79f94e0290d36faacf7281b | refs/heads/main | 2023-06-22T03:15:22.806742 | 2021-07-07T03:48:26 | 2021-07-07T03:48:26 | 384,840,396 | 1 | 0 | MIT-0 | 2021-07-11T01:54:50 | 2021-07-11T01:54:50 | null | UTF-8 | Python | false | false | 6,000 | py | # // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# // SPDX-License-Identifier: MIT-0
from aws_cdk import (
core,
aws_eks as eks,
aws_secretsmanager as secmger
)
from lib.cdk_infra.network_sg import NetworkSgConst
from lib.cdk_infra.iam_roles import IamConst
from lib.cdk_infra.eks_cluster import EksConst
from lib.cdk_infra.eks_service_account import EksSAConst
from lib.cdk_infra.eks_base_app import EksBaseAppConst
from lib.cdk_infra.s3_app_code import S3AppCodeConst
from lib.cdk_infra.spark_permission import SparkOnEksSAConst
from lib.util.manifest_reader import *
import json,os
class SparkOnEksStack(core.Stack):
    """CDK stack that provisions an EKS cluster wired up for SQL-based
    Spark ETL: app-code S3 bucket, networking/IAM, JupyterHub (Arc) as the
    authoring IDE, and Argo Workflows as the ETL orchestrator.
    """

    @property
    def code_bucket(self):
        # Name of the S3 bucket holding application code (created in __init__).
        return self.app_s3.code_bucket

    @property
    def argo_url(self):
        # Hostname of the ALB fronting the Argo workflow server.
        return self._argo_alb.value

    @property
    def jhub_url(self):
        # Hostname of the ALB fronting JupyterHub.
        return self._jhub_alb.value

    def __init__(self, scope: core.Construct, id: str, eksname: str, **kwargs) -> None:
        """Build every resource; ordering below matters because of the
        explicit add_dependency() chains between Helm charts/manifests.
        """
        super().__init__(scope, id, **kwargs)
        # NOTE(review): assumes the CDK app runs inside a virtualenv whose
        # parent directory contains the "source" tree — confirm deploy env.
        source_dir=os.path.split(os.environ['VIRTUAL_ENV'])[0]+'/source'

        # Cloudformation input params
        datalake_bucket = core.CfnParameter(self, "datalakebucket", type="String",
            description="Your existing S3 bucket to be accessed by Jupyter Notebook and ETL job. Default: blank",
            default=""
        )
        login_name = core.CfnParameter(self, "jhubuser", type="String",
            description="Your username login to jupyter hub",
            default="sparkoneks"
        )

        # Auto-generate a user login in secrets manager
        jhub_secret = secmger.Secret(self, 'jHubPwd',
            generate_secret_string=secmger.SecretStringGenerator(
                exclude_punctuation=True,
                secret_string_template=json.dumps({'username': login_name.value_as_string}),
                generate_string_key="password")
        )

        # A new bucket to store app code and access logs
        self.app_s3 = S3AppCodeConst(self,'appcode')

        # 1. Setup EKS base infrastructure (VPC/SGs, IAM roles, cluster,
        #    service accounts, and base add-ons such as the ALB controller).
        network_sg = NetworkSgConst(self,'network-sg', eksname, self.app_s3.code_bucket)
        iam = IamConst(self,'iam_roles', eksname)
        eks_cluster = EksConst(self,'eks_cluster', eksname, network_sg.vpc, iam.managed_node_role, iam.admin_role)
        EksSAConst(self, 'eks_sa', eks_cluster.my_cluster, jhub_secret)
        base_app=EksBaseAppConst(self, 'eks_base_app', eks_cluster.my_cluster)

        # 2. Setup Spark application access control
        app_security = SparkOnEksSAConst(self,'spark_service_account',
            eks_cluster.my_cluster,
            login_name.value_as_string,
            self.app_s3.code_bucket,
            datalake_bucket.value_as_string
        )

        # 3. Install Arc Jupyter notebook to serve as the Spark ETL IDE
        jhub_install= eks_cluster.my_cluster.add_helm_chart('JHubChart',
            chart='jupyterhub',
            repository='https://jupyterhub.github.io/helm-chart',
            release='jhub',
            version='0.11.1',
            namespace='jupyter',
            create_namespace=False,
            values=load_yaml_replace_var_local(source_dir+'/app_resources/jupyter-values.yaml',
                fields={
                    "{{codeBucket}}": self.app_s3.code_bucket,
                    "{{region}}": core.Aws.REGION
                })
        )
        # JupyterHub needs the ALB controller in place first.
        jhub_install.node.add_dependency(base_app.alb_created)

        # get Arc Jupyter login from secrets manager
        # (strip the random suffix CloudFormation appends to the secret name)
        name_parts= core.Fn.split('-',jhub_secret.secret_name)
        name_no_suffix=core.Fn.join('-',[core.Fn.select(0, name_parts), core.Fn.select(1, name_parts)])

        config_hub = eks.KubernetesManifest(self,'JHubConfig',
            cluster=eks_cluster.my_cluster,
            manifest=load_yaml_replace_var_local(source_dir+'/app_resources/jupyter-config.yaml',
                fields= {
                    "{{MY_SA}}": app_security.jupyter_sa,
                    "{{REGION}}": core.Aws.REGION,
                    "{{SECRET_NAME}}": name_no_suffix
                },
                multi_resource=True)
        )
        config_hub.node.add_dependency(jhub_install)

        # 4. Install ETL orchestrator - Argo
        # can be replaced by other workflow tool, ie. Airflow
        argo_install = eks_cluster.my_cluster.add_helm_chart('ARGOChart',
            chart='argo-workflows',
            repository='https://argoproj.github.io/argo-helm',
            release='argo',
            version='0.1.4',
            namespace='argo',
            create_namespace=True,
            values=load_yaml_local(source_dir+'/app_resources/argo-values.yaml')
        )
        argo_install.node.add_dependency(config_hub)

        # Create a Spark workflow template with different T-shirt sizes
        submit_tmpl = eks_cluster.my_cluster.add_manifest('SubmitSparkWrktmpl',
            load_yaml_local(source_dir+'/app_resources/spark-template.yaml')
        )
        submit_tmpl.node.add_dependency(argo_install)

        # 5.(OPTIONAL) retrieve ALB DNS Names to enable CloudFront in the
        # following nested stack. Recommended: remove the CloudFront component
        # and set up a TLS certificate with your own domain name instead.
        self._jhub_alb=eks.KubernetesObjectValue(self, 'jhubALB',
            cluster=eks_cluster.my_cluster,
            json_path='..status.loadBalancer.ingress[0].hostname',
            object_type='ingress.networking',
            object_name='jupyterhub',
            object_namespace='jupyter'
        )
        self._jhub_alb.node.add_dependency(config_hub)

        self._argo_alb = eks.KubernetesObjectValue(self, 'argoALB',
            cluster=eks_cluster.my_cluster,
            json_path='..status.loadBalancer.ingress[0].hostname',
            object_type='ingress.networking',
            object_name='argo-argo-workflows-server',
            object_namespace='argo'
        )
        self._argo_alb.node.add_dependency(argo_install)
| [
"[email protected]"
]
| |
63a9b43aa118f51f86d01d2e1b256268bcdf1754 | e8dde776a6315101b25bcbd749a3c569cadb992d | /Scrapy_gs/shiyan/shiyan/items.py | 02c51063b99a73367ff56098dd12b5b8a1b8143c | []
| no_license | lsaac128/shiyanlou-code | d415e6c77a356cc4f072113622973a07d3cac27b | 620d803167544336a93687cb624093744b992370 | refs/heads/master | 2020-07-16T09:52:32.959670 | 2019-12-13T05:37:21 | 2019-12-13T05:37:21 | 205,764,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ShiyanItem(scrapy.Item):
    """Scrapy item holding one scraped record: a title and a name field."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    name = scrapy.Field()
| [
"[email protected]"
]
| |
59f15c4e88d2a7e22e763aed34804af7a59fab04 | 837a6d9d8799fc64070eff54b24b6d6da035773c | /r2m_simulation/build/curiosity_mars_rover_description/catkin_generated/pkg.installspace.context.pc.py | dc13c4e238a6fd88c602e7348f3c6c2ece6ae909 | []
| no_license | umangkaswala/r2m_rover | 60b386ce8630536a09345799e720dc8bdda9f839 | 24f8ca29183df4e2cf7ba7600496ec0274a51a2b | refs/heads/master | 2022-01-08T19:56:21.736580 | 2019-07-31T17:26:52 | 2019-07-31T17:26:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;gazebo_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "curiosity_mars_rover_description"
PROJECT_SPACE_DIR = "/home/daisy/Desktop/R2M/r2m_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
]
| |
eea6a5bdf52069a9d6c674bbba325b3c7939a612 | 58cde93d4adcf82f6753f4e06643045dead75146 | /venv/lib/python3.8/site-packages/bitlyapi/bitly.py | f4ad79c934674ae43f0e4b8360d2a38bd4f4334a | []
| no_license | atakanatamert/ShortyURLShortener | 2f6d73cce87687361374a5a38d22349bb144d901 | 10bd75e77f851d5ba3f97046f18ead63f1e54310 | refs/heads/master | 2022-04-16T16:32:06.303762 | 2020-03-24T12:54:31 | 2020-03-24T12:54:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,271 | py | #!/usr/bin/python
import os
import sys
import urllib
try:
import json
except ImportError:
import simplejson as json
API_URL = 'http://api.bit.ly/v3'
class APIError (Exception):
    '''Signals an error response from the bit.ly REST API.

    Attributes:
        errorCode    -- numeric error code returned by bit.ly
        errorMessage -- textual error message returned by bit.ly
        result       -- the BitLy response object associated with this
                        exception (may be None)
    '''
    def __init__ (self, code, message, result=None):
        super(APIError, self).__init__()
        # Keep the raw API error details on the instance for callers.
        self.errorCode, self.errorMessage, self.result = code, message, result

    def __str__ (self):
        return 'Bit.ly API error: %d: %s' % (self.errorCode, self.errorMessage)
class BitLy (object):
    '''BitLy is a wrapper over the bit.ly REST API
    (http://code.google.com/p/bitly-api/wiki/ApiDocumentation). API
    calls are generated dynamically by the ``__getattr__`` method, and
    arbitrary keyword arguments are converted into URL parameters.

    Example usage::

        >>> api = bitly.BitLy(api_user, api_key)
        >>> res = api.shorten(longUrl='http://github.com/larsks')
        >>> print res['http://github.com/larsks']['shortUrl']
        http://bit.ly/9KKBJH
    '''

    api_url = API_URL  # base endpoint shared by all generated calls

    def __init__ (self, api_user, api_key):
        # Credentials are attached to every request as login/apiKey params.
        self.api_user = api_user
        self.api_key = api_key

    def _build_query_string(self, kwargs):
        # Merge the fixed credentials with the caller's keyword arguments
        # and URL-encode the result (Python 2 urllib).
        params = {
            'login' : self.api_user,
            'apiKey' : self.api_key,
        }
        params.update(kwargs)
        return urllib.urlencode(params)

    def __getattr__ (self, func):
        '''Generates a function that calls *func* via the bit.ly
        REST api. Transforms any keyword arguments into URL
        parameters.

        Returns a Python dictionary containing the result of the bit.ly
        method call.

        Raise bitly.APIError on errors returned by bit.ly.'''

        def _ (**kwargs):
            url = '/'.join([self.api_url, func])
            query_string = self._build_query_string(kwargs)
            # Passing a data argument makes urlopen issue a POST request.
            # NOTE(review): the response object fd is never closed.
            fd = urllib.urlopen(url, query_string)
            res = json.loads(fd.read())
            # bit.ly v3 wraps every reply in status_code/status_txt/data.
            if res['status_code'] != 200:
                raise APIError(
                    res['status_code'],
                    res['status_txt'],
                    res)
            elif not 'data' in res:
                raise APIError(-1, 'Unexpected response from bit.ly.', res)
            return res['data']
        return _
def main():
    '''Reads configuration from the [bitly] section of
    ~/.bitly. Returns a BitLy() object to the caller. This is
    primarily during development -- if you load this module
    from the command line with ``python -i bitly/bitly.py``, the
    ``api`` object will be available to you for testing.'''
    # Local import keeps the config dependency out of normal module import
    # (Python 2 ConfigParser module).
    from ConfigParser import ConfigParser
    cf = ConfigParser()
    cf.read(os.path.expanduser('~/.bitly'))
    # Both api_user and api_key must exist in the [bitly] section.
    api = BitLy(
        cf.get('bitly', 'api_user'),
        cf.get('bitly', 'api_key')
    )
    return api
if __name__ == '__main__':
api = main()
| [
"[email protected]"
]
| |
35c12f8dd042fa9e218f0f82d1ced393a6413f71 | e839d7d13689529c945cfd923fa460b3a1fcd1f6 | /invariant3b.py | 58472fd2f3e0139e275e8344a2cc544051671055 | []
| no_license | KaplanLab/Invariants | 3a81765cf9debcc15faed425dc966ae3be5c7eec | b4432ec4639b0d08c0a90630fb1e32a13dfffebf | refs/heads/master | 2020-03-11T08:47:20.151603 | 2018-04-23T06:26:32 | 2018-04-23T06:26:32 | 129,892,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,781 | py |
from __future__ import print_function
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import argparse
import sys
def main():
    """CLI entry point: compute the per-bin 'smoothness' invariant (inv3b) of a contact map.

    For each bin i it compares how similar i's cis interaction profile is to the
    profile of a near neighbour (i+x) versus a farther one (i+y); the difference
    of the two mean absolute deviations is stored per bin, saved to .npy/.tab,
    and rendered as a violin plot plus a genome-wide track.
    """
    parser=argparse.ArgumentParser(description='Calculates smoothness',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-in',help='input file',dest='infile',type=str,required=True)
    parser.add_argument('-out',help='output file prefix',dest='outprefix',type=str,required=True)
    parser.add_argument('-d',help='x,y distances to compare (x<y ; compare interactions of i+x with i vs i+y with i)',dest='xy',type=int,nargs=2,default=[1,10],metavar=('X','Y'))
    args=parser.parse_args()
    infile=args.infile
    outprefix=args.outprefix
    xy=args.xy
    x,y = xy[0],xy[1]
    print ("loading npz...\n",file=sys.stderr)
    # The npz bundle carries the contact matrix plus genome bookkeeping arrays.
    with np.load(infile) as i:
        d=i['d']  # n x n contact matrix
        chr_bin_range=i['chr_bin_range']  # per-chromosome [start, end) bin indices
        chrs=i['chrs']
        bin_pos=i['bin_pos']  # per-bin chromosome/position table (column 0 = chromosome)
        n=i['n']
    nonan=lambda x: x[~np.isnan(x)]  # helper: drop NaNs from a 1-D array
    print ("calculating smoothness...",file=sys.stderr)
    d[(range(n),range(n))]=np.nan  # mask the diagonal (self-interactions)
    inv3b=np.zeros(n)
    inv3b[:]=np.nan
    np.seterr(divide='ignore', invalid='ignore')
    for i in range(0,n-y):
        c = bin_pos[i,0]
        same_chr_bins = (bin_pos[:,0]==c) # bins that are in same chr as i (NOTE(review): computed but unused)
        rng = ( chr_bin_range[c,0], chr_bin_range[c,1] ) # consider only cis bins
        distf = lambda x1,x2: np.nanmean(np.abs(x1-x2)) # mean absolute difference
        diff_x = distf( d[i+x,rng[0]:rng[1]], d[i,rng[0]:rng[1]] ) # diff_x is the mean absolute difference between the cis interactions of i and the cis interactions of i+x
        diff_y = distf( d[i+y,rng[0]:rng[1]], d[i,rng[0]:rng[1]] ) # diff_y is the mean absolute difference between the cis interactions of i and the cis interactions of i+y
        inv3b[i] = diff_y - diff_x
    print ("saving and plotting...",file=sys.stderr)
    np.save(outprefix+'_inv3b_'+str(x)+'-'+str(y)+'.npy',inv3b)
    np.savetxt(outprefix+'_inv3b_'+str(x)+'-'+str(y)+'_stats.tab',[np.median(nonan(inv3b))])
    # Violin plot of the per-bin values (NaNs removed).
    plt.figure(figsize=(3,10))
    vp=plt.violinplot(nonan(inv3b),showextrema=False,widths=0.8)
    for pc in vp['bodies']:
        pc.set_alpha(0.8)
    vp['bodies'][0].set_facecolor('red')
    plt.savefig(outprefix+'_inv3b_'+str(x)+'-'+str(y)+'_hist.png',dpi=300)
    # Genome-wide scatter with chromosome start boundaries as vertical lines.
    plt.figure(figsize=(20,3))
    plt.plot(inv3b,'.',color='red')
    plt.title("median: "+str(np.median(nonan(inv3b))))
    plt.vlines(chr_bin_range[:,0],0,np.nanmax(inv3b))
    plt.savefig(outprefix+'_inv3b_'+str(x)+'-'+str(y)+'_plot.png',dpi=300)
main()
| [
"[email protected]"
]
| |
09b7744d2aa86aeee801bb3ad1a107a845567fa8 | 56fb302a723dee461a37da81ae6e134b2660ca9f | /clientfeatures/__init__.py | 80108ce886b86e16b77679fc184b7e05809418fe | [
"MIT"
]
| permissive | bpeschier/django-clientfeatures | 0ef3cc72bc98c768ac5f751d884aea30c2033c4f | ea9ff9cc5c0dd147ba43e46fa480a5cd4af6666d | refs/heads/master | 2020-05-03T12:31:12.385573 | 2017-04-06T09:15:21 | 2017-04-06T09:15:21 | 21,525,946 | 0 | 1 | null | 2016-12-09T15:59:40 | 2014-07-05T18:43:51 | Python | UTF-8 | Python | false | false | 67 | py | default_app_config = 'clientfeatures.apps.ClientFeaturesAppConfig'
| [
"[email protected]"
]
| |
3da13c58c4199d31c98e3b0c81e7ab5d55abad24 | a873f3cd46a10ad879fc56d78e1f533d8bf486c0 | /z_python-stu1/first/廖雪峰/迭代.py | 4115de44525792f329471d5da4b183b906436215 | []
| no_license | shenhaiyu0923/resful | d0301b39363e6b3d3659f62fa4a9b2532ebcd225 | 1e66cae7d68fa231794776953cc1a5e999bf36c6 | refs/heads/master | 2021-07-08T20:46:57.300298 | 2021-06-01T08:17:27 | 2021-06-01T08:17:27 | 244,308,016 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = '[email protected]'
def findMinAndMax(L):
    """Return a (min, max) tuple for sequence L.

    An empty sequence yields (None, None), matching the original contract.
    """
    if not L:
        return (None, None)
    # Builtins replace the manual enumerate/compare loop.
    return (min(L), max(L))
# Tests: exercise findMinAndMax against the expected (min, max) tuples.
# The user-facing print strings are kept in the original Chinese
# ('测试失败!' = "test failed", '测试成功!' = "test passed").
if findMinAndMax([]) != (None, None):
    print('测试失败!')
elif findMinAndMax([7]) != (7, 7):
    print('测试失败!')
elif findMinAndMax([7, 1]) != (1, 7):
    print('测试失败!')
elif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):
    print('测试失败!')
else:
    print('测试成功!')
| [
"[email protected]"
]
| |
9b209805bbc3e5381db705ee82f66c38d2e5ef39 | a9243f735f6bb113b18aa939898a97725c358a6d | /0.15/_downloads/plot_compute_rt_average.py | fd3b17129bcbbdb519a78a19a35ccce09b59e38c | []
| permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 1,912 | py | """
========================================================
Compute real-time evoked responses using moving averages
========================================================
This example demonstrates how to connect to an MNE Real-time server
using the RtClient and use it together with RtEpochs to compute
evoked responses using moving averages.
Note: The MNE Real-time server (mne_rt_server), which is part of mne-cpp,
has to be running on the same computer.
"""
# Authors: Martin Luessi <[email protected]>
# Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.realtime import RtEpochs, MockRtClient
print(__doc__)
# Fiff file to simulate the realtime client
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# select gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
# select the left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
# create the mock-client object
rt_client = MockRtClient(raw)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
decim=1, reject=dict(grad=4000e-13, eog=150e-6))
# start the acquisition
rt_epochs.start()
# send raw buffers
rt_client.send_data(rt_epochs, picks, tmin=0, tmax=150, buffer_size=1000)
for ii, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ii + 1))
ev.pick_types(meg=True, eog=False) # leave out the eog channel
if ii == 0:
evoked = ev
else:
evoked = mne.combine_evoked([evoked, ev], weights='nave')
plt.clf() # clear canvas
evoked.plot(axes=plt.gca()) # plot on current figure
plt.pause(0.05)
| [
"[email protected]"
]
| |
aaef9b61d387b044c9aa2c263efce50935535202 | c03076b16fc7864f91def78d477dfa2f6104f1d6 | /Senior Codes and References/RPi/RPi-CK/pc_interface.py | 9c66edef2a877aa2a633548660551730e4f091e3 | []
| no_license | chia0360/MDP-Group04 | 16b01cabbd05dd9911f8834f615203f6bf308a6b | 4f87966d525cbae248909f928efd747b288e56c4 | refs/heads/master | 2021-01-11T18:23:27.813562 | 2017-04-11T02:45:37 | 2017-04-11T02:45:37 | 79,529,619 | 0 | 0 | null | 2017-02-11T10:31:49 | 2017-01-20T05:49:06 | Java | UTF-8 | Python | false | false | 2,048 | py | import socket
from interface import *
class pc_interface (interface):
    """TCP link to the PC: binds a server socket, accepts one client, then
    exchanges raw string messages with it (Python 2 code).

    NOTE(review): despite the method names, connect() acts as a *server*
    (bind/listen/accept); the recovery loops in the except blocks below
    look broken -- see the inline notes.
    """
    # def __init__(self):
    def connect(self):
        """Bind to the fixed host/port and block until one PC client connects."""
        try:
            self.host = "192.168.9.9"
            self.port = 3000
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # self.socket.allow_reuse_address = True
            # self.socket-setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
            self.socket.bind((self.host, self.port))
            self.socket.listen(3)
            print "Waiting for connection from PC."
            # Blocks here; keeps the per-client socket and the peer address.
            self.client_sock, self.address = self.socket.accept()
            print "Connected to: ", self.address
            #receive the first message from client, know the client address
            #data, self.pcaddr = self.ipsock.recv(1024)
            print("PC Connected")
        except Exception, e:
            print "Error@PCConnect: %s" %str(e)
    def disconnect(self):
        """Close the listening socket (the client socket is left untouched)."""
        try:
            self.socket.close()
        except Exception, e:
            print "Error@PCDisconnect: %s" %str(e)
    def writetoPC(self,msg):
        """Send msg to the connected PC client; on failure attempt recovery."""
        try:
            self.client_sock.sendto(msg, self.address)
            print "Write to PC: %s" %(msg)
        except Exception, e:
            print "Error@PCWrite: %s" %str(e)
            #Added now
            # NOTE(review): this recovery loop looks broken:
            #  - socket.connect() needs an address argument and is invoked on
            #    the *listening* socket;
            #  - 'self.socket.disconnect' is never called (missing parens),
            #    and plain sockets have no disconnect() method anyway.
            connected = 0
            connected = self.socket.connect()
            while connected == 0:
                self.socket.disconnect
                time.sleep(1)
                self.socket.connect()
    def readfromPC(self):
        """Receive up to 1024 bytes from the PC client and return them."""
        try:
            #msg, addr = self.ipsock.recvfrom (1024)
            msg = self.client_sock.recv(1024)
            print "Read from PC: %s" %(msg)
            return msg
        except Exception, e:
            print "Error@PCRead: %s" %str(e)
            #Added now
            # NOTE(review): same questionable recovery loop as in writetoPC.
            connected = 0
            connected = self.socket.connect()
            while connected == 0:
                self.socket.disconnect
                time.sleep(1)
                self.socket.connect()
| [
"[email protected]"
]
| |
98abec5efcce801961012aeb66f7b575f4629f70 | 4ce948abfe57dbca767784294a804be6586b6c74 | /login_page/login.py | 2d0d6760e771f8cd1aca33d2ed03237a2c84aed3 | []
| no_license | XampleV/Password-Ch3cker | 275ee1a8be1424e1ecc9257060f605324030b292 | 14dc9ce7732b671a5e35b2dbea181210d253ebc6 | refs/heads/main | 2023-04-17T04:54:43.800189 | 2021-04-03T21:40:48 | 2021-04-03T21:40:48 | 354,161,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtWidgets import *
import sys
from login_page.login_page import Ui_Form as login
from login_page.program_functions import login_functions
import tkinter as tk
import tkinter.messagebox
root = tk.Tk()
root.withdraw()
app = QApplication()
login_f = login_functions()
continue_app = {"start":False}
class MainWindow(QMainWindow):
    """Login/signup window for Password Ch3cker.

    Wires the PySide2 UI (``login``) to the backend helpers in ``login_f``
    and sets ``continue_app['start']`` once two-factor auth succeeds.
    """
    def __init__(self):
        QMainWindow.__init__(self)
        self.ui = login()
        self.ui.setupUi(self)
        self.CustomSettings()
        self.SetupButtons()
        self.show()
    def CustomSettings(self):
        """Set the window title and mask both password fields."""
        self.setWindowTitle("Password Ch3cker - Login")
        self.ui.password_input.setEchoMode(QtWidgets.QLineEdit.Password)
        self.ui.signup_password_input.setEchoMode(QtWidgets.QLineEdit.Password)
    def SetupButtons(self):
        """Connect every button to its page switch or handler."""
        self.ui.signup_button.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.signup_page))
        self.ui.already_a_user_button.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.login_page))
        self.ui.register_button.clicked.connect(lambda: self.register_func())
        self.ui.login_button.clicked.connect(lambda: self.login_func())
        self.ui.submit_auth_button.clicked.connect(lambda: self.check_code())
    def register_func(self):
        """Validate the signup form, then create the account via login_f."""
        email, password = self.ui.signup_email_input.text(), self.ui.signup_password_input.text()
        if ("@" not in email):
            tkinter.messagebox.showerror("Invalid Email", "Please enter a valid email.")
            return
        if (password == ""):
            tkinter.messagebox.showerror("Invalid Password", "Please enter a valid password.")
            return
        # actually signing up here now...
        # login_f reports failures as an error string, success as True.
        register = login_f.register_account(email, password)
        if (type(register) == str):
            tkinter.messagebox.showerror("Failure", f"Failed to create your account.\nError: {register}")
            return
        if (register == True):
            tkinter.messagebox.showinfo("Success", "Successfully created your account!")
            self.ui.stackedWidget.setCurrentWidget(self.ui.login_page)
            return
        tkinter.messagebox.showerror("Failed", "Failed to create your account!")
    def login_func(self):
        """Check credentials; on success switch to the 2FA page."""
        login = login_f.login_account(self.ui.email_input.text(), self.ui.password_input.text())
        if (login == True):
            self.ui.stackedWidget.setCurrentWidget(self.ui.auth_page)
            return
        # BUG FIX: the original called the tkinter.messagebox *module* as a
        # function (tkinter.messagebox("Failure", ...)), which raises
        # TypeError instead of showing the failure dialog.
        tkinter.messagebox.showerror("Failure", "The credentials are incorrect.")
    def check_code(self):
        """Verify the 2FA code; on success unblock the app and close the tk root."""
        global continue_app
        check = login_f.check_code(self.ui.email_input.text(), self.ui.auth_code_input.text())
        if (check == True):
            continue_app["start"] = True
            tkinter.messagebox.showinfo('Success', "Successfully logged in!")
            root.destroy()
            return
        tkinter.messagebox.showerror("Failure", "Wrong code entered. ")
| [
"[email protected]"
]
| |
5fe0c36dfd90189443ff510579824e9ecd37ce54 | d9b992130073e63ca1173e317a1362bd54d431e5 | /blog/views.py | fc23bef1a263262c769abeb825f89da1319fbf0f | []
| no_license | boris-t/django_test_project | 61a0adc8dc2f35013d74fac413d67ffa2ed0b79e | 978ffa177eac84f2c5e17d098623a2f546ca5280 | refs/heads/master | 2020-08-28T00:53:10.776462 | 2019-10-25T14:37:40 | 2019-10-25T14:37:40 | 217,538,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from django.shortcuts import render
# Static in-memory stand-in data rendered by the views below.
posts = [
    {
        'author': 'Boris',
        'title': 'Blog Post 1',
        'content': 'First post content',
        'date_posted': 'October 25, 2019'
    },
    {
        'author': 'Corey',
        'title': 'Blog Post 2',
        'content': 'Second post content',
        'date_posted': 'August 25, 2019'
    }
]
def home(request):
    """Render the blog index with the module-level post list."""
    return render(request, 'blog/home.html', {'posts': posts})
def about(request):
    """Render the static about page with its title in the context."""
    context = {'title': 'About'}
    return render(request, 'blog/about.html', context)
| [
"[email protected]"
]
| |
4581a71677876a19591feba186db5b19a914147a | 1a7595a5896ca709eb98805b2a570bf12775a9ff | /muonShieldOptimization/study_muEloss.py | 56d16036361c9cb08313151c37f2833ff4938c81 | []
| no_license | nathandpenha/CERN-FairShip | 953683117f4971b323392bc1213b7ae7d9a3a708 | 10db3d519a5ac8fd67132afd39736b550cb60a30 | refs/heads/master | 2021-05-24T10:10:11.763338 | 2020-05-06T18:46:14 | 2020-05-06T18:46:14 | 261,848,065 | 2 | 0 | null | 2020-05-06T18:47:30 | 2020-05-06T18:39:22 | C++ | UTF-8 | Python | false | false | 10,048 | py | #!/usr/bin/env python
import ROOT,os,sys,getopt,time,shipRoot_conf
ROOT.gROOT.ProcessLine('#include "FairModule.h"')
time.sleep(20)
import shipunit as u
from ShipGeoConfig import ConfigRegistry
mcEngine = "TGeant4"
runnr = 1
nev = 1000000
setup = {}
#
setup['NA62'] = {'thickness': 125*u.cm/2., 'material':'krypton','momentum': 10*u.GeV,'maxTheta':350*u.GeV} # 3000s for 5M
# rad length 4.71cm 125/4.71 = 27
# https://indico.in2p3.fr/event/420/contributions/29860/attachments/24033/29479/moriond.pdf
setup['ATLAS'] = {'thickness': 172*u.cm/2., 'material':'iron','momentum': 350*u.GeV,'maxTheta':350*u.GeV} # 3000s for 5M
# atlas testbeam http://cds.cern.ch/record/1123152/files/CERN-THESIS-2008-070.pdf?version=1
# LArEM ~24X0 TileCal 4 compartments, same size LiqAr rad length 14cm
# http://cds.cern.ch/record/1263861/files/ATL-CAL-PUB-2010-001.pdf tile cal mainly iron, LAr 1.35 DM 0.63 TileCal 8.18
# iron intlen 16.97 -> (1.35 + 0.63 + 8.18)*16.97
setup['Fig3'] = {'thickness': 0.1*u.cm, 'material':'lead','momentum': 2*u.GeV,'maxTheta':0.2}
setup['Fig4'] = {'thickness': 0.1*u.cm, 'material':'lead','momentum': 8*u.GeV,'maxTheta':0.04}
setup['Fig5'] = {'thickness': 0.1*u.cm, 'material':'lead','momentum': 14*u.GeV,'maxTheta':0.02}
setup['Fig6'] = {'thickness': 1.44*u.cm, 'material':'copper','momentum': 11.7*u.GeV,'maxTheta':0.045}
setup['Fig7'] = {'thickness': 1.44*u.cm, 'material':'copper','momentum': 7.3*u.GeV,'maxTheta':0.045}
s = sys.argv[1]
thickness = setup[s]['thickness']
material = setup[s]['material']
momentum = setup[s]['momentum']
maxTheta = setup[s]['maxTheta']
checkOverlap = True
storeOnlyMuons = True
outFile = "msc"+s+".root"
theSeed = 0
ecut = 0.0
import rootUtils as ut
h={}
def run():
# -------------------------------------------------------------------
ROOT.gRandom.SetSeed(theSeed) # this should be propagated via ROOT to Pythia8 and Geant4VMC
shipRoot_conf.configure() # load basic libraries, prepare atexit for python
# ship_geo = ConfigRegistry.loadpy("$FAIRSHIP/geometry/geometry_config.py", Yheight = 10, tankDesign = 5, muShieldDesign = 7, nuTauTargetDesign=1)
# -----Timer--------------------------------------------------------
timer = ROOT.TStopwatch()
timer.Start()
# -----Create simulation run----------------------------------------
gFairBaseContFact = ROOT.FairBaseContFact() # required by change to FairBaseContFact to avoid TList::Clear errors
run = ROOT.FairRunSim()
run.SetName(mcEngine) # Transport engine
if nev==0: run.SetOutputFile("dummy.root")
else: run.SetOutputFile(outFile) # Output file
run.SetUserConfig("g4Config.C") # user configuration file default g4Config.C
rtdb = run.GetRuntimeDb()
# -----Materials----------------------------------------------
run.SetMaterials("media.geo")
# -----Create geometry----------------------------------------------
cave= ROOT.ShipCave("CAVE")
cave.SetGeometryFileName("cave.geo")
run.AddModule(cave)
#
target = ROOT.simpleTarget()
material, thickness, 0
#
target.SetEnergyCut(ecut*u.GeV)
if storeOnlyMuons: target.SetOnlyMuons()
target.SetParameters(material,thickness,0.)
run.AddModule(target)
#
primGen = ROOT.FairPrimaryGenerator()
myPgun = ROOT.FairBoxGenerator(13,1) # pdg id and multiplicity
if s=="NA62": myPgun.SetPRange(momentum,maxTheta)
else: myPgun.SetPRange(momentum-0.01,momentum+0.01)
myPgun.SetPhiRange(0,0) # // Azimuth angle range [degree]
myPgun.SetThetaRange(0,0) # // Polar angle in lab system range [degree]
myPgun.SetXYZ(0.*u.cm, 0.*u.cm, -1.*u.mm - (thickness) )
primGen.AddGenerator(myPgun)
#
run.SetGenerator(primGen)
# -----Initialize simulation run------------------------------------
run.Init()
if nev==0: return
gMC = ROOT.TVirtualMC.GetMC()
fStack = gMC.GetStack()
fStack.SetMinPoints(1)
fStack.SetEnergyCut(-1.)
# -----Start run----------------------------------------------------
print "run for ",nev,"events"
run.Run(nev)
# -----Start Analysis---------------
ROOT.gROOT.ProcessLine('#include "Geant4/G4EmParameters.hh"')
emP = ROOT.G4EmParameters.Instance()
emP.Dump()
h['f']= ROOT.gROOT.GetListOfFiles()[0].GetName()
# -----Finish-------------------------------------------------------
timer.Stop()
rtime = timer.RealTime()
ctime = timer.CpuTime()
print ' '
print "Macro finished succesfully."
print "Output file is ", outFile
print "Real time ",rtime, " s, CPU time ",ctime,"s"
def makePlot(f,book=True):
# print interaction and radiation length of target
sGeo=ROOT.gGeoManager
if sGeo:
v = sGeo.FindVolumeFast('target')
m = v.GetMaterial()
length = v.GetShape().GetDZ()*2
print "Material:",m.GetName(),'total interaction length=',length/m.GetIntLen(),'total rad length=',length/m.GetRadLen()
else:
density= 2.413
length= 125.0
print "Use predefined values:",density,length
if book:
ut.bookHist(h,'theta','scattering angle '+str(momentum)+'GeV/c;{Theta}(rad)',500,0,maxTheta)
ut.bookHist(h,'eloss','rel energy loss as function of momentum GeV/c',100,0,maxTheta,10000,0.,1.)
ut.bookHist(h,'elossRaw','energy loss as function of momentum GeV/c',100,0,maxTheta, 10000,0.,100.)
sTree = f.cbmsim
for n in range(sTree.GetEntries()):
rc = sTree.GetEvent(n)
Ein = sTree.MCTrack[0].GetEnergy()
M = sTree.MCTrack[0].GetMass()
Eloss = 0
for aHit in sTree.vetoPoint:
Eloss+=aHit.GetEnergyLoss()
print Ein,Eloss/Ein
rc = h['eloss'].Fill(Ein,Eloss/Ein)
rc = h['elossRaw'].Fill(Ein,Eloss)
ut.bookCanvas(h,key=s,title=s,nx=900,ny=600,cx=1,cy=1)
tc = h[s].cd(1)
if s=="NA62":
h['eloss'].Draw()
h['95'] = h['eloss'].ProjectionX('95',96,100)
h['95'].Sumw2()
h['0'] = h['eloss'].ProjectionX('0',1,100)
h['0'].Sumw2()
rc = h['95'].Divide(h['0'] )
h['95'].Draw()
h['meanEloss'] = h['elossRaw'].ProjectionX()
for n in range(1,h['elossRaw'].GetNbinsX()+1):
tmp = h['elossRaw'].ProjectionY('tmp',n,n)
eloss = tmp.GetMean()
h['meanEloss'].SetBinContent(n,eloss/density/length*1000)
h['meanEloss'].SetTitle('mean energy loss MeV cm2 / g')
h['meanEloss'].Draw()
elif s=="ATLAS":
h['eloss'].Draw()
h['>eloss']=h['eloss'].ProjectionY().Clone('>eloss')
cum = 0
N = float(h['>eloss'].GetEntries())
for n in range(h['>eloss'].GetNbinsX(),0,-1):
cum+=h['>eloss'].GetBinContent(n)
h['>eloss'].SetBinContent(n,cum/N)
print "Ethreshold event fraction in %"
for E in [15.,20.,30.,50.,80.]:
n = h['>eloss'].FindBin(E/350.)
print " %5.0F %5.2F "%(E,h['>eloss'].GetBinContent(n)*100)
else:
tc.SetLogy(1)
h['theta_100']=h['theta'].Clone('theta_100')
h['theta_100']=h['theta'].Rebin(5)
h['theta_100'].Scale(1./h['theta_100'].GetMaximum())
h['theta_100'].Draw()
h[s].Print(s+'.png')
h[s].Print(s+'.root')
f.Write(h['theta'].GetName())
f.Write(h['theta_100'].GetName())
def readChain():
    """Run makePlot over both split output files (..._0.root and ..._1.root).

    The first pass books the histograms; the second is added on top
    (makePlot(f, False) skips re-booking).
    """
    tmp = "/mnt/hgfs/microDisk/Data/mscNA62_X.root"  # 'X' is replaced by the file index
    for i in [0,1]:
        f = ROOT.TFile(tmp.replace('X',str(i)))
        if i==1: makePlot(f,False)  # book=False: reuse histograms from the first pass
        else: makePlot(f)
def NA62():
    """Load the digitized NA62 measurement into h['NA62'] as a TGraphErrors.

    'NA62.points' holds three consecutive lines per data point: the central
    (x, y) pair followed by two lines whose y values bound the error band.
    y values are scaled by 1e-6.
    """
    na62Points = open('NA62.points')  # NOTE(review): file handle is never closed
    allPoints = na62Points.readlines()
    N = int((len(allPoints)-1)/3.)  # 3 lines per point; last line presumably ignored
    h['NA62']=ROOT.TGraphErrors(N)
    for l in range(N):
        tmp = allPoints[3*l].split(',')
        x=float(tmp[0])
        y=float(tmp[1].replace('\n',''))
        tmp = allPoints[3*l+1].split(',')
        y1=float(tmp[1].replace('\n',''))
        tmp = allPoints[3*l+2].split(',')
        y2=float(tmp[1].replace('\n',''))
        h['NA62'].SetPoint(l,x,y*1E-6)
        h['NA62'].SetPointError(l,0,abs(y1-y2)/2.*1E-6)  # half the band width as symmetric error
    h['NA62'].SetLineColor(ROOT.kRed)
    h['NA62'].SetMarkerColor(ROOT.kRed)
    h['NA62'].SetMarkerStyle(20)
def makeSummaryPlot():
# using data in /mnt/hgfs/microDisk/Data/eloss/eloss_sum.root
# krypton total interaction length= 1.97246306079 total rad length= 26.5231000393
pdg={10.0:1.914,14.0:1.978,20.0:2.055,30.0:2.164,40.0:2.263,80.0:2.630,100.:2.810,140.:3.170,200.:3.720,277.:4.420,300.:4.631,400.:5.561}
h['Gpdg'] = ROOT.TGraph(len(pdg))
Gpdg = h['Gpdg']
Gpdg.SetMarkerColor(ROOT.kRed)
Gpdg.SetMarkerStyle(20)
keys = pdg.keys()
keys.sort()
for n in range(len(keys)):
Gpdg.SetPoint(n,keys[n],pdg[keys[n]])
density= 2.413
length= 125.0
ut.readHists(h,"/mnt/hgfs/microDisk/Data/eloss/eloss_sum.root")
ut.readHists(h,"/mnt/hgfs/microDisk/Data/eloss/eloss_withRaw.root")
ut.bookCanvas(h,key='summary',title=" ",nx=1200,ny=600,cx=2,cy=1)
tc = h['summary'].cd(1)
h['0'] = h['eloss'].ProjectionX('0',1,h['eloss'].GetNbinsY())
h['0'].Sumw2()
NA62()
for t in [93,95]:
h[t] = h['eloss'].ProjectionX(str(t),int(h['eloss'].GetNbinsY()*t/100.),h['eloss'].GetNbinsY())
h[t].Sumw2()
h[t].SetStats(0)
h[t].SetMarkerStyle(24)
rc = h[t].Divide(h['0'] )
h[t].Rebin(2)
h[t].Scale(1./2.)
if t!=93:
h[t].SetMarkerColor(ROOT.kBlue)
h[t].Draw('same')
else:
h[t].SetMaximum(1E-5)
h[t].SetMarkerColor(ROOT.kMagenta)
h[t].SetXTitle('incoming muon momentum [GeV/c]')
h[t].SetYTitle('prob #DeltaE>X%')
h[t].SetTitle('')
h[t].Draw()
h['NA62'].Draw('sameP')
h['lg'] = ROOT.TLegend(0.53,0.79,0.98,0.94)
h['lg'].AddEntry(h['NA62'],'NA62 measurement >95%','PL')
h['lg'].AddEntry(h[95],'FairShip >95%','PL')
h['lg'].AddEntry(h[93],'FairShip >93%','PL')
h['lg'].Draw()
tc = h['summary'].cd(2)
h['meanEloss'] = h['elossRaw'].ProjectionX()
for n in range(1,h['elossRaw'].GetNbinsX()+1):
tmp = h['elossRaw'].ProjectionY('tmp',n,n)
eloss = tmp.GetMean()
h['meanEloss'].SetBinContent(n,eloss/density/length*1000)
h['meanEloss'].SetBinError(n,0)
h['meanEloss'].SetTitle('mean energy loss MeV cm^{2}/g')
h['meanEloss'].SetStats(0)
h['meanEloss'].SetMaximum(7.)
h['meanEloss'].SetXTitle('incoming muon momentum [GeV/c]')
h['meanEloss'].SetYTitle('mean energy loss [MeV cm^[2]]/g')
h['meanEloss'].SetTitle('')
h['meanEloss'].Draw()
Gpdg.Draw('sameP')
h['lg2'] = ROOT.TLegend(0.53,0.79,0.98,0.94)
h['lg2'].AddEntry(h['Gpdg'],'muon dE/dx, PDG ','PL')
h['lg2'].AddEntry(h['meanEloss'],'energy deposited in krypton, FairShip','PL')
h['lg2'].Draw()
h['summary'].Print('catastrophicEnergyLoss.png')
| [
"[email protected]"
]
| |
680d96b054b74302c31825e8d1fb6d117d66499b | a4515918f56dd7ab527e4999aa7fce818b6dd6f6 | /Data Structures/LinkedLists/Python/copy_random_pointer.py | b3ef4628d60d581eefa6d200d79fc56db9a8d61f | [
"MIT"
]
| permissive | rathoresrikant/HacktoberFestContribute | 0e2d4692a305f079e5aebcd331e8df04b90f90da | e2a69e284b3b1bd0c7c16ea41217cc6c2ec57592 | refs/heads/master | 2023-06-13T09:22:22.554887 | 2021-10-27T07:51:41 | 2021-10-27T07:51:41 | 151,832,935 | 102 | 901 | MIT | 2023-06-23T06:53:32 | 2018-10-06T11:23:31 | C++ | UTF-8 | Python | false | false | 1,099 | py | """
A linked list is given such that each node contains an additional random
pointer which could point to any node in the list or null.
Return a deep copy of the list.
"""
from collections import defaultdict
class RandomListNode(object):
    """Singly linked node that also carries an extra 'random' pointer."""
    def __init__(self, label):
        # Payload plus the two outgoing links, both initially unset.
        self.label = label
        self.next = self.random = None
def copy_random_pointer_v1(head):
    """Deep-copy a list with random pointers via a two-pass original->clone map.

    :type head: RandomListNode
    :rtype: RandomListNode
    """
    # Pass 1: clone every node, keyed by its original.
    clones = {}
    node = head
    while node:
        clones[node] = RandomListNode(node.label)
        node = node.next
    # Pass 2: wire next/random on the clones through the map
    # (dict.get maps a None link to None).
    node = head
    while node:
        clones[node].next = clones.get(node.next)
        clones[node].random = clones.get(node.random)
        node = node.next
    return clones.get(head)
# O(n)
def copy_random_pointer_v2(head):
    """Deep-copy a list with random pointers in a single pass.

    A defaultdict lazily creates a placeholder clone the first time any
    original node is referenced; None maps to None so tail/null links
    copy through unchanged.

    :type head: RandomListNode
    :rtype: RandomListNode
    """
    clones = defaultdict(lambda: RandomListNode(0))
    clones[None] = None
    cur = head
    while cur:
        mirror = clones[cur]
        mirror.label = cur.label
        mirror.random = clones[cur.random]
        mirror.next = clones[cur.next]
        cur = cur.next
    return clones[head]
| [
"[email protected]"
]
| |
196258d3272de1c192f99e01320a0b59413e834b | dddc7193d2cee71dd4f1a7bf5f869d984edd44b3 | /Avalanche analysis.py | d0bc8c2548fd5edf57fe4680d0d59b8e164298b9 | []
| no_license | BaptisteMP/ML_avalanches_prediction | 19c16bd0b63c5f4fcfa9edaccb1c9f883d55c594 | dd95afafc75514489a14d7ff6f3a8fe149ceb18b | refs/heads/master | 2020-12-28T02:27:15.898636 | 2020-02-04T09:23:58 | 2020-02-04T09:23:58 | 238,152,034 | 0 | 0 | null | 2020-02-04T09:22:45 | 2020-02-04T07:53:24 | null | UTF-8 | Python | false | false | 10,872 | py |
# coding: utf-8
# In[15]:
import numpy as np
from sklearn import cross_validation
import csv as csv
import pandas as pd
import sklearn
# In[53]:
with open('avalanche_accidents_switzerland_since_1995.csv', 'r') as csvfile:
data = csv.reader(csvfile, delimiter=',')
taille_data = 'tamere'
avalanches = [ [_ for _ in range(17)] for j in range(402) ]
i = 0
for x in data:
for k in range(17):
word = x[k]
avalanches[i][k] = word
i += 1
# In[49]:
avalanches
# In[57]:
data = np.array(avalanches)
# In[62]:
data[0][16]
# In[3]:
# In[60]:
data.shape
# In[63]:
datapd = pd.DataFrame(data=data[1:, :], columns=data[0,:])
# In[81]:
datapd
# In[73]:
#On supprime la colonne datequality
del datapd['date.quality']
# In[80]:
datapd.describe()
# In[79]:
#On supprime les colonnes start.zone.coordinates, coordinates.quality, canton
del datapd['canton'], datapd['start.zone.coordinates.x'], datapd['start.zone.coordinates.y'], datapd['coordinates.quality']
# In[84]:
X = datapd['forecasted.dangerlevel']
def details_uniq(X):
    """Count occurrences of each distinct value in iterable X.

    Returns a dict mapping value -> count (empty dict for empty input).
    """
    counts = {}
    for x in X:
        # dict.get collapses the original membership-test branching
        # into a single lookup per element.
        counts[x] = counts.get(x, 0) + 1
    return counts
# In[85]:
dead = datapd['number.dead']
caught = datapd['number.caught']
fb = datapd['number.fully.buried']
print(details_uniq(dead), details_uniq(caught), details_uniq(fb))
# In[86]:
del datapd['avalanche.id']
# In[87]:
datapd
# In[109]:
#suite du preprocess:
#transformation de la date en quelque chose d'exploitable:
def transfo_annee(str_date):
    """Map an ISO-style 'YYYY-MM-DD' string to years elapsed since 1995."""
    return int(str_date[:4]) - 1995
#on fait le mois +6 mod 12 pour transférer août
def transfo_date(str_date):
    """Encode day+month of 'YYYY-MM-DD' as one ordinal with a contiguous winter.

    The month is rotated by 6 (mod 12) so December and January stay adjacent,
    then the day of month is added onto a 31-slots-per-month grid, giving each
    calendar day a unique number.
    """
    month = int(str_date[5:7])
    day = int(str_date[8:10])
    return day + 31 * ((month + 6) % 12)
# In[112]:
# Apply the date transforms column-wise.
datapd['date'] = datapd['date'].apply(transfo_date)
datapd['hydrological.year'] = datapd['hydrological.year'].apply(transfo_annee)
# In[121]:
datapd = datapd.rename(index=str, columns={'date':'day_and_month', 'hydrological.year':'year'})
# In[122]:
datapd
# In[123]:
details_uniq(datapd['start.zone.slope.aspect'])
# In[124]:
# 16-point compass rose recoded to integers 0..15, 'NA' -> -1.
# NOTE(review): the numbering interleaves neighbouring directions (N=0,
# NNW=1, NW=2, ...) rather than going straight around the rose -- verify
# this ordering is intentional before relying on adjacency.
dic_directions = {'NW':2,'NNE':15,'E':4,'NNW':1,'SE':10,'N':0,'W':12,'SW':6,'ESE':11,'NE':14,'WNW':3,'S':8,'ENE':13,'WSW':5,'SSE':9,'NA':-1,'SSW':7}
# In[125]:
# Recode the slope aspect so it becomes a (pseudo-)continuous value.
datapd['start.zone.slope.aspect'] = datapd['start.zone.slope.aspect'].apply(lambda x: dic_directions[x])
# In[141]:
datapd = datapd.rename(index=str, columns={'start.zone.slope.aspect':'zone_orientation'})
# In[126]:
datapd
# In[127]:
dangerlevels = details_uniq(datapd['forecasted.dangerlevel'])
inclinations = details_uniq(datapd['start.zone.inclination'])
print(dangerlevels, inclinations)
# In[136]:
def mean_withoutNA(dico_counts):
    """Weighted mean of the numeric (string-encoded) keys of a count dict.

    `dico_counts` maps a value (e.g. '3') to its occurrence count; the 'NA'
    entry, if present, is excluded from the average.
    """
    weight = 0
    total = 0
    for key, count in dico_counts.items():
        if key == 'NA':
            continue
        count = int(count)
        weight += count
        total += count * int(key)
    return total / weight
# In[137]:
# Mean danger level and mean inclination, used below to impute 'NA' values.
mean_danger = mean_withoutNA(dangerlevels)
mean_inclinations = mean_withoutNA(inclinations)
print(mean_danger, mean_inclinations)
# In[138]:
# Replace the 'NA' entries with the mean of the remaining values, for the
# inclination and the avalanche danger level.
def replace(value_to_check, string_to_replace, mean):
    """Return `mean` when `value_to_check` equals the sentinel, else the value unchanged."""
    return mean if value_to_check == string_to_replace else value_to_check
# In[139]:
# Impute 'NA' with the means printed above (hard-coded 2.69 and 40.27).
datapd['forecasted.dangerlevel'] = datapd['forecasted.dangerlevel'].apply(lambda x: replace(x, 'NA', 2.69))
datapd['start.zone.inclination'] = datapd['start.zone.inclination'].apply(lambda x: replace(x, 'NA', 40.27))
# In[142]:
datapd
# In[143]:
activities = details_uniq(datapd['activity'])
activities
# In[144]:
# Drop the local.name column; the 'unknown' activity is replaced by the mean.
del datapd['local.name']
# In[145]:
# Ordinal recoding of the activity; 'other, mixed or unknown' becomes 'NA'
# and is then imputed with the mean of the known activities.
dico_activities = {'offpiste': 2, 'tour': 3, 'transportation.corridor': 1, 'building': 0, 'other, mixed or unknown': 'NA'}
datapd['activity'] = datapd['activity'].apply(lambda x: dico_activities[x])
activities = details_uniq(datapd['activity'])
mean_activites = mean_withoutNA(activities)
datapd['activity'] = datapd['activity'].apply(lambda x: replace(x, 'NA', mean_activites))
# In[205]:
datapd
# In[147]:
# Several goals: first, predict whether a fatal avalanche will occur under
# given conditions; then, predict the avalanche risk per activity type;
# finally, compare our computed risk with the one given in the database.
# Predicting whether a fatal avalanche occurs under given conditions:
#
# We have no data for the moments when there was *no* avalanche. Options:
# - for a given set of conditions, compute the (feature-weighted) distances
#   to every other record and derive a risk from the cumulative distance;
# - for a record, find its nearest neighbours ignoring the date, then use
#   the neighbours' dates to estimate an avalanche-release frequency;
# - cluster the records and derive per-cluster risks.
#
#
# In[206]:
# No feature weighting: there is no reference risk to evaluate weights
# against, so all weights are kept equal to 1.
data = datapd.values
train_risk = data[:, :6]
# In[155]:
# (Abandoned idea, see risk_fitness below: evolve the weights with a genetic
# algorithm whose fitness derives a risk from unweighted nearest neighbours.)
datapd = datapd.apply(pd.to_numeric)
# In[157]:
datapd.describe()
# In[ ]:
# Per-column std deviations used by the abandoned fitness prototype below.
dic_std = {'2':530, '3':5.3, '4':4.17}
def risk_fitness(vecteur, other_points, precision_distance):
    # ABANDONED prototype, kept for reference only (see note at the end).
    # NOTE(review): this function is unfinished and buggy:
    #  * `k` iterates over the *string* keys of dic_std, so `k == 3` is
    #    always False and the first branch can never run;
    #  * `distance_max` is therefore read before ever being assigned
    #    (NameError on first use of the elif branch);
    #  * `proches` is built but never used and nothing is returned.
    proches = []
    for point in other_points:
        est_proche = True
        for k in dic_std:
            if k == 3 and not -1 <= abs(point[k]-vecteur[k])%16 <= 1:
                est_proche = False
                distance_max = dic_std[k]*precision_distance
            elif not abs(point[k]-vecteur[k]) <= distance_max:
                est_proche = False
        if est_proche: proches.append(point)
    # Count the nearby avalanches to derive a potential risk.
    # IDEA ABANDONED: too complicated and it does not work.
# In[166]:
# Build the working array: normalise day_and_month, start-zone elevation,
# zone orientation, start-zone inclination and forecasted dangerlevel with
# hard-coded (mean, std) values taken from datapd.describe().
data[:, 0] = (data[:, 0]-250.69)/59
# NOTE(review): tuple-assignment of numpy columns exchanges *views*: the
# second assignment reads the column already overwritten by the first, so
# this does NOT swap columns 1 and 5 (column 5 ends up unchanged). A real
# swap would be `data[:, [1, 5]] = data[:, [5, 1]]` -- confirm intent.
data[:, 1], data[:, 5] = data[:, 5], data[:, 1]
data[:, 1] = (data[:, 1]-2.69)/0.55
data[:, 2] = (data[:, 2]-2517)/530
data[:, 3] = (data[:, 3]-6.94)/5.3
data[:, 4] = (data[:, 4]-40.3)/4.17
# In[207]:
data
# In[180]:
# With unit weights, compute each record's cumulative distance to its
# nb_proches nearest neighbours and the total deaths among them: a small
# total distance (dense neighbourhood) indicates a high potential risk.
# Per-column [mean, std] of the raw features, keyed by column index as a
# string; used to scale each coordinate before computing distances (column 1
# holds the swapped-in danger level, hence key '5').
dic_mean_std = {'0':[250.69, 59], '2': [2517,530], '3': [6.94, 5.3], '4': [40.3, 4.17], '5': [2.69, 0.55]}
def distance_cum(point, other_point, nb_proches):
    """Cumulative std-scaled L1 distance from `point` to its `nb_proches`
    nearest rows of `other_point` (only columns 0..5 are used).

    :param point: 1-D array-like with at least 6 numeric entries.
    :param other_point: 2-D array of shape (n, >=6); may be empty.
    :param nb_proches: number of nearest neighbours to accumulate.
    :return: float, sum of the `nb_proches` smallest distances (0.0 when
             `other_point` has no rows).
    """
    nb_other_points = other_point.shape[0]
    distances = []
    for i in range(nb_other_points):
        current_pt = other_point[i]
        dist = 0.0
        for k in [0, 2, 4, 5]:
            dist += abs(current_pt[k] - point[k]) / dic_mean_std[str(k)][1]
        # Zone orientation handled separately because of the modulo-16 wrap.
        # Fixed: index [1] selects the std; the original divided by the whole
        # [mean, std] list, which raised a TypeError.
        dist += abs(current_pt[3] - point[3]) % 16 / dic_mean_std['3'][1]
        distances.append(dist)
    # Fixed: use the builtin sorted() (`sort` was undefined here) and keep
    # the *smallest* nb_proches distances -- the nearest neighbours -- where
    # the original sliced [-nb_proches:] and summed the farthest rows.
    return sum(sorted(distances)[:nb_proches])
# In[217]:
# Leave-one-out cumulative distance of every record to its 20 nearest
# neighbours (rows before i plus rows after i, never row i itself).
distances = []
nb_points = data.shape[0]
nb_proches = 20
for i in range(nb_points):
    distances.append(distance_cum(data[i], data[0:i, :], nb_proches) + distance_cum(data[i], data[i+1:, :], nb_proches))
# In[211]:
# NOTE(review): `argmax`/`argmin` are not imported in this chunk --
# presumably numpy's via a star import in an earlier cell; confirm
# (otherwise use np.argmax / np.argmin).
print(argmax(distances))
# 314 / 0 below are the indices printed by the argmax/argmin above,
# hard-coded after inspection.
maxdist = distances[314]
# In[212]:
print(argmin(distances))
mindist = distances[0]
# In[218]:
# Rescale so that a small cumulative distance (dense neighbourhood) gives a
# risk close to 1.
distances = 1 - ((np.array(distances) - mindist) / maxdist)
distances
# In[184]:
# Scratch cells checking numpy slicing behaviour.
x = np.arange(10).reshape((5,2))
# In[208]:
data[310:317]
# In[194]:
x[0:3:2,:]
# In[232]:
# Trying a clustering approach.
# In[231]:
from sklearn.cluster import KMeans
# In[268]:
data_clustering = datapd.copy()
# In[269]:
del data_clustering['year']
# In[270]:
data_clustering
# In[271]:
# Keep the per-column mean/min/max so the normalisation can be inverted
# later by back_real().
meandata = data_clustering.mean()
mindata = data_clustering.min()
maxdata = data_clustering.max()
data_clustering = (data_clustering - data_clustering.mean()) / (data_clustering.max() - data_clustering.min())
# In[272]:
X = data_clustering.values
# In[273]:
# K-means on all normalised features; fixed seed for reproducibility.
nb_clusters = 10
kmeans = KMeans(n_clusters=nb_clusters, random_state=0).fit(X)
print(kmeans.labels_)
print(kmeans.cluster_centers_)
# In[287]:
def back_real(vectors):
    """Invert the normalisation in place: x * (max - min) + mean per column.

    Relies on the module-level `meandata`, `mindata` and `maxdata` Series
    computed from the un-normalised frame; mutates and returns `vectors`.
    """
    width = vectors.shape[1]
    for row in vectors:
        for col in range(width):
            row[col] = row[col] * (maxdata[col] - mindata[col]) + meandata[col]
    return vectors
# In[275]:
back_real(kmeans.cluster_centers_)
# In[292]:
# Second clustering on the terrain features only; for each cluster, report
# the mean of dead / caught / fully-buried / activity.
data_cluster = datapd.copy()
del data_cluster['year']
meandata = data_cluster.mean()
mindata = data_cluster.min()
maxdata = data_cluster.max()
data_clust = (data_cluster - data_cluster.mean()) / (data_cluster.max() - data_cluster.min())
# Cluster on the first 5 (terrain) columns only.
X = data_clust.values[:, :5]
taille = X.shape[0]
nb_clusters = 4
kmeans = KMeans(n_clusters=nb_clusters, random_state=0).fit(X)
labels = kmeans.labels_
centers = kmeans.cluster_centers_
print(labels)
print(centers)
# Per-cluster accumulators over the *un-normalised* records.
dics = [{'dead':0, 'count':0, 'caught':0, 'burried':0, 'activity':0} for _ in range(nb_clusters)]
data_clust_np = data_cluster.values
for i in range(taille):
    vect = data_clust_np[i, :]
    clust = labels[i]
    dics[clust]['count'] += 1
    dics[clust]['dead'] += vect[5]
    dics[clust]['burried'] += vect[7]
    dics[clust]['caught'] += vect[6]
    dics[clust]['activity'] += vect[8]
for dic in dics:
    dic['dead/count'] = dic['dead']/dic['count']
    dic['caught/count'] = dic['caught']/dic['count']
    dic['burried/count'] = dic['burried']/dic['count']
print(dics)
print(back_real(centers))
# In[289]:
# NOTE(review): back_real mutates `centers` in place, so this second call
# applies the de-normalisation twice to the same array -- confirm intent.
back_real(centers)
| [
"[email protected]"
]
| |
05b2fd5abd1def71c9d8b87c5de771533ae8cf73 | 953f37707f4ebb477101480a8429ead70537607a | /create_sets.py | f3a770270e71da728a8ba96523459053f54df775 | []
| no_license | ollKTH/DL_project_latest | 87f6eb22daa41b172a4b3bf33269b628f14e10bd | 7dd7fcb9c9e0b34dc4931eec9365cfc6c2dc6caa | refs/heads/master | 2020-03-16T02:15:50.540061 | 2018-05-15T20:17:54 | 2018-05-15T20:17:54 | 132,460,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import numpy as np
def split(_images, _labels, _factor):
    '''
    Split _images and _labels into a training set and a test set.

    :param _images: array of shape (n, H, W) -- one image per label
    :param _labels: array-like of length n
    :param _factor: fraction of the data reserved for the test set (0..1)
    :return: (_x_train, _y_train, _x_test, _y_test)
    '''
    _length = len(_labels)
    # Index where the training part ends (rounded to the nearest sample).
    _split_idx = np.round(_length * (1 - _factor))
    _split_idx = np.int32(_split_idx)
    # Fixed off-by-one: the original sliced [1:_split_idx] / [_split_idx+1:],
    # silently dropping sample 0 and sample _split_idx from both sets.
    _x_train = _images[:_split_idx, :, :]
    _y_train = _labels[:_split_idx]
    _x_test = _images[_split_idx:, :, :]
    _y_test = _labels[_split_idx:]
    return _x_train, _y_train, _x_test, _y_test
| [
"[email protected]"
]
| |
b9f3a49f7f1fe0e94be6a1066047c260b2555dcc | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/TauES_test/down/emb/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374851334/HTT_24Jul_newTES_manzoni_Down_Jobs/Job_18/run_cfg.py | 38dbc249e4f6a3beb3e7f9386fe60200d89f9895 | []
| no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,054 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/TauES_test/down/emb/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374851334/HTT_24Jul_newTES_manzoni_Down_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
lumisToProcess = cms.untracked.VLuminosityBlockRange( ("190645:10-190645:110", "190646:1-190646:111", "190659:33-190659:167", "190679:1-190679:55", "190688:69-190688:249",
"190702:51-190702:53", "190702:55-190702:122", "190702:124-190702:169", "190703:1-190703:252", "190704:1-190704:3",
"190705:1-190705:5", "190705:7-190705:65", "190705:81-190705:336", "190705:338-190705:350", "190705:353-190705:383",
"190706:1-190706:126", "190707:1-190707:237", "190707:239-190707:257", "190708:1-190708:189", "190733:71-190733:96",
"190733:99-190733:389", "190733:392-190733:460", "190736:1-190736:80", "190736:83-190736:185", "190738:1-190738:130",
"190738:133-190738:226", "190738:229-190738:349", "190782:55-190782:181", "190782:184-190782:233", "190782:236-190782:399",
"190782:401-190782:409", "190895:64-190895:202", "190895:210-190895:302", "190895:305-190895:584", "190895:587-190895:948",
"190906:73-190906:256", "190906:259-190906:354", "190906:356-190906:496", "190945:124-190945:207", "190949:1-190949:81",
"191043:45-191043:46", "191046:1-191046:21", "191046:24-191046:82", "191046:84-191046:88", "191046:92-191046:116",
"191046:119-191046:180", "191046:183", "191046:185-191046:239", "191056:1", "191056:4-191056:9",
"191056:16-191056:17", "191056:19", "191057:1", "191057:4-191057:40", "191062:1",
"191062:3", "191062:5-191062:214", "191062:216-191062:541", "191090:1-191090:55", "191201:38-191201:49",
"191201:52-191201:79", "191202:1-191202:64", "191202:66-191202:68", "191202:87-191202:105", "191202:108-191202:118",
"191226:77-191226:78", "191226:81-191226:831", "191226:833-191226:1454", "191226:1456-191226:1466", "191226:1469-191226:1507",
"191226:1510-191226:1686", "191247:1-191247:153", "191247:156-191247:280", "191247:283-191247:606", "191247:608-191247:620",
"191247:622-191247:818", "191247:821-191247:834", "191247:837-191247:1031", "191247:1034-191247:1046", "191247:1049-191247:1140",
"191247:1143-191247:1187", "191247:1190-191247:1214", "191247:1217-191247:1224", "191248:1-191248:103", "191264:59-191264:79",
"191264:82-191264:152", "191264:155-191264:189", "191271:56-191271:223", "191271:225-191271:363", "191276:1-191276:16",
"191277:1-191277:28", "191277:30-191277:164", "191277:167-191277:253", "191277:255-191277:457", "191277:460-191277:535",
"191277:537-191277:576", "191277:579-191277:775", "191277:778-191277:811", "191277:813-191277:849", "191367:1-191367:2",
"191411:1-191411:23", "191695:1", "191718:43-191718:95", "191718:98-191718:207", "191720:1",
"191720:3-191720:15", "191720:17-191720:181", "191721:1", "191721:3-191721:34", "191721:36-191721:183",
"191721:186-191721:189", "191726:1-191726:13", "191810:15", "191810:22-191810:49", "191810:52-191810:92",
"191830:54-191830:242", "191830:245-191830:301", "191830:304-191830:393", "191833:1", "191833:3-191833:103",
"191834:1-191834:30", "191834:33-191834:74", "191834:77-191834:299", "191834:302-191834:352", "191837:1-191837:44",
"191837:47-191837:53", "191837:56-191837:65", "191856:1-191856:133", "191859:1-191859:28", "191859:31-191859:126",
"193093:1-193093:33", "193123:1-193123:27", "193124:1-193124:52", "193192:58-193192:86", "193193:1-193193:6",
"193193:8", "193193:11-193193:83", "193193:86-193193:120", "193193:122-193193:160", "193193:162-193193:274",
"193193:276-193193:495", "193193:497-193193:506", "193207:54-193207:182", "193334:29-193334:172", "193336:1-193336:264",
"193336:267-193336:492", "193336:495-193336:684", "193336:687-193336:729", "193336:732-193336:951", "193541:77-193541:101",
"193541:103-193541:413", "193541:416-193541:575", "193541:578-193541:619", "193556:41-193556:83", "193557:1-193557:84",
"193575:48-193575:173", "193575:176-193575:349", "193575:351-193575:394", "193575:397-193575:415", "193575:417-193575:658",
"193575:660-193575:752", "193621:60-193621:570", "193621:573-193621:769", "193621:772-193621:976", "193621:979-193621:1053",
"193621:1056-193621:1137", "193621:1139-193621:1193", "193621:1195-193621:1371", "193621:1373-193621:1654", "193834:1-193834:35",
"193835:1-193835:20", "193835:22-193835:26", "193836:1-193836:2", "193998:66-193998:113", "193998:115-193998:278",
"193999:1-193999:45", "194027:57-194027:113", "194050:53-194050:113", "194050:116-194050:273", "194050:275-194050:355",
"194050:357-194050:369", "194050:372-194050:391", "194050:394-194050:490", "194050:492-194050:814", "194050:816-194050:1435",
"194050:1437-194050:1735", "194050:1760-194050:1888", "194051:1-194051:12", "194052:1-194052:99", "194052:102-194052:166",
"194075:48-194075:101", "194075:103", "194075:105-194075:107", "194075:109", "194075:111",
"194076:1-194076:9", "194076:11-194076:55", "194076:58-194076:163", "194076:165-194076:228", "194076:230-194076:264",
"194076:267-194076:507", "194076:509-194076:527", "194076:530-194076:538", "194076:541-194076:562", "194076:565-194076:748",
"194108:81-194108:161", "194108:164-194108:264", "194108:266-194108:373", "194108:376-194108:396", "194108:398-194108:433",
"194108:436-194108:452", "194108:454-194108:577", "194108:579-194108:590", "194108:593-194108:668", "194108:671-194108:872",
"194115:66-194115:184", "194115:186-194115:338", "194115:340-194115:346", "194115:348-194115:493", "194115:496-194115:731",
"194115:819-194115:857", "194117:1-194117:38", "194119:1-194119:229", "194119:232-194119:261", "194120:1-194120:162",
"194120:165-194120:406", "194150:42-194150:127", "194150:129-194150:261", "194150:264-194150:311", "194151:47-194151:72",
"194151:75-194151:191", "194151:193-194151:238", "194151:240-194151:617", "194151:619", "194151:621",
"194151:623", "194153:1-194153:115", "194199:96-194199:227", "194199:229-194199:336", "194199:339-194199:402",
"194210:3-194210:195", "194210:198-194210:217", "194210:220-194210:359", "194210:361-194210:555", "194223:61-194223:112",
"194224:1-194224:126", "194224:129-194224:206", "194224:208-194224:250", "194224:253-194224:309", "194224:312-194224:386",
"194224:389-194224:412", "194225:1-194225:23", "194225:26-194225:47", "194225:49-194225:85", "194225:88-194225:149",
"194270:56-194270:68", "194303:56-194303:66", "194303:69-194303:102", "194304:1-194304:43", "194304:46",
"194305:1-194305:84", "194314:52-194314:130", "194314:133-194314:300", "194315:1-194315:10", "194315:13-194315:314",
"194315:317-194315:428", "194315:431-194315:452", "194315:455-194315:467", "194317:1-194317:20", "194424:63-194424:141",
"194424:144-194424:195", "194424:198-194424:266", "194424:268-194424:421", "194424:424-194424:478", "194424:481-194424:531",
"194424:534-194424:553", "194424:556-194424:706", "194424:708", "194428:1-194428:85", "194428:87-194428:122",
"194428:125-194428:294", "194428:296-194428:465", "194429:1-194429:4", "194429:7-194429:54", "194429:57-194429:147",
"194429:150-194429:411", "194429:413-194429:742", "194429:745-194429:986", "194429:988-194429:1019", "194439:46-194439:77",
"194439:79-194439:106", "194455:45-194455:64", "194455:67-194455:140", "194455:142-194455:255", "194455:293-194455:303",
"194464:1-194464:127", "194464:130-194464:142", "194464:145-194464:210", "194479:1-194479:44", "194479:165-194479:232",
"194479:235-194479:262", "194479:265-194479:374", "194479:377-194479:431", "194479:434-194479:489", "194479:492-194479:529",
"194479:531-194479:566", "194480:1-194480:32", "194480:34-194480:205", "194480:207-194480:375", "194480:377-194480:387",
"194480:389-194480:759", "194480:762-194480:956", "194480:959-194480:1402", "194533:46-194533:379", "194533:382-194533:415",
"194533:417-194533:618", "194533:620-194533:872", "194619:31-194619:110", "194631:1-194631:42", "194631:44-194631:100",
"194631:102-194631:169", "194631:171-194631:222", "194643:1-194643:287", "194644:1-194644:168", "194644:171-194644:181",
"194644:184-194644:185", "194644:187-194644:319", "194644:321-194644:421", "194691:61-194691:104", "194691:107-194691:155",
"194691:158-194691:251", "194691:254-194691:268", "194691:271-194691:272", "194691:275-194691:289", "194691:292-194691:313",
"194699:1-194699:30", "194699:32-194699:52", "194699:55-194699:64", "194699:67-194699:71", "194699:73-194699:154",
"194699:157-194699:215", "194699:218-194699:238", "194699:241-194699:259", "194702:1-194702:138", "194702:141-194702:191",
"194704:1-194704:41", "194704:44-194704:545", "194704:548-194704:592", "194711:1-194711:7", "194711:9-194711:619",
"194712:1-194712:56", "194712:61-194712:418", "194712:420-194712:625", "194712:627-194712:759", "194735:44-194735:71",
"194735:74-194735:101", "194735:104-194735:130", "194778:60-194778:118", "194778:120-194778:219", "194789:1-194789:18",
"194789:21-194789:32", "194789:34-194789:80", "194789:82-194789:166", "194789:168-194789:269", "194789:272-194789:405",
"194789:409-194789:414", "194789:417-194789:427", "194789:430-194789:566", "194790:1-194790:45", "194825:72-194825:117",
"194825:120-194825:221", "194896:34-194896:55", "194896:58-194896:79", "194896:82-194896:103", "194897:1-194897:6",
"194897:8-194897:78", "194897:80-194897:96", "194897:98-194897:102", "194912:53-194912:70", "194912:72-194912:96",
"194912:98-194912:444", "194912:446-194912:450", "194912:453-194912:467", "194912:470-194912:561", "194912:564-194912:660",
"194912:663-194912:813", "194912:815-194912:840", "194912:843-194912:864", "194912:866-194912:1004", "194912:1007-194912:1025",
"194912:1027-194912:1067", "194912:1069-194912:1137", "194912:1140-194912:1166", "194912:1168-194912:1249", "194912:1251-194912:1304",
"194912:1307-194912:1444", "194912:1447-194912:1487", "194912:1489-194912:1503", "194912:1506-194912:1662", "194914:1-194914:38",
"194915:1-194915:74", "195013:94-195013:144", "195013:146-195013:185", "195013:187-195013:206", "195013:208-195013:299",
"195013:302-195013:324", "195013:326-195013:366", "195013:369-195013:447", "195013:450-195013:526", "195013:528-195013:541",
"195014:1-195014:6", "195014:9-195014:119", "195014:121-195014:148", "195015:1-195015:13", "195016:1-195016:21",
"195016:23-195016:55", "195016:58-195016:63", "195016:65-195016:174", "195016:177-195016:184", "195016:186-195016:241",
"195016:243-195016:246", "195016:248-195016:251", "195016:254-195016:367", "195016:370-195016:422", "195016:425-195016:560",
"195016:563-195016:569", "195099:70-195099:144", "195099:147-195099:186", "195099:189-195099:208", "195099:211-195099:224",
"195099:227-195099:248", "195109:98-195109:241", "195112:1-195112:12", "195112:15-195112:26", "195113:1-195113:209",
"195113:212-195113:388", "195113:391-195113:403", "195113:406-195113:419", "195113:422-195113:492", "195113:495-195113:579",
"195114:1-195114:69", "195114:72-195114:103", "195115:1-195115:7", "195115:10-195115:22", "195147:132-195147:282",
"195147:285-195147:294", "195147:297-195147:331", "195147:334-195147:363", "195147:366-195147:442", "195147:445-195147:536",
"195147:539-195147:559", "195163:72-195163:138", "195163:140-195163:224", "195163:227-195163:240", "195163:243",
"195163:246-195163:347", "195164:1-195164:64", "195165:1-195165:4", "195165:7-195165:41", "195165:44-195165:54",
"195165:56-195165:153", "195165:156-195165:260", "195165:263-195165:266", "195251:1-195251:131", "195251:134-195251:137",
"195251:140-195251:152", "195251:154-195251:165", "195251:167-195251:242", "195303:109-195303:191", "195303:194-195303:277",
"195303:280-195303:310", "195303:312-195303:316", "195303:318-195303:409", "195304:1-195304:3", "195304:6-195304:22",
"195304:27-195304:80", "195304:83-195304:100", "195304:103-195304:154", "195304:157-195304:341", "195304:344-195304:588",
"195304:590-195304:727", "195304:729-195304:1003", "195304:1006-195304:1079", "195304:1083-195304:1140", "195304:1143-195304:1229",
"195378:90-195378:117", "195378:120-195378:127", "195378:130-195378:185", "195378:187-195378:204", "195378:206-195378:302",
"195378:305-195378:542", "195378:544-195378:565", "195378:567-195378:645", "195378:647-195378:701", "195378:703-195378:734",
"195378:737-195378:1120", "195378:1122-195378:1133", "195390:1", "195390:4-195390:27", "195390:30-195390:145",
"195390:147-195390:183", "195390:186-195390:187", "195390:190-195390:208", "195390:210-195390:213", "195390:215-195390:400",
"195396:49-195396:55", "195396:58-195396:63", "195396:66-195396:131", "195397:1-195397:10", "195397:12-195397:89",
"195397:92-195397:120", "195397:123-195397:141", "195397:143-195397:251", "195397:253", "195397:256-195397:475",
"195397:478-195397:525", "195397:527-195397:608", "195397:611-195397:776", "195397:779-195397:970", "195397:972-195397:1121",
"195397:1123-195397:1181", "195397:1184-195397:1198", "195397:1200-195397:1209", "195398:3-195398:137", "195398:139-195398:494",
"195398:497-195398:585", "195398:587-195398:817", "195398:820-195398:824", "195398:827-195398:1225", "195398:1228-195398:1307",
"195398:1309-195398:1712", "195398:1721-195398:1736", "195398:1741-195398:1752", "195398:1767-195398:1795", "195399:1-195399:192",
"195399:194-195399:382", "195530:1-195530:80", "195530:82-195530:104", "195530:107-195530:156", "195530:159-195530:300",
"195530:302-195530:405", "195540:68-195540:123", "195540:126-195540:137", "195540:140-195540:283", "195540:286-195540:319",
"195551:91-195551:106", "195552:1-195552:21", "195552:23-195552:27", "195552:30-195552:147", "195552:149-195552:155",
"195552:158-195552:182", "195552:185-195552:287", "195552:290-195552:349", "195552:352-195552:469", "195552:472-195552:815",
"195552:818-195552:823", "195552:825-195552:883", "195552:885-195552:1152", "195552:1154-195552:1300", "195552:1303-195552:1789",
"195633:40-195633:42", "195647:1-195647:41", "195649:1-195649:69", "195649:72-195649:151", "195649:154-195649:181",
"195649:183-195649:247", "195655:1-195655:129", "195655:131-195655:184", "195655:186-195655:260", "195655:263-195655:350",
"195655:353-195655:446", "195655:448-195655:483", "195655:485-195655:498", "195656:1-195656:362", "195658:1-195658:37",
"195658:40-195658:362", "195658:364-195658:382", "195658:384-195658:386", "195749:1-195749:8", "195749:10-195749:33",
"195749:36-195749:131", "195757:1-195757:82", "195757:85-195757:115", "195757:118-195757:161", "195757:163-195757:206",
"195758:1-195758:18", "195774:1-195774:13", "195774:16-195774:137", "195774:139-195774:151", "195774:154-195774:162",
"195774:164-195774:256", "195774:258-195774:276", "195774:279-195774:362", "195774:365-195774:466", "195774:469-195774:618",
"195774:620-195774:649", "195774:651-195774:830", "195775:1-195775:57", "195775:60-195775:100", "195775:103-195775:170",
"195776:1-195776:63", "195776:66-195776:283", "195776:286-195776:337", "195776:340-195776:399", "195776:401-195776:409",
"195776:411-195776:477", "195841:74-195841:85", "195868:1-195868:88", "195868:90-195868:107", "195868:110-195868:205",
"195915:1-195915:109", "195915:111-195915:275", "195915:278-195915:390", "195915:393-195915:417", "195915:419-195915:429",
"195915:432-195915:505", "195915:507-195915:747", "195915:749-195915:785", "195915:787-195915:828", "195915:830-195915:850",
"195916:1-195916:16", "195916:19-195916:68", "195916:71-195916:212", "195917:1-195917:4", "195918:1-195918:44",
"195918:46", "195918:49-195918:64", "195919:1-195919:15", "195923:1-195923:14", "195925:1-195925:12",
"195926:1", "195926:3-195926:19", "195926:21-195926:34", "195929:1-195929:29", "195930:1-195930:77",
"195930:80-195930:176", "195930:179-195930:526", "195930:529-195930:596", "195937:1-195937:28", "195937:31-195937:186",
"195937:188-195937:396", "195947:23-195947:62", "195947:64-195947:88", "195948:51-195948:116", "195948:119-195948:144",
"195948:147", "195948:150-195948:352", "195948:355-195948:369", "195948:372-195948:402", "195948:404-195948:500",
"195948:503-195948:540", "195948:543-195948:565", "195948:567-195948:602", "195948:605-195948:615", "195950:1-195950:71",
"195950:73-195950:138", "195950:141-195950:169", "195950:172-195950:332", "195950:335-195950:350", "195950:353-195950:382",
"195950:385-195950:421", "195950:424-195950:450", "195950:453-195950:483", "195950:485-195950:616", "195950:619-195950:715",
"195950:718-195950:787", "195950:789-195950:800", "195950:803-195950:829", "195950:831", "195950:833-195950:1587",
"195963:54-195963:58", "195970:44-195970:49", "195970:51-195970:85", "196019:54-196019:68", "196027:1-196027:55",
"196027:58-196027:119", "196027:121-196027:155", "196027:158-196027:186", "196046:12-196046:40", "196047:1-196047:64",
"196047:70-196047:75", "196048:1-196048:44", "196048:46-196048:48", "196197:58-196197:122", "196197:125-196197:179",
"196197:181-196197:311", "196197:313-196197:516", "196197:519-196197:562", "196199:1-196199:33", "196199:36-196199:83",
"196199:86-196199:118", "196199:121-196199:147", "196199:150-196199:237", "196199:239-196199:285", "196199:287-196199:534",
"196200:1-196200:68", "196202:3-196202:61", "196202:64-196202:108", "196203:1-196203:102", "196203:107-196203:117",
"196218:55-196218:199", "196218:201-196218:224", "196218:226-196218:393", "196218:396-196218:494", "196218:496-196218:741",
"196218:744-196218:752", "196218:754-196218:757", "196218:759-196218:820", "196239:1-196239:59", "196239:62-196239:154",
"196239:157-196239:272", "196239:274-196239:373", "196239:375-196239:432", "196239:435-196239:465", "196239:468-196239:647",
"196239:650-196239:706", "196239:709-196239:1025", "196249:63-196249:77", "196249:80-196249:99", "196250:1-196250:2",
"196250:5-196250:265", "196250:267-196250:426", "196252:1-196252:35", "196334:59-196334:111", "196334:113-196334:123",
"196334:126-196334:132", "196334:135-196334:167", "196334:170-196334:193", "196334:196-196334:257", "196334:259-196334:267",
"196334:270-196334:289", "196334:292-196334:342", "196349:65-196349:84", "196349:86-196349:154", "196349:157-196349:244",
"196349:246-196349:258", "196357:1-196357:4", "196359:1-196359:2", "196362:1-196362:88", "196363:1-196363:8",
"196363:11-196363:34", "196364:1-196364:93", "196364:96-196364:136", "196364:139-196364:365", "196364:368-196364:380",
"196364:382-196364:601", "196364:603-196364:795", "196364:798-196364:884", "196364:887-196364:1196", "196364:1199-196364:1200",
"196364:1203-196364:1299", "196437:1", "196437:3-196437:74", "196437:77-196437:169", "196438:1-196438:181",
"196438:184-196438:699", "196438:701-196438:1269", "196452:82-196452:112", "196452:114-196452:490", "196452:493-196452:586",
"196452:589-196452:618", "196452:622-196452:668", "196452:671-196452:716", "196452:718-196452:726", "196452:728-196452:956",
"196452:958-196452:1004", "196452:1007-196452:1091", "196453:1-196453:74", "196453:77-196453:145", "196453:147-196453:669",
"196453:673-196453:714", "196453:717-196453:799", "196453:802-196453:988", "196453:991-196453:1178", "196453:1180",
"196453:1182-196453:1248", "196453:1250-196453:1528", "196453:1531-196453:1647", "196495:114-196495:180", "196495:182-196495:272",
"196509:1-196509:68", "196531:62-196531:150", "196531:152-196531:253", "196531:256-196531:285", "196531:288-196531:302",
"196531:305-196531:422", "196531:425-196531:440", "198049:1-198049:11", "198049:14-198049:57", "198050:2-198050:155",
"198063:1-198063:37", "198063:40-198063:72", "198063:74-198063:124", "198063:127-198063:294", "198116:36-198116:52",
"198116:54-198116:55", "198116:58-198116:96", "198116:98-198116:112", "198207:1-198207:97", "198208:1-198208:92",
"198208:94-198208:134", "198208:137-198208:147", "198208:150-198208:209", "198210:1-198210:221", "198212:1-198212:574",
"198213:1-198213:107", "198215:1-198215:12", "198230:1-198230:33", "198230:36-198230:57", "198230:60-198230:235",
"198230:237-198230:324", "198230:326-198230:388", "198230:390-198230:459", "198230:462-198230:625", "198230:627-198230:651",
"198230:653-198230:805", "198230:808-198230:811", "198230:814-198230:948", "198230:950-198230:1090", "198230:1093-198230:1103",
"198230:1106-198230:1332", "198230:1335-198230:1380", "198249:1-198249:7", "198269:3-198269:198", "198271:1-198271:91",
"198271:93-198271:170", "198271:173-198271:299", "198271:301-198271:450", "198271:453-198271:513", "198271:516-198271:616",
"198271:619-198271:628", "198271:631-198271:791", "198271:793-198271:797", "198272:1-198272:185", "198272:188-198272:245",
"198272:248-198272:314", "198272:317-198272:433", "198272:436-198272:444", "198272:454-198272:620", "198346:44-198346:47",
"198372:57-198372:110", "198485:68-198485:109", "198485:112-198485:134", "198485:136-198485:181", "198485:184-198485:239",
"198487:1-198487:145", "198487:147-198487:514", "198487:517-198487:668", "198487:671-198487:733", "198487:736-198487:757",
"198487:760-198487:852", "198487:854-198487:994", "198487:997-198487:1434", "198487:1437-198487:1610", "198522:65-198522:144",
"198522:147-198522:208", "198941:102-198941:189", "198941:191-198941:220", "198941:222-198941:241", "198941:243-198941:249",
"198941:252-198941:284", "198954:108-198954:156", "198954:159-198954:277", "198955:1-198955:45", "198955:47-198955:50",
"198955:53-198955:220", "198955:223-198955:269", "198955:271-198955:284", "198955:286-198955:338", "198955:340-198955:580",
"198955:583-198955:742", "198955:744-198955:910", "198955:913-198955:946", "198955:949-198955:1162", "198955:1165-198955:1169",
"198955:1172-198955:1182", "198955:1185-198955:1188", "198955:1190-198955:1246", "198955:1249-198955:1304", "198955:1306-198955:1467",
"198955:1470-198955:1485", "198955:1487-198955:1552", "198969:58-198969:81", "198969:84-198969:247", "198969:249-198969:323",
"198969:325-198969:365", "198969:367-198969:413", "198969:416-198969:466", "198969:468-198969:643", "198969:646-198969:918",
"198969:920-198969:1011", "198969:1013-198969:1175", "198969:1178-198969:1236", "198969:1239-198969:1253", "199008:75-199008:93",
"199008:95-199008:121", "199008:124-199008:208", "199008:211-199008:331", "199008:333-199008:373", "199008:376-199008:482",
"199008:485-199008:605", "199008:608-199008:644", "199011:1-199011:11", "199011:13-199011:24", "199021:59-199021:88",
"199021:91-199021:128", "199021:130-199021:133", "199021:136-199021:309", "199021:311-199021:333", "199021:335-199021:410",
"199021:414-199021:469", "199021:471-199021:533", "199021:535-199021:563", "199021:565-199021:1223", "199021:1226-199021:1479",
"199021:1481-199021:1494", "199318:65-199318:138", "199319:1-199319:7", "199319:9-199319:223", "199319:226-199319:277",
"199319:280-199319:348", "199319:351-199319:358", "199319:360-199319:422", "199319:424-199319:490", "199319:492-199319:493",
"199319:496-199319:612", "199319:615-199319:642", "199319:645-199319:720", "199319:723-199319:728", "199319:730-199319:731",
"199319:734-199319:741", "199319:744-199319:752", "199319:754-199319:943", "199319:945-199319:997", "199336:1-199336:33",
"199336:36-199336:122", "199336:125-199336:231", "199336:234-199336:614", "199336:617-199336:789", "199336:791-199336:977",
"199356:95-199356:121", "199356:123-199356:168", "199356:171-199356:205", "199356:208-199356:231", "199409:25-199409:54",
"199409:56-199409:89", "199409:91-199409:204", "199409:206-199409:290", "199409:293-199409:583", "199409:586-199409:602",
"199409:604-199409:1014", "199409:1016-199409:1300", "199428:61-199428:197", "199428:200-199428:210", "199428:212-199428:382",
"199428:387-199428:414", "199428:417-199428:436", "199428:439-199428:530", "199428:533-199428:648", "199429:1-199429:28",
"199429:30-199429:36", "199429:39-199429:55", "199429:58-199429:101", "199429:103-199429:148", "199429:151-199429:154",
"199435:63-199435:106", "199435:109-199435:261", "199435:263-199435:579", "199435:582-199435:654", "199435:656-199435:696",
"199435:699-199435:1034", "199435:1037-199435:1144", "199435:1147-199435:1327", "199435:1330-199435:1411", "199435:1414-199435:1431",
"199435:1434-199435:1441", "199435:1444-199435:1487", "199435:1489-199435:1610", "199436:1-199436:113", "199436:116-199436:254",
"199436:257-199436:675", "199436:678-199436:748", "199564:1-199564:3", "199569:1-199569:2", "199569:5-199569:136",
"199569:139-199569:367", "199570:1-199570:17", "199571:1-199571:184", "199571:186-199571:360", "199571:363-199571:561",
"199572:1-199572:317", "199573:1-199573:22", "199574:1-199574:53", "199574:56-199574:153", "199574:156-199574:246",
"199608:60-199608:157", "199608:159-199608:209", "199608:211-199608:341", "199608:344-199608:390", "199608:392-199608:461",
"199608:464-199608:800", "199608:802-199608:1064", "199608:1067-199608:1392", "199608:1395-199608:1630", "199608:1633-199608:1904",
"199608:1907-199608:1962", "199608:1965-199608:2252", "199608:2255-199608:2422", "199698:72-199698:94", "199698:96-199698:127",
"199699:1-199699:154", "199699:157-199699:169", "199699:172-199699:410", "199699:412-199699:756", "199703:1-199703:94",
"199703:97-199703:482", "199703:485-199703:529", "199739:66-199739:133", "199751:103-199751:119", "199751:121-199751:127",
"199752:1-199752:141", "199752:144-199752:180", "199752:182-199752:186", "199752:188-199752:211", "199752:214-199752:322",
"199753:1-199753:59", "199754:1-199754:203", "199754:205-199754:325", "199754:328-199754:457", "199754:459-199754:607",
"199754:610-199754:613", "199754:615-199754:806", "199754:808-199754:998", "199804:78-199804:88", "199804:90-199804:181",
"199804:183-199804:235", "199804:238-199804:278", "199804:281-199804:290", "199804:292-199804:519", "199804:522-199804:575",
"199804:577-199804:628", "199804:631-199804:632", "199812:70-199812:141", "199812:144-199812:163", "199812:182-199812:211",
"199812:214-199812:471", "199812:474-199812:505", "199812:508-199812:557", "199812:560-199812:571", "199812:574-199812:623",
"199812:626-199812:751", "199812:754-199812:796", "199832:58-199832:62", "199832:65-199832:118", "199832:121-199832:139",
"199832:142-199832:286", "199833:1-199833:13", "199833:16-199833:103", "199833:105-199833:250", "199833:253-199833:493",
"199833:496-199833:794", "199833:797-199833:1032", "199833:1034-199833:1185", "199833:1188-199833:1239", "199834:1-199834:9",
"199834:11", "199834:14-199834:18", "199834:21-199834:54", "199834:56-199834:57", "199834:62-199834:65",
"199834:69-199834:284", "199834:286-199834:503", "199834:505-199834:942", "199862:59-199862:141", "199864:1-199864:87",
"199864:89", "199864:92-199864:103", "199864:106-199864:372", "199864:374-199864:385", "199864:388-199864:486",
"199867:1-199867:134", "199867:136-199867:172", "199867:174-199867:218", "199867:221-199867:320", "199868:1-199868:21",
"199875:70-199875:150", "199875:152-199875:334", "199876:1-199876:19", "199876:22-199876:95", "199876:97-199876:249",
"199876:252-199876:272", "199876:274-199876:340", "199876:343-199876:362", "199876:365-199876:376", "199877:1-199877:173",
"199877:175-199877:605", "199877:607-199877:701", "199877:703-199877:871", "199960:72-199960:139", "199960:141-199960:197",
"199960:204-199960:232", "199960:235-199960:363", "199960:365-199960:367", "199960:370-199960:380", "199960:383-199960:459",
"199960:461-199960:466", "199960:469-199960:485", "199961:1-199961:211", "199961:213-199961:287", "199967:60-199967:120",
"199967:122-199967:170", "199967:172-199967:198", "199973:73-199973:89", "200041:62-200041:83", "200041:85-200041:157",
"200041:162-200041:274", "200041:277-200041:318", "200041:321-200041:335", "200041:337-200041:386", "200041:388-200041:389",
"200041:392-200041:400", "200041:402-200041:568", "200041:571-200041:593", "200041:595-200041:646", "200041:649-200041:728",
"200041:731-200041:860", "200041:862-200041:930", "200041:932-200041:1096", "200042:1-200042:110", "200042:112-200042:536",
"200049:1-200049:177", "200075:76-200075:139", "200075:142-200075:232", "200075:256-200075:326", "200075:329-200075:422",
"200075:425-200075:431", "200075:434-200075:500", "200075:502-200075:605", "200091:67", "200091:70-200091:151",
"200091:154-200091:172", "200091:174-200091:187", "200091:190-200091:196", "200091:199-200091:201", "200091:204-200091:425",
"200091:428-200091:535", "200091:537-200091:607", "200091:610-200091:879", "200091:881-200091:943", "200091:946-200091:999",
"200091:1001-200091:1025", "200091:1027-200091:1132", "200091:1135-200091:1339", "200091:1341-200091:1433", "200091:1435-200091:1450",
"200091:1453-200091:1523", "200091:1526-200091:1664", "200091:1667-200091:1680", "200091:1683-200091:1710", "200152:74-200152:116",
"200160:52-200160:68", "200161:1-200161:97", "200161:100-200161:112", "200174:81-200174:84", "200177:1-200177:56",
"200178:1-200178:38", "200180:1-200180:18", "200186:1-200186:3", "200186:6-200186:24", "200188:1-200188:24",
"200188:27-200188:28", "200188:31-200188:76", "200188:79-200188:271", "200188:274-200188:352", "200190:1-200190:4",
"200190:6-200190:76", "200190:79-200190:143", "200190:146-200190:159", "200190:162-200190:256", "200190:258-200190:321",
"200190:324-200190:401", "200190:403-200190:453", "200190:456-200190:457", "200190:460-200190:565", "200190:567-200190:588",
"200190:591", "200190:593-200190:595", "200190:597-200190:646", "200190:649-200190:878", "200229:1-200229:33",
"200229:41-200229:219", "200229:222-200229:244", "200229:247-200229:290", "200229:293-200229:624", "200229:627-200229:629",
"200243:69-200243:103", "200243:106-200243:139", "200244:3-200244:304", "200244:307-200244:442", "200244:445-200244:507",
"200244:510-200244:619", "200245:1-200245:103", "200245:105-200245:128", "200245:131-200245:248", "200245:251-200245:357",
"200368:72-200368:180", "200369:1-200369:5", "200369:8-200369:61", "200369:64-200369:360", "200369:363-200369:439",
"200369:441-200369:578", "200369:580-200369:603", "200369:606-200369:684", "200369:686", "200381:8-200381:15",
"200381:18-200381:36", "200381:38-200381:89", "200381:91-200381:195", "200466:134-200466:274", "200473:96-200473:157",
"200473:159-200473:224", "200473:226-200473:304", "200473:306-200473:469", "200473:472-200473:524", "200473:527-200473:542",
"200473:545-200473:619", "200473:622-200473:688", "200473:691-200473:730", "200473:733-200473:738", "200473:740-200473:1324",
"200491:87-200491:107", "200491:110-200491:149", "200491:152-200491:157", "200491:160-200491:197", "200491:199-200491:237",
"200491:240-200491:270", "200491:273", "200491:276-200491:334", "200491:336-200491:360", "200491:363-200491:419",
"200515:97-200515:183", "200519:1-200519:111", "200519:114-200519:126", "200519:129-200519:136", "200519:138-200519:224",
"200519:227-200519:258", "200519:261-200519:350", "200519:353-200519:611", "200519:613-200519:747", "200525:77-200525:149",
"200525:151-200525:164", "200525:166-200525:190", "200525:193-200525:276", "200525:278-200525:311", "200525:314-200525:464",
"200525:467-200525:488", "200525:491-200525:674", "200525:676-200525:704", "200525:707-200525:755", "200525:757-200525:895",
"200525:898-200525:937", "200525:939-200525:990", "200532:1-200532:37", "200599:75-200599:129", "200599:132-200599:137",
"200600:1-200600:183", "200600:186-200600:299", "200600:302-200600:313", "200600:316-200600:324", "200600:327-200600:334",
"200600:336-200600:397", "200600:399-200600:417", "200600:420-200600:526", "200600:529-200600:591", "200600:594-200600:596",
"200600:598-200600:609", "200600:611-200600:660", "200600:663-200600:823", "200600:826-200600:900", "200600:902-200600:943",
"200600:945-200600:1139", "200961:1-200961:115", "200976:94-200976:164", "200990:75-200990:143", "200991:1-200991:42",
"200991:44", "200991:47-200991:80", "200991:83-200991:175", "200991:178-200991:181", "200991:184-200991:252",
"200991:255-200991:632", "200991:635-200991:916", "200991:918-200991:1017", "200991:1019-200991:1048", "200992:1-200992:405",
"200992:408-200992:434", "200992:436-200992:581", "201062:78-201062:268", "201097:83-201097:136", "201097:138-201097:245",
"201097:248-201097:300", "201097:303-201097:370", "201097:372-201097:429", "201097:432-201097:497", "201114:1-201114:14",
"201115:1-201115:73", "201159:70-201159:211", "201164:1-201164:8", "201164:10-201164:94", "201164:96-201164:125",
"201164:128-201164:178", "201164:180-201164:198", "201164:200-201164:271", "201164:274-201164:416", "201164:418",
"201168:1-201168:37", "201168:39-201168:275", "201168:278-201168:481", "201168:483-201168:558", "201168:560-201168:730",
"201173:1-201173:194", "201173:197-201173:586", "201174:1-201174:214", "201174:216-201174:263", "201174:265-201174:339",
"201174:342-201174:451", "201191:75-201191:98", "201191:100-201191:216", "201191:218-201191:389", "201191:392-201191:492",
"201191:494-201191:506", "201191:509-201191:585", "201191:587-201191:594", "201191:597-201191:607", "201191:609-201191:794",
"201191:796-201191:838", "201191:841-201191:974", "201191:977-201191:1105", "201191:1108-201191:1117", "201191:1120-201191:1382",
"201191:1385-201191:1386", "201193:1-201193:19", "201196:1-201196:238", "201196:241-201196:278", "201196:286-201196:299",
"201196:302-201196:338", "201196:341-201196:515", "201196:518-201196:720", "201196:723-201196:789", "201196:803-201196:841",
"201197:1-201197:23", "201202:1-201202:437", "201229:1-201229:5", "201229:8-201229:26", "201229:29-201229:73",
"201278:62-201278:163", "201278:166-201278:229", "201278:232-201278:256", "201278:259-201278:316", "201278:318-201278:595",
"201278:598-201278:938", "201278:942-201278:974", "201278:976-201278:1160", "201278:1163-201278:1304", "201278:1306-201278:1793",
"201278:1796-201278:1802", "201278:1805-201278:1906", "201278:1909-201278:1929", "201278:1932-201278:2174", "201554:70-201554:86",
"201554:88-201554:114", "201554:116-201554:126", "201602:76-201602:81", "201602:83-201602:194", "201602:196-201602:494",
"201602:496-201602:614", "201602:617-201602:635", "201611:87-201611:145", "201611:149-201611:182", "201611:184-201611:186",
"201613:1-201613:42", "201613:44-201613:49", "201613:53-201613:210", "201613:213-201613:215", "201613:218-201613:225",
"201613:228-201613:646", "201624:83-201624:92", "201624:95-201624:240", "201624:270", "201625:211-201625:312",
"201625:315-201625:348", "201625:351-201625:416", "201625:418-201625:588", "201625:591-201625:671", "201625:673-201625:758",
"201625:760-201625:791", "201625:793-201625:944", "201657:77-201657:93", "201657:95-201657:108", "201657:110-201657:118",
"201658:1-201658:19", "201658:21-201658:118", "201658:121-201658:136", "201658:139-201658:288", "201668:78-201668:157",
"201669:1-201669:9", "201669:12-201669:136", "201669:139-201669:141", "201669:143-201669:165", "201671:1-201671:120",
"201671:122-201671:174", "201671:177-201671:462", "201671:464-201671:482", "201671:485-201671:499", "201671:501-201671:545",
"201671:547-201671:571", "201671:574-201671:614", "201671:617-201671:766", "201671:768-201671:896", "201671:899-201671:911",
"201671:914-201671:1007", "201678:1-201678:120", "201679:1-201679:110", "201679:112-201679:241", "201679:244-201679:298",
"201679:302-201679:321", "201679:324-201679:461", "201679:463-201679:483", "201692:78-201692:81", "201692:83-201692:179",
"201705:65-201705:73", "201705:75-201705:109", "201705:111-201705:187", "201706:1-201706:62", "201707:1-201707:23",
"201707:26-201707:42", "201707:45-201707:115", "201707:118-201707:130", "201707:133-201707:160", "201707:163-201707:276",
"201707:279-201707:471", "201707:473-201707:511", "201707:514-201707:545", "201707:547-201707:570", "201707:572-201707:622",
"201707:625-201707:735", "201707:738-201707:806", "201707:809-201707:876", "201707:879-201707:964", "201708:1-201708:79",
"201718:58-201718:108", "201727:67-201727:185", "201729:6-201729:20", "201729:22-201729:75", "201729:77-201729:126",
"201729:129-201729:154", "201729:156-201729:216", "201729:219-201729:244", "201794:58-201794:94", "201802:68-201802:209",
"201802:211-201802:214", "201802:216-201802:220", "201802:223-201802:288", "201802:290-201802:296", "201816:1-201816:72",
"201816:74-201816:105", "201816:107-201816:157", "201817:1-201817:274", "201818:1", "201819:1-201819:94",
"201819:96-201819:241", "201824:1-201824:139", "201824:141-201824:176", "201824:179-201824:286", "201824:289-201824:492",
"202012:98-202012:121", "202012:126-202012:131", "202013:1-202013:2", "202013:5-202013:35", "202013:38-202013:57",
"202014:1-202014:5", "202014:8-202014:14", "202014:16-202014:18", "202014:20-202014:77", "202014:79-202014:102",
"202014:104-202014:174", "202014:177-202014:190", "202014:192-202014:196", "202016:1-202016:48", "202016:51-202016:134",
"202016:137-202016:177", "202016:179-202016:743", "202016:745-202016:831", "202016:834-202016:890", "202016:893-202016:896",
"202016:898-202016:932", "202016:934-202016:1010", "202044:84-202044:101", "202044:104-202044:266", "202044:268-202044:461",
"202044:463-202044:466", "202045:1-202045:30", "202045:33-202045:72", "202045:75-202045:528", "202045:531-202045:601",
"202045:603-202045:785", "202045:788-202045:809", "202045:822-202045:823", "202054:6-202054:266", "202054:268-202054:489",
"202054:492-202054:605", "202054:608-202054:631", "202060:76-202060:142", "202060:144-202060:154", "202060:156-202060:244",
"202060:246-202060:497", "202060:499-202060:642", "202060:644-202060:682", "202060:684-202060:743", "202060:746-202060:936",
"202074:66-202074:174", "202075:1-202075:18", "202075:21-202075:187", "202075:189-202075:214", "202075:217-202075:247",
"202075:250-202075:342", "202075:345-202075:406", "202075:409-202075:497", "202075:500-202075:537", "202075:539",
"202075:542-202075:560", "202075:562-202075:615", "202075:618-202075:628", "202084:83-202084:156", "202084:159-202084:177",
"202084:179-202084:180", "202084:182-202084:239", "202087:1-202087:25", "202087:28-202087:208", "202087:210-202087:357",
"202087:359-202087:652", "202087:655-202087:853", "202087:856-202087:1093", "202088:1-202088:286", "202093:1-202093:104",
"202093:107-202093:320", "202093:322-202093:360", "202116:59-202116:60", "202178:67-202178:78", "202178:80-202178:88",
"202178:91-202178:177", "202178:180-202178:186", "202178:188-202178:337", "202178:340-202178:377", "202178:379-202178:425",
"202178:428-202178:475", "202178:478-202178:548", "202178:551-202178:717", "202178:720-202178:965", "202178:967-202178:1444",
"202178:1447-202178:1505", "202178:1508-202178:1519", "202178:1522-202178:1555", "202205:94-202205:114", "202209:1-202209:48",
"202209:51-202209:142", "202237:39-202237:128", "202237:131", "202237:134-202237:219", "202237:222-202237:235",
"202237:238-202237:275", "202237:277-202237:289", "202237:291-202237:316", "202237:319-202237:419", "202237:422-202237:538",
"202237:540-202237:936", "202237:939-202237:950", "202237:952-202237:976", "202237:979-202237:1079", "202272:76-202272:112",
"202272:115-202272:141", "202272:144-202272:185", "202272:188-202272:205", "202272:208-202272:305", "202272:307-202272:313",
"202272:315-202272:371", "202272:436-202272:480", "202272:483-202272:555", "202272:558-202272:577", "202272:579-202272:683",
"202272:686-202272:705", "202272:707-202272:740", "202272:742-202272:890", "202272:937-202272:1295", "202272:1299-202272:1481",
"202299:68-202299:84", "202299:87-202299:141", "202299:143-202299:193", "202299:196-202299:358", "202299:361-202299:379",
"202299:382-202299:414", "202299:416-202299:452", "202299:455-202299:555", "202305:1-202305:89", "202305:92-202305:130",
"202305:133-202305:323", "202314:67-202314:104", "202314:107-202314:265", "202314:268-202314:278", "202328:46-202328:89",
"202328:92-202328:156", "202328:158-202328:276", "202328:278-202328:291", "202328:294-202328:434", "202328:437-202328:460",
"202328:463-202328:586", "202328:588-202328:610", "202328:612-202328:614", "202333:1-202333:235", "202389:81-202389:182",
"202389:185-202389:190", "202389:192-202389:199", "202469:87-202469:158", "202469:160-202469:174", "202469:177-202469:352",
"202472:1-202472:96", "202472:99-202472:112", "202477:1-202477:129", "202477:131-202477:150", "202478:1-202478:177",
"202478:180-202478:183", "202478:186-202478:219", "202478:222-202478:360", "202478:362-202478:506", "202478:509-202478:531",
"202478:534-202478:718", "202478:720-202478:927", "202478:929-202478:973", "202478:975-202478:1029", "202478:1031-202478:1186",
"202478:1189-202478:1212", "202478:1215-202478:1248", "202504:77-202504:96", "202504:99-202504:133", "202504:135-202504:182",
"202504:184-202504:211", "202504:213-202504:241", "202504:243-202504:392", "202504:395-202504:527", "202504:529-202504:617",
"202504:620-202504:715", "202504:718-202504:763", "202504:766-202504:1172", "202504:1174-202504:1247", "202504:1250-202504:1471",
"202504:1474-202504:1679", "202504:1682-202504:1704", "202972:1-202972:30", "202972:33-202972:184", "202972:186-202972:290",
"202972:292-202972:295", "202972:298-202972:371", "202972:374-202972:429", "202972:431-202972:544", "202973:1-202973:234",
"202973:237-202973:305", "202973:308-202973:437", "202973:439-202973:530", "202973:532-202973:541", "202973:544-202973:552",
"202973:555-202973:851", "202973:853-202973:1408", "203002:77-203002:128", "203002:130-203002:141", "203002:144-203002:207",
"203002:209-203002:267", "203002:270-203002:360", "203002:362-203002:501", "203002:504-203002:641", "203002:643-203002:669",
"203002:671", "203002:674-203002:717", "203002:720-203002:1034", "203002:1037-203002:1070", "203002:1073-203002:1370",
"203002:1372-203002:1392", "203002:1395-203002:1410", "203002:1413-203002:1596", "203709:1-203709:121", "203742:1-203742:29",
"203777:103-203777:113", "203830:82-203830:182", "203832:1-203832:11", "203833:1-203833:70", "203833:73-203833:128",
"203834:1-203834:40", "203835:1-203835:70", "203835:73-203835:358", "203853:122-203853:222", "203894:82-203894:272",
"203894:275-203894:477", "203894:480-203894:902", "203894:905-203894:1319", "203909:79-203909:113", "203909:116-203909:117",
"203909:120-203909:140", "203909:143-203909:382", "203912:1-203912:306", "203912:308-203912:566", "203912:569-203912:609",
"203912:611-203912:698", "203912:701-203912:820", "203912:823-203912:865", "203912:867-203912:1033", "203912:1035-203912:1321",
"203987:1-203987:9", "203987:12-203987:241", "203987:243-203987:339", "203987:342-203987:781", "203987:784-203987:1014",
"203992:1-203992:15", "203994:1-203994:56", "203994:59-203994:136", "203994:139-203994:304", "203994:306-203994:342",
"203994:344-203994:425", "204100:117-204100:139", "204101:1-204101:74", "204113:82-204113:96", "204113:98-204113:102",
"204113:105-204113:127", "204113:129-204113:191", "204113:194-204113:258", "204113:261-204113:327", "204113:329-204113:388",
"204113:390-204113:400", "204113:402-204113:583", "204113:585-204113:690", "204114:1-204114:358", "204238:23-204238:52",
"204238:55", "204250:92-204250:118", "204250:121-204250:177", "204250:179-204250:285", "204250:287-204250:336",
"204250:339-204250:400", "204250:403-204250:521", "204250:524-204250:543", "204250:546-204250:682", "204250:684-204250:801",
"204511:1-204511:56", "204541:5-204541:39", "204541:42", "204541:44-204541:139", "204541:142-204541:149",
"204541:151-204541:204", "204544:1-204544:11", "204544:13-204544:93", "204544:96-204544:195", "204544:197-204544:224",
"204544:226-204544:334", "204544:337-204544:426", "204552:1-204552:9", "204553:1-204553:51", "204553:53-204553:60",
"204553:63-204553:101", "204554:1-204554:5", "204554:7-204554:221", "204554:224-204554:455", "204554:458-204554:470",
"204554:472-204554:481", "204554:483-204554:514", "204555:1-204555:329", "204555:331-204555:334", "204563:91-204563:99",
"204563:102-204563:178", "204563:180-204563:219", "204563:222-204563:229", "204563:231-204563:364", "204563:366",
"204563:369-204563:470", "204563:473-204563:524", "204563:527-204563:571", "204564:1-204564:84", "204564:87-204564:89",
"204564:92-204564:159", "204564:161-204564:187", "204564:190-204564:191", "204564:193-204564:293", "204564:296-204564:315",
"204564:317-204564:340", "204564:343-204564:427", "204564:429-204564:434", "204564:437-204564:735", "204564:737-204564:855",
"204564:858-204564:1206", "204564:1209-204564:1248", "204564:1251-204564:1284", "204565:1-204565:48", "204566:1-204566:12",
"204567:1-204567:38", "204576:49-204576:192", "204576:195-204576:301", "204577:1-204577:46", "204577:49-204577:64",
"204577:67-204577:105", "204577:107-204577:170", "204577:173-204577:181", "204577:183-204577:193", "204577:196-204577:653",
"204577:656-204577:669", "204577:671-204577:740", "204577:742-204577:913", "204577:915-204577:1057", "204577:1059-204577:1115",
"204577:1117-204577:1282", "204599:73-204599:83", "204599:85-204599:94", "204599:97-204599:121", "204599:124-204599:125",
"204599:128-204599:173", "204599:175-204599:240", "204599:243-204599:245", "204599:248-204599:264", "204599:266-204599:292",
"204599:294-204599:334", "204601:1-204601:25", "204601:28-204601:62", "204601:65-204601:80", "204601:83-204601:89",
"204601:92-204601:290", "204601:292-204601:563", "204601:565-204601:591", "204601:593-204601:652", "204601:655-204601:780",
"204601:783-204601:812", "204601:814-204601:892", "204601:894-204601:984", "204601:986-204601:1003", "204601:1006-204601:1038",
"204601:1040-204601:1088", "204601:1091-204601:1102", "204601:1105-204601:1161", "204601:1164-204601:1250", "205086:95-205086:149",
"205111:88-205111:390", "205111:392-205111:441", "205111:444-205111:446", "205158:81-205158:289", "205158:292-205158:313",
"205158:315-205158:473", "205158:476-205158:591", "205158:594-205158:595", "205158:597-205158:612", "205158:615-205158:663",
"205158:665-205158:667", "205158:672-205158:685", "205158:687-205158:733", "205193:80-205193:109", "205193:111-205193:349",
"205193:352-205193:486", "205193:488-205193:650", "205193:652-205193:712", "205193:714-205193:902", "205217:1-205217:12",
"205217:16-205217:111", "205217:113-205217:171", "205217:174-205217:250", "205217:253-205217:318", "205233:94-205233:153",
"205236:1-205236:190", "205236:193-205236:207", "205236:209-205236:260", "205236:263-205236:331", "205236:334-205236:352",
"205238:1-205238:6", "205238:9-205238:199", "205238:202-205238:254", "205238:256-205238:304", "205238:306-205238:355",
"205238:358-205238:381", "205238:384-205238:596", "205238:598-205238:617", "205303:35-205303:54", "205303:90-205303:132",
"205303:135-205303:144", "205310:76-205310:306", "205310:309-205310:313", "205310:316", "205310:319-205310:321",
"205310:324-205310:457", "205310:460-205310:559", "205311:1-205311:85", "205311:88-205311:92", "205311:95-205311:183",
"205311:186-205311:395", "205311:397-205311:592", "205311:595-205311:910", "205311:913-205311:1260", "205339:71-205339:175",
"205339:178-205339:213", "205339:216-205339:230", "205339:233-205339:262", "205339:265-205339:404", "205344:1-205344:83",
"205344:86-205344:104", "205344:106-205344:359", "205344:362-205344:431", "205344:433-205344:949", "205344:951-205344:967",
"205344:969-205344:1127", "205344:1129-205344:1346", "205344:1348-205344:1586", "205515:82-205515:201", "205515:203-205515:216",
"205519:1-205519:47", "205519:50-205519:172", "205519:175-205519:367", "205519:370-205519:386", "205519:389-205519:472",
"205526:1-205526:269", "205526:272-205526:277", "205526:280-205526:332", "205614:1-205614:4", "205614:7-205614:40",
"205617:1-205617:29", "205617:32-205617:102", "205617:105-205617:123", "205617:125-205617:140", "205617:143-205617:264",
"205617:266-205617:448", "205617:451-205617:532", "205617:534-205617:547", "205618:1-205618:12", "205620:1-205620:175",
"205666:60-205666:119", "205666:122-205666:165", "205666:168-205666:259", "205666:261-205666:322", "205666:325-205666:578",
"205666:580-205666:594", "205666:597-205666:721", "205666:724-205666:739", "205667:1-205667:165", "205667:168-205667:282",
"205667:285-205667:318", "205667:321-205667:412", "205667:415-205667:689", "205667:692-205667:751", "205667:754-205667:774",
"205667:777-205667:1109", "205683:76-205683:82", "205683:85-205683:178", "205683:181-205683:198", "205683:201-205683:305",
"205690:1-205690:40", "205694:1-205694:205", "205694:208-205694:230", "205694:233-205694:347", "205694:350-205694:452",
"205694:455-205694:593", "205694:595-205694:890", "205718:49-205718:75", "205718:78-205718:97", "205718:100-205718:103",
"205718:105-205718:176", "205718:178-205718:338", "205718:341-205718:361", "205718:363-205718:524", "205718:527-205718:531",
"205718:534-205718:589", "205718:591-205718:694", "205774:1-205774:80", "205777:1-205777:8", "205781:1-205781:89",
"205781:91-205781:197", "205781:200-205781:502", "205826:80-205826:232", "205826:235-205826:303", "205826:306-205826:468",
"205833:84-205833:86", "205833:89-205833:121", "205833:123-205833:155", "205833:157-205833:165", "205833:167-205833:173",
"205833:176-205833:219", "205833:221-205833:267", "205833:270-205833:312", "205833:315-205833:346", "205833:350-205833:355",
"205833:360-205833:366", "205834:1-205834:12", "205834:14-205834:195", "205908:68-205908:200", "205908:202-205908:209",
"205921:22-205921:73", "205921:76-205921:268", "205921:271-205921:394", "205921:397-205921:401", "205921:410-205921:428",
"205921:431-205921:498", "205921:500-205921:571", "205921:574-205921:779", "205921:782-205921:853", "206066:89-206066:146",
"206088:86-206088:159", "206088:161-206088:178", "206088:181-206088:199", "206088:202-206088:286", "206102:83-206102:116",
"206102:120-206102:130", "206102:133-206102:208", "206102:211-206102:235", "206102:238-206102:246", "206102:249-206102:278",
"206102:281-206102:349", "206187:107-206187:169", "206187:172-206187:242", "206187:245-206187:288", "206187:290-206187:340",
"206187:343-206187:427", "206187:429-206187:435", "206187:437-206187:486", "206187:489-206187:569", "206187:571-206187:647",
"206187:649-206187:662", "206187:664-206187:708", "206188:1-206188:40", "206188:42-206188:55", "206199:1-206199:75",
"206199:77-206199:82", "206199:85-206199:114", "206207:82-206207:130", "206207:132-206207:176", "206207:179-206207:194",
"206207:196-206207:388", "206207:390-206207:419", "206207:422-206207:447", "206207:450-206207:569", "206207:572-206207:690",
"206208:1-206208:470", "206208:472-206208:518", "206210:11-206210:25", "206210:28-206210:275", "206210:277-206210:298",
"206210:300-206210:383", "206210:386-206210:466", "206243:62-206243:169", "206243:172-206243:196", "206243:199-206243:354",
"206243:357-206243:433", "206243:435-206243:448", "206243:451-206243:533", "206243:536-206243:554", "206243:557-206243:723",
"206243:726-206243:905", "206245:1-206245:62", "206246:1-206246:14", "206246:16-206246:237", "206246:240-206246:285",
"206246:288-206246:407", "206246:412-206246:676", "206246:678-206246:704", "206246:706-206246:785", "206246:787-206246:962",
"206246:965-206246:997", "206246:1000-206246:1198", "206246:1201-206246:1290", "206257:1-206257:29", "206258:1-206258:36",
"206258:39-206258:223", "206258:226-206258:249", "206302:1-206302:8", "206302:11-206302:33", "206302:36-206302:44",
"206302:47-206302:82", "206302:84-206302:108", "206302:110-206302:149", "206302:151-206302:186", "206302:189-206302:229",
"206302:231-206302:232", "206302:234-206302:241", "206302:243-206302:276", "206303:1-206303:19", "206303:23-206303:286",
"206304:1-206304:4", "206304:6-206304:62", "206331:91-206331:222", "206331:225-206331:312", "206389:88-206389:185",
"206389:187-206389:249", "206389:252-206389:272", "206389:275-206389:392", "206391:1-206391:55", "206391:57-206391:91",
"206401:69-206401:90", "206401:92-206401:194", "206401:197-206401:210", "206401:212-206401:249", "206401:251-206401:265",
"206401:267-206401:409", "206446:92-206446:141", "206446:143-206446:159", "206446:162-206446:205", "206446:208-206446:301",
"206446:304-206446:442", "206446:445", "206446:448-206446:474", "206446:476-206446:616", "206446:619-206446:872",
"206446:874-206446:910", "206446:912-206446:948", "206446:950-206446:989", "206446:992-206446:1030", "206446:1033-206446:1075",
"206446:1109-206446:1149", "206448:1-206448:143", "206448:145-206448:559", "206448:561-206448:1170", "206448:1173-206448:1231",
"206448:1235-206448:1237", "206466:24-206466:137", "206466:140-206466:277", "206466:280-206466:296", "206466:299-206466:303",
"206466:306-206466:405", "206466:407-206466:419", "206466:422-206466:477", "206466:480-206466:511", "206466:514-206466:676",
"206476:73-206476:129", "206476:133-206476:137", "206476:140-206476:141", "206476:143-206476:219", "206477:1-206477:14",
"206477:16-206477:31", "206477:33-206477:41", "206477:44-206477:51", "206477:53-206477:70", "206477:73-206477:75",
"206477:77-206477:89", "206477:91-206477:94", "206477:97-206477:115", "206477:118-206477:184", "206478:1-206478:27",
"206478:29-206478:136", "206478:139-206478:144", "206484:73-206484:95", "206484:98-206484:133", "206484:136-206484:163",
"206484:166-206484:186", "206484:189-206484:384", "206484:387-206484:463", "206484:465-206484:551", "206484:554",
"206484:556-206484:669", "206512:91-206512:123", "206512:125-206512:133", "206512:136-206512:161", "206512:163-206512:190",
"206512:193-206512:201", "206512:203-206512:212", "206512:214-206512:332", "206512:334-206512:584", "206512:587-206512:604",
"206512:607-206512:1005", "206512:1008-206512:1123", "206512:1126-206512:1163", "206512:1165-206512:1211", "206513:3-206513:39",
"206513:42-206513:188", "206513:191-206513:234", "206513:237-206513:238", "206513:241-206513:323", "206542:1-206542:115",
"206542:117-206542:165", "206542:168-206542:511", "206542:514-206542:547", "206542:550-206542:603", "206542:606-206542:668",
"206542:671-206542:727", "206542:730-206542:739", "206542:741-206542:833", "206550:77-206550:132", "206550:135-206550:144",
"206572:37-206572:47", "206573:2-206573:14", "206574:1-206574:87", "206575:1-206575:7", "206575:10",
"206575:12-206575:69", "206594:72-206594:107", "206594:110-206594:246", "206594:249-206594:281", "206595:1-206595:34",
"206595:37-206595:42", "206595:45-206595:193", "206596:1-206596:13", "206596:15-206596:220", "206596:222-206596:228",
"206596:231-206596:236", "206596:239-206596:292", "206596:295-206596:695", "206596:697-206596:728", "206596:730-206596:810",
"206598:1-206598:81", "206598:83-206598:103", "206598:105-206598:588", "206598:591-206598:657", "206598:659-206598:719",
"206605:1-206605:36", "206605:39-206605:78", "206744:49-206744:157", "206744:160-206744:192", "206744:195-206744:395",
"206744:398-206744:452", "206745:1-206745:81", "206745:84-206745:199", "206745:202-206745:224", "206745:227-206745:237",
"206745:240-206745:304", "206745:306-206745:318", "206745:321-206745:720", "206745:723-206745:796", "206745:799-206745:894",
"206745:897-206745:944", "206745:946-206745:1106", "206745:1108-206745:1524", "206745:1527-206745:1862", "206745:1988-206745:1996",
"206859:79-206859:210", "206859:212-206859:258", "206859:260-206859:323", "206859:325-206859:356", "206859:359-206859:609",
"206859:612-206859:681", "206859:684-206859:732", "206859:734-206859:768", "206859:771-206859:808", "206859:811-206859:827",
"206859:830-206859:848", "206866:1-206866:30", "206866:33-206866:113", "206866:115-206866:274", "206868:1-206868:3",
"206868:10-206868:16", "206869:1-206869:251", "206869:253-206869:271", "206869:274-206869:502", "206869:507-206869:520",
"206869:522-206869:566", "206869:568-206869:752", "206897:1-206897:34", "206897:38-206897:61", "206897:63-206897:102",
"206897:109", "206897:111-206897:112", "206897:114-206897:131", "206897:133-206897:137", "206901:1-206901:98",
"206906:1-206906:31", "206906:38-206906:94", "206906:96-206906:136", "206906:138-206906:139", "206906:142-206906:149",
"206906:151-206906:175", "206906:177-206906:206", "206940:1-206940:151", "206940:153", "206940:155-206940:298",
"206940:301-206940:382", "206940:384-206940:712", "206940:715-206940:803", "206940:805-206940:960", "206940:963-206940:1027",
"207099:83-207099:134", "207099:137-207099:172", "207099:175-207099:213", "207099:216-207099:314", "207099:316-207099:320",
"207099:323-207099:330", "207099:333-207099:367", "207099:370-207099:481", "207099:484-207099:602", "207099:605-207099:755",
"207099:757-207099:1046", "207099:1048-207099:1171", "207100:1-207100:91", "207100:94", "207214:57-207214:112",
"207214:114-207214:177", "207214:179-207214:181", "207214:184-207214:196", "207214:199-207214:220", "207214:223-207214:262",
"207214:265-207214:405", "207214:408-207214:482", "207214:485-207214:640", "207214:643-207214:708", "207214:718-207214:757",
"207214:759-207214:808", "207214:811-207214:829", "207217:1-207217:32", "207219:1-207219:112", "207220:1-207220:160",
"207221:1-207221:102", "207222:1-207222:17", "207222:20-207222:289", "207231:70-207231:84", "207231:86-207231:121",
"207231:123-207231:184", "207231:187-207231:189", "207231:192-207231:303", "207231:306-207231:354", "207231:357-207231:481",
"207231:484-207231:504", "207231:508-207231:549", "207231:552-207231:626", "207231:628-207231:690", "207231:693-207231:875",
"207231:878-207231:1000", "207231:1003-207231:1170", "207231:1173-207231:1187", "207231:1189-207231:1227", "207231:1229-207231:1415",
"207231:1418-207231:1445", "207231:1447-207231:1505", "207233:1-207233:119", "207233:121-207233:148", "207269:80-207269:394",
"207269:397-207269:436", "207269:439-207269:463", "207269:466-207269:551", "207269:568-207269:577", "207273:3-207273:877",
"207279:68-207279:138", "207279:141-207279:149", "207279:151-207279:237", "207279:240-207279:266", "207279:269-207279:307",
"207279:309-207279:416", "207279:498-207279:551", "207279:554-207279:640", "207279:643-207279:961", "207279:963-207279:1095",
"207279:1098-207279:1160", "207320:1-207320:110", "207320:112-207320:350", "207371:72-207371:117", "207371:120-207371:124",
"207372:1-207372:27", "207372:30-207372:113", "207372:116-207372:154", "207372:156-207372:174", "207372:176-207372:478",
"207372:480-207372:496", "207397:32-207397:77", "207397:80-207397:140", "207397:143-207397:179", "207398:1-207398:14",
"207398:16-207398:33", "207454:79-207454:95", "207454:98-207454:123", "207454:126-207454:259", "207454:261-207454:363",
"207454:365-207454:458", "207454:461-207454:498", "207454:501-207454:609", "207454:612-207454:632", "207454:635-207454:781",
"207454:784-207454:866", "207454:869-207454:974", "207454:977-207454:1064", "207454:1067-207454:1079", "207454:1081-207454:1321",
"207454:1323-207454:1464", "207454:1467-207454:1569", "207454:1571-207454:1604", "207454:1607-207454:1712", "207454:1714-207454:1988",
"207469:1-207469:31", "207469:34-207469:45", "207477:76-207477:104", "207477:107-207477:111", "207477:114-207477:147",
"207477:150-207477:295", "207477:298-207477:483", "207477:486-207477:494", "207477:497-207477:527", "207477:530-207477:563",
"207477:565-207477:570", "207487:50-207487:98", "207487:101-207487:311", "207487:313-207487:359", "207487:363-207487:468",
"207487:471-207487:472", "207488:1-207488:63", "207488:66-207488:92", "207488:95-207488:113", "207488:116-207488:198",
"207488:200-207488:250", "207488:252-207488:288", "207488:291-207488:365", "207488:368-207488:377", "207488:379-207488:440",
"207490:1-207490:48", "207490:51-207490:111", "207491:1-207491:176", "207491:179-207491:458", "207492:1-207492:20",
"207492:23-207492:298", "207515:79-207515:109", "207515:112-207515:132", "207515:134-207515:208", "207515:211-207515:225",
"207515:228-207515:320", "207515:322-207515:381", "207515:383-207515:498", "207515:500-207515:730", "207515:733-207515:849",
"207515:851-207515:954", "207515:957-207515:994", "207515:997-207515:1052", "207515:1055-207515:1143", "207515:1145-207515:1211",
"207517:1-207517:12", "207517:15-207517:57", "207518:1-207518:59", "207518:61-207518:83", "207882:22-207882:45",
"207883:1", "207883:3-207883:4", "207883:7-207883:75", "207884:1-207884:106", "207884:108-207884:183",
"207885:1-207885:90", "207886:1-207886:30", "207886:32-207886:90", "207886:92-207886:156", "207886:158-207886:166",
"207886:168-207886:171", "207889:1-207889:43", "207889:47-207889:57", "207889:60-207889:303", "207889:306-207889:442",
"207889:445", "207889:447-207889:551", "207889:553-207889:731", "207889:733-207889:907", "207889:910-207889:945",
"207898:1-207898:33", "207898:36-207898:57", "207898:60-207898:235", "207898:239-207898:257", "207898:260-207898:277",
"207905:75-207905:196", "207905:198-207905:281", "207905:284-207905:329", "207905:331-207905:402", "207905:404-207905:565",
"207905:568-207905:672", "207905:675-207905:805", "207905:807-207905:850", "207905:852-207905:861", "207905:864-207905:884",
"207905:886-207905:1180", "207905:1183-207905:1283", "207905:1285-207905:1331", "207905:1333-207905:1515", "207905:1518-207905:1734",
"207905:1737-207905:1796", "207920:84-207920:146", "207920:149-207920:241", "207920:243-207920:261", "207920:264-207920:291",
"207920:294-207920:486", "207920:489-207920:518", "207920:520-207920:598", "207920:600-207920:708", "207920:710-207920:826",
"207921:1-207921:37", "207921:40-207921:58", "207922:1-207922:69", "207922:71-207922:100", "207922:103-207922:126",
"207922:129-207922:242", "207922:274-207922:291", "207924:1-207924:52", "207924:54-207924:171", "207924:173-207924:178",
"207924:181-207924:339", "208307:2-208307:42", "208307:45", "208307:47-208307:70", "208307:72-208307:147",
"208307:150-208307:252", "208307:256-208307:259", "208307:262-208307:275", "208307:278-208307:342", "208307:345-208307:450",
"208307:453-208307:527", "208307:530-208307:583", "208307:586-208307:605", "208307:608-208307:616", "208307:618-208307:667",
"208307:670-208307:761", "208307:763-208307:798", "208307:800-208307:889", "208307:891-208307:893", "208307:896-208307:1055",
"208307:1057-208307:1205", "208307:1208-208307:1294", "208307:1297-208307:1328", "208339:77-208339:89", "208339:91-208339:122",
"208339:125-208339:208", "208339:211-208339:346", "208339:349-208339:363", "208341:1-208341:84", "208341:87-208341:117",
"208341:120-208341:513", "208341:515-208341:685", "208341:688-208341:693", "208341:695-208341:775", "208341:777-208341:824",
"208351:83-208351:97", "208351:100-208351:356", "208351:359-208351:367", "208351:369", "208352:1-208352:15",
"208352:17", "208352:19", "208353:1-208353:76", "208353:78-208353:269", "208353:271-208353:348",
"208357:1-208357:70", "208357:73-208357:507", "208390:72-208390:128", "208390:130-208390:169", "208391:52-208391:82",
"208391:84-208391:162", "208391:164-208391:216", "208391:219-208391:493", "208391:495-208391:498", "208391:500-208391:523",
"208391:526-208391:533", "208391:535-208391:588", "208391:591-208391:660", "208391:663-208391:869", "208427:49-208427:89",
"208427:92-208427:161", "208427:164", "208427:166-208427:173", "208427:175-208427:268", "208427:271-208427:312",
"208427:315", "208427:317-208427:335", "208427:337-208427:361", "208427:364-208427:402", "208427:404-208427:422",
"208427:425-208427:577", "208427:580-208427:647", "208428:1-208428:58", "208428:61-208428:68", "208428:70-208428:156",
"208428:159-208428:227", "208429:1-208429:56", "208429:59-208429:139", "208429:141-208429:159", "208429:162-208429:237",
"208429:240-208429:440", "208429:442-208429:452", "208429:455-208429:589", "208429:592-208429:712", "208429:715-208429:922",
"208487:2-208487:26", "208487:29-208487:159", "208487:161-208487:307", "208487:309-208487:459", "208487:462-208487:476",
"208487:479-208487:621", "208509:71-208509:232", "208538:2-208538:43", "208540:1-208540:26", "208540:29-208540:98",
"208541:1-208541:57", "208541:59-208541:173", "208541:175-208541:376", "208541:378-208541:413", "208551:119-208551:193",
"208551:195-208551:212", "208551:215-208551:300", "208551:303-208551:354", "208551:356-208551:554", "208551:557-208551:580",
"208686:73-208686:79", "208686:82-208686:181", "208686:183-208686:224", "208686:227-208686:243", "208686:246-208686:311",
"208686:313-208686:459" ) ),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_146.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_147.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_148.root')
)
| [
"[email protected]"
]
| |
3d0018af2b85f6d0cf6fa49a37de798b4adf04e7 | 44440302635bf339b268775d88a7aed3a59f7d7b | /leetcode/mediansortarray.py | c1d096501b353371dc59887495b0995bf5f0348b | []
| no_license | EdwardNgo/Algorithm-application | 79eeb64b65071f0c014ff34fe7e75b865f97d6ee | 36d1bc5c6510d95f97dedebf11fbdf83824a39cc | refs/heads/master | 2023-07-12T01:28:49.750121 | 2021-08-15T02:28:14 | 2021-08-15T02:28:14 | 366,232,906 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | def solution(nums1,nums2):
| [
"[email protected]"
]
| |
8fe088fdb84933fc73e957b74a89bbc398332e43 | 1fe8b4a22ba986e06788c31237171f986d5b440e | /uploader/admin.py | 3d8d57f43d4ba6b1119c84b3d84eaf416bfb5bec | []
| no_license | whitews/StickyTempFile | 0f654bafe23b6e4e104fe76368cd642c30f918d5 | 1812fa2c669c2128e13c9533b5ea6a97e5449e3e | refs/heads/master | 2021-01-19T05:29:59.049931 | 2013-03-07T21:36:11 | 2013-03-07T21:36:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from uploader.models import *
from django.contrib import admin
admin.site.register(UploadedFile)
| [
"[email protected]"
]
| |
1fcf7c5adfe328a9cbff60ebb27546955e3c696e | 586e822dd294e5b57de67b908c1d20583dfeada0 | /data_eda/missing_data.py | 6497f4c0a1641bc513384010180c7de6984a80a9 | []
| no_license | myhaa/Machine-Learning | 1c40ed7e02d7fdd75babb663faf29501926c5477 | f698ec353261fd10c1e6d97f0ad9477673779060 | refs/heads/master | 2023-07-22T04:06:16.600229 | 2021-09-07T15:43:00 | 2021-09-07T15:43:00 | 371,622,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,893 | py | # -*- coding:utf-8 _*-
"""
Author: meiyunhe
Email: [email protected]
Date: 2021/05/17
File: missing_data.py
Software: PyCharm
Description: 缺失数据通用函数合集
"""
# loading modules
import os
from warnings import warn
def check_missing(df, save_path=None):
"""
统计各特征缺失数和缺失比例
:param df: pandas Dataframe
:param save_path: 保存路径
:return:
"""
res = pd.concat([df.isnull().sum(), df.isnull().mean()], axis=1)
res = res.rename(index=str, columns={0: 'total missing', 1: 'ratio'})
if save_path:
save_path = os.path.join(save_path, 'missing.csv')
res.to_csv(save_path)
print('missing result saved at: ', save_path)
return res
def drop_missing(df, axis=0):
"""
删除NA所在行或者列
:param df:
:param axis: 同dropna的axis
:return:
"""
df_copy = df.copy(deep=True)
df_copy = df_copy.dropna(axis=axis, inplace=False)
return df_copy
def impute_NA_with_arbitrary(df, impute_value, NA_col=None):
"""
填补缺失值,用指定值填补
:param df:
:param impute_value: 填补值
:param NA_col: 需要填补的特征list
:return:
"""
if NA_col is None:
NA_col = []
df_copy = df.copy(deep=True)
for i in NA_col:
if df_copy[i].isnull().sum() > 0:
df_copy[i+'_NA_impute_'+str(impute_value)] = df_copy[i].fillna(impute_value)
else:
warn("Column {} has no missing".format(i))
return df_copy
def impute_NA_with_method(df, method='mean', NA_col=None):
"""
填补缺失值,用均值、中位数、众数等方法
:param df:
:param method: 指定方法
:param NA_col: 需要填补的特征list
:return:
"""
if NA_col is None:
NA_col = []
df_copy = df.copy(deep=True)
for i in NA_col:
if df_copy[i].isnull().sum()>0:
if method == 'mean':
df_copy[i+'_NA_impute_mean'] = df_copy[i].fillna(df[i].mean())
elif method == 'median':
df_copy[i + '_NA_impute_median'] = df_copy[i].fillna(df[i].median())
elif method == 'mode':
df_copy[i + '_NA_impute_mode'] = df_copy[i].fillna(df[i].mode()[0])
else:
warn("Column {} has no missing".format(i))
return df_copy
def impute_NA_with_distribution(df, NA_col=None):
"""
填补缺失值 at the far end of the distribution of that variable calculated by
mean + 3*std
:param df:
:param NA_col: 需要填补的特征list
:return:
"""
if NA_col is None:
NA_col = []
df_copy = df.copy(deep=True)
for i in NA_col:
if df_copy[i].isnull().sum()>0:
df_copy[i + '_NA_impute_distribution'] = df_copy[i].fillna(df[i].mean()+3*df[i].std())
else:
warn("Column {} has no missing".format(i))
return df_copy
def impute_NA_with_random_sampling(df, NA_col=None, random_state=0):
"""
填补缺失值,从样本中随机抽样填补
:param df:
:param NA_col:
:param random_state:
:return:
"""
if NA_col is None:
NA_col = []
df_copy = df.copy(deep=True)
for i in NA_col:
if df_copy[i].isnull().sum()>0:
df_copy[i+'_NA_impute_random_sampling'] = df_copy[i]
random_sampling = df_copy[i].dropna().sample(df_copy[i].isnull().sum(), random_state=random_state)
random_sampling.index = df_copy[df_copy[i].isnull()].index
df_copy.loc[df_copy[i].isnull(), str(i)+'_NA_impute_random_sampling'] = random_sampling
else:
warn("Column {} has no missing".format(i))
return df_copy
if __name__ == '__main__':
import pandas as pd
# from io import StringIO
#
# data = "col1,col2,col3,col4\na,b,1,5\na,b,2,6\nc,d,2,NA"
# df = pd.read_csv(StringIO(data))
# print(df.head())
from machine_learning.data_input.load_data import data_loader
df = data_loader()
print(df.head())
print(check_missing(df))
# print(drop_missing(df, axis=0))
v1 = 'Age'
print(impute_NA_with_arbitrary(df, 10, NA_col=[v1]))
print(impute_NA_with_method(df, method='median', NA_col=[v1]))
print(impute_NA_with_distribution(df, NA_col=[v1]))
print(impute_NA_with_random_sampling(df, NA_col=[v1], random_state=0))
| [
"[email protected]"
]
| |
da88d06918d51f179409201b383857c90aa6a173 | 0e529cabbd10c2428746088d8c0327523e2ee10b | /results/plot_graphs.py | e10f46f88cb38384594c2f511e5f35f4bb3b6f21 | []
| no_license | gsakkas/gpus-kmeans | b0de5565c7f450996ca435dcd5fdda265534a927 | bd7dcc4738b7d75d4b7d394122d9bd46b8d171ea | refs/heads/master | 2019-07-25T20:36:25.351104 | 2017-04-25T16:41:58 | 2017-04-25T16:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,583 | py | """
========
Barchart
========
A bar plot with errorbars and height labels on individual bars
"""
import numpy as np
import matplotlib.pyplot as plt
def parse_results(condition, filename):
fin = open(filename)
lines = fin.read().rstrip().split("\n")
fin.close()
kept_lines = filter(condition, lines)
result = map(lambda x: float(x.split(",")[-2].split()[0]), kept_lines)
return result
def plot_one(output_name, dataset, files, x_labels, title):
cond = lambda x: x.startswith('cusparse, ' + dataset + ',')
cusparse_times = parse_results(cond, files[0])
N = len(cusparse_times)
cond = lambda x: x.startswith('cublas, ' + dataset + ',')
cublas_times = parse_results(cond, files[1])
cond = lambda x: x.startswith('scikit_kmeans, ' + dataset + ',')
serial_times = parse_results(cond, files[2])
max_value = max(max(serial_times), max(cublas_times), max(cusparse_times))
## TODO: Put inertias here
ind = np.arange(N) # the x locations for the groups
width = 0.20 # the width of the bars
gap = 0.05
n_y_ticks = 10
fig, ax = plt.subplots()
rects1 = ax.bar(ind, serial_times, width, color='g')
rects2 = ax.bar(ind + (width + gap), cublas_times, width, color='c')
rects3 = ax.bar(ind + 2*(width + gap), cusparse_times, width, color='m')
# add some text for labels, title and axes ticks
ax.set_ylabel('Time/Iteration (seconds/iter)')
ax.set_title(title + " dataset")
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(x_labels)
ax.set_xlabel('#K - Number of clusters')
# ax.set_yticks(np.arange(0, max_value * 1.1, max_value/n_y_ticks))
ax.set_yscale('log')
ax.legend((rects1[0], rects2[0], rects3[0]), ('serial', 'cuBlas', 'cuSparse'), loc=2)
ax.grid(True)
plt.savefig(output_name)
## TODO: Show inertia somewhere
title = "Spatial Network"
output_name = "road_dataset.png"
dataset = 'data/road_spatial_network_dataset/spatial_network.data'
files = ["titan_x_final.txt"] * 2 + ["konka_scikit_results.out"]
x_labels = map(str, range(5,46,5) + [55])
plot_one(output_name, dataset, files, x_labels, title)
title = "Nu - Minebench"
output_name = "nu_minebench.png"
dataset = 'data/nu_minebench_dataset/kmeans/edge.data'
files = ["titan_x_final.txt"] * 2 + ["konka_scikit_results.out"]
x_labels = map(str, range(50,401,50) + [500, 600])
plot_one(output_name, dataset, files, x_labels, title)
output_name = "daily_sports.png"
dataset = 'data/daily_sports_activities/data.data'
files = ["results_daily.out"] * 2 + ["scikit_final.out"]
x_labels = map(str, [5,8,10,13,15,18,20,25,30,35])
plot_one(output_name, dataset, files, x_labels, title)
| [
"[email protected]"
]
| |
7cffd984d55e0708e92416f0d126056f75c33470 | ec062c479c09ce250c3e23ff47f144f423b55648 | /py/Lib/site-packages/azure/mgmt/compute/compute/v2016_04_30_preview/models/virtual_machine_paged.py | f4ce0dcbeaf516525bd3f7441a2a98148efea77a | []
| no_license | betisb/InputParser | c442ffc877a941bd5b7aac4d843a4d21594d8e96 | 68747d69e04d126f7ea679f93a291a6de244a95f | refs/heads/master | 2021-07-13T05:05:19.479329 | 2019-05-28T16:56:53 | 2019-05-28T16:56:53 | 188,087,891 | 0 | 2 | null | 2020-07-24T00:14:31 | 2019-05-22T17:52:13 | Python | UTF-8 | Python | false | false | 978 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class VirtualMachinePaged(Paged):
"""
A paging container for iterating over a list of :class:`VirtualMachine <azure.mgmt.compute.compute.v2016_04_30_preview.models.VirtualMachine>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[VirtualMachine]'}
}
def __init__(self, *args, **kwargs):
super(VirtualMachinePaged, self).__init__(*args, **kwargs)
| [
"[email protected]"
]
| |
f35a1b5fd5ac6605e90666ff032d753126a89666 | 45db4a55c6bd5137b17bf8dfa54ed94f361c3bf6 | /ResonantCircuits/resonantCircuit.py | e092ec6a20e63812e33f488d85c4af3afa794def | []
| no_license | CatT-DancingDev/PythonProjects | 1be3e8f0b0528be1ccbe8aeadb76ac8a5f9961ae | 7b59d9b1843eaddb9254f980f178d6e8ba551106 | refs/heads/main | 2023-04-15T08:06:25.240981 | 2021-04-25T04:13:15 | 2021-04-25T04:13:15 | 361,327,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,609 | py | ###############################################################################################
#
# Program: Resonant Circuit Design
# Module: resonantCircuit.py
# Author: Catherine Trujillo
# Course: CSC 217-470
# Date: 7/07/2020
#
###############################################################################################
#
#
# Description: This module defines/implements the superclass ResonantCircuit, which stores the
# data needed to describe a resonant frequency response.
#
############################## CLASS METHODS LIST #############################################
#
# __init__(self)
# setRF(self, rf)
# setB(self, b)
# setK(self, k)
# getRF(self)
# getB(self)
# getK(self)
# display(self)
#
############################## CLASS DEFINITION ################################################
class ResonantCircuit:
############################## METHODS #########################################################
#
# Method: __init__(self)
#
# Parameters: self
# Return Value: ResonantCircuit object
#
# Purpose: Intantiate a ResonantCircuit Object with data fields for:
# _rf = Resonant Frequency in rad/s
# _b = Bandwidth in rad/s
# _k = Gain at RF
#
#################################################################################################
def __init__(self):
self._rf = 0
self._b = 0
self._k = 0
#################################################################################################
#
# Method: getRF(self)
#
# Parameters: self
# Return Value: self._rf
#
# Purpose: Returns the value of self._rf
#
#################################################################################################
def getRF(self):
return self._rf
#################################################################################################
#
# Method: getB(self)
#
# Parameters: self
# Return Value: self._b
#
# Purpose: Returns the value of self._b
#
#################################################################################################
def getB(self):
return self._b
#################################################################################################
#
# Method: getK(self)
#
# Parameters: self
# Return Value: self._k
#
# Purpose: Returns the value of self._k
#
#################################################################################################
def getK(self):
return self._k
#################################################################################################
#
# Method: setRF(self, rf)
#
# Parameters: self, float rf
# Return Value: None
#
# Purpose: Assigns the value of rf to self._rf
#
#################################################################################################
def setRF(self, rf):
self._rf = rf
#################################################################################################
#
# Method: setB(self, b)
#
# Parameters: self, float b
# Return Value: None
#
# Purpose: Assigns the value of b to self._b
#
#################################################################################################
def setB(self, b):
self._b = b
#################################################################################################
#
# Method: setK(self, k)
#
# Parameters: self, float k
# Return Value: None
#
# Purpose: Assigns the value of k to self._k
#
#################################################################################################
def setK(self, k):
self._k = k
#################################################################################################
#
# Method: display(self)
#
# Parameters: self
# Return Value: None
#
# Purpose: Displays the description of the resonant frequency response
#
#################################################################################################
def display(self):
print("RESONANT FREQUENCY RESPONSE:")
print("Resonant Frequency = {} rad/s".format(self._rf))
print("Bandwidth = {} rad/s".format(self._b))
print("Gain At Resonant Frequency = {} \n".format(self._k))
##################################### END CLASS #################################################
| [
"[email protected]"
]
| |
47d5f3f1a68dad5b149971eeaec9c78cb01c4c8e | 2bdef3359de28afe1def5a5cae208489ff4af781 | /sort.py | ef7c31e24d46670956b4d836d49ef4d5b3d5afd7 | []
| no_license | Hallyson34/uPython | 07a63b6f6385f67fae722619ec612ffa4a1512ae | 5668ac9d97aee31c091fff3c6c79a94549802b6e | refs/heads/master | 2023-07-07T19:36:20.280226 | 2021-08-07T02:56:57 | 2021-08-07T02:56:57 | 358,599,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | A, B, C = map(int, input().split())
if A<B and A<C:
print(A)
if B<C:
print(f"{B}\n{C}")
else:
print(f"{C}\n{B}")
elif B<A and B<C:
print(B)
if A<C:
print(f"{A}\n{C}")
else:
print(f"{C}\n{A}")
else:
print(C)
if B<A:
print(f"{B}\n{A}")
else:
print(f"{A}\n{B}")
print(f"\n{A}\n{B}\n{C}") | [
"[email protected]"
]
| |
a8a98f6a93e509c990d8f37c41df6cc4c7f00232 | 57e43b31a0b1448b6635ec505bfa27fcf5e4ebf3 | /spotiplay/prompt.py | f3f29e40d30a134a18cef16e36c7e0604b9d66e5 | [
"MIT"
]
| permissive | jaruserickson/spotiplay | 6894036617befa46abbe22cdf7ed41fbff18f635 | 3e6e4a381d3899de75e7684d65cad1d40b81307b | refs/heads/master | 2021-07-11T17:10:59.766154 | 2017-10-14T01:02:24 | 2017-10-14T01:02:24 | 105,844,001 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | from __future__ import absolute_import, unicode_literals
import getpass
import os
from prompt_toolkit import prompt, AbortAction
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.token import Token
from prompt_toolkit.styles import style_from_dict
from prompt_toolkit.contrib.completers import WordCompleter
from spotiplay.history import history
style = style_from_dict({
Token.Username: 'bg:#ffffff #81b71a italic',
Token.At: '#999999',
Token.Host: '#81b71a',
Token.Separator: '#81b71a',
Token.Text: '#e6e6e6',
Token.Arrow: '#999999',
Token.SelectedText: 'reverse underline',
Token.Toolbar: '#e6e6e6 bg:#262626',
})
def completer():
list = []
for name in history():
list.append(name)
return WordCompleter(set(list), ignore_case=True)
def get_bottom_toolbar_tokens(cli):
return [
(Token.Toolbar, ' exit: ctrl+d | clear: ctrl+c ')
]
def get_prompt_tokens(cli):
return [
(Token.Username, getpass.getuser()),
(Token.At, '@'),
(Token.Host, os.uname()[1]),
(Token.Separator, ' - '),
(Token.Text, 'Add Songs or CMD:'),
(Token.Arrow, '\n> '),
]
def custom_prompt():
return prompt(
get_prompt_tokens=get_prompt_tokens,
history=history(),
auto_suggest=AutoSuggestFromHistory(),
enable_history_search=True,
on_abort=AbortAction.RETRY,
get_bottom_toolbar_tokens=get_bottom_toolbar_tokens,
completer=completer(),
complete_while_typing=True,
style=style
)
| [
"[email protected]"
]
| |
16b120ca9699f1c3db050a4efeacf9845bbd09af | 16f8e9dddeb046c51ed381e7e7fe20375633646d | /account/api/throttles.py | 182865cde730d41ced5768f5acbc02ff318e3eac | []
| no_license | AmilAbdullazadeh/django-rest | 600c556a1b8ad22ad1b794eeb1d256254b91a747 | 9aca56f85bb9884795716282d1f7d524f6f89d5e | refs/heads/master | 2022-12-12T04:47:58.228880 | 2020-09-01T06:58:39 | 2020-09-01T06:58:39 | 291,921,694 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from rest_framework.throttling import AnonRateThrottle
class RegisterThrottle(AnonRateThrottle):
scope = 'registerthrottle'
| [
"amilva"
]
| amilva |
c9c74a48846b3633abdbf55d3140a6485cc465a8 | 3ea851736abf69250dc1fd093e8f74abfe70e108 | /qa/migrations/0001_initial.py | 3b422157f32211a037e204eb69faa691b9d93397 | []
| no_license | SJ23y/my-first-blog | c211789d6552fed5c58cde37b59cc20243668dc6 | a5fec4fcba943ab8505e6191d787a40e32c8b1cb | refs/heads/master | 2020-12-27T09:31:02.253319 | 2017-06-09T22:51:49 | 2017-06-09T22:51:49 | 68,472,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('text', models.TextField()),
('added_at', models.DateTimeField()),
('author', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('tittle', models.CharField(max_length=255)),
('text', models.TextField()),
('added_at', models.DateTimeField()),
('rating', models.IntegerField()),
('author', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
('likes', models.ManyToManyField(to=settings.AUTH_USER_MODEL, related_name='question_like_user')),
],
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(to='qa.Question'),
),
]
| [
"[email protected]"
]
| |
ff6683aeb21bc1bc07ca8691f1529e640f825454 | 75f2b0c621d13c2c1d8e0c5243583db7feb9f22d | /pantry_raid/models/forms/substitutionform.py | 670b9220afefe4d0a3ab3d6584c1e0e27d689cc0 | []
| no_license | Wizracc/PantryRaid | 562ed8581e95ee77f57688b0d5200f01c21fc357 | db90e70ba0524321a56491d7164f21a92da5e587 | refs/heads/master | 2022-12-10T22:55:21.400690 | 2020-02-11T09:05:11 | 2020-02-11T09:05:11 | 239,718,611 | 0 | 0 | null | 2022-09-16T18:17:08 | 2020-02-11T09:04:04 | Python | UTF-8 | Python | false | false | 1,152 | py | from flask_wtf import FlaskForm as Form
from wtforms import FieldList, FormField, SubmitField, StringField
from pantry_raid.models.forms.autocompletes import AutocompleteField
from pantry_raid.models.forms.buttons import CustomButton
class SubstituteField(Form):
quantity = StringField("Quantity", render_kw={
"id": "sub_qty",
"placeholder": "Quantity",
"style": "width: 100%; max-width: 40%;"
})
ingredient = AutocompleteField("Ingredient", render_kw={
"id": "sub_ingredient",
"placeholder": "Ingredient",
"style": "width: 100%; max-width: 40%;"
})
class SubstitutionForm(Form):
add_target = AutocompleteField("Target Ingredient", render_kw={
"id": "target",
"numIngredients": 0,
"autocomplete": "off"
})
target_qty = StringField("Target Quantity", render_kw={
"id": "target_qty"
})
substitute = FieldList(FormField(SubstituteField), min_entries=1)
add_ingredient = CustomButton("<i class=\"fas fa-plus-circle\"></i>")
submit = SubmitField("Add Substitution", render_kw={
"style": "width: 100%; height: 5em"
})
| [
"[email protected]"
]
| |
c3efcb46b691153ec799d7d3fc4cdaf0a4eb11e9 | b6a780446903726f1b8e80bbce105086f66387e6 | /tests/test_directory.py | a3130c6cbc09299c4ba0d0e8dc5f5a5eecc95097 | [
"WTFPL"
]
| permissive | dornheimer/gogtool | 89544278180872fc9688ed4edf6f6a81f417b0e1 | 09ee025fea49cf11634baed970974ef716150743 | refs/heads/master | 2018-07-13T21:32:03.459370 | 2018-07-05T12:08:41 | 2018-07-05T12:08:41 | 113,966,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | import os
import unittest
from testfixtures import LogCapture
from gogtool.directory import Directory
VALID_PATH = os.path.dirname(__file__)
INVALID_PATH = "directory/does/not/exist"
class TestLogging(unittest.TestCase):
def setUp(self):
self.capture = LogCapture()
def tearDown(self):
self.capture.uninstall()
def test_init_invalid(self):
with self.assertRaises(SystemExit) as cm:
directory = Directory(INVALID_PATH)
self.assertEqual(directory.path, None)
self.assertTrue(cm.exception.code, 2)
self.capture.check(
('gogtool.helper.log',
'ERROR',
f"Directory could not be initialized: '{INVALID_PATH}' does not exist.")
)
def test_init_valid(self):
directory = Directory(VALID_PATH)
self.assertEqual(directory.path, VALID_PATH)
self.capture.check(
('gogtool.helper.log',
'DEBUG',
f"Directory initialized with {VALID_PATH}")
)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
d6d926460684758bfa1783aa26618668c90b37af | c3f1ebc74dec01fed73bc7ff6956f3c7fc24e430 | /SCAMP_Python_Track/Portfolio/landing_page/migrations/0002_auto_20200724_2220.py | d74ba3df40d156f6cb9faa1caf5c02278a0832b8 | []
| no_license | Aduketemi/SCAMP-Assesment | f8978006ca90766fc5ee58c2d629247eabc51300 | 859dc6f341f3e6e122362e87e3fab9e98114a6d0 | refs/heads/master | 2022-11-24T09:58:35.418395 | 2020-08-02T09:32:05 | 2020-08-02T09:32:05 | 257,937,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # Generated by Django 3.0.8 on 2020-07-24 21:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('landing_page', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Home',
new_name='Profile',
),
]
| [
"[email protected]"
]
| |
0504c14f96bdce17781d5abc4efbafb8c383f80a | ca00031df3e47bd9905ff9d40573e7837f5addba | /venv/Lib/site-packages/ibm_db_tests/test_144_BindParamInsertStmtPARAM_FILE.py | 3f9338d68102248c1ec75df865f10266935ad7c8 | [
"Apache-2.0"
]
| permissive | abycyriac/Covid-Vaccine-Procurement-Webapp-Flask | dda02eb2e2807021f0621735fea458f740aae478 | a6ff7236cc6e5722f2da461a27ac55e8e17719fb | refs/heads/main | 2023-05-11T23:40:54.794264 | 2021-06-02T20:11:51 | 2021-06-02T20:11:51 | 373,290,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | #
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
import sys
import os
import unittest
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
def test_144_BindParamInsertStmtPARAM_FILE(self):
obj = IbmDbTestFunctions()
obj.assert_expect(self.run_test_144)
def run_test_144(self):
conn = ibm_db.connect(config.database, config.user, config.password)
if conn:
# Drop the test table, in case it exists
drop = 'DROP TABLE pictures'
try:
result = ibm_db.exec_immediate(conn, drop)
except:
pass
# Create the test table
create = 'CREATE TABLE pictures (id INTEGER, picture BLOB)'
result = ibm_db.exec_immediate(conn, create)
stmt = ibm_db.prepare(conn, "INSERT INTO pictures VALUES (0, ?)")
picture = os.path.dirname(os.path.abspath(__file__)) + "/pic1.jpg"
if sys.platform == 'zos':
rc = ibm_db.bind_param(stmt, 1, picture, ibm_db.SQL_PARAM_INPUT, ibm_db.SQL_BLOB)
else:
rc = ibm_db.bind_param(stmt, 1, picture, ibm_db.SQL_PARAM_INPUT, ibm_db.SQL_BINARY)
rc = ibm_db.execute(stmt)
num = ibm_db.num_rows(stmt)
print(num)
else:
print("Connection failed.")
#__END__
#__LUW_EXPECTED__
#1
#__ZOS_EXPECTED__
#1
#__SYSTEMI_EXPECTED__
#1
#__IDS_EXPECTED__
#1
| [
"[email protected]"
]
| |
71688feb0681e39a08137247598481369fa9f252 | a08b5385e41fd4a99cc47e71df8310a6ce58721a | /flappy_bird.py | 9199d0f2f28bf0155f7aa3c92d55a6b808867a51 | []
| no_license | walg/NEAT-Flappy-Bird | 91ecbf17969bce6240225b3d5d6a180693cb5efd | 707dee098cfa39cf25e295e76015149f23b5da81 | refs/heads/master | 2020-07-02T22:44:01.620700 | 2019-08-09T00:40:29 | 2019-08-09T00:40:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,612 | py | """
The classic game of flappy bird. Make with python
and pygame. Features pixel perfect collision using masks :o
Date Modified: Jul 30, 2019
Author: Tech With Tim
Estimated Work Time: 5 hours (1 just for that damn collision)
"""
import pygame
import random
import os
import time
import neat
import visualize
import pickle
pygame.font.init() # init font
WIN_WIDTH = 600
WIN_HEIGHT = 800
FLOOR = 730
STAT_FONT = pygame.font.SysFont("comicsans", 50)
END_FONT = pygame.font.SysFont("comicsans", 70)
DRAW_LINES = False
WIN = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
pygame.display.set_caption("Flappy Bird")
pipe_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","pipe.png")).convert_alpha())
bg_img = pygame.transform.scale(pygame.image.load(os.path.join("imgs","bg.png")).convert_alpha(), (600, 900))
bird_images = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","bird" + str(x) + ".png"))) for x in range(1,4)]
base_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","base.png")).convert_alpha())
gen = 0
class Bird:
"""
Bird class representing the flappy bird
"""
MAX_ROTATION = 25
IMGS = bird_images
ROT_VEL = 20
ANIMATION_TIME = 5
def __init__(self, x, y):
"""
Initialize the object
:param x: starting x pos (int)
:param y: starting y pos (int)
:return: None
"""
self.x = x
self.y = y
self.tilt = 0 # degrees to tilt
self.tick_count = 0
self.vel = 0
self.height = self.y
self.img_count = 0
self.img = self.IMGS[0]
def jump(self):
"""
make the bird jump
:return: None
"""
self.vel = -10.5
self.tick_count = 0
self.height = self.y
def move(self):
"""
make the bird move
:return: None
"""
self.tick_count += 1
# for downward acceleration
displacement = self.vel*(self.tick_count) + 0.5*(3)*(self.tick_count)**2 # calculate displacement
# terminal velocity
if displacement >= 16:
displacement = (displacement/abs(displacement)) * 16
if displacement < 0:
displacement -= 2
self.y = self.y + displacement
if displacement < 0 or self.y < self.height + 50: # tilt up
if self.tilt < self.MAX_ROTATION:
self.tilt = self.MAX_ROTATION
else: # tilt down
if self.tilt > -90:
self.tilt -= self.ROT_VEL
def draw(self, win):
"""
draw the bird
:param win: pygame window or surface
:return: None
"""
self.img_count += 1
# For animation of bird, loop through three images
if self.img_count <= self.ANIMATION_TIME:
self.img = self.IMGS[0]
elif self.img_count <= self.ANIMATION_TIME*2:
self.img = self.IMGS[1]
elif self.img_count <= self.ANIMATION_TIME*3:
self.img = self.IMGS[2]
elif self.img_count <= self.ANIMATION_TIME*4:
self.img = self.IMGS[1]
elif self.img_count == self.ANIMATION_TIME*4 + 1:
self.img = self.IMGS[0]
self.img_count = 0
# so when bird is nose diving it isn't flapping
if self.tilt <= -80:
self.img = self.IMGS[1]
self.img_count = self.ANIMATION_TIME*2
# tilt the bird
blitRotateCenter(win, self.img, (self.x, self.y), self.tilt)
def get_mask(self):
"""
gets the mask for the current image of the bird
:return: None
"""
return pygame.mask.from_surface(self.img)
class Pipe():
"""
represents a pipe object
"""
GAP = 200
VEL = 5
def __init__(self, x):
"""
initialize pipe object
:param x: int
:param y: int
:return" None
"""
self.x = x
self.height = 0
# where the top and bottom of the pipe is
self.top = 0
self.bottom = 0
self.PIPE_TOP = pygame.transform.flip(pipe_img, False, True)
self.PIPE_BOTTOM = pipe_img
self.passed = False
self.set_height()
def set_height(self):
"""
set the height of the pipe, from the top of the screen
:return: None
"""
self.height = random.randrange(50, 450)
self.top = self.height - self.PIPE_TOP.get_height()
self.bottom = self.height + self.GAP
def move(self):
"""
move pipe based on vel
:return: None
"""
self.x -= self.VEL
def draw(self, win):
"""
draw both the top and bottom of the pipe
:param win: pygame window/surface
:return: None
"""
# draw top
win.blit(self.PIPE_TOP, (self.x, self.top))
# draw bottom
win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))
def collide(self, bird, win):
"""
returns if a point is colliding with the pipe
:param bird: Bird object
:return: Bool
"""
bird_mask = bird.get_mask()
top_mask = pygame.mask.from_surface(self.PIPE_TOP)
bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
top_offset = (self.x - bird.x, self.top - round(bird.y))
bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))
b_point = bird_mask.overlap(bottom_mask, bottom_offset)
t_point = bird_mask.overlap(top_mask,top_offset)
if b_point or t_point:
return True
return False
class Base:
"""
Represnts the moving floor of the game
"""
VEL = 5
WIDTH = base_img.get_width()
IMG = base_img
def __init__(self, y):
"""
Initialize the object
:param y: int
:return: None
"""
self.y = y
self.x1 = 0
self.x2 = self.WIDTH
def move(self):
"""
move floor so it looks like its scrolling
:return: None
"""
self.x1 -= self.VEL
self.x2 -= self.VEL
if self.x1 + self.WIDTH < 0:
self.x1 = self.x2 + self.WIDTH
if self.x2 + self.WIDTH < 0:
self.x2 = self.x1 + self.WIDTH
def draw(self, win):
"""
Draw the floor. This is two images that move together.
:param win: the pygame surface/window
:return: None
"""
win.blit(self.IMG, (self.x1, self.y))
win.blit(self.IMG, (self.x2, self.y))
def blitRotateCenter(surf, image, topleft, angle):
"""
Rotate a surface and blit it to the window
:param surf: the surface to blit to
:param image: the image surface to rotate
:param topLeft: the top left position of the image
:param angle: a float value for angle
:return: None
"""
rotated_image = pygame.transform.rotate(image, angle)
new_rect = rotated_image.get_rect(center = image.get_rect(topleft = topleft).center)
surf.blit(rotated_image, new_rect.topleft)
def draw_window(win, birds, pipes, base, score, gen, pipe_ind):
"""
draws the windows for the main game loop
:param win: pygame window surface
:param bird: a Bird object
:param pipes: List of pipes
:param score: score of the game (int)
:param gen: current generation
:param pipe_ind: index of closest pipe
:return: None
"""
if gen == 0:
gen = 1
win.blit(bg_img, (0,0))
for pipe in pipes:
pipe.draw(win)
base.draw(win)
for bird in birds:
# draw lines from bird to pipe
if DRAW_LINES:
try:
pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_TOP.get_width()/2, pipes[pipe_ind].height), 5)
pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_BOTTOM.get_width()/2, pipes[pipe_ind].bottom), 5)
except:
pass
# draw bird
bird.draw(win)
# score
score_label = STAT_FONT.render("Score: " + str(score),1,(255,255,255))
win.blit(score_label, (WIN_WIDTH - score_label.get_width() - 15, 10))
# generations
score_label = STAT_FONT.render("Gens: " + str(gen-1),1,(255,255,255))
win.blit(score_label, (10, 10))
# alive
score_label = STAT_FONT.render("Alive: " + str(len(birds)),1,(255,255,255))
win.blit(score_label, (10, 50))
pygame.display.update()
def eval_genomes(genomes, config):
"""
runs the simulation of the current population of
birds and sets their fitness based on the distance they
reach in the game.
"""
global WIN, gen
win = WIN
gen += 1
# start by creating lists holding the genome itself, the
# neural network associated with the genome and the
# bird object that uses that network to play
nets = []
birds = []
ge = []
for genome_id, genome in genomes:
genome.fitness = 0 # start with fitness level of 0
net = neat.nn.FeedForwardNetwork.create(genome, config)
nets.append(net)
birds.append(Bird(230,350))
ge.append(genome)
base = Base(FLOOR)
pipes = [Pipe(700)]
score = 0
clock = pygame.time.Clock()
run = True
while run and len(birds) > 0:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.quit()
quit()
break
pipe_ind = 0
if len(birds) > 0:
if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width(): # determine whether to use the first or second
pipe_ind = 1 # pipe on the screen for neural network input
for x, bird in enumerate(birds): # give each bird a fitness of 0.1 for each frame it stays alive
ge[x].fitness += 0.1
bird.move()
# send bird location, top pipe location and bottom pipe location and determine from network whether to jump or not
output = nets[birds.index(bird)].activate((bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))
if output[0] > 0.5: # we use a tanh activation function so result will be between -1 and 1. if over 0.5 jump
bird.jump()
base.move()
rem = []
add_pipe = False
for pipe in pipes:
pipe.move()
# check for collision
for x, bird in enumerate(birds):
if pipe.collide(bird, win):
ge[x].fitness -= 1
birds.remove(bird)
if pipe.x + pipe.PIPE_TOP.get_width() < 0:
rem.append(pipe)
if not pipe.passed and pipe.x < bird.x:
pipe.passed = True
add_pipe = True
if add_pipe:
score += 1
# can add this line to give more reward for passing through a pipe (not required)
for genome in ge:
genome.fitness += 5
pipes.append(Pipe(WIN_WIDTH))
for r in rem:
pipes.remove(r)
remove = []
for x, bird in enumerate(birds):
if bird.y + bird.img.get_height() - 10 >= FLOOR or bird.y < -50:
remove.append((bird,nets[x],ge[x]))
for r in remove: # remove birds, associated genome and nets if requried
ge.remove(r[2])
nets.remove(r[1])
birds.remove(r[0])
draw_window(WIN, birds, pipes, base, score, gen, pipe_ind)
# break if score gets large enough
'''if score > 20:
pickle.dump(nets[0],open("best.pickle", "wb"))
break'''
def run(config_file):
"""
runs the NEAT algorithm to train a neural network to play flappy bird.
:param config_file: location of config file
:return: None
"""
config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)
# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
#p.add_reporter(neat.Checkpointer(5))
# Run for up to 50 generations.
winner = p.run(eval_genomes, 50)
# show final stats
print('\nBest genome:\n{!s}'.format(winner))
if __name__ == '__main__':
# Determine path to configuration file. This path manipulation is
# here so that the script will run successfully regardless of the
# current working directory.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward.txt')
run(config_path)
| [
"[email protected]"
]
| |
e107fbc09d667d6e7002ee969db496e3d3d7b58d | 8e9d04512cf1b0424cd5de331a30620a356c0818 | /Events/migrations/0001_initial.py | f38d44832908ee00a227c43cdd668d5229471fec | []
| no_license | shraysalvi/Arlarse001 | 949f8ad6a2da790bf2164cb555c9b537f07d2775 | 962b73f613880dae064a8756a01629bd8a095b0c | refs/heads/master | 2023-09-02T08:04:03.831670 | 2021-11-07T20:56:18 | 2021-11-07T20:56:18 | 424,014,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | # Generated by Django 3.2.7 on 2021-11-04 10:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ApplyCandidates',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=50)),
('gender', models.CharField(max_length=10)),
('email', models.EmailField(max_length=254)),
('dob', models.DateField()),
('state', models.CharField(max_length=20)),
('college_name', models.CharField(max_length=200)),
('degree_level', models.CharField(max_length=10)),
('degree_program', models.CharField(max_length=50)),
('graduation_date', models.DateField()),
('video', models.URLField()),
],
),
migrations.CreateModel(
name='EmailSubscriberForm',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subscriber_mail', models.EmailField(max_length=254)),
],
),
]
| [
"[email protected]"
]
| |
2dd9fc92bb479d7ea6fd9fd47d4c000b3e9b3bc7 | 0f4422e10006b348d6aae29ba03bceb4baca52d9 | /vanilla_GANs/vanilla_gans_old.py | 74f567027dc5c44173e0dbc6b4a167ec9e1bd62d | []
| no_license | nikeshnaik/GANs_In_Tensorflow | edeb24fa76175f501cdb3189c3ca0ea03c64f417 | 09342bfb60ac7c7fc91085a4480561c7eaa500a5 | refs/heads/master | 2022-02-16T17:27:41.664188 | 2019-08-10T08:49:48 | 2019-08-10T08:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,034 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import sys
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.initializers import he_normal
modules = ['tensorflow','numpy','matplotlib','os']
if not all([True if each in sys.modules else False for each in modules]):
raise ModuleNotFoundError
tf.reset_default_graph()
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1./tf.sqrt(in_dim/2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
# X = tf.placeholder(tf.float32,shape=[None, 784],name='input_image')
Discr_W1 = tf.Variable(xavier_init([784,128]))
Discr_b1 = tf.Variable(tf.zeros(shape=[128]))
Discr_W2 = tf.Variable(xavier_init([128,1]))
Discr_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_Discr = [Discr_W1, Discr_W2, Discr_b1, Discr_b2]
# Z = tf.placeholder(tf.float32,shape=[None,100])
batch_size= 64
Z_dim = 100
def sample_Z(m,n):
return np.random.uniform(-1.,1.,size=[64,n])
def generator(z):
with tf.name_scope('Generator'):
Gene_h1 = Dense(128,activation='relu')(z)
Gene_log_prob = Dense(784,activation=None)(Gene_h1)
Gene_prob = Activation('sigmoid',name='generator_prob')(Gene_log_prob)
return Gene_prob
def discriminator(x):
with tf.name_scope('Discriminator'):
Discr_h1 = tf.nn.relu(tf.matmul(x, Discr_W1)+Discr_b1)
Discr_logit = tf.matmul(Discr_h1, Discr_W2) + Discr_b2
return Discr_logit
def plot(samples):
fig = plt.figure(figsize=(4,4))
gs = gridspec.GridSpec(4,4)
gs.update(wspace=0.05, hspace=0.05)
for i,sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28,28),cmap='Greys_r')
return fig
X = tf.placeholder(tf.float32,shape=[None, 784],name='input_image')
Z = tf.placeholder(tf.float32,shape=[None,100])
Gene_Sample = generator(Z)
Discr_logit_real = discriminator(X)
Discr_logit_fake = discriminator(Gene_Sample)
with tf.name_scope('Cost'):
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Discr_logit_real,labels=tf.ones_like(Discr_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Discr_logit_fake,labels=tf.zeros_like(Discr_logit_fake)))
D_loss = tf.add(D_loss_real,D_loss_fake)
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Discr_logit_fake,labels=tf.ones_like(Discr_logit_fake)))
with tf.name_scope('optimizers'):
Gene_vars = [i for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Generator')]
Discr_vars = [i for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Discriminator')] + [theta_Discr]
Dis_optimizer = tf.train.AdamOptimizer().minimize(D_loss,var_list=Discr_vars)
Gen_optimizer = tf.train.AdamOptimizer().minimize(G_loss,var_list=Gene_vars)
mnist = input_data.read_data_sets('../../MNIST_data',one_hot=True)
if not os.path.exists('out/'):
os.makedirs('out/')
i = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for it in range(100000):
if it%100 ==0:
sample = sess.run([Gene_Sample],feed_dict={Z:sample_Z(16,Z_dim)})
fig = plot(sample)
plt.savefig('out/{}.png'.format(str(i).zfill(3)),bbox_inches='tight')
i+=1
plt.close(fig)
X_batch, _ = mnist.train.next_batch(batch_size)
_,D_loss_curr = sess.run([Dis_optimizer, D_loss], feed_dict={X:X_batch, Z:sample_Z(batch_size,Z_dim)})
_, G_loss_curr = sess.run([Gen_optimizer, G_loss], feed_dict={Z:sample_Z(batch_size,Z_dim)})
if it%100==0:
print("Iter: {}".format(it))
print("D_loss: {:.4}".format(D_loss_curr))
print("G_loss: {:.4}".format(G_loss_curr))
print()
| [
"[email protected]"
]
| |
eb64c1bd34e11a9a1bcdb5b6f0b59899137e2b03 | 3bfbb10285f484b228cf071d0c46239302df716b | /venv/Scripts/pip3-script.py | e8f450b961f0c02797ef36abc4f8f7c2644b76ca | []
| no_license | messi10hitu/RegularExp | 0599a3612d3d1eb204c1fb46b94c9e6994368f84 | 76f8af9bcdc4bbce6b75f7582df71924b13cb301 | refs/heads/master | 2021-03-15T00:34:16.005198 | 2020-03-12T10:37:25 | 2020-03-12T10:37:25 | 246,808,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!C:\Users\hitesh\PycharmProjects\RegularExp\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
]
| |
d2c8fcbbc8f09554ddb3fcbbe699c4096c534d5d | 5b4ef66e0edd872ae675042d7ff4f2891cbe27a9 | /src/config.py | b5ba1cdcf8e3c34c8d67774be029ea2a45b33204 | [
"MIT"
]
| permissive | 0xMoJo7/spiders | 5776e3a4edb9f768e85b7cb3280f5058a29466ef | c9c32fa4259101f6a6e8e0cb750739b1ba8d7386 | refs/heads/master | 2023-05-28T07:07:08.833459 | 2019-08-07T18:25:09 | 2019-08-07T18:25:09 | 201,060,758 | 0 | 0 | MIT | 2023-05-22T22:29:13 | 2019-08-07T13:54:49 | Python | UTF-8 | Python | false | false | 674 | py | click_depth = 12 # how many links deep to go into webpage
min_wait = 5 # minimum wait time between requests
max_wait = 10 # maximum wait time between requests
min_proxy_pages = 2 # number of times to use list of proxies before fetching new group
max_proxy_pages = 14
debug = True
root_urls = [
"https://simpli.fi",
]
blacklist = [
"https://t.co",
"t.umblr.com",
"messenger.com",
"itunes.apple.com",
"l.facebook.com",
"bit.ly",
"mediawiki",
".css",
".ico",
".xml",
"intent/tweet",
"twitter.com/share",
"signup",
"login",
"dialog/feed?",
".png",
".jpg",
".json",
".svg",
".gif",
"zendesk",
"clickserve",
"facebook",
"twitter"
]
| [
"[email protected]"
]
| |
03c44bbcc3cb986d47719e3b53198dc3ce277e67 | 3d752bef425e906cf0d44bd6ec1683faf53b9ff5 | /Arithmetic Game.py | 456bb95f4f24f345eb93f7bed194aac057013e2f | []
| no_license | MuhamadNawzad/Beginner-projects | 4abe2bf70eb494c4996e44413a12617a7b50d317 | 189842e0c66c36919cb4af7284509dd09c6ae06f | refs/heads/main | 2023-04-07T19:25:50.145710 | 2021-04-19T07:53:27 | 2021-04-19T07:53:27 | 359,373,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | import random
import time
class ArithmeticGame:
def __init__(self, num_questions):
self.num_questions = num_questions
def generate_questions(self):
operand1 = random.randint(0, 30)
operand2 = random.randint(0, 30)
operand = random.choice(['+', '-', '*', '//'])
if operand == '+':
answer = operand1 + operand2
if operand == '-':
answer = operand1 - operand2
if operand == '*':
answer = operand1 * operand2
if operand == '//':
answer = operand1 // operand2
question = str(operand1) + ' ' + str(operand) + ' ' + str(operand2)
return question, answer
def play_game(self):
start_time = time.time()
correct_ans = 0
for i in range(self.num_questions):
question, answer = self.generate_questions()
print(question)
user_answer = int(input('What is your answer?: '))
if answer == user_answer:
print('Your answer is correct.')
correct_ans = correct_ans + 1
else:
print('Your answer is wrong!')
end_time = time.time()
print('You answered ' + str(correct_ans) + ' questions correctly.')
print('You answered in {0:0.1f} seconds'.format(end_time - start_time))
new_game = ArithmeticGame(2)
new_game.play_game() | [
"[email protected]"
]
| |
fc13fa23c678c4876fa24591c96db49e66f9ba4b | 577797412ba7c82748a36e0af06a5918456af0be | /Raspi/src/gameObjects/Category.py | 3af4069cb53c6a72a32154e3da847f7ff3a2e455 | []
| no_license | juanmoo/Buzzer-System | 67b1ac1776d8bf3295ddfb7b187e7477fda46e6f | af9516daa2567420191e7c260f06ecc2dd14df55 | refs/heads/master | 2020-04-06T06:58:37.797413 | 2016-08-11T23:46:01 | 2016-08-12T01:19:21 | 59,366,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | class Category(object):
idNumber = 0
def __init__(self, name, questionNumber,full_points, partial_points, penalty_points):
self.questionNumber = questionNumber
self.idNumber = Category.idNumber
Category.idNumber += 1
self.name = name
"""
pointKey stores the point values of the different types of question
outcomes in the following manner:
[0, full_points, partial_points, penalty_points]
"""
self.pointKey = [full_points, partial_points, 0, penalty_points]
def __lt__(self, other):
assert isinstance(other, Category)
return self.idNumber<other.idNumber
def __str__ (self):
return self.name+"-"+str(self.idNumber)
def getQuestionNumber(self):
return self.questionNumber
| [
"[email protected]"
]
| |
3017eff3a8d21fac6867ed2bc8da08b705f9d229 | cfc415c9b247521b872bf86fd22b55b4a3ff2ee3 | /tensorflow/tools/compatibility/tf_upgrade_v2_test.py | 4b83d50036b6c4e9572b40d7b6377685f94dacc8 | [
"Apache-2.0"
]
| permissive | chengmengli06/tensorflow | f7fdb51d709e87b302d60a6dc9391cb6bbaaa3e1 | e81d0c5499eab1ae2d301c5caa128e0b69b0289b | refs/heads/master | 2021-06-24T21:54:28.571878 | 2018-11-16T06:45:48 | 2018-11-16T06:45:48 | 157,813,648 | 0 | 0 | Apache-2.0 | 2018-11-16T04:42:57 | 2018-11-16T04:42:57 | null | UTF-8 | Python | false | false | 6,225 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertTrue(report.find("Failed to parse") != -1)
def testReport(self):
text = "tf.assert_near(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
# This is not a complete test, but it is a sanity test that a report
# is generating information.
self.assertTrue(report.find("Renamed function `tf.assert_near` to "
"`tf.debugging.assert_near`"))
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay", "tf.train.piecewise_constant",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay"]:
text = "%s(a, b)\n" % decay
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, ["test.py:1: %s requires manual check." % decay])
self.assertIn("%s has been changed" % decay, report)
def testEstimatorLossReductionChange(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
"BaselineClassifier", "BaselineRegressor"
]
for c in classes:
ns = "tf.estimator." + c
text = ns + "(a, b)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, ["test.py:1: %s requires manual check." % ns])
self.assertIn("loss_reduction has been changed", report)
def testCountNonZeroChanges(self):
text = (
"tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.conj(a)\n"
upgraded = "tf.math.conj(a)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
| [
"[email protected]"
]
| |
104d93508feb5e6c8b3a020f564aa36f195f1f5d | f854d8fa7abfa3014c9657618a0bf6c5444ce701 | /test/model_Feature_AAN_only_video_Pooling.py | a70c88936341fd6b028c6536e76d1493d027879e | [
"MIT"
]
| permissive | ivyha010/AttendAffectNet | 255713cfee8c1b201d39f96c186eb76f23b58095 | 583c15070baf6ba3c95e8515466631666c07ba73 | refs/heads/main | 2022-12-25T17:25:28.273506 | 2020-10-14T02:37:58 | 2020-10-14T02:37:58 | 303,881,618 | 16 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,137 | py | # Copyright: http://nlp.seas.harvard.edu/2018/04/03/attention.html
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import seaborn
seaborn.set_context(context="talk")
# ENCODER: CLONE
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
# ENCODER
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask=None):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
# ATTENTION #
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn # means x and self_attention in MultiHeadedAttention in # 2) Apply attention on all the projected vectors in batch.
# MULTI-HEAD ATTENTION
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
# LAYER NORM
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
# SUBLAYER CONNECTION (Residual connection)
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
# POSSITION FEED-FORWARD
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, dropout=0.1): #d_ff,
super(PositionwiseFeedForward, self).__init__()
#self.w_1 = nn.Linear(d_model, d_ff)
#self.w_2 = nn.Linear(d_ff, d_model)
self.w = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w(self.dropout(x + F.tanh(x))) # self.w_2(self.dropout(F.relu(self.w_1(x))))
# ENCODER LAYER
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
# INPUT: EMBEDDING AND SOFTMAX
class Embeddings(nn.Module): # source
def __init__(self, d_model, numdims): # , numdims1, numdims2): # numdims can be number of dimensions of scr
super(Embeddings, self).__init__()
self.lut = nn.Linear(numdims, d_model)
self.d_model = d_model
self.dropout = nn.Dropout()
def forward(self, x):
x = x.float()
return self.lut(x) * math.sqrt(self.d_model) # self.lut(x) * math.sqrt(self.d_model)
# BASE: ENCODER and a FULLY CONNECTED LAYER
class Encoder_FullyConnected(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, d_model= 512, resnet50dim=2048, flownetdim=1024, i3ddim=1024): # 512
super(Encoder_FullyConnected, self).__init__()
self.reduced_1 = nn.Linear(resnet50dim, d_model)
self.reduced_2 = nn.Linear(flownetdim, d_model)
self.reduced_3 = nn.Linear(i3ddim, d_model)
self.encoder = encoder
self.linear = nn.Linear(d_model, 1) # nn.Linear(d_model, 1)
self.drop = nn.Dropout(0.1)
def forward(self, src1, src2, src3):
"Take in and process masked src and target sequences."
reduced1 = self.reduced_1(src1)
reduced2 = self.reduced_2(src2)
reduced3 = self.reduced_3(src3)
src = torch.cat((reduced1, reduced2, reduced3), dim=1)
temp = self.encoder(src) # shape: batchsize x 3 x d_model
temp_permute = temp.permute(0, 2, 1) # shape: batchsize x d_model x 3
pooling = nn.AvgPool1d(temp_permute.shape[-1]) # pooling window = 3
temp2 = pooling(temp_permute) # shape: batchsize x d_model x 1
# Many-to-one structure
temp3 = self.linear(self.drop(F.tanh(temp2.squeeze(-1))))
return temp3
# FULL MODEL
def make_model(resnet50dim=248, flownetdim=1024, i3ddim=1024, N=6, d_model=512, h=8, dropout=0.5):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model)
ff = PositionwiseFeedForward(d_model, dropout)
model = Encoder_FullyConnected(Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N), d_model, resnet50dim, flownetdim, i3ddim)
return model
| [
"[email protected]"
]
| |
7eb105d6e6a9cab22984c6db01666070c56c508b | 2bf76e30ad517adf8805a9fdb22e60c4c010eea3 | /ipypandex/tests/echo_pandas.py | 4c35a62cf638ddd4aa4f4bf2ae5ef84c977c07cf | [
"BSD-3-Clause"
]
| permissive | isabella232/ipypandex | 2be06d8be96280f110ffd063eb7f8c81a6d4dc8c | fc1023266a7e3e784595f296629f4fd827fb7d0f | refs/heads/main | 2023-02-11T20:15:02.731204 | 2021-01-06T00:41:44 | 2021-01-06T00:41:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | import pandas as pd
from IPython.utils.capture import capture_output
with capture_output() as c:
display(pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}))
print(c.outputs[0].data)
| [
"[email protected]"
]
| |
aeb34e6f1e8723cc6424c196cb99ef779f507e4d | c2081f368428e5fb684e08863ecac4f37f5717e5 | /jobapplicant/wsgi.py | 045dbc1851268e7d082365cdb2495383f2d755be | []
| no_license | agimenezpy/jobapplicant | 9148e80e3e535f7ea956992ba9c7fc0ea472b0e8 | 99ac06464a9137061c89fea0389b7c95422c29f2 | refs/heads/master | 2020-06-05T08:48:25.222470 | 2013-10-04T00:42:33 | 2013-10-04T00:42:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | """
WSGI config for jobapplicant project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jobapplicant.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"[email protected]"
]
| |
80810bf8538a097220492556fb02df2122426b9e | e4007870b4d75ba23c2f12ac6646f272cf17865c | /FFMPEG_Scripts/Video_Drawer.py | ff79049fa690bf27f94f3a7db415cde233945c49 | [
"MIT"
]
| permissive | knut0815/PythonUtility | 385ce332ff34501be7ad21ac7948eb609770e72a | 0062e1e60dc151776b963d13bc4c1763eb90d333 | refs/heads/master | 2023-01-10T09:58:14.619531 | 2020-11-10T12:22:47 | 2020-11-10T12:22:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,420 | py | import sys
import platform
import subprocess
import os
horizontal_center = 'x=(w-tw)/2'
horizontal_right_margin = 'x=(w-tw)'
vertical_bottom_margin = 'y=h-(2*lh)'
class VideoDrawer(object):
@staticmethod
def _get_font_ifp():
if platform.system() == 'windows':
font_ifp = 'C:\\Windows\\Fonts\\Arial.ttf'
else:
font_ifp = '/usr/share/fonts/truetype/freefont/FreeMono.ttf'
return font_ifp
@staticmethod
def _get_font_ifp_option():
return 'fontfile=' + VideoDrawer._get_font_ifp()
@staticmethod
def _get_font_size_option(size):
return 'fontsize=' + str(size)
@staticmethod
def _get_color_option(color):
return 'fontcolor=' + color
@staticmethod
def _get_activate_box_option():
return 'box=1'
@staticmethod
def _get_box_color_option(color):
return 'boxcolor=' + color
@staticmethod
def _get_box_with_option(width):
return 'boxborderw=' + str(width)
@staticmethod
def _get_text_option(text):
return 'text=\'' + str(text) + '\''
@staticmethod
def _get_frame_number_text_option():
return 'text=\'%{frame_num}\''
@staticmethod
def _get_start_number_option(start_number):
return 'start_number=' + str(start_number)
@staticmethod
def _get_enable_between_option(start, end, values_in_frames=True):
# This option is used to show some string only in a specific subpart of the video
# http://ffmpeg.org/ffmpeg-all.html#Expression-Evaluation
# n: the number of current processed frame, starting from 0
# t: the number of current processed frame, starting from 0
if values_in_frames:
test_variable = 'n'
else:
test_variable = 't'
return 'enable=\'between(' + test_variable + ',' + str(start) + ',' + str(end) + ')\''
@staticmethod
def _create_colon_separated_draw_options(option_list):
option_str = ''
option_str += '"' # prepend quote
option_str += 'drawtext='
for ele in option_list[:-1]:
option_str += ele + ': '
option_str += option_list[-1]
option_str += '"' # append quote
return option_str
@staticmethod
def add_text_to_video(ifp,
ofp,
text_time_interval_triples_list=None,
add_frame_numbers=True):
options = ''
options += ' ' + '-i'
options += ' ' + ifp
options += ' ' + '-vf'
font_ifp_option = VideoDrawer._get_font_ifp_option()
x_pos_option = horizontal_center
y_pos_option = vertical_bottom_margin
font_color_option = VideoDrawer._get_color_option('black')
font_size_option = VideoDrawer._get_font_size_option(20)
active_box_option = VideoDrawer._get_activate_box_option()
box_color_option = VideoDrawer._get_box_color_option('green')
box_width_option = VideoDrawer._get_box_with_option(5)
if text_time_interval_triples_list is not None:
draw_text_options = ''
for index, text_with_time_stamp in enumerate(text_time_interval_triples_list):
text_option = VideoDrawer._get_text_option(text_with_time_stamp[0])
start = text_with_time_stamp[1]
end = text_with_time_stamp[2]
enable_between_option = VideoDrawer._get_enable_between_option(start, end)
single_draw_options = VideoDrawer._create_colon_separated_draw_options(
[font_ifp_option,
text_option,
enable_between_option,
x_pos_option,
y_pos_option,
font_color_option,
font_size_option,
active_box_option,
box_color_option,
box_width_option
])
if index > 0:
draw_text_options += ',' # draw commands must be comma separated
draw_text_options += single_draw_options
options += ' ' + draw_text_options
if add_frame_numbers:
frame_number_text_option = VideoDrawer._get_frame_number_text_option()
start_number_option = VideoDrawer._get_start_number_option(0)
x_pos_option = horizontal_right_margin
draw_options = VideoDrawer._create_colon_separated_draw_options(
[font_ifp_option,
frame_number_text_option,
start_number_option,
x_pos_option,
y_pos_option,
font_color_option,
font_size_option,
active_box_option,
box_color_option,
box_width_option
])
if text_time_interval_triples_list is not None:
options += ',' + draw_options # draw commands must be comma separated
else:
options += ' ' + draw_options
options += ' ' + '-c:a'
options += ' ' + 'copy'
call_str = 'ffmpeg' + ' ' + options + ' ' + ofp
print('call_str', call_str)
subprocess.call(call_str, shell=True)
# Make sure the file has been created
assert os.path.isfile(ofp)
| [
"[email protected]"
]
| |
f36b312afc18e9f6b1941362c2dfbc66574e3deb | 98b63e3dc79c75048163512c3d1b71d4b6987493 | /tensorflow/python/keras/tests/memory_test.py | 465df84d6fef375a6f515ec1eb64815e4b74ec3f | [
"Apache-2.0"
]
| permissive | galeone/tensorflow | 11a4e4a3f42f4f61a65b432c429ace00401c9cc4 | 1b6f13331f4d8e7fccc66bfeb0b066e77a2b7206 | refs/heads/master | 2022-11-13T11:56:56.143276 | 2020-11-10T14:35:01 | 2020-11-10T14:35:01 | 310,642,488 | 21 | 12 | Apache-2.0 | 2020-11-06T16:01:03 | 2020-11-06T16:01:02 | null | UTF-8 | Python | false | false | 2,599 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory leaks in eager execution.
It is possible that this test suite will eventually become flaky due to taking
too long to run (since the tests iterate many times), but for now they are
helpful for finding memory leaks since not all PyObject leaks are found by
introspection (test_util decorators). Please be careful adding new tests here.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager.memory_tests import memory_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SingleLayerNet(keras.Model):
"""Simple keras model used to ensure that there are no leaks."""
def __init__(self):
super(SingleLayerNet, self).__init__()
self.fc1 = keras.layers.Dense(5)
def call(self, x):
return self.fc1(x)
class MemoryTest(test.TestCase):
def testMemoryLeakInSimpleModelForwardOnly(self):
if not memory_test_util.memory_profiler_is_available():
self.skipTest("memory_profiler required to run this test")
inputs = array_ops.zeros([32, 100], dtypes.float32)
net = SingleLayerNet()
def f():
with backprop.GradientTape():
net(inputs)
memory_test_util.assert_no_leak(f)
def testMemoryLeakInSimpleModelForwardAndBackward(self):
if not memory_test_util.memory_profiler_is_available():
self.skipTest("memory_profiler required to run this test")
inputs = array_ops.zeros([32, 100], dtypes.float32)
net = SingleLayerNet()
def f():
with backprop.GradientTape() as tape:
result = net(inputs)
tape.gradient(result, net.variables)
del tape
memory_test_util.assert_no_leak(f)
if __name__ == "__main__":
test.main()
| [
"[email protected]"
]
| |
8edff7f124c93cf9e0c641c9547b814688ecc5ad | e52a6dfca5667a03ca7d981687da075cfeca7b1b | /grp_18/urls.py | 5fadd527fdbaf1dc57b6948ac3c5f15d0536401e | []
| no_license | tinkercodes/grp_18 | d780c626c81ab44b836605d1a6f556f862bb5849 | 3022c91d248a78eef740404c51abdf3d072052da | refs/heads/master | 2023-07-04T00:16:36.728857 | 2021-08-09T13:40:34 | 2021-08-09T13:40:34 | 329,650,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | """grp_18 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('api.urls')),
path('',include('home.urls')),
]
| [
"[email protected]"
]
| |
9be6c9b60a6871fc27eb6f3f9518c33a42785596 | c6c6c32547ba334f75a5cc938a9c07e708670365 | /buses/migrations/0002_alter_busbooking_bus_id.py | af6ea77ae0304b32d0b5ac41d86b6f261725998a | []
| no_license | wilsonmwiti/SmartTravel | e693acb0b323d1be9ae1c58917a32ef6a418448d | 9513f0f15745f9e73e70680c5d9e5798de85be7c | refs/heads/master | 2023-09-01T14:16:28.471037 | 2021-10-14T10:55:20 | 2021-10-14T10:55:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.2.8 on 2021-10-13 05:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('buses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='busbooking',
name='bus_id',
field=models.CharField(max_length=100),
),
]
| [
"[email protected]"
]
| |
162486044551faf2aa0a85dc6dc886e6e37a3b8b | 49a2cb132930bb0ea8a352be1fc48ce32e18f801 | /day02/1.py | ce85d16f725ec5c3f4825ce1d9c42064de153e1d | []
| no_license | zkutasi/adventofcode-2018 | c26175cc2e2c750e4c5cd298796ebf2440892a5a | 3cffce64a539156be1d316868ec812d7d472c3d4 | refs/heads/master | 2020-04-12T04:00:25.607159 | 2018-12-25T16:16:50 | 2018-12-25T16:16:50 | 162,281,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | #!/usr/bin/env python
import sys
def mapper(id):
m = {}
for c in id:
if c in m.keys():
m[c] += 1
else:
m[c] = 1
return m
with open(sys.argv[1]) as f:
doubles=0
tripples=0
for line in f.readlines():
m = mapper(line)
if 2 in m.values():
doubles += 1
if 3 in m.values():
tripples += 1
print "Checksum: %s" % (doubles*tripples,)
| [
"[email protected]"
]
| |
c614d728dc81ff7d988e8e31bef916f6660284a7 | 0b787275b7389f352da1b9cc38abba8ca42540a3 | /16 excepciones.py | 5653f0cb5eedb20d5bceb648edc0557db7c233e8 | []
| no_license | jorgerojaspython/clases_de_python | 76d9eb0b3233f75fae1279b04781393ce39aa33f | df9382cabfa02c3a11a315f77e10df15da3fe7c9 | refs/heads/master | 2020-12-13T22:29:09.958678 | 2020-02-01T17:17:48 | 2020-02-01T17:17:48 | 234,550,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | try:
x=int(input("Enter a number"))
y=1/x
print(y)
except ZeroDivisionError:
print("no puede dividir para cero")
except ValueError:
print("debe ser un entero")
print("the end")
import math
x = float(input("Enter a number: "))
assert x >= 0.0
x = math.sqrt(x)
print(x)
| [
"[email protected]"
]
| |
5b7e6308dff6821aa9531435e71f9c5518ce982a | cf1a8586e0ae28b96f5dc9f129e58e0eea962794 | /__main__.py | 8a231072a221c5ff3cae082173473c9fdd2d6744 | []
| no_license | PlexHaxx/plexlabels | 091fc668dae3d098bb7d8b7e3a3a1cbded4e2cfd | b553ea3b11bdd81b36643b19b2fe54d2a1758284 | refs/heads/master | 2020-05-29T11:37:38.126858 | 2015-02-23T11:40:14 | 2015-02-23T11:40:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | __author__ = 'raphi'
from MediaInfo.MediaInfo import MediaFile
from HttpApi.XMLHandler import parseXML
from HttpApi.HttpApi import HttpConn
if __name__ == "__main__":
import sys;
overviews = []
conn = HttpConn(sys.argv[1], sys.argv[2], sys.argv[3])
interval = int(sys.argv[4])
intrusive = False
if len(sys.argv) >= 6:
if(sys.argv[5] == "intrusive"):
intrusive = True
print("make it intrusive")
if conn.testconnection():
if(interval > 0):
overviews = conn.getnew();
if(interval <= 0):
overviews = conn.getoverview();
for section in overviews:
parseXML(section, conn, intrusive, interval)
| [
"[email protected]"
]
| |
cf244cde6773aed449200ffb151ecb0d64c2134d | e8f3bd3fbb344c2005c74c41710d9bf109235727 | /小甲鱼课后习题/22/习题2.py | c5a42c9681a2b8c9b1c46dc83da34917f0c522f4 | []
| no_license | tigerruncode/Python-1 | 2ec99f44921d61f62398d17c2ef992c98c6f7611 | 6d7436b89dc86a40d38dee1f3260dd009e642c4e | refs/heads/master | 2021-04-15T18:50:55.105097 | 2018-03-18T08:15:53 | 2018-03-18T08:15:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | print('---Calculate the maximum common divisor of x and y---')
def gcd(x, y): # 使用递归算法及欧几里得算法求最大公约数
if x % y:
return gcd(y, x % y)
else:
return y
c = int(input('x:'))
d = int(input('y:'))
print(gcd(c, d))
| [
"[email protected]"
]
| |
6a8417179c00b75db42711f0e24dbf1199b787d0 | b797b310a33e7c77e768f0896f21f0f9ee7fc9d4 | /test_mysite/test_mysite/settings.py | f9f007f5051ffa0c80e1b499568702ff7b9807cc | []
| no_license | l1ghtn1ng-sec/1024 | 45c187bfc7887deb74259ca89d7891c4a3d20786 | 17b995e85fef71ccc56ed6ac7d44f061784eeef5 | refs/heads/master | 2022-04-11T18:53:14.980106 | 2020-03-04T09:10:52 | 2020-03-04T09:10:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,347 | py | """
Django settings for test_mysite project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-pj1%8_#w-pcab&uh2fmiq!zedty7gt9*dg9gop3ub_)0v4zcj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'login',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django', #数据库名字
'USER': 'root', #账号
'PASSWORD': '', #密码
'HOST': '127.0.0.1', #IP
'PORT': '3306', #端口
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
] | [
"[email protected]"
]
| |
5151b43cb8a410151868a0254c8c304e9e1e501f | c32f2e2906d815a052875b465dad4033bfb28d41 | /tunes_app/models.py | b1d0eefb5a891eb791733de8761814e13dfb8426 | []
| no_license | MetropolitanNumeric/tunes | cb9c9547f97f4ed34c0de6a8648d293adc1ee38c | 1c1fcaa1eda2f6473bd0b59bc0521da16f6de0cc | refs/heads/master | 2021-01-18T22:11:46.348065 | 2016-09-27T16:33:36 | 2016-09-27T16:33:36 | 68,855,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | from django.db import models
from django.core.urlresolvers import reverse
class Album(models.Model):
name = models.CharField(max_length=50)
artist = models.ForeignKey('Artist')
genre = models.ForeignKey('Genre')
def __str__(self):
return self.name
def get_artist_names(self):
artist_names = [artist.name for artist in self.artist.all()]
return " ".join(artist_names)
def get_absolute_url(self):
return reverse('tunes_app:album_detail', args=[self.pk])
class Track(models.Model):
name = models.CharField(max_length=50)
album = models.ForeignKey('Album')
def __str__(self):
return self.name
def get_absolute_url(self):
#return reverse('namespace:name', args=[self.pk])
pass
class Artist(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Genre(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name | [
"[email protected]"
]
| |
831b79a3092e0a4c027903556aa8d5a0da312dbb | dfa52338be02769ae4383ec81620e7c10a774dd1 | /temp.py | 3dd9d66d49ec3761c6ccfaaf7b35ecc438fecc26 | []
| no_license | shuvayan/EloquentJavascript | 345933eb7f41d1829134441d65e09053b7919391 | 99681ffe3375a45a35eaec056038d78823de76ed | refs/heads/master | 2020-12-02T21:20:00.787785 | 2017-09-08T08:58:28 | 2017-09-08T08:58:28 | 96,296,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,091 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import pandas as pd
#import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#matplotlib inline
# Import statements required for Plotly
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
#Import libraries for modelling:
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss
from imblearn.over_sampling import SMOTE
import xgboost
# Import and suppress warnings
import warnings
warnings.filterwarnings('ignore')
import os
os.chdir = 'C:/Users/shuvayan.das/Documents/AttritionModelling'
attrition = pd.read_csv('C:/Users/shuvayan.das/Documents/AttritionModelling/Attrition.csv')
attrition.head()
#Drop the employee code:
attrition.isnull().any()
#Only department has missing values,assign a seperate category to these records
attrition_df = attrition.fillna("unknown")
attrition_df.isnull().any()
attrition_df.columns.to_series().groupby(attrition_df.dtypes).groups
# The target column is in integer format,change to categorical
attrition_df['Terminated'] = attrition_df['Terminated'].astype('category')
# There are some records where the Tenure is negative or the Tenure is less than LastPromoted Time
if ((attrition_df['Tenure'] <= attrition_df['TimeLastPos']) | (attrition_df['Tenure'] <= 0)):
attrition_df['Flag_Variable'] = 1
else:
attrition_df['Flag_Variable'] = 0
attrition_df.to_csv("Attrition_processed.csv")
#Distribution of the dataset
# Plotting the KDEplots
f, axes = plt.subplots(3, 3, figsize=(10, 10), sharex=False, sharey=False)
# Defining our colormap scheme
s = np.linspace(0, 3, 10)
cmap = sns.cubehelix_palette(start=0.0, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['Age'].values
y = attrition_df['Tenure'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,0])
axes[0,0].set( title = 'Age against Tenure')
cmap = sns.cubehelix_palette(start=0.333333333333, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['Age'].values
y = attrition_df['Annual Income'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[0,1])
axes[0,1].set( title = 'Age against Annual Income')
cmap = sns.cubehelix_palette(start=0.666666666667, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['TimeLastPos'].values
y = attrition_df['Age'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[0,2])
axes[0,2].set( title = 'TimeLastPos against Age')
cmap = sns.cubehelix_palette(start=1.333333333333, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['Tenure'].values
y = attrition_df['Last Rating'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[1,1])
axes[1,1].set( title = 'Tenure against Last Rating')
cmap = sns.cubehelix_palette(start=2.0, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['Tenure'].values
y = attrition_df['Annual Income'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[2,0])
axes[2,0].set( title = 'Years at company against Annual Income')
f.tight_layout()
# 3D Plots:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = attrition_df['Tenure']
y = attrition_df['TimeLastPos']
z = attrition_df['LastRating']
c = attrition_df['Terminated']
_ = ax.scatter(xs=x, ys=y, zs=z, c=c)
_ = ax.set_xlabel('Tenure')
_ = ax.set_ylabel('Annual Income')
_ = ax.set_zlabel('LastRating')
_ = plt.title('Plot 1: Multivariate Visualization of Attrition by Color(red if left)')
plt.show()
# creating a list of only numerical values for correlation.
numerical = ['Tenure','TimeLastPos','Annual Income','Age','LastRating']
data = [
go.Heatmap(
z= attrition[numerical].astype(float).corr().values, # Generating the Pearson correlation
x=attrition[numerical].columns.values,
y=attrition[numerical].columns.values,
colorscale='Viridis',
reversescale = False,
text = True ,
opacity = 1.0
)
]
layout = go.Layout(
title='Pearson Correlation of numerical features',
xaxis = dict(ticks='', nticks=36),
yaxis = dict(ticks='' ),
width = 900, height = 700,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='labelled-heatmap')
# Define a dictionary for the target mapping
target_map = {'Yes':1.0, 'No':0.0}
# Use the pandas apply method to numerically encode our attrition target variable
attrition["Attrition_numerical"] = attrition_df["Terminated"].apply(lambda x: target_map[x])
#Pairplot Visualisations
# Refining our list of numerical variables
g = sns.pairplot(attrition[numerical], hue='Attrition_numerical', palette='seismic',
diag_kind = 'kde',diag_kws=dict(shade=True),hue = "Terminated")
g.set(xticklabels=[])
| [
"[email protected]"
]
| |
1ac83f62049fce578856131aec4108fba430100f | d2fb9166f0c8a40261715b9a06bb7a7c77fce46c | /apps/programs/models.py | 3eccd8950bc394620b5c36afc57b68880826271e | []
| no_license | surya20r/UNote | a5d205050bedb87e7011fe679f844943e39576bb | 5d034b1dcb3a6bdf307f18eb769b8dcfc5ca5448 | refs/heads/master | 2023-08-08T03:52:33.155638 | 2020-01-15T06:00:59 | 2020-01-15T06:00:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | from django.db import models
# Create your models here.
class Program (models.Model):
name = models.CharField(max_length=200)
value = models.CharField(max_length=200) | [
"[email protected]"
]
| |
ebe4f996d27d27006df85a8764fadbe018701ef2 | 3ce7471ed89bc360b35a867ec0b6712442ce1fab | /abo/backends/paymill/__init__.py | 39123b2018592752b38648c62de609a23c971931 | [
"BSD-3-Clause"
]
| permissive | kralla/django-abo | b0684e37b34655ef8ceada09220c9be8016552bc | e0fa98ec44d5ea31ff2313ca0f96ff12d6aa26e4 | refs/heads/master | 2021-01-17T20:05:08.842631 | 2015-01-05T15:49:27 | 2015-01-05T15:49:27 | 26,873,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from abo.backends import PaymentProcessorBase
default_app_config = 'abo.backends.paymill.apps.PaymillConfig'
class PaymentProcessor(PaymentProcessorBase):
BACKEND = 'abo.backends.paymill'
BACKEND_NAME = _('Paymill')
BACKEND_ACCEPTED_CURRENCY = ('EUR', 'CZK', 'DKK', 'HUF', 'ISK', 'ILS',
'LVL', 'CHF', 'NOK', 'PLN', 'SEK', 'TRY',
'GBP', )
@classmethod
def get_gateway_url(cls, request):
return reverse('abo-paymill-authorization'), "GET", {}
| [
"[email protected]"
]
| |
8ab7a399f4ab540a36f920fa8cdb90d8ca3db19b | 6b6c55c1631adb035dcf9cf92bb17eebbb738ff2 | /PAR III/update_stock.py | 07fdb1b648d0c84e8bb540b22dc4b6c4f1a4d708 | [
"MIT"
]
| permissive | Anderson-VargasQ/mecatronicaUNT_Prog2_Digitalizaci-n_del_Sistema_de_Ventas.- | def210c9b124176372118c1b5f9d2138881bcd7b | a151f13da27040440eee7ae97520e34a9dc9f70c | refs/heads/main | 2023-03-13T06:46:51.434048 | 2021-03-05T08:49:37 | 2021-03-05T08:49:37 | 344,736,478 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | import pymongo
def update_stock(codigo_producto, stock, stock_disp):
    """Update the stock counters of one product document in MongoDB.

    Args:
        codigo_producto: the product's ``_id`` in the ``bases`` collection.
        stock: new total stock value.
        stock_disp: new available-stock value.

    Exits the process with status 1 when the server cannot be reached.
    """
    import sys

    # SECURITY NOTE(review): credentials are hard-coded in the URI; move
    # them to an environment variable or config file.
    client = pymongo.MongoClient(
        "mongodb+srv://grupo_hailpy:[email protected]/Proyecto"
        "?retryWrites=true&w=majority"
    )
    try:
        # Cheap round-trip that both verifies connectivity and logs the
        # server version.
        print("MongoDB version is %s" %
            client.server_info()['version'])
    except pymongo.errors.OperationFailure as error:
        print(error)
        sys.exit(1)  # was quit(1): quit() only exists when `site` is loaded

    # Set the new counters on the matching product document.
    # (The original also created unused `db`/`my_database` aliases of
    # client.test; they were dead code and are removed.)
    my_collection = client.test.bases
    my_collection.update_one(
        {"_id": codigo_producto},  # query
        {
            "$set": {  # new data
                "stock": stock,
                "stock_disp": stock_disp
            }
        }
    )
"[email protected]"
]
| |
acd2950cfed4e0f2cc4704c1816266b24aa32f92 | 290b347513ec8bab851d6b76ff28288aaed31a27 | /fetch_posters.py | 84772c6881b932d4dad684c1ec3c2c00de891a48 | [
"MIT"
]
| permissive | pncnmnp/Movie-Recommendation | a94df105a6dae16768dae1e3df680e8b0bc0f0fa | 2103464bab5cef47e49a7821da6846ef05699ffd | refs/heads/master | 2022-08-25T03:26:12.190914 | 2019-11-18T04:26:14 | 2019-11-18T04:26:14 | 210,127,386 | 4 | 6 | MIT | 2022-06-21T22:58:14 | 2019-09-22T10:16:09 | Python | UTF-8 | Python | false | false | 940 | py | from file_paths import *
import pandas as pd
import requests
from PIL import Image
import time
import os
POSTER_BASE_URL = "https://image.tmdb.org/t/p/w185"

# Build absolute poster URLs and the list of movie ids still to fetch.
poster_df = pd.read_csv(PATH_POSTERS)
poster_df["poster_path"] = POSTER_BASE_URL + poster_df["poster_path"]
movie_ids = pd.read_csv(PATH_MOVIES)["id"].tolist()

# Download each missing poster.  BUG FIX: the loop bound was the
# hard-coded row count `range(0, 45466)`; use the actual frame length so
# the script survives dataset updates.
for i in range(len(poster_df)):
    if os.path.exists("./flask/static/posters/" + poster_df["id"][i] + ".jpg"):
        # Poster already on disk: drop the id from the pending list.
        if int(poster_df["id"][i]) in movie_ids:
            movie_ids.remove(int(poster_df["id"][i]))
            print("DUPLICATE: " + poster_df["id"][i])
        continue
    elif int(poster_df["id"][i]) in movie_ids:
        url = poster_df["poster_path"][i]
        img = Image.open(requests.get(url, stream=True).raw)
        img.save("./flask/static/posters/" + poster_df["id"][i] + ".jpg")
        print("SAVED: " + poster_df["id"][i] + "LEFT: " + str(len(movie_ids)))
        time.sleep(0.5)  # stay polite to the TMDB image CDN
        movie_ids.remove(int(poster_df["id"][i]))
    if len(movie_ids) < 1000:
        break
"[email protected]"
]
| |
bca7014dbc8741221a33e7d7a12baae97ef356c0 | a0b9b0e0184af72708cf8ba11d50d8dbbcd5d333 | /Mentee/urls.py | 714b9dbeaf1efc703132409fa78f11cbac306240 | []
| no_license | fajriefji/DJANGO_coba | d710ac2aa5602de1d0659775a2297805aeddbed9 | 5a02688d93ed820fb1dfbe2070c19973ebb90d36 | refs/heads/master | 2020-04-22T14:08:41.786488 | 2019-02-13T03:28:16 | 2019-02-13T03:28:16 | 170,433,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from django.urls import path
from .import views
# URL routes for the Mentee app: maps /Mentee to the Mentee view.
urlpatterns = [
    path('Mentee', views.Mentee, name='Mentee'),
]
"[email protected]"
]
| |
0ba939c744fe84858b93f836679c134f43f4fe14 | 21ce30e043c07052a7fa39bb9fdd6cb7fda90aad | /algorithms hackerrank/cavity map.py | 35b32a34dc7171dfe4cebf4daebd0e4c3d1f0590 | []
def cavityMap(grid):
    """Print *grid* with every cavity replaced by 'X'.

    A cavity is a non-border cell whose depth character is strictly
    greater than each of its four orthogonal neighbours.  *grid* is a
    list of rows, each row a list of single-character depth strings.
    """
    # BUG FIX: grid.copy() was a *shallow* copy, so the inner row lists
    # were shared and writing 'X' into grid1 also mutated the caller's
    # grid (and the values used for later neighbour comparisons).
    grid1 = [row.copy() for row in grid]
    for i in range(1, len(grid) - 1):
        for j in range(1, len(grid) - 1):
            if (grid[i][j] > grid[i - 1][j] and grid[i][j] > grid[i + 1][j]
                    and grid[i][j] > grid[i][j - 1] and grid[i][j] > grid[i][j + 1]):
                grid1[i][j] = "X"
    for row in grid1:
        print(''.join(row))
# HackerRank-style driver: read n, then n rows of the grid, then print
# the cavity map.  cavityMap() returns None, so `result` is always None.
n = int(input())
grid = []
for _ in range(n):
    grid_item = list(input())
    grid.append(grid_item)
result = cavityMap(grid)
| [
"[email protected]"
]
| |
80aed699a64f930d26a90b0f2021a48436486041 | 799119e8f0676280275b4252d5cb18e957ecf271 | /nets/resnet.py | c59f191bac3f55e246999567c765f60456d6e0e9 | []
| no_license | VERSPD0/Deep-Mutual-Learning | 5f842cfced8160c0ad9374ac162c43a3be3a3510 | 1cb3191e2943f85b126e0ed206804e0e9440cdb8 | refs/heads/main | 2023-04-20T17:56:48.302821 | 2021-05-11T06:57:57 | 2021-05-11T07:42:53 | 366,284,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,256 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
class Resnet(object):
    """CIFAR-style ResNet built with the TensorFlow 1.x layers API.

    Total depth is 6 * stack_num + 2 layers (e.g. stack_num=3 -> ResNet-20).
    """
    def __init__(self, is_training, keep_prob, stack_num=3, num_classes=100):
        super(Resnet, self).__init__()
        self.num_classes = num_classes
        self.is_training = is_training
        # Shared weight regularizer / initializer for every conv and dense layer.
        self.regularizer = tf.contrib.layers.l2_regularizer(scale=1e-4)
        self.initializer = tf.contrib.layers.xavier_initializer()
        self.stack_num = stack_num
        # NOTE(review): keep_prob is stored but never applied (no dropout
        # layer appears in the graph) -- confirm whether dropout was intended.
        self.keep_prob = keep_prob

    def residual_block(self, inputs, output_channel, stride=(1, 1)):
        """Two 3x3 convs plus an identity (or 1x1-projected) shortcut.

        BUG FIX: ``stride`` previously defaulted to the mutable list
        ``[1, 1]``; a tuple avoids the shared-mutable-default pitfall and
        supports the same indexing.
        """
        residual = tf.identity(inputs)
        input_channel = residual.shape[-1]
        x_width = residual.shape[-2]
        inputs = self.conv2d(inputs, output_channel, stride=stride[0])
        inputs = self.conv2d(inputs, output_channel, stride=stride[1], relu=False)
        inputs_width = inputs.shape[-2]
        # Project the shortcut with a 1x1 conv when the channel count or
        # spatial size changed on the main path.
        if input_channel != output_channel or x_width != inputs_width:
            residual = self.conv2d(residual, output_channel, kernel_size=1,
                                   stride=stride[1], relu=False)
        return tf.nn.relu(tf.add(inputs, residual))

    def conv2d(self, inputs, output_channel, kernel_size=3, stride=1, relu=True):
        """conv -> batch-norm -> (optional) ReLU."""
        inputs = tf.layers.conv2d(inputs, filters=output_channel,
                                  kernel_size=kernel_size, strides=stride,
                                  padding='same',
                                  kernel_initializer=self.initializer,
                                  kernel_regularizer=self.regularizer)
        inputs = tf.layers.batch_normalization(inputs, training=self.is_training)
        return tf.nn.relu(inputs) if relu else inputs

    def forward(self, inputs, scope):
        """Build the network graph.

        Returns ``(logits, end_points)`` where ``end_points`` maps
        'Logits' to the raw dense output and 'Predictions' to its softmax.
        """
        with tf.variable_scope(scope, 'resnet', [inputs, self.num_classes]) as scope:
            out = self.conv2d(inputs, 16)
            out = self.make_layer(out, [16, 32])
            out = self.make_layer(out, [32, 64])
            out = self.make_layer(out, [64, 64])
            out = tf.layers.average_pooling2d(out, pool_size=8, strides=1)
            out = tf.layers.flatten(out)
            predicts = tf.layers.dense(out, units=self.num_classes,
                                       kernel_initializer=self.initializer,
                                       kernel_regularizer=self.regularizer)
            softmax_out = tf.nn.softmax(predicts, name='output')
            end_points = {'Predictions': softmax_out, 'Logits': predicts}
            return predicts, end_points

    def make_layer(self, inputs, output_channel):
        """Stack ``stack_num`` residual blocks; only the last may downsample."""
        stride_2 = output_channel[1] // output_channel[0]
        for _ in range(self.stack_num - 1):
            inputs = self.residual_block(inputs, output_channel[0])
        inputs = self.residual_block(inputs, output_channel[1], stride=[1, stride_2])
        return inputs

    def loss(self, predicts, labels):
        """Cross-entropy plus the accumulated L2 regularization losses."""
        losses = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels, predicts))
        l2_reg = tf.losses.get_regularization_losses()
        losses += tf.add_n(l2_reg)
        return losses
'''
layer number :6*stack_num+2
'''
def resnet20(is_training=True, keep_prob=0.5):
    """Build the 20-layer variant (stack_num=3, i.e. 6*3+2 layers)."""
    return Resnet(is_training=is_training, keep_prob=keep_prob, stack_num=3)
def resnet32(is_training=True, keep_prob=0.5):
    """Build the 32-layer variant (stack_num=5, i.e. 6*5+2 layers)."""
    return Resnet(is_training=is_training, keep_prob=keep_prob, stack_num=5)
def resnet44(is_training=True, keep_prob=0.5):
    """Build the 44-layer variant (stack_num=7, i.e. 6*7+2 layers)."""
    return Resnet(is_training=is_training, keep_prob=keep_prob, stack_num=7)
def resnet56(is_training=True, keep_prob=0.5):
    """Build the 56-layer variant (stack_num=9, i.e. 6*9+2 layers)."""
    return Resnet(is_training=is_training, keep_prob=keep_prob, stack_num=9)
def resnet110(is_training=True, keep_prob=0.5):
    """Build the 110-layer variant (stack_num=18, i.e. 6*18+2 layers)."""
    return Resnet(is_training=is_training, keep_prob=keep_prob, stack_num=18)
if __name__=='__main__':
    # Smoke test: build a resnet56 graph on the CPU and run one random batch.
    with tf.device('/cpu:0'):
        net = resnet56()
        data = np.random.randn(64, 32, 32, 3)
        inputs = tf.placeholder(tf.float32, [64, 32, 32, 3])
        # BUG FIX: forward() requires a `scope` argument; the original
        # call `net.forward(inputs)` raised TypeError.  Passing None makes
        # tf.variable_scope fall back to the default name 'resnet'.
        predicts, end_points = net.forward(inputs, None)
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        init = tf.global_variables_initializer()
        sess = tf.Session(config=config)
        sess.run(init)
        output = sess.run(predicts, feed_dict={inputs: data})
        print(output.shape)
        sess.close()
"[email protected]"
]
| |
03a6cc2e483937f89f007060d6086be7425f4626 | 4e9d3ba19a694c25fdbfd4ed1c6ab66339674beb | /python/GafferUI/PopupWindow.py | b13e219fa456b6bdee63ed65695f9d5a99197b0f | [
"BSD-3-Clause"
]
| permissive | mcanthony/gaffer | 0a6af7856b1c2ecae5620a9f2bd04316f2df271c | 32189357fda4bc4b2e5367a06af64928c479ffaf | refs/heads/master | 2021-01-18T19:59:29.212027 | 2015-10-26T20:43:45 | 2015-10-26T20:43:45 | 45,088,868 | 2 | 0 | null | 2015-10-28T04:30:06 | 2015-10-28T04:30:04 | null | UTF-8 | Python | false | false | 7,109 | py | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
class PopupWindow( GafferUI.Window ) :
	"""A frameless, rounded, translucent popup window.

	The window is dragged by its body, resized from the bottom-right
	corner (when resizeable), fades in and out via an opacity animation,
	and can optionally close itself when the pointer leaves it.
	"""
	def __init__( self, title="GafferUI.Window", borderWidth=8, child=None, sizeMode=GafferUI.Window.SizeMode.Automatic, closeOnLeave=False, **kw ) :
		GafferUI.Window.__init__( self, title, borderWidth, child=child, sizeMode=sizeMode, **kw )
		# Frameless tool window with a translucent background, so the
		# rounded rect we paint ourselves is all that is visible.
		self._qtWidget().setWindowFlags( self._qtWidget().windowFlags() | QtCore.Qt.FramelessWindowHint | QtCore.Qt.Tool )
		self._qtWidget().setAttribute( QtCore.Qt.WA_TranslucentBackground )
		self._qtWidget().setMouseTracking( True )
		# Route the Qt events we need through WeakMethods so the Qt
		# widget doesn't keep this instance alive.
		self._qtWidget().paintEvent = Gaffer.WeakMethod( self.__paintEvent )
		self._qtWidget().mousePressEvent = Gaffer.WeakMethod( self.__mousePressEvent )
		self._qtWidget().mouseReleaseEvent = Gaffer.WeakMethod( self.__mouseReleaseEvent )
		self._qtWidget().mouseMoveEvent = Gaffer.WeakMethod( self.__mouseMoveEvent )
		self._qtWidget().enterEvent = Gaffer.WeakMethod( self.__enterEvent )
		self._qtWidget().leaveEvent = Gaffer.WeakMethod( self.__leaveEvent )
		# setVisible() will animate this to 1
		self._qtWidget().setWindowOpacity( 0 )
		self.__visibilityAnimation = None
		self.__dragOffset = None
		self.__cursor = None
		self.setCloseOnLeave( closeOnLeave )
	## Reimplemented from base class to make nice opacity animations
	def setVisible( self, visible ) :
		if visible == self.getVisible() :
			return
		self.__visibilityAnimation = _VisibilityAnimation( self._qtWidget(), visible )
		self.__visibilityAnimation.start()
	## Reimplemented from base class to account for nice opacity animations
	def getVisible( self ) :
		result = GafferUI.Window.getVisible( self )
		# account for the fact that we might be animating towards invisibility
		if self.__visibilityAnimation is not None and self.__visibilityAnimation.state() == self.__visibilityAnimation.Running :
			if GafferUI._Variant.fromVariant( self.__visibilityAnimation.endValue() ) == 0 :
				result = False
		return result
	def setCloseOnLeave( self, closeOnLeave ) :
		self.__closeOnLeave = closeOnLeave
	def getCloseOnLeave( self ) :
		return self.__closeOnLeave
	def __mousePressEvent( self, event ) :
		# Record the offset used while dragging: distance to the
		# bottom-right corner when resizing, to the top-left when moving.
		if event.button() == QtCore.Qt.LeftButton :
			if self.__cursor == QtCore.Qt.SizeFDiagCursor :
				size = self._qtWidget().size()
				self.__dragOffset = QtCore.QPoint( size.width(), size.height() ) - event.globalPos()
			else :
				self.__dragOffset = self._qtWidget().frameGeometry().topLeft() - event.globalPos()
	def __mouseReleaseEvent( self, event ) :
		if event.button() == QtCore.Qt.LeftButton :
			self.__dragOffset = None
		self.__setCursorFromPosition( event )
	def __mouseMoveEvent( self, event ) :
		# Dragging either resizes (corner cursor) or moves the window;
		# otherwise just update the cursor for hover feedback.
		if event.buttons() & QtCore.Qt.LeftButton and self.__dragOffset is not None :
			if self.__cursor == QtCore.Qt.SizeFDiagCursor :
				newSize = event.globalPos() + self.__dragOffset
				self._qtWidget().resize( newSize.x(), newSize.y() )
			else :
				self._qtWidget().move( event.globalPos() + self.__dragOffset )
		elif self.getResizeable() :
			self.__setCursorFromPosition( event )
	def __enterEvent( self, event ) :
		if self.__closeOnLeave and self.__visibilityAnimation is not None :
			if self.__visibilityAnimation.state() == self.__visibilityAnimation.Running :
				# we currently visible, but we have an animation, so we must be
				# in the process of becoming invisible. reverse that.
				self.setVisible( True )
	def __leaveEvent( self, event ) :
		self.__setCursor( None )
		if self.__closeOnLeave :
			self.setVisible( False )
	def __paintEvent( self, event ) :
		# Paint the rounded grey body (and the square resize handle in
		# the bottom-right corner when resizeable).
		painter = QtGui.QPainter( self._qtWidget() )
		painter.setRenderHint( QtGui.QPainter.Antialiasing )
		painter.setBrush( QtGui.QColor( 76, 76, 76 ) )
		painter.setPen( QtGui.QColor( 0, 0, 0, 0 ) )
		radius = self._qtWidget().layout().contentsMargins().left()
		size = self.size()
		painter.drawRoundedRect( QtCore.QRectF( 0, 0, size.x, size.y ), radius, radius )
		if self.getResizeable() :
			painter.drawRect( size.x - radius, size.y - radius, radius, radius )
	def __setCursorFromPosition( self, event ) :
		# Show the diagonal-resize cursor over the bottom-right handle.
		radius = self._qtWidget().layout().contentsMargins().left()
		size = self.size()
		p = event.pos()
		if p.x() > size.x - radius and p.y() > size.y - radius :
			self.__setCursor( QtCore.Qt.SizeFDiagCursor )
		else :
			self.__setCursor( None )
	def __setCursor( self, cursor ) :
		# Maintain a single application-level override cursor.
		if cursor == self.__cursor :
			return
		if self.__cursor is not None :
			QtGui.QApplication.restoreOverrideCursor()
		if cursor is not None :
			QtGui.QApplication.setOverrideCursor( QtGui.QCursor( cursor ) )
		self.__cursor = cursor
	def __closeIfLeft( self ) :
		# NOTE(review): not referenced anywhere in this class; appears to
		# be dead code (leaveEvent closes the window directly).
		self.close()
class _VisibilityAnimation( QtCore.QVariantAnimation ) :
	"""Animates a window's opacity towards 0 (hide) or 1 (show),
	hiding/showing the widget as the value crosses the endpoints."""
	def __init__( self, window, visible ) :
		QtCore.QVariantAnimation.__init__( self )
		self.__window = window
		startValue = self.__window.windowOpacity()
		endValue = 1.0 if visible else 0.0
		self.setStartValue( startValue )
		self.setEndValue( endValue )
		# Scale the duration by the remaining opacity delta so partial
		# fades don't take the full 500ms.
		self.setDuration( abs( startValue - endValue ) * 500 )
	def updateCurrentValue( self, value ) :
		value = GafferUI._Variant.fromVariant( value )
		self.__window.setWindowOpacity( value )
		if value == 0 :
			self.__window.hide()
		elif not self.__window.isVisible() :
			self.__window.show()
| [
"[email protected]"
]
| |
e30491020a6fd96d5b31d0024462ca68ec66ef9d | 5ab82eb488e973fe9e4020f91adbfe4c38e16158 | /média.py | a4ad4a2580b78565a17b75ae014dd3f1a3202035 | []
| no_license | Robertobappe/Introdu-o-Ci-ncia-da-Computa-o-com-Python-Parte-1 | b6251a32a9f1f9669aa471fdbd459ed977d34757 | dd6c5af437bb0c482415b824f75742fc35bb5d27 | refs/heads/main | 2023-08-10T17:28:48.226620 | 2021-10-02T17:23:35 | 2021-10-02T17:23:35 | 411,464,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | pn=int(input("Digite a primeira nota:"))
# Read three more integer grades from the user (the first, pn, is read
# above), then print the arithmetic mean of the four.
sn=int(input("Digite a segunda nota:"))
tn=int(input("Digite a terceira nota:"))
qn=int(input("Digite a quarta nota:"))
media=((pn+sn+tn+qn)/4)
print("A média aritmética é",media)
| [
"[email protected]"
]
| |
ba40efc0bfb9c633b665651d2a5df988b9473ea7 | 250d92826005352e418d9bf9f902da4f6e60b85c | /Frittie/venv-frittie/lib/python2.7/site-packages/celery/task/trace.py | 2ac1a503d98c48ce5122543e571784ff46971406 | [
"BSD-2-Clause"
]
| permissive | itamsvtd/FrittieHome | 0e4133e6d64b5d2f42824ec7854b080262a49279 | a81a708bf5c7ef2347f4a3e738a46c2776dce9ed | refs/heads/master | 2020-06-06T07:37:51.622937 | 2012-09-27T03:36:05 | 2012-09-27T03:36:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,253 | py | # -*- coding: utf-8 -*-
"""
celery.task.trace
~~~~~~~~~~~~~~~~~~~~
This module defines how the task execution is traced:
errors are recorded, handlers are applied and so on.
"""
from __future__ import absolute_import
# ## ---
# This is the heart of the worker, the inner loop so to speak.
# It used to be split up into nice little classes and methods,
# but in the end it only resulted in bad performance and horrible tracebacks,
# so instead we now use one closure per task class.
import os
import socket
import sys
from warnings import warn
from kombu.utils import kwdict
from celery import current_app
from celery import states, signals
from celery._state import _task_stack, default_app
from celery.app.task import Task as BaseTask, Context
from celery.datastructures import ExceptionInfo
from celery.exceptions import RetryTaskError
from celery.utils.serialization import get_pickleable_exception
from celery.utils.log import get_logger
# Module-level logger plus pre-bound aliases: the signal send functions,
# receiver lists and state constants are bound once here so the per-task
# hot path avoids repeated attribute lookups.
_logger = get_logger(__name__)
send_prerun = signals.task_prerun.send
prerun_receivers = signals.task_prerun.receivers
send_postrun = signals.task_postrun.send
postrun_receivers = signals.task_postrun.receivers
send_success = signals.task_success.send
success_receivers = signals.task_success.receivers
STARTED = states.STARTED
SUCCESS = states.SUCCESS
RETRY = states.RETRY
FAILURE = states.FAILURE
EXCEPTION_STATES = states.EXCEPTION_STATES
try:
    _tasks = default_app._tasks
except AttributeError:
    # Windows: will be set later by concurrency.processes.
    pass
def mro_lookup(cls, attr, stop=()):
    """Return the first class in ``cls.mro()`` whose own ``__dict__``
    defines *attr*.

    :keyword stop: iterable of classes; reaching any of them aborts the
        search immediately.
    :returns: the defining class, or ``None`` when *attr* was not found
        (or a *stop* class was reached first).
    """
    for candidate in cls.mro():
        if candidate in stop:
            break
        if attr in vars(candidate):
            return candidate
    return None
def task_has_custom(task, attr):
    """Returns true if the task or one of its bases
    defines ``attr`` (excluding the one in BaseTask)."""
    # mro_lookup returns the defining class (truthy) or None, so the
    # result is usable directly as a boolean.
    return mro_lookup(task.__class__, attr, stop=(BaseTask, object))
class TraceInfo(object):
    """Lightweight carrier for the outcome of a traced task.

    Holds the terminal ``state`` (RETRY or FAILURE) and the raw
    ``retval`` (the exception instance) and knows how to persist and
    dispatch that outcome.
    """
    __slots__ = ('state', 'retval')
    def __init__(self, state, retval=None):
        self.state = state
        self.retval = retval
    def handle_error_state(self, task, eager=False):
        # Errors are stored unless running eagerly; tasks that ignore
        # their result only store errors when explicitly configured to.
        store_errors = not eager
        if task.ignore_result:
            store_errors = task.store_errors_even_if_ignored
        return {
            RETRY: self.handle_retry,
            FAILURE: self.handle_failure,
        }[self.state](task, store_errors=store_errors)
    def handle_retry(self, task, store_errors=True):
        """Handle retry exception."""
        # the exception raised is the RetryTaskError semi-predicate,
        # and it's exc' attribute is the original exception raised (if any).
        req = task.request
        type_, _, tb = sys.exc_info()
        try:
            pred = self.retval
            einfo = ExceptionInfo((type_, pred, tb))
            if store_errors:
                task.backend.mark_as_retry(req.id, pred.exc, einfo.traceback)
            task.on_retry(pred.exc, req.id, req.args, req.kwargs, einfo)
            return einfo
        finally:
            # Drop the traceback reference to avoid a reference cycle.
            del(tb)
    def handle_failure(self, task, store_errors=True):
        """Handle exception."""
        req = task.request
        type_, _, tb = sys.exc_info()
        try:
            exc = self.retval
            einfo = ExceptionInfo((type_, get_pickleable_exception(exc), tb))
            if store_errors:
                task.backend.mark_as_failure(req.id, exc, einfo.traceback)
            task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
            signals.task_failure.send(sender=task, task_id=req.id,
                                      exception=exc, args=req.args,
                                      kwargs=req.kwargs,
                                      traceback=einfo.traceback,
                                      einfo=einfo)
            return einfo
        finally:
            # Drop the traceback reference to avoid a reference cycle.
            del(tb)
def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                 Info=TraceInfo, eager=False, propagate=False):
    """Builds a function that traces the task's execution; catches all
    exceptions, and saves the state and result of the task execution
    to the result backend.
    If the call was successful, it saves the result to the task result
    backend, and sets the task status to `"SUCCESS"`.
    If the call raises :exc:`~celery.exceptions.RetryTaskError`, it extracts
    the original exception, uses that as the result and sets the task status
    to `"RETRY"`.
    If the call results in an exception, it saves the exception as the task
    result, and sets the task status to `"FAILURE"`.
    Returns a function that takes the following arguments:
        :param uuid: The unique id of the task.
        :param args: List of positional args to pass on to the function.
        :param kwargs: Keyword arguments mapping to pass on to the function.
        :keyword request: Request dict.
    """
    # If the task doesn't define a custom __call__ method
    # we optimize it away by simply calling the run method directly,
    # saving the extra method call and a line less in the stack trace.
    fun = task if task_has_custom(task, '__call__') else task.run
    loader = loader or current_app.loader
    backend = task.backend
    ignore_result = task.ignore_result
    track_started = task.track_started
    # NOTE(review): the assignment above is immediately overwritten here;
    # the first one appears to be redundant.
    track_started = not eager and (task.track_started and not ignore_result)
    publish_result = not eager and not ignore_result
    hostname = hostname or socket.gethostname()
    # Everything used inside the closure is pre-bound to locals so the
    # per-task hot path avoids repeated attribute lookups.
    loader_task_init = loader.on_task_init
    loader_cleanup = loader.on_process_cleanup
    task_on_success = None
    task_after_return = None
    if task_has_custom(task, 'on_success'):
        task_on_success = task.on_success
    if task_has_custom(task, 'after_return'):
        task_after_return = task.after_return
    store_result = backend.store_result
    backend_cleanup = backend.process_cleanup
    pid = os.getpid()
    request_stack = task.request_stack
    push_request = request_stack.push
    pop_request = request_stack.pop
    push_task = _task_stack.push
    pop_task = _task_stack.pop
    on_chord_part_return = backend.on_chord_part_return
    from celery import canvas
    subtask = canvas.subtask
    def trace_task(uuid, args, kwargs, request=None):
        # R is the value handed back to the caller (retval on success,
        # an ExceptionInfo on failure); I is the TraceInfo (or None).
        R = I = None
        kwargs = kwdict(kwargs)
        try:
            push_task(task)
            task_request = Context(request or {}, args=args,
                                   called_directly=False, kwargs=kwargs)
            push_request(task_request)
            try:
                # -*- PRE -*-
                if prerun_receivers:
                    send_prerun(sender=task, task_id=uuid, task=task,
                                args=args, kwargs=kwargs)
                loader_task_init(uuid, task)
                if track_started:
                    store_result(uuid, {'pid': pid,
                                        'hostname': hostname}, STARTED)
                # -*- TRACE -*-
                try:
                    R = retval = fun(*args, **kwargs)
                    state = SUCCESS
                except RetryTaskError, exc:
                    I = Info(RETRY, exc)
                    state, retval = I.state, I.retval
                    R = I.handle_error_state(task, eager=eager)
                except Exception, exc:
                    if propagate:
                        raise
                    I = Info(FAILURE, exc)
                    state, retval = I.state, I.retval
                    R = I.handle_error_state(task, eager=eager)
                    [subtask(errback).apply_async((uuid, ))
                     for errback in task_request.errbacks or []]
                except BaseException, exc:
                    raise
                except:  # pragma: no cover
                    # For Python2.5 where raising strings are still allowed
                    # (but deprecated)
                    if propagate:
                        raise
                    I = Info(FAILURE, None)
                    state, retval = I.state, I.retval
                    R = I.handle_error_state(task, eager=eager)
                    [subtask(errback).apply_async((uuid, ))
                     for errback in task_request.errbacks or []]
                else:
                    # callback tasks must be applied before the result is
                    # stored, so that result.children is populated.
                    [subtask(callback).apply_async((retval, ))
                     for callback in task_request.callbacks or []]
                    if publish_result:
                        store_result(uuid, retval, SUCCESS)
                    if task_on_success:
                        task_on_success(retval, uuid, args, kwargs)
                    if success_receivers:
                        send_success(sender=task, result=retval)
                # -* POST *-
                if task_request.chord:
                    on_chord_part_return(task)
                if task_after_return:
                    task_after_return(state, retval, uuid, args, kwargs, None)
                if postrun_receivers:
                    send_postrun(sender=task, task_id=uuid, task=task,
                                 args=args, kwargs=kwargs,
                                 retval=retval, state=state)
            finally:
                pop_task()
                pop_request()
                if not eager:
                    try:
                        backend_cleanup()
                        loader_cleanup()
                    except (KeyboardInterrupt, SystemExit, MemoryError):
                        raise
                    except Exception, exc:
                        _logger.error('Process cleanup failed: %r', exc,
                                      exc_info=True)
        except Exception, exc:
            # Failure of the tracing machinery itself (not the task body).
            if eager:
                raise
            R = report_internal_error(task, exc)
        return R, I
    return trace_task
def trace_task(task, uuid, args, kwargs, request={}, **opts):
try:
if task.__trace__ is None:
task.__trace__ = build_tracer(task.name, task, **opts)
return task.__trace__(uuid, args, kwargs, request)[0]
except Exception, exc:
return report_internal_error(task, exc)
def trace_task_ret(task, uuid, args, kwargs, request=None):
    """Worker shortcut: look *task* up by name in the registry and run
    its cached tracer, returning only the result element of the pair.

    ``request`` previously defaulted to a shared mutable ``{}``; ``None``
    is safer and is normalized to an empty mapping by the tracer.
    """
    return _tasks[task].__trace__(uuid, args, kwargs, request)[0]
def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
    """Trace *task* eagerly (``eager=True`` unless overridden in opts)."""
    opts.setdefault('eager', True)
    tracer = build_tracer(task.name, task, **opts)
    return tracer(uuid, args, kwargs, request)
def report_internal_error(task, exc):
    """Warn about an exception raised by the tracing machinery itself
    (outside the task body) and return it wrapped as an ExceptionInfo."""
    _type, _value, _tb = sys.exc_info()
    try:
        # Make the exception value safe for the configured backend.
        _value = task.backend.prepare_exception(exc)
        exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
        warn(RuntimeWarning(
            'Exception raised outside body: %r:\n%s' % (
                exc, exc_info.traceback)))
        return exc_info
    finally:
        # Drop the traceback reference to avoid a reference cycle.
        del(_tb)
| [
"[email protected]"
]
| |
523c2cb44bc1f073c73a02f1fbb6973101cea56d | 9b202d1440937afdc56128c04c132e249e525955 | /jstypes/named_accessor_property.py | 95095c63176c771aad0d6ce5239a7b7c346f5821 | [
"MIT"
]
| permissive | bbraithwaite/ECMAStrict | 3730bab300a3fd9254553133b78086d5b9706c60 | d97be539a2b16d164273d86ec2ad66ff3058ba15 | refs/heads/master | 2021-03-12T23:59:50.875615 | 2015-05-29T15:39:39 | 2015-05-29T15:39:39 | 32,482,637 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from .types import Undefined, Boolean
class NamedAccessorProperty():
    """ECMAScript named accessor property: holds the [[Get]], [[Set]],
    [[Enumerable]] and [[Configurable]] attributes with their spec
    default values (accessors undefined, flags false)."""
    def __init__(self):
        """Sets the default values for the type."""
        self.__get = Undefined()
        self.__set = Undefined()
        self.__enumerable = Boolean('false')
        self.__configurable = Boolean('false')
    def get(self):
        # [[Get]] accessor (Undefined by default).
        return self.__get
    def set(self):
        # [[Set]] accessor (Undefined by default).
        return self.__set
    def enumerable(self):
        # [[Enumerable]] flag.
        return self.__enumerable
    def configurable(self):
        # [[Configurable]] flag.
        return self.__configurable
| [
"[email protected]"
]
| |
c7867baeca22849ea7b5625a957b27b04f171214 | 3dcb9b9de4e27ee0e7ece48dcd51f920638ca14d | /api/api.py | 291f60faae09d446c7bb503a005fc97f6adb87c9 | []
| no_license | chyld/flask-postgres-react-docker | 4f4a7fb52c52df6fd005af68668a1425139613b1 | e36f36cb32ae259d6472ca7813c4dfb0cb3213da | refs/heads/master | 2021-01-20T07:02:13.625385 | 2017-05-02T06:26:40 | 2017-05-02T06:26:40 | 89,951,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | from flask import Flask, jsonify, request
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from marshmallow import Schema
import os
# Application setup: Flask app with CORS enabled and a Postgres-backed
# SQLAlchemy session.  NOTE(review): DB credentials are hard-coded here;
# they should come from the environment.
app = Flask(__name__)
CORS(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:pass1234@db/animals'
db = SQLAlchemy(app)
@app.route('/hello', methods=['GET'])
def hello():
    """Debug endpoint: log every Dog row and return a fixed JSON blob."""
    print('hello, hello, hello')
    dogs = Dog.query.all()
    # schema = DogSchema()
    # result = schema.dump(dog)
    print('running...')
    for dog in dogs:
        # BUG FIX: the format string was passed as a plain positional
        # argument -- print('dog {0}:', dog) -- which printed the literal
        # placeholder; actually interpolate the dog.
        print('dog {0}:'.format(dog))
    return jsonify({'woof': 'boo'})
@app.route('/nested')
def nested():
    """Return a JSON document exercising every basic JSON type."""
    payload = {
        "a": 3,
        "b": True,
        "c": None,
        "d": "hello json",
        "e": 3.14,
        "f": [1, 2, 3],
        "g": {"x": 1, "y": 2, "z": 3},
    }
    return jsonify(payload)
@app.route('/echo', methods=['POST'])
def echo():
    """Echo the POSTed JSON body straight back to the client."""
    # import IPython
    # from IPython import embed
    # embed() # this call anywhere in your program will start IPython
    # import pdb; pdb.set_trace()
    # IPython.start_ipython()
    return jsonify(request.json)
class Dog(db.Model):
    """SQLAlchemy model for the `dogs` table (id, name, age)."""
    __tablename__ = "dogs"
    # Surrogate integer primary key.
    id = db.Column('id', db.Integer, primary_key=True)
    name = db.Column('name', db.String(100))
    age = db.Column('age', db.Integer)
    def __init__(self, name, age):
        self.name = name
        self.age = age
class DogSchema(Schema):
    """Marshmallow schema exposing the Dog columns for serialization."""
    class Meta:
        fields = ('id', 'name', 'age')
# Run the dev server; PORT must be set in the environment.
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=int(os.environ['PORT']))
| [
"[email protected]"
]
| |
3f7b73f4572985238fd69887dbe034b6bdf3b83f | b61a47202ffe716826e3498461e1243f8694a3e7 | /hesapla-arg.py | eb6bdb13f4c5bfccaba2b33c5d1059aa3ad70f5b | []
| no_license | serhatyazicioglu/Data-Science-and-Machine-Learning-Bootcamp | f4b3e4ed58c511a9187a14e50a03ae8eb8de8372 | 6584f3a83459b5674cb11f1fc89e12f99bbceee0 | refs/heads/main | 2023-03-23T02:14:58.347805 | 2021-03-16T17:40:27 | 2021-03-16T17:40:27 | 331,138,928 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | # -*- coding: UTF-8 -*-
"""
Yazdığımız her uygulama grafik arayüzüne sahip olmaz.
Bazı uygulamalar komut satırına daha uygundur ve bu uygulamalar bazı parametrelere ihtiyaç duyar.
Argparse: Terminal üzerinden yazdığımız kodlara input'lar vermemizi sağlar.
Aşağıdaki argparse fonksiyonunu terminal üzerinden çalıştırmak için örnek kullanım şu şekildedir:
python <fonk.ismi.py> --sayi1 <1.değer> --sayi2 <2.değer> --islem <işlem türü>
python hesapla-arg.py --sayi1 5 --sayi2 10 --islem carp
"""
import argparse # kütüphane yüklenmesi. (mevcut değilse pip install argparse)
# get args
ap = argparse.ArgumentParser()  # configure the argparse object
# required=True marks the argument mandatory; help= informs the user.
ap.add_argument("--sayi1", required=True, help="sayi1 giriniz! (--sayi1)")
ap.add_argument("--sayi2", required=True, help="sayi2 giriniz! (--sayi2)")
# The operation to perform on the two numbers.
ap.add_argument("--islem", required=True, help="İslem turu giriniz! (--islem=topla|cikar|carp|bol)")
# example terminal usage: python hesapla-arg.py --sayi1 5 --sayi2 10 --islem carp
args = vars(ap.parse_args())  # collect all inputs; access e.g. args["sayi1"]

try:
    # set args to vars
    sayi1 = float(args["sayi1"])
    # BUG FIX: sayi2 was converted with int() while sayi1 used float(),
    # so a fractional second operand crashed; convert both the same way.
    sayi2 = float(args["sayi2"])
    islem = args["islem"]

    print(islem + " isleminin sonucu:")  # announce the operation and result
    if islem == "topla":
        print(sayi1 + sayi2)
    elif islem == "cikar":
        print(sayi1 - sayi2)
    elif islem == "carp":
        print(sayi1 * sayi2)
    elif islem == "bol":
        print(sayi1 / sayi2)
    else:
        print("Tanımlanmamıs islem turu girdiniz!")  # unknown operation
except Exception as e:
    print("Hata var! ==> " + str(e))
| [
"[email protected]"
]
| |
6ad81372712c472af6be858b03e71adf6307e80f | 32570615ec4465c972b0297cbb88ab9441ed06ee | /sample_program/配布2/001_helloworld.py | ea50287950d5295a1fc89d1bf92d7fe639def4ef | []
def main():
    """Print 'hello world' five times (the loop breaks early at 5)."""
    count = 0
    while count < 10:
        print('hello world')  # Python 3.X
        # print 'hello world'  # Python 2.X
        count += 1
        if count == 5:
            break
# Script entry point.
if __name__ == '__main__':
    main()
| [
"[email protected]"
]
| |
9e8ce529901859670d5a4bcf1fe290375fc0056d | ca186ed749fb7d618b40dd5012124d5053c279da | /vfeedcli.py | 34b9069588c03dd0856f05b0e133f7f25a319ad9 | [
"BSD-3-Clause"
]
| permissive | rmallof/vFeed | a6ec0fe9f1c5c1220bc5c5039d62cab1cbaeaade | 2fccd83aac2cc0b3c3f5e8db7d080e63833212bf | refs/heads/master | 2021-01-18T13:17:02.368454 | 2014-07-26T08:23:01 | 2014-07-26T08:23:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,556 | py | #!/usr/bin/env python
import sys
from vfeed import vFeed, vFeedInfo, vFeedXML, vFeedUpdate, vFeedStats
'''
vFeed - Open Source Cross-linked and Aggregated Local Vulnerability Database
Wiki Documentation https://github.com/toolswatch/vFeed/wiki
'''
def get_help():
info = vFeedInfo()
print ''
print '-----------------------------------------------------------------------------'
print info.get_version()['title']
print ' version ' + info.get_version()['build']
print ' ' + info.get_owner()['website']
print '-----------------------------------------------------------------------------'
print ''
print '[usage 1]: python' + str(sys.argv[0]) + ' <Method> <CVE>'
print '[info] Available vFeed methods:'
print 'Information ==> get_cve | get_cpe | get_cwe | get_capec | get_category | get_iavm'
print 'References ==> get_refs | get_scip | get_osvdb | get_certvn | get_bid'
print 'Risk ==> get_risk | get_cvss'
print 'Patchs 1/2 ==> get_ms | get_kb | get_aixapar | get_redhat | get_suse | get_debian | get_hp'
print 'Patchs 2/2 ==> get_mandriva | get_cisco | get_ubuntu | get_gentoo | get_fedora | get_vmware'
print 'Assessment ==> get_oval | get_nessus | get_openvas '
print 'Defense ==> get_snort | get_suricata'
print 'Exploitation ==> get_milw0rm | get_edb | get_saint | get_msf'
print ''
print '----------'
print '[usage 2]: python ' + str(sys.argv[0]) + ' export <CVE>'
print '[info]: This method will export the CVE as vFeed XML format'
print ''
print '----------'
print '[usage 3]: python ' + str(sys.argv[0]) + ' stats or latest_cve'
print '[info]: Available stats methods'
print 'Global statistics ==> stats'
print 'Latest Added CVEs ==> latest_cve '
print ''
print '----------'
print '[Update]: python ' + str(sys.argv[0]) + ' update'
print '[info]: This method will update the SQLite vfeed database to its latest release'
exit(0)
def call_get_cve(vfeed):
cveInfo = vfeed.get_cve()
if cveInfo:
print '[cve_description]:', cveInfo['summary']
print '[cve_published]:', cveInfo['published']
print '[cve_modified]:', cveInfo['modified']
def call_get_cvss(vfeed):
cvssScore = vfeed.get_cvss()
if cvssScore:
print '[cvss_base]:', cvssScore['base']
print '[cvss_impact]:', cvssScore['impact']
print '[cvss_exploit]:', cvssScore['exploit']
print '[AV (access vector)]:', cvssScore['access_vector']
print '[AC (access complexity)]:', cvssScore['access_complexity']
print '[Au (authentication)]:', cvssScore['authentication']
print '[C (confidentiality impact)]:', cvssScore['confidentiality_impact']
print '[I (integrity impact)]:', cvssScore['integrity_impact']
print '[A (availability impact)]:', cvssScore['availability_impact']
def call_get_refs(vfeed):
cveRef = vfeed.get_refs()
for i in range(0, len(cveRef)):
print '[reference_id]:', cveRef[i]['id']
print '[reference_link]', cveRef[i]['link']
print ''
print '[stats] %d Reference(s)' % len(cveRef)
def call_get_osvdb(vfeed):
cveOSVDB = vfeed.get_osvdb()
for i in range(0, len(cveOSVDB)):
print '[osvdb_id]:', cveOSVDB[i]['id']
print ''
print '[stats] %d OSVDB id(s)' % len(cveOSVDB)
def call_get_scip(vfeed):
cveSCIP = vfeed.get_scip()
for i in range(0, len(cveSCIP)):
print '[scip_id]:', cveSCIP[i]['id']
print '[scip_link]', cveSCIP[i]['link']
print ''
print '[stats] %d Scip id(s)' % len(cveSCIP)
def call_get_bid(vfeed):
cveBID = vfeed.get_bid()
for i in range(0, len(cveBID)):
print '[bid_id]:', cveBID[i]['id']
print '[bid_link]', cveBID[i]['link']
print ''
print '[stats] %d BID id(s)' % len(cveBID)
def call_get_certvn(vfeed):
cveCERTVN = vfeed.get_certvn()
for i in range(0, len(cveCERTVN)):
print '[certvn_id]:', cveCERTVN[i]['id']
print '[certvn_link]', cveCERTVN[i]['link']
print ''
print '[stats] %d CERT-VN id(s)' % len(cveCERTVN)
def call_get_iavm(vfeed):
cveIAVM = vfeed.get_iavm()
for i in range(0, len(cveIAVM)):
print '[iavm_title]', cveIAVM[i]['title']
print '[iavm_id]:', cveIAVM[i]['id']
print '[disa_key]:', cveIAVM[i]['key']
print ''
print '[stats] %d Iavm id(s)' % len(cveIAVM)
def call_get_cwe(vfeed):
cveCWE = vfeed.get_cwe()
for i in range(0, len(cveCWE)):
print '[cwe_id]:', cveCWE[i]['id']
print '[cwe_title]:', cveCWE[i]['title']
print ''
print '[stats] %d CWE id(s) ' % len(cveCWE)
def call_get_capec(vfeed):
cveCAPEC = vfeed.get_capec()
#get_cwe is invoked because CAPEC is related to CWE base
cveCWE = vfeed.get_cwe()
for i in range(len(cveCWE), len(cveCAPEC) + len(cveCWE)):
print '[capec_id]: %s associated with %s ' %(cveCAPEC[i]['id'],cveCAPEC[i]['cwe'])
print ''
print '[stats] %d CAPEC id(s) ' % len(cveCAPEC)
def call_get_category(vfeed):
cveCATEGORY = vfeed.get_category()
#get_cwe is invoked because CAPEC is related to CWE base
cveCWE = vfeed.get_cwe()
for i in range(len(cveCWE), len(cveCATEGORY) + len(cveCWE)):
print '[category] : %s --> %s ' %(cveCATEGORY[i]['id'],cveCATEGORY[i]['title'])
print ''
def call_get_cpe(vfeed):
cveCPE = vfeed.get_cpe()
for i in range(0, len(cveCPE)):
print '[cpe_id]:', cveCPE[i]['id']
print ''
print '[stats] %d CPE id(s)' % len(cveCPE)
def call_get_oval(vfeed):
cveOVAL = vfeed.get_oval()
for i in range(0, len(cveOVAL)):
print '[oval_id]:', cveOVAL[i]['id']
print '[oval_file]:', cveOVAL[i]['file']
print ''
print '[stats] %d OVAL Definition id(s)' % len(cveOVAL)
def call_get_snort(vfeed):
cveSnort = vfeed.get_snort()
for i in range(0, len(cveSnort)):
print '[snort_id]:', cveSnort[i]['id']
print '[snort_signature]:', cveSnort[i]['signature']
print '[snort_classtype]:', cveSnort[i]['classtype']
print ''
print '[stats] %d Snort Rule(s)' % len(cveSnort)
def call_get_suricata(vfeed):
cveSuricata = vfeed.get_suricata()
for i in range(0, len(cveSuricata)):
print '[suricata_id]:', cveSuricata[i]['id']
print '[suricata_signature]:', cveSuricata[i]['signature']
print '[suricata_classtype]:', cveSuricata[i]['classtype']
print ''
print '[stats] %d Suricata Rule(s)' % len(cveSuricata)
def call_get_nessus(vfeed):
cveNessus = vfeed.get_nessus()
for i in range(0, len(cveNessus)):
print '[nessus_id]:', cveNessus[i]['id']
print '[nessus_name]:', cveNessus[i]['name']
print '[nessus_file]:', cveNessus[i]['file']
print '[nessus_family]:', cveNessus[i]['family']
print ''
print '[stats] %d Nessus testing script(s)' % len(cveNessus)
def call_get_openvas(vfeed):
cveOpenvas = vfeed.get_openvas()
for i in range(0, len(cveOpenvas)):
print '[openvas_id]:', cveOpenvas[i]['id']
print '[openvas_name]:', cveOpenvas[i]['name']
print '[openvas_file]:', cveOpenvas[i]['file']
print '[openvas_family]:', cveOpenvas[i]['family']
print ''
print '[stats] %d OpenVAS testing script(s)' % len(cveOpenvas)
def call_get_edb(vfeed):
cveEDB = vfeed.get_edb()
for i in range(0, len(cveEDB)):
print '[edb_id]:', cveEDB[i]['id']
print '[edb_file]:', cveEDB[i]['file']
print '[edb_link]:', cveEDB[i]['link']
print ''
print '[stats] %d ExploitDB id(s)' % len(cveEDB)
def call_get_milw0rm(vfeed):
cveMILW = vfeed.get_milw0rm()
for i in range(0, len(cveMILW)):
print '[milw0rm_id]:', cveMILW[i]['id']
print ''
print '[stats] %d Milw0rm id(s)' % len(cveMILW)
def call_get_saint(vfeed):
cveSAINT = vfeed.get_saint()
for i in range(0, len(cveSAINT)):
print '[saintexploit_id]:', cveSAINT[i]['id']
print '[saintexploit_title]:', cveSAINT[i]['title']
print '[saintexploit_file]:', cveSAINT[i]['file']
print ''
print '[stats] %d SaintExploit id(s)' % len(cveSAINT)
def call_get_msf(vfeed):
cveMSF = vfeed.get_msf()
for i in range(0, len(cveMSF)):
print '[msf_id]:', cveMSF[i]['id']
print '[msf_title]:', cveMSF[i]['title']
print '[msf_file]:', cveMSF[i]['file']
print ''
print '[stats] %d Metasploit Exploits/Plugins' % len(cveMSF)
def call_get_ms(vfeed):
cveMS = vfeed.get_ms()
for i in range(0, len(cveMS)):
print '[Microsoft_ms_id]:', cveMS[i]['id']
print '[Microsoft_ms_title]:', cveMS[i]['title']
print ''
print '[stats] %d Microsoft MS Patch(s)' % len(cveMS)
def call_get_kb(vfeed):
cveKB = vfeed.get_kb()
for i in range(0, len(cveKB)):
print '[Microsoft_kb_id]:', cveKB[i]['id']
print '[Microsoft_kb_id]:', cveKB[i]['title']
print ''
print '[stats] %d Microsoft KB bulletin(s)' % len(cveKB)
def call_get_aixapar(vfeed):
cveAIX = vfeed.get_aixapar()
for i in range(0, len(cveAIX)):
print '[IBM_AIXAPAR_id]:', cveAIX[i]['id']
print ''
print '[stats] %d IBM AIX APAR(s)' % len(cveAIX)
def call_get_redhat(vfeed):
cveRHEL, cveBUGZILLA = vfeed.get_redhat()
for i in range(0, len(cveRHEL)):
print '[redhat_id]:', cveRHEL[i]['id']
print '[redhat_patch_title]:', cveRHEL[i]['title']
print '[redhat_oval_id]:', cveRHEL[i]['oval']
print ''
print '[stats] %d Redhat id(s)' % len(cveRHEL)
print ''
for i in range(0, len(cveBUGZILLA)):
print '[redhat_bugzilla_issued]:', cveBUGZILLA[i]['date_issue']
print '[redhat_bugzilla__id]:', cveBUGZILLA[i]['id']
print '[redhat_bugzilla__title]:', cveBUGZILLA[i]['title']
print ''
print '[stats] %d Bugzilla id(s)' %len(cveBUGZILLA)
def call_get_suse(vfeed):
cveSUSE = vfeed.get_suse()
for i in range(0, len(cveSUSE)):
print '[suse_id]:', cveSUSE[i]['id']
print ''
print '[stats] %d Suse id(s)' % len(cveSUSE)
def call_get_cisco(vfeed):
cveCISCO = vfeed.get_cisco()
for i in range(0, len(cveCISCO)):
print '[cisco_id]:', cveCISCO[i]['id']
print ''
print '[stats] %d Cisco id(s)' % len(cveCISCO)
def call_get_ubuntu(vfeed):
cveUBUNTU = vfeed.get_ubuntu()
for i in range(0, len(cveUBUNTU)):
print '[ubuntu_id]:', cveUBUNTU[i]['id']
print ''
print '[stats] %d Ubuntu id(s)' % len(cveUBUNTU)
def call_get_gentoo(vfeed):
cveGENTOO = vfeed.get_gentoo()
for i in range(0, len(cveGENTOO)):
print '[gentoo_id]:', cveGENTOO[i]['id']
print ''
print '[stats] %d Gentoo id(s)' % len(cveGENTOO)
def call_get_fedora(vfeed):
cveFEDORA = vfeed.get_fedora()
for i in range(0, len(cveFEDORA)):
print '[fedora_id]:', cveFEDORA[i]['id']
print ''
print '[stats] %d Fedora id(s)' % len(cveFEDORA)
def call_get_debian(vfeed):
cveDEBIAN = vfeed.get_debian()
for i in range(0, len(cveDEBIAN)):
print '[debian_id]:', cveDEBIAN[i]['id']
print ''
print '[stats] %d Debian id(s)' % len(cveDEBIAN)
def call_get_mandriva(vfeed):
cveMANDRIVA = vfeed.get_mandriva()
for i in range(0, len(cveMANDRIVA)):
print '[mandriva_id]:', cveMANDRIVA[i]['id']
print ''
print '[stats] %d Mandriva id(s)' % len(cveMANDRIVA)
def call_get_vmware(vfeed):
cveVMWARE = vfeed.get_vmware()
for i in range(0, len(cveVMWARE)):
print '[vmware_id]:', cveVMWARE[i]['id']
print ''
print '[stats] %d VMware id(s)' % len(cveVMWARE)
def call_get_hp(vfeed):
cveHP = vfeed.get_hp()
for i in range(0, len(cveHP)):
print '[hp_id]:', cveHP[i]['id']
print '[hp_link]', cveHP[i]['link']
print ''
print '[stats] %d HP id(s)' % len(cveHP)
def call_get_risk(vfeed):
cveRISK = vfeed.get_risk()
cvssScore = vfeed.get_cvss()
print 'Severity:', cveRISK['severitylevel']
print 'Top vulnerablity:', cveRISK['topvulnerable']
print '\t[cvss_base]:', cvssScore['base']
print '\t[cvss_impact]:', cvssScore['impact']
print '\t[cvss_exploit]:', cvssScore['exploit']
print 'PCI compliance:', cveRISK['pciCompliance']
print 'is Top alert:', cveRISK['topAlert']
def main():
if len(sys.argv) == 3:
myCVE = sys.argv[2]
apiMethod = sys.argv[1]
if apiMethod == "export":
vfeed = vFeedXML(myCVE)
vfeed.export()
exit(0)
vfeed = vFeed(myCVE)
try:
globals()['call_%s' % apiMethod](vfeed)
except:
print'[error] the method %s is not implemented' % apiMethod
else:
exit(0)
elif len(sys.argv) == 2:
apiMethod = sys.argv[1]
if apiMethod == "update":
db = vFeedUpdate()
db.update()
exit(0)
if apiMethod == "stats":
stat = vFeedStats()
stat.stats()
exit(0)
if apiMethod == "latest_cve":
stat = vFeedStats()
stat.latest_cve()
exit(0)
else:
get_help()
else:
get_help()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
36a255bdfa0ae4e48cd1170b522ea6ada667d61b | 07e03f73ea68b22d10c92a0e61a86a42ff32d6fd | /Gerador de Planilhas para Memorion v4.py | a423fc21ce700d40c5aaebd5ca4d8826c056e7bb | []
| no_license | Erick-Faster/Projeto-Tradutor | d76b69fcf3d774aeb21573fcdcce62ac4b05706a | e0c53086879d9d9eb99c5e659eba4b6a44bc773f | refs/heads/master | 2020-12-19T06:16:12.003793 | 2020-01-22T19:08:54 | 2020-01-22T19:08:54 | 235,645,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,208 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 2019
Finished on Tue Jun 11 2019
###########FEATURES#############
-- Monta planilha de excel contendo
-- Palavras inseridas em alemao
-- Traducao das palavras
-- Genero dos Substantivos
-- 2 Exemplos de aplicacao
-- Tipo (substantivo, verbo, etc...)
-- Extração de dados do Pons e Reverso Context
-- Formato de planilha para ser inserido no Memorion
-- Formatação para que os sites leiam umlauts e eszetts
-- Extração de dados em xlsx e csv
-- Busca por arquivo base, dando nome como entrada
-- Escolhe nome para arquivo de saida
@author: Faster-PC
"""
import openpyxl, os, re
import pandas as pd
from selenium import webdriver
from unidecode import unidecode
'''
###################################
Funcoes
##################################
'''
#Coleta o formato de arquivo especifico
def Coleta(nomeBase,tipo):
if tipo == 1: #Se for um csv
base = pd.read_csv(nomeBase+'.csv',encoding='latin-1') #Latin-1 para corrigir erro com caracteres
elif tipo == 2: #Se for formato Excel
base = pd.read_excel(nomeBase+'.xlsx')
else:
palavras = ['Tisch','Tasche','Auto']
return palavras
palavras = base.iloc[:,0] #Cliva a primeira coluna
palavras = list(palavras) #Converte o DataFrame para Lista
return palavras
#Converte caracteres estranhos
def Converte(palavras,idioma):
regex = re.compile(r'[äöüÄÖÜß]') #Cita regras. Localiza caracteres entre []
if idioma == 'de':
for i in range(len(palavras)):
Verificador = False #Criterio para manter o looping
while Verificador == False: #Garante que todos os caracteres especiais sejam encontrados
try:
mo = regex.search(palavras[i]) #Procura em 'palavras' de acordo com regra
aux = mo.group() #caractere especial encontrado
span = mo.span() #posicao do caractere especial
palavraAux = list(palavras[i]) #Transforma string em lista
#Converte caractere especial em forma apropriada
if aux == 'Ä':
palavraAux[span[0]] = 'Ae'
pass
elif aux == 'Ö':
palavraAux[span[0]] = 'Oe'
pass
elif aux == 'Ü':
palavraAux[span[0]] = 'Ue'
pass
elif aux == 'ä':
palavraAux[span[0]] = 'ae'
pass
elif aux == 'ö':
palavraAux[span[0]] = 'oe'
pass
elif aux == 'ü':
palavraAux[span[0]] = 'ue'
pass
elif aux == 'ß':
palavraAux[span[0]] = 'ss'
pass
else:
print('ERROR')
pass
palavras[i] = ''.join(palavraAux) #transforma lista em string de novo
print('Conversao de %s bem sucedido!'%palavras[i])
palavraAux.clear() #elimina lista
except:
Verificador = True #Encerra busca
continue #Se nao encontrar, vai para o proximo caso
else: #Para todos os outros idiomas
for i in range(len(palavras)):
palavras[i] = unidecode(palavras[i]) #Remove acentos e caracteres especiais
return palavras
#Coleta Exemplos e Traducoes do Reverso Context
def Reverso(palavras,idiomaBase):
if idiomaBase == 'de':
idiomaB = 'deutsch'
pass
elif idiomaBase == 'fr':
idiomaB = 'franzosisch'
pass
elif idiomaBase == 'en':
idiomaB = 'englisch'
pass
elif idiomaBase == 'es':
idiomaB = 'spanisch'
pass
exemplos = [] #Vetor temporario
exemploFinal = [] #Vetor permanente
traducoes = []
traducaoFinal = []
for i in range (len(palavras)): #acao para cada palavra
browser.get("https://context.reverso.net/%C3%BCbersetzung/"+idiomaB+"-portugiesisch/"+palavras[i]) #site no qual informacao eh extraida
'''
exemplos
'''
try:
frases = browser.find_elements_by_class_name('text') #Encontra todos os elementos de frases
#Converte dados das frases de Web para String
for j in range (len(frases)):
exemplos.append(frases[j].text)
#Elimina vazios existentes no vetor temporario
for j in range (len(exemplos)):
try:
exemplos.remove("") #Remove todos os vazios da string
except:
break
#Confere se nao ha Typo
k = 0
if exemplos[0] == 'Meinst Du:':
k = 1
#Separa frases desejadas
exemplo = [exemplos[k],exemplos[k+1]," ~~ ",exemplos[k+2],exemplos[k+3]] #Seleciona as 2 primeiras frases
#Une vetor em uma unica String
stringExemplo = " | " #Separador entre cada elemento do vetor
stringExemplo = stringExemplo.join(exemplo) #Transforma vetor em uma string unica
#Adicionar string no vetor permanente
exemploFinal.append(stringExemplo)
print("Exemplo para %s processado!" %palavras[i])
exemplos = [] #zera vetor temporario
except:
exemploFinal.append("ERROR")
'''
Traducoes
'''
try:
traducaoWEB = browser.find_elements_by_class_name('translation')
for j in range (len(traducaoWEB)):
traducoes.append(traducaoWEB[j].text)
#Elimina vazios existentes no vetor temporario
for j in range (len(traducoes)):
try:
traducoes.remove("") #Remove todos os vazios da string
except:
break
if len(traducoes) > 1:
traducao = traducoes[0]+", "+traducoes[1]
else:
traducao = traducoes[0]
traducaoFinal.append(traducao)
print("Traducao adicionada: %s\n" %traducao)
traducoes = []
except:
traducaoFinal.append("ERROR")
return exemploFinal, traducaoFinal
#Coleta artigos classes e erros do site Pons
def Pons (palavras,idiomaBase):
for i in range (len(palavras)): #Repete de acordo com a qtde de palavras
browser.get("https://de.pons.com/%C3%BCbersetzung?q="+palavras[i]+"&l="+idiomaBase+"en&in=&lf=de&qnac=") #Entra no site PONS
print(palavras[i])
#Busca pelo genero
try:
artigo = browser.find_element_by_class_name('genus') #Busca genero
if artigo.text == "m":
artigos.append("Der")
pass
elif artigo.text == "f":
artigos.append("Die")
pass
elif artigo.text == "nt":
artigos.append("Das")
pass
else:
artigos.append("ERROR")
pass
print("Artigo: %s" %artigo.text)
except: #Comum quando nao eh um substantivo
artigos.append("") #Nao retorna artigo nenhum
#Busca pela classe/tipo da palavra (subst, verbo, adjetivo, etc)
try:
classe = browser.find_element_by_class_name('wordclass') #Busca classe
classes.append(classe.text) #add classe
print("Classe: %s\n" %classe.text)
except:
classes.append("ERROR")
#Verifica a possibilidade de possiveis erros
try:
erro = browser.find_element_by_tag_name('strong') #Procura na tag <strong>
erro = erro.text #atribui texto na variavel
regex = re.compile(r'(Meinten Sie vielleicht:)\s(\w+)') #Cria regra para padrao
mo = regex.search(erro) #procura padrao
auxErro = mo.group(1) #Valor que sera except caso nao seja encontrado
auxSugestao = mo.group(2) #Sugestao de palavra dada pelo Pons
if auxErro == 'Meinten Sie vielleicht:': #Caso o erro seja positivo
erros.append("WARNING -> %s"%auxSugestao) #Retorna erro com sugestao
else:
erros.append("") #Nao retorna nada
except:
erros.append("")
return artigos, classes, erros
#Funcao que insere tudo em um vetor final e salva no Excel no formato FlashCards do Memorion
def SalvarExcel(nomeArquivo,palavrasFinais,traducoes,artigos,exemplos,classes,erros):
vetorFinal = [] #Informacoes que irao para o Excel
for i in range(len(palavras)):
vetorFinal.append([traducoes[i],palavrasFinais[i],artigos[i],exemplos[i],classes[i],erros[i]]) #Add palavra, artigo, classe e exemplos
workbook = openpyxl.Workbook() #Cria arquivo Excel
for i in range (len(vetorFinal)): #Qtde de elementos do vetor final
workbook.active.append(vetorFinal[i]) #Add vetor, linha por linha
os.chdir('C:\\Users\\Faster-PC\\MyPythonFiles') #Seleciona Diretorio
#Verifica se o arquivo ja existe
savePoint = os.path.isfile('./'+nomeArquivo+'.xlsx')
if savePoint == False: #Caso nao exista, salvara nele msm
workbook.save(nomeArquivo+'.xlsx') #Salva Excel
print('%s.xlsx criado com sucesso!'%nomeArquivo)
else: #Caso ja exista
save = 2 #Valor atribuido ao nome do arquivo
saveStg = str(save) #Transforma int em String
#Condicao de parada
while savePoint == True: #Enquanto existir um arquivo igual
savePoint = os.path.isfile('./'+nomeArquivo+saveStg+'.xlsx') #Busca arquivo com numero na frente
if savePoint == False: #Se nao existir
workbook.save(nomeArquivo+saveStg+'.xlsx') #Salva Excel com numero
savePoint = False #Parou
print('%s%s.xlsx criado com sucesso!'%(nomeArquivo,saveStg))
else: #Se ainda existir
save = save + 1 #Add um numero ao arquivo
saveStg = str(save) #Transforma o numero em String
def GUI():
root.title("Gerador de FlashCards") #Titulo do programa
mainframe = ttk.Frame(root, padding="3 3 12 12") #Espacos extras nas 4 direcoes
mainframe.grid(column=0, row=0, sticky=(N, W, E, S)) #Dimensoes do frame principal
root.columnconfigure(0, weight=1) #coluna 0 possui 1 espaco garantido
root.rowconfigure(0, weight=1) #linha 0 possui um espaco garantido
#variaveis
nomeBase = StringVar()
nomeArquivo = StringVar()
idiomaBase = StringVar()
teste = StringVar()
nomeEntrada_entry = ttk.Entry(mainframe, width = 20, textvariable=nomeBase)
nomeEntrada_entry.grid(column=2,row=1,sticky=(W,E))
nomeSaida_entry = ttk.Entry(mainframe, width = 20, textvariable=nomeArquivo)
nomeSaida_entry.grid(column=2,row=3, sticky=(W,E))
ttk.Label(mainframe, text="Qual o nome do arquivo?").grid(column=1, row=1, sticky=W)
ttk.Label(mainframe, text="Idioma:").grid(column=1, row=2, sticky=W)
ttk.Label(mainframe, text="Qual o nome da Saida?").grid(column=1, row=3, sticky=W)
ttk.Label(mainframe, textvariable=teste).grid(column=1, row=4, sticky=W)
ttk.Radiobutton(mainframe, text='De', variable=idiomaBase, value='de').grid(column=2, row=2, sticky=W)
ttk.Radiobutton(mainframe, text='Fr', variable=idiomaBase, value='fr').grid(column=2, row=2)
ttk.Radiobutton(mainframe, text='Es', variable=idiomaBase, value='es').grid(column=2, row=2, sticky=E)
ttk.Button(mainframe, text="Fechar", command=root.destroy).grid(column=2, row=5, sticky=E)
ttk.Button(mainframe, text="OK", command=funcaoTeste).grid(column=2, row=4, sticky=E)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5) #Para cada grid, deixa um espacinho
nomeEntrada_entry.focus() #Inicia comando na primeira caixa de entrada
root.bind('<Return>', funcaoTeste) #Ativa 'Enter' para o botao
'''
############################################################
AQUI COMECA O MAIN
############################################################
'''
root = Tk()
GUI()
root.mainloop()
'''
GUI
'''
from tkinter import *
from tkinter import ttk
def funcaoTeste(*args):
try:
if idiomaBase.get() == 'de':
teste.set('DEUTSCH')
pass
elif idiomaBase.get() == 'fr':
teste.set('FRANÇAIS')
pass
elif idiomaBase.get() == 'es':
teste.set('ESPAÑOL')
pass
else:
value = nomeArquivo.get()
teste.set(value)
pass
except:
teste.set('ERROR')
pass
nomeBase = nomeBase.get()
nomeArquivo = nomeArquivo.get()
idiomaBase = idiomaBase.get()
'''
Tipos de dados que serao extraidos
'''
palavrasFinais = []
artigos = []
classes = []
exemplos = []
traducoes = []
erros = []
'''
Questionario
'''
while True:
VerificaCSV = os.path.isfile('./'+nomeBase+'.csv')
VerificaXLSX = os.path.isfile('./'+nomeBase+'.xlsx')
if VerificaCSV == True and VerificaXLSX == False:
tipo = 1
break
elif VerificaCSV == False and VerificaXLSX == True:
tipo = 2
break
elif VerificaCSV == True and VerificaXLSX == True:
tipo = int(input("Qual o formato da fonte? [1]csv , [2]xlsx : "))
break
else:
print("Arquivo nao encontrado. Atribuindo teste")
tipo = 3
break
'''
Codigo de Coleta de palavras
'''
palavras = Coleta(nomeBase,tipo) #Coleta palavras de csv[1] ou excel[2]
palavrasFinais = palavras[:] #Cria nova lista de palavras nao convertidas, para ir na tabela final
palavras = Converte(palavras,idiomaBase) #Retira umlauts e eszetts
'''
Codigo de busca no Pons e Reverso
'''
browser = webdriver.PhantomJS() #Chama Navegador fantasma
artigos, classes, erros = Pons(palavras,idiomaBase) #Elementos que usam o Pons
exemplos, traducoes = Reverso(palavras,idiomaBase) #Elementos que usam o Reverso Context
browser.close() #Fecha navegador fantasma
'''
Salvando arquivo
'''
SalvarExcel(nomeArquivo,palavrasFinais,traducoes,artigos,exemplos,classes,erros)
'''
########################################
FIM DO CODIGO
########################################
'''
'''Observacoes'''
| [
"[email protected]"
]
| |
9657b3ceec8c66aed46b44498f1668e29d1b6871 | 3b09dc4623dac559c85c0333526d55b0615d79d7 | /problems/56.py | 94bcfe31fbef92738fe0088cba102cb331404cf7 | []
| no_license | Asperas13/leetcode | 5d45bd65c490ada9b3cb2c33331a728eab2ef9b4 | 7f2f1d4f221925945328a355d653d9622107fae7 | refs/heads/master | 2021-09-28T15:54:54.761873 | 2020-05-05T15:29:48 | 2020-05-05T15:30:59 | 145,767,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
if len(intervals) < 2:
return intervals
intervals.sort(key=lambda a: a[0])
prev = intervals[0]
result = []
for i in range(1, len(intervals)):
if intervals[i][0] >= prev[0] and intervals[i][0] <= prev[1]:
prev[0] = min(prev[0], intervals[i][0])
prev[1] = max(prev[1], intervals[i][1])
else:
result.append(prev)
prev = intervals[i]
result.append(prev)
return result | [
"[email protected]"
]
| |
2e24bb1da5abc68896108ac8b9934925cd0b5c5e | aa0c7bb4935ff68bb4ba2be4332890b760c9dda2 | /ipcv/scalespace.py | 998699a9f9b1b62a439bf745940a9dd6c314086b | [
"MIT"
]
| permissive | andersbll/ipcv | 0b4deb5f867a4fd642aa7864769e7f4c4901e809 | ea533def7967c9d3a53002ae109db8b256b51c1d | refs/heads/master | 2021-03-12T23:40:26.990304 | 2014-03-05T13:57:31 | 2014-03-05T13:57:31 | 15,453,581 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,575 | py | import numpy as np
from scipy.ndimage.filters import gaussian_filter
class ScaleSpace:
def __init__(self, img_shape, sigmas, dys, dxs):
''' Compute the scale-space of an image.
Upon initialization, this class precomputes the Gaussian windows used
to smooth images of a fixed shape to save the computations at later
points.
'''
assert(len(sigmas) == len(dys) == len(dxs))
h, w = img_shape
g_y, g_x = np.mgrid[-.5+.5/h:.5:1./h, -.5+.5/w:.5: 1./w]
self.filters = []
for sigma, dy, dx in zip(sigmas, dys, dxs):
g = np.exp(- (g_x**2 + g_y**2) * (np.pi*2*sigma)**2 / 2.)
g = np.fft.fftshift(g)
if dy > 0 or dx > 0:
#TODO change list(range to np.linspace or similar
dg_y = np.array((list(range(0, h//2))+list(range(-h//2, 0))),
dtype=float, ndmin=2) / h
dg_x = np.array((list(range(0, w//2))+list(range(-w//2, 0))),
dtype=float, ndmin=2) / w
dg = (dg_y.T**dy) * (dg_x**dx) * (1j*2*np.pi)**(dy + dx)
g = np.multiply(g, dg)
self.filters.append(g)
def compute_f(self, img_f):
''' Compute the scale space of an image in the fourier domain.'''
return [np.multiply(img_f, f) for f in self.filters]
def compute(self, img):
''' Compute the scale space of an image.'''
img_f = np.fft.fft2(img)
return [np.fft.ifft2(np.multiply(img_f, f)).real for f in self.filters]
def scalespace(img, sigma, order=(0, 0)):
'''Compute the scale-space of an image. sigma is the scale parameter. dx
and dy specify the differentiation order along the x and y axis
respectively.'''
ss = ScaleSpace(img.shape, [sigma], [order[0]], [order[1]])
return ss.compute(img)[0]
def gradient_orientation(img, scale, signed=True, fft=False):
'''Calculate gradient orientations at scale sigma.'''
normalizer = scale**2
if fft:
Ly = normalizer*scalespace(img, scale, order=(1, 0))
Lx = normalizer*scalespace(img, scale, order=(0, 1))
else:
mode = 'reflect'
Ly = normalizer*gaussian_filter(img, scale, order=(1, 0), mode=mode)
Lx = normalizer*gaussian_filter(img, scale, order=(0, 1), mode=mode)
if signed:
go = np.arctan2(Ly, Lx)
else:
go = np.arctan(Ly/(Lx + 1e-10))
go_m = np.sqrt(Lx**2+Ly**2)
return go, go_m
def shape_index(img, scale, orientations=False, fft=False):
'''Calculate the shape index at the given scale.'''
normalizer = scale**2
if fft:
Lyy = normalizer*scalespace(img, scale, order=(2, 0))
Lxy = normalizer*scalespace(img, scale, order=(1, 1))
Lxx = normalizer*scalespace(img, scale, order=(0, 2))
else:
mode = 'reflect'
Lyy = normalizer*gaussian_filter(img, scale, order=(2, 0), mode=mode)
Lxy = normalizer*gaussian_filter(img, scale, order=(1, 1), mode=mode)
Lxx = normalizer*gaussian_filter(img, scale, order=(0, 2), mode=mode)
si = np.arctan((-Lxx-Lyy) / (np.sqrt((Lxx - Lyy)**2+4*Lxy**2)+1e-10))
si_c = .5*np.sqrt(Lxx**2 + 2*Lxy**2 + Lyy**2)
if orientations:
t = Lxx + Lyy
d = Lxx*Lyy - Lxy**2
l1 = t/2.0 + np.sqrt(np.abs(t**2/4 - d))
l2 = t/2.0 - np.sqrt(np.abs(t**2/4 - d))
y = l1-Lyy
x = Lxy
si_o = np.arctan(y/(x+1e-10))
si_om = l1-l2
return si, si_c, si_o, si_om
else:
return si, si_c
| [
"[email protected]"
]
| |
e82662f1e18ae247ee50643b8d5bf26ee588c2fa | 29e9365e68d3f5ef8e9a758c207ea9b0e8980c33 | /supporting_functions.py | 7901fd9772a1afd2af648fc02dfe5691aae221c7 | []
| no_license | csababekesi/rice_collector | ce3ebba00d16da951ec0c18903763a2ad1dde469 | 6f1061fe527670ad31b61758e8058711b76e6583 | refs/heads/master | 2022-07-04T05:10:02.807751 | 2020-05-10T16:33:22 | 2020-05-10T16:33:22 | 262,879,380 | 0 | 0 | null | 2020-05-10T21:33:25 | 2020-05-10T21:33:24 | null | UTF-8 | Python | false | false | 6,831 | py | import pyautogui
import time
import random
import cv2
import numpy as np
from datetime import datetime
import requests
import pytesseract
from global_variables import positions
# On Windows you have to install Tesseract and provide a PATH to it:
#pytesseract.pytesseract.tesseract_cmd = r'C:\Users\username\tesseract.exe'
def hour_passed(datetime_object) -> bool:
"""
DOCSTRING: this function calculates if an hour has passed compared to the input
datetime object.
INPUT: datetime object.
OUTPUT: boolean variable, True if an hour has passed and False if not.
"""
# Get the current time
current_time = datetime.now()
# Calculate the time difference between the two datetime objects
time_diff = current_time - datetime_object
# Calculate the number of secondsd passed
duration_in_s = time_diff.total_seconds()
# One hour in seconds
hour_in_sec = 60 * 60
# Check if an hour has passed
if(duration_in_s > hour_in_sec):
return True
else:
return False
def take_screenshot(save_image: bool = True, return_image: bool = False):
"""
DOCSTRING: the function takes a screenshot and depending on the parameters
saves and or returns the image
INPUT: save_image is a boolean variable, which if True will save the image
in the hour_logs folder.
return_image is a boolean variable which if True will return the image.
OUTPUT: an image if the return_image parameter was True and nothing if not.
"""
# Wait before screenshot so the page can show the new problem
time.sleep(5)
image = pyautogui.screenshot()
# Convert the image to an OpenCV image object
image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
# Format the name of the picture
date = str(datetime.now())[:10] + '_' + str(datetime.now())[11:16]
image_name = 'current_progress_' + date
# Remove the invalid characters from the string and format it
invalid_characters = [' ', ':', '-', '.']
for character in invalid_characters:
image_name = image_name.replace(character, "_")
if(save_image):
# Save the image in the hour_logs folder
cv2.imwrite('hour_logs/' + image_name + '.jpg', image)
print('Screenshot taken!')
if(return_image):
# Return the image object
return image
def click_on_screen(coordinates:tuple) -> type(None):
"""
DOCSTRING: move the mouse to a specified location and left click
INPUT: tuple containing the x and y coordinates of the position
in the form of (x_position, y_position)
"""
# Move the mouse to the location in 1 second
pyautogui.moveTo(coordinates[0], coordinates[1], duration = 1)
# Click on the specified location
pyautogui.click(coordinates[0], coordinates[1])
def is_synonym(searched_word: str, input_word: str) -> bool:
"""
DOCSTRING: The function schecks if the searches word is a synonym of the imput
word by using the datamouse API.
INPUT: searched_word is the word for which we are trying to find the sysnonym of.
input_word is the word that is being tested if it is the synonym of the searched_word
OUTPUT: boolean variable, True if the input_word is the synonym of the searched word
and false if it is not.
"""
# Send a get request to datamouse and save the response
response = requests.get("https://api.datamuse.com/words?ml=" + searched_word)
# Store the response in a list
data_list = response.json()
# Store the synonyms in a list
new_list = [element['word'] for element in data_list]
# Check if the input word is a sysnonym of the searched word
if(input_word in new_list):
print(f'The sysnonym of {searched_word} was: {input_word}')
return True
else:
return False
def get_choices(image) -> list:
"""
DOCSTRING: converts a screenshot into a list of words containing the problem
and the possible solutions to it.
INPUT: OpenCV image object
OUTPUT: a list containing the problem (at index 0) and the possible answers
"""
# Create sub images of the screenshot containing
# the problem and the four choices
problem = image[positions['prob_y_top_bound']: positions['prob_y_bottom_bound'],
positions['prob_x_left_bound']: positions['prob_x_right_bound']
]
# Save the image for debugging
#cv2.imwrite('hour_logs/problem.jpg', problem)
first_choice = image[positions['first_y_top_bound']: positions['first_y_bottom_bound'],
positions['first_x_left_bound']: positions['first_x_right_bound']
]
# Save the image for debugging
#cv2.imwrite('hour_logs/first_choice.jpg', first_choice)
second_choice = image[positions['second_y_top_bound']: positions['second_y_bottom_bound'],
positions['second_x_left_bound']: positions['second_x_right_bound']
]
# Save the image for debugging
#cv2.imwrite('hour_logs/second_choice.jpg', second_choice)
third_choice = image[positions['third_y_top_bound']: positions['third_y_bottom_bound'],
positions['third_x_left_bound']: positions['third_x_right_bound']
]
# Save the image for debugging
#cv2.imwrite('hour_logs/third_choice.jpg', third_choice)
fourth_choice = image[positions['fourth_y_top_bound']: positions['fourth_y_bottom_bound'],
positions['fourth_x_left_bound']: positions['fourth_x_right_bound']
]
# Save the image for debugging
#cv2.imwrite('hour_logs/fourth_choice.jpg', fourth_choice)
# Create a list of images of the possible choices
image_list = [first_choice, second_choice, third_choice, fourth_choice]
# Create a list containing the possible answers
word_list = [pytesseract.image_to_string(image, lang = 'eng') for image in image_list]
# Convert the problem to a string and split it, since we are
# looking for the sysnonym of the first word in the sentence
text_problem = pytesseract.image_to_string(problem, lang = 'eng')
problem_list = text_problem.split()
# Try to insert the fist word in the problem, the OCR might fail
try:
word_list.insert(0, problem_list[0])
except:
print('OCR could not read in problem')
word_list.insert(0, 'None')
print(f'The problem is: {word_list[0]} and the possible answers are {word_list[1:]}')
return word_list | [
"[email protected]"
]
| |
831064eea03f634d055126cfd339905b4aaf40e1 | 41068281a63641164611a88fbf2da7d4d107cf90 | /members/forms.py | 0c4340f53d53da2361bc29d7f8f64ddcf9a7c132 | []
| no_license | ahmed-s-nada/seasons | 6a6dd28c7bbfdd19c3669ed283ce002db70e08e9 | 9a57b2c460c9fda701018a49652b3d987d7a3608 | refs/heads/master | 2021-05-08T11:38:19.612363 | 2018-02-14T20:30:06 | 2018-02-14T20:30:06 | 119,186,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | from django import forms
from members.models import member, SubMember
from datetime import datetime, date
class member_form(forms.ModelForm):
CLUB_CHOICES = ( ('ALAHLY', 'Al-Ahly' ), ('ALZAMALEK', 'Al-Zamalek'), ('WADIDEGLA', 'Wadi-Degla'), ('ALGEZIRA', 'Al-Gezira'), ('NEWGIZA', 'New-Giza'), ('ALSAID', 'Al-Said') )
# other_memberships = forms.MultipleChoiceField(
# required=False,
# widget=forms.CheckboxSelectMultiple,
# choices=CLUB_CHOICES,
# )
class Meta:
model = member
fields= '__all__'
def clean_phone(self):
if not self.cleaned_data['phone'] is None:
dig = [str(x) for x in range(10)]
# print (dig)
for c in self.cleaned_data['phone']:
# print (c)
if not c in dig:
raise forms.ValidationError('Only digits Please')
return self.cleaned_data['phone']
def clean_phone2(self):
if not self.cleaned_data['phone2'] is None:
dig = [str(x) for x in range(10)]
# print (dig)
for c in self.cleaned_data['phone2']:
# print (c)
if not c in dig:
raise forms.ValidationError('Only digits Please')
return self.cleaned_data['phone2']
def clean_fax(self):
if not self.cleaned_data['fax'] is None:
dig = [str(x) for x in range(10)]
# print (dig)
for c in self.cleaned_data['fax']:
# print (c)
if not c in dig:
raise forms.ValidationError('Only digits Please')
return self.cleaned_data['fax']
class SubMemberForm(forms.ModelForm):
class Meta:
model = SubMember
fields= '__all__'
def clean_birthDay(self):
today = date.today()
print (today)
if self.cleaned_data['sub_membership_type'] == 'C':
age_defferance = (today - self.cleaned_data['birthDay']).days
if age_defferance >= 7665:
raise forms.ValidationError ('The age is over 21 years!')
return self.cleaned_data['birthDay']
def clean_phone(self):
if not self.cleaned_data['phone'] is None:
dig = [str(x) for x in range(10)]
# print (dig)
for c in self.cleaned_data['phone']:
# print (c)
if not c in dig:
raise forms.ValidationError('Only digits Please')
return self.cleaned_data['phone']
| [
"[email protected]"
]
| |
e3c17501820af99668a5090962fa38e7abe36853 | 831a90cec5be1579901b39f4cd0fbbc57a60fd64 | /libs/roc.py | e692a4d9b35b13f4890da161294ecf95d2a149db | []
| no_license | emithongle/thesis-20160530 | f7090545bfc0c459a465f33f6e6790ee994afa32 | 69e5ddf4fec1f6dcfee0880678cca1b20fa759f3 | refs/heads/master | 2021-01-19T01:23:07.686667 | 2016-06-04T10:16:04 | 2016-06-04T10:16:04 | 59,984,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py | import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve
from scipy.stats import mannwhitneyu
import scipy
def getScore(type='', y_true = [], y_score = []):
if (type == 'ROC'):
return roc_curve(y_true, y_score)
if (type == 'AUC'):
return roc_auc_score(y_true, y_score)
elif (type == 'U0'):
l0 = np.asarray([j[0] for (i, j) in zip(y_true, y_score.tolist()) if (i == 1)])
l1 = np.asarray([j[1] for (i, j) in zip(y_true, y_score.tolist()) if (i == 2)])
l2 = np.asarray([j[2] for (i, j) in zip(y_true, y_score.tolist()) if (i == 3)])
return calU0((l0, l1, l2))
# elif (type == 'U'):
# l0 = [j.index(max(j)) for (i, j) in zip(y_true, y_score.tolist()) if (i == 1)]
# l1 = [j.index(max(j))for (i, j) in zip(y_true, y_score.tolist()) if (i == 2)]
# l2 = [j.index(max(j)) for (i, j) in zip(y_true, y_score.tolist()) if (i == 3)]
# return calU((l0, l1, l2))
# elif (type == 'U_S'):
# # return mannwhitneyu(y_true, y_score)
# return calU_S(y_true, y_score)
#
# elif (type == 'U_MannWhitneyu'):
# return mannwhitneyu(y_true, y_score)
elif (type == 'VUS_1'):
return calVUS_1(y_true, y_score)
elif (type == 'VUS_2'):
l0 = [j.index(max(j)) for (i, j) in zip(y_true, y_score.tolist()) if (i == 1)]
l1 = [j.index(max(j)) for (i, j) in zip(y_true, y_score.tolist()) if (i == 2)]
l2 = [j.index(max(j)) for (i, j) in zip(y_true, y_score.tolist()) if (i == 3)]
return calVUS_2((l0, l1, l2))
return None
# =================================================
def calU0(y_score):
from scipy.stats import norm
mu1, mu2, mu3 = np.mean(y_score[0]), np.mean(y_score[1]), np.mean(y_score[2])
sigma1, sigma2, sigma3 = np.std(y_score[0]), np.std(y_score[1]), np.std(y_score[2])
a, b, c, d = sigma2/sigma1, (mu1 - mu2)/sigma1, sigma2/sigma3, (mu3 - mu2)/sigma3
bins, minS, maxS = 5000, -3, 3
rg = np.arange(minS, maxS, (maxS - minS)/bins)
import scipy.integrate as spi
return spi.quad(lambda x: norm.pdf(a * x - b) * norm.pdf(-c * x + d) * norm.pdf(x), -5, 5)[0]
# return integrate(norm.pdf(a * rg - b) * norm.pdf(-c * rg + d) * norm.pdf(rg) * ((maxS - minS)/bins))[0]
# =================================================
# 1. Mann-Whitney U Statistic
def calU(y_score):
count = sum([1 for i in y_score[0] for j in y_score[1] for k in y_score[2] if (i < j) and (j < k)])
return count / (len(y_score[0]) * len(y_score[1]) * len(y_score[2]))
# 1'
def calU_S(x, y):
# u, prob = scipy.stats.mannwhitneyu(x, y)
#
# m_u = len(x) * len(y) / 2
# sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12)
# z = (u - m_u) / sigma_u
#
# pval = 2 * scipy.stats.norm.cdf(z)
#
# return pval
return 0
# =================================================
# 2. Approach based on the confusion matrix
def calPVUS(cfm, i, j, k):
return (cfm[i][i] / (cfm[i][i] + cfm[i][j] + cfm[i][k])) * cfm[j][j] / (cfm[j][j] + cfm[j][k])
def calVUS_1(y_true, y_predicted):
from sklearn.metrics import confusion_matrix
import itertools
confMatrix = confusion_matrix(y_true, y_predicted)
tmp = [calPVUS(confMatrix, _[0], _[1], _[2]) for _ in list(itertools.permutations(range(3)))]
return sum(tmp) / 6
# =================================================
# 3. Approach based on emperical distribution functions
#
# def calVUS_2(y_true, y_score):
# return 0
def calPDF(data):
return np.asarray([x / sum(data) for x in data])
def calCDF(data):
return np.cumsum(calPDF(data))
def calVUS_2(data):
# database = { 1 : [S_i], 2: [S_j], 3: [S_k] } for i, j, k in N
bins = 100
minS = min([min(data[0]), min(data[1]), min(data[2])])
maxS = max([max(data[0]), max(data[1]), max(data[2])])
count_S1, rangeS = np.histogram(np.asarray(data[0]), bins=bins, range=(minS, maxS))
count_S2, tmp = np.histogram(np.asarray(data[1]), bins=bins, range=(minS, maxS)) #[0]
count_S3, tmp = np.histogram(np.asarray(data[2]), bins=bins, range=(minS, maxS)) # [0]
cdf1 = calCDF(count_S1)
pdf2 = calPDF(count_S2)
cdf3 = calCDF(count_S3)
return sum(cdf1 * (1 - cdf3) * pdf2)
| [
"[email protected]"
]
| |
537a655078659a64fd10f3ac755a9f1714cf227a | 57779967ffe41cab94d67542d9a5826a0abf943e | /migrations/versions/25aac75e8625_.py | ec7845d7e2051b41253a887559831fec3194d286 | []
| no_license | Jang-Boa/project_web | 83ead8a49e1c40f715b6da160940214c29c24643 | 4dcb358bfad82ece548ca1ac316c90d96adc28d4 | refs/heads/main | 2023-08-25T21:32:10.574866 | 2021-10-20T15:40:52 | 2021-10-20T15:40:52 | 416,203,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | """empty message
Revision ID: 25aac75e8625
Revises: cd08f7dab0bd
Create Date: 2021-10-19 17:50:22.149645
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '25aac75e8625'
down_revision = 'cd08f7dab0bd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('Car', 'price_avg',
existing_type=sa.INTEGER(),
nullable=False,
existing_server_default=sa.text("'1'"))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('Car', 'price_avg',
existing_type=sa.INTEGER(),
nullable=True,
existing_server_default=sa.text("'1'"))
# ### end Alembic commands ###
| [
"[email protected]"
]
| |
279d995d38352c3e51c4cfee7f485f1086921721 | d7a403786e2a87ac51c30f2e62b71481b4089403 | /build/vesc_driver/catkin_generated/pkg.develspace.context.pc.py | 5829164fe6f4bf19027c503ac262343e8d3a4a52 | []
| no_license | mahmoodjanabi/caroline | 57f3de7e248c334684c83f98d7da76df14c2a0ed | e994f25c9d9c9ad30be9182daf8752b1b0bd525b | refs/heads/master | 2020-04-17T05:06:00.176575 | 2019-04-08T01:55:09 | 2019-04-08T01:55:09 | 166,262,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ubuntu/catkin_ws/src/vesc_driver/include".split(';') if "/home/ubuntu/catkin_ws/src/vesc_driver/include" != "" else []
PROJECT_CATKIN_DEPENDS = "nodelet;pluginlib;roscpp;std_msgs;vesc_msgs;serial".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "vesc_driver"
PROJECT_SPACE_DIR = "/home/ubuntu/catkin_ws/devel"
PROJECT_VERSION = "0.0.1"
| [
"[email protected]"
]
| |
ee82f549982587ab5b564579fb516fba6bdf691f | 22013212df1e21f29d0180f2109841177a2a8791 | /basic_addons/account_budget_report/reports/__init__.py | 08af8422c824fc2e2e1015f5bb8891ccaf05f79f | []
| no_license | butagreeza/DTDATA_A | f965236c0d7faf0ec4082d27e2a0ff8e7dafe1c6 | 90b09f89714349a3f26de671a440a979aeebd54c | refs/heads/master | 2023-06-18T00:41:02.521432 | 2021-06-14T21:17:06 | 2021-06-14T21:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
# Copyright (C) 2017-TODAY Cybrosys Technologies(<https://www.cybrosys.com>).
# Author: Jesni Banu(<https://www.cybrosys.com>)
# you can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.
#
# It is forbidden to publish, distribute, sublicense, or sell copies
# of the Software or modified copies of the Software.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# GENERAL PUBLIC LICENSE (LGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import budget_parser
import cross_overed_budget_report
import analytic_budget
| [
"[email protected]"
]
| |
1d2f837e23035bd76be330308e35d904b0065647 | ba5b9667eb2137af160d69724261e31314f7f5d7 | /awards/views.py | 04cc5c064ba2e2d1e5b8c7fde9f2e75e9a3bdf95 | [
"MIT"
]
| permissive | NatashaSenah/awards | 4e5ebd43edc6ba8225eb39131b936715ba887c38 | 352c72f40a1d1afa660456bd3d03934b097c149c | refs/heads/master | 2020-04-01T00:49:17.664144 | 2019-10-22T18:30:08 | 2019-10-22T18:30:08 | 152,714,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,938 | py | from .forms import NewProjectForm,ProfileForm,Votes
from django.contrib.auth.decorators import login_required
from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse
from .models import Project,Profile,Ratings
from django.contrib.auth.models import User
# from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.views import APIView
from django.http import Http404
from .serializer import MerchSerializer,ProfileSerializer
from rest_framework import status
from .permissions import IsAdminOrReadOnly
# Create your views here.
@login_required
def post(request):
posts = Project.objects.all()
return render(request,'all-awards/post.html',{"posts":posts})
def awards(request):
vote = Votes()
if request.method == 'POST':
vote_form = Votes(request.POST)
if vote_form.is_valid():
design = vote_form.cleaned_data['design']
usability = vote_form.cleaned_data['usability']
content = vote_form.cleaned_data['content']
creativity = vote_form.cleaned_data['creativity']
rating = Ratings(design=design,usability=usability,
content=content,creativity=creativity,
user=request.user,post=project)
rating.save()
return redirect('/')
else:
vote_form = Votes()
return render(request,'awards.html',{"vote":vote_form})
@login_required(login_url='/accounts/login/')
def projects(request, projects_id):
project = Project.objects.get(id=projects_id)
likes = Ratings.objects.filter(post=project)
design = []
usability = []
creativity = []
content = []
for x in likes:
design.append(x.design)
usability.append(x.usability)
creativity.append(x.creativity)
content.append(x.content)
de = []
us = []
cre = []
con = []
if len(usability)>0:
usa = (sum(usability)/len(usability))
us.append(usa)
if len(creativity)>0:
crea = (sum(creativity)/len(creativity))
cre.append(crea)
if len(design)>0:
des = (sum(design)/len(design))
de.append(des)
if len(content)>0:
cont = (sum(content)/len(content))
con.append(cont)
vote = Votes()
if request.method == 'POST':
vote_form = Votes(request.POST)
if vote_form.is_valid():
design = vote_form.cleaned_data['design']
usability = vote_form.cleaned_data['usability']
content = vote_form.cleaned_data['content']
creativity = vote_form.cleaned_data['creativity']
rating = Ratings(design=design,usability=usability,
content=content,creativity=creativity,
user=request.user,post=project)
rating.save()
return redirect('/')
return render(request,"awards.html",{"post":project,"des":de,"usa":us,"cont":con,"crea":cre,"vote":vote})
# try:
# project = Project.objects.get(id = project_id)
# except DoesNotExist:
# raise Http404()
# return render(request,"all-awards/awards.html", {"project":project})
def search_results(request):
if 'project' in request.GET and request.GET["project"]:
search_term = request.GET.get("project")
searched_project = Project.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'all-awards/search.html',{"message":message,"project": searched_projects})
else:
message = "You haven't searched for any term"
return render(request, 'all-awards/search.html',{"message":message})
@login_required(login_url='/accounts/login/')
def new_project(request):
current_user = request.user
if request.method == 'POST':
form = NewProjectForm(request.POST,request.FILES)
if form.is_valid():
post = form.save(commit=False)
post.user = request.user
post.save()
return redirect('home')
print('saved')
return redirect('home')
else:
form = NewProjectForm()
return render(request, 'new_project.html', {"form": form})
def profile(request, username):
profile = get_object_or_404(User,username=username)
try:
profile_details = Profile.get_by_id(profile.id)
except:
profile_details = Profile.filter_by_id(profile.id)
# images = Project.get_profile_images(profile.id)
title = f'@{profile.username} Instagram photos and videos'
return render(request, 'profile/profile.html', {'title':title, 'profile':profile, 'profile_details':profile_details})
def edit_profile(request):
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
edit = form.save(commit=False)
edit.user = request.user
edit.save()
username = request.user.username
return redirect('profile', username=username)
else:
form = ProfileForm()
return render(request, 'profile/edit_profile.html', {'form': form})
class MerchList(APIView):
permission_classes = (IsAdminOrReadOnly,)
def get(self, request, format=None):
all_merch = Project.objects.all()
serializers = MerchSerializer(all_merch, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = MerchSerializer(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class MerchDescription(APIView):
permission_classes = (IsAdminOrReadOnly,)
def get_merch(self, pk):
try:
return Profile.objects.get(id=pk)
except Profile.DoesNotExist:
return Http404
def get(self, request, pk, format=None):
merch = self.get_merch(pk)
serializers = ProfileSerializer(merch)
return Response(serializers.data)
def put(self, request, pk, format=None):
merch = self.get_merch(pk)
serializers = MerchSerializer(merch, request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data)
else:
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
merch = self.get_merch(pk)
merch.delete()
return Response(status=status.HTTP_204_NO_CONTENT) | [
"[email protected]"
]
| |
4b009d4e9e69e1406c459682024b574fc33ec762 | 92eb95fdf52bef9a8c66a6bd7062ac3ab3fc4d0e | /setup.py | 01a14d692b5d558e08849ee91b0793dc734fefcf | []
| no_license | alfredodeza/helga-jenkins | 4636919b6ed50356e50d9f8ede9f88e423a97602 | 7ed0572eb563ee36fcfa0f436b0473fe56364a11 | refs/heads/master | 2021-01-17T13:33:52.710495 | 2016-12-01T12:52:58 | 2016-12-01T12:52:58 | 34,229,899 | 3 | 2 | null | 2016-06-06T15:46:22 | 2015-04-20T00:30:04 | Python | UTF-8 | Python | false | false | 911 | py | from setuptools import setup, find_packages
version = '0.0.5'
setup(name="helga-jenkins",
version=version,
description=('jenkins plugin for helga'),
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='irc bot jenkins',
author='alfredo deza',
author_email='contact [at] deza [dot] pe',
url='https://github.com/alfredodeza/helga-jenkins',
license='MIT',
packages=find_packages(),
install_requires=[
'python-jenkins',
],
entry_points = dict(
helga_plugins = [
'jenkins = helga_jenkins:helga_jenkins',
],
),
)
| [
"[email protected]"
]
| |
bacc780f56a918e21b35b9fecc1d2a15d95159bf | 5d1a348e11ad652e6cc8f894d4ca774429f335f9 | /Prob-and-Stats/_Calculators/confidence_intervals.py | 014691dd6abedfa0a271ad2b36d1498a30b5a843 | []
| no_license | anhnguyendepocen/UCSanDiegoX | 5332fe0780540038c0cde70af70d67544a3e7725 | 053a1fae52f9b46188a9fcf10729f70d10b3db63 | refs/heads/master | 2022-04-18T03:23:27.636938 | 2020-03-30T23:29:40 | 2020-03-30T23:29:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | import numpy as np
from scipy.stats import norm, t, sem
from math import sqrt
# list = [60, 56, 61, 68, 51, 53, 69, 54, 80, 90, 55, 35, 45]
list = np.random.randint(low=35,high=71, size=20)
print(list)
n = len(list)
mu = np.mean(list)
sigma = np.std(list)
var = np.var(list)
bounds = t.interval(0.90, len(list)-1, loc=np.mean(list), scale=sem(list))
print('The Mean Is =', mu)
print('The Raw Variance ("S^2") Is =', var)
print('The Standard Deviation Is =', sigma)
print('Lower Bounds =', bounds[0])
print('Upper Bounds =', bounds[1])
# the number of tweets a random user is a random variable with sigma=2
# in a sample of 121 users, the sample mean was 3.7
# find the 95% confidence interval for the distribtuion mean.
ci = 0.95
sig = .15
mean = 17.65
users = 50
inv_theta = norm.ppf((1+ci)/2)
std_error = sig/sqrt(users)
tweets_lower = mean - (inv_theta*std_error)
tweets_upper = mean + (inv_theta*std_error)
print('the bounds of number of tweets is =', tweets_lower, tweets_upper)
| [
"[email protected]"
]
| |
857b85a762b079c37792bc79922db845a5951bbf | 4c6a18d17df8d9327d047857b4d5e832a476e0e1 | /venv/bin/pip3 | 46486eacd61a1d2f2f2258c808bbaede576109b3 | []
| no_license | grzeslaws/oauthFlask | 2026f457a616a4bf89d5059607316684a7922f58 | fd8f4e91a85037486eb925e3c377398d43e0a5de | refs/heads/master | 2022-12-22T05:59:49.020048 | 2018-05-30T08:38:58 | 2018-05-30T08:38:58 | 135,413,272 | 0 | 0 | null | 2021-06-01T22:20:09 | 2018-05-30T08:34:49 | Python | UTF-8 | Python | false | false | 253 | #!/Users/grzesiek/Desktop/sqlalchemy/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
9222ec732fdf9bf8f7aad210a57dfc00e1c6611a | ba1585406af7a75e425b104a3a304ee99c9b11f0 | /asmodeus-histogram.py | 161eb5435e49646345aec772570c8c340a4aa4ff | [
"MIT"
]
| permissive | sesquideus/asmodeus | 1ac9dce56b124f2c8318914b0d666a6ea4e079db | cc56357b7fc336e28d19a7297a67890669550be8 | refs/heads/master | 2023-08-03T08:55:51.305957 | 2023-07-25T18:01:14 | 2023-07-25T18:01:14 | 152,603,129 | 4 | 2 | MIT | 2023-02-10T23:14:38 | 2018-10-11T14:15:16 | Python | UTF-8 | Python | false | false | 126 | py | #!/usr/bin/env python
from apps.histogram import AsmodeusHistogram
if __name__ == "__main__":
AsmodeusHistogram().run()
| [
"[email protected]"
]
| |
5473b4dc8a460bd82fdbb8d63a294758359036eb | 8b6edb9665bf90fe93d224fd2903e879d6f92f1d | /scripts/helpers.py | 9c11e71d8fc1a86076a5736f16fd84a717c1251c | []
| no_license | Mandyli1996/Multi-modal-learning-for-Neural-Record-Linkage | 808836f8b9f059e7fcf01db0a202bb100f27a806 | d6ada3bbc226adfa5ef5cfaae9b648e9b426921a | refs/heads/master | 2022-01-31T12:18:05.429898 | 2019-08-16T01:43:46 | 2019-08-16T01:43:46 | 197,054,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,109 | py | import pandas as pd
import numpy as np
import os
import re
# DATA HANDLING
def is_str_list(x):
"""
given a pd.Series of strings, return True if all elements
begin and end with square brackets
"""
return np.all(x.astype(str).str.startswith('[') & \
x.astype(str).str.endswith(']'))
def str_to_list(x):
"convert a string reprentation of list to actual list"
x = x[1:-1]
x = x.split(',')
return [int(i) for i in x]
def load_data(data_dir, filenames=['test_1', 'test_2', 'test_y',
'train_1', 'train_2', 'train_y',
'val_1', 'val_2', 'val_y']):
"""
returns a dictionary of test, train, and validation datasets with their
respective sources and targets. filenames serve as keys.
"""
data = dict()
for filename in filenames:
df = pd.read_csv(os.path.join(data_dir, filename+'.csv'), low_memory=False)
str_list_mask = df.apply(is_str_list, axis='rows')
df.loc[:, str_list_mask] = df.loc[:, str_list_mask].applymap(str_to_list)
data[filename] = df
return data
def str_to_list_df(x):
df = x.copy()
mask = df.apply(is_str_list, axis='rows')
df.loc[:, mask] = df.loc[:, mask].applymap(str_to_list)
return df
def str_to_num(x):
if type(x) == float:
return x
else:
return float(re.sub('[^0-9|^\.]', '', x))
def examine_data(set1, set2, columns, bool_mask, mapping):
df1 = set1.copy()
df2 = set2.copy()
def idx_to_word(x):
string = ''
for idx in x:
string += ' ' + mapping['idx2word'][idx]
return string
df1.loc[:, columns] = df1.loc[:, columns].applymap(idx_to_word)
df2.loc[:, columns] = df2.loc[:, columns].applymap(idx_to_word)
both = pd.concat([df1, df2], axis=1)
both = both.loc[bool_mask, :]
return both
# HYPEROPT VISUALIZATIONS
def hyperopt_val_diagnostic(val_name, trials):
ts = [trial['tid'] for trial in trials.trials]
results = [trial['result']['loss'] for trial in trials.trials]
fig, axes = plt.subplots(1, 3, figsize = (16,4))
axes[0].scatter(ts, vals)
axes[0].set(xlabel='iteration', ylabel=val_name)
axes[1].hist(np.array(vals).squeeze())
axes[1].set(xlabel=val_name, ylabel='frequency')
axes[2].scatter(vals, results)
axes[2].set(xlabel=val_name, ylabel='loss')
plt.tight_layout()
def visualize_hyperparameters(trials):
for val in trials.trials[0]['misc']['vals'].keys():
hyperopt_val_diagnostic(val, trials)
# HELPERS FOR MODEL GENERATION
def get_document_frequencies(raw_data_dir, mapping, set1='set1', set2='set2'):
# read csv data from directory as pd.DataFrame
set1 = pd.read_csv(os.path.join(raw_data_dir, set1 + '.csv'), encoding='latin1')
set2 = pd.read_csv(os.path.join(raw_data_dir, set2 + '.csv'), encoding='latin1')
# select only columns whose values are lists embedded as strings
mask1 = set1.apply(is_str_list, axis='rows')
mask2 = set2.apply(is_str_list, axis='rows')
# convert strings back into lists
set1 = set1.loc[:, mask1].applymap(str_to_list)
set2 = set2.loc[:, mask2].applymap(str_to_list)
# concatenate columns so all relevant attributes become a single list
def concat_columns(x):
idx_list = list()
for lst in x.values:
idx_list += lst
return idx_list
set1 = set1.apply(concat_columns, axis='columns')
set2 = set2.apply(concat_columns, axis='columns')
# +1 because default value of DefaultDict not counted
doc_freqs_1 = np.zeros(len(mapping['idx2word'])+1)
doc_freqs_2 = np.zeros(len(mapping['idx2word'])+1)
for index, item in set1.iteritems():
uniq_indices = set(item)
for idx in uniq_indices:
doc_freqs_1[idx] += 1
for index, item in set2.iteritems():
uniq_indices = set(item)
for idx in uniq_indices:
doc_freqs_2[idx] += 1
return doc_freqs_1, doc_freqs_2
| [
"[email protected]"
]
| |
0ed7b61468039bba0ea71d5de30d3e64d0193ad3 | 470ee78176c144bd8e97cdd87723c0b11881405e | /color model/test_large.py | ec8b305cfce7588c6f110c6f7a1151a3966c90b9 | []
| no_license | Jizhongpeng/RevSCI-net | d1f5317124bd51236690eb55d0f436103f04e02f | 71ac125ab47dce2e4c091936e3a659900b7da258 | refs/heads/master | 2023-06-06T03:40:43.300103 | 2021-06-28T11:10:45 | 2021-06-28T11:10:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,403 | py |
from dataLoadess import Imgdataset
from torch.utils.data import DataLoader
from models import re_3dcnn1
from utils import generate_masks, time2file_name, split_masks
import torch.optim as optim
import torch.nn as nn
import torch
import scipy.io as scio
import time
import argparse
import datetime
import os
import numpy as np
from torch.autograd import Variable
# from thop import profile
if not torch.cuda.is_available():
raise Exception('NO GPU!')
data_path = "./largescale_rgb"
test_path1 = "./test"
parser = argparse.ArgumentParser(description='Setting, compressive rate, size, and mode')
parser.add_argument('--last_train', default=20, type=int, help='pretrain model')
parser.add_argument('--model_save_filename', default='color_model', type=str,
help='pretrain model save folder name')
parser.add_argument('--max_iter', default=100, type=int, help='max epoch')
parser.add_argument('--batch_size', default=1, type=int)
parser.add_argument('--B', default=24, type=int, help='compressive rate')
parser.add_argument('--learning_rate', default=0.0001, type=float)
parser.add_argument('--size', default=[1080, 1920], type=int, help='input image resolution')
parser.add_argument('--mode', default='noreverse', type=str, help='training mode: reverse or noreverse')
args = parser.parse_args()
mask, mask_s = generate_masks(data_path)
loss = nn.MSELoss()
loss.cuda()
def test(test_path, epoch, result_path, model, args):
r = np.array([[1, 0], [0, 0]])
g1 = np.array([[0, 1], [0, 0]])
g2 = np.array([[0, 0], [1, 0]])
b = np.array([[0, 0], [0, 1]])
rgb2raw = np.zeros([3, args.size[0], args.size[1]])
rgb2raw[0, :, :] = np.tile(r, (args.size[0] // 2, args.size[1] // 2))
rgb2raw[1, :, :] = np.tile(g1, (args.size[0] // 2, args.size[1] // 2)) + np.tile(g2, (
args.size[0] // 2, args.size[1] // 2))
rgb2raw[2, :, :] = np.tile(b, (args.size[0] // 2, args.size[1] // 2))
rgb2raw = torch.from_numpy(rgb2raw).cuda().float()
test_list = os.listdir(test_path)
psnr_cnn = torch.zeros(len(test_list))
for i in range(len(test_list)):
pic = scio.loadmat(test_path + '/' + test_list[i])
if "orig" in pic:
pic = pic['orig']
elif "patch_save" in pic:
pic = pic['patch_save']
pic = pic / 255
pic_gt = np.zeros([pic.shape[3] // args.B, args.B, 3, args.size[0], args.size[1]])
for jj in range(pic.shape[3]):
if jj % args.B == 0:
meas_t = np.zeros([args.size[0], args.size[1]])
n = 0
pic_t = pic[:, :, :, jj]
mask_t = mask[n, :, :]
mask_t = mask_t.cpu()
pic_t = np.transpose(pic_t, [2, 0, 1])
pic_gt[jj // args.B, n, :, :, :] = pic_t
n += 1
meas_t = meas_t + np.multiply(mask_t.numpy(), torch.sum(torch.from_numpy(pic_t).cuda().float() * rgb2raw,
dim=0).cpu().numpy())
if jj == args.B - 1:
meas_t = np.expand_dims(meas_t, 0)
meas = meas_t
elif (jj + 1) % args.B == 0 and jj != args.B - 1:
meas_t = np.expand_dims(meas_t, 0)
meas = np.concatenate((meas, meas_t), axis=0)
meas = torch.from_numpy(meas).cuda().float()
pic_gt = torch.from_numpy(pic_gt).cuda().float()
meas_re = torch.div(meas, mask_s)
meas_re = torch.unsqueeze(meas_re, 1)
out_save1 = torch.zeros([meas.shape[0], args.B, 3, args.size[0], args.size[1]]).cuda()
with torch.no_grad():
psnr_1 = 0
for ii in range(meas.shape[0]):
model.mask = mask
out_pic1 = model(meas_re[ii:ii + 1, ::], args)
out_pic1 = out_pic1.reshape(1, 3, args.B, args.size[0], args.size[1]).permute(0, 2, 1, 3, 4)
out_save1[ii, :, :, :, :] = out_pic1[0, :, :, :, :]
for jj in range(args.B):
out_pic_forward = out_save1[ii, jj, :, :, :]
gt_t = pic_gt[ii, jj, :, :, :]
mse_forward = loss(out_pic_forward * 255, gt_t * 255)
mse_forward = mse_forward.data
psnr_1 += 10 * torch.log10(255 * 255 / mse_forward)
psnr_1 = psnr_1 / (meas.shape[0] * args.B)
psnr_cnn[i] = psnr_1
a = test_list[i]
name1 = result_path + '/RevSCInet_' + a[0:len(a) - 4] + '{}_{:.4f}'.format(epoch, psnr_1) + '.mat'
out_save1 = out_save1.cpu()
scio.savemat(name1, {'pic': out_save1.numpy()}, do_compression=True)
print("RevSCInet result: {:.4f}".format(torch.mean(psnr_cnn)))
if __name__ == '__main__':
date_time = str(datetime.datetime.now())
date_time = time2file_name(date_time)
result_path = 'recon' + '/' + date_time
model_path = 'model' + '/' + date_time
if not os.path.exists(result_path):
os.makedirs(result_path)
if args.last_train != 0:
rev_net = re_3dcnn1(18).cuda()
rev_net.mask = mask
rev_net.load_state_dict(torch.load('./model/' + args.model_save_filename + "/RevSCInet_model_epoch_{}.pth".format(args.last_train)))
rev_net = rev_net.module if hasattr(rev_net, "module") else rev_net
test(test_path1, args.last_train, result_path, rev_net.eval(), args)
| [
"[email protected]"
]
| |
55a9234942dba8569fdb8f0a0f4f68176c5a93e9 | c7ddc853a9dbcb9470ef685ab68a3c9bbaa9eb31 | /6.00.1x/pset4/ProblemSet4/ps4a_repl.py | 898bc75bc9e4568a28c02bf9df296075007b4cad | [
"Giftware"
]
| permissive | aasimsani/6.00.1x-2x | 6468ba67558c9c8a0f78c26e9a8da007bff5aaf2 | da724caee2352e0301035fa050f1c60b675c3d4b | refs/heads/master | 2021-01-13T12:37:42.056737 | 2017-03-28T03:37:24 | 2017-03-28T03:37:24 | 78,384,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,290 | py |
# 6.00x Problem Set 4A Template
#
# The 6.00 Word Game
# Created by: Kevin Luu <luuk> and Jenna Wiens <jwiens>
# Modified by: Sarina Canelake <sarina>
#
import random
import string
# Letter pools used when dealing a hand.
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
# Default number of letters dealt per hand.
HAND_SIZE = 7
# Standard Scrabble point value for each lowercase letter.
SCRABBLE_LETTER_VALUES = {
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
#WORDLIST_FILENAME = ""
def loadWords():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.

    NOTE(review): relies on a module-level WORDLIST_FILENAME, which is
    commented out near the top of this file — calling this raises
    NameError until it is defined. TODO confirm the intended file name.
    """
    print "Loading word list from file..."
    # Open unbuffered (third argument 0) — Python 2 file API.
    inFile = open(WORDLIST_FILENAME, 'r', 0)
    # wordList: list of strings, one lowercased word per input line.
    wordList = []
    for line in inFile:
        wordList.append(line.strip().lower())
    print "  ", len(wordList), "words loaded."
    return wordList
def getFrequencyDict(sequence):
    """
    Return a dictionary whose keys are the elements of *sequence* and
    whose values count how many times each element occurs.

    sequence: string or list
    return: dictionary (element_type -> int)
    """
    counts = {}
    for element in sequence:
        # EAFP: increment when present, initialise on first sight.
        try:
            counts[element] += 1
        except KeyError:
            counts[element] = 1
    return counts
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def getWordScore(word, n):
    """
    Return the score for a word. Assumes the word is a valid word.

    The score is the sum of SCRABBLE_LETTER_VALUES points for each
    letter, multiplied by the word's length, plus a 50-point bonus when
    the word uses all n letters of the hand on the first turn.

    word: string (lowercase letters)
    n: integer (HAND_SIZE; i.e., hand size required for additional points)
    returns: int >= 0
    """
    letter_points = sum(SCRABBLE_LETTER_VALUES[letter] for letter in word)
    bonus = 50 if len(word) == n else 0
    return letter_points * len(word) + bonus
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def displayHand(hand):
    """
    Displays the letters currently in the hand.

    For example:
    >>> displayHand({'a':1, 'x':2, 'l':3, 'e':1})
    Should print out something like:
       a x x l l l e
    The order of the letters is unimportant.

    hand: dictionary (string -> int)
    """
    # Print each letter once per remaining count (Python 2 trailing comma
    # keeps everything on one line), then a final newline.
    for letter in hand.keys():
        for j in range(hand[letter]):
            print letter,              # print all on the same line
    print                              # print an empty line
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def dealHand(n):
    """
    Return a random hand containing n lowercase letters, of which at
    least n/3 are vowels.

    The hand is a dictionary: keys are letters, values are the number of
    times that letter appears in the hand.

    n: int >= 0
    returns: dictionary (string -> int)
    """
    hand = {}
    # Integer division under Python 2: first numVowels slots are vowels.
    numVowels = n / 3
    for slot in range(n):
        pool = VOWELS if slot < numVowels else CONSONANTS
        letter = pool[random.randrange(0, len(pool))]
        hand[letter] = hand.get(letter, 0) + 1
    return hand
#
# Problem #2: Update a hand by removing letters
#
def updateHand(hand, word):
    """
    Return a copy of *hand* with the letters of *word* used up.

    Assumes that 'hand' has all the letters in word: for every letter in
    word, hand has at least that many occurrences of it.

    Has no side effects: the caller's hand is never modified.

    word: string
    hand: dictionary (string -> int)
    returns: dictionary (string -> int)
    """
    # Copy once up front. The original copied the dict on every loop
    # iteration (wasteful), and returned the caller's own dict when the
    # word was empty.
    new_hand = hand.copy()
    for letter in word:
        new_hand[letter] -= 1
    return new_hand
#
# Problem #3: Test word validity
#
def itword(h,wlist,z):
    """Recursively walk wlist from index z; True once the walk runs past
    the end with every element found in h, False at the first miss.

    NOTE(review): appears unused in this file; uses Python 2 only
    `except X, e` syntax.
    """
    try:
        if wlist[z] in h:
            return itword(h,wlist,z+1)
        pass
    except IndexError, e:
        # Walked past the end: every element so far was in h.
        return True
    else:
        # No exception and the element was not in h.
        return False
    pass
def check(clist):
    """Return False when the string "False" is an element of clist.

    NOTE(review): this tests for the *string* "False", not the boolean.
    If clist holds booleans (e.g. results of itword) the test never
    matches — confirm the intended element type. Appears unused.
    """
    if "False" in clist:
        return False
    else:
        return True
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely
    composed of letters in the hand. Otherwise, returns False.

    Does not mutate hand or wordList.

    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    # Count how many times each letter occurs in the candidate word.
    wdic = {}
    for x in word:
        wdic[x] = wdic.get(x,0) +1
    # Recursively verify the hand holds at least as many of each letter.
    def recVword(wdic,hand,word,x):
        if x > len(word)-1:
            return True
        elif wdic[word[x]] <= hand[word[x]]:
            return recVword(wdic,hand,word,x+1)
        else:
            return False
    # KeyError means the word uses a letter absent from the hand.
    # (Python 2 only `except X, e` syntax.)
    try:
        if word in wordList and recVword(wdic,hand,word,0) == True:
            return True
        else:
            return False
    except KeyError,e:
        return False
#
# Problem #4: Playing a hand
#
def calculateHandlen(hand):
    """
    Return the length (number of letters) in the current hand.

    hand: dictionary (string -> int)
    returns: integer
    """
    # The hand stores a per-letter count, so its length is simply the
    # sum of the counts (the original walked every letter with nested
    # loops to increment a counter one at a time).
    return sum(hand.values())
def playHand(hand, wordList, n):
    """
    Allows the user to play the given hand, as follows:

    * The hand is displayed.
    * The user may input a word or a single period (the string ".")
      to indicate they're done playing
    * Invalid words are rejected, and a message is displayed asking
      the user to choose another word until they enter a valid word or "."
    * When a valid word is entered, it uses up letters from the hand.
    * After every valid word: the score for that word is displayed,
      the remaining letters in the hand are displayed, and the user
      is asked to input another word.
    * The sum of the word scores is displayed when the hand finishes.
    * The hand finishes when there are no more unused letters or the user
      inputs a "."

      hand: dictionary (string -> int)
      wordList: list of lowercase strings
      n: integer (HAND_SIZE; i.e., hand size required for additional points)
    """
    # Running total for this hand.
    tscore = 0
    # Loop until the hand is empty or the user quits with ".".
    while calculateHandlen(hand) > 0:
        # Show the remaining letters.
        print "Current hand: ",
        displayHand(hand)
        word = raw_input("Enter word, or a \".\" to indicate that you are finished: ")
        # A single period ends the hand early.
        if word == ".":
            print "Goodbye! Total score:", tscore, " points"
            break
        else:
            # Reject words not in the list or not buildable from the hand.
            if isValidWord(word,hand,wordList) == False:
                print "Invalid Word, try again."
                print
            else:
                # Score the word, report the running total, consume letters.
                tscore += getWordScore(word,n)
                print "\"", word, "\"","earned", getWordScore(word,n), " points. ", "Total:", tscore, " points"
                print
                hand = updateHand(hand,word)
        # Report when the letters run out (loop then terminates naturally).
        if calculateHandlen(hand) == 0:
            print "Run out of letters. Total score:", tscore, " points"
        else:
            pass
#
# Problem #5: Playing a game
#
def playGame(wordList):
    """
    Allow the user to play an arbitrary number of hands.

    1) Asks the user to input 'n' or 'r' or 'e'.
      * If the user inputs 'n', let the user play a new (random) hand.
      * If the user inputs 'r', let the user play the last hand again.
      * If the user inputs 'e', exit the game.
      * If the user inputs anything else, tell them their input was invalid.

    2) When done playing the hand, repeat from step 1
    """
    end = 0
    while end == 0:
        choice = raw_input("Enter n to deal a new hand, r to replay the last hand, or e to end game: ")
        # 'r' before any 'n' references `hand` before assignment; the
        # UnboundLocalError handler below turns that into a user message.
        try:
            if choice == "n":
                hand = dealHand(HAND_SIZE)
                playHand(hand,wordList,HAND_SIZE)
            elif choice == "e":
                break
            elif choice == "r":
                playHand(hand,wordList,HAND_SIZE)
            else:
                print "Invalid Input!"
        except UnboundLocalError, e:
            print "You have not played a hand yet. Please play a new hand first!"
# Module-level constant; NOTE(review): appears unused below.
n = 7
#
# Build data structures used for entire session and play game
#
"""if __name__ == '__main__':
wordList = loadWords()
playGame(wordList)"""
wordList = loadWords()
hand = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
word = "mail"
print isValidWord(word,hand,wordList)
| [
"[email protected]"
]
| |
58f7bc0c1b6d6896548b509a10c2457ecb0fc356 | cf6c905f8a446f8f5601d28502a3ed0914707b1c | /mysite/polls/models.py | 660fa2798bd686661b3b6421f208058838c9a465 | []
| no_license | cy0926/poll_system | dce4a7f283e3de139898acde832292030f0b74be | 7136dc78f85963dfd1922278bd8538252e686d56 | refs/heads/master | 2020-03-29T19:06:19.624540 | 2018-09-25T10:34:03 | 2018-09-25T10:34:03 | 150,247,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | from django.db import models
from django.db import models
from django.utils import timezone
import datetime
# Create your models here.
class Question(models.Model):
    """A poll question together with its publication timestamp."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        """Return True only if pub_date falls within the last day.

        Bounded above by now: the original `pub_date >= now - 1 day`
        check also returned True for questions dated in the future.
        """
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
class Choice(models.Model):
    """One selectable answer for a Question, with its vote tally."""
    def __str__(self):
        return self.choice_text
    # Deleting a Question cascades to its choices.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
| [
"[email protected]"
]
| |
5a3725eeee67221e679b1ce3bd76c0a296537a2b | e2f8aacdfa66c56a4b7cece704452f4aa264edae | /test.py | 8d3fddf836b381580f1e48a004164189d2702b58 | []
| no_license | jvaldiviezo9/Simple-Test | 7bcef97b356d91a554ca4327f699e642a9e39b9e | 4fe61e89a54fbe4fdcccde43f3a1826833cbc4dc | refs/heads/master | 2020-07-28T07:33:48.333185 | 2016-11-10T21:00:34 | 2016-11-10T21:00:34 | 73,417,896 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | class A():
def __init__(self):
self.x=7
self.y=8
self.z="name"
class Employee(object):
    """Expose the entries of a plain dict as instance attributes."""

    def __init__(self, _dict):
        # Same effect as self.__dict__.update(_dict): every key in the
        # mapping becomes an attribute on this instance.
        for key, value in _dict.items():
            setattr(self, key, value)
class Employee_2(object):
    """Build attributes from positional dicts, then keyword overrides."""

    def __init__(self, *initial_data, **kwargs):
        # Positional dicts first, keyword arguments last, so kwargs win
        # on duplicate keys -- same order as the original setattr loops.
        for mapping in initial_data:
            self.__dict__.update(mapping)
        self.__dict__.update(kwargs)
if __name__ == '__main__':
    # Demonstrate overwriting A's preset attributes via __dict__.update.
    d = {'x': 100, 'y': 300, 'z': "blah"}
    a = A()
    print(a.x, a.y, a.z)
    a.__dict__.update(d)
    print(a.x, a.y, a.z)
    # NOTE(review): this name shadows the builtin `dict` inside the block.
    dict = {'name': 'Oscar', 'lastName': 'Reyes', 'age': 32}
    e = Employee(dict)
    print(e.name)
    print(e.age)
    print(round(9.090298349, 2))
"[email protected]"
]
| |
986cfd2a0e57ba7345c23320f792cfa5cd7efc76 | 1959ac24c8be1c7f42e51dc4fb74b1fc1d4cf754 | /news/nav_processor.py | ce102aed7f4eead8536cb944c0b79f3de0627831 | []
| no_license | nanerleee/minicms | 7123882aedf500bbc3fe055987814d74d7543dcd | ae941da82fb5b7ecf69c0e3ccc9736111503e69b | refs/heads/master | 2021-01-12T07:48:44.080577 | 2016-12-21T06:46:33 | 2016-12-21T06:46:33 | 77,024,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from .models import Column
# Kept for backward compatibility with any code importing this name.
# NOTE(review): a queryset caches its results after first evaluation, so
# this module-level object serves stale data for the process lifetime.
nav_display_columns = Column.objects.filter(nav_display=True)

def nav_column(request):
    """Template context processor exposing nav-enabled columns.

    Builds a fresh queryset on every call so columns added or removed in
    the admin show up without restarting the server (the original reused
    the single module-level queryset above, whose result cache made the
    navigation list stale).
    """
    return {'nav_display_columns': Column.objects.filter(nav_display=True)}
| [
"[email protected]"
]
| |
33708d51847a2c0f48609d985a8d8d1f806b84ba | 1f69ebe2eb1aa6214ba4bc0288940f2d4e580ab7 | /Assignment/assi1/harmonic.py | 70d44e00de90a4115ee6c3b1686d88795b276f9b | []
| no_license | Prathamesh-Mone/PPL19-20 | 68f8003760d62c782163def37fcc74050f9a8e4f | c192deff3e171099cca5ab6c880ef01ba149cb9c | refs/heads/master | 2022-10-05T09:30:16.835203 | 2020-06-11T06:21:50 | 2020-06-11T06:21:50 | 248,428,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | def sumreci(n) :
i = 1; new = 0
while i <= n :
if n%i == 0 :
new = new + 1/i
i = i + 1
return new
def numdivisors(n):
    """Return how many positive integers divide n evenly."""
    return sum(1 for divisor in range(1, n + 1) if n % divisor == 0)
if __name__ == "__main__" :
i = 1; l = 1
while i <= 8 :
p = sumreci(l)
q = numdivisors(l)
if q/p == int(q/p) :
print(l," is a harmonic number \n")
i = i + 1
l = l + 1
| [
"[email protected]"
]
| |
eea24f51c349fb5fbbbc953d159fc360bb09cf38 | 6355e7024c047cc074637011dcb9f9934073dbf3 | /les/wsgi.py | c6a33d0418d8d7a3a6c3bdc38beba57846e729a6 | []
| no_license | guilhascorreia24/Componentes-de-user | a25f56e4cab8b45fb7ba185fc5722d5521235f2a | acd8a63ac0ef448704616a378b5bc08b1c84ffb3 | refs/heads/master | 2021-02-28T20:41:04.678911 | 2020-03-13T21:13:02 | 2020-03-13T21:13:02 | 245,731,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for les project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'les.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"[email protected]"
]
| |
4d7f1f28dc9fb98b2c1de9f64a5c8e7490aa1c6b | 0c9e144bb9beec2d941708898943036b2ead74ca | /0x08-python-more_classes/2-rectangle.py | 5bc1989d63c2bb114cea06d1efb1648127614f15 | []
| no_license | rajsudeep/holbertonschool-higher_level_programming | d95657c9d67e10ecea0740de4e1dd206e16a292c | 9f9b905d5de58f145b1d61ceb1387dc05dcaae21 | refs/heads/master | 2020-07-22T22:54:55.643798 | 2020-02-12T00:45:29 | 2020-02-12T00:45:29 | 207,357,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | #!/usr/bin/python3
"""
This module contains the Rectangle class
Defines a rectangle
"""
class Rectangle:
    """A rectangle with validated, non-negative integer dimensions."""

    def __init__(self, width=0, height=0):
        # Assign through the property setters so the same validation
        # applies at construction time and on later assignment.
        self.width = width
        self.height = height

    @staticmethod
    def _validate(value, name):
        """Raise unless *value* is a non-negative integer."""
        if not isinstance(value, int):
            raise TypeError("{} must be an integer".format(name))
        if value < 0:
            raise ValueError("{} must be >= 0".format(name))

    @property
    def width(self):
        """Horizontal dimension."""
        return self.__width

    @width.setter
    def width(self, value):
        self._validate(value, "width")
        self.__width = value

    @property
    def height(self):
        """Vertical dimension."""
        return self.__height

    @height.setter
    def height(self, value):
        self._validate(value, "height")
        self.__height = value

    def area(self):
        """Return width * height."""
        return self.__width * self.__height

    def perimeter(self):
        """Return 2*(width + height); degenerate rectangles have perimeter 0."""
        if 0 in (self.__width, self.__height):
            return 0
        return 2 * (self.__width + self.__height)
| [
"[email protected]"
]
| |
9e560ff7f892b501b1c2b3e4d239b0401eddc10e | 2a1a5619a9e3ced181dc2c4fc6f046364a2076cb | /assignment_3/problem5/api.py | 41c01ebe76fa17d619507f4c62f02c77d6ec476f | []
| no_license | Jallenbo/is-206 | c9b8f0c72e2be4a5d6e75183c393565e25d87201 | fe4355681177cdf8ad7f5c9e10494a51561341ed | refs/heads/master | 2021-01-22T11:55:30.047406 | 2013-11-20T19:02:02 | 2013-11-20T19:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,771 | py | import os
import re
import random
import hashlib
import hmac
import logging
import json
from string import letters
import webapp2
import jinja2
from google.appengine.ext import db
#############################################################################
# Most of the code have already been commented in earlier problem sets #
# #
# Only new code will be commented here #
#############################################################################
# Jinja2 templates live in a 'templates' directory next to this file;
# autoescaping is on so rendered values are HTML-escaped by default.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
                               autoescape = True)
# HMAC key for signing cookies. NOTE(review): hard-coded secret in source
# control — should be loaded from configuration/environment instead.
secret = 'topphemmelig'
def render_str(template, **params):
    """Render the named Jinja2 template with *params* and return the text."""
    t = jinja_env.get_template(template)
    return t.render(params)
def make_secure_val(val):
    """Return 'val|signature', signing *val* with the module secret.

    Tampering with the value invalidates the HMAC signature.
    """
    signature = hmac.new(secret, val).hexdigest()
    return '%s|%s' % (val, signature)
def check_secure_val(secure_val):
    """Return the embedded value when its signature verifies, else None."""
    val = secure_val.split('|')[0]
    # Re-sign the extracted value and compare against the whole token.
    if make_secure_val(val) == secure_val:
        return val
class BlogHandler(webapp2.RequestHandler):
    """Base request handler: template rendering, JSON output, and
    signed-cookie login/logout helpers shared by all pages."""

    def write(self, *a, **kw):
        # Shortcut for writing to the response body.
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        # Every template automatically sees the current user.
        params['user'] = self.user
        t = jinja_env.get_template(template)
        return t.render(params)

    def render(self, template, **kw):
        self.write(self.render_str(template, **kw))

    def render_json(self, d):
        # Serialise *d* and set the JSON content type on the response.
        json_txt = json.dumps(d)
        self.response.headers['Content-Type'] = 'application/json; charset=UTF-8'
        self.write(json_txt)

    def set_secure_cookie(self, name, val):
        # Store the value with its HMAC signature so it cannot be forged.
        cookie_val = make_secure_val(val)
        self.response.headers.add_header(
            'Set-Cookie',
            '%s=%s; Path=/' % (name, cookie_val))

    def read_secure_cookie(self, name):
        # Returns the verified value, or a falsy value when missing/tampered.
        cookie_val = self.request.cookies.get(name)
        return cookie_val and check_secure_val(cookie_val)

    def login(self, user):
        self.set_secure_cookie('user_id', str(user.key().id()))

    def logout(self):
        # Clearing the cookie value logs the browser out.
        self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')

    def initialize(self, *a, **kw):
        # Runs before every request: resolve the logged-in user from the
        # cookie and pick the response format from the URL suffix.
        webapp2.RequestHandler.initialize(self, *a, **kw)
        uid = self.read_secure_cookie('user_id')
        self.user = uid and User.by_id(int(uid))

        if self.request.url.endswith('.json'):
            self.format = 'json'
        else:
            self.format = 'html'
def make_salt(length = 5):
    """Return a random string of *length* letters for password salting.

    NOTE(review): `xrange` and `string.letters` are Python 2 only; use
    `range`/`string.ascii_letters` if this ever moves to Python 3. Salts
    for passwords should ideally come from the `secrets` module.
    """
    return ''.join(random.choice(letters) for x in xrange(length))
def make_pw_hash(name, pw, salt = None):
    """Return 'salt,digest' where digest = sha256(name + pw + salt).

    A fresh random salt is generated when none is supplied.
    """
    salt = salt or make_salt()
    digest = hashlib.sha256(name + pw + salt).hexdigest()
    return '%s,%s' % (salt, digest)
def valid_pw(name, password, h):
    """True when *password* re-hashes to the stored value *h* ('salt,digest')."""
    stored_salt = h.split(',')[0]
    return h == make_pw_hash(name, password, stored_salt)
def users_key(group = 'default'):
    """Ancestor key grouping all User entities for consistent queries."""
    return db.Key.from_path('users', group)
class User(db.Model):
    """Datastore entity for an account: name, salted password hash, email."""
    name = db.StringProperty(required = True)
    pw_hash = db.StringProperty(required = True)
    email = db.StringProperty()

    @classmethod
    def by_id(cls, uid):
        """Look up a user by datastore id."""
        return User.get_by_id(uid, parent = users_key())

    @classmethod
    def by_name(cls, name):
        """Look up a user by unique username, or None."""
        u = User.all().filter('name =', name).get()
        return u

    @classmethod
    def register(cls, name, pw, email = None):
        """Build (but do not save) a User with a freshly hashed password."""
        pw_hash = make_pw_hash(name, pw)
        return User(parent = users_key(),
                    name = name,
                    pw_hash = pw_hash,
                    email = email)

    @classmethod
    def login(cls, name, pw):
        """Return the user when name/password match, else None."""
        u = cls.by_name(name)
        if u and valid_pw(name, pw, u.pw_hash):
            return u
def blog_key(name = 'default'):
    """Ancestor key grouping all blog Post entities."""
    return db.Key.from_path('blogs', name)
class Post(db.Model):
    """Datastore entity for one blog post with created/modified timestamps."""
    subject = db.StringProperty(required = True)
    content = db.TextProperty(required = True)
    created = db.DateTimeProperty(auto_now_add = True)
    last_modified = db.DateTimeProperty(auto_now = True)

    def render(self):
        """Render this post via post.html, turning newlines into <br>."""
        self._render_text = self.content.replace('\n', '<br>')
        return render_str("post.html", p = self)

    def as_dict(self):
        """Return a JSON-serialisable dict view of this post."""
        time_fmt = '%c'
        d = {'subject': self.subject,
             'content': self.content,
             'created': self.created.strftime(time_fmt),
             'last_modified': self.last_modified.strftime(time_fmt)}
        return d
#Renders JSON using as_dict if format is set to JSON in initialize()
class BlogFront(BlogHandler):
    """Front page: newest posts first, as HTML or JSON per URL suffix."""
    def get(self):
        # NOTE(review): `greetings` alias is never used afterwards.
        posts = greetings = Post.all().order('-created')
        if self.format == 'html':
            self.render('front.html', posts = posts)
        else:
            return self.render_json([p.as_dict() for p in posts])
#Same as BlogFront
class PostPage(BlogHandler):
    """Permalink page for a single post (HTML or JSON), 404 when missing."""
    def get(self, post_id):
        key = db.Key.from_path('Post', int(post_id), parent=blog_key())
        post = db.get(key)

        if not post:
            self.error(404)
            return
        if self.format == 'html':
            self.render("permalink.html", post = post)
        else:
            self.render_json(post.as_dict())
class NewPost(BlogHandler):
    """Create a new post; only available to logged-in users."""
    def get(self):
        if self.user:
            self.render("newpost.html")
        else:
            self.redirect("/login")

    def post(self):
        # NOTE(review): no `return` after this redirect, so the rest of
        # the handler still executes for anonymous users — confirm intent.
        if not self.user:
            self.redirect('/blog')

        subject = self.request.get('subject')
        content = self.request.get('content')

        if subject and content:
            p = Post(parent = blog_key(), subject = subject, content = content)
            p.put()
            self.redirect('/%s' % str(p.key().id()))
        else:
            # Re-render the form with an error, preserving the user's input.
            error = "subject and content, please!"
            self.render("newpost.html", subject=subject, content=content, error=error)
# Usernames: 3-20 characters drawn from letters, digits, underscore, hyphen.
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
    """Truthy when *username* is non-empty and matches USER_RE."""
    if not username:
        return username
    return USER_RE.match(username)
# Passwords: any 3-20 characters.
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
    """Truthy when *password* is non-empty and matches PASS_RE."""
    if not password:
        return password
    return PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
class Signup(BlogHandler):
    """Signup form: validates fields, then hands off to done() (template
    method overridden by subclasses such as Register)."""
    def get(self):
        self.render("signup-form.html")

    def post(self):
        have_error = False
        self.username = self.request.get('username')
        self.password = self.request.get('password')
        self.verify = self.request.get('verify')
        self.email = self.request.get('email')

        # Echo back username/email so the user need not retype them.
        params = dict(username = self.username,
                      email = self.email)

        if not valid_username(self.username):
            params['error_username'] = "That's not a valid username."
            have_error = True

        if not valid_password(self.password):
            params['error_password'] = "That wasn't a valid password."
            have_error = True
        elif self.password != self.verify:
            params['error_verify'] = "Your passwords didn't match."
            have_error = True

        if not valid_email(self.email):
            params['error_email'] = "That's not a valid email."
            have_error = True

        if have_error:
            self.render('signup-form.html', **params)
        else:
            self.done()

    def done(self, *a, **kw):
        # Subclasses must implement the post-validation step.
        raise NotImplementedError
class Register(Signup):
    """Signup flow that actually creates the account and logs the user in."""
    def done(self):
        # Reject duplicate usernames before creating the entity.
        u = User.by_name(self.username)
        if u:
            msg = 'That user already exists.'
            self.render('signup-form.html', error_username = msg)
        else:
            u = User.register(self.username, self.password, self.email)
            u.put()
            self.login(u)
            self.redirect('/')
class Login(BlogHandler):
    """Login form; sets the signed session cookie on success."""
    def get(self):
        self.render('login-form.html')

    def post(self):
        username = self.request.get('username')
        password = self.request.get('password')

        u = User.login(username, password)
        if u:
            self.login(u)
            self.redirect('/')
        else:
            # Deliberately vague message: don't reveal which field failed.
            msg = 'Invalid login'
            self.render('login-form.html', error = msg)
class Logout(BlogHandler):
    """Clear the session cookie and send the user to the signup page."""
    def get(self):
        self.logout()
        self.redirect('/signup')
# URL routing table. '(?:.json)?' suffixes let the same handlers serve
# JSON; '([0-9]+)' captures the post id for PostPage.
# NOTE(review): '/' and '/?(?:.json)?' both map to BlogFront — the first
# entry is redundant since the second also matches '/'.
app = webapp2.WSGIApplication([('/', BlogFront),
                               ('/?(?:.json)?', BlogFront),
                               ('/([0-9]+)(?:.json)?', PostPage),
                               ('/newpost', NewPost),
                               ('/signup', Register),
                               ('/login', Login),
                               ('/logout', Logout),
                               ],
                              debug=True)
| [
"[email protected]"
]
| |
0b89423c220dcc5323af0513aba930115f2245b1 | d89e79cbabe985368645e12271419faf60b2f0cd | /forest_quiebra.py | 1ac0f0d398749fcf21abc3300581d81438ecc0da | []
| no_license | DiegoCelis33/CarranzaDiego_Ejercicio17 | 39e7f303c9a11685b1072a8a99c5d021bd770658 | ea0f2b7c24c33076b00a942ade4a35e621a1dc54 | refs/heads/master | 2021-04-23T20:23:48.557351 | 2020-03-25T16:58:56 | 2020-03-25T16:58:56 | 249,995,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | #!/usr/bin/env python
# coding: utf-8
# In[90]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd # para leer datos
import sklearn.ensemble # para el random forest
import sklearn.model_selection # para split train-test
import sklearn.metrics # para calcular el f1-score
from scipy.io import arff
# In[169]:
# Load the five yearly bankruptcy datasets (ARFF) and stack them.
data1 = arff.loadarff('1year.arff')
data2 = arff.loadarff('2year.arff')
data3 = arff.loadarff('3year.arff')
data4 = arff.loadarff('4year.arff')
data5 = arff.loadarff('5year.arff')
data1 = pd.DataFrame(data1[0])
data2 = pd.DataFrame(data2[0])
data3 = pd.DataFrame(data3[0])
data4 = pd.DataFrame(data4[0])
data5 = pd.DataFrame(data5[0])
#data = pd.concat([data1, data2,data3,data4,data5], axis=0)
data = pd.concat([data1, data2,data3,data4,data5])
# 'class' is a Python keyword, hence getattr for the label column.
sd = getattr(data, "class")
data['class']=sd.astype(int)
# Drop rows with any missing feature.
data = data.dropna()
# All columns except the label are predictors.
predictors = list(data.keys())
predictors.remove('class')
#print(predictors, np.shape(np.array(predictors)))
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
    data[predictors], data['class'], test_size=0.5)
# NOTE(review): this second split re-samples from the FULL dataset, so
# X_test/X_validation overlap with X_train — likely data leakage; confirm
# whether the intent was to split the previous X_test instead.
X_test, X_validation, y_test, y_validation = sklearn.model_selection.train_test_split(
    data[predictors], data['class'], test_size=0.2)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=10, max_features='sqrt')
# Sweep the number of trees and record train/test F1 per setting.
n_trees = np.arange(1,100,25)
f1_train = []
f1_test = []
feature_importance = np.zeros((len(n_trees), len(predictors)))
for i, n_tree in enumerate(n_trees):
    clf = sklearn.ensemble.RandomForestClassifier(n_estimators=n_tree, max_features='sqrt')
    clf.fit(X_train, y_train)
    f1_train.append(sklearn.metrics.f1_score(y_train, clf.predict(X_train)))
    f1_test.append(sklearn.metrics.f1_score(y_test, clf.predict(X_test)))
    feature_importance[i, :] = clf.feature_importances_
# Best tree count according to test F1.
maximo = n_trees[np.argmax(f1_test)]
# In[158]:
#plt.scatter(n_trees, f1_test)
# In[186]:
# Refit with the best tree count on the validation split.
feature_importance = np.zeros((maximo, len(predictors)))
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=maximo, max_features='sqrt')
clf.fit(X_validation, y_validation)
f1_validation = sklearn.metrics.f1_score(y_validation, clf.predict(X_validation))
# NOTE(review): `i` here is the stale index left over from the loop above,
# so only one row of the freshly zeroed matrix is filled; the average
# below is then diluted by all-zero rows — confirm intended computation.
feature_importance[i, :] = clf.feature_importances_
avg_importance = np.average(feature_importance, axis=0)
a = pd.Series(avg_importance, index=predictors)
print(a)
# Plot the five most important features and save the figure.
plt.figure()
a.nlargest().plot(kind='barh')
plt.xlabel('Average Feature Importance')
plt.title('M='+str(maximo))
plt.savefig("features.png")
# In[171]:
f1_validation
# In[ ]:
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.