blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
585841a0ab7fec5d5e554df56e9525b8542746bf | a273c33036b697eaa90b01a22e5f01a31c61fda5 | /exercises/ListChaine.py | 5889a61caab9ac0da539cc442f6346bf568c7634 | [] | no_license | allaok/codestores | 1a55ed8798f6c99476fe24f27fda9a3c3fa03116 | f000bbb2518a8202875cbbcf6cc3a11e57db5792 | refs/heads/master | 2021-01-19T05:44:06.981591 | 2015-07-29T22:56:16 | 2015-07-29T22:56:16 | 39,902,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | __author__ = 'PWXG8293'
class Element:
def __init__(self):
self.value = None
self.next = None
class Liste:
def __init__(self):
self.first = None
def append(self, value):
element = Element()
element.value = value
element.next = None
if self.first is None:
self.first = element
else:
ptr = self.first
while ptr.next is not None:
ptr = ptr.next
ptr.next = element | [
"[email protected]"
] | |
2ce3c8a48982b584a60b2a960d76c25d0d5a33c3 | f8965d7b16e3cf70370b3bd181ef855a2ab89768 | /services/student_service.py | 1008f3f8b34bb77616f5d65ad565592c882b9575 | [
"Apache-2.0"
] | permissive | tuannguyendang/montypython | 59cae6fc6069cf5356670132470cdd52bad00d67 | c0b8ff7a8130e811ba16bfab8d5e013eac37f432 | refs/heads/main | 2023-05-12T02:24:50.693432 | 2021-05-31T02:14:16 | 2021-05-31T02:14:16 | 325,188,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | from uuid import uuid4
from services import StudentAssignmentService
from services.abstract import Assignment
class StudentService:
def __init__(self):
self.student_graders = {}
self.assignment_class = {}
def register(self, assignment_class):
if not issubclass(assignment_class, Assignment):
raise RuntimeError("Your class does not have the right methods")
id = uuid4()
self.assignment_class[id] = assignment_class
return id
def start_assignment(self, student, id):
self.student_graders[student] = StudentAssignmentService(
student, self.assignment_class[id]
)
def get_lesson(self, student):
assignment = self.student_graders[student]
return assignment.lesson()
def check_assignment(self, student, code):
assignment = self.student_graders[student]
return assignment.check(code)
def assignment_summary(self, student):
grader = self.student_graders[student]
return f"""
{student}'s attempts at {grader.assignment.__class__.__name__}:
attempts: {grader.attempts}
correct: {grader.correct_attempts}
passed: {grader.correct_attempts > 0}
"""
| [
"[email protected]"
] | |
dd2c0563158627b1fd4a1e16385be8b08316abe4 | a7d23974abd0d09681c17ca09038dc6dcd80a2ee | /extra/exporters.py | db0c766c8661667fb961183dbe55d8eadda1027e | [] | no_license | world9781/pydir | ccdf8aa560411957cf476324d49c7c1b4e0073c5 | 0c6c878a69bc5e0cabd12142d5fbb014fbade006 | refs/heads/master | 2023-03-15T20:27:57.409027 | 2018-10-22T19:09:11 | 2018-10-22T19:09:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | """
Exporters main duty is to represent a directory structure as XML or Json
TO BE IMPLEMENTED
"""
import os
import json
class BaseExporter(object):
"""A base for Writing Directory structure Exportation formats"""
def __init__(self,path_name):
self.pathname = path_name
def repr_as_dict(self,path):
base = {'root-name' : os.path.basename(path) }
if os.path.isdir(path):
base['type'] = 'Directory'
base['children'] = [self.repr_as_dict(os.path.join(path,the_dir)) for the_dir in os.listdir(path)]
else:
base['type'] = "file"
return base
def dump(self,out_file):
raise Exception("Unimplemented")
class JSONExporter(BaseExporter):
"""Export Directory Structure as JSON"""
def __init__(self,*args):
super(JSONExporter,self).__init__(*args)
def dump(self,out_file):
json.dump(self.repr_as_dict(self.pathname),out_file)
| [
"[email protected]"
] | |
ebf043a81c81d7202f4783736d677c16d360a834 | 828695b32588933b87b8a58c9f68a3d1ce23db17 | /jobs/migrations/0001_initial.py | 4b98fd0982e563eb9eed2e0d1246a174992521c9 | [] | no_license | jimpfred/portfolio | 6a82a632319c0628b54a1b68b13238d10be67458 | 3e62cacd52052844d6a8400fc142ba04d5bb0740 | refs/heads/main | 2023-07-12T08:00:09.698957 | 2021-08-17T21:42:33 | 2021-08-17T21:42:33 | 397,274,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # Generated by Django 3.2.4 on 2021-08-16 19:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='images/')),
('summary', models.CharField(max_length=200)),
],
),
]
| [
"[email protected]"
] | |
b6e06bd57873d7cd596aa92ffcccf76eb8c487d1 | 6544fa558a6c08e4c67de393ed0d0ab554533839 | /DjangoProjects/DjangoProjects7EnquiryPageAndDeleteRoom/Rento/rento/rooms/models.py | 8ec356ad12ac7f769c1d4c1fecbeb6b79ef96526 | [] | no_license | tennnmani/bitproject7-DjangoPython- | 498878276ca0c847d0cf2ca73c1091074720d6e5 | fe13b4822c4cc5686a478dbfee915c108b6f9278 | refs/heads/main | 2023-02-21T16:56:10.842672 | 2021-02-25T04:13:52 | 2021-02-25T04:13:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,505 | py | from django.db import models
from django.urls import reverse
# Create your models here.
class City(models.Model):
name = models.CharField(max_length=120) # max_length = required
def __str__(self):
return self.name
class Location(models.Model):
city = models.OneToOneField(City, on_delete=models.CASCADE)
location = models.CharField(max_length=120) # max_length = required
code = models.CharField(max_length=500) # max_length = required
def __str__(self):
return self.location
@staticmethod
def get_all_locations():
return Location.objects.all()
class Room(models.Model):
city = models.ForeignKey(City, on_delete=models.DO_NOTHING)
location = models.ForeignKey(Location, on_delete=models.DO_NOTHING)
house_number = models.CharField(max_length=120) # max_length = required
description = models.TextField(blank=True, null=True)
floor = models.IntegerField()
price = models.PositiveIntegerField()
image1 = models.ImageField(upload_to='')
image2 = models.ImageField(upload_to='')
image3 = models.ImageField(upload_to='')
water = models.BooleanField(default=False)
internet = models.BooleanField(default=False)
parking = models.BooleanField(default=False)
description = models.TextField(max_length=500,blank=False, null=False)
date_posted = models.DateField(auto_now_add=True)
views = models.IntegerField(default=0)
blocked = models.BooleanField(default=False)
public = models.BooleanField(default=True)
private = models.BooleanField(default=False)
def __str__(self):
return str(self.pk)
@staticmethod
def get_all_rooms():
return Room.objects.all()
@staticmethod
def get_all_rooms_by_filter(location_id):
if location_id:
return Room.objects.filter(location = location_id)
else:
return Room.get_all_rooms()
@staticmethod
def get_all_rooms_by_waterinternetparkingfilter(water_id,internet_id,parking_id):
return Room.objects.filter(water = water_id, parking = internet_id, internet = parking_id)
@staticmethod
def get_all_rooms_by_allfilter(location_id,water_id,internet_id,parking_id):
return Room.objects.filter(location = location_id,water = water_id, parking = internet_id, internet = parking_id)
# def get_absolute_url(self):
# return reverse("products:product-detail", kwargs={"id": self.id}) #f"/products/{self.id}/" | [
"[email protected]"
] | |
c2ab7ebb09d2e187c4dbf7afea60cfab0e18c38b | 72eb6f8dcfe34996e9c16769fd272d0d4383743f | /OS/MP-3/test.py | 63a530e8860c13151c7675b4a9c1d76e81a69305 | [] | no_license | abhishekkrm/Projects | a11daabc3a051b02f8b899d6058878d08b7613d8 | e7cd5a414ee330ac32671b4eab060949227fe3c7 | refs/heads/master | 2021-03-19T11:16:33.525390 | 2015-02-15T22:15:43 | 2015-02-15T22:15:43 | 30,843,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import getopt
try :
#Keep Receiving the message unless self.cr_lf is found
time_val = 0
if time_val <= 0:
raise "timeout"
print ("DD")
except:
print ("HI")
print("TP")
| [
"[email protected]"
] | |
bcd8ad1f09ff8608cd0e1873f337a00d768cbe32 | d7a05a935169e7b4d1c3cc834ff70633908fb525 | /test_model.py | a5e8711ccbf4e9a40a69659d8d35d06c897711b8 | [] | no_license | NinaWie/classify_satellite_images | 72d44b7cdc9c9c038daccc6e354ede04b0b786d8 | 5810580999e557e56fc09d0404f2faccc9690e9a | refs/heads/master | 2020-08-29T02:02:12.105432 | 2019-10-27T17:27:56 | 2019-10-27T17:27:56 | 217,888,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | import numpy as np
import os
import cv2
import json
import keras
import argparse
import sys
from keras.applications.vgg16 import preprocess_input
from keras.models import model_from_json
def classify_tif_image(k, l):
"""
This loads the TIF file and creates the 256*256 tiles out of it. it then classifies each tile to one of the 9 classes.
:param k:
:param l:
:return:
"""
img = cv2.imread(inp_path + "/{}-{}.tif".format(k, l))
print(inp_path + "/{}-{}.tif".format(k, l))
print("Finished Loading Image")
shape = img.shape
imcount = 0
img_arr = []
filenames = []
for i in range(0, shape[0] - shape[0] % 256, 256):
for j in range(0, shape[1] - shape[1] % 256, 256):
tile = img[i:i + 256, j:j + 256, :]
assert (tile.shape == (256, 256, 3))
imcount += 1
img_arr.append(tile)
filenames.append("{}-{}_{}_{}".format(k, l, i, j))
assert (len(filenames) == len(img_arr))
img_arr = np.asarray(img_arr)
print(img_arr.shape)
# final data:
img_data = preprocess_input(img_arr.astype(np.float))
sizes = img_data.shape
print(img_data.shape)
# load json and create model
json_file = open(os.path.join(model_path, 'model.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(os.path.join(model_path, "model.h5"))
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
out = loaded_model.predict(img_data)
mapping = json.load(open(mapping_file, "r"))
res_dir = {filenames[i]: str(mapping[str(np.argmax(out[i]))]) for i in range(len(out))}
output_path = "{}-{}_pred_labels.json".format(k, l)
json.dump(res_dir, open(os.path.join(args.out_dir, output_path), "w"))
print("Saved predicted labels in a dictionary in ", output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Train/test neural network for recognizing pitch type from joint trajectories')
parser.add_argument('-path_to_data', help='path to data to predict labels for - e.g. tiles', required=True)
parser.add_argument('-path_to_model', help='path to model.h5 and model.json', required=True)
parser.add_argument('-mapping_file', help='path to mapping file', required=True)
parser.add_argument('-out_dir', default=".", help='path to output the predicted labels', required=False)
parser.add_argument('-start', default="1", help='number from which image it should start', required=False)
parser.add_argument('-end', default="2", help='number to which image it should process', required=False)
args = parser.parse_args()
inp_path = args.path_to_data
model_path = args.path_to_model
mapping_file = args.mapping_file
from_num = int(args.start)
to_num = int(args.end)
if not os.path.exists:
print("ERROR: PATH DOES NOT EXIST!")
sys.exit()
for k in range(from_num, to_num):
for l in range(17, 20, 2):
classify_tif_image(k, l)
| [
"[email protected]"
] | |
952177163a4b1437e1561a38db38fa4d951331ba | 9cb8a9f5172f9af17eda5ca8d3c6ff297f0bf120 | /setup.py | 42aed970f5f0d61d42ac271f5d64f44f3aa5bc6c | [] | no_license | tzyk777/twrapper | 91c35e0a572c533071bc02c75952fd69fd2b4a22 | b07c7c307f324a214c876b0d50ec18771b0012e1 | refs/heads/master | 2021-01-17T16:29:16.968949 | 2016-10-29T21:54:06 | 2016-10-29T21:54:06 | 62,597,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from distutils.core import setup
setup(name='twrapper',
version='1.3.1',
description='Python twitter wrapper',
author='Zeyang Tao',
author_email='[email protected]',
packages=['twrapper']
) | [
"[email protected]"
] | |
12991bf43618c242644e572cf61bc414b413c0b4 | 152ebb6f75ac0d79f824ea219ca095be59b23fd0 | /client-side-module/client-side.py | 62ab47a62a652cdf6bcc0f1acf2da0f6c105606b | [] | no_license | Automated-CAD-Scoring-Suite/Remote-Communication-Module-for-3D-Slicer | f296d1aca08cb90d1feb28e6ee8e1d7e45af70e0 | fc6a2166c3208997bb7ed0aa8cd3ee0c7b6dc794 | refs/heads/main | 2023-02-23T21:16:25.327063 | 2021-01-30T04:04:53 | 2021-01-30T04:04:53 | 333,851,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,210 | py | # Import Packages
import SimpleITK as sitk
import nibabel as nib
import matplotlib.pylab as plt
import numpy as np
def find_roi_2D(s):
# rotate -90
s_rotated = np.rot90(s, k=3)
# flip slice
s_fliped = np.flip(s, axis=0)
s_rotated_fliped = np.flip(s_rotated, axis=0)
# Get up and down coordiates
y1 = np.unravel_index(np.argmax(s, axis=None), s.shape)
y2 = np.unravel_index(np.argmax(s_fliped, axis=None), s.shape)
x1 = np.unravel_index(np.argmax(s_rotated, axis=None), s.shape)
x2 = np.unravel_index(np.argmax(s_rotated_fliped, axis=None), s.shape)
# return x1, x2, y1, y2 of image
return x1[0], s.shape[1] - x2[0], y1[0], s.shape[0] - y2[0]
def find_roi(sample):
X1, X2, Y1, Y2, Z1, Z2 = sample.shape[1], 0, sample.shape[0], 0, sample.shape[2], 0
for index in range(sample.shape[2]): # around Z (axial)
# Take slice from sample
s = sample[:, :, index]
# find points
x1, x2, y1, y2 = find_roi_2D(s)
# check for min x1,y1 and max x2,y2
X1 = min(x1, X1)
Y1 = min(y1, Y1)
X2 = max(x2, X2)
Y2 = max(y2, Y2)
for index in range(sample.shape[1]): # around X (sagital)
# Take slice from sample
s = sample[:, index, :]
# find points
z1, z2, y1, y2 = find_roi_2D(s)
# check for min z1,y1 and max z2,y2
Z1 = min(z1, Z1)
Y1 = min(y1, Y1)
Z2 = max(z2, Z2)
Y2 = max(y2, Y2)
for index in range(sample.shape[0]): # around Y (coronal)
# Take slice from sample
s = sample[index, :, :]
# find points
x1, x2, z1, z2 = find_roi_2D(s)
# check for min x1,z1 and max x2,z2
X1 = min(x1, X1)
Z1 = min(z1, Z1)
X2 = max(x2, X2)
Z2 = max(z2, Z2)
return X1, X2, Y1, Y2, Z1, Z2
def crop_roi(sample, x1, x2, y1, y2, z1, z2):
y = (y2 - y1 + 1) if (y1 != 0) else (y2 - y1)
x = (x2 - x1 + 1) if (x1 != 0) else (x2 - x1)
z = (z2 - z1 + 1) if (z1 != 0) else (z2 - z1)
sample_croped = np.empty((y, x, z, 1))
# for index in range(sample_croped.shape[2]):
# # Take slice from sample
# s = sample[:,:, index]
#
# # Crop
# croped_slice = np.copy(s[y1:y2+1 , x1:x2+1])
#
# sample_croped[:,:, index] = croped_slice
sample_croped = sample[y1:y2 + 1, x1:x2 + 1, z1:z2 + 1].copy()
return sample_croped
def load_itk(filename: str):
"""
This function reads a '.mhd' file using SimpleITK
:param filename: Path of .mhd file
:return: The image array, origin and spacing of the image.
"""
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return itkimage, ct_scan, origin, spacing
def plot_view(data):
plt.figure(figsize=(50, 50))
plt.gray()
plt.subplots_adjust(0,0,1,1,0.01,0.01)
for i in range(data.shape[1]):
plt.subplot(8, 8, i+1)
plt.imshow(data[i])
plt.axis('off')
# use plt.savefig(...) here if you want to save the images as .jpg, e.g.,
plt.show()
# itkimage, ct_scan_data, origin, spacing = load_itk('../data/trv1p1cti.mhd')
itkimage, ct_scan_data, origin, spacing = load_itk('../data/trv1p1cti.nii')
# print(itkimage)
# ct_scan_data = nib.load('../data/trv1p1cti.nii').get_fdata()
# ct_scan_data = np.swapaxes(ct_scan_data, 0, 2)
# itkimage2, ct_scan_label, origin2, spacing2 = load_itk('../data/trv1p1cti-heart.nii')
# itkimage2, ct_scan_label, origin2, spacing2 = load_itk('../data/trv1p1cti-heart_4.nii.gz')
# ct_scan_label = np.swapaxes(ct_scan_label, 0, 2)
ct_scan_label = nib.load('../data/trv1p1cti-heart_4.nii').get_fdata()
print(ct_scan_label.shape)
sagital_image = ct_scan_label[213, :, :] # Axis 0
print(sagital_image.shape)
axial_image = ct_scan_label[:, :, 32] # Axis 2
print(axial_image.shape)
coronal_image = ct_scan_label[:, 154, :] # Axis 1
plt.figure(figsize=(20, 10))
plt.style.use('grayscale')
plt.subplot(141)
plt.imshow(np.rot90(sagital_image))
plt.title('Sagital Plane')
plt.axis('off')
plt.subplot(142)
plt.imshow(np.rot90(axial_image))
plt.title('Axial Plane')
plt.axis('off')
plt.subplot(143)
plt.imshow(np.rot90(coronal_image))
plt.title('Coronal Plane')
plt.axis('off')
plt.show()
# x1,x2,y1,y2,z1,z2 = find_roi(ct_scan_label)
# print(x1, x2, y1, y2, z1, z2)
# print(ct_scan_label[32].shape)
# x1,x2,y1,y2 = find_roi_2D(ct_scan_label[32])
# print(x1, x2, y1, y2)
# croped = ct_scan_label[32][x1:x2+1, y1:y2+1]
# cropped_data = crop_roi(ct_scan_data, x1,x2,y1,y2,z1,z2)
# print("New Shape:", cropped_data.shape)
print("Original Shape:", ct_scan_data.shape)
print("Label Shape:", ct_scan_label.shape)
# plot_view(sagital_image)
# plot_view(axial_image)
# plot_view(coronal_image)
| [
"[email protected]"
] | |
55a78abf836afadcc6c928b21b04a242cf25d686 | 77266fcd99d4b4770a6e22bf669150b1576c4e73 | /Assignment4/venv/Scripts/pip3-script.py | 85df86302a9b7e83b935b7e11b27688aeb77fbbc | [] | no_license | NourAdel/GA | d67ad9b9ed3cd1dcc6cfe00968217f5ed7b61b33 | c407eb51e6c35b9369298bf96b409a9e4aaeb627 | refs/heads/master | 2020-05-18T00:06:59.353710 | 2019-04-29T10:42:14 | 2019-04-29T10:42:14 | 184,050,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | #!D:\College\Genetic\Assignment4\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
5da2bd8dc2830c9ae5ea68845892e133cd447295 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq2210.py | 06f183066edd0d13b690b7e34154e944725a31e0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=28
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.rx(-1.9069467407290044).on(input_qubit[2])) # number=20
c.append(cirq.H.on(input_qubit[3])) # number=21
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[2])) # number=13
c.append(cirq.rx(0.13823007675795101).on(input_qubit[2])) # number=24
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.X.on(input_qubit[3])) # number=1
c.append(cirq.rx(-1.9352210746113125).on(input_qubit[3])) # number=14
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[2])) # number=22
c.append(cirq.Y.on(input_qubit[2])) # number=10
c.append(cirq.H.on(input_qubit[1])) # number=17
c.append(cirq.CZ.on(input_qubit[3],input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[1])) # number=19
c.append(cirq.Y.on(input_qubit[2])) # number=11
c.append(cirq.H.on(input_qubit[0])) # number=25
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=26
c.append(cirq.H.on(input_qubit[0])) # number=27
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=16
c.append(cirq.Z.on(input_qubit[3])) # number=23
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2210.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
88bc0b746f0606f86b7e67ef6a1772fa311c5961 | 3b4094f1161502a3d1dbc5712e6405009c3c4b8c | /wsgi/venv/lib/python2.7/site-packages/bokeh/models/map_plots.py | d178a4dc16cabf517ea3bb3b0b28d01d0b822b07 | [] | no_license | chriotte/FinalCourseworkCloudComputingChristopherOttesen | b604337c7e8064ee07e5a45a38e44ae52cb599ae | 08a0271f831e3f14bc836870c8a39b996b6d1d20 | refs/heads/master | 2021-01-18T19:54:02.090555 | 2017-04-01T15:14:03 | 2017-04-01T15:14:03 | 86,919,333 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | """ Models for displaying maps in Bokeh plots.
"""
from __future__ import absolute_import
from ..core import validation
from ..core.validation.warnings import MISSING_RENDERERS, NO_DATA_RENDERERS
from ..core.validation.errors import REQUIRED_RANGE, MISSING_GOOGLE_API_KEY
from ..core.has_props import HasProps
from ..core.properties import abstract
from ..core.properties import Enum, Float, Instance, Int, JSON, Override, String
from ..core.enums import MapType
from .plots import Plot
@abstract
class MapOptions(HasProps):
""" Abstract base class for map options' models.
"""
lat = Float(help="""
The latitude where the map should be centered.
""")
lng = Float(help="""
The longitude where the map should be centered.
""")
zoom = Int(12, help="""
The initial zoom level to use when displaying the map.
""")
@abstract
class MapPlot(Plot):
""" Abstract base class for map plot models.
"""
class GMapOptions(MapOptions):
""" Options for GMapPlot objects.
"""
map_type = Enum(MapType, help="""
The `map type`_ to use for the GMapPlot.
.. _map type: https://developers.google.com/maps/documentation/javascript/reference#MapTypeId
""")
styles = JSON(help="""
A JSON array of `map styles`_ to use for the GMapPlot. Many example styles can
`be found here`_.
.. _map styles: https://developers.google.com/maps/documentation/javascript/reference#MapTypeStyle
.. _be found here: https://snazzymaps.com
""")
class GMapPlot(MapPlot):
""" A Bokeh Plot with a `Google Map`_ displayed underneath.
Data placed on this plot should be specified in decimal lat long coordinates e.g. 37.123, -122.404.
It will be automatically converted into the web mercator projection to display properly over
google maps tiles.
.. _Google Map: https://www.google.com/maps/
"""
# TODO (bev) map plot might not have these
@validation.error(REQUIRED_RANGE)
def _check_required_range(self):
pass
@validation.warning(MISSING_RENDERERS)
def _check_missing_renderers(self):
pass
@validation.warning(NO_DATA_RENDERERS)
def _check_no_data_renderers(self):
pass
@validation.error(MISSING_GOOGLE_API_KEY)
def _check_missing_google_api_key(self):
if self.api_key is None:
return str(self)
map_options = Instance(GMapOptions, help="""
Options for displaying the plot.
""")
border_fill_color = Override(default="#ffffff")
api_key = String(help="""
Google Maps API requires an API key. See https://developers.google.com/maps/documentation/javascript/get-api-key
for more information on how to obtain your own.
""")
| [
"[email protected]"
] | |
b259dc41599ee92dc043a282fa8235f280b9d968 | 6f0a01dde7bf2998cd09e404cc4e2633fbf45dee | /archive/Tensorflow-101/ep-6-broadcasting.py | cee70b546ae0796a84c3a2c40cb581e09e075af2 | [
"MIT"
] | permissive | IncredibleDevHQ/incredible-dev-videos | b8a26b6ac6efad33f0e8dc2903c2b441ab106a69 | 38d5b3d85fd21b8ec8043b90312b500da398d9f9 | refs/heads/main | 2023-06-01T21:05:00.010981 | 2021-06-21T14:57:41 | 2021-06-21T14:57:41 | 341,483,252 | 2 | 2 | MIT | 2021-06-08T11:56:08 | 2021-02-23T08:32:00 | Python | UTF-8 | Python | false | false | 576 | py | # Broadcasting
import tensorflow as tf
x = tf.constant([1, 2, 3])
y = tf.constant(2)
z = tf.constant([2, 2, 2])
tf.print(tf.multiply(x, 2)) #[2 4 6]
tf.print(x * y) #[2 4 6]
tf.print(x * z) #[2 4 6]
x = tf.reshape(x,[3,1])
y = tf.range(1, 5)
print(tf.multiply(x, y))
# [[ 1 2 3 4]
# [ 2 4 6 8]
# [ 3 6 9 12]]
x_stretch = tf.constant(
[[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]])
y_stretch = tf.constant(
[[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
print(x_stretch * y_stretch)
print(tf.broadcast_to(
tf.constant([1, 2, 3,4]), [3, 4]
)) | [
"[email protected]"
] | |
da159e2f2ebf6d02cf5df84e0e70edd3ae7af159 | 08289088124d18029d0ad4388f49ac9c206738e2 | /etl_prefect_core.py | e4ebf3c138cb20b5c050e3ef489bbef516571689 | [] | no_license | AntonioNtV/prefect-pydata-denver-tutorial | e54660172ef484bf9d0610b84b0b9f47d7b2805c | cb6bfb1a236b85a26efdd8b144027351fbe49aa6 | refs/heads/master | 2023-08-10T21:45:38.654271 | 2021-10-07T19:04:31 | 2021-10-07T19:04:31 | 414,699,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,919 | py | import requests
import json
import sqlite3
import pathlib
import prefect
from collections import namedtuple
from contextlib import closing
from datetime import timedelta
from prefect import task, Flow
from prefect.tasks.database.sqlite import SQLiteScript
from prefect.schedules import IntervalSchedule
from prefect.engine.results import LocalResult
DATABASE_NAME='cfpbcomplaints.db'
## setup
create_table = SQLiteScript(
db=DATABASE_NAME,
script='CREATE TABLE IF NOT EXISTS complaint (timestamp TEXT, state TEXT, product TEXT, company TEXT, complaint_what_happened TEXT)'
)
def alert_failed(obj, old_state, new_state):
if new_state.is_failed():
logger = prefect.context.get('logger')
logger.info("I actually requested this time!")
## extract
@task(cache_for=timedelta(days=1), state_handlers=[alert_failed], result=LocalResult(dir="{current_path}/results".format(current_path=pathlib.Path(__file__).parent.resolve())))
def get_complaint_data():
r = requests.get("https://www.consumerfinance.gov/data-research/consumer-complaints/search/api/v1/", params={'size':10})
response_json = json.loads(r.text)
logger = prefect.context.get('logger')
logger.info("I actually requested this time!")
return response_json['hits']['hits']
## transform
@task(state_handlers=[alert_failed])
def parse_complaint_data(raw_complaint_data):
complaints = []
Complaint = namedtuple('Complaint', ['data_received', 'state', 'product', 'company', 'complaint_what_happened'])
for row in raw_complaint_data:
source = row.get('_source')
this_complaint = Complaint(
data_received=source.get('date_recieved'),
state=source.get('state'),
product=source.get('product'),
company=source.get('company'),
complaint_what_happened=source.get('complaint_what_happened')
)
complaints.append(this_complaint)
return complaints
## load
@task(state_handlers=[alert_failed])
def store_complaints(parsed_complaint_data):
insert_cmd = "INSERT INTO complaint VALUES (?, ?, ?, ?, ?)"
with closing(sqlite3.connect(DATABASE_NAME)) as conn:
with closing(conn.cursor()) as cursor:
cursor.executemany(insert_cmd, parsed_complaint_data)
conn.commit()
def build_flow(schedule=None):
with Flow("etl flow", schedule=schedule, state_handlers=[alert_failed]) as flow:
db_table = create_table()
raw_complaint_data = get_complaint_data()
parsed_complaint_data = parse_complaint_data(raw_complaint_data)
populated_table = store_complaints(parsed_complaint_data)
populated_table.set_upstream(db_table) # db_table need to happen before populated_table
return flow
schedule = IntervalSchedule(interval=timedelta(minutes=1))
etl_flow = build_flow(schedule)
etl_flow.register(project_name='ETL FIRST PROJECT WITH PREFECT')
| [
"[email protected]"
] | |
a01b71e2dae640d49f54d02cf08acedbab149c70 | 961931333838aebe8bd17c30c19f3994e32d76ce | /src/leetcode/bfs/279. Perfect Squares.py | 128380fcb8630cd5d95ab5e6415f0e7e36e9fcdd | [] | no_license | MTGTsunami/LeetPython | 5161f9e31dc2ab1855123c2a3a151eb6f4d889bc | f7f3839f631f08a9e5bf8a02398b940f82e43e67 | refs/heads/master | 2023-04-17T16:59:45.621291 | 2021-04-26T07:24:50 | 2021-04-26T07:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,624 | py | """
Given a positive integer n, find the least number of perfect square numbers (for example, 1, 4, 9, 16, ...) which sum to n.
Example 1:
Input: n = 12
Output: 3
Explanation: 12 = 4 + 4 + 4.
Example 2:
Input: n = 13
Output: 2
Explanation: 13 = 4 + 9.
"""
class MySolution(object): # A little bit larger than O(n) time
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
square = [float("inf")] * n
for i in range(1, n + 1):
sqrt = i ** 0.5
floor = int(sqrt)
if sqrt - floor == 0:
square[i - 1] = 1
nearest = floor
else:
while floor >= 1:
square[i - 1] = min(square[i - floor ** 2 - 1] + 1, square[i - 1])
floor -= 1
return square[-1]
class SolutionDP(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
square = [float("inf")] * (n + 1)
square[0] = 0
for i in range(1, n + 1):
j = 1
while j * j <= i:
square[i] = min(square[i - j * j] + 1, square[i])
j += 1
return square[-1]
class SolutionMath(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
def isSquare(n):
return (n ** 0.5 - int(n ** 0.5)) == 0
# Based on Lagrange's Four Square theorem, there
# are only 4 possible results: 1, 2, 3, 4.
# If n is a perfect square, return 1.
if isSquare(n):
return 1
# The result is 4 if and only if n can be written in the form of 4^k*(8*m + 7).
# Please refer to Legendre's four-square theorem.
while n % 4 == 0:
n /= 4
if n % 8 == 7:
return 4
for i in range(1, int(n ** 0.5) + 1):
if isSquare(n - i * i):
return 2
return 3
class SolutionBFS(object): # Important
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
depth = 0
nodes = set([n])
edges = [i * i for i in range(1, int(n ** 0.5) + 1)]
while True:
depth += 1
nextLevel = set()
for node in nodes:
for edge in edges:
if edge == node:
return depth
elif edge < node:
nextLevel.add(node - edge)
else:
break
nodes = nextLevel
| [
"[email protected]"
] | |
645fcfbac504304e88b27def48c4789f31873f48 | b56fb8740e74859f95d44854fd4fb4309e039e84 | /src/twenty_four/lib.py | 6dd6277a98e6e208d8c712d22c573fe3b50249fa | [] | no_license | qsweber/twenty-four-api | 03c20e06f725b94c9eab3643fc4dd60c0b464e0b | fb870a24a41b0bacf595d528b13f8f4bde1118af | refs/heads/master | 2022-11-17T02:34:25.585554 | 2020-07-08T03:34:48 | 2020-07-08T03:34:48 | 107,357,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,951 | py | import itertools
import typing
def plus(x: int, y: int) -> int:
return x + y
def minus(x: int, y: int) -> int:
return x - y
def dividedby(x: int, y: int) -> float:
if y == 0:
return 0
return x / float(y)
def times(x: int, y: int) -> int:
return x * y
def get_solutions(numbers: typing.List[int]) -> typing.List[str]:
functions = [plus, minus, dividedby, times]
function_combinations = [f for f in itertools.product(functions, repeat=3)]
combinations = set(i for i in itertools.permutations(numbers))
answers = []
for a, b, c, d in combinations:
for f1, f2, f3 in function_combinations:
res1 = round(f3(f2(f1(a, b), c), d), 7)
if res1 == 24.0:
answers.append(
"((({a} {f1} {b}) {f2} {c}) {f3} {d})".format(
a=a,
b=b,
c=c,
d=d,
f1=f1.__name__,
f2=f2.__name__,
f3=f3.__name__,
)
)
res2 = round(f2(f1(a, b), f3(c, d)), 7)
if res2 == 24.0:
answers.append(
"({a} {f1} {b}) {f2} ({c} {f3} {d})".format(
a=a,
b=b,
c=c,
d=d,
f1=f1.__name__,
f2=f2.__name__,
f3=f3.__name__,
)
)
res3 = round(f1(a, f3(f2(b, c), d)), 7)
if res3 == 24.0:
answers.append(
"{a} {f1} (({b} {f2} {c}) {f3} {d})".format(
a=a,
b=b,
c=c,
d=d,
f1=f1.__name__,
f2=f2.__name__,
f3=f3.__name__,
)
)
res4 = round(f3(f1(a, f2(b, c)), d), 7)
if res4 == 24.0:
answers.append(
"({a} {f1} ({b} {f2} {c})) {f3} {d}".format(
a=a,
b=b,
c=c,
d=d,
f1=f1.__name__,
f2=f2.__name__,
f3=f3.__name__,
)
)
res5 = round(f1(a, f2(b, f3(c, d))), 7)
if res5 == 24.0:
answers.append(
"{a} {f1} ({b} {f2} ({c} {f3} {d}))".format(
a=a,
b=b,
c=c,
d=d,
f1=f1.__name__,
f2=f2.__name__,
f3=f3.__name__,
)
)
return answers
| [
"[email protected]"
] | |
f9a5490c2beeec964b97a7bd6462e457285bbb33 | 530c8697641092d9291514e919f52f79c4ff00d7 | /2d (1).py | 3c9f85943a5580834acc0df43d0b91cbcc24e107 | [] | no_license | haldanuj/EP219 | 59336fcb72ccf76df81f81b37a7c488c1f182d01 | 115633dac1d8f35a014467703672f73761baac7d | refs/heads/master | 2021-01-12T18:19:58.314348 | 2016-11-04T01:01:08 | 2016-11-04T01:01:08 | 69,415,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py |
import numpy as np
import matplotlib.pyplot as plt
import math
file=open('recoilenergydata_EP219.csv','r')
#defined list to store the values of log(L(s))
y=[]
#here defined log(L) function with s as parameter(sigma)
def likelihood(s):
file=open('recoilenergydata_EP219.csv','r')
i=0
sum1=0
sum2=0
sum3=0
sum4=0
while i<40:
if i<5:
[Er, Events]=file.readline().split(',')
m1= float(Events)
n1=float(Er)
n2=1000*(np.exp(-n1/10))
m2=np.log(n2)
sum1=sum1+(-n2 +m1*m2)
#sum1=summation of (-Bi + Di*log(Bi)) where Bi are backgound events and Di are
#observed events for 0<i<5
elif 5<=i<=15:
[Er, Events]=file.readline().split(',')
n1=float(Er)
m1= float(Events)
n2=1000*(np.exp(-n1/10))
t=s*20*(n1-5)
n3=t+n2
m2=np.log(n3)
sum2 =sum2 + (-n3 + m1*m2)
#sum2=summation of (-(Bi+Ti) + Di*log(Bi+Ti)) where Bi are backgound events,Di are
#observed events and Ti are observed events for 5<=i<15
elif 15<i<25:
[Er, Events]=file.readline().split(',')
n1=float(Er)
m1= float(Events)
n2=1000*(np.exp(-n1/10))
t=s*20*(25-n1)
n3=t+n2
m2=np.log(n3)
sum3 =sum3 + (-n3 + m1*m2)
#sum3=summation of (-(Bi+Ti) + Di*log(Bi+Ti)) where Bi are backgound events,Di are
#observed events and Ti are observed events for 15<=i<25
else :
[Er, Events]=file.readline().split(',')
m1= float(Events)
n1=float(Er)
n2=1000*(np.exp(-n1/10))
m2=np.log(n2)
sum4 =sum4 + (-n2 + m1*m2)
#sum4=summation of (-Bi + Di*log(Bi)) where Bi are backgound events and Di are
#observed events for 25<i<40
i=i+1
return (sum1 +sum2+sum3+sum4)
s=np.linspace(0, 1, 100)
y= likelihood(s)
fig, ax = plt.subplots()
ax.plot(s, y)
plt.title('Likelihood plot')
plt.xlabel('sigma')
plt.ylabel('log(sigma)')
plt.show()
| [
"[email protected]"
] | |
4355e732fc8866cde71cd3a8929fb289585ea09a | cea30cf853c1ddbe517292e8bcaf2265ddfeaa00 | /directions/migrations/0001_initial.py | 60c40bd4b3be8941d7d8643f00deab5c72d5f44f | [] | no_license | mehranj73/london-routes | b80242ecf60fa16c19dd0017be421ed790fe7b30 | 0fa50faf6813fc704379d0e0e4f2ad891e4121b0 | refs/heads/main | 2023-01-20T22:43:56.963476 | 2020-11-26T13:23:08 | 2020-11-26T13:23:08 | 325,857,652 | 1 | 0 | null | 2020-12-31T18:56:12 | 2020-12-31T18:56:11 | null | UTF-8 | Python | false | false | 554 | py | # Generated by Django 3.1.2 on 2020-10-25 18:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Direction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stop_offs', models.CharField(max_length=50)),
('image', models.CharField(max_length=200)),
],
),
]
| [
"[email protected]"
] | |
e91ec979aaed5918fde76b0f5e9594aa88de1975 | 47836a0e9dd477b17a08f0f1fdc0dec284e119eb | /cqt/strats/strategy_long_short_average.py | 5ec4dc0aa62cccce396687634037d60420e694e2 | [] | no_license | Jwang-2007/ML-Crypto-Trading | efe5667c9953bbe6541a183e749a85268b8613d2 | c83bc9ad68efaea65671a2268f6890bfbfccb79e | refs/heads/master | 2021-04-05T20:37:30.585610 | 2020-04-22T19:09:52 | 2020-04-22T19:09:52 | 248,598,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,921 | py | from cqt.strats.strategy import Strategy
from cqt.analyze.signal_long_short_crossing import signal_long_short_crossing as slsc
from cqt.analyze.signal_long_short_crossing import signal_average_envelope as sae
import copy
class StrategySimpleMA(Strategy):
def apply_event_logic(self, time, ledger):
coin = 'btc'
if self.env.has_section(coin):
section_coin = self.env.get_section(coin)
ind_coin = slsc(self.env, coin, time, self.rules)
price_coin = section_coin.get_price_close(time)
if ind_coin == -1:
ledger.sell_unit(coin, price_coin)
elif ind_coin == 1:
ledger.buy(coin, price_coin)
else:
pass
return ledger
class StrategyInverseMA(Strategy):
def __init__(self, mdl, ini_prtf, rules):
self.asset_model = mdl
self.initial_portfolio = ini_prtf
self.rules = rules
self.env = mdl
self.initial = ini_prtf
self.prices = copy.deepcopy(self.asset_model.get_section('btc').data)
self.signal=self.prices['price_close'].values*0
def apply_event_logic(self, time, ledger):
coin = 'btc'
if self.env.has_section(coin):
section_coin = self.env.get_section(coin)
ind_coin = slsc(self.env, coin, time, self.rules)
price_coin = section_coin.get_price_close(time)
time_step = self.prices.index.get_loc(time)
if ind_coin == 1:
ledger.sell_unit(coin, price_coin)
self.signal[time_step]=1
elif ind_coin == -1:
ledger.buy(coin, price_coin)
self.signal[time_step]=-1
else:
self.signal[time_step]=0
pass
return ledger
class StrategyBlendMA(Strategy):
def apply_event_logic(self, time, ledger):
coin = 'btc'
if self.env.has_section(coin):
rules_short = self.rules.copy()
rules_short['window_size'] = [rules_short['window_size'][0], rules_short['window_size'][1]]
rules_long = self.rules.copy()
rules_long['window_size'] = [rules_long['window_size'][2], rules_long['window_size'][3]]
ind_coin_long = slsc(self.env, coin, time, rules_long)
ind_coin_short = slsc(self.env, coin, time, rules_short)
strats_long = StrategySimpleMA(self.env, ledger, rules_long)
strats_short = StrategySimpleMA(self.env, ledger, rules_short)
if ind_coin_long == 1:
ledger = strats_long.apply_event_logic(time, ledger)
elif ind_coin_short == -1:
ledger = strats_short.apply_event_logic(time, ledger)
else:
pass
return ledger
| [
"[email protected]"
] | |
52a5fc44063f2e73239719204651a2f2b4b3e5e5 | 767b5482f3c5b9c2c85575c711e37561f5b8f198 | /engine/engine_lib/encoderlib.py | 27d186e1e4d625fe001279e1c8110f2ff708818f | [] | no_license | zhupite233/scaner | 8e39c903f295d06195be20067043087ec8baac4f | 7c29c02bca2247a82bcbb91cc86955cc27998c95 | refs/heads/master | 2020-05-18T03:23:03.459222 | 2019-04-15T04:29:10 | 2019-04-15T04:29:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,332 | py | #!/usr/bin/env python
"""
This is the encoding / decoding functions collection for DharmaEncoder. It
allows you to encode and decode various data formats.
(c) 2010 Nathan Hamiel
Email: nathan{at}neohaxor{dot}org
Hexsec Labs: http://hexsec.com/labs
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import hashlib
import cgi
import StringIO
import zlib
import decimal
from xml.sax.saxutils import unescape
from xml.sax.saxutils import escape
###################
# Encoder section #
###################
def url_encode(encvalue):
""" URL encode the specifed value. Example Format: Hello%20World """
try:
encoded_value = urllib.quote(encvalue)
except:
encoded_value = "There was a problem with the specified value"
return(encoded_value)
def full_url_encode(encvalue):
""" Full URL Hex encode the specified value.
Example Format: %48%65%6c%6c%6f%20%57%6f%72%6c%64 """
hexval = ""
for item in encvalue:
val = hex(ord(item)).replace("0x", "%")
hexval += val
return(hexval)
def base64_encode(encvalue):
""" Base64 encode the specified value. Example Format: SGVsbG8gV29ybGQ= """
try:
basedata = encvalue.encode("Base64")
except:
basedata = "There was an error"
return(basedata)
# def html_entity_encode(encvalue):
# """ Encode value using HTML entities. Example Format: """
#####
# Follow up on this. It needs to be fixed
#####
# encoded_value = cgi.escape(encvalue)
# return(encoded_value)
def hex_encode(encvalue):
""" Encode value to Hex. Example Format: 48656c6c6f2576f726c64"""
hexval = ""
for item in encvalue:
val = hex(ord(item)).strip("0x")
hexval += val
return(hexval)
def hex_entity_encode(encvalue):
""" Encode value to a Hex entitiy. Example Format: Hello"""
hexval = ""
for item in encvalue:
val = hex(ord(item)).replace("0x", "&#x") + ";"
hexval += val
return(hexval)
def unicode_encode(encvalue):
""" Unicode encode the specified value in the %u00 format. Example:
%u0048%u0065%u006c%u006c%u006f%u0020%u0057%u006f%u0072%u006c%u0064 """
hexval = ""
for item in encvalue:
val = hex(ord(item)).replace("0x", "%u00")
hexval += val
return(hexval)
def escape_xml(encvalue):
""" Escape the specified HTML/XML value. Example Format: Hello&World """
escaped = escape(encvalue, {"'": "'", '"': """})
return(escaped)
def md5_hash(encvalue):
""" md5 hash the specified value.
Example Format: b10a8db164e0754105b7a99be72e3fe5"""
hashdata = hashlib.md5(encvalue).hexdigest()
return(hashdata)
def sha1_hash(encvalue):
""" sha1 hash the specified value.
Example Format: 0a4d55a8d778e5022fab701977c5d840bbc486d0 """
hashdata = hashlib.sha1(encvalue).hexdigest()
return(hashdata)
def sqlchar_encode(encvalue):
""" SQL char encode the specified value.
Example Format: CHAR(72)+CHAR(101)+CHAR(108)+CHAR(108)+CHAR(111)"""
charstring = ""
for item in encvalue:
val = "CHAR(" + str(ord(item)) + ")+"
charstring += val
return(charstring.rstrip("+"))
####
# oraclechr_encode not tested yet, but should work
####
def oraclechr_encode(encvalue):
""" Oracle chr encode the specified value. """
charstring = ""
for item in encvalue:
val = "chr(" + str(ord(item)) + ")||"
charstring += val
return(charstring.rstrip("||"))
def decimal_convert(encvalue):
""" Convert input to decimal value.
Example Format: 721011081081113287111114108100 """
decvalue = ""
for item in encvalue:
decvalue += str(ord(item))
return(decvalue)
def decimal_entity_encode(encvalue):
""" Convert input to a decimal entity.
Example Format: Hello World """
decvalue = ""
for item in encvalue:
decvalue += "&#" + str(ord(item)) +";"
return(decvalue)
def rot13_encode(encvalue):
""" Perform ROT13 encoding on the specified value.
Example Format: Uryyb Jbeyq """
return(encvalue.encode("rot13"))
###################
# Decoder section #
###################
def url_decode(decvalue):
""" URL Decode the specified value. Example Format: Hello%20World """
returnval = urllib.unquote(decvalue)
return(returnval)
def fullurl_decode(decvalue):
""" Full URL decode the specified value.
Example Format: %48%65%6c%6c%6f%20%57%6f%72%6c%64 """
splithex = decvalue.split("%")
hexdec = ""
for item in splithex:
if item != "":
hexdec += chr(int(item, 16))
return(hexdec)
def base64_decode(decvalue):
""" Base64 decode the specified value.
Example Format: SGVsbG8gV29ybGQ= """
msg = """ There was an error. Most likely this isn't a valid Base64 value
and Python choked on it """
try:
base64dec = decvalue.decode("Base64")
return(base64dec)
except:
return(msg)
def hex_decode(decvalue):
""" Hex decode the specified value.
Example Format: 48656c6c6f2576f726c64 """
msg = """ There was an error, perhaps an invalid length for the hex
value """
try:
decodeval = decvalue.decode("hex")
return(decodeval)
except:
return(msg)
def hexentity_decode(decvalue):
""" Hex entity decode the specified value.
Example Format: Hello """
charval = ""
splithex = decvalue.split(";")
for item in splithex:
# Necessary because split creates an empty "" that tries to be
# converted with int()
if item != "":
hexcon = item.replace("&#", "0")
charcon = chr(int(hexcon, 16))
charval += charcon
else:
pass
return(charval)
def unescape_xml(decvalue):
""" Unescape the specified HTML or XML value: Hel啊lo&World"""
unescaped = unescape(decvalue, {"'": "'", """: '"'})
return(unescaped)
def unicode_decode(decvalue):
""" Unicode decode the specified value %u00 format.
Example Format: %u0048%u0065%u006c%u006c%u006f%u0020%u0057%u006f%u0072%u006c%u0064 """
charval = ""
splithex = decvalue.split("%u00")
for item in splithex:
if item != "":
hexcon = item.replace("%u00", "0")
charcon = chr(int(hexcon, 16))
charval += charcon
else:
pass
return(charval)
def rot13_decode(decvalue):
""" ROT13 decode the specified value. Example Format: Uryyb Jbeyq
rot13 回转位13 a编码后转换成b,b经过相同的编码之后会转换成
"""
return(decvalue.decode("rot13"))
| [
"[email protected]"
] | |
c6f9bfe889eb0278f68b7a17049662d5605c5285 | 5af277b5819d74e61374d1d78c303ac93c831cf5 | /axial/logging_utils.py | ef723570c0f02a331ebfc7220811665417690c53 | [
"Apache-2.0"
] | permissive | Ayoob7/google-research | a2d215afb31513bd59bc989e09f54667fe45704e | 727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7 | refs/heads/master | 2022-11-11T03:10:53.216693 | 2020-06-26T17:13:45 | 2020-06-26T17:13:45 | 275,205,856 | 2 | 0 | Apache-2.0 | 2020-06-26T16:58:19 | 2020-06-26T16:58:18 | null | UTF-8 | Python | false | false | 4,459 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import time
from absl import logging
import numpy as np
import PIL.Image
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from tensorflow.compat.v1.core.framework.summary_pb2 import Summary
from tensorflow.compat.v1.core.util.event_pb2 import Event
def pack_images(images, rows, cols):
"""Helper utility to make a tiled field of images from numpy arrays.
Taken from Jaxboard.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows.
"""
shape = np.shape(images)
width, height, depth = shape[-3:]
images = np.reshape(images, (-1, width, height, depth))
batch = np.shape(images)[0]
rows = np.minimum(rows, batch)
cols = np.minimum(batch // rows, cols)
images = images[:rows * cols]
images = np.reshape(images, (rows, cols, width, height, depth))
images = np.transpose(images, [0, 2, 1, 3, 4])
images = np.reshape(images, [rows * width, cols * height, depth])
return images
class SummaryWriter(object):
"""Tensorflow summary writer inspired by Jaxboard.
This version doesn't try to avoid Tensorflow dependencies, because this
project uses Tensorflow.
"""
def __init__(self, dir, write_graph=True):
if not gfile.IsDirectory(dir):
gfile.MakeDirs(dir)
self.writer = tf.summary.FileWriter(
dir, graph=tf.get_default_graph() if write_graph else None)
def flush(self):
self.writer.flush()
def close(self):
self.writer.close()
def _write_event(self, summary_value, step):
self.writer.add_event(
Event(
wall_time=round(time.time()),
step=step,
summary=Summary(value=[summary_value])))
def scalar(self, tag, value, step):
self._write_event(Summary.Value(tag=tag, simple_value=float(value)), step)
def image(self, tag, image, step):
image = np.asarray(image)
if image.ndim == 2:
image = image[:, :, None]
if image.shape[-1] == 1:
image = np.repeat(image, 3, axis=-1)
bytesio = io.BytesIO()
PIL.Image.fromarray(image).save(bytesio, 'PNG')
image_summary = Summary.Image(
encoded_image_string=bytesio.getvalue(),
colorspace=3,
height=image.shape[0],
width=image.shape[1])
self._write_event(Summary.Value(tag=tag, image=image_summary), step)
def images(self, tag, images, step, square=True):
"""Saves (rows, cols) tiled images from onp.ndarray.
This truncates the image batch rather than padding
if it doesn't fill the final row.
"""
images = np.asarray(images)
n_images = len(images)
if square:
rows = cols = int(np.sqrt(n_images))
else:
rows = 1
cols = n_images
tiled_images = pack_images(images, rows, cols)
self.image(tag, tiled_images, step=step)
class Log(object):
"""Logging to Tensorboard and the Python logger at the same time."""
def __init__(self, logdir, write_graph=True):
self.logdir = logdir
# Tensorboard
self.summary_writer = SummaryWriter(logdir, write_graph=write_graph)
def write(self, key_prefix, info_dicts, step):
log_items = []
for key in info_dicts[-1]:
# average the log values over time
key_with_prefix = '{}/{}'.format(key_prefix, key)
avg_val = np.mean([info[key] for info in info_dicts])
# absl log
log_items.append('{}={:.6f}'.format(key_with_prefix, avg_val))
# tensorboard
self.summary_writer.scalar(key_with_prefix, avg_val, step=step)
self.summary_writer.flush()
logging.info('step={:08d} {}'.format(step, ' '.join(log_items)))
| [
"[email protected]"
] | |
1eb51bed6fada6957870d90071118e0bb172b20d | dc68820135a54bbf744425fce65da16c1101ad33 | /web/api/post_image.py | 397c4e2dafed832004870a01e5e6218a182b1a46 | [] | no_license | andrefreitas/feup-ldso-foodrific | 425cf9013427d183e0b23c5632115135dc4542f1 | ec2bd0361873d3eadd4f6a2fa60b23eb3b7e6ddf | refs/heads/master | 2021-01-10T14:30:24.011293 | 2015-10-04T11:01:09 | 2015-10-04T11:01:09 | 43,633,158 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from datastore import *
from pages import BaseHandler
from google.appengine.api import images
class PostImage(BaseHandler):
def get(self):
post = getPostByID(int(self.request.get("id")))
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(post.photo)
| [
"[email protected]"
] | |
39f29b37f7444cf60b0b9e2cbd3307132c1c48c6 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/pandas/tests/io/parser/test_skiprows.py | 1df2ca4fad4d87539cdcdee874cb25a6cd3ce18e | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 6,948 | py | # -*- coding: utf-8 -*-
"""
Tests that skipped rows are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import StringIO, lrange, range
from pandas.errors import EmptyDataError
from pandas import DataFrame, Index
import pandas.util.testing as tm
@pytest.mark.parametrize("skiprows", [lrange(6), 6])
def test_skip_rows_bug(all_parsers, skiprows):
# see gh-505
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
result = parser.read_csv(StringIO(text), skiprows=skiprows, header=None,
index_col=0, parse_dates=True)
index = Index([datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)], name=0)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3], index=index)
tm.assert_frame_equal(result, expected)
def test_deep_skip_rows(all_parsers):
# see gh-4382
parser = all_parsers
data = "a,b,c\n" + "\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_data = "a,b,c\n" + "\n".join([
",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
result = parser.read_csv(StringIO(data), skiprows=[6, 8])
condensed_result = parser.read_csv(StringIO(condensed_data))
tm.assert_frame_equal(result, condensed_result)
def test_skip_rows_blank(all_parsers):
# see gh-9832
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = parser.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
index = Index([datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)], name=0)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=index)
tm.assert_frame_equal(data, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
("""id,text,num_lines
1,"line 11
line 12",2
2,"line 21
line 22",2
3,"line 31",1""",
dict(skiprows=[1]),
DataFrame([[2, "line 21\nline 22", 2],
[3, "line 31", 1]], columns=["id", "text", "num_lines"])),
("a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~",
dict(quotechar="~", skiprows=[2]),
DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"])),
(("Text,url\n~example\n "
"sentence\n one~,url1\n~"
"example\n sentence\n two~,url2\n~"
"example\n sentence\n three~,url3"),
dict(quotechar="~", skiprows=[1, 3]),
DataFrame([['example\n sentence\n two', 'url2']],
columns=["Text", "url"]))
])
def test_skip_row_with_newline(all_parsers, data, kwargs, expected):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_skip_row_with_quote(all_parsers):
# see gh-12775 and gh-10911
parser = all_parsers
data = """id,text,num_lines
1,"line '11' line 12",2
2,"line '21' line 22",2
3,"line '31' line 32",1"""
exp_data = [[2, "line '21' line 22", 2],
[3, "line '31' line 32", 1]]
expected = DataFrame(exp_data, columns=[
"id", "text", "num_lines"])
result = parser.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,exp_data", [
("""id,text,num_lines
1,"line \n'11' line 12",2
2,"line \n'21' line 22",2
3,"line \n'31' line 32",1""",
[[2, "line \n'21' line 22", 2],
[3, "line \n'31' line 32", 1]]),
("""id,text,num_lines
1,"line '11\n' line 12",2
2,"line '21\n' line 22",2
3,"line '31\n' line 32",1""",
[[2, "line '21\n' line 22", 2],
[3, "line '31\n' line 32", 1]]),
("""id,text,num_lines
1,"line '11\n' \r\tline 12",2
2,"line '21\n' \r\tline 22",2
3,"line '31\n' \r\tline 32",1""",
[[2, "line '21\n' \r\tline 22", 2],
[3, "line '31\n' \r\tline 32", 1]]),
])
def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), skiprows=[1])
expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("line_terminator", [
"\n", # "LF"
"\r\n", # "CRLF"
"\r" # "CR"
])
def test_skiprows_lineterminator(all_parsers, line_terminator):
# see gh-9079
parser = all_parsers
data = "\n".join(["SMOSMANIA ThetaProbe-ML2X ",
"2007/01/01 01:00 0.2140 U M ",
"2007/01/01 02:00 0.2141 M O ",
"2007/01/01 04:00 0.2142 D M "])
expected = DataFrame([["2007/01/01", "01:00", 0.2140, "U", "M"],
["2007/01/01", "02:00", 0.2141, "M", "O"],
["2007/01/01", "04:00", 0.2142, "D", "M"]],
columns=["date", "time", "var", "flag",
"oflag"])
if parser.engine == "python" and line_terminator == "\r":
pytest.skip("'CR' not respect with the Python parser yet")
data = data.replace("\n", line_terminator)
result = parser.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=["date", "time", "var", "flag", "oflag"])
tm.assert_frame_equal(result, expected)
def test_skiprows_infield_quote(all_parsers):
# see gh-14459
parser = all_parsers
data = "a\"\nb\"\na\n1"
expected = DataFrame({"a": [1]})
result = parser.read_csv(StringIO(data), skiprows=2)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs,expected", [
(dict(), DataFrame({"1": [3, 5]})),
(dict(header=0, names=["foo"]), DataFrame({"foo": [3, 5]}))
])
def test_skip_rows_callable(all_parsers, kwargs, expected):
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
result = parser.read_csv(StringIO(data),
skiprows=lambda x: x % 2 == 0,
**kwargs)
tm.assert_frame_equal(result, expected)
def test_skip_rows_skip_all(all_parsers):
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), skiprows=lambda x: True)
def test_skip_rows_bad_callable(all_parsers):
msg = "by zero"
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
with pytest.raises(ZeroDivisionError, match=msg):
parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0)
| [
"[email protected]"
] | |
1210ab54593eea5b9c24f896a0e2f0ffdb4dc99f | fb96a752515b20e5bb3548cc5eec39b81d463643 | /Advent/2016/day_08/eight.py | f95e8a67803a3b23d5abf78129a798f0349b137b | [] | no_license | kryptn/Challenges | 2de2675ad0a39e13fb983a728dc090af7113b443 | f1aba799fa28e542bf3782cdfa825ff9440bf66c | refs/heads/master | 2021-05-01T02:54:21.404383 | 2016-12-27T17:09:09 | 2016-12-27T17:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | with open('input.txt') as fd:
data = fd.read()
class Screen:
def __init__(self):
self.grid = [[False]*50 for x in range(6)]
def shift_row(self, row, spaces):
self.grid[row] = self.grid[row][-spaces:]+self.grid[row][:-spaces]
def shift_col(self, col, spaces):
self.grid = zip(*self.grid)
self.shift_row(col, spaces)
self.grid = [list(x) for x in zip(*self.grid)]
def enable(self, length, height):
for x in range(length):
for y in range(height):
self.grid[y][x] = True
def __str__(self):
return '\n'.join(' '.join('#' if x else '.' for x in row) for row in self.grid)
def parse(self, inp):
i = inp.split()
if i[0] == 'rect':
x, y = i[1].split('x')
self.enable(int(x), int(y))
else:
shift = self.shift_row if i[1] == 'row' else self.shift_col
col = int(i[2].split('=')[1])
mag = int(i[4])
shift(col, mag)
s = Screen()
for d in data.splitlines():
s.parse(d)
print('star one: {}\nstar two:\n'.format(sum(sum(x) for x in s.grid)))
print(s)
| [
"[email protected]"
] | |
f5e3c26105be0d37b85658b740c617e799dc16cb | 21961be64d9cd4414f4a6d0b45eb20f727734929 | /K47 User Manual/7_miscellany/37_relay/Code/Python/relay.py | 0e58bdd428c46888823cc7f321177d007c601ec2 | [] | no_license | Alion3064492356/Sensors-for-RaspberryPi | 156f70c2ce81705a9f1372f9d037d0432a7ad54c | f81ca29280d8d27da795f1c9720747e3db9c20d6 | refs/heads/master | 2020-03-17T20:54:52.709446 | 2018-06-14T02:30:46 | 2018-06-14T02:30:46 | 133,934,753 | 0 | 0 | null | 2018-06-12T20:31:51 | 2018-05-18T09:40:42 | Python | UTF-8 | Python | false | false | 676 | py | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
RelayPin = 10
def setup():
GPIO.setmode(GPIO.BOARD) # Numbers GPIOs by physical location
GPIO.setup(RelayPin, GPIO.OUT)
def loop():
while True:
print '...relayd on'
GPIO.output(RelayPin, GPIO.HIGH)
time.sleep(0.5)
print 'relay off...'
GPIO.output(RelayPin, GPIO.LOW)
time.sleep(0.5)
def destroy():
GPIO.output(RelayPin, GPIO.LOW)
GPIO.cleanup() # Release resource
if __name__ == '__main__': # Program start from here
setup()
try:
loop()
except KeyboardInterrupt: # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
destroy()
| [
"[email protected]"
] | |
073baf122d23c22628502336b5d6cf068590df1b | 259cc507d97bfeff84d21de3a0ab56640676a9eb | /venv1/Lib/site-packages/tensorflow/python/training/session_manager.py | 06084b1d7b3fc728396e775179f7ce788d696e65 | [
"MIT",
"Apache-2.0"
] | permissive | Soum-Soum/Tensorflow_Face_Finder | c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2 | fec6c15d2df7012608511ad87f4b55731bf99478 | refs/heads/master | 2020-03-22T20:31:39.606644 | 2018-07-12T13:47:56 | 2018-07-12T13:47:56 | 140,607,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,336 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and creates session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.util.tf_export import tf_export
def _maybe_name(obj):
"""Returns object name if it has one, or a message otherwise.
This is useful for names that apper in error messages.
Args:
obj: Object to get the name of.
Returns:
name, "None", or a "no name" message.
"""
if obj is None:
return "None"
elif hasattr(obj, "name"):
return obj.name
else:
return "<no name for %s>" % type(obj)
@tf_export("train.SessionManager")
class SessionManager(object):
"""Training helper that restores from checkpoint and creates session.
This class is a small wrapper that takes care of session creation and
checkpoint recovery. It also provides functions that to facilitate
coordination among multiple training threads or processes.
* Checkpointing trained variables as the training progresses.
* Initializing variables on startup, restoring them from the most recent
checkpoint after a crash, or wait for checkpoints to become available.
### Usage:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will checkpoint the model in '/tmp/mydir'.
sm = SessionManager()
sess = sm.prepare_session(master, init_op, saver, checkpoint_dir)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`prepare_session()` initializes or restores a model. It requires `init_op`
and `saver` as an argument.
A second process could wait for the model to be ready by doing the following:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will wait for the model to become ready.
sm = SessionManager()
sess = sm.wait_for_session(master)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`wait_for_session()` waits for a model to be initialized by other processes.
"""
def __init__(self,
local_init_op=None,
ready_op=None,
ready_for_local_init_op=None,
graph=None,
recovery_wait_secs=30):
"""Creates a SessionManager.
The `local_init_op` is an `Operation` that is run always after a new session
was created. If `None`, this step is skipped.
The `ready_op` is an `Operation` used to check if the model is ready. The
model is considered ready if that operation returns an empty 1D string
tensor. If the operation returns a non empty 1D string tensor, the elements
are concatenated and used to indicate to the user why the model is not
ready.
The `ready_for_local_init_op` is an `Operation` used to check if the model
is ready to run local_init_op. The model is considered ready if that
operation returns an empty 1D string tensor. If the operation returns a non
empty 1D string tensor, the elements are concatenated and used to indicate
to the user why the model is not ready.
If `ready_op` is `None`, the model is not checked for readiness.
`recovery_wait_secs` is the number of seconds between checks that
the model is ready. It is used by processes to wait for a model to
be initialized or restored. Defaults to 30 seconds.
Args:
local_init_op: An `Operation` run immediately after session creation.
Usually used to initialize tables and local variables.
ready_op: An `Operation` to check if the model is initialized.
ready_for_local_init_op: An `Operation` to check if the model is ready
to run local_init_op.
graph: The `Graph` that the model will use.
recovery_wait_secs: Seconds between checks for the model to be ready.
Raises:
ValueError: If ready_for_local_init_op is not None but local_init_op is
None
"""
# Sets default values of arguments.
if graph is None:
graph = ops.get_default_graph()
self._local_init_op = local_init_op
self._ready_op = ready_op
self._ready_for_local_init_op = ready_for_local_init_op
self._graph = graph
self._recovery_wait_secs = recovery_wait_secs
self._target = None
if ready_for_local_init_op is not None and local_init_op is None:
raise ValueError("If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "
", ready_for_local_init_op [%s]" %
ready_for_local_init_op)
def _restore_checkpoint(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, and tries to restore a checkpoint.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, is_restored) where 'is_restored' is `True` if
the session could be restored, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
self._target = master
sess = session.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
raise ValueError("Can not provide both checkpoint_dir and "
"checkpoint_filename_with_path.")
# If either saver or checkpoint_* is not specified, cannot restore. Just
# return.
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
return sess, False
if checkpoint_filename_with_path:
saver.restore(sess, checkpoint_filename_with_path)
return sess, True
# Waits up until max_wait_secs for checkpoint to become available.
wait_time = 0
ckpt = saver_mod.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info("Waiting for checkpoint to be available.")
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = saver_mod.get_checkpoint_state(checkpoint_dir)
else:
return sess, False
# Loads the checkpoint.
saver.restore(sess, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
return sess, True
def prepare_session(self,
master,
init_op=None,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None,
init_feed_dict=None,
init_fn=None):
"""Creates a `Session`. Makes sure the model is ready to be used.
Creates a `Session` on 'master'. If a `saver` object is passed in, and
`checkpoint_dir` points to a directory containing valid checkpoint
files, then it will try to recover the model from checkpoint. If
no checkpoint files are available, and `wait_for_checkpoint` is
`True`, then the process would check every `recovery_wait_secs`,
up to `max_wait_secs`, for recovery to succeed.
If the model cannot be recovered successfully then it is initialized by
either running the provided `init_op`, or calling the provided `init_fn`.
The local_init_op is also run after init_op and init_fn, regardless of
whether the model was recovered successfully, but only if
ready_for_local_init_op passes.
It is an error if the model cannot be recovered and no `init_op`
or `init_fn` or `local_init_op` are passed.
Args:
master: `String` representation of the TensorFlow master to use.
init_op: Optional `Operation` used to initialize the model.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call when
running the init op.
init_fn: Optional callable used to initialize the model. Called after the
optional `init_op` is called. The callable must accept one argument,
the session being initialized.
Returns:
A `Session` object that can be used to drive the model.
Raises:
RuntimeError: If the model cannot be initialized or recovered.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
if not is_loaded_from_checkpoint:
if init_op is None and not init_fn and self._local_init_op is None:
raise RuntimeError("Model is not initialized and no init_op or "
"init_fn or local_init_op was given")
if init_op is not None:
sess.run(init_op, feed_dict=init_feed_dict)
if init_fn:
init_fn(sess)
local_init_success, msg = self._try_run_local_init_op(sess)
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for local_init. "
"Init op: %s, init fn: %s, error: %s" % (_maybe_name(init_op),
init_fn,
msg))
is_ready, msg = self._model_ready(sess)
if not is_ready:
raise RuntimeError(
"Init operations did not make model ready. "
"Init op: %s, init fn: %s, local_init_op: %s, error: %s" %
(_maybe_name(init_op), init_fn, self._local_init_op, msg))
return sess
def recover_session(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, recovering if possible.
Creates a new session on 'master'. If the session is not initialized
and can be recovered from a checkpoint, recover it.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, initialized) where 'initialized' is `True` if
the session could be recovered and initialized, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
# Always try to run local_init_op
local_init_success, msg = self._try_run_local_init_op(sess)
if not is_loaded_from_checkpoint:
# Do not need to run checks for readiness
return sess, False
restoring_file = checkpoint_dir or checkpoint_filename_with_path
if not local_init_success:
logging.info(
"Restoring model from %s did not make model ready for local init:"
" %s", restoring_file, msg)
return sess, False
is_ready, msg = self._model_ready(sess)
if not is_ready:
logging.info("Restoring model from %s did not make model ready: %s",
restoring_file, msg)
return sess, False
logging.info("Restored model from %s", restoring_file)
return sess, is_loaded_from_checkpoint
def wait_for_session(self, master, config=None, max_wait_secs=float("Inf")):
"""Creates a new `Session` and waits for model to be ready.
Creates a new `Session` on 'master'. Waits for the model to be
initialized or recovered from a checkpoint. It's expected that
another thread or process will make the model ready, and that this
is intended to be used by threads/processes that participate in a
distributed training configuration where a different thread/process
is responsible for initializing or recovering the model being trained.
NB: The amount of time this method waits for the session is bounded
by max_wait_secs. By default, this function will wait indefinitely.
Args:
master: `String` representation of the TensorFlow master to use.
config: Optional ConfigProto proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
Returns:
A `Session`. May be None if the operation exceeds the timeout
specified by config.operation_timeout_in_ms.
Raises:
tf.DeadlineExceededError: if the session is not available after
max_wait_secs.
"""
self._target = master
if max_wait_secs is None:
max_wait_secs = float("Inf")
timer = _CountDownTimer(max_wait_secs)
while True:
sess = session.Session(self._target, graph=self._graph, config=config)
not_ready_msg = None
not_ready_local_msg = None
local_init_success, not_ready_local_msg = self._try_run_local_init_op(
sess)
if local_init_success:
# Successful if local_init_op is None, or ready_for_local_init_op passes
is_ready, not_ready_msg = self._model_ready(sess)
if is_ready:
return sess
self._safe_close(sess)
# Do we have enough time left to try again?
remaining_ms_after_wait = (
timer.secs_remaining() - self._recovery_wait_secs)
if remaining_ms_after_wait < 0:
raise errors.DeadlineExceededError(
None, None,
"Session was not ready after waiting %d secs." % (max_wait_secs,))
logging.info("Waiting for model to be ready. "
"Ready_for_local_init_op: %s, ready: %s",
not_ready_local_msg, not_ready_msg)
time.sleep(self._recovery_wait_secs)
def _safe_close(self, sess):
"""Closes a session without raising an exception.
Just like sess.close() but ignores exceptions.
Args:
sess: A `Session`.
"""
# pylint: disable=broad-except
try:
sess.close()
except Exception:
# Intentionally not logging to avoid user complaints that
# they get cryptic errors. We really do not care that Close
# fails.
pass
# pylint: enable=broad-except
def _model_ready(self, sess):
"""Checks if the model is ready or not.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
return _ready(self._ready_op, sess, "Model not ready")
def _model_ready_for_local_init(self, sess):
"""Checks if the model is ready to run local_init_op.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready to run
local_init_op and False otherwise, and msg is `None` if the model is
ready to run local_init_op, a `String` with the reason why it is not ready
otherwise.
"""
return _ready(self._ready_for_local_init_op, sess,
"Model not ready for local init")
def _try_run_local_init_op(self, sess):
"""Tries to run _local_init_op, if not None, and is ready for local init.
Args:
sess: A `Session`.
Returns:
A tuple (is_successful, msg), where is_successful is True if
_local_init_op is None, or we ran _local_init_op, and False otherwise;
and msg is a `String` with the reason why the model was not ready to run
local init.
"""
if self._local_init_op is not None:
is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)
if is_ready_for_local_init:
logging.info("Running local_init_op.")
sess.run(self._local_init_op)
logging.info("Done running local_init_op.")
return True, None
else:
return False, msg
return True, None
def _ready(op, sess, msg):
"""Checks if the model is ready or not, as determined by op.
Args:
op: An op, either _ready_op or _ready_for_local_init_op, which defines the
readiness of the model.
sess: A `Session`.
msg: A message to log to warning if not ready
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
if op is None:
return True, None
else:
try:
ready_value = sess.run(op)
# The model is considered ready if ready_op returns an empty 1-D tensor.
# Also compare to `None` and dtype being int32 for backward
# compatibility.
if (ready_value is None or ready_value.dtype == np.int32 or
ready_value.size == 0):
return True, None
else:
# TODO(sherrym): If a custom ready_op returns other types of tensor,
# or strings other than variable names, this message could be
# confusing.
non_initialized_varnames = ", ".join(
[i.decode("utf-8") for i in ready_value])
return False, "Variables not initialized: " + non_initialized_varnames
except errors.FailedPreconditionError as e:
if "uninitialized" not in str(e):
logging.warning("%s : error [%s]", msg, str(e))
raise e
return False, str(e)
class _CountDownTimer(object):
def __init__(self, duration_secs):
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0, diff)
| [
"[email protected]"
] | |
6a6d137d3c8dc70d14aa023a752ffba6f170d4fd | 91af1af67ed219e583b209b40ae5dd34d6f7f355 | /train_net.py | 90d770c1765c7f52a585ded8af49a5bf767545db | [] | no_license | jack20951948/Deep-Clustering | d6f5bfdd97be1f07f114371eafd9f8643ebb6e30 | 4dd8b4d3fef72e597cd142406d343450cf2dd517 | refs/heads/main | 2023-06-28T02:18:58.915727 | 2021-07-18T07:18:10 | 2021-07-18T07:18:10 | 387,109,398 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,555 | py | '''
Script to train the model
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
import ipdb
from datagenerator2 import DataGenerator
from model import Model
from GlobalConstont import *
# the .pkl file lists of data set
pkl_list = ['deep-clustering-master/pkl_folder/train.pkl'] # ['../dcdata/' + str(i) + '.pkl' for i in range(1, 12)]
val_list = ['deep-clustering-master/pkl_folder/val.pkl']
sum_dir = 'deep-clustering-master/sum'
train_dir = 'deep-clustering-master/model'
lr = 1e-3
n_hidden = 300
max_steps = 20000000
batch_size = 128
def train():
with tf.Graph().as_default():
# dropout keep probability
p_keep_ff = tf.placeholder(tf.float32, shape=None)
p_keep_rc = tf.placeholder(tf.float32, shape=None)
# generator for training set and validation set
data_generator = DataGenerator(pkl_list, batch_size)
val_generator = DataGenerator(val_list, batch_size)
# placeholder for input log spectrum, VAD info.,
# and speaker indicator function
in_data = tf.placeholder(
tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF])
VAD_data = tf.placeholder(
tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF])
Y_data = tf.placeholder(
tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF, 2])
# init the model
BiModel = Model(n_hidden, batch_size, p_keep_ff, p_keep_rc)
# build the net structure
embedding = BiModel.inference(in_data)
Y_data_reshaped = tf.reshape(Y_data, [-1, NEFF, 2])
VAD_data_reshaped = tf.reshape(VAD_data, [-1, NEFF])
# compute the loss
loss = BiModel.loss(embedding, Y_data_reshaped, VAD_data_reshaped)
# get the train operation
train_op = BiModel.train(loss, lr)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.summary.merge_all()
sess = tf.Session()
# either train from scratch or a trained model
# saver.restore(sess, 'train/model.ckpt-492000')
# val_loss = np.fromfile('val_loss').tolist()
# init_step = 56001
init = tf.initialize_all_variables()
sess.run(init)
init_step = 0
summary_writer = tf.summary.FileWriter(
sum_dir, sess.graph)
# val_loss = []
last_epoch = data_generator.epoch
for step in range(init_step, init_step + max_steps):
start_time = time.time()
data_batch = data_generator.gen_batch()
# concatenate the samples into batch data
in_data_np = np.concatenate(
[np.reshape(item['Sample'], [1, FRAMES_PER_SAMPLE, NEFF])
for item in data_batch])
VAD_data_np = np.concatenate(
[np.reshape(item['VAD'], [1, FRAMES_PER_SAMPLE, NEFF])
for item in data_batch])
VAD_data_np = VAD_data_np.astype('int')
Y_data_np = np.concatenate(
[np.reshape(item['Target'], [1, FRAMES_PER_SAMPLE, NEFF, 2])
for item in data_batch])
Y_data_np = Y_data_np.astype('int')
# train the model
loss_value, _, summary_str = sess.run(
[loss, train_op, summary_op],
feed_dict={in_data: in_data_np,
VAD_data: VAD_data_np,
Y_data: Y_data_np,
p_keep_ff: 1 - P_DROPOUT_FF,
p_keep_rc: 1 - P_DROPOUT_RC})
summary_writer.add_summary(summary_str, step)
duration = time.time() - start_time
# if np.isnan(loss_value):
# import ipdb; ipdb.set_trace()
assert not np.isnan(loss_value)
if step % 100 == 0:
# show training progress every 100 steps
num_examples_per_step = batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = (
'%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch, epoch %d)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch,
data_generator.epoch))
if step % 4000 == 0:
# save model every 4000 steps
checkpoint_path = os.path.join(train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
if last_epoch != data_generator.epoch:
# doing validation every training epoch
print('Doing validation')
val_epoch = val_generator.epoch
count = 0
loss_sum = 0
# average the validation loss
while(val_epoch == val_generator.epoch):
count += 1
data_batch = val_generator.gen_batch()
in_data_np = np.concatenate(
[np.reshape(item['Sample'],
[1, FRAMES_PER_SAMPLE, NEFF])
for item in data_batch])
VAD_data_np = np.concatenate(
[np.reshape(item['VAD'], [1, FRAMES_PER_SAMPLE, NEFF])
for item in data_batch])
VAD_data_np = VAD_data_np.astype('int')
Y_data_np = np.concatenate(
[np.reshape(item['Target'],
[1, FRAMES_PER_SAMPLE, NEFF, 2])
for item in data_batch])
Y_data_np = Y_data_np.astype('int')
loss_value, = sess.run(
[loss],
feed_dict={in_data: in_data_np,
VAD_data: VAD_data_np,
Y_data: Y_data_np,
p_keep_ff: 1,
p_keep_rc: 1})
loss_sum += loss_value
val_loss.append(loss_sum / count)
print ('validation loss: %.3f' % (loss_sum / count))
np.array(val_loss).tofile('val_loss')
last_epoch = data_generator.epoch
print('%s start' % datetime.now())
train()
| [
"[email protected]"
] | |
b992a6459c6e930f7216efb9a7c3dd03ebc7e85e | 2f81d225594bbe655bc7d3442a1a41924d3829ff | /src/qrcode/tests.py | 098102082f5e0412f5c8fd51ab6e2fe2a4c8b623 | [] | no_license | znight99/inkscape_ext_barcode | 02c33ec4d1f9d0be329f9b268682c7077ff433e4 | 52079e1f739bd318f28959b6148fa935cd8cd6ac | refs/heads/master | 2021-08-22T11:11:10.351558 | 2017-11-30T02:33:55 | 2017-11-30T02:33:55 | 112,554,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,830 | py | import six
import qrcode
import qrcode.util
import qrcode.image.svg
try:
import qrcode.image.pure
import pymaging_png # ensure that PNG support is installed
except ImportError:
pymaging_png = None
from qrcode.util import (
MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE)
try:
import unittest2 as unittest
except ImportError:
import unittest
UNICODE_TEXT = u'\u03b1\u03b2\u03b3'
class QRCodeTests(unittest.TestCase):
def test_basic(self):
qr = qrcode.QRCode(version=1)
qr.add_data('a')
qr.make(fit=False)
def test_overflow(self):
qr = qrcode.QRCode(version=1)
qr.add_data('abcdefghijklmno')
self.assertRaises(OverflowError, qr.make, fit=False)
def test_fit(self):
qr = qrcode.QRCode()
qr.add_data('a')
qr.make()
self.assertEqual(qr.version, 1)
qr.add_data('bcdefghijklmno')
qr.make()
self.assertEqual(qr.version, 2)
def test_mode_number(self):
qr = qrcode.QRCode()
qr.add_data('1234567890123456789012345678901234', optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_NUMBER)
def test_mode_alpha(self):
qr = qrcode.QRCode()
qr.add_data('ABCDEFGHIJ1234567890', optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_ALPHA_NUM)
def test_regression_mode_comma(self):
qr = qrcode.QRCode()
qr.add_data(',', optimize=0)
qr.make()
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_mode_8bit(self):
qr = qrcode.QRCode()
qr.add_data(u'abcABC' + UNICODE_TEXT, optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_mode_8bit_newline(self):
qr = qrcode.QRCode()
qr.add_data('ABCDEFGHIJ1234567890\n', optimize=0)
qr.make()
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_render_svg(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.svg.SvgImage)
img.save(six.BytesIO())
def test_render_svg_path(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.svg.SvgPathImage)
img.save(six.BytesIO())
@unittest.skipIf(not pymaging_png, "Requires pymaging with PNG support")
def test_render_pymaging_png(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.pure.PymagingImage)
img.save(six.BytesIO())
def test_optimize(self):
qr = qrcode.QRCode()
text = 'A1abc12345def1HELLOa'
qr.add_data(text, optimize=4)
qr.make()
self.assertEqual(len(qr.data_list), 5)
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
self.assertEqual(qr.data_list[1].mode, MODE_NUMBER)
self.assertEqual(qr.data_list[2].mode, MODE_8BIT_BYTE)
self.assertEqual(qr.data_list[3].mode, MODE_ALPHA_NUM)
self.assertEqual(qr.data_list[4].mode, MODE_8BIT_BYTE)
self.assertEqual(qr.version, 2)
def test_optimize_size(self):
text = 'A1abc12345123451234512345def1HELLOHELLOHELLOHELLOa' * 5
qr = qrcode.QRCode()
qr.add_data(text)
qr.make()
self.assertEqual(qr.version, 10)
qr = qrcode.QRCode()
qr.add_data(text, optimize=0)
qr.make()
self.assertEqual(qr.version, 11)
def test_qrdata_repr(self):
data = b'hello'
data_obj = qrcode.util.QRData(data)
self.assertEqual(repr(data_obj), repr(data))
| [
"[email protected]"
] | |
54157e46485cfe84e785669c8a896e72e4eba04c | 22fc34523f4de64a1e1eea707e01da79e425a043 | /srtmprofile/core/urls.py | 3eb617af2639a39dc20d463863e4fff390506028 | [
"MIT"
] | permissive | marcellobenigno/srtmprofile | 04cdcf4a1f127462dd37d94ec5f368b0f304b932 | 52a2550976ce4ecad2921e53a72ac2ec8a8459b5 | refs/heads/master | 2021-04-03T05:25:54.097968 | 2018-03-15T11:05:02 | 2018-03-15T11:05:02 | 124,605,246 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from django.conf.urls import url
from . import views
app_name = 'core'
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^roads.geojson$', views.roads_geojson, name='roads_geojson'),
url(r'^(?P<pk>\d+)/$', views.detail, name='detail'),
]
| [
"[email protected]"
] | |
4c20c568fe6c4a47880f6ed1eb34cc337b469524 | 6404478cd856f018bddf4a047b23d658e29d94cb | /robot_position_estimation.py | c6b4c283acb14211540d045a0e9ed7c24a3fb5c8 | [] | no_license | RokonUZ/robotic-arm-pick-and-place-OpenCv-Python | c11eff3d70f46d47267ee7342ab16f96a90073af | f9ac7e759a93199d56d97b27efcc7c3d085c1e9e | refs/heads/main | 2023-09-03T10:13:18.792301 | 2021-10-27T17:53:24 | 2021-10-27T17:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,663 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 21:58:01 2020
@author: Tehseen
"""
# This code is used to Find the location of the Origin of the Robotic arm
# with respect to the image frame. We calculate the center point (origin) of the robotic arm
# as well as the rotation of the robotic arm with respect to the image frame.
# These values will then be used in the Camera coordinates to the Robotic arm Coordinates Homogenius Transformation
#First of all place the robotic arm base plate on the table below the camera where we will place the robotic arm afterwards
# Then execute the code. The code will detect the Rectangle in the base plate tool then fild the
# origin and rotation values.
# At the end we will use these values in our main program.
#[Resources]
# https://stackoverflow.com/questions/34237253/detect-centre-and-angle-of-rectangles-in-an-image-using-opencv
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.html#how-to-draw-the-contours
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html#b-rotated-rectangle
# https://stackoverflow.com/questions/52247821/find-width-and-height-of-rotatedrect
import numpy as np
import cv2
import sys
import time
import yaml
import os
import warnings
warnings.filterwarnings("ignore")
#Constants Declaration
webcam_Resolution_Width = 640.0
webcam_Resolution_Height = 480.0
rectangle_width_in_mm = 49.0 #size of the calibration rectangle (longer side) along x-axis in mm.
# Global Variables
cx = 0.0 #object location in mm
cy = 0.0 #object location in mm
angle = 0.0 #robotic arm rotation angle
one_pixel_length = 0.0 #length of one pixel in cm units
number_of_cm_in_Resolution_width = 0.0 #total number of cm in the camera resolution width
#Reading Camera Matrix and Distortion Coefficients from YAML File
with open(r'Camera Calibration Algorithms/2. camera_calibration_tool-master/calibration.yaml') as file:
documents = yaml.full_load(file) #loading yaml file as Stream
camera_matrix = np.array(documents['camera_matrix']) #extracting camera_matrix key and convert it into Numpy Array (2D Matrix)
distortion_coeff = np.array(documents['dist_coeff'])
extrinsic_matrix = np.array(documents['extrinsics_matrix'])
# print ("\nIntrinsic Matrix\n",camera_matrix)
# print ("\nExtrinsic Matrix\n",extrinsic_matrix)
# print ("\nDistortion Coefficients\n",distortion_coeff)
print("\nCamera Matrices Loaded Succeccfully\n")
def undistortImage(img): #Function to undistort a given image. Function inputs: image, camera matrix and distortion coefficients
try:
mtx = camera_matrix
dist = distortion_coeff
#Now undistort the taken Image https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
h, w = img.shape[:2]
#alpha = 0 #use to crop the undistorted image
alpha = 1 #use not to crop the undistorted image (adding black pixel)
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),alpha,(w,h))
#undistort
undist_image = cv2.undistort(img, mtx, dist, None, newcameramtx)
return (undist_image) #return undistorted image
except:
print("Error while Undistorting Image")
pass
def calculate_XYZ(u,v): #Function to get World Coordinates from Camera Coordinates in mm
#https://github.com/pacogarcia3/hta0-horizontal-robot-arm/blob/9121082815e3e168e35346efa9c60bd6d9fdcef1/camera_realworldxyz.py#L105
cam_mtx = camera_matrix
Rt = extrinsic_matrix
#Solve: From Image Pixels, find World Points
scalingfactor = 40.0 #this is demo value, Calculate the Scaling Factor first (depth)
tvec1 = Rt[:, 3] #Extract the 4th Column (Translation Vector) from Extrinsic Matric
uv_1=np.array([[u,v,1]], dtype=np.float32)
uv_1=uv_1.T
suv_1=scalingfactor*uv_1
inverse_cam_mtx = np.linalg.inv(cam_mtx)
xyz_c=inverse_cam_mtx.dot(suv_1)
xyz_c=xyz_c-tvec1
R_mtx = Rt[:,[0,1,2]] #Extract first 3 columns (Rotation Matrix) from Extrinsics Matrix
inverse_R_mtx = np.linalg.inv(R_mtx)
XYZ=inverse_R_mtx.dot(xyz_c)
return XYZ
if __name__ == "__main__":
while(1):
try:
#Start reading camera feed (https://answers.opencv.org/question/227535/solvedassertion-error-in-video-capturing/))
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
#Now Place the base_plate_tool on the surface below the camera.
while(1):
_,frame = cap.read()
#frame = undistortImage(frame)
# cv2.imshow("Live" , frame)
k = cv2.waitKey(5)
if k == 27: #exit by pressing Esc key
cv2.destroyAllWindows()
sys.exit()
if k == 13: #Save the centroid and angle values of the rectangle in a file
result_file = r'Camera Calibration Algorithms/2. camera_calibration_tool-master/robot_position.yaml'
try:
os.remove(result_file) #Delete old file first
except:
pass
print("Saving Robot Position Matrices .. in ",result_file)
cx = (cx * one_pixel_length)/10.0 #pixel to cm conversion
cy = (cy * one_pixel_length)/10.0
data={"robot_position": [cx,cy,angle,number_of_cm_in_Resolution_width]}
with open(result_file, "w") as f:
yaml.dump(data, f, default_flow_style=False)
red = np.matrix(frame[:,:,2]) #extracting red layer (layer No 2) from RGB
green = np.matrix(frame[:,:,1]) #extracting green layer (layer No 1) from RGB
blue = np.matrix(frame[:,:,0]) #extracting blue layer (layer No 0) from RGB
#it will display only the Blue colored objects bright with black background
blue_only = np.int16(blue)-np.int16(red)-np.int16(green)
blue_only[blue_only<0] =0
blue_only[blue_only>255] =255
blue_only = np.uint8(blue_only)
# cv2.namedWindow('blue_only', cv2.WINDOW_AUTOSIZE)
# cv2.imshow("blue_only",blue_only)
# cv2.waitKey(1)
#https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html#otsus-binarization
#Gaussian filtering
blur = cv2.GaussianBlur(blue_only,(5,5),cv2.BORDER_DEFAULT)
#Otsu's thresholding
ret3,thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.namedWindow('Threshold', cv2.WINDOW_AUTOSIZE)
cv2.imshow("Threshold",thresh)
cv2.waitKey(1)
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for contour in contours:
area = cv2.contourArea(contour)
if area>100000:
contours.remove(contour)
cnt = contours[0] #Conture of our rectangle
##############################################################
#https://stackoverflow.com/a/34285205/3661547
#fit bounding rectangle around contour
rotatedRect = cv2.minAreaRect(cnt)
#getting centroid, width, height and angle of the rectangle conture
(cx, cy), (width, height), angle = rotatedRect
#centetoid of the rectangle conture
cx=int(cx)
cy=int(cy)
# print (cx,cy) #centroid of conture of rectangle
#Location of Rectangle from origin of image frame in millimeters
x,y,z = calculate_XYZ(cx,cy)
#but we choose the Shorter edge of the rotated rect to compute the angle between Vertical
#https://stackoverflow.com/a/21427814/3661547
if(width > height):
angle = angle+180
else:
angle = angle+90
# print("Angle b/w shorter side with Image Vertical: \n", angle)
#cm-per-pixel calculation
if(width != 0.0):
one_pixel_length = rectangle_width_in_mm/width #length of one pixel in mm (rectangle_width_in_mm/rectangle_width_in_pixels)
number_of_cm_in_Resolution_width = (one_pixel_length*640)/10 #in cm
print(number_of_cm_in_Resolution_width)
##############################################################
#Draw rectangle around the detected object
#https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.html#how-to-draw-the-contours
im = cv2.drawContours(frame,[cnt],0,(0,0,255),2)
# cv2.namedWindow('Contours', cv2.WINDOW_AUTOSIZE)
# cv2.imshow("Contours",im)
# cv2.waitKey(1)
cv2.circle(im, (cx,cy), 2,(200, 255, 0),2) #draw center
cv2.putText(im, str("Angle: "+str(int(angle))), (int(cx)-40, int(cy)+60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)
cv2.putText(im, str("Center: "+str(cx)+","+str(cy)), (int(cx)-40, int(cy)-50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)
cv2.namedWindow('Detected Rect', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Detected Rect',im)
cv2.waitKey(1)
except Exception as e:
print("Error in Main Loop\n",e)
cv2.destroyAllWindows()
sys.exit()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
b100acdda26eabc4c53277934e03fac670a2011b | fe7c5854aa07223cfcfcb117ada0dd8111f258fe | /controlLayer_server/Control Layer.py | 7865665e43047daf46a77bca27d98560e268aa13 | [] | no_license | amrmoastafa/CodeYield | ba27dd8aa6d4c47dc43550a64ae0df24c5b30cdb | 7715e651ee13ca2e982f8edb844e0e4f6512ec42 | refs/heads/master | 2022-07-04T13:36:28.033621 | 2020-05-12T11:29:16 | 2020-05-12T11:29:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,884 | py | import cv2 as cv
from image import detectImage
from NavBar import navBar
from cross_circle import detectIcon
from LabelBar import labelBarDetection
from LabelDetection import labelDetection
path = "data/test.png"
img = cv.imread(path)
imageHeight, imageWidth, imageChannels = img.shape
print(imageHeight,imageWidth)
# Tuning Parameters
rowMarginBetweenShapes = 0.2*imageHeight
colMarginXPoint = int(imageWidth / 2)
noOfColumnsPerRow = 2
class Shape:
def __init__(self, name, x, y, width, height, radius):
self.name = name
self.x = x
self.y = y
self.width = width
self.height = height
self.radius = radius
self.widthRatio = 0
self.heightRatio = 0
# Right-Left
self.allignment = ""
class HtmlRow:
def __init__(self):
self.shapesPerRow = []
self.column1Shapes = []
self.column2Shapes = []
self.column1Ratio = 0
self.column2Ratio = 0
self.height = 0
self.maxWidthIndex = 0
shapesList = []
listOfRows = []
#cv.imshow('main', img)
# Retrieving labels
text = labelDetection(path)
for iterator in range(len(text)):
x,y,w,h = cv.boundingRect(text[iterator])
temporaryShape = Shape("TEXT", x + w / 2, y + h / 2, w, h, 0)
shapesList.append(temporaryShape)
# Retrieving labelBar
labelBar = labelBarDetection(path)
for iterator in range(len(labelBar)):
x, y, w, h = cv.boundingRect(labelBar[iterator])
temporaryShape = Shape("LABEL", x + w / 2, y + h / 2, w, h, 0)
shapesList.append(temporaryShape)
# Retrieving images
image = detectImage(path)
for iterator in range(len(image)):
x, y, w, h = cv.boundingRect(image[iterator])
temporaryShape = Shape("IMAGE", x + w / 2, y + h / 2, w, h, 0)
shapesList.append(temporaryShape)
# Retrieving navigation bar
nav = navBar(path)
for iterator in range(len(nav)):
x, y, w, h = cv.boundingRect(nav[iterator])
temporaryShape = Shape("NAV", x + w / 2, y + h / 2, w, h, 0)
shapesList.append(temporaryShape)
# Retrieving icons
icon = detectIcon(path)
for iterator in range(len(icon)):
(x, y), rad = cv.minEnclosingCircle(icon[iterator])
temporaryShape = Shape("ICON", int(x), int(y), int(rad)*2, int(rad)*2, int(rad))
shapesList.append(temporaryShape)
# Sorting by y-point
shapesList = sorted(shapesList, key=lambda x: x.y, reverse=False)
# Calc. Each row height
def getMaxHeightPerRow(ROW):
maxHeight = 0
for ite in range(len(ROW.shapesPerRow)):
maxHeight = max(maxHeight, ROW.shapesPerRow[ite].height)
return maxHeight
def handlingRows():
temporaryRow = HtmlRow()
# 1st minimum-y shape is inserted into 1st row
temporaryRow.shapesPerRow.append(shapesList[0])
for iterator in range(len(shapesList) - 1):
diff = abs(shapesList[iterator].y - shapesList[iterator + 1].y)
if diff < rowMarginBetweenShapes:
temporaryRow.shapesPerRow.append(shapesList[iterator + 1])
else:
listOfRows.append(temporaryRow)
temporaryRow = HtmlRow()
temporaryRow.shapesPerRow.append(shapesList[iterator+1])
# Appending last row elements
listOfRows.append(temporaryRow)
# Retrieving max-height per row
for rows in range(len(listOfRows)):
listOfRows[rows].height = getMaxHeightPerRow(listOfRows[rows])
#print('ROW Height',listOfRows[rows].height)
handlingRows()
# Retrieving maximum width of a shape for each row & calc. ratio of columns
for rowsCounter in range(len(listOfRows)):
for shapes in range(len(listOfRows[rowsCounter].shapesPerRow)-1):
if listOfRows[rowsCounter].shapesPerRow[shapes+1].width > listOfRows[rowsCounter].shapesPerRow[ listOfRows[rowsCounter].maxWidthIndex ].width:
listOfRows[rowsCounter].maxWidthIndex = shapes+1
# Retrieving maximum width for each shape for each row
maxWidthShape = listOfRows[rowsCounter].shapesPerRow[ listOfRows[rowsCounter].maxWidthIndex ]
#
if maxWidthShape.x <= colMarginXPoint:
maxColumnWidth = maxWidthShape.x + (maxWidthShape.width / 2)
listOfRows[rowsCounter].column1Ratio = maxColumnWidth / imageWidth
listOfRows[rowsCounter].column2Ratio = 1 - listOfRows[rowsCounter].column1Ratio
else:
maxColumnWidth = abs( maxWidthShape.x - (maxWidthShape.width / 2) )
listOfRows[rowsCounter].column1Ratio = maxColumnWidth / imageWidth
listOfRows[rowsCounter].column2Ratio = 1 - listOfRows[rowsCounter].column1Ratio
# Appending each shape to their belong column
for rowsCounter in range(len(listOfRows)):
for shapes in range(len(listOfRows[rowsCounter].shapesPerRow)):
# Checking if the shape lies either in the left column or right one
if listOfRows[rowsCounter].shapesPerRow[shapes].x <= (listOfRows[rowsCounter].column1Ratio * imageWidth):
listOfRows[rowsCounter].column1Shapes.append(listOfRows[rowsCounter].shapesPerRow[shapes])
# Assigning shape width ratio
shapeWidthRatio = listOfRows[rowsCounter].shapesPerRow[shapes].width / (listOfRows[rowsCounter].column1Ratio * imageWidth)
listOfRows[rowsCounter].shapesPerRow[shapes].widthRatio = shapeWidthRatio
# Assigning shape height ratio
shapeHeightRatio = listOfRows[rowsCounter].shapesPerRow[shapes].height / listOfRows[rowsCounter].height
listOfRows[rowsCounter].shapesPerRow[shapes].heightRatio = shapeHeightRatio
# Assigning shape allignment
shapeAllignment = (listOfRows[rowsCounter].column1Ratio * imageWidth) / 3
if listOfRows[rowsCounter].shapesPerRow[shapes].x <= shapeAllignment:
listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "LEFT"
elif listOfRows[rowsCounter].shapesPerRow[shapes].x <= 2*shapeAllignment:
listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "Center"
else:
listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "RIGHT"
else:
listOfRows[rowsCounter].column2Shapes.append(listOfRows[rowsCounter].shapesPerRow[shapes])
# Assigning shape width ratios
shapeWidthRatio = listOfRows[rowsCounter].shapesPerRow[shapes].width / (listOfRows[rowsCounter].column2Ratio * imageWidth)
listOfRows[rowsCounter].shapesPerRow[shapes].widthRatio = shapeWidthRatio
# Assigning shape height ratio
shapeHeightRatio = listOfRows[rowsCounter].shapesPerRow[shapes].height / listOfRows[rowsCounter].height
listOfRows[rowsCounter].shapesPerRow[shapes].heightRatio = shapeHeightRatio
# Assigning shape allignment
column1XPoint = (listOfRows[rowsCounter].column1Ratio * imageWidth)
shapeAllignment = (imageWidth - column1XPoint) / 3
if listOfRows[rowsCounter].shapesPerRow[shapes].x <= (shapeAllignment + column1XPoint):
listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "LEFT"
elif listOfRows[rowsCounter].shapesPerRow[shapes].x <= (2*shapeAllignment + column1XPoint):
listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "Center"
else:
listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "RIGHT"
for i in range(len(listOfRows)):
print('Column 1 Started')
for j in range(len(listOfRows[i].column1Shapes)):
print(listOfRows[i].column1Shapes[j].name, ',', listOfRows[i].column1Shapes[j].allignment)
print('Column 2 Started')
for k in range(len(listOfRows[i].column2Shapes)):
print(listOfRows[i].column2Shapes[k].name, ',', listOfRows[i].column2Shapes[k].allignment)
print('ROW' + str(i+1) + 'Finished')
print('ROW Height' + str(i+1) ,listOfRows[i].height)
#cv.waitKey(0)
#cv.destroyAllWindows()
| [
"[email protected]"
] | |
ac338df45f6245caeacf3c665331db8a223ae9b2 | a1af0d3029d0dc2715a3ee55a1767351d18a6f9f | /LC/2115_find_recipes.py | 29bff113ed5592ca5f8200edface44f74795b78a | [] | no_license | zona-li/practice_in_python | b36c12112ec6b27cdb3b9bf83b315ac021d0c4e2 | 2906d12452ec059a7e39fb772391082a576591f0 | refs/heads/master | 2022-09-07T18:02:03.520425 | 2022-09-04T22:40:26 | 2022-09-04T22:40:26 | 100,322,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | # recipes = ["burger", "sandwich", "bread"]
# ingredients = [["sandwich","meat","bread"],["bread","meat"],["yeast","flour"]]
# supplies = ["yeast","flour","meat"]
from collections import Counter, defaultdict, deque
from typing import List
from traitlets import default
class Solution:
def findAllRecipes(self, recipes: List[str], ingredients: List[List[str]], supplies: List[str]) -> List[str]:
adj = defaultdict(set)
indegree = defaultdict(int)
for rec, ings in zip(recipes, ingredients):
for ing in ings:
adj[ing].add(rec)
indegree[rec] += 1
res = []
q = deque(supplies)
while len(q):
next = q.popleft()
if next in recipes:
res.append(next)
for rec in adj[next]:
indegree[rec] -= 1
if indegree[rec] == 0:
q.append(rec)
return res
s = Solution()
print(s.findAllRecipes(["burger", "sandwich", "bread"], [["sandwich","meat","bread"],["bread","meat"],["yeast","flour"]], ["yeast","flour","meat"]))
| [
"[email protected]"
] | |
fffce95d0709e83632fe51584057dd7a2f48896d | 51e56d62ba688b5cc323a3ee3890b87934ae7682 | /5_Arg_Pr_Nac.py | 4c377c901ba044f8930cb6e0a34c4005c39cbf51 | [] | no_license | BogdansProgsCo/OLIMP_FREEDOM | b239df814af90e1dc5fd0aff15ee1c5e921a61f6 | 3e631a223b6215d136104eba70bc35203dfe47cf | refs/heads/main | 2023-05-14T13:19:14.760929 | 2021-06-14T18:21:40 | 2021-06-14T18:21:40 | 376,906,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319,195 | py | import requests
from bs4 import BeautifulSoup
import re
import datetime as dt
headers = {"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'}
url = 'https://nb-bet.com/Teams/2136-Atlanta-Buenos-Ayres-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
draw = 6
odd_even = 10
under_15 = 5
over_25 = 10
under_25 = 10
both_scores = 10
drw_frst_tm = 8
no_goal_frst_tm = 6
drw_NOdrw = 7
NOdrw_drw = 8
und15_ovr15 = 7
ovr15_und15 = 7
und25_ovr25 = 7
ovr25_und25 = 7
both_noboth = 7
noboth_both = 7
drw_NOdrw_ft = 7
NOdrw_drw_ft = 7
goal_NOgoal_ft = 7
NOgoal_goal_ft = 7
od_ev = 8
ev_od = 8
a = "Arg Prim Nac.txt"
champ = "Арг Пр Нац"
team = "Атланта Б-Айрес"
def adding_team():
c = "Argentina Prim Nac"
b = "Atlanta Bue Ares"
new_file = open(a, "a+")
new_file.write('\n _______ ' + c + ' _______')
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
def clean_file():
new_file = open(a, 'w+')
new_file.seek(0)
new_file.close()
def create_file():
new_file = open(a, "a+")
new_file.close()
def draws(x):
count = 0
for i in x:
if (i == '0 : 0 ' or i == '1 : 1 ' or i == '2 : 2 ' or i == '3 : 3 ' or i == '4 : 4 '
or i == '5 : 5 ' or i == '6 : 6 ' or i == '7 : 7 '):
count += 1
else:
break
if count >= draw:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m ничей = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n draws = ' + b)
new_file.close()
def odd(x):
count = 0
for i in x:
if (i != '0 : 0 ' and i != '1 : 1 ' and i != '2 : 2 ' and i != '3 : 3 '
and i != '4 : 4 ' and i != '5 : 5 ' and i != '6 : 6 ' and i != '7 : 7 '
and i != '0 : 6 ' and i != '6 : 0 '
and i != '1 : 5 ' and i != '5 : 1 ' and i != '1 : 7 ' and i != '7 : 1 '
and i != '2 : 0 ' and i != '0 : 2 ' and i != '2 : 4 ' and i != '4 : 2 '
and i != '2 : 6 ' and i != '6 : 2 '
and i != '3 : 1 ' and i != '1 : 3 ' and i != '3 : 5 ' and i != '5 : 3 '
and i != '4 : 6 ' and i != '6 : 4 ' and i != '4 : 0 ' and i != '0 : 4 '
and i != '7 : 3 ' and i != '3 : 7 ' and i != '5 : 7 ' and i != '7 : 5 '
and i != '8 : 2 ' and i != '2 : 8 '):
count += 1
else:
break
if count >= odd_even:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m не-чет = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n odd = ' + b)
new_file.close()
def even(x):
count = 0
for i in x:
if (i == '0 : 0 ' or i == '1 : 1 ' or i == '2 : 2 ' or i == '3 : 3 '
or i == '4 : 4 ' or i == '5 : 5 ' or i == '6 : 6 ' or i == '7 : 7 '
or i == '0 : 6 ' or i == '6 : 0 '
or i == '1 : 5 ' or i == '5 : 1 ' or i == '1 : 7 ' or i == '7 : 1 '
or i == '2 : 0 ' or i == '0 : 2 ' or i == '2 : 4 ' or i == '4 : 2 '
or i == '2 : 6 ' or i == '6 : 2 '
or i == '3 : 1 ' or i == '1 : 3 ' or i == '3 : 5 ' or i == '5 : 3 '
or i == '4 : 6 ' or i == '6 : 4 ' or i == '4 : 0 ' or i == '0 : 4 '
or i == '7 : 3 ' or i == '3 : 7 ' or i == '5 : 7 ' or i == '7 : 5 '
or i == '8 : 2 ' or i == '2 : 8 '):
count += 1
else:
break
if count >= odd_even:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m чет = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n even = ' + b)
new_file.close()
def under_1_5(x):
count = 0
for i in x:
if i == '0 : 0 ' or i == '1 : 0 ' or i == '0 : 1 ':
count += 1
else:
break
if count >= under_15:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m мен 1.5 = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n under_1.5 = ' + b)
new_file.close()
def over_2_5(x):
count = 0
for i in x:
if (i != '0 : 0 ' and i != '1 : 1 ' and i != '1 : 0 '
and i != '0 : 1 ' and i != '2 : 0 ' and i != '0 : 2 '):
count += 1
else:
break
if count >= over_25:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m бол 2.5 = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n over_2.5 = ' + b)
new_file.close()
def under_2_5(x):
count = 0
for i in x:
if (i == '0 : 0 ' or i == '1 : 1 ' or i == '1 : 0 '
or i == '0 : 1 ' or i == '2 : 0 ' or i == '0 : 2 '):
count += 1
else:
break
if count >= under_25:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m мен 2.5 = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n under_2.5 = ' + b)
new_file.close()
def both_score(x):
count = 0
for i in x:
if (i != '0 : 0 ' and i != '1 : 0 ' and i != '0 : 1 ' and i != '2 : 0 '
and i != '0 : 2 ' and i != '0 : 3 ' and i != '3 : 0 ' and i != '4 : 0 '
and i != '0 : 4 ' and i != '0 : 5 ' and i != '5 : 0 ' and i != '0 : 6 '
and i != '6 : 0 '):
count += 1
else:
break
if count >= both_scores:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m обе зибили = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n both_score = ' + b)
new_file.close()
def both_no_score(x):
count = 0
for i in x:
if (i == '0 : 0 ' or i == '1 : 0 ' or i == '0 : 1 ' or i == '2 : 0 ' or i == '0 : 2 '
or i == '0 : 3 ' or i == '3 : 0 ' or i == '4 : 0 ' or i == '0 : 4 '
or i == '0 : 5 ' or i == '5 : 0 ' or i == '0 : 6 ' or i == '6 : 0 '
or i == '0 : 7 ' or i == '7 : 0 '):
count += 1
else:
break
if count >= both_scores:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m обе НЕ зибили = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n both_no_score = ' + b)
new_file.close()
def draws_first_time(x):
count = 0
for i in x:
if (i == '(0 : 0)' or i == '(1 : 1)' or i == '(2 : 2)' or i == '(3 : 3)' or i == '(4 : 4)'
or i == '(5 : 5)'):
count += 1
else:
break
if count >= drw_frst_tm:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m ничьи 1-й тайм = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n draws_first_time = ' + b)
new_file.close()
def no_goal_first_time(x):
count = 0
for i in x:
if i == '(0 : 0)':
count += 1
else:
break
if count >= no_goal_frst_tm:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m 0:0 1-й тайм = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n no_goal_first_time = ' + b)
new_file.close()
def Odd_Even(x):
count = 0
olimp = []
for i in x:
if (i != '0 : 0 ' and i != '1 : 1 ' and i != '2 : 2 ' and i != '3 : 3 ' and i != '4 : 4 '
and i != '5 : 5 ' and i != '2 : 0 ' and i != '0 : 2 ' and i != '1 : 3 '
and i != '3 : 1 ' and i != '4 : 2 ' and i != '2 : 4 ' and i != '3 : 5 '
and i != '5 : 3 ' and i != '4 : 6 ' and i != '6 : 4 ' and i != '4 : 0 '
and i != '0 : 4 ' and i != '1 : 5 ' and i != '5 : 1 ' and i != '2 : 6 '
and i != '6 : 2 ' and i != '3 : 7 ' and i != '7 : 3 ' and i != '0 : 6 '
and i != '6 : 0 ' and i != '1 : 7 ' and i != '7 : 1 ' and i != '2 : 8 '
and i != '8 : 2 '):
olimp.append("+")
else:
olimp.append("-")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= od_ev:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m НЕчет_чет = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n odd_even = ' + b)
new_file.close()
def Even_Odd(x):
count = 0
olimp = []
for i in x:
if (i != '0 : 0 ' and i != '1 : 1 ' and i != '2 : 2 ' and i != '3 : 3 '
and i != '4 : 4 ' and i != '5 : 5 ' and i != '2 : 0 ' and i != '0 : 2 '
and i != '1 : 3 ' and i != '3 : 1 ' and i != '4 : 2 ' and i != '2 : 4 '
and i != '3 : 5 ' and i != '5 : 3 ' and i != '4 : 6 ' and i != '6 : 4 '
and i != '4 : 0 ' and i != '0 : 4 ' and i != '1 : 5 ' and i != '5 : 1 '
and i != '2 : 6 ' and i != '6 : 2 ' and i != '3 : 7 ' and i != '7 : 3 '
and i != '0 : 6 ' and i != '6 : 0 ' and i != '1 : 7 ' and i != '7 : 1 '
and i != '2 : 8 ' and i != '8 : 2 '):
olimp.append("-")
else:
olimp.append("+")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= ev_od:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m чет_НЕчет = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n even_odd = ' + b)
new_file.close()
def draws_NOdraws(x):
count = 0
olimp = []
for i in x:
if i == '0 : 0 ' or i == '1 : 1 ' or i == '2 : 2 ' or i == '3 : 3 ' or i == '4 : 4 ' \
or i == '5 : 5 ' or i == '6 : 6 ' or i == '7 : 7 ' or i == '8 : 8 ':
olimp.append("+")
else:
olimp.append("-")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= drw_NOdrw:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m ничья_НЕничья = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n draws_NOdraws = ' + b)
new_file.close()
def NOdraws_draws(x):
count = 0
olimp = []
for i in x:
if i == '0 : 0 ' or i == '1 : 1 ' or i == '2 : 2 ' or i == '3 : 3 ' \
or i == '4 : 4 ' or i == '5 : 5 ' or i == '6 : 6 ' or i == '7 : 7 ':
olimp.append("-")
else:
olimp.append("+")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= NOdrw_drw:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m НЕничья_ничья = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n NOdraws_draws = ' + b)
new_file.close()
def under15_over15(x):
count = 0
olimp = []
for i in x:
if i == '0 : 0 ' or i == '1 : 0 ' or i == '0 : 1 ':
olimp.append("+")
else:
olimp.append("-")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= und15_ovr15:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m мен_бол 1.5 = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n under_over 1.5 = ' + b)
new_file.close()
def over15_under15(x):
count = 0
olimp = []
for i in x:
if i == '0 : 0 ' or i == '1 : 0 ' or i == '0 : 1 ':
olimp.append("-")
else:
olimp.append("+")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= ovr15_und15:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m бол_мен 1.5 = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n over_under 1.5 = ' + b)
new_file.close()
def under25_over25(x):
count = 0
olimp = []
for i in x:
if (i == '0 : 0 ' or i == '1 : 1 ' or i == '1 : 0 '
or i == '0 : 1 ' or i == '2 : 0 ' or i == '0 : 2 '):
olimp.append("+")
else:
olimp.append("-")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= und25_ovr25:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m мен_бол 2.5 = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n under_over 2.5 = ' + b)
new_file.close()
def over25_under25(x):
count = 0
olimp = []
for i in x:
if (i == '0 : 0 ' or i == '1 : 1 ' or i == '1 : 0 '
or i == '0 : 1 ' or i == '2 : 0 ' or i == '0 : 2 '):
olimp.append("-")
else:
olimp.append("+")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= ovr25_und25:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m бол_мен 2.5 = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n over_under 2.5 = ' + b)
new_file.close()
def both_noboth_score(x):
count = 0
olimp = []
for i in x:
if (i != '0 : 0 ' and i != '1 : 0 ' and i != '0 : 1 '
and i != '2 : 0 ' and i != '0 : 2 ' and i != '0 : 3 '
and i != '3 : 0 ' and i != '4 : 0 ' and i != '0 : 4 '
and i != '0 : 5 ' and i != '5 : 0 ' and i != '0 : 6 '
and i != '6 : 0 ' and i != '0 : 7 ' and i != '7 : 0 '
and i != '0 : 8 ' and i != '8 : 0 '):
olimp.append("+")
else:
olimp.append("-")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= both_noboth:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m обе_необе забили = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n both_noboth score = ' + b)
new_file.close()
def noboth_both_score(x):
count = 0
olimp = []
for i in x:
if (i != '0 : 0 ' and i != '1 : 0 ' and i != '0 : 1 ' and i != '2 : 0 '
and i != '0 : 2 ' and i != '0 : 3 ' and i != '3 : 0 '
and i != '4 : 0 ' and i != '0 : 4 ' and i != '0 : 5 '
and i != '5 : 0 ' and i != '0 : 6 ' and i != '6 : 0 '
and i != '0 : 7 ' and i != '7 : 0 '):
olimp.append("-")
else:
olimp.append("+")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= noboth_both:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m необе_обе забили = {count} \033[0m')
print(' ')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n noboth_both score = ' + b)
new_file.close()
def draws_NOdraws_first_time(x):
count = 0
olimp = []
for i in x:
if i == '(0 : 0)' or i == '(1 : 1)' or i == '(2 : 2)' or i == '(3 : 3)' \
or i == '(4 : 4)' or i == '(5 : 5)':
olimp.append("+")
else:
olimp.append("-")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= drw_NOdrw_ft:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m ничья_НЕничья 1-й тайм = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n draws_NOdraws 1st time = ' + b)
new_file.close()
def NOdraws_draws_first_time(x):
count = 0
olimp = []
for i in x:
if i == '(0 : 0)' or i == '(1 : 1)' or i == '(2 : 2)' or i == '(3 : 3)'\
or i == '(4 : 4)' or i == '(5 : 5)' or i == '(6 : 6)':
olimp.append("-")
else:
olimp.append("+")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= NOdrw_drw_ft:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m НЕничья_ничья 1-й тайм = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n NOdraws_draws 1st time = ' + b)
new_file.close()
def goal_NOgoal_first_time(x):
count = 0
olimp = []
for i in x:
if i != '(0 : 0)':
olimp.append("+")
else:
olimp.append("-")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= goal_NOgoal_ft:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m гол-НЕгол 1-й тайм = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n goal_NOgoal 1st time = ' + b)
new_file.close()
def NOgoal_goal_first_time(x):
count = 0
olimp = []
for i in x:
if i != '(0 : 0)':
olimp.append("-")
else:
olimp.append("+")
if olimp[0] == '+':
count += 1
if len(olimp) >= 2 and olimp[1] == '-':
count += 1
if len(olimp) >= 3 and olimp[2] == '+':
count += 1
if len(olimp) >= 4 and olimp[3] == '-':
count += 1
if len(olimp) >= 5 and olimp[4] == '+':
count += 1
if len(olimp) >= 6 and olimp[5] == '-':
count += 1
if len(olimp) >= 7 and olimp[6] == '+':
count += 1
if len(olimp) >= 8 and olimp[7] == '-':
count += 1
if len(olimp) >= 9 and olimp[8] == '+':
count += 1
if len(olimp) >= 10 and olimp[9] == '-':
count += 1
if len(olimp) >= 11 and olimp[10] == '+':
count += 1
if count >= NOgoal_goal_ft:
print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
f'\033[1;33m {team :15} \033[1;31m НЕгол_гол 1-й тайм = {count} \033[0m')
b = str(count)
new_file = open(a, "a+")
new_file.write('\n NOgoal_goal 1st time = ' + b)
new_file.close()
clean_file()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/4576-Klub-A-Germes-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Клуб А. Гермес"
def adding_team():
b = "Klub-A-Germes"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2891-Khimnasiya-Mendosa-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Химнасия Мендоса"
def adding_team():
b = "Khimnasiya-Mendosa"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1548-Independente-Rivadavia-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Индп Ривадавиа"
def adding_team():
b = "Independente-Rivadavia"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/574-Tigre-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Тигре"
def adding_team():
b = "Tigre"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1552-Oll-Boyz-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Олл Бойз"
def adding_team():
b = "Oll-Boyz"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/561-Kilmes-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Кильмес"
def adding_team():
b = "Kilmes"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1560-Santamarina-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Сантамарина"
def adding_team():
b = "Santamarina"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1553-Braun-Adroge-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Браун Адроге"
def adding_team():
b = "Braun-Adroge"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1554-San-Martin-Tukuman-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Сан-М Тукуман"
def adding_team():
b = "San-Martin-Tukuman"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2137-Deportivo-Riestra-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Депортиво Риестра"
def adding_team():
b = "Deportivo-Riestra"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/552-Atletiko-Rafaela-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Атлетико Рафаэла"
def adding_team():
b = "Atletiko-Rafaela"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1563-Khimnasiya-Khukhuy-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Химнасия Хухуй"
def adding_team():
b = "Khimnasiya-Khukhuy"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/3915-Estudiantes-Rio-Kuarto-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Эстудиантес Рио Куарто"
def adding_team():
b = "Estudiantes-Rio-Kuarto"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2140-Almirante-Braun-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Альмиранте Браун"
def adding_team():
b = "Almirante-Braun"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2145-San-Telmo-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Сан-Тельмо"
def adding_team():
b = "San-Telmo"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1555-Ferro-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Ферро"
def adding_team():
b = "Ferro"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2390-Atletiko-Mitre-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Атлетико Митре"
def adding_team():
b = "Atletiko-Mitre"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2142-Defensores-de-Belgrano-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Дефенсорес де Бельграно"
def adding_team():
b = "Defensores-de-Belgrano"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2389-Agropekuario-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Агропекуарио"
def adding_team():
b = "Agropekuario"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2144-Barrakas-Sentral-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Барракас Сентраль"
def adding_team():
b = "Barrakas-Sentral"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/3914-Alvarado-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Альварадо"
def adding_team():
b = "Alvarado"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/555-Belgrano-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
team = "Бельграно"
def adding_team():
b = "Belgrano"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/572-San-Martin-statistika-komandi'
team = "Сан-Мартин"
def adding_team():
b = "San-Martin"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1547-Chakarita-Khuniors-statistika-komandi'
team = "Чакарита Хуниорс"
def adding_team():
b = "Chakarita-Khuniors"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2135-Deportivo-Moron-statistika-komandi'
team = "Депортиво Морон"
def adding_team():
b = "Deportivo-Moron"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/4577-Deportivo-Maypu-statistika-komandi'
team = "Депортиво Майпу"
def adding_team():
b = "Deportivo-Maypu"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1549-Instituto-statistika-komandi'
team = "Институто"
def adding_team():
b = "Instituto"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1551-Almagro-statistika-komandi'
team = "Альмагро"
def adding_team():
b = "Almagro"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2139-Estudiantes-Kaseros-statistika-komandi'
team = "Эстудиантес Касерос"
def adding_team():
b = "Estudiantes-Kaseros"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/573-Temperley-statistika-komandi'
team = "Темперлей"
def adding_team():
b = "Temperley"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1546-Gilermo-Braun-statistika-komandi'
team = "Гильермо Браун"
def adding_team():
b = "Gilermo-Braun"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2147-Tristan-Suares-statistika-komandi'
team = "Тристан Суарес"
def adding_team():
b = "Tristan-Suares"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/565-Nueva-Chikago-statistika-komandi'
team = "Нуэва Чикаго"
def adding_team():
b = "Nueva-Chikago"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1556-Vilya-Dalmine-statistika-komandi'
team = "Вилья Дальмине"
def adding_team():
b = "Vilya-Dalmine"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
| [
"[email protected]"
] | |
9591bbc70a14e16ba63940263937edaeb6058ed4 | 1de19b4527989a29a15cb9ccf3b40c6cbce6e906 | /http/http_client.py | 87802227635ca277305215dce27147758e79c4ec | [] | no_license | rjcostales/python | 468406184bb984e893c3217e74dabff0be4d24c1 | ab09a421a0b9d806a9c8dbef9dc166274a19b4d2 | refs/heads/master | 2021-01-17T11:48:14.512859 | 2018-11-07T20:46:50 | 2018-11-07T20:46:50 | 38,708,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | import http.client
url = 'www.python.org'
conn = http.client.HTTPConnection(url)
conn.request("GET", "/")
response = conn.getresponse()
print('\nstatus')
print(response.status)
print('\nmsg')
print(response.msg)
print('\nheaders')
print(response.getheaders())
print(response.getheader("date"))
print(response.getheader('content-type'))
print(response.getheader('content-length'))
print('\nresponse')
length = response.length
print(length)
print(response.read(length))
| [
"[email protected]"
] | |
16576534e3c49ba3775bb663e766209077577950 | a467c2c1f691095c0e13db08dd3e939f57ec2096 | /myenv/lib/python2.7/warnings.py | a0bd7105fc010dcd2c1b0f38d8a6f32eca1a30aa | [] | no_license | Etheri/my-first-blog | 88e7e5dfd6b31206964cef3213c877e717c88d0c | af14f10029bc9168d5875f5978342279c8bdd416 | refs/heads/master | 2021-01-23T05:09:58.496749 | 2017-05-31T14:44:38 | 2017-05-31T14:44:38 | 92,956,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | /home/bednyakova/anaconda2/lib/python2.7/warnings.py | [
"[email protected]"
] | |
d2534e7f9ed2539c6ec7228c87061771a60c4676 | 1d11288ec1a5d98dcf66c4ca45072ffd29901de0 | /mrp_extend/models/mrp_bom_line.py | 0731280072097855fc742fa848452a84c7f6fb29 | [] | no_license | pyrun13/addons | 14202e273c802cee391a68474a6bdc7cf062b25c | b81650d81e0a227dd4fc460846e53ce5e61a8cc1 | refs/heads/master | 2020-09-07T21:48:18.673226 | 2019-11-12T16:15:06 | 2019-11-12T16:15:06 | 220,921,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from odoo import models, fields, api, exceptions
class MrpBomLine(models.Model):
_inherit = 'mrp.bom.line'
attrition_rate = fields.Float(string='损耗率(%)')
def write(self, vals):
attrition_rate = vals.get('attrition_rate', 0)
if attrition_rate < 0:
raise exceptions.ValidationError('损耗率不能为负数!')
return super(MrpBomLine, self).write(vals)
| [
"[email protected]"
] | |
ea06dfdc414399d140d3ee55f76920fd6e8f97c9 | b76990d490d87517e01f60e3f010de273e473725 | /naive_bayesian_for_text.py | 3479592ef6ac9058ae8cf48977118cfc0a7bc267 | [] | no_license | manju838/machine_learing_algo_python | 10799bba48e9e850cff397e4ede4ae1ca61c679b | d679f2df11b963bd926842d46db7e6235ff511a8 | refs/heads/master | 2022-03-06T04:30:11.481001 | 2019-10-24T01:26:55 | 2019-10-24T01:26:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | import numpy as np
import text_preprocess
class NaiveBayesianForText:
def fit(self, X, y):
'''
Parameters
----------
X : shape (n_corpus, text_length)
Training corpus
y : shape (n_corpus,)
Target values
'''
self.__classes = np.unique(y)
n_classes = len(self.__classes)
self.__p_classes = [np.mean(y == label) for label in self.__classes]
self.__model = text_preprocess.Tfidf()
word_vector = self.__model.fit_transform(X)
word_of_classes = np.ones((n_classes, len(self.__model.word_dictionary)))
word_of_classes_total = np.full(n_classes, n_classes)
for i in range(n_classes):
word_of_classes[i] += np.sum(word_vector[np.flatnonzero(y == self.__classes[i])], axis=0)
word_of_classes_total[i] += np.sum(word_of_classes[i])
self.__p_word_of_classes = word_of_classes / word_of_classes_total.reshape((-1, 1))
def predict(self, X):
'''
Parameters
----------
X : shape (n_corpus, text_length)
Predicting corpus
Returns
-------
y : shape (n_corpus,)
Predicted class label per sample
'''
n_samples = len(X)
word_vector = np.zeros((n_samples, len(self.__model.word_dictionary)))
for i in range(n_samples):
_, indexes, _ = np.intersect1d(self.__model.word_dictionary, X[i], return_indices=True)
word_vector[i, indexes] = 1
p_class_of_doc = word_vector.dot(np.log(self.__p_word_of_classes).T) + np.log(self.__p_classes)
return self.__classes[np.argmax(p_class_of_doc, axis=1)] | [
"[email protected]"
] | |
1398fd23f5db51415ada765ea40ff41dfd172980 | 1472d0b89d3c845f1f40552fcef889cd12ce367e | /classification/quiz.py | 2c461e7f0ef7cf0fe26cd2e9708dabc4dc97b1c1 | [] | no_license | AatmanTogadia/DataMining | 47e45cb26b8c5bfbb8fdda08044517765c7b3c96 | a3005e132dd823f13dd00dff9ad9f9bd7c1870d0 | refs/heads/master | 2020-12-25T10:34:10.452558 | 2016-07-07T04:02:07 | 2016-07-07T04:02:07 | 61,662,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,541 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 14 21:53:56 2016
@author: Aatman
"""
__author__ = 'Aatman'
from sklearn.linear_model import LogisticRegression
from sklearn import svm
import pylab as pl
import numpy as np
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
import json
import nltk
from nltk.corpus import stopwords
import pickle
english_stopwords = ["a", "about", "above", "above", "across", "after", "afterwards", "again", "against", "all", "almost", "alone", "along", "already", "also","although","always","am","among", "amongst", "amoungst", "amount", "an", "and", "another", "any","anyhow","anyone","anything","anyway", "anywhere", "are", "around", "as", "at", "back","be","became", "because","become","becomes", "becoming", "been", "before", "beforehand", "behind", "being", "below", "beside", "besides", "between", "beyond", "bill", "both", "bottom","but", "by", "call", "can", "cannot", "cant", "co", "con", "could", "couldnt", "cry", "de", "describe", "detail", "do", "done", "down", "due", "during", "each", "eg", "eight", "either", "eleven","else", "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone", "everything", "everywhere", "except", "few", "fifteen", "fify", "fill", "find", "fire", "first", "five", "for", "former", "formerly", "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", "had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his", "how", "however", "hundred", "ie", "if", "in", "inc", "indeed", "interest", "into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly", "least", "less", "ltd", "made", "many", "may", "me", "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move", "much", "must", "my", "myself", "name", "namely", "neither", "never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone", "nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our", "ours", "ourselves", "out", "over", "own","part", "per", "perhaps", "please", "put", "rather", "re", "same", "see", "seem", "seemed", "seeming", "seems", "serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty", "so", "some", "somehow", "someone", "something", "sometime", "sometimes", "somewhere", "still", "such", "system", "take", "ten", "than", "that", "the", "their", "them", "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein", "thereupon", "these", "they", "thickv", "thin", "third", "this", "those", "though", "three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards", "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was", "we", "well", "were", "what", "whatever", "when", "whence", "whenever", "where", "whereafter", "whereas", "whereby", "wherein", "whereupon", "wherever", "whether", "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will", "with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves", "the"]
spanish_stopwords = set(stopwords.words("spanish")) #creating a list of spanish stop-words
all_stopwords=[]
all_stopwords.append(english_stopwords)
all_stopwords.append(spanish_stopwords) #both spanish and eglish stop-words combined.
tweets_nega = []
for line in open('train_nega_tweets.txt').readlines():
tweet=json.loads(line)
temp=tweet['text']
items=[0,temp]
tweets_nega.append(items)
# Extract the vocabulary of keywords
vocab = dict()
for label,text in tweets_nega:
for term in text.split():
term = term.lower()
if len(term) > 2 and term not in all_stopwords:
if vocab.has_key(term):
vocab[term] = vocab[term] + 1
else:
vocab[term] = 1
# Remove terms whose frequencies are less than a threshold (e.g., 20)
vocab = {term: freq for term, freq in vocab.items() if freq > 20}
# Generate an id (starting from 0) for each term in vocab
vocab = {term: idx for idx, (term, freq) in enumerate(vocab.items())}
# Generate X and y
print vocab
X = []
y = []
for class_label, tweet_text in tweets_nega:
x = [0] * len(vocab)
terms = [term1 for term1 in tweet_text.split() if len(term1) > 2]
for term in terms:
if vocab.has_key(term):
x[vocab[term]] += 1
y.append(class_label)
X.append(x)
tweets_posi = []
for line in open('train_posi_tweets.txt').readlines():
tweet=json.loads(line)
lala=tweet['text']
items=[1,lala]
tweets_posi.append(items)
for class_label, tweet_text in tweets_posi:
x = [0] * len(vocab)
terms = [term2 for term2 in tweet_text.split() if len(term2) > 2]
for term in terms:
if vocab.has_key(term):
x[vocab[term]] += 1
y.append(class_label)
X.append(x)
# 10 folder cross validation to estimate the best w and b
svc = svm.SVC(kernel='linear')
Cs = range(1,5)
clf = GridSearchCV(estimator=svc, param_grid=dict(C=Cs), cv = 10)
clf = LogisticRegression()
clf.fit(X, y)
print clf.predict(X)
# predict the class labels of new tweets
#print clf.predict(X)
tweets_test = []
for line in open('test_tweets.txt').readlines():
tweet=json.loads(line)
lala=tweet['text']
#items=[lala]
tweets_test.append(lala)
#print tweets
# Generate X for testing tweets
X=[]
for tweet_text in tweets_test:
x = [0] * len(vocab)
terms = [term3 for term3 in tweet_text.split() if len(term3) > 2]
for term in terms:
if vocab.has_key(term):
x[vocab[term]] += 1
X.append(x)
#print X
y = clf.predict(X)
tweets1=[]
for line in open('test_tweets.txt').readlines():
tweet=json.loads(line)
e_id=tweet['embersId']
text=tweet['text']
items=[e_id,text]
tweets1.append(items)
f1 = open('trained_LR_classifier.pkl', 'w')
f1.write(pickle.dumps(clf))
f1.close()
pred=dict()
t='true'
f='false'
f2=open('predictions.txt','w')
for idx, [tweet_id, tweet_text] in enumerate(tweets1):
if y[idx]==1:
pred.update({tweet_id:t})
else:
pred.update({tweet_id:f})
f2.write(json.dumps(pred))
f2.close()
print '\r\nAmong the total {1} tweets, {0} tweets are predicted as positive.'.format(sum(y), len(y))
| [
"Aatman Togadia"
] | Aatman Togadia |
01b5228bafb4cd7e36afa383714ca0ce95b4d5dd | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/anlangner/cordis_v3.py | c960031a72d020159d2fc051da824933e00894a7 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,592 | py | import scraperwiki
import scrapemark
import feedparser
import csv
import re
import urllib2,sys
import requests
import lxml.html
from BeautifulSoup import BeautifulSoup, NavigableString
# extract project page links from the result page "url"
def extract_links(url):
atom_feed = feedparser.parse(url)
link_list = []
for entry in atom_feed.entries:
print entry.title #+ " - " + entry.link
print entry.link
# experiment with data structure
data = {
'TITLE' : entry.title,
'LINK' : entry.link
}
print data
#scraperwiki.sqlite.save(unique_keys=['TITLE'], data=data)
link_list.append(entry.link)
#csvwriter.writerow([entry.title] + [entry.link])
return link_list
# open details page for "object" and parse the results
def parse_object(object):
html = urllib2.urlopen(object).read()
soup = BeautifulSoup(html)
project_id = soup.find('input', attrs={'name':"REF"}).get('value')
print "Project-ID: " + str(project_id)
detail_url = "http://cordis.europa.eu/newsearch/getDoc?doctype=PROJ&xslt-template=projects/xsl/projectdet_en.xslt&rcn=" + str(project_id)
print "***" + detail_url
details = requests.get(detail_url)
detail_page = details.content
content = BeautifulSoup(detail_page, convertEntities="html", smartQuotesTo="html", fromEncoding="utf-8")
# extract content
data_info = content.find(attrs={'class':'projdates'})
data_coordinator = content.find(attrs={'class': 'projcoord'})
data_details = content.find(attrs={'class': 'projdet'})
data_participants = content.find(attrs={'class': 'participants'})
data_footer = content.find(attrs={'id': 'recinfo'})
# data_tech = content.find(attrs={'class': 'tech'})
# trying to find project description: display all content
print ">>> " str(content)
data_info = lxml.html.fromstring(str(data_info))
data_info = data_info.text_content()
data_coordinator = lxml.html.fromstring(str(data_coordinator))
data_coordinator = data_coordinator.text_content()
data_details = lxml.html.fromstring(str(data_details))
data_details = data_details.text_content()
data_participants = lxml.html.fromstring(str(data_participants))
data_participants = data_participants.text_content()
data_footer = lxml.html.fromstring(str(data_footer))
data_footer = data_footer.text_content()
# REGEXP for fields
# Start date in YYYY-MM-DD: (?<=From\s).{1,}(?=\sto)
# End date in YYYY-MM-DD: (?<=to\s).{1,}(?=\s\|)
# Coordinator: (?<=Coordinator\s).{1,}(?=\s\(\+\))
# Coordinator contact: (?<=Administrative contact:\s).{1,}(?!\n)
# Project title in caps: (?<=\|\s).{1,}(?=\swebsite)
# Cost in EUR: (?<=EUR\s)\d{1,2}(\s\d{3}){1,2}
# EU Contribution: (?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)
# Programme acronym: (?<=Programme acronym:\s)(\w{1,}.){2}
# Contract type: (?<=Contract type:\s).{1,}
# Subprogramme type: (?<=Subprogramme area:\s).{1,}(?=Contract)
# Participants: (?<=\n).{1,}?\n.{1,}?(?=\s\n)
# Participant contact: (?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)
# Record number: (?<=Record number:\s)\d{1,}(?=\s\/)
field_regexp = {
'Title' : '(?<=\|\s).{1,}(?=\swebsite)',
'Start date' : '(?<=From\s).{1,}(?=\sto)',
'End date' : '(?<=to\s).{1,}(?=\s\|)',
'Coordinator' : '(?<=Coordinator\n\n).{1,}(?=\n)',
'Coordinator contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?!Email)',
'Project cost' : '(?<=EUR\s)\d{1,2}(\s\d{3}){1,2}',
'EU contribution' : '(?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)',
'Programme' : '(?<=Programme acronym:\s\n)(\w{1,}.){2}',
'Subprogramme' : '(?<=Subprogramme area:\s\n).{1,}(?=\n)',
'Contract' : '(?<=Contract type:\s\n).{1,}',
'Participants' : '(?<=\n).{1,}?\n.{1,}?(?=\s\n)',
'Participant contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)',
'Record number' : '(?<=Record number:\s)\d{1,}(?=\s\/)'
}
# WAAAAH, das hier ist unsagbar hässlich!
project_title = re.search(field_regexp['Title'], data_info)
project_title = project_title.group()
project_start = re.search(field_regexp['Start date'], data_info)
project_start = project_start.group()
project_end = re.search(field_regexp['End date'], data_info)
project_end = project_end.group()
project_coordinator = re.search(field_regexp['Coordinator'], data_coordinator)
project_coordinator = project_coordinator.group()
project_coord_con = re.search(field_regexp['Coordinator contact'], data_coordinator)
project_coord_con = project_coord_con.group()
project_cost = re.search(field_regexp['Project cost'], data_details)
project_cost = project_cost.group()
project_cost = project_cost.replace(" ", "")
project_contribution = re.search(field_regexp['EU contribution'], data_details)
project_contribution = project_contribution.group()
project_contribution = project_contribution.replace(" ", "")
project_programme = re.search(field_regexp['Programme'], data_details)
project_programme = project_programme.group()
project_subprogramme = re.search(field_regexp['Subprogramme'], data_details)
project_subprogramme = project_subprogramme.group()
project_contract = re.search(field_regexp['Contract'], data_details)
project_contract = project_contract.group()
project_participants = re.findall(field_regexp['Participants'], data_participants)
#project_participants = project_participants.group()
project_part_con = re.findall(field_regexp['Participant contact'], data_participants)
#project_part_con = project_part_con.group()
project_reference = re.search(field_regexp['Record number'], data_footer)
project_reference = project_reference.group()
project_desc = {
'Title' : project_title,
'Start date' : project_start,
'End date' : project_end,
'Coordinator' : project_coordinator,
'Coordinator contact' : project_coord_con,
'Project cost' : project_cost,
'EU contribution' : project_contribution,
'Programme' : project_programme,
'Subprogramme' : project_subprogramme,
'Contract' : project_contract,
#'Participants' : project_participants[0],
#'Participant contact' : project_part_con[0],
'Reference' : project_reference
}
scraperwiki.sqlite.save(unique_keys=['Title'], data=project_desc)
print ">>> CORDIS scraper <<<"
applicants = ["rexroth"]
URL_1 = "http://cordis.europa.eu/newsearch/download.cfm?action=query&collection=EN_PROJ&text=%28"
URL_2="%29&sort=all&querySummary=quick&fieldText=%28MATCH%7BCORDIS%2CWEBPAGESEUROPA%7D%3ASOURCE%29&ENGINE_ID=CORDIS_ENGINE_ID&SEARCH_TYPE_ID=CORDIS_SEARCH_ID&descr="
URL_3 = ";%20Projects"
print "Number of searches: " + str(len(applicants))
# Open CSV file
with open ('output.csv', 'w') as csvfile:
csvwriter = csv.writer(open ('output.csv', 'a'))
for applicant in applicants:
list_url = URL_1 + applicant + URL_2 + applicant + URL_3
result_links = extract_links(list_url)
for link in result_links:
parse_object(link)import scraperwiki
import scrapemark
import feedparser
import csv
import re
import urllib2,sys
import requests
import lxml.html
from BeautifulSoup import BeautifulSoup, NavigableString
# extract project page links from the result page "url"
def extract_links(url):
atom_feed = feedparser.parse(url)
link_list = []
for entry in atom_feed.entries:
print entry.title #+ " - " + entry.link
print entry.link
# experiment with data structure
data = {
'TITLE' : entry.title,
'LINK' : entry.link
}
print data
#scraperwiki.sqlite.save(unique_keys=['TITLE'], data=data)
link_list.append(entry.link)
#csvwriter.writerow([entry.title] + [entry.link])
return link_list
# open details page for "object" and parse the results
def parse_object(object):
html = urllib2.urlopen(object).read()
soup = BeautifulSoup(html)
project_id = soup.find('input', attrs={'name':"REF"}).get('value')
print "Project-ID: " + str(project_id)
detail_url = "http://cordis.europa.eu/newsearch/getDoc?doctype=PROJ&xslt-template=projects/xsl/projectdet_en.xslt&rcn=" + str(project_id)
print "***" + detail_url
details = requests.get(detail_url)
detail_page = details.content
content = BeautifulSoup(detail_page, convertEntities="html", smartQuotesTo="html", fromEncoding="utf-8")
# extract content
data_info = content.find(attrs={'class':'projdates'})
data_coordinator = content.find(attrs={'class': 'projcoord'})
data_details = content.find(attrs={'class': 'projdet'})
data_participants = content.find(attrs={'class': 'participants'})
data_footer = content.find(attrs={'id': 'recinfo'})
# data_tech = content.find(attrs={'class': 'tech'})
# trying to find project description: display all content
print ">>> " str(content)
data_info = lxml.html.fromstring(str(data_info))
data_info = data_info.text_content()
data_coordinator = lxml.html.fromstring(str(data_coordinator))
data_coordinator = data_coordinator.text_content()
data_details = lxml.html.fromstring(str(data_details))
data_details = data_details.text_content()
data_participants = lxml.html.fromstring(str(data_participants))
data_participants = data_participants.text_content()
data_footer = lxml.html.fromstring(str(data_footer))
data_footer = data_footer.text_content()
# REGEXP for fields
# Start date in YYYY-MM-DD: (?<=From\s).{1,}(?=\sto)
# End date in YYYY-MM-DD: (?<=to\s).{1,}(?=\s\|)
# Coordinator: (?<=Coordinator\s).{1,}(?=\s\(\+\))
# Coordinator contact: (?<=Administrative contact:\s).{1,}(?!\n)
# Project title in caps: (?<=\|\s).{1,}(?=\swebsite)
# Cost in EUR: (?<=EUR\s)\d{1,2}(\s\d{3}){1,2}
# EU Contribution: (?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)
# Programme acronym: (?<=Programme acronym:\s)(\w{1,}.){2}
# Contract type: (?<=Contract type:\s).{1,}
# Subprogramme type: (?<=Subprogramme area:\s).{1,}(?=Contract)
# Participants: (?<=\n).{1,}?\n.{1,}?(?=\s\n)
# Participant contact: (?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)
# Record number: (?<=Record number:\s)\d{1,}(?=\s\/)
field_regexp = {
'Title' : '(?<=\|\s).{1,}(?=\swebsite)',
'Start date' : '(?<=From\s).{1,}(?=\sto)',
'End date' : '(?<=to\s).{1,}(?=\s\|)',
'Coordinator' : '(?<=Coordinator\n\n).{1,}(?=\n)',
'Coordinator contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?!Email)',
'Project cost' : '(?<=EUR\s)\d{1,2}(\s\d{3}){1,2}',
'EU contribution' : '(?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)',
'Programme' : '(?<=Programme acronym:\s\n)(\w{1,}.){2}',
'Subprogramme' : '(?<=Subprogramme area:\s\n).{1,}(?=\n)',
'Contract' : '(?<=Contract type:\s\n).{1,}',
'Participants' : '(?<=\n).{1,}?\n.{1,}?(?=\s\n)',
'Participant contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)',
'Record number' : '(?<=Record number:\s)\d{1,}(?=\s\/)'
}
# WAAAAH, das hier ist unsagbar hässlich!
project_title = re.search(field_regexp['Title'], data_info)
project_title = project_title.group()
project_start = re.search(field_regexp['Start date'], data_info)
project_start = project_start.group()
project_end = re.search(field_regexp['End date'], data_info)
project_end = project_end.group()
project_coordinator = re.search(field_regexp['Coordinator'], data_coordinator)
project_coordinator = project_coordinator.group()
project_coord_con = re.search(field_regexp['Coordinator contact'], data_coordinator)
project_coord_con = project_coord_con.group()
project_cost = re.search(field_regexp['Project cost'], data_details)
project_cost = project_cost.group()
project_cost = project_cost.replace(" ", "")
project_contribution = re.search(field_regexp['EU contribution'], data_details)
project_contribution = project_contribution.group()
project_contribution = project_contribution.replace(" ", "")
project_programme = re.search(field_regexp['Programme'], data_details)
project_programme = project_programme.group()
project_subprogramme = re.search(field_regexp['Subprogramme'], data_details)
project_subprogramme = project_subprogramme.group()
project_contract = re.search(field_regexp['Contract'], data_details)
project_contract = project_contract.group()
project_participants = re.findall(field_regexp['Participants'], data_participants)
#project_participants = project_participants.group()
project_part_con = re.findall(field_regexp['Participant contact'], data_participants)
#project_part_con = project_part_con.group()
project_reference = re.search(field_regexp['Record number'], data_footer)
project_reference = project_reference.group()
project_desc = {
'Title' : project_title,
'Start date' : project_start,
'End date' : project_end,
'Coordinator' : project_coordinator,
'Coordinator contact' : project_coord_con,
'Project cost' : project_cost,
'EU contribution' : project_contribution,
'Programme' : project_programme,
'Subprogramme' : project_subprogramme,
'Contract' : project_contract,
#'Participants' : project_participants[0],
#'Participant contact' : project_part_con[0],
'Reference' : project_reference
}
scraperwiki.sqlite.save(unique_keys=['Title'], data=project_desc)
print ">>> CORDIS scraper <<<"
applicants = ["rexroth"]
URL_1 = "http://cordis.europa.eu/newsearch/download.cfm?action=query&collection=EN_PROJ&text=%28"
URL_2="%29&sort=all&querySummary=quick&fieldText=%28MATCH%7BCORDIS%2CWEBPAGESEUROPA%7D%3ASOURCE%29&ENGINE_ID=CORDIS_ENGINE_ID&SEARCH_TYPE_ID=CORDIS_SEARCH_ID&descr="
URL_3 = ";%20Projects"
print "Number of searches: " + str(len(applicants))
# Open CSV file
with open ('output.csv', 'w') as csvfile:
csvwriter = csv.writer(open ('output.csv', 'a'))
for applicant in applicants:
list_url = URL_1 + applicant + URL_2 + applicant + URL_3
result_links = extract_links(list_url)
for link in result_links:
parse_object(link) | [
"[email protected]"
] | |
a397b67df4bf1082a8f0cd0373bcb051241d51ce | e008a757fc02e6fcc725cc079b373f7c5cfefa28 | /experiments/analysis_gtex_feature_explore.py | b96d4acfe61ff7ff19f4fa1616583928de208ebe | [] | no_license | sc130/AdaFDRpaper | 74e3b67548f0f2811ac1a4d170f379396dd60440 | c564f24a513cf505c4ac7ab07e960d4ef6be1b9b | refs/heads/master | 2022-01-22T16:59:57.285565 | 2019-07-31T18:34:35 | 2019-07-31T18:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | ## system settings
import matplotlib
matplotlib.use('Agg')
import logging
import os
import sys
import argparse
import adafdr.data_loader as dl
import adafdr.method as md
import time
import matplotlib.pyplot as plt
import pickle
def main(args):
# Set up parameters.
alpha = 0.01
n_itr = 1500
# Set up the output folder.
output_folder = os.path.realpath('..') + '/result_gtex_feature_explore/result_'\
+ args.output_folder
if not os.path.exists(output_folder):
os.makedirs(output_folder)
else:
filelist = [os.remove(os.path.join(output_folder, f))\
for f in os.listdir(output_folder)]
# Load the data.
p, x, n_full, cate_name, cis_name = dl.load_GTEx(args.data_name,\
if_impute=False)
# feature_explore
md.adafdr_explore(p, x, alpha=alpha, n_full=n_full, vis_dim=None, cate_name=cate_name,\
output_folder=output_folder, h=None)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Side-info assisted multiple hypothesis testing')
parser.add_argument('-d', '--data_loader', type=str, required=True)
parser.add_argument('-n', '--data_name', type=str, required=False)
parser.add_argument('-o', '--output_folder', type=str, required = True)
args = parser.parse_args()
main(args) | [
"[email protected]"
] | |
3b0761a6d612bc898fd451258885973dbdba8234 | de3e36fb6ed1a94c3b8b0313f426d4f74c858fad | /industry/hw3/checkers/svm_impl_shtanko.py | 601f22ae73bcdf347265b05fc1e140f934602597 | [] | no_license | leonshting/data_mining_in_action_2017 | 5415c542de56013bc5b2ef21137e2347bf2f1765 | baeb379213e44c6f38d73f845a5c673ce78f2cf3 | refs/heads/master | 2021-05-07T01:20:52.985394 | 2017-11-18T16:02:25 | 2017-11-18T16:02:25 | 110,332,082 | 0 | 0 | null | 2017-11-11T09:02:35 | 2017-11-11T09:02:35 | null | UTF-8 | Python | false | false | 3,678 | py | import numpy as np
from sklearn.base import BaseEstimator
SVM_PARAMS_DICT = {
'C': 100,
'random_state': 42,
'iters': 1000,
'batch_size': 10,
}
import numpy as np
from random import randint
import random
np.random.seed(42)
random.seed(42)
class MySVM(object):
def __init__(self, C=10000, batch_size = 100, iters=10000, **kwargs):
self.C = C # regularization constant
self.batch_size = batch_size
self.iters = iters
# f(x) = <w,x> + w_0
def f(self, x):
return np.dot(self.w, x) + self.w0
# a(x) = [f(x) > 0]
def a(self, x):
return 1 if self.f(x) > 0 else -1
# predicting answers for X_test
def predict(self, X_test):
o_o = np.array([self.a(x) for x in X_test])
o_o[o_o == -1] = 0
return o_o
# l2-regularizator
def reg(self):
return 1.0 * sum(self.w ** 2) / (2.0 * self.C)
# l2-regularizator derivative
def der_reg(self):
return self.w/self.C
# hinge loss vectorized
def loss(self, x, answer):
return np.vectorize(lambda x_v, answer_v: max([0, 1 - answer_v * self.f(x_v)]),
signature='(m),()->()')(x, answer)
# hinge loss derivative
def _dl(self, x_v, answer_v):
return -answer_v if 1 - answer_v * self.f(x_v) > 0 else 0.0
def der_loss(self, x, answer):
return np.vectorize(lambda x_v, answer_v: self._dl(x_v, answer_v), signature=
'(m),()->()')(x, answer)
def der_loss_wrt_w(self, x, answer):
#print(self.der_loss(x, answer))
return np.mean((np.multiply(x.T, self.der_loss(x, answer))), axis=1)
def der_loss_wrt_w0(self, x, answer):
return np.mean(self.der_loss(x, answer))
def trans_to_01(self, y):
y[y == -1] = 1
return y
def trans_to_11(self, y):
y[y == 0] = -1
return y
def get_params(self, *args, **kwargs):
return {
'C': self.C,
'batch_size': self.batch_size,
'iters': self.iters
}
# fitting w and w_0 with SGD
def fit(self, X_train, y_train):
dim = len(X_train[0])
self.w = np.random.rand(dim) # initial value for w
self.w0 = np.random.randn() # initial value for w_0
y_train = self.trans_to_11(y_train)
# 10000 steps is OK for this example
# another variant is to continue iterations while error is still decreasing
loss_a = 1.
delta = 1.
cnt = 0
glob_cnt = 0
#stops if too long
while (cnt<100 or abs(delta/loss_a) > 1e-3) and glob_cnt < self.iters:
# random example choise
# rand_index = randint(0, len(X_train) - 1,) # generating random index
rand_index = np.random.randint(low=0, high=X_train.shape[0], size=self.batch_size)
x = X_train[rand_index]
y = y_train[rand_index]
loss_b = self.loss(x, y).sum()
# simple heuristic for step size
step = 1./(glob_cnt+1)
# w update
#print(self.der_loss_wrt_w(x, y), self.der_reg())
self.w += step * (-self.der_loss_wrt_w(x, y) - self.der_reg())
# w_0 update
self.w0 += -step * self.der_loss_wrt_w0(x, y)
loss_a = self.loss(x, y).sum()
delta = abs(loss_a - loss_b)
if abs(delta/loss_a) > 1e-3:
cnt = 0
else:
cnt+=1
glob_cnt += 1
return self | [
"[email protected]"
] | |
e6d4a5b68241ef8bf821e322cb11bd1f31db75b6 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/rci.py | 1499f78fdcb23fcbcc72afecd718862922797f9e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'rCI':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
9e70985c1a04109922a692d38d13895082188238 | 8575c4ce854151973bb8f58b8a124f7b1816df45 | /Malu_Python_Scripts/badpix_match.py | eb4ee7f719267ce1e11e56c8d1213791dbaeb636 | [] | no_license | mlldantas/Gal_classification | e0a3ce375d0661ca1933b4d36ff20f6fb4d469cc | 81c392ec828709d30dea351a2fe27ec81bc6e69d | refs/heads/master | 2022-03-30T14:24:18.340900 | 2020-02-21T17:24:25 | 2020-02-21T17:24:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,834 | py | from __future__ import division
import numpy as np
import pandas as pd
dn4000_txt = '/home/mldantas/Dropbox/STARLIGHT/dn4000_MALU.txt'
lines = '/home/mldantas/Dropbox/STARLIGHT/lines.txt'
dn4000_table = np.loadtxt(dn4000_txt, dtype=object)
lines_table = np.loadtxt(lines, dtype=object)
bad_pix_info = np.loadtxt('/home/mldantas/Dropbox/Clustering/Dataset/badpixels_class_WHAN_BPT_predictions.txt', dtype=str)
classification = np.loadtxt('/home/mldantas/Dropbox/Clustering/Dataset/class_WHAN_BPT.csv', delimiter=',', dtype=str)
dn4000_dictionary = {}
for k in range(len(dn4000_table[0, :])):
dn4000_dictionary[dn4000_table[0, k]] = np.array(dn4000_table[0 + 1:, k], dtype=str)
print ("Dn4000 Table Dictionary read ok!")
lines_dictionary = {}
for j in range((lines_table[0, :]).size):
lines_dictionary[lines_table[0, j]] = np.array(lines_table[0 + 1:, j], dtype=str)
print ("Lines' Table Dictionary read ok!")
classification_dictionary = {}
for k in range(len(classification[0, :])):
classification_dictionary[classification[0, k]] = np.array(classification[0 + 1:, k], dtype=str)
print ("Classification Table Dictionary read ok!")
ids = bad_pix_info[:, 0].astype(str)
bad_pix_hb = bad_pix_info[:, 1].astype(float)
bad_pix_o3 = bad_pix_info[:, 2].astype(float)
bad_pix_ha = bad_pix_info[:, 3].astype(float)
bad_pix_n2 = bad_pix_info[:, 4].astype(float)
index = np.where((bad_pix_hb < 0.25) * (bad_pix_hb >= 0.0) * (bad_pix_o3 < 0.25) * (bad_pix_o3 >= 0.0)
* (bad_pix_ha < 0.25) * (bad_pix_ha >= 0.0) * (bad_pix_n2 < 0.25) * (bad_pix_n2 >= 0.0))
dn4000_ids = dn4000_dictionary['SC5-output_file'].astype(str)
dn4000_obs_break = dn4000_dictionary['Dn4000(obs)'].astype(float)
dn4000_syn_break = dn4000_dictionary['Dn4000(syn)'].astype(float)
lines_plate = lines_dictionary['plate'].astype(int)
lines_mjd = lines_dictionary['mjd'].astype(int)
lines_fiberid = lines_dictionary['fiberID'].astype(int)
print("Line's table size is %d" % lines_plate.size)
ids_class = classification_dictionary['id'].astype(str)
plate_class = []
mjd_class = []
fiberid_class = []
for i in range(ids_class.size):
plate_class.append(int(ids_class[i].split('.')[0]))
mjd_class.append(int(ids_class[i].split('.')[1]))
fiberid_class.append(int(ids_class[i].split('.')[2]))
plate_class = np.array(plate_class)
mjd_class = np.array(mjd_class)
fiberid_class = np.array(fiberid_class)
plate = []
mjd = []
fiberid = []
for i in range(ids.size):
plate.append(int(ids[i].split('.')[0]))
mjd.append(int(ids[i].split('.')[1]))
fiberid.append(int(ids[i].split('.')[2]))
plate = np.array(plate)[index]
mjd = np.array(mjd)[index]
fiberid = np.array(fiberid)[index]
## Dn4000 crossmatch -----------------------------------------------------------------------------------------------
dn4000_plate = []
dn4000_mjd = []
dn4000_fiberid = []
for l in range(dn4000_ids.size):
dn4000_plate_i = dn4000_ids[l].split('.')[0]
dn4000_mjd_i = dn4000_ids[l].split('.')[1]
dn4000_fiberid_i = dn4000_ids[l].split('.')[2]
dn4000_plate.append(int(dn4000_plate_i))
dn4000_mjd.append(int(dn4000_mjd_i))
dn4000_fiberid.append(int(dn4000_fiberid_i))
dn4000_plate = np.array(dn4000_plate)
dn4000_mjd = np.array(dn4000_mjd)
dn4000_fiberid = np.array(dn4000_fiberid)
print ("Dn4000 size is %d" % dn4000_plate.size)
dn4000_indexes = np.arange(plate.size)
dn4000_data_index = []
for m in range(dn4000_plate.size):
dn4000_data_index_m = dn4000_indexes[(plate == dn4000_plate[m]) * (mjd == dn4000_mjd[m]) *
(fiberid == dn4000_fiberid[m])]
if dn4000_data_index_m.size is 0:
continue
dn4000_data_index.append(m)
dn4000_synth = dn4000_syn_break[dn4000_data_index]
dn4000_obs = dn4000_obs_break[dn4000_data_index]
dn4000_plate = dn4000_plate[dn4000_data_index]
dn4000_mjd = dn4000_mjd[dn4000_data_index]
dn4000_fiberid = dn4000_fiberid[dn4000_data_index]
## Lines crossmatch ------------------------------------------------------------------------------------------------
indexes = np.arange(plate.size)
new_index = []
for i in range(lines_plate.size):
index = indexes[(plate == lines_plate[i]) * (mjd == lines_mjd[i]) * (fiberid == lines_fiberid[i])]
if index.size is 0:
continue
new_index.append(i)
h_alpha = lines_dictionary['F_Halpha'].astype(float)[new_index]
ew_h_alpha = lines_dictionary['EW_Halpha'].astype(float)[new_index]
h_beta = lines_dictionary['F_Hbeta'].astype(float)[new_index]
oiii = lines_dictionary['F_oiii'].astype(float)[new_index]
nii = lines_dictionary['F_nii'].astype(float)[new_index]
## Classification crossmatch -------------------------------------------------------------------------------------------
indexes_class = np.arange(plate_class.size)
index_class = []
for i in range(plate_class.size):
index = indexes_class[(plate == plate_class[i]) * (mjd == mjd_class[i]) * (fiberid == fiberid_class[i])]
if index.size is 0:
continue
index_class.append(i)
classification_bpt = classification_dictionary['class_BPT'].astype(int)[index_class]
classification_whan = classification_dictionary['class_WHAN'].astype(int)[index_class]
np.savetxt('/home/mldantas/Dropbox/Clustering/Dataset/results_classification.csv',
np.column_stack((plate, mjd, fiberid, dn4000_obs, dn4000_synth, h_alpha, ew_h_alpha, h_beta, oiii, nii,
classification_bpt, classification_whan)),
fmt="%d,%d,%d,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%d,%d", delimiter=',', newline='\n',
header='plate,mjd,fiber_id,dn4000_obs,dn4000_synth,H_alpha,EW_H_alpha,'
'H_beta,OIII,NII,class_BPT,class_WHAN')
| [
"[email protected]"
] | |
05af6eb6e60b4748045485fcbf36d751acf72583 | 0c7ff0ec35ba2bb38f99ef6ecb261ec33466dd52 | /Day1/day1Project.py | 2d1e56254a4ef4fd53ab5a15fdd51db183e510ec | [] | no_license | TheKinshu/100-Days-Python | 15cbacc608ee349cc9733a7032e10a359bebb731 | 293ad6b3e5f5208da84efbc5b2d2d395a5a53421 | refs/heads/master | 2023-04-18T08:21:30.361800 | 2021-05-02T18:48:39 | 2021-05-02T18:48:39 | 351,582,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | #1. Create a greeting for your program.
print("Welcome to the Band Name Generator.")
#2. Ask the user for the city that they grew up in.
city = input("What's name of the city you gre up in?\n")
#3. Ask the user for the name of a pet.
pet = input("What's your pet's name?\n")
#4. Combine the name of their city and pet and show them their band name.
print("Your band name could be " + city + " " + pet)
#5. Make sure the input cursor shows on a new line, see the example at:
# https://band-name-generator-end.appbrewery.repl.run/ | [
"[email protected]"
] | |
92c3a0d5822904b02ee02cf30204b593268f8d36 | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/matrix_squaring_20200413235341.py | 1cded98ea6504881b7ef71c0979704ed33286f9f | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,000 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
def rho_free(x,xp,beta):
"""
Uso: devuelve elemento de matriz dsnsidad para el caso de una partícula libre en un toro infinito.
"""
return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta) )
def harmonic_potential(x):
"""Devuelve valor del potencial harmónico para una posición x dada"""
return 0.5*x**2
def anharmonic_potential(x):
"""Devuelve valor de potencial anharmónico para una posición x dada"""
# return np.abs(x)*(1+np.cos(x)) #el resultado de este potencial es interesante
return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
"""
Uso: calcula probabilidad teórica cuántica de encontrar al osciladoe armónico
(presente en un baño térmico) en la posición x.
Recibe:
x: float -> posición
beta: float -> inverso de temperatura en unidades reducidas beta = 1/T.
Devuelve:
probabilidad teórica cuántica en posición dada para temperatura T dada.
"""
return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
def rho_trotter(x_max = 5., nx = 101, beta=1, potential=harmonic_potential):
"""
Uso: devuelve matriz densidad en aproximación de Trotter para altas temperaturas
y bajo el potencial "potential".
Recibe:
xmax: float -> los valores de x estarán en el intervalo (-xmax,xmax).
nx: int -> número de valores de x considerados.
beta: float -> inverso de temperatura en unidades reducidas.
potential: func -> potencial de interacción, debe ser una función de x.
Devuelve:
rho: numpy array, shape=(nx,nx) -> matriz densidad en aproximación de Trotter para
altas temperaturas y potencial dado.
grid_x: numpy array, shape=(nx,) -> valores de x en los que está evaluada rho.
dx: float -> separación entre valores contiguos de grid_x
"""
dx = 2. * x_max / (nx - 1)
grid_x = np.array([i*dx for i in range(-int((nx-1)/2), int(nx/2 + 1))])
rho = np.array([ [ rho_free(x , xp, beta) * np.exp(-0.5*beta*(potential(x)+potential(xp))) for x in grid_x] for xp in grid_x])
return rho, grid_x, dx
def density_matrix_squaring(rho, grid_x, N_iter = 1, beta_ini = 1, print_steps=True):
"""
Uso: devuelve matriz densidad luego de aplicarle algoritmo matrix squaring N_iter veces.
El sistema asociado a la matriz densidad obtenida (al final de aplicar el algoritmo)
está a temperatura inversa beta_fin = beta_ini * 2**(N_iter).
Recibe:
rho: numpy array, shape=(nx,nx) -> matriz densidad en aproximación de Trotter para
altas temperaturas y potencial dado.
grid_x: numpy array, shape=(nx,) -> valores de x en los que está evaluada "rho".
N_iter: int -> número de iteraciones del algoritmo.
beta_ini: float -> valor de inverso de temperatura asociado a la
matriz densidad "rho".
print_steps: bool -> muestra valores de beta en cada iteración
Devuelve:
rho: numpy array, shape=(nx,nx) -> matriz densidad de estado "rho" a temperatura
inversa igual a "beta_fin".
trace_rho: int -> traza de la matriz densidad a temperatura inversa
igual a "beta_fin". Por la definición que tomamos
de "rho", ésta es equivalente a la función
partición en dicha temperatura.
beta_fin: float -> temperatura inversa del sistema asociado a "rho".
"""
dx = grid_x[1] - grid_x[0]
beta_fin = beta_ini * 2 ** N_iter
print('\nbeta_ini = %.3f'%beta_ini,
'\n----------------------------------------------------------------')
for i in range(N_iter):
rho = dx * np.dot(rho,rho)
if print_steps==True:
print(u'Iteration %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(i, i, i+1))
trace_rho = np.trace(rho)*dx
return rho, trace_rho, beta_fin
def save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=True):
"""
Uso: guarda datos de la distribución pi(x;beta)
Recibe:
grid_x: numpy array, shape=(nx,) -> valores de x en los que está evaluada pi(x;beta).
x_weights: numpy array, shape=(nx,) ->
"""
pi_x_data = {'Position x': grid_x,
'Prob. density': x_weights}
pi_x_data = pd.DataFrame(data=pi_x_data)
with open(file_name,mode='w') as rho_csv:
rho_csv.write(relevant_info+'\n')
rho_csv.close()
with open(file_name,mode='a') as rho_csv:
pi_x_data.to_csv(rho_csv)
rho_csv.close()
if print_data==True:
print(pi_x_data)
return pi_x_data
def run_pi_x_squaring(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
potential_string = 'harmonic_potential', print_steps=True,
save_data=True, plot=True, save_plot=True, show_plot=True):
beta_ini = beta_fin * 2**(-N_iter)
# Cálculo de rho con aproximación de Trotter
rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
# Aproximación de rho con matrix squaring iterado N_iter veces.
rho, trace_rho, beta_fin_2 = density_matrix_squaring(rho, grid_x, N_iter,
beta_ini, print_steps)
print('----------------------------------------------------------------\n',
u'beta_fin = %.3f Z(beta_fin) = Tr(rho(beta_fin)) ≈ %.3E \n'%(beta_fin_2,trace_rho))
# Normalización de rho y cálculo de densidades de probabilidad para valores en grid_x
rho_normalized = rho/trace_rho
x_weights = np.diag(rho_normalized)
if save_data==True:
# Nombre del archivo csv en el que guardamos valores de pi(x;beta_fin)
file_name = u'pi_x-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.csv'\
%(potential_string,x_max,nx,N_iter,beta_fin)
# Información relevante para agregar como comentario al archivo csv
relevant_info = u'# %s x_max = %.3f nx = %d '%(potential_string,x_max,nx) + \
u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,) + \
u'beta_fin = %.3f'%beta_fin
# Guardamos valores de pi(x;beta_fin) en archivo csv
save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=0)
# Gráfica y comparación con teoría
if plot == True:
plt.figure(figsize=(8,5))
plt.plot(grid_x, x_weights, label = 'Matrix squaring +\nfórmula de Trotter.\n$N=%d$ iteraciones\n$dx=%.3E$'%(N_iter,dx))
plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'Valor teórico QHO')
plt.xlabel(u'x')
plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
plt.tight_layout()
if save_plot==True:
plot_name = u'pi_x-plot-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.eps'\
%(potential_string,x_max,nx,N_iter,beta_fin)
plt.savefig(plot_name)
if show_plot==True:
plt.show()
plt.close()
return 0
plt.rcParams.update({'font.size':15})
run_pi_x_squaring(potential = harmonic_potential, potential_string = 'harmonic_potential',
save_data=True, save_plot=False, show_plot=True)
| [
"[email protected]"
] | |
1e6ab766a2799d3338ba484409f9b162c8797b68 | 1d8b4d67e1f65b785cd11d006d0322af0d27ebcc | /[0725_현수]Naver_DSC2018/TypeAnalysis_Cpu.py | e860a910918d24f084a3f913c62b0b36624edccb | [] | no_license | kumakuma34/Naver-DataScienceCompetition-2018 | 4256ff548d3d8893620581dc49cf8ea37a2a9d0e | a5c447f327ca6d18879cc7ae59b5889a0292fedc | refs/heads/master | 2020-05-04T21:56:23.604022 | 2019-09-28T11:40:22 | 2019-09-28T11:40:22 | 179,494,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 22:06:02 2018
@author: qgqg2
"""
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import collections
from sklearn import datasets, linear_model
# utf-8 encoding error, so I take 'cp1252'
df = pd.read_csv('Processed_Data.csv', encoding= "cp1252")
#df.shape()
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import collections
from sklearn import datasets, linear_model
# utf-8 encoding error, so I take 'cp1252'
df = pd.read_csv('Processed_Data.csv', encoding= "cp1252")
#df.shape()
Cpu_type = df['Cpu_Type'].unique()
Cpu_size = df.groupby('Cpu_Type').size()
print(Cpu_size)
print(Cpu_type)
labels = Cpu_type
ratio = Cpu_size
plt.figure(figsize=(13,13))
plt.pie(ratio, labels=labels, shadow=True, startangle=150, autopct = '%1.1f%%')
plt.title('PieChart of Cpu')
#plt.show()
| [
"[email protected]"
] | |
af071cda274d216298ffa43dad3dc91a802788fa | a97dab5a6d7fa9e65a61193001652198236d5814 | /ircpdb/bot.py | ce6b1bf265fbd7e97b178bd50deab4f663a664cb | [
"BSD-2-Clause"
] | permissive | scshepard/ircpdb | ac083b55fe94485e44859f7dca301361d6411616 | d6f197b02a77113d8e025e3026b64549eb748e2e | refs/heads/master | 2021-01-24T14:27:05.410262 | 2014-10-30T23:09:24 | 2014-10-30T23:09:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,071 | py | import fcntl
import logging
from multiprocessing import Queue
import os
import random
import socket
import textwrap
import time
from irc import strings
from irc.bot import SingleServerIRCBot, ServerSpec
import requests
import six
from .exceptions import DpasteError
logger = logging.getLogger(__name__)
class IrcpdbBot(SingleServerIRCBot):
def __init__(
self, channel, nickname, server, port, password,
limit_access_to, message_wait_seconds,
dpaste_minimum_response_length,
**connect_params
):
self.channel = channel
self.queue = Queue()
self.joined = False
self.pre_join_queue = []
self.message_wait_seconds = message_wait_seconds
self.dpaste_minimum_response_length = dpaste_minimum_response_length
self.limit_access_to = limit_access_to
server = ServerSpec(server, port, password)
super(IrcpdbBot, self).__init__(
[server], nickname, nickname, **connect_params
)
def on_nicknameinuse(self, c, e):
c.nick(
u"%s-%s" % (
c.get_nickname(),
random.randrange(0, 9999)
)
)
def on_welcome(self, c, e):
logger.debug('Received welcome message, joining %s', self.channel)
c.join(self.channel)
self.joined = True
hello_lines = [
"Debugger ready (on host %s)" % socket.gethostname(),
(
"Please prefix debugger commands with either '!' or '%s:'. "
"For pdb help, say '!help'; for a list of ircpdb-specific "
"commands, say '!!help'."
)
]
for line in hello_lines:
self.send_user_message(
self.channel,
line
)
for username, message in self.pre_join_queue:
self.send_user_message(username, message)
def on_privmsg(self, c, e):
self.send_user_message(
e.source.nick,
"Ircdb currently supports sending/receiving messages "
"using only the IRC channel."
)
def on_pubmsg(self, c, e):
# Check if this message is prefixed with the bot's username:
a = e.arguments[0].split(":", 1)
if (
len(a) > 1
and strings.lower(a[0]) == strings.lower(
self.connection.get_nickname()
)
):
self.do_command(e, a[1].strip())
# And, check if the argument was prefixed with a '!'.
if e.arguments[0][0] == '!':
self.do_command(e, e.arguments[0][1:].strip())
return
def do_command(self, e, cmd):
logger.debug('Received command: %s', cmd)
nickname = e.source.nick
if self.limit_access_to and nickname not in self.limit_access_to:
self.send_channel_message(
"I'm sorry, %s, you are not allowed to give commands "
"to this debugger. Please ask one of the following "
"users for permission to use the debugger: %s." % (
nickname,
', '.join(self.limit_access_to)
)
)
return
if cmd.startswith("!allow"):
allows = cmd.split(' ')
self.limit_access_to.extend(allows[1:])
self.send_channel_message(
"The following users have been granted access to the debugger:"
" %s." % (
', '.join(allows[1:])
)
)
return
if cmd.startswith("!set_dpaste_minimum_response_length"):
value = cmd.split(' ')
try:
self.dpaste_minimum_response_length = int(value[1])
self.send_channel_message(
"Messages longer than %s lines will now be posted "
"to dpaste if possible." % (
self.dpaste_minimum_response_length
)
)
except (TypeError, IndexError, ValueError):
self.send_channel_message(
"An error was encountered while setting the "
"dpaste_minimum_response_length setting. %s"
)
return
if cmd.startswith("!set_message_wait_seconds"):
value = cmd.split(' ')
try:
self.message_wait_seconds = float(value[1])
self.send_channel_message(
"There will be a delay of %s seconds between "
"sending each message." % (
self.message_wait_seconds
)
)
except (TypeError, IndexError, ValueError):
self.send_channel_message(
"An error was encountered while setting the "
"message_wait_seconds setting."
)
return
if cmd.startswith("!help"):
available_commands = textwrap.dedent("""
Available Commands:
* !!allow NICKNAME
Add NICKNAME to the list of users that are allowed to
interact with the debugger. Current value: {limit_access_to}.
* !!set_dpaste_minimum_response_length INTEGER
Try to send messages this length or longer in lines
to dpaste rather than sending them to IRC directly.
Current value: {dpaste_minimum_response_length}.
* !!set_message_wait_seconds FLOAT
Set the number of seconds to wait between sending messages
(this is a measure used to prevent being kicked from
Freenode and other IRC servers that enforce limits on the
number of messages a client an send in a given period of
time. Current value: {message_wait_seconds}.
""".format(
limit_access_to=self.limit_access_to,
dpaste_minimum_response_length=(
self.dpaste_minimum_response_length
),
message_wait_seconds=self.message_wait_seconds,
))
self.send_channel_message(
available_commands,
dpaste=True,
)
return
else:
self.queue.put(cmd.strip())
def send_channel_message(self, message, dpaste=None):
return self.send_user_message(
self.channel,
message,
dpaste=dpaste,
)
def send_user_message(self, username, message, dpaste=None):
message_stripped = message.strip()
if not self.joined:
logger.warning(
'Tried to send message %s, '
'but was not yet joined to channel. Queueing...',
message
)
self.pre_join_queue.append(
(username, message, )
)
return
lines = message_stripped.split('\n')
chunked = self.get_chunked_lines(lines)
try:
long_response = len(chunked) >= self.dpaste_minimum_response_length
if (long_response and dpaste is None) or dpaste is True:
dpaste_url = self.send_lines_to_dpaste(lines)
self.send_lines(
username, "%s (%s lines)" % (
dpaste_url,
len(lines)
)
)
return
except DpasteError:
pass
self.send_lines(username, chunked)
def get_chunked_lines(self, lines, chunk_size=450):
chunked_lines = []
for line in lines:
if len(line) > chunk_size:
chunked_lines.extend([
line[i:i+chunk_size]
for i in range(0, len(line), chunk_size)
])
else:
chunked_lines.append(line)
return chunked_lines
def send_lines_to_dpaste(self, lines):
try:
response = requests.post(
'http://dpaste.com/api/v2/',
data={
'content': '\n'.join(lines)
}
)
return response.url
except Exception as e:
raise DpasteError(str(e))
def send_lines(self, target, lines):
if isinstance(lines, six.string_types):
lines = [lines]
for part in lines:
self.connection.send_raw(
'PRIVMSG %s :%s' % (
target,
part
)
)
if self.message_wait_seconds:
time.sleep(self.message_wait_seconds)
def process_forever(self, inhandle, outhandle, timeout=0.1):
self._connect()
# Let's mark out inhandle as non-blocking
fcntl.fcntl(inhandle, fcntl.F_SETFL, os.O_NONBLOCK)
while True:
try:
messages = inhandle.read()
except IOError:
messages = None
if messages:
for message in messages.split('(Pdb)'):
stripped = message.strip()
if stripped:
logger.debug('>> %s', stripped)
self.send_channel_message(stripped)
try:
self.manifold.process_once(timeout)
except UnicodeDecodeError:
# This just *happens* -- I think these are coming from
# maybe MOTD messages? It isn't clear.
logger.warning(
'UnicodeDecodeError raised while processing messages.'
)
while True:
if self.queue.empty():
break
message = self.queue.get(block=False)
logger.debug('<< %s', message)
outhandle.write(u'%s\n' % message)
outhandle.flush()
| [
"[email protected]"
] | |
c8448d233f737831366635ce1250748f73103822 | 97aa47340e99f7be364f27cba87e499d942eab43 | /dice.py | e625e3d7c22d238043fb453f6c002adc02a49a65 | [] | no_license | eryilmazysf/assignments- | cbe0d0d761a0a3da819c456ea0d9accb86175a35 | c1b3084b39ea72ae14fdc4c564d94c26ca198806 | refs/heads/master | 2022-12-11T00:22:59.427632 | 2020-09-02T15:26:12 | 2020-09-02T15:26:12 | 277,168,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | import random
print("""
*************************
DİCE SİMULATİON
*************************
do not forget dice number between 1 and 8
""")
x=int(input("how many dice you will use:"))
while (x<1 or x>8): #for control whether valid or not
print("not valid value try again")
x = int(input("how many dice you will use:"))
y=int(input("number of rolls:"))
while (y<0): #for control whether valid or not
print("not valid try again:")
y = int(input("number of rolls:"))
total_list=[]
for m in range(1,y+1):#for counting
total = 0
for n in range(1,x+1):
random_number= random.randint(1,6) # for appearance number we make random number
print(n,".diece: ",random_number)
total+=random_number
total_list.append(total)
print(m,".total",total)
print(total_list)
| [
"[email protected]"
] | |
225aaf3cdf09abdad88c9d81dc062d86ce62c1f7 | 16bb2395e3e1c51450a242c1f13718734686c749 | /src/models/ERGM_CVX.py | c89b1232c8587cc1ae750850d3c32148d94d5507 | [
"MIT"
] | permissive | DanqingZ/CPS_TRC | 8a05edacf2aa2ec1f260aa08fa19515779869a6b | 47f98ffc909e2cc9759a0f273a521b3ba189a02f | refs/heads/master | 2021-05-01T01:56:35.471342 | 2019-08-26T17:15:32 | 2019-08-26T17:15:32 | 82,489,734 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from cvxpy import *
class ERGM_CVX:
def __init__(self, E, C, V, E_all):
self.E = E
self.C = C
self.V = V
self.E_all = E_all
def run_CVX(self):
community = self.C.values.tolist()
input = np.zeros((len(self.C)*len(self.C),2))
c_matrix = np.zeros((len(self.C),len(self.C)))
Y_matrix = np.zeros((len(self.C),len(self.C)))
for i in range(len(self.C)):
for j in range(len(self.C)):
if i!=j:
if community[i]==community[j]:
c_matrix[i,j] = 1
input[:,0] = c_matrix.reshape(len(self.C)*len(self.C))
distance = self.E_all[1].values.tolist()
input[:,1] = distance
start = self.E[0].values.tolist()
end = self.E[1].values.tolist()
names = self.V.values.tolist()
start_int = np.zeros((len(start)))
end_int = np.zeros((len(start)))
for i in range(len(start)):
for j in range(len(names)):
if names[j][0] == start[i]:
start_int[i] = int(j)
if names[j][0] == end[i]:
end_int[i] = int(j)
Y_matrix = np.zeros((len(self.C),len(self.C)))
for i in range(len(start_int)):
Y_matrix[start_int[i],end_int[i]] = 1
Y = Y_matrix.reshape(len(self.C)*len(self.C))
import cvxpy as cvx
w = cvx.Variable(2)
b = cvx.Variable(1)
Y_matrix = np.ones((len(self.C),len(self.C)))*(-1)
for i in range(len(start_int)):
Y_matrix[start_int[i],end_int[i]] = 1
Y = Y_matrix.reshape(len(self.C)*len(self.C))
loss = cvx.sum_entries(cvx.logistic(-cvx.mul_elemwise(Y, input*w+np.ones((len(self.C)*len(self.C),1))*b)))
problem = cvx.Problem(cvx.Minimize(loss))
problem.solve(verbose=True)
self.W = w.value
self.b = b.value
| [
"[email protected]"
] | |
2e3138b7aebe9b0d818303c674da9144988dee2d | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /helpers/mixins/unpack_tags_mixin.py | 5e6e4c11c733fc5368427ac90ddb23bf2e781302 | [] | no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | from .unpack_ids_mixin import UnpackIdsMixin
class UnpackTagsMixin(UnpackIdsMixin):
"""
Mixin to apply on a ModelViewSet which transform registered fields from string containing ids to list of objects
"1,2,3" => [<Obj id=1>, <Obj id=2>, <Obj id=3>]
If a string passed, it will create a new instance of given model with given model name field
"1,2,truc" => [<Obj id=1 name=...>, <Obj id=2 name=...>, <new Obj id=3 name="truc">]
Should define unpackable fields like this :
unpackable_fields = {'data_field_name': (ModelName, 'model_field_name')}
"""
def get_item_id(self, word, options):
"""
If given tag contain only digits, use it as id, else create the instance
"""
item_id = None
if word.isdigit():
item_id = int(word)
elif options:
tag_model, tag_model_field = options
existing_tag = tag_model.objects.filter(**{tag_model_field: word}).first()
if existing_tag:
item_id = existing_tag.id
elif word != "":
item_id = tag_model.objects.create(**{tag_model_field: word}).id
else:
return {"id": None}
if item_id is not None:
return {"id": item_id}
| [
"[email protected]"
] | |
b11487f305e77ea0ce64db973c3e20b4db2f6b9a | 8cf28f19ec1d4ac902a5c53a3a4f23a21f125af4 | /list_dictionary.py | b5c6314af499e0bf32f6f2500a4bbd104d9d54b8 | [] | no_license | thydev/dojo-pythonbasic | e8ba6e986c8eefd45f15bf4d66802323da46e96e | 1c96ef09b2e7f9b65c5e9a6b78dce5fd7ca675e3 | refs/heads/master | 2021-01-25T14:55:49.532555 | 2018-03-05T16:18:21 | 2018-03-05T16:18:21 | 123,735,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | # Create a function that takes in two lists and creates a single dictionary.
# The first list contains keys and the second list contains the values.
# Assume the lists will be of equal length.
name = ["Anna", "Eli", "Pariece", "Brendan", "Amy", "Shane", "Oscar","Dojo", "coding"]
favorite_animal = ["horse", "cat", "spider", "giraffe", "ticks", "dolphins", "llamas", "fish"]
# print zip(name, favorite_animal) # Create Tuple
def make_dict(list1, list2):
new_dict = {}
if len(list1) == len(list2):
for i in range(0, len(list1)):
new_dict[list1[i]] = list2[i]
elif len(list1) > len(list2):
for i in range(0, len(list1)):
if i < len(list2):
new_dict[list1[i]] = list2[i]
else:
new_dict[list1[i]] = ""
else:
for i in range(0, len(list2)):
if i < len(list1):
new_dict[list2[i]] = list1[i]
else:
new_dict[list2[i]] = ""
return new_dict
d = make_dict(name, favorite_animal)
print d | [
"[email protected]"
] | |
e1f2f7cda8711e3ec07881200a6ea52f823dd4d3 | 1b98c70426580d6cebf36b6f9ed807fe7a9c0729 | /plots/plot-rx-overhead.py | f34b76b70508ad27093ed0e330314680583b85a3 | [] | no_license | jvimal/eyeq-tests | 54a1bba50d7019c07b09fdd147b831a5a823b5ba | d99d05d001d5a3d9fce53b66c6587f605245b555 | refs/heads/master | 2020-06-06T17:31:13.127695 | 2013-03-21T17:24:46 | 2013-03-21T17:24:46 | 8,303,987 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,946 | py | import plot_defaults
from helper import *
import math
parser = argparse.ArgumentParser()
parser.add_argument('--cols',
help="Columns to include for CPU usage",
action="store",
default='user,sys,sirq,hirq',
dest="cols")
parser.add_argument('--maxy',
help="Max CPU on y-axis",
action="store",
default=100,
dest="maxy",
type=int)
parser.add_argument('-o',
help="Output file to save",
default=None,
dest="out")
parser.add_argument('--text',
help="Plot rate text on the graph",
default=False,
action="store_true",
dest="text")
parser.add_argument('--dirs',
help="Directories to read output from",
default=['.'],
nargs="+",
dest="dirs")
args = parser.parse_args()
rates = [1000, 3000, 6000, 9000]
nums = [1, 8, 16, 32, 64, 92]
def dir_param(rate, without=False, num=1):
dir = "r%s-n%d" % (rate, num)
if without:
dir = "rx-without/" + dir
else:
dir = "rx-with/" + dir
return dir
def yvalue(rate, without=False, num=1, cols="sirq", rootdir="."):
dir = rootdir + "/" + dir_param(rate, without, num)
data = parse_cpu_usage(os.path.join(dir, "cpu.txt"))
data = transpose(data)
data = map(lambda d: avg(d[10:]), data)
# user, sys, hirq, sirq
data = {
'user': data[0],
'sys': data[1],
'hirq': data[4],
'sirq': data[5]
}
ret = 0.0
for col in cols.split(','):
ret += data[col]
return ret
def yvalue2(rate, without=False, num=1, rootdir="."):
dir = rootdir + "/" + dir_param(rate, without, num)
data = parse_rate_usage(os.path.join(dir, "net.txt"),
ifaces=["eth2"], dir="rx", divider=(1 << 20))
data = avg(data["eth2"][30:])
#perf = perf_summary(os.path.join(dir, "perf.txt"))
print dir, data
#pprint(perf)
return data
colours = blue_colours + ['black']
bar_width=1
bar_group=len(nums)+1
cols = args.cols
def avg(l):
return sum(l) * 1.0 /len(l)
def stdev(l):
m = avg(l)
dev = map(lambda x: (x - m)**2, l)
return math.sqrt(avg(dev))
def plot_without(without=False):
alpha = 1
first = True
for i, n in enumerate(nums):
xs = []
xlabels = []
ys = []
yerrs = []
xindex = i
for rate in rates:
xindex += bar_group
xs.append(xindex)
xlabels.append("%sG" % (rate/1000))
temp_ys = []
for dir in args.dirs:
print dir
temp_ys.append(yvalue(rate, num=n, without=without, cols=cols, rootdir=dir))
ys.append(avg(temp_ys))
yerrs.append(stdev(temp_ys))
#rate = yvalue2(rate, num=n, without=without, rootdir=args.dir)
if without == False and args.text:
plt.text(xindex, ys[-1] + 10,
'%.1fM' % rate, rotation='vertical')
if without == False:
plt.bar(xs, ys, bar_width, color=colours[0], alpha=alpha, hatch='*', yerr=yerrs, ecolor='purple')
else:
plt.bar(xs, ys, bar_width, color=colours[i], label="%d" % n, yerr=yerrs, ecolor="black")#, alpha=alpha)
plt.xlabel("Rate")
plt.ylabel("CPU %")
plt.xticks(xs, xlabels)
if without == True:
plt.legend(loc="upper left")
#plt.title("CPU %s usage @ diff number of VQs/TCP connections.." % cols)
plt.ylim((0,args.maxy))
plt.grid(True)
return
# This negative variable naming is a pain, I know! ;)
plot_without(False)
plot_without(True)
if args.out:
plt.savefig(args.out)
else:
plt.show()
| [
"[email protected]"
] | |
a8b812a267f75e48be0ef972c448526f65c16f4c | d7f8d98b7ce4ca157d44f8124b374887c5a55f79 | /project1/critic/splitgd.py | d1d823dca5d1a5e538420a9032083b1bbb91af7b | [] | no_license | oaowren/IT3105---Artificial-Intelligence-Programming | 1961e4487c4bc73aea527ff3c7f57838e69986a5 | e98c68f231f665e1659e5efcb4e42a785ca64ca6 | refs/heads/master | 2023-05-21T16:48:49.271201 | 2021-05-31T12:26:57 | 2021-05-31T12:26:57 | 328,962,486 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,177 | py | import math
import tensorflow as tf
import numpy as np
# ************** Split Gradient Descent (SplitGD) **********************************
# This "exposes" the gradients during gradient descent by breaking the call to "fit" into two calls: tape.gradient
# and optimizer.apply_gradients. This enables intermediate modification of the gradients. You can find many other
# examples of this concept online and in the (excellent) book "Hands-On Machine Learning with Scikit-Learn, Keras,
# and Tensorflow", 2nd edition, (Geron, 2019).
# This class serves as a wrapper around a keras model. Then, instead of calling keras_model.fit, just call
# SplitGD.fit.
#
# WARNING. In THEORY, you should be able to use this class by just subclassing it and writing your own code
# for the "modify_gradients" method. However, there are many practical issues involving versions of tensorflow, use
# of keras and the tensorflow backend, etc. So the main purpose of this file is to show the basics of how you can
# split gradient descent into two parts using tf.GradientTape. Many similar examples exist online, but, again, there
# is no guarantee that they will work seamlessly with your own code.
class SplitGD():
def __init__(self, keras_model, critic):
self.model = keras_model
self.critic = critic
# Subclass this with something useful.
def modify_gradients(self, gradients):
gradients = self.critic.modify_gradients(gradients)
return gradients
# This returns a tensor of losses, OR the value of the averaged tensor. Note: use .numpy() to get the
# value of a tensor.
def gen_loss(self,features,targets,avg=False):
predictions = self.model(features) # Feed-forward pass to produce outputs/predictions
loss = self.model.loss(targets, predictions) # model.loss = the loss function
return tf.reduce_mean(loss).numpy() if avg else loss
def fit(self, features, targets, epochs=1, mbs=1,vfrac=0.1,verbosity=0,callbacks=[]):
params = self.model.trainable_weights
train_ins, train_targs, val_ins, val_targs = split_training_data(features,targets,vfrac=vfrac)
for cb in callbacks: cb.on_train_begin()
for epoch in range(epochs):
for cb in callbacks: cb.on_epoch_begin(epoch)
for _ in range(math.floor(len(train_ins) / mbs)):
with tf.GradientTape() as tape: # Read up on tf.GradientTape !!
feaset,tarset = gen_random_minibatch(train_ins,train_targs,mbs=mbs)
loss = self.gen_loss(feaset,tarset,avg=False)
gradients = tape.gradient(loss,params)
gradients = self.modify_gradients(gradients)
self.model.optimizer.apply_gradients(zip(gradients,params))
if verbosity > 0:
self.end_of_epoch_action(train_ins,train_targs,val_ins,val_targs,epoch,
verbosity=verbosity,callbacks=callbacks)
for cb in callbacks: cb.on_train_end()
# The call to model.evaluate does 2 things for a set of features and targets: 1) computes the loss, 2) applies
# the model's "metric" (which may differ from the loss) to produce an "evaluation". A typical metric is
# "categorical_accuracy" = the fraction of outputs that are "correct", i.e. the highest output neuron
# corresponds to the correct value. For more metrics, read up on Keras.metrics.
# Verbosity levels: 0 = no prints, 1 = only my own prints, 2 = my prints + TF prints (in call to model.evaluate
def gen_evaluation(self,features,targets,avg=False,verbosity=0,callbacks=[]):
loss, evaluation = self.model.evaluate(features,targets,callbacks=callbacks,
batch_size=len(features), verbose=(1 if verbosity == 2 else 0))
return evaluation, loss
# return (tf.reduce_mean(evaluation).numpy() if avg else evaluation), loss
def status_display(self, val, loss, verbosity = 1, mode='Train'):
if verbosity > 0:
print('{0} *** Loss: {1} Eval: {2}'.format(mode,loss,val), end=' ')
def end_of_epoch_action(self, train_ins, train_targs, valid_ins, valid_targs, epoch,verbosity=1,callbacks=[]):
print('\n Epoch: {0}'.format(epoch), end=' ')
# Calculate Loss and Evaluation for entire training set
val, loss = self.gen_evaluation(train_ins, train_targs,avg=True,verbosity=verbosity,callbacks=callbacks)
self.status_display(val,loss, verbosity=verbosity, mode='Train')
val2, loss2 = 0, 0
if len(valid_ins) > 0: # Calculate Loss and Evaluation for entire Validation Set
val2, loss2 = self.gen_evaluation(valid_ins, valid_targs, avg=True, verbosity=verbosity,callbacks=callbacks)
self.status_display(val2,loss2, verbosity=verbosity, mode='Validation')
self.update_callbacks(epoch, (loss, val, loss2, val2), callbacks)
def update_callbacks(self, epoch, quad, callbacks=[]):
cb_log = {"loss": quad[0], "metric": quad[1], "val_loss": quad[2], "val_metric": quad[3]}
#cb_log = {"loss": quad[0], "val_loss": quad[2]}
for cb in callbacks: cb.on_epoch_end(epoch,cb_log)
# A few useful auxiliary functions
def gen_random_minibatch(inputs, targets, mbs=1):
indices = np.random.randint(len(inputs), size=mbs)
return inputs[indices], targets[indices]
# This returns: train_features, train_targets, validation_features, validation_targets
def split_training_data(inputs,targets,vfrac=0.1,mix=True):
vc = round(vfrac * len(inputs)) # vfrac = validation_fraction
# pairs = np.array(list(zip(inputs,targets)))
if vfrac > 0:
pairs = list(zip(inputs,targets))
if mix: np.random.shuffle(pairs)
vcases = pairs[0:vc]; tcases = pairs[vc:]
return np.array([tc[0] for tc in tcases]), np.array([tc[1] for tc in tcases]),\
np.array([vc[0] for vc in vcases]), np.array([vc[1] for vc in vcases])
# return tcases[:,0], tcases[:,1], vcases[:,0], vcases[:,1] # Can't get this to work properly
else:
return inputs,targets,[],[] | [
"[email protected]"
] | |
5e181f655825792a06adea89c63d5a696c7e7028 | 6dbd108198759b98ed044fc740d79d775553636a | /3.py | 5d37ad979bece10dcce4e04d6e362609616466bc | [] | no_license | linh6666/baitaptuan7-xstk | c5ca69e2b292b5c2aab6304f3242585240fff7f1 | 712922ede096a76704378e5ad9a5ed1229c81786 | refs/heads/main | 2023-08-18T19:07:13.313201 | 2021-10-09T06:14:19 | 2021-10-09T06:14:19 | 415,216,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | import numpy as np
a = np.zeros((2, 512 * 512), dtype=np.float32)
a[0, :] = 1.0
a[1, :] = 0.1
print("a.shape: ", a.shape)
print("mean a = ", np.mean(a))
© 2021 GitHub, Inc.
| [
"[email protected]"
] | |
e231384f1e02475631385dcbf48de464cedf8272 | b08b7d8561b78cdf0b245c79b577bfbc4f1805b7 | /autoclicker.py | e1f3999aa77b27481fd1823968b72a10f5f9484e | [] | no_license | AadityaKandel/AutoClicker | a3a679630ebadb048acd4cc027dfb1c0c7629d34 | 0c6559e434e40533fecf4289cd69276f13e118b6 | refs/heads/main | 2023-02-05T11:03:25.744489 | 2020-12-29T11:59:49 | 2020-12-29T11:59:49 | 325,274,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,394 | py | try:
from tkinter import *
import pyautogui as pp
import keyboard
# Import Success
root = Tk()
mouse = StringVar()
mouse.set("0")
mousee = StringVar()
mousee.set("0")
def loc():
for i in range(0,999999999):
act.set('Activated [ Shift+Q [ Deactivate ] ]')
root.update()
locc.set('Press Ctrl+Q to Stop')
if keyboard.is_pressed("ctrl+q"):
dd = pp.position()
locc.set("Find Mouse Location")
mouse.set(f"{dd[0]}")
mousee.set(f"{dd[1]}")
break
Label(text = "Welcome To AUTO CLICKER",font = "comicsansms 14 bold",bg = "black",fg = "white").pack()
Label(text = "",font = "arial",bg = "white",fg = "white",borderwidth = 1,width = 45).pack()
def ff():
Label(text = "",bg = "black").pack()
ff()
locc = StringVar()
locc.set("Find Mouse Location")
Button(textvariable = locc,font = "comicsansms 14 bold",bg = "black",fg = "white",command = loc).pack(anchor = W)
f1 = Frame(borderwidth = 10,bg = "black")
f3 = Frame(borderwidth = 10,bg = "black")
f4 = Frame(borderwidth = 10,bg = "black")
Label(f1,text = "Mouse X: ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Entry(f1,textvariable = mouse,font = "comicsansms 14 bold",bg = "black",fg = "white",width = 7,justify = "right").pack(side = LEFT)
Label(f1,text = "Mouse Y: ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Entry(f1,textvariable = mousee,font = "comicsansms 14 bold",bg = "black",fg = "white",width = 7,justify = "right").pack(side = LEFT)
f1.pack(anchor = W)
Label(text = "",font = "arial",bg = "white",fg = "white",borderwidth = 1,width = 45).pack()
ff()
interval = DoubleVar()
interval.set(1)
def plusb():
interval.set((interval.get())+0.1)
def subb():
interval.set((interval.get())-0.1)
Label(f3,text = "Wait After 1 Click: ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Entry(f3,textvariable = interval,font = "comicsansms 14 bold",bg = "black",fg = "white",width = 5,justify = "right").pack(side = LEFT)
Label(f3,text = " ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Button(f3,text = "+",font = "comicsansms 14 bold",bg = "black",fg = "white",command = plusb).pack(side = LEFT)
Label(f3,text = " ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Button(f3,text = "-",font = "comicsansms 14 bold",bg = "black",fg = "white",command = subb).pack(side = LEFT)
f3.pack(anchor = W)
Label(text = "",font = "arial",bg = "white",fg = "white",borderwidth = 1,width = 45).pack()
ff()
ff()
act = StringVar()
act.set("[ Shift+A ] Activate")
Button(textvariable = act,font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = BOTTOM)
root.config(bg = "black")
import time
for i in range(0,999999999):
root.update()
if keyboard.is_pressed('shift+a'):
act.set('Activated [ Shift+Q [ Deactivate ] ]')
for i in range(0,999999999999999999999):
root.update()
if keyboard.is_pressed('shift+q'):
root.update()
act.set("[ Shift+A ] Activate")
break
else:
pp.click(x=eval((mouse.get())),y=eval((mousee.get())))
pp.click(x=eval((mouse.get()))+1,y=eval((mousee.get())))
time.sleep((interval.get()))
act.set("[ Shift+A ] Activate")
root.mainloop()
except:
quit() | [
"[email protected]"
] | |
6a6ebbb1c7f50e95986df884a5b0e3681842fb9a | c72dae37d94d8f9e80db232a9838244e2bb33fb2 | /src/teachzy/urls.py | 387858aeb2ebd754105a9a08dec06e7ba64f4989 | [] | no_license | Asifrahman96/DjangoTeachzy | 956ae3c2e9b917ec1bb181fb06babe32f7f48083 | 89e3a07b0beb12e328747a0bc369d731d63b10ec | refs/heads/master | 2023-02-24T03:08:17.274863 | 2021-01-28T20:18:41 | 2021-01-28T20:18:41 | 333,869,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('pages.urls')),
path('csvs/', include('csvs.urls')),
path('teachers/', include('teachers.urls')),
path('accounts/', include('accounts.urls')),
path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
72596ce81af81043b7245963fcc8b2090e48c45d | 70f27f6215c5261f080cb8d12ceac5c484f2f147 | /app/django_models/person/models.py | 7af80c95b061fd6d88de2200ad80dfc6017a9b6c | [] | no_license | jordangarside/django-async-pytest-example | bf2fa32c7ffc5ebed3f2077483113b47987afd5a | 3e141eb1a048b80bd3f04c49068534f726a8c4c6 | refs/heads/master | 2022-12-02T20:51:33.364679 | 2020-08-14T16:47:22 | 2020-08-14T16:47:26 | 286,944,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from django.db import models
from asgiref.sync import sync_to_async
from typing import TypeVar
TModel = TypeVar("TModel", bound=models.Model)
class BonfireAsyncManager(models.Manager): # type: ignore # excuse: typed in a stub
"""This class is typed via a typestub in async_manager.pyi. Make sure to add new manager commands in the file
to pass the typecheck.
"""
async def async_create(self, **kwargs: object) -> TModel:
obj: TModel = await sync_to_async(super().create, thread_sensitive=True)(
**kwargs
)
return obj
class BonfireBaseModel(models.Model): # type: ignore
"""Abstract base model class to provide commonly used fields uuid, created_at and updated_at."""
objects = BonfireAsyncManager()
class Meta:
abstract = True
class Person(BonfireBaseModel):
name = models.CharField(max_length=100)
| [
"[email protected]"
] | |
3918150f5542d26412b16d6b3636c338034e5b14 | 97aed55858f227a56fd79fec51f093e192db6c01 | /app/core/tests/test_models.py | fd2b496c9844804512d95c53c549b3b3e2ae01ab | [
"MIT"
] | permissive | harrydadson/recipe-app-api | 9fdc7d52fe7c2689808d605e65e241e82235cd05 | 63015fd390877ed6d5f41384c818cb9c7d870c52 | refs/heads/master | 2020-06-26T01:18:38.264636 | 2019-08-09T04:25:53 | 2019-08-09T04:25:53 | 173,390,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelsTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = "[email protected]"
password = 'testpass123'
user = get_user_model().objects.create_user(
email = email,
password = password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = '[email protected]'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
'[email protected]',
'test123'
)
self.assertTrue(user.is_superuser) # PermissionMixins
self.assertTrue(user.is_staff)
| [
"[email protected]"
] | |
4b91aba2fe0eca1c717453b7bb0a1adc8c7c999a | 436da49d82df227fc2654c7e3c6acc72d6a6aad6 | /hindi/migrations/0001_initial.py | 4c174394056b998f564cafad188f59419be3a23f | [] | no_license | mc639/Manavmitra | c91a2178f01427284d256ff152a4032f978e48a4 | 567d3b483d52e9285681916bfeda773a7b9ae0ed | refs/heads/master | 2020-07-30T14:41:55.736634 | 2019-09-23T05:02:26 | 2019-09-23T05:02:26 | 210,266,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,342 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-19 18:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('caption', models.CharField(max_length=500)),
('slug', models.SlugField(max_length=500)),
('image_upload', models.ImageField(upload_to='gujarati/media')),
('article', tinymce.models.HTMLField()),
('video', models.TextField(blank=True, null=True)),
('image', models.TextField(blank=True, null=True)),
('embed', models.TextField(blank=True, null=True)),
('time', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('slug', models.SlugField()),
],
),
migrations.CreateModel(
name='Epaper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('e_paper_name', models.CharField(max_length=50)),
('e_paper', models.FileField(upload_to='gujarati/Epaper')),
('date', models.DateField()),
],
),
migrations.CreateModel(
name='Trailer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('trailer_name', models.CharField(max_length=50)),
('trailer_url', models.TextField()),
],
),
migrations.AddField(
model_name='blog',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindi.Category'),
),
]
| [
"[email protected]"
] | |
35520773184de9bc6cbe60fe4ed6a427c4a1cb42 | 1c79e354726a60b939df18aa34ab63408553d078 | /py/examples/counter_broadcast.py | 9e30f02433102c7e848efbfe1fc87dd3a516e969 | [
"Apache-2.0"
] | permissive | feddelegrand7/wave | 9017fc7bbeef9233c1fd9497c3e7a4d6f6911e85 | ba002d47fcea688bf46fa1682e6c4a73cae0f8ee | refs/heads/master | 2023-02-03T13:06:41.894758 | 2020-12-18T00:09:14 | 2020-12-18T00:09:14 | 322,443,421 | 0 | 0 | Apache-2.0 | 2020-12-18T00:31:26 | 2020-12-18T00:10:14 | null | UTF-8 | Python | false | false | 647 | py | # Mode / Broadcast
# Launch the server in broadcast mode to synchronize browser state across users.
# Open `/demo` in multiple browsers and watch them synchronize in realtime.
# ---
from h2o_wave import main, app, Q, ui, pack
@app('/demo', mode='broadcast')
async def serve(q: Q):
count = q.app.count or 0
if 'increment' in q.args:
count += 1
q.app.count = count
items = pack([ui.button(name='increment', label=f'Count={count}')])
if count > 0:
form = q.page['example']
form.items = items
else:
q.page['example'] = ui.form_card(box='1 1 12 10', items=items)
await q.page.save()
| [
"[email protected]"
] | |
d9431f1fb2020f8d301376bed93ef53f3204cbf1 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /python_scripts/pimriscripts/mastersort/scripts_dir/p7432_run2M1.py | 39656c11ebf8cd9db049ce6d7b9a74d8b7e3f30a | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7432', 'run2M1']
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2480/e1331017/s1388354_5610_2M1_s30', '/ifs/scratch/pimri/soccog/test_working/7432/run2M1')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7432/run2M1')
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7432/run2M1'):
if 'MRDC' in f and 'gz' not in f:
old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', f)
new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', f + '.dcm')
os.rename(old, new)
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', '7432_run2M1', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', '7432_run2M1', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
| [
"[email protected]"
] | |
ea3871eaa7c9b6755d2963498e08b7d307615ebc | e840fe54e8fc774fce6e81b373c5f532cc35bfd1 | /Api/Flask/Daos/AccesoDatos/DaoEspectros.py | 29c5cebbb14da20aeb31c5fac0b43239ce5184ae | [] | no_license | jcamiloq/geospectre | a059cf594c13aa5e01d2d2696615c5e6c2e0d3bb | 199896571b8ecc38da8374ff35f66f3bc1f3d193 | refs/heads/master | 2022-12-12T04:40:42.159146 | 2020-09-15T14:50:40 | 2020-09-15T14:50:40 | 293,323,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,148 | py | from .Logica.Espectros import Espectros
class DaoEspectros:
def __init__(self, conexion):
self.conexion = conexion
def guardarEspectros(self, espectros):
sql_guardar = "INSERT INTO espectros (white, dark, capturado, resultado, sensores_id) VALUES "
sql_guardar += "(%s, %s, %s, %s, %s) RETURNING *"
try:
cursor = self.conexion.cursor()
cursor.execute(
sql_guardar,
(
espectros.white, espectros.dark, espectros.capturado,
espectros.resultado,espectros.sensores_id
)
)
result = cursor.fetchone()
self.conexion.commit()
cursor.close()
espectros.id = result[0]
print("Espectro guardado con éxito")
return espectros
except(Exception) as e:
print("Error al guardar espectro", e)
return None
def actualizarEspectros(self, espectros):
sql_guardar = "UPDATE espectros SET "
sql_guardar += "white = %s, dark = %s, capturado = %s, resultado = %s, "
sql_guardar += "sensores_id = %s WHERE id = %s"
# print(sql_guardar)
try:
cursor = self.conexion.cursor()
cursor.execute(
sql_guardar,
(
espectros.white,
espectros.dark,
espectros.capturado,
espectros.resultado,
espectros.sensores_id,
espectros.id
)
)
self.conexion.commit()
cursor.close()
return espectros
except(Exception) as e:
print("Error al actualizar el espectro", e)
return None
def borrarEspectros(self, espectros):
sql_borrar = "DELETE FROM espectros WHERE id = " + str(espectros) + ";"
try:
cursor = self.conexion.cursor()
cursor.execute(sql_borrar)
except(Exception) as e:
print("Error al actualizar la espectros", e)
finally:
if(cursor):
cursor.close()
print("Se ha cerrado el cursor")
def getEspectrosSensores(self, id_sensores):
idEspectros = []
sql_select = "SELECT * FROM espectros WHERE sensores_id = " + str(id_sensores)
try:
cursor = self.conexion.cursor()
cursor.execute(sql_select)
record = cursor.fetchall()
for i in range (0, len(record)):
idEspectros.append(record[i][0])
# idEspectros.append("\n")
print(idEspectros)
return idEspectros
# record = cursor.fetchone()
# result = Sensores()
# result.id = record[0]
# result.lugar = record[1]
# result.tipo = record[2]
# result.numero_serie = record[3]
# result.t_int = record[4]
# result.numero_capt = record[5]
# result.mision_id = record[6]
# return result
except(Exception) as e:
print("Error al retornar los espectros", e)
result = None
def getEspectros(self, id_espectros):
sql_select = "SELECT * FROM espectros WHERE id = " + str(id_espectros)
try:
cursor = self.conexion.cursor()
cursor.execute(sql_select)
record = cursor.fetchone()
result = Espectros()
result.id = record[0]
result.white = record[1]
result.dark = record[2]
result.capturado = record[3]
result.resultado = record[4]
result.sensores_id = record[5]
# print(record)
return result
except(Exception) as e:
print("Error al retornar el espectro", e)
result = None
finally:
if(cursor):
cursor.close()
print("Se ha cerrado el cursor")
return result
| [
"[email protected]"
] | |
94516c3ae940d74c65e1973a2df7f40372e0d9d4 | ecbaab2349087c97f512cd144538369609623b2b | /src/output_terminal.py | caef97d8fdb494d726d544cba5a50894735caba5 | [] | no_license | ErikCalsson/RNA_binding_site_correlation | c1b38a04efaab284c7914aba70d52e04dfa73823 | fe4e64813f90d74200660f622d06df9958c61438 | refs/heads/master | 2023-08-23T15:00:24.329338 | 2021-11-01T07:27:58 | 2021-11-01T07:27:58 | 372,179,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # imports extern
# imports intern
# import sequence_pre_calculation as calc
# import graph_creation as graph
import data_calculation as dat
# start terminal output
def use_terminal():
print("See output barGroupPC1.png or barGroupPC2.png for visualisation of final results")
# output message with statistic
print("PC1: ", str(dat.result_PC_1[0]) + ",p-value: ", dat.result_PC_1[1])
print("PC2: ", dat.result_PC_2[0], ",p-value: ", dat.result_PC_2[1])
print("remember: t > T and alpha > p-value")
print("T = [ 0.995 -> 2.576, 0.99 -> 2.326, 0.975 -> 1.96, 0.95 -> 1.645]")
| [
"[email protected]"
] | |
5aa9f68fd54dbb2103720c1b33fda1491da44482 | 436743a9a77d417350e27736dd20f117bb8d625a | /desidlas/preprocess/preprocess.py | d8fe688a2564fb58b2aa680d07f00882c2a2c179 | [] | no_license | samwang141224/dla_cnn | b0a017030e6007016fa9b889890fc6057fe9c3d2 | 402f675b73d7c449aa67dbe969f3ad3ab3ea3951 | refs/heads/main | 2023-03-26T23:26:32.481646 | 2021-03-31T07:48:39 | 2021-03-31T07:48:39 | 309,691,930 | 0 | 0 | null | 2021-03-31T07:48:39 | 2020-11-03T13:17:42 | Jupyter Notebook | UTF-8 | Python | false | false | 17,675 | py | """ Code for pre-processing DESI data"""
''' Basic Recipe
0. Load the DESI mock spectrum
1. Resample to a constant dlambda/lambda dispersion
2. Renomalize the flux?
3. Generate a Sightline object with DLAs
4. Add labels
5. Write to disk (numpy or TF)
'''
import numpy as np
from desidlas.dla_cnn.spectra_utils import get_lam_data
#from dla_cnn.data_model.DataMarker import Marker
#no Marker in the DESI mock spectra, but maybe the sv data will have this value
from scipy.interpolate import interp1d
from os.path import join, exists
from os import remove
import csv
# Set defined items
from desidlas.dla_cnn import defs
REST_RANGE = defs.REST_RANGE
kernel = defs.kernel
def label_sightline(sightline, kernel=kernel, REST_RANGE=REST_RANGE, pos_sample_kernel_percent=0.3):
"""
Add labels to input sightline based on the DLAs along that sightline
Parameters
----------
sightline: dla_cnn.data_model.Sightline
pos_sample_kernel_percent: float
kernel: pixel numbers for each spectra window
REST_RANGE: [900,1346], wavelength range of DLAs in the rest frame
Returns
-------
classification: np.ndarray
is 1 / 0 / -1 for DLA/nonDLA/border
offsets_array: np.ndarray
offset
column_density: np.ndarray
"""
lam, lam_rest, ix_dla_range = get_lam_data(sightline.loglam, sightline.z_qso, REST_RANGE)
samplerangepx = int(kernel*pos_sample_kernel_percent/2) #60
#kernelrangepx = int(kernel/2) #200
ix_dlas=[]
coldensity_dlas=[]
for dla in sightline.dlas:
if (912<(dla.central_wavelength/(1+sightline.z_qso))<1220)&(dla.central_wavelength>=3700):
ix_dlas.append(np.abs(lam[ix_dla_range]-dla.central_wavelength).argmin())
coldensity_dlas.append(dla.col_density) # column densities matching ix_dlas
'''
# FLUXES - Produce a 1748x400 matrix of flux values
fluxes_matrix = np.vstack(map(lambda f,r:f[r-kernelrangepx:r+kernelrangepx],
zip(itertools.repeat(sightline.flux), np.nonzero(ix_dla_range)[0])))
'''
# CLASSIFICATION (1 = positive sample, 0 = negative sample, -1 = border sample not used
# Start with all samples zero
classification = np.zeros((np.sum(ix_dla_range)), dtype=np.float32)
# overlay samples that are too close to a known DLA, write these for all DLAs before overlaying positive sample 1's
for ix_dla in ix_dlas:
classification[ix_dla-samplerangepx*2:ix_dla+samplerangepx*2+1] = -1
# Mark out Ly-B areas
lyb_ix = sightline.get_lyb_index(ix_dla)
classification[lyb_ix-samplerangepx:lyb_ix+samplerangepx+1] = -1
# mark out bad samples from custom defined markers
#for marker in sightline.data_markers:
#assert marker.marker_type == Marker.IGNORE_FEATURE # we assume there are no other marker types for now
#ixloc = np.abs(lam_rest - marker.lam_rest_location).argmin()
#classification[ixloc-samplerangepx:ixloc+samplerangepx+1] = -1
# overlay samples that are positive
for ix_dla in ix_dlas:
classification[ix_dla-samplerangepx:ix_dla+samplerangepx+1] = 1
# OFFSETS & COLUMN DENSITY
offsets_array = np.full([np.sum(ix_dla_range)], np.nan, dtype=np.float32) # Start all NaN markers
column_density = np.full([np.sum(ix_dla_range)], np.nan, dtype=np.float32)
# Add DLAs, this loop will work from the DLA outward updating the offset values and not update it
# if it would overwrite something set by another nearby DLA
for i in range(int(samplerangepx+1)):
for ix_dla,j in zip(ix_dlas,range(len(ix_dlas))):
offsets_array[ix_dla+i] = -i if np.isnan(offsets_array[ix_dla+i]) else offsets_array[ix_dla+i]
offsets_array[ix_dla-i] = i if np.isnan(offsets_array[ix_dla-i]) else offsets_array[ix_dla-i]
column_density[ix_dla+i] = coldensity_dlas[j] if np.isnan(column_density[ix_dla+i]) else column_density[ix_dla+i]
column_density[ix_dla-i] = coldensity_dlas[j] if np.isnan(column_density[ix_dla-i]) else column_density[ix_dla-i]
offsets_array = np.nan_to_num(offsets_array)
column_density = np.nan_to_num(column_density)
# Append these to the Sightline
sightline.classification = classification
sightline.offsets = offsets_array
sightline.column_density = column_density
# classification is 1 / 0 / -1 for DLA/nonDLA/border
# offsets_array is offset
return classification, offsets_array, column_density
def rebin(sightline, v):
"""
Resample and rebin the input Sightline object's data to a constant dlambda/lambda dispersion.
Parameters
----------
sightline: :class:`dla_cnn.data_model.Sightline.Sightline`
v: float, and np.log(1+v/c) is dlambda/lambda, its unit is m/s, c is the velocity of light
Returns
-------
:class:`dla_cnn.data_model.Sightline.Sightline`:
"""
# TODO -- Add inline comments
c = 2.9979246e8
# Set a constant dispersion
dlnlambda = np.log(1+v/c)
wavelength = 10**sightline.loglam #the wavelength range
max_wavelength = wavelength[-1]
min_wavelength = wavelength[0]
# Calculate how many pixels are needed for Rebinning in this spectra
pixels_number = int(np.round(np.log(max_wavelength/min_wavelength)/dlnlambda))+1 #how many pixels in this spectra
# Rebined wavelength
new_wavelength = wavelength[0]*np.exp(dlnlambda*np.arange(pixels_number))
# Endpoints of original pixels
npix = len(wavelength)
wvh = (wavelength + np.roll(wavelength, -1)) / 2.
wvh[npix - 1] = wavelength[npix - 1] + \
(wavelength[npix - 1] - wavelength[npix - 2]) / 2.
dwv = wvh - np.roll(wvh, 1)
dwv[0] = 2 * (wvh[0] - wavelength[0])
med_dwv = np.median(dwv)
# Cumulative Sum
cumsum = np.cumsum(sightline.flux * dwv)
cumvar = np.cumsum(sightline.error * dwv, dtype=np.float64)
# Interpolate
fcum = interp1d(wvh, cumsum,bounds_error=False)
fvar = interp1d(wvh, cumvar,bounds_error=False)
# Endpoints of new pixels
nnew = len(new_wavelength)
nwvh = (new_wavelength + np.roll(new_wavelength, -1)) / 2.
nwvh[nnew - 1] = new_wavelength[nnew - 1] + \
(new_wavelength[nnew - 1] - new_wavelength[nnew - 2]) / 2.
# Pad starting point
bwv = np.zeros(nnew + 1)
bwv[0] = new_wavelength[0] - (new_wavelength[1] - new_wavelength[0]) / 2.
bwv[1:] = nwvh
# Evaluate
newcum = fcum(bwv)
newvar = fvar(bwv)
# Rebinned flux, var
new_fx = (np.roll(newcum, -1) - newcum)[:-1]
new_var = (np.roll(newvar, -1) - newvar)[:-1]
# Normalize (preserve counts and flambda)
new_dwv = bwv - np.roll(bwv, 1)
new_fx = new_fx / new_dwv[1:]
# Preserve S/N (crudely)
med_newdwv = np.median(new_dwv)
new_var = new_var / (med_newdwv/med_dwv) / new_dwv[1:]
left = 0
while np.isnan(new_fx[left])|np.isnan(new_var[left]):
left = left+1
right = len(new_fx)
while np.isnan(new_fx[right-1])|np.isnan(new_var[right-1]):
right = right-1
test = np.sum((np.isnan(new_fx[left:right]))|(np.isnan(new_var[left:right])))
assert test==0, 'Missing value in this spectra!'
sightline.loglam = np.log10(new_wavelength[left:right])
sightline.flux = new_fx[left:right]
sightline.error = new_var[left:right]
return sightline
def normalize(sightline, full_wavelength, full_flux):
"""
Normalize spectrum by dividing the mean value of continnum at lambda[left,right]
------------------------------------------
parameters:
sightline: dla_cnn.data_model.Sightline.Sightline object;
full_flux: list, flux of the spectra
full_wavelength: list,wavelength of the spectra
--------------------------------------------
return
sightline: the sightline after normalized
"""
blue_limit = 1420
red_limit = 1480
rest_wavelength = full_wavelength/(sightline.z_qso+1)
assert blue_limit <= red_limit,"No Lymann-alpha forest, Please check this spectra: %i"%sightline.id#when no lymann alpha forest exists, assert error.
#use the slice we chose above to normalize this spectra, normalize both flux and error array using the same factor to maintain the s/n.
good_pix = (rest_wavelength>=blue_limit)&(rest_wavelength<=red_limit)
sightline.flux = sightline.flux/np.median(full_flux[good_pix])
sightline.error = sightline.error/np.median(full_flux[good_pix])
def estimate_s2n(sightline):
"""
Estimate the s/n of a given sightline, using the lymann forest part and excluding dlas.
-------------------------------------------------------------------------------------
parameters;
sightline: class:`dla_cnn.data_model.sightline.Sightline` object, we use it to estimate the s/n,
and since we use the lymann forest part, the sightline's wavelength range should contain 1070~1170
--------------------------------------------------------------------------------------
return:
s/n : float, the s/n of the given sightline.
"""
#determine the lymann forest part of this sightline
blue_limit = 1420
red_limit = 1480
wavelength = 10**sightline.loglam
rest_wavelength = wavelength/(sightline.z_qso+1)
#lymann forest part of this sightline, contain dlas
test = (rest_wavelength>blue_limit)&(rest_wavelength<red_limit)
#when excluding the part of dla, we remove the part between central_wavelength+-delta
#dwv = rest_wavelength[1]-rest_wavelength[0]#because we may change the re-sampling of the spectra, this need to be calculated.
#dv = dwv/rest_wavelength[0] * 3e5 # km/s
#delta = int(np.round(3000./dv))
#for dla in sightline.dlas:
#test = test&((wavelength>dla.central_wavelength+delta)|(wavelength<dla.central_wavelength-delta))
#assert np.sum(test)>0, "this sightline doesn't contain lymann forest, sightline id: %i"%sightline.id
s2n = sightline.flux/sightline.error
#return s/n
return np.median(s2n[test])
def generate_summary_table(sightlines, output_dir, mode = "w"):
"""
Generate a csv file to store some necessary information of the given sightlines. The necessary information means the id, z_qso,
s/n of thelymann forest part(avoid dlas+- 3000km/s), the wavelength range and corresponding pixel number of each channel.And the csv file's format is like:
id(int), z_qso(float), s2n(float), wavelength_start_b(float), wavelength_end_b(float),pixel_start_b(int), pixel_end_b(int), wavelength_start_r(float), wavelength_end_r(float),pixel_start_r(int), pixel_end_r(int), wavelength_start_z(float), wavelength_end_z(float),pixel_start_z(int), pixel_end_z(int),dlas_col_density(str),dlas_central_wavelength(str)
"wavelength_start_b" means the start wavelength value of b channel, "wavelength_end_b" means the end wavelength value of b channel, "pixel_start_b" means the start pixel number of b channel,"pixel_end_b" means the end pixel number of b channel
so do the other two channels.Besides, "dlas_col_density" means the col_density array of the sightline, and "dlas_central_wavelength" means the central wavelength array means the central wavelength array of the given sightline. Due to the uncertainty of the dlas' number, we chose to use str format to store the two arrays,
each array is written in the format like "value1,value2, value3", and one can use `str.split(",")` to get the data, the column density and central wavelength which have the same index in the two arrayscorrspond to the same dla.
------------------------------------------------------------------------------------------------------------------------------------------------------------------
parameters:
sightlines: list of `dla_cnn.data_model.Sightline.Sightline` object, the sightline contained should
contain the all data of b,r,z channel, and shouldn't be rebinned,
output_dir: str, where the output csv file is stored, its format should be "xxxx.csv",
mode: str, possible values "w", "a", "w" means writing to the csv file directly(overwrite the
previous content), "a" means adding more data to the csv file(remaining the previous content)
-------------------------------------------------------------------------------------------------------------------------------------------------------------------
return:
None
"""
#the header of the summary table, each element's meaning can refer to above comment
headers = ["id","z_qso","s2n","wavelength_start_b","wavelength_end_b","pixel_start_b","pixel_end_b","wavelength_start_r","wavelength_end_r","pixel_start_r","pixel_end_r","wavelength_start_z","wavelength_end_z","pixel_start_z","pixel_end_z","dlas_col_density","dlas_central_wavelength"]
#open the csv file
with open(output_dir, mode=mode,newline="") as summary_table:
summary_table_writer = csv.DictWriter(summary_table,headers)
if mode == "w":
summary_table_writer.writeheader()
for sightline in sightlines:
#for each sightline, read its information and write to the csv file
info = {"id":sightline.id, "z_qso":sightline.z_qso, "s2n": sightline.s2n,"wavelength_start_b":10**sightline.loglam[0],
"wavelength_end_b":10**sightline.loglam[sightline.split_point_br-1],"pixel_start_b":0,"pixel_end_b":sightline.split_point_br-1,
"wavelength_start_r":10**sightline.loglam[sightline.split_point_br],"wavelength_end_r":10**sightline.loglam[sightline.split_point_rz-1],
"pixel_start_r":sightline.split_point_br,"pixel_end_r":sightline.split_point_rz-1,"wavelength_start_z":10**sightline.loglam[sightline.split_point_rz],
"wavelength_end_z":10**sightline.loglam[-1],"pixel_start_z":sightline.split_point_rz,"pixel_end_z":len(sightline.loglam)-1}
dlas_col_density = ""
dlas_central_wavelength = ""
for dla in sightline.dlas:
dlas_col_density += str(dla.col_density)+","
dlas_central_wavelength += str(dla.central_wavelength)+","
info["dlas_col_density"] = dlas_col_density[:-1]
info["dlas_central_wavelength"] = dlas_central_wavelength[:-1]
#write to the csv file
summary_table_writer.writerow(info)
#from dla_cnn.desi.DesiMock import DesiMock
def write_summary_table(nums, version,path, output_path):
"""
Directly read data from fits files and write the summary table, the summary table contains all available sightlines(dlas!=[] and z_qso>2.33) in the given fits files.
-----------------------------------------------------------------------------------------------------------------------------------------
parameters:
nums: list, the given fits files' id, its elements' format is int, and one should make sure all fits files are available before invoking this funciton, otherwise some sightlines can be missed;
version: int, the version of the data set we use, e.g. if the version is v9.16, then version = 16
path: str, the dir of the folder which stores the given fits file, the folder's structure is like folder-fits files' id - fits files , if you are still confused, you can check the below code about read data from the fits file;
output_path: str, the dir where the summary table is generated, and if there have been a summary table, then we will remove it and generate a new summary table;
------------------------------------------------------------------------------------------------------------------------------------------
"""
#if exists summary table before, remove it
#if exists(output_path):
#remove(output_path)
def write_as_summary_table(num):
"""
write summary table for a single given fits file, if there have been a summary table then directly write after it, otherwise create a new one
---------------------------------------------------------------------------------------------------------------------------------------------
parameter:
num: int, the id of the given fits file, e.g. 700
---------------------------------------------------------------------------------------------------------------------------------------------
"""
#read data from fits file
file_path = join(path,str(num))
spectra = join(file_path,"spectra-%i-%i.fits"%(version,num))
truth = join(file_path,"truth-%i-%i.fits"%(version,num))
zbest = join(file_path,"zbest-%i-%i.fits"%(version,num))
spec = DesiMock()
spec.read_fits_file(spectra,truth,zbest)
sightlines = []
bad_sightlines = []
for key in spec.data.keys():
if spec.data[key]["z_qso"]>2.33 and spec.data[key]["DLAS"]!=[]:
sightlines.append(spec.get_sightline(key))
#generate summary table
if exists(output_path):
generate_summary_table(sightlines,output_path,"a")
else:
generate_summary_table(sightlines,output_path,"w")
bad_files = [] #store the fits files with problems
#for each id in nums, invoking the `write_as_summary_table` funciton
for num in nums:
try:
write_as_summary_table(num)
except:
#if have problems append to the bad_files
bad_files.append(num)
assert bad_files==[], "these fits files have some problems, check them please, fits files' id :%s"%str(bad_files)
| [
"[email protected]"
] | |
a5e2debc3b4de63242c2bc5f62e4db0ae3a58645 | 44f07b81df56d7ea44775784a9697648fe481478 | /day8/faceapp/facedetect.py | ab3e244e889618a394e6791b7b7b4edf81d25532 | [] | no_license | shaadomanthra/cbpython-advanced | 436510c70deca4e1ef01517f87bba0e392583a88 | 86b613f89ca0b0cd8b243c157af1a2807e6ce605 | refs/heads/master | 2022-11-30T23:33:45.938854 | 2020-08-12T11:20:03 | 2020-08-12T11:20:03 | 276,316,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | ## detect face and draw rectangles
# import packages (pip install opencv-python)
from cv2 import cv2
import sys
# path for image and cascade
imagePath = 'images/f1.jpg'
cascPath = "haarcascade_frontalface_default.xml"
# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the image & convert to gray scale
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
print(faces)
# # Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
# #
# # # # open the image widow to display
cv2.imshow("Faces found", image)
cv2.waitKey(0)
# Saving the image
# cv2.imwrite(saveimagePath, image)
| [
"[email protected]"
] | |
001b8e5d7167d9f7ae30d9510713bbc363cc653b | da934e0010380fdc6894063540f61b0ebc2c9ded | /nova/crypto.py | 1f35ffa3915dad74a002a55998c536549c4b8d2d | [
"Apache-2.0"
] | permissive | bopopescu/cc-2 | ed4f1dfe3c98f476ff619058d99855a16272d36b | 37444fb16b36743c439b0d6c3cac2347e0cc0a94 | refs/heads/master | 2022-11-23T03:57:12.255817 | 2014-10-02T06:10:46 | 2014-10-02T06:10:46 | 282,512,589 | 0 | 0 | Apache-2.0 | 2020-07-25T19:36:05 | 2020-07-25T19:36:05 | null | UTF-8 | Python | false | false | 7,863 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrappers around standard crypto, including root and intermediate CAs,
SSH keypairs and x509 certificates.
"""
import hashlib
import logging
import os
import shutil
import tempfile
import time
import utils
from nova import vendor
import M2Crypto
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys')
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
def ca_path(project_id):
if project_id:
return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
return "%s/cacert.pem" % (FLAGS.ca_path)
def fetch_ca(project_id=None, chain=True):
if not FLAGS.use_intermediate_ca:
project_id = None
buffer = ""
if project_id:
with open(ca_path(project_id),"r") as cafile:
buffer += cafile.read()
if not chain:
return buffer
with open(ca_path(None),"r") as cafile:
buffer += cafile.read()
return buffer
def generate_key_pair(bits=1024):
# what is the magic 65537?
tmpdir = tempfile.mkdtemp()
keyfile = os.path.join(tmpdir, 'temp')
utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
(out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
fingerprint = out.split(' ')[1]
private_key = open(keyfile).read()
public_key = open(keyfile + '.pub').read()
shutil.rmtree(tmpdir)
# code below returns public key in pem format
# key = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
# private_key = key.as_pem(cipher=None)
# bio = M2Crypto.BIO.MemoryBuffer()
# key.save_pub_key_bio(bio)
# public_key = bio.read()
# public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key)
return (private_key, public_key, fingerprint)
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
"""requires lsh-utils"""
convert="sed -e'1d' -e'$d' | pkcs1-conv --public-key-info --base-64 |" \
+ " sexp-conv | sed -e'1s/(rsa-pkcs1/(rsa-pkcs1-sha1/' | sexp-conv -s" \
+ " transport | lsh-export-key --openssh"
(out, err) = utils.execute(convert, ssl_public_key)
if err:
raise exception.Error("Failed to generate key: %s", err)
return '%s %s@%s\n' %(out.strip(), name, suffix)
def generate_x509_cert(subject="/C=US/ST=California/L=The Mission/O=CloudFed/OU=NOVA/CN=foo", bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating private key: %s", "openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating CSR: %s", "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject))
private_key = open(keyfile).read()
csr = open(csrfile).read()
shutil.rmtree(tmpdir)
return (private_key, csr)
def sign_csr(csr_text, intermediate=None):
if not FLAGS.use_intermediate_ca:
intermediate = None
if not intermediate:
return _sign_csr(csr_text, FLAGS.ca_path)
user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate)
if not os.path.exists(user_ca):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
utils.runthis("Generating intermediate CA: %s", "sh geninter.sh %s" % (intermediate))
os.chdir(start)
return _sign_csr(csr_text, user_ca)
def _sign_csr(csr_text, ca_folder):
tmpfolder = tempfile.mkdtemp()
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
csrfile.write(csr_text)
csrfile.close()
logging.debug("Flags path: %s" % ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
utils.runthis("Signing cert: %s", "openssl ca -batch -out %s/outbound.crt -config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder))
os.chdir(start)
with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile:
return crtfile.read()
def mkreq(bits, subject="foo", ca=0):
pk = M2Crypto.EVP.PKey()
req = M2Crypto.X509.Request()
rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
pk.assign_rsa(rsa)
rsa = None # should not be freed here
req.set_pubkey(pk)
req.set_subject(subject)
req.sign(pk,'sha512')
assert req.verify(pk)
pk2 = req.get_pubkey()
assert req.verify(pk2)
return req, pk
def mkcacert(subject='nova', years=1):
req, pk = mkreq(2048, subject, ca=1)
pkey = req.get_pubkey()
sub = req.get_subject()
cert = M2Crypto.X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
cert.set_subject(sub) # FIXME subject is not set in mkreq yet
t = long(time.time()) + time.timezone
now = M2Crypto.ASN1.ASN1_UTCTIME()
now.set_time(t)
nowPlusYear = M2Crypto.ASN1.ASN1_UTCTIME()
nowPlusYear.set_time(t + (years * 60 * 60 * 24 * 365))
cert.set_not_before(now)
cert.set_not_after(nowPlusYear)
issuer = M2Crypto.X509.X509_Name()
issuer.C = "US"
issuer.CN = subject
cert.set_issuer(issuer)
cert.set_pubkey(pkey)
ext = M2Crypto.X509.new_extension('basicConstraints', 'CA:TRUE')
cert.add_ext(ext)
cert.sign(pk, 'sha512')
# print 'cert', dir(cert)
print cert.as_pem()
print pk.get_rsa().as_pem()
return cert, pk, pkey
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
"""
@type fp: file
@param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
@rtype: tuple
@return: the hex digest version of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
s = fp.read(8192)
while s:
m.update(s)
s = fp.read(8192)
hex_md5 = m.hexdigest()
# size = fp.tell()
fp.seek(0)
return hex_md5
| [
"[email protected]"
] | |
0203f8b7a170b9c90a9503a129644d67e720066b | de121a951947f70f402079d288a78d35c85747b2 | /exercises/exercises_04.py | 79cb7651e375b500210a4054a4ae7430a01afd4a | [] | no_license | tpurnachander/requests-workshop | 56899be6c5520fb947d91676c11864d09b4489d6 | dac134558f141c482e0a52f19fdce37b7e7ba928 | refs/heads/master | 2023-03-10T19:00:31.012280 | 2021-02-19T12:08:54 | 2021-02-19T12:08:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | import requests
import xml.etree.ElementTree as et
# Exercise 4.1
# Create a function create_xml_body_from_string()
# that returns a docstring (with triple double quotes)
# containing the following XML document:
# <payee>
# <name>John Smith</name>
# <address>
# <street>My street</street>
# <city>My city</city>
# <state>My state</state>
# <zipCode>90210</zipCode>
# </address>
# <phoneNumber>0123456789</phoneNumber>
# <accountNumber>12345</accountNumber>
# </payee>
# Exercise 4.2
# Write a test that POSTs the object created in 4.1
# to http://parabank.parasoft.com/parabank/services/bank/billpay?accountId=12345&amount=500
# Set the request header 'Content-Type' to 'application/xml'
# Then check that the response status code is 200
# and that the value of the response header 'Content-Type' is also equal to 'application/xml'
# Exercise 4.3
# Write a method create_xml_body_using_elementtree() that returns
# the same request body as in Exercise 4.1, but now uses the
# ElementTree library (I've imported that for you already, it's available as 'et')
# Make your life a little easier by specifying all element values as strings
# Exercise 4.4
# Repeat Exercise 4.2, but now use the XML document created in Exercise 4.3
# Don't forget to convert the XML document to a string before sending it!
| [
"[email protected]"
] | |
0aca2f6b09e65de9d040194775493f12fd174098 | 4d539867a53a3b9909bec1bfb1a49e189a2f1e20 | /EGAT/data_generator_attention_visual.py | ef32d63aab04eba48e9b3958108c0b8d5495fe4c | [
"MIT"
] | permissive | sailfish009/EGAT | 7a1d05c7c77e750903cc2ceb4b671d0f37a4ea60 | a03d6cbeb3e6d8f75edd608370256326d8fcb05b | refs/heads/main | 2023-01-15T10:13:17.142550 | 2020-11-22T14:20:28 | 2020-11-22T14:20:28 | 315,053,817 | 0 | 0 | MIT | 2020-11-22T14:15:56 | 2020-11-22T14:15:55 | null | UTF-8 | Python | false | false | 4,534 | py |
import os
import time
import pickle
import torch as t
import numpy as np
from torch.utils import data
import gzip
from time import time
from config import DefaultConfig
import torch
import dgl
import threading
class dataSet(data.Dataset):
def __init__(self, root_dir, protein_list_file):
super(dataSet, self).__init__()
self.edge_feat_mean = [31.83509173, 1.56021911] #calculated from trainset only
self.edge_feat_std = [16.79204272, 0.69076342] #calculated from trainset only
self.all_protBert_feature = pickle.load(gzip.open(root_dir+'/inputs/ProtBert_features.pkl.gz', "rb"))['ProtBert_features']
self.all_dist_matrix = pickle.load(gzip.open(root_dir+'/inputs/ppisp_dist_matrix_map.pkl.gz', 'rb'))
self.all_angle_matrix = pickle.load(gzip.open(root_dir+'/inputs/ppisp_angle_matrix_map.pkl.gz', 'rb'))
print('protein_list_file:', protein_list_file)
with open(protein_list_file, "r") as f:
protein_list = f.readlines()
self.protein_list = [x.strip() for x in protein_list]
self.config = DefaultConfig()
self.max_seq_len = self.config.max_sequence_length
self.neighbourhood_size = 21
self.protein_list_len = len(self.protein_list)
self.all_graphs = self.generate_all_graphs()
print('All graphs generated.')
def __getitem__(self, index):
t0=time()
protein_name = self.protein_list[index]
id_idx = index
_all_protBert_feature_ = self.all_protBert_feature[id_idx][:self.max_seq_len]
seq_len = _all_protBert_feature_.shape[0]
protein_info = {
'protein_name': protein_name,
'protein_idx': id_idx,
'seq_length': seq_len
}
if seq_len < self.max_seq_len:
temp = np.zeros([self.max_seq_len, _all_protBert_feature_.shape[1]])
temp[:seq_len, :] = _all_protBert_feature_
_all_protBert_feature_ = temp
_all_protBert_feature_ = _all_protBert_feature_[np.newaxis, :, :]
G = self.all_graphs[id_idx]
return torch.from_numpy(_all_protBert_feature_).type(torch.FloatTensor), \
G, \
protein_info
def __len__(self):
return self.protein_list_len
def generate_all_graphs(self):
graph_list = {}
for id_idx in self.all_dist_matrix:
G = dgl.DGLGraph()
G.add_nodes(self.max_seq_len)
neighborhood_indices = self.all_dist_matrix[id_idx]['dist_matrix'][:self.max_seq_len, :self.max_seq_len, 0] \
.argsort()[:, 1:self.neighbourhood_size]
if neighborhood_indices.max() > 499 or neighborhood_indices.min() < 0:
print(neighborhood_indices.max(), neighborhood_indices.min())
raise
edge_feat = np.array([
self.all_dist_matrix[id_idx]['dist_matrix'][:self.max_seq_len, :self.max_seq_len, 0],
self.all_angle_matrix[id_idx]['angle_matrix'][:self.max_seq_len, :self.max_seq_len]
])
edge_feat = np.transpose(edge_feat, (1, 2, 0))
edge_feat = (edge_feat - self.edge_feat_mean) / self.edge_feat_std # standardize features
self.add_edges_custom(G,
neighborhood_indices,
edge_feat
)
graph_list[id_idx]= G
return graph_list
def add_edges_custom(self, G, neighborhood_indices, edge_features):
t1 = time()
size = neighborhood_indices.shape[0]
neighborhood_indices = neighborhood_indices.tolist()
src = []
dst = []
temp_edge_features = []
for center in range(size):
src += neighborhood_indices[center]
dst += [center] * (self.neighbourhood_size - 1)
for nbr in neighborhood_indices[center]:
temp_edge_features += [np.abs(edge_features[center, nbr])]
if len(src) != len(dst):
prit('source and destination array should have been of the same length: src and dst:', len(src), len(dst))
raise Exception
G.add_edges(src, dst)
G.edata['ex'] = np.array(temp_edge_features)
def graph_collate(samples):
protbert_data, graph_batch, protein_info = map(list, zip(*samples))
graph_batch = dgl.batch(graph_batch)
protbert_data = torch.cat(protbert_data)
return protbert_data, graph_batch, protein_info
| [
"[email protected]"
] | |
e84fdec36800bc2eaf6a99f809432ca0be4287f2 | 84c4f9e14040502efddb258c243cb8e326f274c5 | /task_2_version_3/window_func.py | 0dfa95cde19f5daed407baf331d5f76a219b536e | [] | no_license | labkubia/lab | 7b6707eb2e1a1912e64dbda87bff44ca0aa84299 | 7e8ba89aa8638eb0f80855ba76fb4d852cc63a6e | refs/heads/master | 2021-09-07T15:14:05.243702 | 2018-02-24T19:23:04 | 2018-02-24T19:23:04 | 111,433,806 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | import numpy as np
def window_func(train_frame_set):
window = np.hanning(len(train_frame_set[0])) #different in matlab
#window = np.hamming(len(train_frame_set[0]))
train_frame_set=np.float64(train_frame_set)
frame_windowed_set=np.multiply(train_frame_set,window) # apply the window to the frames
#using np.multiply , multipy by elements
return frame_windowed_set | [
"[email protected]"
] | |
a80a155616ff956f9c128c0f892b37b9c3a26c9c | bc7f8b45413692fbf3b74287ed95ce6b2857c83c | /src/test_ocr.py | 18367e4d149ba270d56c26f52d3c540961f356a7 | [
"MIT"
] | permissive | mouradsm/vrpdr | c58b5d699d32f8858af2e7bd3bd203d56c806b97 | 700de74ac322ef2d02be36c070039dcaead918e5 | refs/heads/master | 2022-10-23T14:16:50.189659 | 2020-06-20T16:48:57 | 2020-06-20T16:48:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | import cv2 as cv
import numpy as np
import argparse
import sys
import os.path
import logging
import matplotlib.pyplot as plt
from ocr import OCR
def plot_images(data, rows, cols, cmap='gray'):
if(len(data) > 0):
i = 0
for title, image in data.items():
#logging.debug(title)
plt.subplot(rows,cols,i+1),plt.imshow(image,cmap)
plt.title(title)
plt.xticks([]),plt.yticks([])
i += 1
plt.show()
def display_images(img_list, row, col):
if(len(img_list) > 0):
images = {}
n = 0
for img in img_list:
n += 1
images[str(n)] = img
plot_images(images, row, col, cmap='gray')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Testing OCR.')
parser.add_argument('--image', help='Path to image file.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.DEBUG)
# Open the image file
if not os.path.isfile(args.image):
logging.error("Input image file ", args.image, " doesn't exist")
sys.exit(1)
cap = cv.VideoCapture(args.image)
hasFrame, frame = cap.read()
if hasFrame:
images = {}
images['frame'] = frame
ocr = OCR(model_filename="../config/attention_ocr_model.pth", use_cuda=False, threshold=0.7)
pred = ocr.predict(frame)
logging.info(f'Prediction: {pred}')
plot_images(images, 1, 3, cmap='gray')
else:
logging.debug("Frame not found!") | [
"[email protected]"
] | |
112b11038313f4ecd80672439c01bf4361a7ebd4 | 158f2afa919a22e51b8c607f7a34b34e72db1b1a | /Astropy_Open_Error.py | 8e9b32e185a495bd99e348ea98ba9b54dc0e07f6 | [] | no_license | chrisfrohmaier/Code_Snippets | 96fb40a8a0ea46fbe1171432af388b9003b7a877 | 59d2ec591cf74805f7d0e299a4e6dcdd23acb6de | refs/heads/master | 2016-09-10T15:50:29.066747 | 2014-03-07T11:54:07 | 2014-03-07T11:54:07 | 14,979,633 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | from astropy.io import fits
try:
hdulist_multi_sci=fits.open('/Users/cf5g09/Documents/PTF_Transform/Icecube/refs/ptf_2924/C01/cd.ptf_2924_01_R_first.weight.fits')
#print '++++ multi_mask assign ', science_image
except IOError or Warning or UnboundLocalError:
print 'Cant open Science' | [
"[email protected]"
] | |
df43518630c7ca82014fbf33f662d1d4af83bbca | 03a7a46f3cc00486ff46edcf4c4390dd64e214e5 | /lab2stack.py | c23b9e575cae786df29e52c83243cb6579e50154 | [] | no_license | fadybarsoum/NetSec-Reliable-Interaction-Protocol-Implementation | 1f558a8eb400f73df7d193b49571c9f2b196fca3 | a9ad4c4a1d9bbae224f1d61288f4df3686e27c2e | refs/heads/master | 2021-01-19T23:02:54.065939 | 2017-05-03T19:57:33 | 2017-05-03T19:57:33 | 88,915,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | from src.rip import RIPProtocol | [
"[email protected]"
] | |
2976ce29416e292cb0e119e7f3705c0bdd786ad7 | bad59b62baf06c5a110dbf96ee9653a69b8ca4df | /soldjango/soldjango/settings.py | 1daa5ebe5b7896e0a20e718dac4263b34ae127d4 | [] | no_license | 45jihoon/Leon | 848db934898ef9fc838154919c254ae5f7fcb684 | 69d024469a518a1b40e450e3408291671b183718 | refs/heads/main | 2023-01-13T10:55:11.184173 | 2020-11-18T01:47:33 | 2020-11-18T01:47:33 | 313,471,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | """
Django settings for soldjango project.
Generated by 'django-admin startproject' using Django 1.11.29.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%i8ro%=@i@jttfy1hxsblzafv7&v5uj@9dc%4g5^@v-2xnw=j&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'assembly',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'soldjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'soldjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
ac24192eb309aac4b2982be8519bf5c2b06906ea | 1326612e90a772bd6acbf0c9532cbcaefdec9f48 | /BiblioPixelAnimations/strip/Searchlights.py | 5bed4eba37d24cfa5a9789fec768858fa29d0c92 | [
"MIT"
] | permissive | russp81/BiblioPixelAnimations | cc2ad721ed60435950282ce09d95191bf09b1eb3 | 4184ace37200861cc721e5bd7a43014bd0bbcadf | refs/heads/master | 2021-01-17T21:01:22.832174 | 2016-09-16T06:43:06 | 2016-09-16T06:43:06 | 68,341,732 | 1 | 0 | null | 2016-09-16T00:23:29 | 2016-09-16T00:23:29 | null | UTF-8 | Python | false | false | 3,267 | py | from bibliopixel import LEDStrip
import bibliopixel.colors as colors
from bibliopixel.animation import BaseStripAnim
import random
class Searchlights(BaseStripAnim):
"""Three search lights sweeping at different speeds"""
def __init__(self, led, colors=[colors.MediumSeaGreen,colors.MediumPurple,colors.MediumVioletRed], tail=5, start=0, end=-1):
super(Searchlights, self).__init__(led, start, end)
self._color = colors
self._tail = tail + 1
if self._tail >= self._size / 2:
self._tail = (self._size / 2) - 1
self._direction = [1,1,1]
self._currentpos = [0,0,0]
self._steps = [1,1,1]
self._fadeAmt = 256 / self._tail
def step(self, amt = 1):
self._ledcolors = [(0,0,0) for i in range(self._size)]
self._led.all_off()
for i in range(0,3):
self._currentpos[i] = self._start + self._steps[i]
#average the colors together so they blend
self._ledcolors[self._currentpos[i]] = map(lambda x,y: (x + y)/2, self._color[i], self._ledcolors[self._currentpos[i]])
for j in range(1,self._tail):
if self._currentpos[i] - j >= 0:
self._ledcolors[self._currentpos[i] - j] = map(lambda x,y: (x + y)/2, self._ledcolors[self._currentpos[i] - j], colors.color_scale(self._color[i], 255 - (self._fadeAmt * j)))
if self._currentpos[i] + j < self._size:
self._ledcolors[self._currentpos[i] + j] = map(lambda x,y: (x + y)/2, self._ledcolors[self._currentpos[i] + j], colors.color_scale(self._color[i], 255 - (self._fadeAmt * j)))
if self._start + self._steps[i] >= self._end:
self._direction[i] = -1
elif self._start + self._steps[i] <= 0:
self._direction[i] = 1
# advance each searchlight at a slightly different speed
self._steps[i] += self._direction[i] * amt * int(random.random() > (i*0.05))
for i,thiscolor in enumerate(self._ledcolors):
self._led.set(i, thiscolor)
MANIFEST = [
{
"class": Searchlights,
"controller": "strip",
"desc": "Three search lights sweeping at different speeds",
"display": "Searchlights",
"id": "Searchlights",
"params": [
{
"default": -1,
"help": "Ending pixel (-1 for entire strip)",
"id": "end",
"label": "End",
"type": "int"
},
{
"default": 0,
"help": "Starting pixel",
"id": "start",
"label": "Start",
"type": "int"
},
{
"default": 5,
"help": "Length of the faded pixels at the start and end.",
"id": "tail",
"label": "Tail Length",
"type": "int"
},
{
"default": [colors.MediumSeaGreen,colors.MediumPurple,colors.MediumVioletRed],
"help": "",
"id": "colors",
"label": "Colors",
"type": "colors"
}
],
"type": "animation"
}
]
| [
"[email protected]"
] | |
ec6528e726e97e6f27a0169c8a07854d0fd9957b | d8c12c88942f5e0d0db76885884653bb94076cac | /src/boi/parser.py | 1275ecf1b21e99ef4817ae2441b9f314e5d3a697 | [] | no_license | jkarns275/ELLie | 58c5648f3e1fbcdfb3198a51af97ebee0e5fc91a | 2ecfed86f7f4bc0a9eec36368e9fd3319ebaac6c | refs/heads/master | 2020-06-23T20:06:47.745014 | 2019-07-28T03:20:51 | 2019-07-28T03:20:51 | 198,739,697 | 0 | 0 | null | 2019-07-25T02:11:27 | 2019-07-25T02:11:26 | null | UTF-8 | Python | false | false | 11,449 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# CAVEAT UTILITOR
#
# This file was automatically generated by TatSu.
#
# https://pypi.python.org/pypi/tatsu/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
from tatsu.buffering import Buffer
from tatsu.parsing import Parser
from tatsu.parsing import tatsumasu, leftrec, nomemo
from tatsu.parsing import leftrec, nomemo # noqa
from tatsu.util import re, generic_main # noqa
KEYWORDS = {} # type: ignore
class BoiBuffer(Buffer):
def __init__(
self,
text,
whitespace=None,
nameguard=None,
comments_re=None,
eol_comments_re=None,
ignorecase=None,
namechars='',
**kwargs
):
super(BoiBuffer, self).__init__(
text,
whitespace=whitespace,
nameguard=nameguard,
comments_re=comments_re,
eol_comments_re=eol_comments_re,
ignorecase=ignorecase,
namechars=namechars,
**kwargs
)
class BoiParser(Parser):
def __init__(
self,
whitespace=None,
nameguard=None,
comments_re=None,
eol_comments_re=None,
ignorecase=None,
left_recursion=True,
parseinfo=True,
keywords=None,
namechars='',
buffer_class=BoiBuffer,
**kwargs
):
if keywords is None:
keywords = KEYWORDS
super(BoiParser, self).__init__(
whitespace=whitespace,
nameguard=nameguard,
comments_re=comments_re,
eol_comments_re=eol_comments_re,
ignorecase=ignorecase,
left_recursion=left_recursion,
parseinfo=parseinfo,
keywords=keywords,
namechars=namechars,
buffer_class=buffer_class,
**kwargs
)
@tatsumasu()
def _id_(self): # noqa
self._pattern('[a-zA-Z][a-zA-Z0-9_]*')
@tatsumasu()
def _var_(self): # noqa
self._id_()
@tatsumasu()
def _float_(self): # noqa
self._pattern('[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?')
@tatsumasu()
def _value_(self): # noqa
self._float_()
self.name_last_node('@')
@tatsumasu()
def _expr_(self): # noqa
with self._choice():
with self._option():
self._value_()
self.name_last_node('@')
with self._option():
self._var_()
self.name_last_node('@')
with self._option():
self._token('(')
self._cut()
self._base_expr_()
self.name_last_node('@')
self._cut()
self._token(')')
self._error('no available options')
@tatsumasu()
def _multiplicative_expr_(self): # noqa
self._expr_()
self.add_last_node_to_name('@')
def block1():
with self._group():
with self._group():
with self._choice():
with self._option():
self._token('*')
with self._option():
self._token('/')
self._error('no available options')
self._cut()
self._expr_()
self.add_last_node_to_name('@')
self._closure(block1)
@tatsumasu()
def _pow_expr_(self): # noqa
self._multiplicative_expr_()
self.add_last_node_to_name('@')
def block1():
with self._group():
self._token('**')
self._cut()
self._multiplicative_expr_()
self.add_last_node_to_name('@')
self._closure(block1)
@tatsumasu()
def _additive_expr_(self): # noqa
self._pow_expr_()
self.add_last_node_to_name('@')
def block1():
with self._group():
with self._group():
with self._choice():
with self._option():
self._token('+')
with self._option():
self._token('-')
self._error('no available options')
self._cut()
self._pow_expr_()
self.add_last_node_to_name('@')
self._closure(block1)
@tatsumasu()
def _base_expr_(self): # noqa
with self._choice():
with self._option():
self._lambda_expr_()
self.name_last_node('@')
with self._option():
self._let_expr_()
self.name_last_node('@')
with self._option():
self._if_expr_()
self.name_last_node('@')
with self._option():
self._function_call_expr_()
self.name_last_node('@')
with self._option():
self._additive_expr_()
self.name_last_node('@')
self._error('no available options')
@tatsumasu()
def _bool_expr_(self): # noqa
with self._choice():
with self._option():
self._comparison_expr_()
self.name_last_node('@')
with self._option():
self._condition_expr_()
self.name_last_node('@')
self._error('no available options')
@tatsumasu()
def _comparison_expr_(self): # noqa
self._base_expr_()
with self._group():
with self._choice():
with self._option():
self._token('>=')
with self._option():
self._token('>')
with self._option():
self._token('<=')
with self._option():
self._token('<')
with self._option():
self._token('=')
with self._option():
self._token('<>')
self._error('no available options')
self._cut()
self._base_expr_()
@tatsumasu()
def _condition_expr_(self): # noqa
self._base_expr_()
@tatsumasu()
def _function_call_expr_(self): # noqa
self._id_()
self.add_last_node_to_name('@')
with self._group():
with self._choice():
with self._option():
self._token('(')
self._token(')')
with self._option():
def block2():
self._expr_()
self._positive_closure(block2)
self._error('no available options')
self.add_last_node_to_name('@')
@tatsumasu()
def _let_expr_(self): # noqa
self._token('let')
self._id_()
self.add_last_node_to_name('@')
self._token('=')
self._cut()
self._base_expr_()
self.add_last_node_to_name('@')
self._cut()
self._token('in')
self._cut()
self._base_expr_()
self.add_last_node_to_name('@')
@tatsumasu()
def _lambda_expr_(self): # noqa
self._token('let')
self._id_()
self.add_last_node_to_name('@')
with self._group():
with self._choice():
with self._option():
self._token('(')
self._token(')')
with self._option():
def block2():
self._id_()
self._positive_closure(block2)
self._error('no available options')
self.add_last_node_to_name('@')
self._token('=')
self._cut()
self._base_expr_()
self.add_last_node_to_name('@')
self._cut()
self._token('in')
self._cut()
self._base_expr_()
self.add_last_node_to_name('@')
@tatsumasu()
def _if_expr_(self): # noqa
self._token('if')
self._cut()
self._bool_expr_()
self.add_last_node_to_name('@')
self._cut()
self._token('then')
self._cut()
self._base_expr_()
self.add_last_node_to_name('@')
self._cut()
self._token('else')
self._cut()
self._base_expr_()
self.add_last_node_to_name('@')
@tatsumasu()
def _function_(self): # noqa
self._token('let')
self._id_()
self.add_last_node_to_name('@')
with self._group():
with self._choice():
with self._option():
self._token('(')
self._token(')')
with self._option():
def block2():
self._id_()
self._positive_closure(block2)
self._error('no available options')
self.add_last_node_to_name('@')
self._cut()
self._token('=')
self._cut()
self._base_expr_()
self.add_last_node_to_name('@')
@tatsumasu()
def _program_(self): # noqa
def block1():
with self._choice():
with self._option():
self._function_()
with self._option():
self._base_expr_()
self._error('no available options')
self._closure(block1)
self.name_last_node('@')
self._check_eof()
@tatsumasu()
def _start_(self): # noqa
self._program_()
class BoiSemantics(object):
def id(self, ast): # noqa
return ast
def var(self, ast): # noqa
return ast
def float(self, ast): # noqa
return ast
def value(self, ast): # noqa
return ast
def expr(self, ast): # noqa
return ast
def multiplicative_expr(self, ast): # noqa
return ast
def pow_expr(self, ast): # noqa
return ast
def additive_expr(self, ast): # noqa
return ast
def base_expr(self, ast): # noqa
return ast
def bool_expr(self, ast): # noqa
return ast
def comparison_expr(self, ast): # noqa
return ast
def condition_expr(self, ast): # noqa
return ast
def function_call_expr(self, ast): # noqa
return ast
def let_expr(self, ast): # noqa
return ast
def lambda_expr(self, ast): # noqa
return ast
def if_expr(self, ast): # noqa
return ast
def function(self, ast): # noqa
return ast
def program(self, ast): # noqa
return ast
def start(self, ast): # noqa
return ast
def main(filename, start=None, **kwargs):
if start is None:
start = 'id'
if not filename or filename == '-':
text = sys.stdin.read()
else:
with open(filename) as f:
text = f.read()
parser = BoiParser()
return parser.parse(text, rule_name=start, filename=filename, **kwargs)
if __name__ == '__main__':
import json
from tatsu.util import asjson
ast = generic_main(main, BoiParser, name='Boi')
print('AST:')
print(ast)
print()
print('JSON:')
print(json.dumps(asjson(ast), indent=2))
print()
| [
"[email protected]"
] | |
a174ca449539006233ff7a4acea1252aef8eb3eb | 0ab90ab559eab46b583b4b1fdd4a5bb3f55b7793 | /python/ray/experimental/workflow/common.py | 3c40c555e0eab6747e2da0c8fe41e1c1b84e7018 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | swag1ong/ray | b22cd5ebab96c30f15b00a7d044fdeb7543a4616 | fdbeef604692aa308973988b32405ec0d70f9f40 | refs/heads/master | 2023-06-25T21:55:44.398516 | 2021-07-26T00:39:24 | 2021-07-26T00:39:24 | 389,518,857 | 2 | 0 | Apache-2.0 | 2021-07-26T05:33:40 | 2021-07-26T05:33:39 | null | UTF-8 | Python | false | false | 7,714 | py | from enum import Enum, unique
from collections import deque
import re
from typing import Dict, List, Optional, Callable, Set, Iterator, Any
import unicodedata
import uuid
from dataclasses import dataclass
import ray
from ray import ObjectRef
# Alias types
StepID = str
WorkflowOutputType = ObjectRef
@unique
class WorkflowStatus(str, Enum):
# There is at least a remote task running in ray cluster
RUNNING = "RUNNING"
# It got canceled and can't be resumed later.
CANCELED = "CANCELED"
# The workflow runs successfully.
SUCCESSFUL = "SUCCESSFUL"
# The workflow failed with an applicaiton error.
# It can be resumed.
FAILED = "FAILED"
# The workflow failed with a system error, i.e., ray shutdown.
# It can be resumed.
RESUMABLE = "RESUMABLE"
@dataclass
class WorkflowInputs:
# The object ref of the input arguments.
args: ObjectRef
# The object refs in the arguments.
object_refs: List[ObjectRef]
# TODO(suquark): maybe later we can replace it with WorkflowData.
# The workflows in the arguments.
workflows: "List[Workflow]"
@dataclass
class WorkflowData:
# The workflow step function body.
func_body: Callable
# The arguments of a workflow.
inputs: WorkflowInputs
# The num of retry for application exception
max_retries: int
# Whether the user want to handle the exception mannually
catch_exceptions: bool
# ray_remote options
ray_options: Dict[str, Any]
def to_metadata(self) -> Dict[str, Any]:
f = self.func_body
return {
"name": f.__module__ + "." + f.__qualname__,
"object_refs": [r.hex() for r in self.inputs.object_refs],
"workflows": [w.id for w in self.inputs.workflows],
"max_retries": self.max_retries,
"catch_exceptions": self.catch_exceptions,
"ray_options": self.ray_options,
}
@dataclass
class WorkflowMetaData:
# The current status of the workflow
status: WorkflowStatus
def slugify(value: str, allow_unicode=False) -> str:
"""Adopted from
https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, dots or hyphens. Also strip leading and
trailing whitespace.
"""
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = re.sub(r"[^\w.\-]", "", value).strip()
return re.sub(r"[-\s]+", "-", value)
class Workflow:
def __init__(self, workflow_data: WorkflowData):
if workflow_data.ray_options.get("num_returns", 1) > 1:
raise ValueError("Workflow should have one return value.")
self._data = workflow_data
self._executed: bool = False
self._output: Optional[WorkflowOutputType] = None
self._step_id: StepID = slugify(
self._data.func_body.__qualname__) + "." + uuid.uuid4().hex
@property
def executed(self) -> bool:
return self._executed
@property
def output(self) -> WorkflowOutputType:
if not self._executed:
raise Exception("The workflow has not been executed.")
return self._output
@property
def id(self) -> StepID:
return self._step_id
def execute(self,
outer_most_step_id: Optional[StepID] = None,
last_step_of_workflow: bool = False) -> ObjectRef:
"""Trigger workflow execution recursively.
Args:
outer_most_step_id: See
"step_executor.execute_workflow" for explanation.
last_step_of_workflow: The step that generates the output of the
workflow (including nested steps).
"""
if self.executed:
return self._output
from ray.experimental.workflow import step_executor
output = step_executor.execute_workflow_step(self._step_id, self._data,
outer_most_step_id,
last_step_of_workflow)
if not isinstance(output, WorkflowOutputType):
raise TypeError("Unexpected return type of the workflow.")
self._output = output
self._executed = True
return output
def iter_workflows_in_dag(self) -> Iterator["Workflow"]:
"""Collect all workflows in the DAG linked to the workflow
using BFS."""
# deque is used instead of queue.Queue because queue.Queue is aimed
# at multi-threading. We just need a pure data structure here.
visited_workflows: Set[Workflow] = {self}
q = deque([self])
while q: # deque's pythonic way to check emptyness
w: Workflow = q.popleft()
for p in w._data.inputs.workflows:
if p not in visited_workflows:
visited_workflows.add(p)
q.append(p)
yield w
@property
def data(self) -> WorkflowData:
"""Get the workflow data."""
return self._data
def __reduce__(self):
raise ValueError(
"Workflow is not supposed to be serialized by pickle. "
"Maybe you are passing it to a Ray remote function, "
"returning it from a Ray remote function, or using "
"'ray.put()' with it?")
def run(self, workflow_id: Optional[str] = None) -> Any:
"""Run a workflow.
Examples:
>>> @workflow.step
... def book_flight(origin: str, dest: str) -> Flight:
... return Flight(...)
>>> @workflow.step
... def book_hotel(location: str) -> Reservation:
... return Reservation(...)
>>> @workflow.step
... def finalize_trip(bookings: List[Any]) -> Trip:
... return Trip(...)
>>> flight1 = book_flight.step("OAK", "SAN")
>>> flight2 = book_flight.step("SAN", "OAK")
>>> hotel = book_hotel.step("SAN")
>>> trip = finalize_trip.step([flight1, flight2, hotel])
>>> result = trip.run()
Args:
workflow_id: A unique identifier that can be used to resume the
workflow. If not specified, a random id will be generated.
"""
return ray.get(self.run_async(workflow_id))
def run_async(self, workflow_id: Optional[str] = None) -> ObjectRef:
"""Run a workflow asynchronously.
Examples:
>>> @workflow.step
... def book_flight(origin: str, dest: str) -> Flight:
... return Flight(...)
>>> @workflow.step
... def book_hotel(location: str) -> Reservation:
... return Reservation(...)
>>> @workflow.step
... def finalize_trip(bookings: List[Any]) -> Trip:
... return Trip(...)
>>> flight1 = book_flight.step("OAK", "SAN")
>>> flight2 = book_flight.step("SAN", "OAK")
>>> hotel = book_hotel.step("SAN")
>>> trip = finalize_trip.step([flight1, flight2, hotel])
>>> result = ray.get(trip.run_async())
Args:
workflow_id: A unique identifier that can be used to resume the
workflow. If not specified, a random id will be generated.
"""
# TODO(suquark): avoid cyclic importing
from ray.experimental.workflow.execution import run
return run(self, workflow_id)
| [
"[email protected]"
] | |
ecaeb6fd0293027f32aa44c7a539c89169160328 | 9f2fdb37e1dd7fd926d45fc22ecab5d3c0d6c4ab | /LDA/22.2.netease_news.py | 4d8b231eef12af009b0754091faf983191997db0 | [] | no_license | luoguohao/python2_machine_learning | 40c67028bc46b0d81ee0f28fa692be75eabaff9a | 3f6c893cf977a9ffa8b2cb18a39947c5d59600ef | refs/heads/master | 2021-05-05T21:12:00.384861 | 2017-12-28T07:49:17 | 2017-12-28T07:49:17 | 115,509,874 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | # !/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from gensim import corpora, models, similarities
from pprint import pprint
import time
# import logging
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def load_stopword():
f_stop = open('22.stopword.txt')
sw = [line.strip() for line in f_stop]
f_stop.close()
return sw
if __name__ == '__main__':
# 网易新闻数据,使用gensim库分析
print '初始化停止词列表 --'
t_start = time.time()
stop_words = load_stopword()
print '开始读入语料数据 -- '
f = open('22.news.dat') #22.LDA_test.txt
texts = [[word for word in line.strip().lower().split() if word not in stop_words] for line in f]
# texts = [line.strip().split() for line in f]
print '读入语料数据完成,用时%.3f秒' % (time.time() - t_start)
f.close()
M = len(texts)
print '文本数目:%d个' % M
# pprint(texts)
print '正在建立词典 --'
dictionary = corpora.Dictionary(texts)
V = len(dictionary)
print '正在计算文本向量 --'
corpus = [dictionary.doc2bow(text) for text in texts]
print '正在计算文档TF-IDF --'
t_start = time.time()
corpus_tfidf = models.TfidfModel(corpus)[corpus]
print '建立文档TF-IDF完成,用时%.3f秒' % (time.time() - t_start)
print 'LDA模型拟合推断 --'
num_topics = 30
t_start = time.time()
lda = models.LdaModel(corpus_tfidf, num_topics=num_topics, id2word=dictionary,
alpha=0.01, eta=0.01, minimum_probability=0.001,
update_every=1, chunksize=100, passes=1)
print 'LDA模型完成,训练时间为\t%.3f秒' % (time.time() - t_start)
# # 所有文档的主题
# doc_topic = [a for a in lda[corpus_tfidf]]
# print 'Document-Topic:\n'
# pprint(doc_topic)
# 随机打印某10个文档的主题
num_show_topic = 10 # 每个文档显示前几个主题
print '10个文档的主题分布:'
doc_topics = lda.get_document_topics(corpus_tfidf) # 所有文档的主题分布
idx = np.arange(M)
np.random.shuffle(idx)
idx = idx[:10]
for i in idx:
topic = np.array(doc_topics[i])
topic_distribute = np.array(topic[:, 1])
# print topic_distribute
topic_idx = topic_distribute.argsort()[:-num_show_topic-1:-1]
print ('第%d个文档的前%d个主题:' % (i, num_show_topic)), topic_idx
print topic_distribute[topic_idx]
num_show_term = 7 # 每个主题显示几个词
print '每个主题的词分布:'
for topic_id in range(num_topics):
print '主题#%d:\t' % topic_id
term_distribute_all = lda.get_topic_terms(topicid=topic_id)
term_distribute = term_distribute_all[:num_show_term]
term_distribute = np.array(term_distribute)
term_id = term_distribute[:, 0].astype(np.int)
print '词:\t',
for t in term_id:
print dictionary.id2token[t],
print
# print '\n概率:\t', term_distribute[:, 1]
| [
"[email protected]"
] | |
4abe47ce110bd58ce4ebf9615f794bbff3b4f553 | ee2f57ffb3c0bec9a196090022a623a342a9ce96 | /PythonApp/FlaskWebProject1/runserver.py | dbe79fd5e33d3bd051c2ce33ad1fd4476e00178c | [] | no_license | cherho0/pythonApp | c313f2b2869530a79b0cba26d68e2c61df5d5ad1 | 9c7c6fb851358bc85956e9c512ba23b80d8cc3b3 | refs/heads/master | 2021-01-20T03:54:37.721785 | 2017-09-07T03:49:49 | 2017-09-07T03:49:49 | 101,373,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | """
This script runs the FlaskWebProject1 application using a development server.
"""
from os import environ
from FlaskWebProject1 import app
import socket
if __name__ == '__main__':
#HOST = environ.get('SERVER_HOST', 'localhost')
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
print(ip)
app.run(ip, 5555)
| [
"[email protected]"
] | |
ad21dddcaff52dd22e77f283ff4e11ab18a76100 | b8d0b260960e1c43b883049d68c15a7183df200b | /5_py_blog/blog_app/tests.py | ebafc4198267b4929abd66e68f76098e08839139 | [] | no_license | JAreina/python-django | 59ac92d0694522c1d096bed636409d9405c5caba | 66c7c301dec448217df6516198723e1ce987eab7 | refs/heads/master | 2020-03-27T18:34:59.821701 | 2018-09-07T07:49:35 | 2018-09-07T07:49:35 | 146,931,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py |
# Create your tests here.
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.urls import reverse
from .models import Post
class BlogTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username='testuser',
email='[email protected]',
password='xxxxxx'
)
self.post = Post.objects.create(
titulo='A good titulo',
texto='Nice texto content',
autor=self.user,
)
def test_string_representation(self):
post = Post(titulo='A sample titulo')
self.assertEqual(str(post), post.titulo)
def test_post_content(self):
self.assertEqual(f'{self.post.titulo}', 'A good titulo')
self.assertEqual(f'{self.post.autor}', 'testuser')
self.assertEqual(f'{self.post.texto}', 'Nice texto content')
self.assertEqual(f'{self.post.titulo}', 'A good titulo')
self.assertEqual(f'{self.post.autor}', 'testuser')
self.assertEqual(f'{self.post.texto}', 'Nice texto content')
def test_post_list_view(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Nice texto content')
self.assertTemplateUsed(response, 'home.html')
def test_post_detail_view(self):
response = self.client.get('/post/1/')
no_response = self.client.get('/post/100000/')
self.assertEqual(response.status_code, 200)
self.assertEqual(no_response.status_code, 404)
self.assertContains(response, 'A good titulo')
self.assertTemplateUsed(response, 'post_detalle.html')
| [
"[email protected]"
] | |
8c59ff3068e701a47f55427121fb4d45c93db56c | 649e2af15011b3c6326436e91a9dd9af0c3a6f8f | /vnpy/app/spread_trading/engine.py | 0a6901795c79ebaf15b64c56c62d0f2272d57e13 | [
"MIT"
] | permissive | Loopring/vnpy | 6270662260c2fdbeed846f0370d1b5eecea7c7bf | f7945b23e29dab8bfdf064da6a6cb815bb755b17 | refs/heads/loopring-release | 2023-07-16T23:11:10.174728 | 2021-09-06T04:01:00 | 2021-09-06T04:01:00 | 277,985,227 | 21 | 6 | MIT | 2021-01-23T02:21:08 | 2020-07-08T03:59:17 | Python | UTF-8 | Python | false | false | 31,837 | py | import traceback
import importlib
import os
from typing import List, Dict, Set, Callable, Any, Type
from collections import defaultdict
from copy import copy
from pathlib import Path
from datetime import datetime, timedelta
from vnpy.event import EventEngine, Event
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.event import (
EVENT_TICK, EVENT_POSITION, EVENT_CONTRACT,
EVENT_ORDER, EVENT_TRADE, EVENT_TIMER
)
from vnpy.trader.utility import load_json, save_json
from vnpy.trader.object import (
TickData, ContractData, LogData,
SubscribeRequest, OrderRequest
)
from vnpy.trader.constant import (
Direction, Offset, OrderType, Interval
)
from vnpy.trader.converter import OffsetConverter
from .base import (
LegData, SpreadData,
EVENT_SPREAD_DATA, EVENT_SPREAD_POS,
EVENT_SPREAD_ALGO, EVENT_SPREAD_LOG,
EVENT_SPREAD_STRATEGY,
load_bar_data, load_tick_data
)
from .template import SpreadAlgoTemplate, SpreadStrategyTemplate
from .algo import SpreadTakerAlgo
APP_NAME = "SpreadTrading"
class SpreadEngine(BaseEngine):
""""""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
"""Constructor"""
super().__init__(main_engine, event_engine, APP_NAME)
self.active = False
self.data_engine: SpreadDataEngine = SpreadDataEngine(self)
self.algo_engine: SpreadAlgoEngine = SpreadAlgoEngine(self)
self.strategy_engine: SpreadStrategyEngine = SpreadStrategyEngine(self)
self.add_spread = self.data_engine.add_spread
self.remove_spread = self.data_engine.remove_spread
self.get_spread = self.data_engine.get_spread
self.get_all_spreads = self.data_engine.get_all_spreads
self.start_algo = self.algo_engine.start_algo
self.stop_algo = self.algo_engine.stop_algo
def start(self):
""""""
if self.active:
return
self.active = True
self.data_engine.start()
self.algo_engine.start()
self.strategy_engine.start()
def stop(self):
""""""
self.data_engine.stop()
self.algo_engine.stop()
self.strategy_engine.stop()
def write_log(self, msg: str):
""""""
log = LogData(
msg=msg,
gateway_name=APP_NAME
)
event = Event(EVENT_SPREAD_LOG, log)
self.event_engine.put(event)
class SpreadDataEngine:
""""""
setting_filename = "spread_trading_setting.json"
def __init__(self, spread_engine: SpreadEngine):
""""""
self.spread_engine: SpreadEngine = spread_engine
self.main_engine: MainEngine = spread_engine.main_engine
self.event_engine: EventEngine = spread_engine.event_engine
self.write_log = spread_engine.write_log
self.legs: Dict[str, LegData] = {} # vt_symbol: leg
self.spreads: Dict[str, SpreadData] = {} # name: spread
self.symbol_spread_map: Dict[str, List[SpreadData]] = defaultdict(list)
def start(self):
""""""
self.load_setting()
self.register_event()
self.write_log("价差数据引擎启动成功")
def stop(self):
""""""
pass
def load_setting(self) -> None:
""""""
setting = load_json(self.setting_filename)
for spread_setting in setting:
self.add_spread(
spread_setting["name"],
spread_setting["leg_settings"],
spread_setting["active_symbol"],
spread_setting.get("min_volume", 1),
save=False
)
def save_setting(self) -> None:
""""""
setting = []
for spread in self.spreads.values():
leg_settings = []
for leg in spread.legs.values():
price_multiplier = spread.price_multipliers[leg.vt_symbol]
trading_multiplier = spread.trading_multipliers[leg.vt_symbol]
inverse_contract = spread.inverse_contracts[leg.vt_symbol]
leg_setting = {
"vt_symbol": leg.vt_symbol,
"price_multiplier": price_multiplier,
"trading_multiplier": trading_multiplier,
"inverse_contract": inverse_contract
}
leg_settings.append(leg_setting)
spread_setting = {
"name": spread.name,
"leg_settings": leg_settings,
"active_symbol": spread.active_leg.vt_symbol,
"min_volume": spread.min_volume
}
setting.append(spread_setting)
save_json(self.setting_filename, setting)
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event) -> None:
""""""
tick = event.data
leg = self.legs.get(tick.vt_symbol, None)
if not leg:
return
leg.update_tick(tick)
for spread in self.symbol_spread_map[tick.vt_symbol]:
spread.calculate_price()
self.put_data_event(spread)
def process_position_event(self, event: Event) -> None:
""""""
position = event.data
leg = self.legs.get(position.vt_symbol, None)
if not leg:
return
leg.update_position(position)
for spread in self.symbol_spread_map[position.vt_symbol]:
spread.calculate_pos()
self.put_pos_event(spread)
def process_trade_event(self, event: Event) -> None:
""""""
trade = event.data
leg = self.legs.get(trade.vt_symbol, None)
if not leg:
return
leg.update_trade(trade)
for spread in self.symbol_spread_map[trade.vt_symbol]:
spread.calculate_pos()
self.put_pos_event(spread)
def process_contract_event(self, event: Event) -> None:
""""""
contract = event.data
leg = self.legs.get(contract.vt_symbol, None)
if leg:
# Update contract data
leg.update_contract(contract)
req = SubscribeRequest(
contract.symbol, contract.exchange
)
self.main_engine.subscribe(req, contract.gateway_name)
def put_data_event(self, spread: SpreadData) -> None:
""""""
event = Event(EVENT_SPREAD_DATA, spread)
self.event_engine.put(event)
def put_pos_event(self, spread: SpreadData) -> None:
""""""
event = Event(EVENT_SPREAD_POS, spread)
self.event_engine.put(event)
def get_leg(self, vt_symbol: str) -> LegData:
""""""
leg = self.legs.get(vt_symbol, None)
if not leg:
leg = LegData(vt_symbol)
self.legs[vt_symbol] = leg
# Subscribe market data
contract = self.main_engine.get_contract(vt_symbol)
if contract:
leg.update_contract(contract)
req = SubscribeRequest(
contract.symbol,
contract.exchange
)
self.main_engine.subscribe(req, contract.gateway_name)
# Initialize leg position
for direction in Direction:
vt_positionid = f"{vt_symbol}.{direction.value}"
position = self.main_engine.get_position(vt_positionid)
if position:
leg.update_position(position)
return leg
def add_spread(
self,
name: str,
leg_settings: List[Dict],
active_symbol: str,
min_volume: float,
save: bool = True
) -> None:
""""""
if name in self.spreads:
self.write_log("价差创建失败,名称重复:{}".format(name))
return
legs: List[LegData] = []
price_multipliers: Dict[str, int] = {}
trading_multipliers: Dict[str, int] = {}
inverse_contracts: Dict[str, bool] = {}
for leg_setting in leg_settings:
vt_symbol = leg_setting["vt_symbol"]
leg = self.get_leg(vt_symbol)
legs.append(leg)
price_multipliers[vt_symbol] = leg_setting["price_multiplier"]
trading_multipliers[vt_symbol] = leg_setting["trading_multiplier"]
inverse_contracts[vt_symbol] = leg_setting.get(
"inverse_contract", False)
spread = SpreadData(
name,
legs,
price_multipliers,
trading_multipliers,
active_symbol,
inverse_contracts,
min_volume
)
self.spreads[name] = spread
for leg in spread.legs.values():
self.symbol_spread_map[leg.vt_symbol].append(spread)
if save:
self.save_setting()
self.write_log("价差创建成功:{}".format(name))
self.put_data_event(spread)
def remove_spread(self, name: str) -> None:
""""""
if name not in self.spreads:
return
spread = self.spreads.pop(name)
for leg in spread.legs.values():
self.symbol_spread_map[leg.vt_symbol].remove(spread)
self.save_setting()
self.write_log("价差移除成功:{},重启后生效".format(name))
def get_spread(self, name: str) -> SpreadData:
""""""
spread = self.spreads.get(name, None)
return spread
def get_all_spreads(self) -> List[SpreadData]:
""""""
return list(self.spreads.values())
class SpreadAlgoEngine:
""""""
algo_class = SpreadTakerAlgo
def __init__(self, spread_engine: SpreadEngine):
""""""
self.spread_engine: SpreadEngine = spread_engine
self.main_engine: MainEngine = spread_engine.main_engine
self.event_engine: EventEngine = spread_engine.event_engine
self.write_log = spread_engine.write_log
self.spreads: Dict[str: SpreadData] = {}
self.algos: Dict[str: SpreadAlgoTemplate] = {}
self.order_algo_map: dict[str: SpreadAlgoTemplate] = {}
self.symbol_algo_map: dict[str: SpreadAlgoTemplate] = defaultdict(list)
self.algo_count: int = 0
self.vt_tradeids: Set = set()
self.offset_converter: OffsetConverter = OffsetConverter(
self.main_engine
)
def start(self):
""""""
self.register_event()
self.write_log("价差算法引擎启动成功")
def stop(self):
""""""
for algo in self.algos.values():
self.stop_algo(algo)
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
self.event_engine.register(
EVENT_SPREAD_DATA, self.process_spread_event
)
def process_spread_event(self, event: Event):
""""""
spread: SpreadData = event.data
self.spreads[spread.name] = spread
def process_tick_event(self, event: Event):
""""""
tick = event.data
algos = self.symbol_algo_map[tick.vt_symbol]
if not algos:
return
buf = copy(algos)
for algo in buf:
if not algo.is_active():
algos.remove(algo)
else:
algo.update_tick(tick)
def process_order_event(self, event: Event):
""""""
order = event.data
self.offset_converter.update_order(order)
algo = self.order_algo_map.get(order.vt_orderid, None)
if algo and algo.is_active():
algo.update_order(order)
def process_trade_event(self, event: Event):
""""""
trade = event.data
# Filter duplicate trade push
if trade.vt_tradeid in self.vt_tradeids:
return
self.vt_tradeids.add(trade.vt_tradeid)
self.offset_converter.update_trade(trade)
algo = self.order_algo_map.get(trade.vt_orderid, None)
if algo and algo.is_active():
algo.update_trade(trade)
def process_position_event(self, event: Event):
""""""
position = event.data
self.offset_converter.update_position(position)
def process_timer_event(self, event: Event):
""""""
buf = list(self.algos.values())
for algo in buf:
if not algo.is_active():
self.algos.pop(algo.algoid)
else:
algo.update_timer()
def start_algo(
self,
spread_name: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
payup: int,
interval: int,
lock: bool
) -> str:
# Find spread object
spread = self.spreads.get(spread_name, None)
if not spread:
self.write_log("创建价差算法失败,找不到价差:{}".format(spread_name))
return ""
# Generate algoid str
self.algo_count += 1
algo_count_str = str(self.algo_count).rjust(6, "0")
algoid = f"{self.algo_class.algo_name}_{algo_count_str}"
# Create algo object
algo = self.algo_class(
self,
algoid,
spread,
direction,
offset,
price,
volume,
payup,
interval,
lock
)
self.algos[algoid] = algo
# Generate map between vt_symbol and algo
for leg in spread.legs.values():
self.symbol_algo_map[leg.vt_symbol].append(algo)
# Put event to update GUI
self.put_algo_event(algo)
return algoid
def stop_algo(
self,
algoid: str
):
""""""
algo = self.algos.get(algoid, None)
if not algo:
self.write_log("停止价差算法失败,找不到算法:{}".format(algoid))
return
algo.stop()
def put_algo_event(self, algo: SpreadAlgoTemplate) -> None:
""""""
event = Event(EVENT_SPREAD_ALGO, algo)
self.event_engine.put(event)
def write_algo_log(self, algo: SpreadAlgoTemplate, msg: str) -> None:
""""""
msg = f"{algo.algoid}:{msg}"
self.write_log(msg)
def send_order(
self,
algo: SpreadAlgoTemplate,
vt_symbol: str,
price: float,
volume: float,
direction: Direction,
lock: bool
) -> List[str]:
""""""
holding = self.offset_converter.get_position_holding(vt_symbol)
contract = self.main_engine.get_contract(vt_symbol)
if direction == Direction.LONG:
available = holding.short_pos - holding.short_pos_frozen
else:
available = holding.long_pos - holding.long_pos_frozen
# If no position to close, just open new
if not available:
offset = Offset.OPEN
# If enougth position to close, just close old
elif volume < available:
offset = Offset.CLOSE
# Otherwise, just close existing position
else:
volume = available
offset = Offset.CLOSE
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=OrderType.LIMIT,
price=price,
volume=volume
)
# Convert with offset converter
req_list = self.offset_converter.convert_order_request(
original_req, lock)
# Send Orders
vt_orderids = []
for req in req_list:
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
# Check if sending order successful
if not vt_orderid:
continue
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
# Save relationship between orderid and algo.
self.order_algo_map[vt_orderid] = algo
return vt_orderids
def cancel_order(self, algo: SpreadAlgoTemplate, vt_orderid: str) -> None:
""""""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_algo_log(algo, "撤单失败,找不到委托{}".format(vt_orderid))
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def get_tick(self, vt_symbol: str) -> TickData:
""""""
return self.main_engine.get_tick(vt_symbol)
def get_contract(self, vt_symbol: str) -> ContractData:
""""""
return self.main_engine.get_contract(vt_symbol)
class SpreadStrategyEngine:
""""""
setting_filename = "spread_trading_strategy.json"
def __init__(self, spread_engine: SpreadEngine):
""""""
self.spread_engine: SpreadEngine = spread_engine
self.main_engine: MainEngine = spread_engine.main_engine
self.event_engine: EventEngine = spread_engine.event_engine
self.write_log = spread_engine.write_log
self.strategy_setting: Dict[str: Dict] = {}
self.classes: Dict[str: Type[SpreadStrategyTemplate]] = {}
self.strategies: Dict[str: SpreadStrategyTemplate] = {}
self.order_strategy_map: dict[str: SpreadStrategyTemplate] = {}
self.algo_strategy_map: dict[str: SpreadStrategyTemplate] = {}
self.spread_strategy_map: dict[str: SpreadStrategyTemplate] = defaultdict(
list)
self.vt_tradeids: Set = set()
self.load_strategy_class()
def start(self):
""""""
self.load_strategy_setting()
self.register_event()
self.write_log("价差策略引擎启动成功")
def close(self):
""""""
self.stop_all_strategies()
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(
path1, "vnpy.app.spread_trading.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(str(path)):
for filename in filenames:
if filename.split(".")[-1] in ("py", "pyd", "so"):
strategy_module_name = ".".join([module_name, filename.split(".")[0]])
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, SpreadStrategyTemplate) and value is not SpreadStrategyTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def get_all_strategy_class_names(self):
""""""
return list(self.classes.keys())
def load_strategy_setting(self):
"""
Load setting file.
"""
self.strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in self.strategy_setting.items():
self.add_strategy(
strategy_config["class_name"],
strategy_name,
strategy_config["spread_name"],
strategy_config["setting"]
)
def update_strategy_setting(self, strategy_name: str, setting: dict):
"""
Update setting file.
"""
strategy = self.strategies[strategy_name]
self.strategy_setting[strategy_name] = {
"class_name": strategy.__class__.__name__,
"spread_name": strategy.spread_name,
"setting": setting,
}
save_json(self.setting_filename, self.strategy_setting)
def remove_strategy_setting(self, strategy_name: str):
"""
Update setting file.
"""
if strategy_name not in self.strategy_setting:
return
self.strategy_setting.pop(strategy_name)
save_json(self.setting_filename, self.strategy_setting)
def register_event(self):
""""""
ee = self.event_engine
ee.register(EVENT_ORDER, self.process_order_event)
ee.register(EVENT_TRADE, self.process_trade_event)
ee.register(EVENT_SPREAD_DATA, self.process_spread_data_event)
ee.register(EVENT_SPREAD_POS, self.process_spread_pos_event)
ee.register(EVENT_SPREAD_ALGO, self.process_spread_algo_event)
def process_spread_data_event(self, event: Event):
""""""
spread = event.data
strategies = self.spread_strategy_map[spread.name]
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_spread_data)
def process_spread_pos_event(self, event: Event):
""""""
spread = event.data
strategies = self.spread_strategy_map[spread.name]
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_spread_pos)
def process_spread_algo_event(self, event: Event):
""""""
algo = event.data
strategy = self.algo_strategy_map.get(algo.algoid, None)
if strategy:
self.call_strategy_func(
strategy, strategy.update_spread_algo, algo)
def process_order_event(self, event: Event):
""""""
order = event.data
strategy = self.order_strategy_map.get(order.vt_orderid, None)
if strategy:
self.call_strategy_func(strategy, strategy.update_order, order)
def process_trade_event(self, event: Event):
""""""
trade = event.data
strategy = self.order_strategy_map.get(trade.vt_orderid, None)
if strategy:
self.call_strategy_func(strategy, strategy.on_trade, trade)
def call_strategy_func(
self, strategy: SpreadStrategyTemplate, func: Callable, params: Any = None
):
"""
Call function of a strategy and catch any exception raised.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_strategy_log(strategy, msg)
def add_strategy(
self, class_name: str, strategy_name: str, spread_name: str, setting: dict
):
"""
Add a new strategy.
"""
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
strategy_class = self.classes.get(class_name, None)
if not strategy_class:
self.write_log(f"创建策略失败,找不到策略类{class_name}")
return
spread = self.spread_engine.get_spread(spread_name)
if not spread:
self.write_log(f"创建策略失败,找不到价差{spread_name}")
return
strategy = strategy_class(self, strategy_name, spread, setting)
self.strategies[strategy_name] = strategy
# Add vt_symbol to strategy map.
strategies = self.spread_strategy_map[spread_name]
strategies.append(strategy)
# Update to setting file.
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
Edit parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
"""
Remove a strategy.
"""
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
# Remove setting
self.remove_strategy_setting(strategy_name)
# Remove from symbol strategy map
strategies = self.spread_strategy_map[strategy.spread_name]
strategies.remove(strategy)
# Remove from strategies
self.strategies.pop(strategy_name)
return True
def init_strategy(self, strategy_name: str):
""""""
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
return
self.call_strategy_func(strategy, strategy.on_init)
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
def start_strategy(self, strategy_name: str):
""""""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
""""""
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
self.call_strategy_func(strategy, strategy.on_stop)
strategy.stop_all_algos()
strategy.cancel_all_orders()
strategy.trading = False
self.put_strategy_event(strategy)
def init_all_strategies(self):
""""""
for strategy in self.strategies.keys():
self.init_strategy(strategy)
def start_all_strategies(self):
""""""
for strategy in self.strategies.keys():
self.start_strategy(strategy)
def stop_all_strategies(self):
""""""
for strategy in self.strategies.keys():
self.stop_strategy(strategy)
def get_strategy_class_parameters(self, class_name: str):
"""
Get default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def start_algo(
self,
strategy: SpreadStrategyTemplate,
spread_name: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
payup: int,
interval: int,
lock: bool
) -> str:
""""""
algoid = self.spread_engine.start_algo(
spread_name,
direction,
offset,
price,
volume,
payup,
interval,
lock
)
self.algo_strategy_map[algoid] = strategy
return algoid
def stop_algo(self, strategy: SpreadStrategyTemplate, algoid: str):
""""""
self.spread_engine.stop_algo(algoid)
def stop_all_algos(self, strategy: SpreadStrategyTemplate):
""""""
pass
def send_order(
self,
strategy: SpreadStrategyTemplate,
vt_symbol: str,
price: float,
volume: float,
direction: Direction,
offset: Offset,
lock: bool
) -> List[str]:
contract = self.main_engine.get_contract(vt_symbol)
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=OrderType.LIMIT,
price=price,
volume=volume
)
# Convert with offset converter
req_list = self.offset_converter.convert_order_request(
original_req, lock)
# Send Orders
vt_orderids = []
for req in req_list:
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
# Check if sending order successful
if not vt_orderid:
continue
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
# Save relationship between orderid and strategy.
self.order_strategy_map[vt_orderid] = strategy
return vt_orderids
def cancel_order(self, strategy: SpreadStrategyTemplate, vt_orderid: str):
""""""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_strategy_log(
strategy, "撤单失败,找不到委托{}".format(vt_orderid))
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def cancel_all_orders(self, strategy: SpreadStrategyTemplate):
""""""
pass
def put_strategy_event(self, strategy: SpreadStrategyTemplate):
""""""
data = strategy.get_data()
event = Event(EVENT_SPREAD_STRATEGY, data)
self.event_engine.put(event)
def write_strategy_log(self, strategy: SpreadStrategyTemplate, msg: str):
""""""
msg = f"{strategy.strategy_name}:{msg}"
self.write_log(msg)
def send_strategy_email(self, strategy: SpreadStrategyTemplate, msg: str):
""""""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "价差策略引擎"
self.main_engine.send_email(subject, msg)
def load_bar(
self, spread: SpreadData, days: int, interval: Interval, callback: Callable
):
""""""
end = datetime.now()
start = end - timedelta(days)
bars = load_bar_data(spread, interval, start, end)
for bar in bars:
callback(bar)
def load_tick(self, spread: SpreadData, days: int, callback: Callable):
""""""
end = datetime.now()
start = end - timedelta(days)
ticks = load_tick_data(spread, start, end)
for tick in ticks:
callback(tick)
| [
"[email protected]"
] | |
38834c15b947bd3f1e865675bfc8c866c246b9e8 | d4f4293505926285a3449bd7aba29fb4fe07a51c | /wangyi/wangyi/settings.py | c4abd898d64cb8001578cd9120598fc255472092 | [] | no_license | fugui12345/- | 96078509e798e7b55af5632dfdf37969c727894c | ffb9f7d6295cd246d990ea35f28c2fce5035af19 | refs/heads/main | 2023-07-01T06:36:32.951094 | 2021-08-04T18:48:15 | 2021-08-04T18:48:15 | 392,793,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,147 | py | # Scrapy settings for wangyi project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'wangyi'
SPIDER_MODULES = ['wangyi.spiders']
NEWSPIDER_MODULE = 'wangyi.spiders'
LOG_LEVEL = 'ERROR'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'wangyi.middlewares.WangyiSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'wangyi.middlewares.WangyiDownloaderMiddleware': 543,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'wangyi.pipelines.WangyiPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
9a71057ca86eb6931927a2afbb8ea436b8c68c37 | afd44f9bf1469418ae4709f48f2c3c188b45eb73 | /preprocessing/text_processor.py | 88a513a6b2da416865452ab9af1cab27c4987d68 | [] | no_license | zerebom/pytoolkit | 2ed359ec0ef612461dec24b57e746f99f212d540 | 078a2fa786a755d6fe0ee69dd8caecec833fb2fa | refs/heads/master | 2020-06-29T06:20:11.069967 | 2019-09-18T01:59:14 | 2019-09-18T01:59:14 | 200,461,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,065 | py | import urllib.request, urllib.error
import re
import MeCab
import mojimoji
from sklearn.feature_extraction.text import TfidfVectorizer
def get_stopword()->list:
slothlib_path = 'http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt'
slothlib_file = urllib.request.urlopen(url=slothlib_path)
slothlib_stopwords = [line.decode("utf-8").strip() for line in slothlib_file]
slothlib_stopwords = [ss for ss in slothlib_stopwords if not ss==u'']
eng_stop=["a's" , "able" , "about" , "above" , "according" , "accordingly" , "across" , "actually" , "after" , "afterwards" , "again" , "against" , "ain't" , "all" , "allow" , "allows" , "almost" , "alone" , "along" , "already" , "also" , "although" , "always" , "am" , "among" , "amongst" , "an" , "and" , "another" , "any" , "anybody" , "anyhow" , "anyone" , "anything" , "anyway" , "anyways" , "anywhere" , "apart" , "appear" , "appreciate" , "appropriate" , "are" , "aren't" , "around" , "as" , "aside" , "ask" , "asking" , "associated" , "at" , "available" , "away" , "awfully" , "be" , "became" , "because" , "become" , "becomes" , "becoming" , "been" , "before" , "beforehand" , "behind" , "being" , "believe" , "below" , "beside" , "besides" , "best" , "better" , "between" , "beyond" , "both" , "brief" , "but" , "by" , "c'mon" , "c's" , "came" , "can" , "can't" , "cannot" , "cant" , "cause" , "causes" , "certain" , "certainly" , "changes" , "clearly" , "co" , "com" , "come" , "comes" , "concerning" , "consequently" , "consider" , "considering" , "contain" , "containing" , "contains" , "corresponding" , "could" , "couldn't" , "course" , "currently" , "definitely" , "described" , "despite" , "did" , "didn't" , "different" , "do" , "does" , "doesn't" , "doing" , "don't" , "done" , "down" , "downwards" , "during" , "each" , "edu" , "eg" , "eight" , "either" , "else" , "elsewhere" , "enough" , "entirely" , "especially" , "et" , "etc" , "even" , "ever" , "every" , "everybody" , "everyone" , "everything" , "everywhere" , "ex" , "exactly" , "example" , "except" , "far" , "few" , "fifth" , "first" , "five" , "followed" , "following" , "follows" , "for" , "former" , "formerly" , "forth" , "four" , "from" , "further" , "furthermore" , "get" , "gets" , "getting" , "given" , "gives" , "go" , "goes" , "going" , "gone" , "got" , "gotten" , "greetings" , "had" , "hadn't" , "happens" , "hardly" , "has" , "hasn't" , "have" , "haven't" , "having" , "he" , "he's" , "hello" , "help" , "hence" , "her" , "here" , "here's" , "hereafter" , "hereby" , "herein" , "hereupon" , "hers" , "herself" , "hi" , "him" , "himself" , "his" , "hither" , "hopefully" , "how" , "howbeit" , "however" , "i'd" , "i'll" , "i'm" , "i've" , "ie" , "if" , "ignored" , "immediate" , "in" , "inasmuch" , "inc" , "indeed" , "indicate" , "indicated" , "indicates" , "inner" , "insofar" , "instead" , "into" , "inward" , "is" , "isn't" , "it" , "it'd" , "it'll" , "it's" , "its" , "itself" , "just" , "keep" , "keeps" , "kept" , "know" , "known" , "knows" , "last" , "lately" , "later" , "latter" , "latterly" , "least" , "less" , "lest" , "let" , "let's" , "like" , "liked" , "likely" , "little" , "look" , "looking" , "looks" , "ltd" , "mainly" , "many" , "may" , "maybe" , "me" , "mean" , "meanwhile" , "merely" , "might" , "more" , "moreover" , "most" , "mostly" , "much" , "must" , "my" , "myself" , "name" , "namely" , "nd" , "near" , "nearly" , "necessary" , "need" , "needs" , "neither" , "never" , "nevertheless" , "new" , "next" , "nine" , "no" , "nobody" , "non" , "none" , "noone" , "nor" , "normally" , "not" , "nothing" , "novel" , "now" , "nowhere" , "obviously" , "of" , "off" , "often" , "oh" , "ok" , "okay" , "old" , "on" , "once" , "one" , "ones" , "only" , "onto" , "or" , "other" , "others" , "otherwise" , "ought" , "our" , "ours" , "ourselves" , "out" , "outside" , "over" , "overall" , "own" , "particular" , "particularly" , "per" , "perhaps" , "placed" , "please" , "plus" , "possible" , "presumably" , "probably" , "provides" , "que" , "quite" , "qv" , "rather" , "rd" , "re" , "really" , "reasonably" , "regarding" , "regardless" , "regards" , "relatively" , "respectively" , "right" , "said" , "same" , "saw" , "say" , "saying" , "says" , "second" , "secondly" , "see" , "seeing" , "seem" , "seemed" , "seeming" , "seems" , "seen" , "self" , "selves" , "sensible" , "sent" , "serious" , "seriously" , "seven" , "several" , "shall" , "she" , "should" , "shouldn't" , "since" , "six" , "so" , "some" , "somebody" , "somehow" , "someone" , "something" , "sometime" , "sometimes" , "somewhat" , "somewhere" , "soon" , "sorry" , "specified" , "specify" , "specifying" , "still" , "sub" , "such" , "sup" , "sure" , "t's" , "take" , "taken" , "tell" , "tends" , "th" , "than" , "thank" , "thanks" , "thanx" , "that" , "that's" , "thats" , "the" , "their" , "theirs" , "them" , "themselves" , "then" , "thence" , "there" , "there's" , "thereafter" , "thereby" , "therefore" , "therein" , "theres" , "thereupon" , "these" , "they" , "they'd" , "they'll" , "they're" , "they've" , "think" , "third" , "this" , "thorough" , "thoroughly" , "those" , "though" , "three" , "through" , "throughout" , "thru" , "thus" , "to" , "together" , "too" , "took" , "toward" , "towards" , "tried" , "tries" , "truly" , "try" , "trying" , "twice" , "two" , "un" , "under" , "unfortunately" , "unless" , "unlikely" , "until" , "unto" , "up" , "upon" , "us" , "use" , "used" , "useful" , "uses" , "using" , "usually" , "value" , "various" , "very" , "via" , "viz" , "vs" , "want" , "wants" , "was" , "wasn't" , "way" , "we" , "we'd" , "we'll" , "we're" , "we've" , "welcome" , "well" , "went" , "were" , "weren't" , "what" , "what's" , "whatever" , "when" , "whence" , "whenever" , "where" , "where's" , "whereafter" , "whereas" , "whereby" , "wherein" , "whereupon" , "wherever" , "whether" , "which" , "while" , "whither" , "who" , "who's" , "whoever" , "whole" , "whom" , "whose" , "why" , "will" , "willing" , "wish" , "with" , "within" , "without" , "won't" , "wonder" , "would" , "wouldn't" , "yes" , "yet" , "you" , "you'd" , "you'll" , "you're" , "you've" , "your" , "yours" , "yourself" , "yourselves" , "zero"]
sw=slothlib_stopwords+eng_stop
return sw
tagger = MeCab.Tagger('-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd')
def normalize_number(text):
# 連続した数字を0で置換
text = re.sub(r'[0-9]+', '0', text)
text = re.sub(r'[10-99]+', '00', text)
text = re.sub(r'[100-999]+', '000', text)
text = re.sub(r'[1000-9999]+', '0000', text)
replaced_text = re.sub(r'[10000-9999999]+', '0000', text)
return replaced_text
def delete_number(text):
# 連続した数字を0で置換
replaced_text = re.sub(r'[0-9999999]+', '', text)
return replaced_text
# 入力されたテキストを単語単位に分割して返却する関数
def parse_text(text, min_word_len=1):
words = []
try:
tagger.parse(text).rstrip().split("\n")[:-1]
except:
return ""
for morph in tagger.parse(text).rstrip().split("\n")[:-1]:
#表層系
# word=morph.split("\t")[0]
#標準形
word = morph.split(",")[-3]
word_cls = morph.split("\t")[1].split(",")[0]
word = mojimoji.zen_to_han(word, kana=False).lower()
if not word in sw:
if len(word) > min_word_len:
#品詞によるスクリーニング
# if word_cls in ['名詞']:
words.append(delete_number(word))
return " ".join(words)
def tokenize(s):
return re.split('[ !"#$%&\'(+)*,-./:;<=>?@\\\[\]^_`{|}~“”¨«»®´·º½¾¿¡§£₤‘’。、]', s)
def get_len(text):
"""df[col]=df[col].apply(get_len)"""
num = len(text) if type(text) == str else 0
return num
tfidf_vectorizer = TfidfVectorizer(max_df=0.5, min_df=1,
max_features=10000, norm='l2',
tokenizer=tokenize, ngram_range=(1, 2))
| [
"[email protected]"
] | |
6114e7a67a5459c344b648dc4ae2266a17a375b1 | 3b8955841f6982c575331ac78ce91deb327902ee | /utils/ax.py | 858f4f79bc2837700f6fdd88f371a3710a7d33d6 | [] | no_license | kyshel/ich | 6c7b1e66ca28c7c633d800eb7f4d3ee76e05c056 | 1cd4a17a9abf63afa72195fffdc7051fd87eed45 | refs/heads/main | 2023-07-22T17:27:58.461595 | 2021-08-21T13:42:54 | 2021-08-21T13:42:54 | 370,445,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,300 | py | # general functions that often use
import os
import pickle
import json
import csv
from time import localtime, strftime
from pathlib import Path
import time
import glob
import re
import math
from decimal import Decimal
from pathlib import Path
import glob
import re
import torch # no need repro cause only save here
import os
from tqdm import tqdm
import shutil
os.environ['TZ'] = 'Asia/Shanghai'
time.tzset()
try:
import wandb
except ImportError:
wandb = None
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
colors = {'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'optimizer_state_dict', 'training_results', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1024**2 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def increment_path(path, exist_ok=False, sep='', mkdir=False):
# Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
suffix = path.suffix
path = path.with_suffix('')
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
path = Path(f"{path}{sep}{n}{suffix}") # update path
dir = path if path.suffix == '' else path.parent # directory
if not dir.exists() and mkdir:
dir.mkdir(parents=True, exist_ok=True) # make directory
return path
def get_stratified(inputs,ratio): # list, float
magnitude = math.floor(math.log(len(inputs), 10)) # Ex: 10 > 1, 100 > 2
margin_ratio = str(round(float(ratio), magnitude))
numerator, denominator = Decimal(margin_ratio).as_integer_ratio()
# print(numerator,denominator)
return [v for i,v in enumerate(inputs) if i % denominator < numerator]
def nowtime(style = 0):
if style == 0:
fmt = "%Y%m%d_%H%M%S"
elif style == 1:
fmt = "%Y-%m-%d %H:%M:%S"
return strftime(fmt, localtime())
def mkdir(fp):
Path(fp).mkdir(parents=True, exist_ok=True)
def clean_dir(dir_name ):
fp_list = glob.glob( os.path.join(dir_name,'*')) + glob.glob(os.path.join(dir_name,'.*'))
for f in tqdm( fp_list, desc=f"Cleaning {dir_name}" ) :
# os.remove(f)
shutil.rmtree(f)
def get_fp_list(dir_name,ext = None):
fp_list =[]
for root, dirs, files in os.walk(dir_name):
for file in files:
if ext:
if file.endswith(ext):
filepath = os.path.join(root, file)
fp_list += [filepath]
else:
filepath = os.path.join(root, file)
fp_list += [filepath]
return fp_list
# https://stackoverflow.com/questions/3086973/how-do-i-convert-this-list-of-dictionaries-to-a-csv-file
def dict2csvfile(toCSV,filename = 'tmp.csv',bom = 0,silent=0):
keys = toCSV[0].keys()
with open(filename, 'w', encoding='utf-8', newline='') as output_file:
if bom: output_file.write('\ufeff')
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(toCSV)
if not silent: print('dict2csvfile ok! please check ' + filename)
# https://stackoverflow.com/questions/18337407/saving-utf-8-texts-with-json-dumps-as-utf8-not-as-u-escape-sequence
def dict2jsonfile(dict_src,filename='tmp.json',silent=0):
with open(filename, 'w', encoding='utf-8') as fp:
json.dump(dict_src, fp,indent=4, sort_keys=False,ensure_ascii=False)
if not silent: print('dict2jsonfile ok! please check '+filename)
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
def ins(v):
print("ins>>>")
print('>dir:')
print(dir(v))
print('>type:')
print(type(v))
print('>print:')
print(v)
print("ins<<<")
def save_obj(obj1, fp='tmp_obj.pkl',silent = 0):
if not silent: print('saving obj to ' + fp)
with open(fp, 'wb') as handle:
pickle.dump(obj1, handle, protocol=pickle.HIGHEST_PROTOCOL)
if not silent: print('save_obj ok! ' )
pass
def load_obj(filename='tmp_obj.txt', silent = 0):
if not silent: print('loading obj ' + filename)
with open(filename, 'rb') as handle:
b = pickle.load(handle)
if not silent: print('load_obj ok! ' )
return b
pass
# alias
save = save_obj
load = load_obj
if __name__ == '__main__':
# do nothing
print('you called main, do nothing')
| [
"[email protected]"
] | |
19566dd1b2d10137c2e3943ea5ac2e7ec000f503 | 55e14ece094383ca13ad22d638d545767a637327 | /ClassificationModel_TrainedW2V/repeat.py | adaa7b183171cce43ebab70c21964e334860e323 | [] | no_license | UnitForDataScience/Neptune-Classification | e16ede89d2e3afc434a787d018d99c2393525811 | 114d78c1fcfd1ec329636fde3401fdedd8d418ef | refs/heads/master | 2022-06-09T23:25:04.386608 | 2020-05-06T06:05:10 | 2020-05-06T06:05:10 | 261,607,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | import os
import main
for i in range(4):
os.system('python main.py')
| [
"[email protected]"
] | |
90ebb27f00615a63b07c8ff1cd495f77293c88ea | 8f784ca91cd56818dc6e38d5e602756a913e13b4 | /modbus_tcp_server/network/accept_thread.py | a512980848dd5a91ed2ce730cf546634df5968c6 | [
"MIT"
] | permissive | smok-serwis/modbus-tcp-server | 9a02a3c5e9d0875179903bc4171b4d782d6d48b9 | 558eca908b6762280a74b16d78d56dc047a9dace | refs/heads/master | 2023-06-14T01:26:07.299860 | 2021-07-15T13:59:15 | 2021-07-15T13:59:15 | 339,780,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | import socket
import typing as tp
from satella.coding import silence_excs
from satella.coding.concurrent import TerminableThread
from .conn_thread import ConnectionThread
from ..data_source import BaseDataSource, TestingDataSource
from ..datagrams import MODBUSTCPMessage
from ..processor import ModbusProcessor
class ModbusTCPServer(TerminableThread):
def __init__(self, bind_ifc: str, bind_port: int,
data_source: tp.Optional[BaseDataSource] = None,
backlog: int = 128):
super().__init__(name='accept')
if data_source is None:
data_source = TestingDataSource()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((bind_ifc, bind_port))
self.backlog = backlog
self.processor = ModbusProcessor(data_source)
def prepare(self) -> None:
self.socket.listen(self.backlog)
self.socket.setblocking(True)
self.socket.settimeout(5)
def process_message(self, msg: MODBUSTCPMessage) -> MODBUSTCPMessage:
return self.processor.process(msg)
def cleanup(self):
self.socket.close()
@silence_excs(socket.timeout)
def loop(self) -> None:
sock, addr = self.socket.accept()
ConnectionThread(sock, addr, self).start()
| [
"[email protected]"
] | |
4c8db1dea09dd07cd75539c214a17c0ac7ae5b64 | 1738a24cc31c3e659384d9c3cafaf9d15b1eac52 | /ChatBot.py | e9bde94ee7ef2562b2d6bcedd554077b56e4a139 | [] | no_license | michaelcicero/PythonChatBot | 7e7bb54e6009b80698b898e468a1dc375019b6fc | e0f8d47e42b05e5823880d1c21534d9560050072 | refs/heads/master | 2022-12-09T20:07:04.335082 | 2020-09-09T20:55:24 | 2020-09-09T20:55:24 | 294,226,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,946 | py | # # - * - coding: utf-8 - * -
""" Eliza homework """
_author_="Michael Cicero"
import re
#find bye function
def findBye(a):
if(re.findall(r'bye', curInp, re.IGNORECASE)):
print("It was nice speaking with you. Goodbye")
quit()
#find ed function
def findEd(b):
l = b.split(" ")
for ed in l:
if(re.findall('ed$', ed)):
return ed
#get name
print("Greetings. I'm chatbot. Can you remind me of your name?")
name = input()
curInp = name
findBye(curInp)
if(re.findall(r'(i am)|(my name is)', curInp, re.IGNORECASE)):
name = re.sub(r'([iI] am)|([mM]y name is)',"", curInp)
while len(name) == 0:
print("What is your name?")
name = input()
curInp = name
findBye(curInp)
#how are you
print("Hello " + name + "! How are you doing today?")
hay = input()
curInp = hay
if(re.findall(r'([you?]{4})', curInp, re.IGNORECASE)):
print("Don't worry about me. I am more interested in hearing about you. I'll be asking the questions from now on.")
while len(hay) == 0:
print("Sorry, I couldn't hear you. How are you doing today?")
hay = input()
curInp = hay
findBye(curInp)
#while loop containing all further checks
while True:
if(re.findall(r'(ok|alright|fine|not bad|not too bad)',curInp, re.IGNORECASE)):
print("I'm glad you are feeling ok. Why aren't you good?")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(sad|depress|sick|unwell|bad|poor|not well|not very well|not good|not very good|not great)',curInp, re.IGNORECASE)):
print("I'm sorry you arent feeling well. Why haven't you been feeling well?")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(good|great|excellent|superb|wonderful|extraordinary|well|happy|joy)',curInp, re.IGNORECASE)):
print("Wonderful, I'm glad you're good. Why are you so good?")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(mom|mother|mommmy|momma)',curInp, re.IGNORECASE)):
print("Tell me more about your mother.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(friend|bud|pal)',curInp, re.IGNORECASE)):
print("Tell me more about your friend.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(dad|father|daddy)',curInp, re.IGNORECASE)):
print("Tell me more about your father.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(brother|bro)',curInp, re.IGNORECASE)):
print("Tell me more about your brother.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(sister|sis)',curInp, re.IGNORECASE)):
print("Tell me more about your sister.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
else:
print("Intresting, mind telling me a bit more about this?")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
| [
"[email protected]"
] | |
63d50f46e6763c50b438c35733b409c516416606 | 33cff13b90fdd628560baef8b3f6d68ceaad912c | /tests/test_commands/test_package_downloads.py | e4b7b094ed22878a396f1c1e911369fd769b9165 | [
"MIT"
] | permissive | rosdyana/dephell | 3139140d6f16288177705020a625897f91f2514b | 993a212ce17dda04a878ceac64854d809f3dc47b | refs/heads/master | 2020-08-06T09:38:21.150070 | 2019-09-27T16:58:23 | 2019-09-27T16:58:23 | 212,927,181 | 0 | 0 | MIT | 2019-10-05T01:22:23 | 2019-10-05T01:22:23 | null | UTF-8 | Python | false | false | 708 | py | # built-in
import json
# external
import pytest
# project
from dephell.commands import PackageDownloadsCommand
from dephell.config import Config
@pytest.mark.skipif(True, reason='disable while pypistat is down')
@pytest.mark.allow_hosts()
def test_package_downloads_command(capsys):
config = Config()
config.attach({
'level': 'WARNING',
'silent': True,
})
command = PackageDownloadsCommand(argv=['DJANGO'], config=config)
result = command()
captured = capsys.readouterr()
output = json.loads(captured.out)
assert result is True
assert len(output['pythons']) > 4
assert len(output['systems']) > 2
assert '█' in output['pythons'][0]['chart']
| [
"[email protected]"
] | |
b327266507aba7d35b343d48b9710ab0f36214ad | 0da165e72316bff15e8330a9d1789bd743d30689 | /quadcopter/agents/ddpg_v1/ddpg_actor.py | 83c47f1620e3d085e795d5e60e55546294fba7ba | [] | no_license | padickinson/RL-Quadcopter-2 | 1267ce761780ea5d79f705ca2286ae780b95298c | ebb70ac7f7252b69cd94111bbe0637a17196e87e | refs/heads/master | 2020-03-29T10:19:54.320868 | 2018-09-21T21:31:43 | 2018-09-21T21:31:43 | 149,799,738 | 0 | 0 | null | 2018-09-21T18:02:26 | 2018-09-21T18:02:26 | null | UTF-8 | Python | false | false | 2,945 | py | from keras import layers, models, optimizers
from keras import backend as K
class Actor:
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, action_low, action_high, lr, alpha):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
action_low (array): Min value of each action dimension
action_high (array): Max value of each action dimension
"""
self.state_size = state_size
self.action_size = action_size
self.action_low = action_low
self.action_high = action_high
self.action_range = self.action_high - self.action_low
# Initialize any other variables here
self.learning_rate = lr
self.hidden_size1 = 32
self.hidden_size2 = 32
self.alpha = alpha
self.build_model()
def build_model(self):
"""Build an actor (policy) network that maps states -> actions."""
# Define input layer (states)
states = layers.Input(shape=(self.state_size,), name='states')
# Add hidden layers
net = layers.BatchNormalization()(states)
net = layers.Dense(units=self.hidden_size1, activation='linear')(net)
net = layers.advanced_activations.LeakyReLU(self.alpha)(net)
net = layers.BatchNormalization()(net)
net = layers.Dense(units=self.hidden_size2, activation='linear')(net)
net = layers.advanced_activations.LeakyReLU(self.alpha)(net)
net = layers.BatchNormalization()(net)
# net = layers.Dense(units=32, activation='relu')(net)
# net = layers.BatchNormalization()(net)
# Try different layer sizes, activations, add batch normalization, regularizers, etc.
# Add final output layer with sigmoid activation
raw_actions = layers.Dense(units=self.action_size, activation='tanh',
name='raw_actions')(net)
# Scale [0, 1] output for each action dimension to proper range
actions = layers.Lambda(lambda x: (x * self.action_range) + self.action_low,
name='actions')(raw_actions)
# Create Keras model
self.model = models.Model(inputs=states, outputs=actions)
# Define loss function using action value (Q value) gradients
action_gradients = layers.Input(shape=(self.action_size,))
loss = K.mean(-action_gradients * actions)
# Incorporate any additional losses here (e.g. from regularizers)
# Define optimizer and training function
optimizer = optimizers.Adam(lr=self.learning_rate)
updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)
self.train_fn = K.function(
inputs=[self.model.input, action_gradients, K.learning_phase()],
outputs=[],
updates=updates_op)
| [
"[email protected]"
] | |
1a57dcb6dd5bc694a8c241ff875abb2a00b8f021 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Prime Services/FPythonCode/PaymentFees.py | d79409eb38743fa11ab65e6b6c2c6f2b1438516b | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,474 | py | """-----------------------------------------------------------------------
MODULE
PaymentFees
DESCRIPTION
Date : 2012-09-19
Purpose : Returns the payment fees of a trade
Department and Desk : Prime Services
Requester : Danilo Mantoan
Developer : Nidheesh Sharma
CR Number : 556348
ENDDESCRIPTION
HISTORY
Date: CR Number: Developer: Description:
2013-03-19 C885651 Nidheesh Sharma Excluded INS, SET, Brokerage fees from OtherFee
2014-03-12 C1819376 Hynek Urban Refactor & minor bug fix of other fees.
2018-11-22 1001164411 Ondrej Bahounek ABITFA-5622: Convert Other Fees to trade currency.
2018-11-28 Jaco Swanepoel Payment migration: convert cash payments to appropriate new additional payment types.
-----------------------------------------------------------------------"""
import acm
FX_COLUMN_ID = 'FX Rate On Display Curr'
CS = acm.Calculations().CreateCalculationSpace(acm.GetDefaultContext(), 'FPortfolioSheet')
ZAR_CUR = acm.FCurrency['ZAR']
PAYMENT_TYPES_TO_EXCLUDE = ('Premium',
'Dividend Suppression',
'INS',
'SET',
'Brokerage Vatable',
'Execution Fee',
'Aggregated Settled',
'Aggregated Accrued',
'Aggregated Funding',
'Aggregated Dividends',
'Aggregated Depreciation',
'Aggregated Future Settle',
'Aggregated Forward Funding PL',
'Aggregated Cash Open Value',
'Aggregated Cash Position',
'Aggregated Forward Premium',
'Aggregated Forward Settled',
'Aggregated Forward Dividends',
'Aggregated Forward Position')
PAYMENT_TEXTS_TO_EXCLUDE = ('Execution', 'ExecutionFee', 'INS', 'SET', 'Brokerage')
def ReturnOtherFee(trade, val_date):
"""
Return the sum of all fees of a trade up to the specified date.
Fees of type Execution Fee, INS, SET and Brokerage and any payments of type
Aggregated Settled are excluded.
"""
CS.SimulateGlobalValue('Valuation Date', val_date)
CS.SimulateGlobalValue('Portfolio Profit Loss End Date', 'Custom Date')
CS.SimulateGlobalValue('Portfolio Profit Loss End Date Custom', val_date)
sumOfOtherFees = 0
if trade.Status() not in ('Void'):
payments = trade.Payments()
for payment in payments:
if payment.Type() in PAYMENT_TYPES_TO_EXCLUDE or\
payment.Text() in PAYMENT_TEXTS_TO_EXCLUDE:
continue
if payment.ValidFrom() > val_date:
continue
amount = payment.Amount()
if ZAR_CUR.Name() != payment.Currency().Name():
# Ondrej's note:
# Convert all non-ZAR payments to ZAR.
# This should be ideally converted to trade currency,
# but then many other attributes need to be changed and well tested.
# This is just a fix to accommodate Futs on FXs by the end of the month.
CS.SimulateValue(ZAR_CUR, "Portfolio Currency", payment.Currency())
fx_rate = CS.CreateCalculation(ZAR_CUR, FX_COLUMN_ID).Value().Number()
amount *= fx_rate
sumOfOtherFees += amount
return acm.DenominatedValue(sumOfOtherFees, ZAR_CUR.Name(), None, val_date)
#Function to return termination fee of a trade
def ReturnTerminationFee(trade):
terminationFee = 0
if trade.Status() in ('Terminated'):
payments = trade.Payments()
for payment in payments:
if payment.Type() in ('Cash') and ('Termination' in payment.Text() or 'Terminated' in payment.Text()):
terminationFee = terminationFee + payment.Amount()
elif payment.Type() in ('Termination Fee'):
terminationFee = terminationFee + payment.Amount()
return terminationFee
#Function to return termination fee date of a trade
def ReturnTerminationFeeDate(trade):
terminationDate = ''
if trade.Status() in ('Terminated'):
payments = trade.Payments()
for payment in payments:
if payment.Type() in ('Cash') and ('Termination' in payment.Text() or 'Terminated' in payment.Text()):
terminationDate = payment.PayDay()
elif payment.Type() in ('Termination Fee'):
terminationDate = payment.PayDay()
return terminationDate
#Function to return termination fee date of a trade in the correct format from an array of dates
def ReturnSingleTerminationFeeDate(arrayOfDates):
terminationDate = ''
for date in arrayOfDates:
if date != '' and isinstance(date, str):
dateFormatter = acm.FDateFormatter('dateFormatter')
dateFormatter.FormatDefinition("%d/%m/%Y")
terminationDate = dateFormatter.Format(date)#.replace('-','/')
break
return terminationDate
| [
"[email protected]"
] | |
36d202508d82d6e61d7e13af2d2f6b042afdbfe4 | 77fb4b9902a79a2bcc42105f1c62744cc869cd15 | /wignerd1.py | 0aae41925ad38d3c8112ac88bc28753c021972ef | [] | no_license | davidsdatascience/Algorithm-Development | 6dd3d35d6eeab1a3a019abca8b591950f7830754 | 57b0cf1a976ce7005fa05a79880a49d6bdd06822 | refs/heads/master | 2020-04-20T09:21:54.105597 | 2019-05-04T20:08:50 | 2019-05-04T20:08:50 | 168,763,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,354 | py | from scipy.special import jv, legendre, sph_harm, jacobi
from scipy.misc import factorial, comb
from numpy import floor, sqrt, sin, cos, exp, power
from math import pi
def wignerd(j,m,n=0,approx_lim=10):
'''
Wigner "small d" matrix. (Euler z-y-z convention)
example:
j = 2
m = 1
n = 0
beta = linspace(0,pi,100)
wd210 = wignerd(j,m,n)(beta)
some conditions have to be met:
j >= 0
-j <= m <= j
-j <= n <= j
The approx_lim determines at what point
bessel functions are used. Default is when:
j > m+10
and
j > n+10
for integer l and n=0, we can use the spherical harmonics. If in
addition m=0, we can use the ordinary legendre polynomials.
'''
if (j < 0) or (abs(m) > j) or (abs(n) > j):
raise ValueError("wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j,m,n) \
+ " Valid range for parameters: j>=0, -j<=m,n<=j.")
if (j > (m + approx_lim)) and (j > (n + approx_lim)):
#print 'bessel (approximation)'
return lambda beta: jv(m-n, j*beta)
if (floor(j) == j) and (n == 0):
if m == 0:
#print 'legendre (exact)'
return lambda beta: legendre(j)(cos(beta))
elif False:
#print 'spherical harmonics (exact)'
a = sqrt(4.*pi / (2.*j + 1.))
return lambda beta: a * conjugate(sph_harm(m,j,beta,0.))
jmn_terms = {
j+n : (m-n,m-n),
j-n : (n-m,0.),
j+m : (n-m,0.),
j-m : (m-n,m-n),
}
k = min(jmn_terms)
a, lmb = jmn_terms[k]
b = 2.*j - 2.*k - a
if (a < 0) or (b < 0):
raise ValueError("wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j,m,n) \
+ " Encountered negative values in (a,b) = ({0},{1})".format(a,b))
coeff = power(-1.,lmb) * sqrt(comb(2.*j-k,k+a)) * (1./sqrt(comb(k+b,b)))
#print 'jacobi (exact)'
return lambda beta: coeff \
* power(sin(0.5*beta),a) \
* power(cos(0.5*beta),b) \
* jacobi(k,a,b)(cos(beta))
def wignerD(j,m,n=0,approx_lim=10):
'''
Wigner D-function. (Euler z-y-z convention)
This returns a function of 2 to 3 Euler angles:
(alpha, beta, gamma)
gamma defaults to zero and does not need to be
specified.
The approx_lim determines at what point
bessel functions are used. Default is when:
j > m+10
and
j > n+10
usage:
from numpy import linspace, meshgrid
a = linspace(0, 2*pi, 100)
b = linspace(0, pi, 100)
aa,bb = meshgrid(a,b)
j,m,n = 1,1,1
zz = wignerD(j,m,n)(aa,bb)
'''
return lambda alpha,beta,gamma=0: \
exp(-1j*m*alpha) \
* wignerd(j,m,n,approx_lim)(beta) \
* exp(-1j*n*gamma)
#if __name__ == '__main__':
'''
just a bunch of plots in (phi,theta) for
integer and half-integer j and where m and
n take values of [-j, -j+1, ..., j-1, j]
Note that all indexes can be any real number
with the conditions:
j >= 0
-j <= m <= j
-j <= n <= j
'''
from matplotlib import pyplot, cm, rc
from numpy import linspace, arange, meshgrid, real, imag, arccos
rc('text', usetex=False)
ext = [0.,2.*pi,0.,pi]
phi = linspace(ext[0],ext[1],200)
theta = linspace(ext[2],ext[3],200)
pphi,ttheta = meshgrid(phi,theta)
# The maximum value of j to plot. Will plot real and imaginary
# distributions for j = 0, 0.5, ... maxj
maxj = 3
for j in arange(0,maxj+.1,step=0.5):
fsize = (j*2+3,j*2+3)
title = 'WignerD(j,m,n)(phi,theta)'
if j == 0:
fsize = (4,4)
else:
title += ', j = '+str(j)
figr = pyplot.figure(figsize=fsize)
figr.suptitle(r'Real Part of '+title)
figi = pyplot.figure(figsize=fsize)
figi.suptitle(r'Imaginary Part of '+title)
for fig in [figr,figi]:
fig.subplots_adjust(left=.1,bottom=.02,right=.98,top=.9,wspace=.02,hspace=.1)
if j == 0:
fig.subplots_adjust(left=.1,bottom=.1,right=.9,top=.9)
if j == 0.5:
fig.subplots_adjust(left=.2,top=.8)
if j == 1:
fig.subplots_adjust(left=.15,top=.85)
if j == 1.5:
fig.subplots_adjust(left=.15,top=.85)
if j == 2:
fig.subplots_adjust(top=.87)
if j != 0:
axtot = fig.add_subplot(1,1,1)
axtot.axesPatch.set_alpha(0.)
axtot.xaxis.set_ticks_position('top')
axtot.xaxis.set_label_position('top')
axtot.yaxis.set_ticks_position('left')
axtot.spines['left'].set_position(('outward',10))
axtot.spines['top'].set_position(('outward',10))
axtot.spines['right'].set_visible(False)
axtot.spines['bottom'].set_visible(False)
axtot.set_xlim(-j-.5,j+.5)
axtot.set_ylim(-j-.5,j+.5)
axtot.xaxis.set_ticks(arange(-j,j+0.1,1))
axtot.yaxis.set_ticks(arange(-j,j+0.1,1))
axtot.set_xlabel('n')
axtot.set_ylabel('m')
nplts = 2*j+1
for m in arange(-j,j+0.1,step=1):
for n in arange(-j,j+0.1,step=1):
print j,m,n
zz = wignerD(j,m,n)(pphi,ttheta)
i = n+j + nplts*(j-m)
for fig,data in zip((figr,figi), (real(zz),imag(zz))):
ax = fig.add_subplot(nplts, nplts, i+1, projection='polar')
plt = ax.pcolormesh(pphi,ttheta,data.copy(),
cmap=cm.jet,
#cmap=cm.RdYlBu_r,
vmin=-1., vmax=1.)
if j == 0:
ax.grid(True, alpha=0.5)
ax.set_title(r'j,m,n = (0,0,0)', position=(0.5,1.1), size=12)
ax.set_xlabel(r'$\phi$')
ax.set_ylabel(r'$\theta$', rotation='horizontal', va='bottom')
ax.xaxis.set_ticks([0,.25*pi,.5*pi,.75*pi,pi,1.25*pi,1.5*pi,1.75*pi])
ax.xaxis.set_ticklabels(['0',r'$\frac{\pi}{4}$',r'$\frac{\pi}{2}$',r'$\frac{3 \pi}{4}$',r'$\pi$',r'$\frac{5 \pi}{4}$',r'$\frac{3 \pi}{2}$',r'$\frac{7 \pi}{4}$'], size=14)
ax.yaxis.set_ticks([0,.25*pi,.5*pi,.75*pi,pi])
ax.yaxis.set_ticklabels(['0',r'$\frac{\pi}{4}$',r'$\frac{\pi}{2}$',r'$\frac{3 \pi}{4}$',r'$\pi$'], size=14)
else:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_xlim(ext[0],ext[1])
ax.set_ylim(ext[2],ext[3])
if j == 0:
fig.colorbar(plt, pad=0.07)
# uncomment the following if you want to save these to image files
#figr.savefig('wignerD_j'+str(j)+'_real.png', dpi=150)
#figi.savefig('wignerD_j'+str(j)+'_imag.png', dpi=150)
pyplot.show()
| [
"[email protected]"
] | |
78d7b71bc57e02874094e8d8369f2ea02d403828 | ab6b73cc1bd2501fca5b406a0bcd69b7b8b7f94b | /hackerrank/warm-up/sales-by-match.py | fb602ce970fa16f37a83476ebb9e068e686fa307 | [] | no_license | youngbin-ro/problem-solving | c2a57a4318dc66647a182418d9c07bf0615ff36b | 7b27e44144bc25fd0ad9928eb979c5522ab772d4 | refs/heads/master | 2023-02-05T07:18:34.926633 | 2020-12-21T13:16:08 | 2020-12-21T13:16:08 | 232,480,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from collections import defaultdict
def sock_merchant(arr):
dic = defaultdict(int)
count = 0
for num in arr:
dic[num] += 1
if dic[num] % 2 == 0:
dic[num] = 0
count += 1
return count
if __name__ == "__main__":
arr_ = [10, 20, 20, 10, 10, 30, 50, 10, 20]
print(sock_merchant(arr_))
| [
"[email protected]"
] | |
caa1a4bff741d96aeec1525edabd69b711687a41 | 4b5b168ab84b4d40d7e7cc58ebc806287e0a8d82 | /tricky.py | 714693cb19a608edcafbec9ebb2a41ea15f87da0 | [] | no_license | gould-ann/challenges- | 5de5f14527f2ee0147514d364f193d78cec6a113 | 4784eb6c3caf494ee24e0e8c587ce626fc026fdd | refs/heads/master | 2020-03-18T18:32:35.402677 | 2018-05-28T03:57:15 | 2018-05-28T03:57:15 | 135,099,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | #https://www.hackerrank.com/challenges/maximum-subarray-sum/problem
import math #lol
# arr = [1, 2, 3]
# mod_number = 2
# max([sum( [arr[j] if str(bin(i))[2:].zfill(len(arr))[j] == "1" else 0 for j in range(len(arr))] ) % mod_number for i in range(2**len(arr))])
def max_subarray(arr, mod_number):
best_sum = 0
for i in range(2**len(arr)):
current_array = []
bin_str = str(bin(i))[2:].zfill(len(arr))
current_array = [arr[j] if bin_str[j] == "1" else 0 for j in range(len(arr))]
best_sum = max(sum(current_array) % mod_number, best_sum)
return best_sum
#this line
print max_subarray([3, 3, 9, 9, 5], 7)
| [
"[email protected]"
] | |
c8d42762f1ba5f26bd5112890d47ecaef5bed717 | d992f4973bb2aa3c43dce5d4b3483c6212108c60 | /hw1/MachineConstants.py | d31a5e92fb49edff71974ce9b0112e2e8b76759d | [] | no_license | wesshih/astr427 | ece74e5e3ac7275ed50ba6613ed6980a2da6cc8a | b6dc28a02b0712f8e40a5d9ca5e371a660c070f2 | refs/heads/master | 2021-01-18T15:45:50.093209 | 2017-06-08T05:30:08 | 2017-06-08T05:30:08 | 86,681,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,939 | py | import numpy as np
'''
Wesley Shih
1237017
Astr 427 Homework 1
4/11/17
Problem 1: Machine Constants
The first problem of the homework asks us to empirically determine several
machine constants related to floating point numbers. These include the smallest number
epsilon that can be successfully added to or subracted from 1.0, and the maximum and minimum
positive numbers that can be represented using the float data type. We will be using 64-bit floats
for all parts of this problem.
This file contains code that calculates the machine constants and prints them to the console.
Becasue these values are fairly easy to calculate, this file will not contain any user-defined
functions or data structures. For each part I will simply calculate the constant, print it to
the console, and comment on how it relates to the IEEE 754 representation.
A quick note on float representation that is relative to the whole problem. For a 64-bit
float, there is 1 bit for sign, 11 bits for the exponent, and 52 bits for the Significand or fraction.
However, there is an implied 1 at the beginning of the significand, so we effectively have 53 bits
available for the fraction.
'''
# Part A
# We are looking for the smallest value that can be successfully subtracted from 1.0
# or in other words, find smallest epsilon such that 1.0 - epsilon != 1.0
epsilon_a = 1.0
while (1.0 - epsilon_a/2.0) != 1.0:
epsilon_a /= 2.0
print 'A) smallest epsilon s.t. 1.0 - epsilon != 1.0'
print '\t\tepsilon_a:\t' + `epsilon_a` + '\n'
# Running this code gives us a value of epsilon_a = 1.1102230246251565e-16
# This value is within an order of 2 of the true value of epsilon, as we know that
# 1.0 - (epsilon_a/2) == 1.0. Given the 53 bits for the significand, we expect
# the true machine epsilon to be 2^-(53 - 1). However, 2^-52 = 2.22e-16 which is essentially
# double the value of epsilon_a.
# Part B
# We are looking for the smallest value that can be successfully added from 1.0
# or formally, find smallest epsilon such that 1.0 + epsilon != 1.0
epsilon_b = 1.0
while (1.0 + epsilon_b/2.0) != 1.0:
epsilon_b /= 2.0
print 'B) smallest epsilon s.t. 1.0 + epsilon != 1.0'
print '\t\tepsilon_b:\t' + `epsilon_b` + '\n'
# Running this code gives us a value of epsilon_b = 2.220446049250313e-16
# This value agrees very nicely with the "expected" epsilon I calculated above.
# 2^-52 = 2.22e-16, which is very close to the calculated value.
# Part C
# We are looking for the maximum number that can be represented with a float
max_num = 1.0
while (max_num * 2.0) != np.inf:
max_num *= 2.0
print 'C) maximum representable number'
print '\t\tmax_num:\t' + `max_num` + '\n'
# Running this code gives us a max_num = 8.98846567431158e+307
# We know that this value is at least within an order of magnitude of the true max_num
# because we know that max_num * 2.0 == infinity representation.
# We have 11 bits total for the exponent, however these bits follow twos-compliment.
# This means we only have 10 bits available for positive exponents. So the maximum
# positive exponent is 1023. We find that 2^1023 = 8.9884e+307, which is exactly what
# we have found here. the true maximum number will be greater than this though, as we
# can increase the significand to push the max_num higher.
# Part D
# We are looking for the minimum representable positive number
min_num = 1.0
while (min_num/2) > 0:
min_num /= 2
print 'D) minimum representable number'
print '\t\tmin_num:\t' + `min_num` + '\n'
# Running this code gives us a min_num = 5e-324
# Like with max_num, to find the minimum number, we will look at the 11 exponent bits.
# However, this time we are able to use the MSB, and so we can achieve an exponent of -1024
# 2^-1024 = 5.56e-309. Using the exponent alone is not enough to get 5e-324. To do this,
# we must denormalize the float, changing the implied 1.f to a 0.f. This will get us the
# rest of the way there to 5e-324.
| [
"[email protected]"
] | |
4bccb67d59f443ca9b1876575c634bd2741ec643 | 0a3b70128f4de8ba3dc74decea6d349924d31907 | /mysite/bin/easy_install-2.7 | 9f3daab08a7378e652e37d5ba6778ad0a331647e | [] | no_license | ozknight/Django_Tutorials_OJT_Proof | 80c38b278b42f2ca72bd05206f84a359b780daeb | 8006ed69be3bae7e666e49c62f517fbd2f454420 | refs/heads/master | 2016-09-06T16:13:58.746020 | 2015-06-24T23:53:40 | 2015-06-24T23:53:40 | 37,992,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | 7 | #!/home/oz-knightwalker/Desktop/Django_Tutorials_OJT_Proof/mysite/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
1755b870d7e1acc791f2b3b36cfe6ebb0e6e8cfe | ec3964d765f2a499fd017a4e2fb89d405c3070c9 | /basicapp/models.py | 9358d6cd70b0ab4d7b5c7d1096ec410afe5aa252 | [] | no_license | aryan1jain2/Trycycle | 77476e8e769185db42fc7bb2ecdac6ad1f7c102c | 00e63728fdcc7e8d4ec37964ed4ac8e5f856ad14 | refs/heads/main | 2023-03-02T03:31:55.354309 | 2021-02-13T10:52:10 | 2021-02-13T10:52:10 | 338,552,016 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,529 | py | from django.db import models
# Create your models here.
class userinfo(models.Model): #corresponds to customer table
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
# reg_no = models.CharField(max_length=100)
# room_no = models.CharField(max_length=100)
username = models.CharField(max_length=20, default = "No name")
email = models.CharField(max_length=100)
phone_no = models.BigIntegerField()
class location(models.Model):
location_id = models.CharField(max_length = 40, primary_key = True)
name = models.CharField(max_length=40)
class contact_us(models.Model):
name = models.CharField(max_length=40)
email =models.CharField(max_length=75)
subject=models.TextField()
feed=models.TextField()
class cycle_accessories(models.Model):
name = models.CharField(max_length=40, primary_key = True)
quantity = models.IntegerField()
costperitem = models.IntegerField()
class cycle_category(models.Model):
name = models.CharField(max_length = 40, primary_key = True)
costperday = models.IntegerField()
latefeesperhour = models.IntegerField()
class cycle(models.Model):
cycle_id = models.CharField(max_length=40, primary_key = True)
model_year = models.CharField(max_length=5)
model = models.CharField(max_length=40)
category = models.ForeignKey(cycle_category, on_delete=models.CASCADE)
availability = models.CharField(max_length=40)
class discount(models.Model):
discount_code = models.CharField(max_length = 40, primary_key = True)
name = models.CharField(max_length=40)
expiry_date = models.DateField()
percentage = models.IntegerField()
class insurance(models.Model):
insurance_code = models.CharField(max_length = 40, primary_key = True)
name = models.CharField(max_length=40)
costperday = models.IntegerField()
coverage_type = models.CharField(max_length = 40)
class Bookings(models.Model):
user = models.CharField(max_length=40)
date = models.DateField(max_length=10)
startpt = models.CharField(max_length=40)
lastpt = models.CharField(max_length=40)
start_time = models.TimeField(max_length=40)
end_time = models.TimeField(max_length=40, default = '24 hours')
accessory = models.CharField(max_length=40, null = True)
discount = models.CharField(max_length=40, null = True)
insurance = models.CharField(max_length=40, null = True)
cycle_id = models.CharField(max_length=40)
tot = models.IntegerField()
| [
"[email protected]"
] | |
5bc96ed5b2ff7057cfe5cf0f85b1852e0b311584 | afa0d5a97925273f7fb0befef697d36020df5787 | /packages/google-cloud-alloydb/google/cloud/alloydb_v1beta/services/alloy_db_admin/pagers.py | 1c442de8074c691d92fdefd2aa87e57390df9038 | [
"Apache-2.0"
] | permissive | scooter4j/google-cloud-python | dc7ae1ba6a33a62a40b617b806ec8ed723046b8b | 36b1cf08092d5c07c5971bb46edda7a9928166b1 | refs/heads/master | 2023-04-14T18:36:48.643436 | 2023-04-06T13:19:26 | 2023-04-06T13:19:26 | 188,338,673 | 0 | 0 | null | 2019-05-24T02:27:15 | 2019-05-24T02:27:14 | null | UTF-8 | Python | false | false | 20,951 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Iterator,
Optional,
Sequence,
Tuple,
)
from google.cloud.alloydb_v1beta.types import resources, service
class ListClustersPager:
"""A pager for iterating through ``list_clusters`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListClustersResponse` object, and
provides an ``__iter__`` method to iterate through its
``clusters`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListClusters`` requests and continue to iterate
through the ``clusters`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListClustersResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListClustersResponse],
request: service.ListClustersRequest,
response: service.ListClustersResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListClustersRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListClustersResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListClustersRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListClustersResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.Cluster]:
for page in self.pages:
yield from page.clusters
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListClustersAsyncPager:
"""A pager for iterating through ``list_clusters`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListClustersResponse` object, and
provides an ``__aiter__`` method to iterate through its
``clusters`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListClusters`` requests and continue to iterate
through the ``clusters`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListClustersResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListClustersResponse]],
request: service.ListClustersRequest,
response: service.ListClustersResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListClustersRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListClustersResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListClustersRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListClustersResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.Cluster]:
async def async_generator():
async for page in self.pages:
for response in page.clusters:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListInstancesPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse` object, and
provides an ``__iter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListInstancesResponse],
request: service.ListInstancesRequest,
response: service.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListInstancesRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.Instance]:
for page in self.pages:
yield from page.instances
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListInstancesAsyncPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListInstancesResponse]],
request: service.ListInstancesRequest,
response: service.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListInstancesRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.Instance]:
async def async_generator():
async for page in self.pages:
for response in page.instances:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListBackupsPager:
"""A pager for iterating through ``list_backups`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse` object, and
provides an ``__iter__`` method to iterate through its
``backups`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListBackups`` requests and continue to iterate
through the ``backups`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListBackupsResponse],
request: service.ListBackupsRequest,
response: service.ListBackupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListBackupsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListBackupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListBackupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.Backup]:
for page in self.pages:
yield from page.backups
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListBackupsAsyncPager:
"""A pager for iterating through ``list_backups`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``backups`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListBackups`` requests and continue to iterate
through the ``backups`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListBackupsResponse]],
request: service.ListBackupsRequest,
response: service.ListBackupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListBackupsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListBackupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListBackupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.Backup]:
async def async_generator():
async for page in self.pages:
for response in page.backups:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListSupportedDatabaseFlagsPager:
"""A pager for iterating through ``list_supported_database_flags`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse` object, and
provides an ``__iter__`` method to iterate through its
``supported_database_flags`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListSupportedDatabaseFlags`` requests and continue to iterate
through the ``supported_database_flags`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListSupportedDatabaseFlagsResponse],
request: service.ListSupportedDatabaseFlagsRequest,
response: service.ListSupportedDatabaseFlagsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListSupportedDatabaseFlagsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListSupportedDatabaseFlagsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.SupportedDatabaseFlag]:
for page in self.pages:
yield from page.supported_database_flags
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListSupportedDatabaseFlagsAsyncPager:
"""A pager for iterating through ``list_supported_database_flags`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``supported_database_flags`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListSupportedDatabaseFlags`` requests and continue to iterate
through the ``supported_database_flags`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListSupportedDatabaseFlagsResponse]],
request: service.ListSupportedDatabaseFlagsRequest,
response: service.ListSupportedDatabaseFlagsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListSupportedDatabaseFlagsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListSupportedDatabaseFlagsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.SupportedDatabaseFlag]:
async def async_generator():
async for page in self.pages:
for response in page.supported_database_flags:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| [
"[email protected]"
] | |
b1efe20d5ba4c2a9c279544113a1e2bd6cdf7018 | 2432996ac1615cd36d61f0feeff8a359d2b438d8 | /env/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-eth_hash.py | 1b22c286fe3f7300f269b0ec19044cd2c28cc11a | [
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"Apache-2.0"
] | permissive | Parveshdhull/AutoTyper | dd65d53ece7c13fbc1ead7ce372947483e05e2e3 | 7fabb30e15b770d790b69c2e4eaf9bbf5a4d180c | refs/heads/main | 2023-05-08T14:10:35.404160 | 2023-05-07T20:43:15 | 2023-05-07T20:43:15 | 315,415,751 | 26 | 18 | Apache-2.0 | 2023-05-07T20:43:16 | 2020-11-23T19:13:05 | Python | UTF-8 | Python | false | false | 611 | py | # ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_submodules
# The ``eth_hash.utils.load_backend`` function does a dynamic import.
hiddenimports = collect_submodules('eth_hash.backends')
| [
"[email protected]"
] | |
bfcd8e3ab880fe73ecee9e775da43d432b361f6b | 90e131d3f407984c6eb651702c8539986216173b | /transform_content.py | a3ce95a5672e53fece2b5fe04e3f02e34b05a9f6 | [] | no_license | EnVyNm/EnVy | 2333c3f7954b6706527a09aee369bfceff7a1ddb | e12b71a4a6ae716998ecb64e6a4f8b09bb10b6a6 | refs/heads/master | 2020-04-18T22:33:15.011208 | 2014-05-07T03:38:46 | 2014-05-07T03:38:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,618 | py | #!/usr/bin/env python
# Copyright 2008 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Brett Slatkin ([email protected])"
import os
import re
import urlparse
################################################################################
# URLs that have absolute addresses
ABSOLUTE_URL_REGEX = r"(http(s?):)?//(?P<url>[^\"'> \t\)]+)"
# URLs that are relative to the base of the current hostname.
BASE_RELATIVE_URL_REGEX = r"/(?!(/)|(http(s?)://)|(url\())(?P<url>[^\"'> \t\)]*)"
# URLs that have '../' or './' to start off their paths.
TRAVERSAL_URL_REGEX = r"(?P<relative>\.(\.)?)/(?!(/)|(http(s?)://)|(url\())(?P<url>[^\"'> \t\)]*)"
# URLs that are in the same directory as the requested URL.
SAME_DIR_URL_REGEX = r"(?!(/)|(http(s?)://)|(url\())(?P<url>[^\"'> \t\)]+)"
# URL matches the root directory.
ROOT_DIR_URL_REGEX = r"(?!//(?!>))/(?P<url>)(?=[ \t\n]*[\"'\)>/])"
# Start of a tag using 'src' or 'href'
TAG_START = r"(?i)\b(?P<tag>src|href|action|url|background)(?P<equals>[\t ]*=[\t ]*)(?P<quote>[\"']?)"
# Start of a CSS import
CSS_IMPORT_START = r"(?i)@import(?P<spacing>[\t ]+)(?P<quote>[\"']?)"
# CSS url() call
CSS_URL_START = r"(?i)\burl\((?P<quote>[\"']?)"
REPLACEMENT_REGEXES = [
(TAG_START + SAME_DIR_URL_REGEX,
"\g<tag>\g<equals>\g<quote>%(accessed_dir)s\g<url>"),
(TAG_START + TRAVERSAL_URL_REGEX,
"\g<tag>\g<equals>\g<quote>%(accessed_dir)s/\g<relative>/\g<url>"),
(TAG_START + BASE_RELATIVE_URL_REGEX,
"\g<tag>\g<equals>\g<quote>/%(base)s/\g<url>"),
(TAG_START + ROOT_DIR_URL_REGEX,
"\g<tag>\g<equals>\g<quote>/%(base)s/"),
# Need this because HTML tags could end with '/>', which confuses the
# tag-matching regex above, since that's the end-of-match signal.
(TAG_START + ABSOLUTE_URL_REGEX,
"\g<tag>\g<equals>\g<quote>/\g<url>"),
(CSS_IMPORT_START + SAME_DIR_URL_REGEX,
"@import\g<spacing>\g<quote>%(accessed_dir)s\g<url>"),
(CSS_IMPORT_START + TRAVERSAL_URL_REGEX,
"@import\g<spacing>\g<quote>%(accessed_dir)s/\g<relative>/\g<url>"),
(CSS_IMPORT_START + BASE_RELATIVE_URL_REGEX,
"@import\g<spacing>\g<quote>/%(base)s/\g<url>"),
(CSS_IMPORT_START + ABSOLUTE_URL_REGEX,
"@import\g<spacing>\g<quote>/\g<url>"),
(CSS_URL_START + SAME_DIR_URL_REGEX,
"url(\g<quote>%(accessed_dir)s\g<url>"),
(CSS_URL_START + TRAVERSAL_URL_REGEX,
"url(\g<quote>%(accessed_dir)s/\g<relative>/\g<url>"),
(CSS_URL_START + BASE_RELATIVE_URL_REGEX,
"url(\g<quote>/%(base)s/\g<url>"),
(CSS_URL_START + ABSOLUTE_URL_REGEX,
"url(\g<quote>/\g<url>"),
]
################################################################################
def TransformContent(base_url, accessed_url, content):
url_obj = urlparse.urlparse(accessed_url)
accessed_dir = os.path.dirname(url_obj.path)
if not accessed_dir.endswith("/"):
accessed_dir += "/"
for pattern, replacement in REPLACEMENT_REGEXES:
fixed_replacement = replacement % {
"base": base_url,
"accessed_dir": accessed_dir,
}
content = re.sub(pattern, fixed_replacement, content)
return content
| [
"[email protected]"
] | |
7f3cd58d2ad66672684040b5b5587e8f52617096 | 1ae6034a53d60bee5c61208539cbb39143ec76e3 | /Motion detection game/ui.py | 5982fe204020dab34a2dd66c4d71613ee9d9a191 | [
"MIT"
] | permissive | harshmalik9423/Motion-Detection-Game | 2f60e77983d8dda746ffb4c7de907f6658dcb2fb | 49ad8c25360df34f4e33647dee7406e6397311de | refs/heads/master | 2022-08-23T12:52:48.089138 | 2020-05-25T10:56:48 | 2020-05-25T10:56:48 | 266,755,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py | from tkinter import *
import cv2
from tkinter import messagebox
import PIL.Image, PIL.ImageTk
window = Tk()
window.title("Welcome")
window.geometry('1500x1500')
########CODING FOR QUIT BUTTON MESSAGEBOX#########
def close_window():
MsgBox=messagebox.askquestion('Quit the game','Are you sure you want to quit the game?',icon='warning')
if MsgBox =='yes':
window.destroy()
else:
messagebox.showinfo('Return','You will now return to the game screen')
def close():
if txt1 == '':
messagebox.showinfo('Return','You will now return to the game screen')
else:
file=open("text1.txt","w")
a=txt1.get()
file.write(a)
file.close()
if txt2 == '':
messagebox.showinfo('Return','You will now return to the game screen')
else:
file=open("text2.txt","w")
b=txt2.get()
file.write(b)
file.close()
window.destroy()
#################CREATE A WINDOW##################
cv_img=cv2.imread("images.jpeg")
canvas=Canvas(window,width=1500,height=1500)
canvas.pack()
photo=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))
canvas.create_image(0,0,image=photo,anchor=NW)
##########LABEL AND TEXTBOX FOR PLAYER 1##########
photo2=PhotoImage(file="player1.gif")
lbl1 = Label(window, compound=TOP, width=284, height=58, image=photo2)
lbl1.place(x=200,y=180)
txt1 = Entry(window,font=("Bold",20),bd=5,bg='light green')
txt1.place(x=600,y=185)
##########LABEL AND TEXTBOX FOR PLAYER 2##########
photo3=PhotoImage(file="player2.gif")
lbl2 = Label(window, compound=TOP, width=292, height=60, image=photo3)
lbl2.place(x=200,y=280)
txt2 = Entry(window,font=("Bold",20),bd=5,bg='light green')
txt2.place(x=600,y=285)
##############READY AND QUIT BUTTONS##############
btn2=Button(window,text="Ready",font=("Bold",20),height=2,width=20,fg='green',bg='black',bd=10,command=close)
btn2.place(x=400,y=500)
btn3=Button(window,text="Quit",font=("Bold",15),height=1,width=8,fg='black',bg='gray',bd=5,command=close_window)
btn3.place(x=1225,y=50)
###################MAIN LOOP######################
window.mainloop()
| [
"[email protected]"
] | |
ebc97dabe6ba4cd2d87aca268755945115d291e2 | 3447227dd54587eb8c0c7f5346ac158504f7a907 | /compass/ocean/tests/global_ocean/threads_test/__init__.py | 42883b53b746d85a52e069468c8ae411ba7c414e | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | MPAS-Dev/compass | 5e2c1525224dd399bcf4f56f661df05e2ec197a6 | 0b7440f0aa77c1ae052922a39e646bd35c267661 | refs/heads/main | 2023-08-30T20:59:52.052430 | 2023-08-29T09:45:14 | 2023-08-29T09:45:14 | 310,409,977 | 10 | 26 | NOASSERTION | 2023-09-13T14:19:16 | 2020-11-05T20:28:25 | Python | UTF-8 | Python | false | false | 2,046 | py | from compass.validate import compare_variables
from compass.ocean.tests.global_ocean.forward import ForwardTestCase, \
ForwardStep
class ThreadsTest(ForwardTestCase):
"""
A test case for performing two short forward runs to make sure the results
are identical with 1 and 2 thread per MPI process
"""
def __init__(self, test_group, mesh, init, time_integrator):
"""
Create test case
Parameters
----------
test_group : compass.ocean.tests.global_ocean.GlobalOcean
The global ocean test group that this test case belongs to
mesh : compass.ocean.tests.global_ocean.mesh.Mesh
The test case that produces the mesh for this run
init : compass.ocean.tests.global_ocean.init.Init
The test case that produces the initial condition for this run
time_integrator : {'split_explicit', 'RK4'}
The time integrator to use for the forward run
"""
super().__init__(test_group=test_group, mesh=mesh, init=init,
time_integrator=time_integrator,
name='threads_test')
for openmp_threads in [1, 2]:
name = f'{openmp_threads}thread'
step = ForwardStep(test_case=self, mesh=mesh, init=init,
time_integrator=time_integrator, name=name,
subdir=name, ntasks=4,
openmp_threads=openmp_threads)
step.add_output_file(filename='output.nc')
self.add_step(step)
# no run() method is needed
def validate(self):
"""
Test cases can override this method to perform validation of variables
and timers
"""
variables = ['temperature', 'salinity', 'layerThickness',
'normalVelocity']
compare_variables(test_case=self, variables=variables,
filename1='1thread/output.nc',
filename2='2thread/output.nc')
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.