blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c66ccb80383feeee96b3fb492ff63be1a67a796 | f1a6726105c414e394470c6c3846c16ac0fb53d9 | /content_management_portal/tests/interactors/test_question_interactor.py | fcdf93e13c0be613b4560b3ac19da31f859ac057 | [] | no_license | srinivasukotipalli/content_management_portal | 9496f80db911e09977ec7a69fd20950a3ec837d8 | 462272cb1f175f69711ac77d20c65b750dc0a139 | refs/heads/master | 2022-10-20T02:39:14.235923 | 2020-06-09T04:09:56 | 2020-06-09T04:09:56 | 269,610,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | import pytest
from django_swagger_utils.drf_server.exceptions import NotFound
from unittest.mock import create_autospec
from content_management_portal.constants.enums import TextType
from content_management_portal.interactors.storages.storage_interface \
import StorageInterface
from content_management_portal.interactors.presenters. \
question_presenter_interface import PresenterInterface
from content_management_portal.interactors.question_creation_interactor \
import QuestionCreateInteractor
from content_management_portal.interactors.question_updation_interactor \
import QuestionUpdateInteractor
from content_management_portal.interactors.question_deletion_interactor \
import QuestionDeletionInteractor
class TestQuestionInteractor:
    """Unit tests for the question create / update / delete interactors.

    Storage and presenter collaborators are replaced with autospec mocks, so
    these tests verify only the call contract between each interactor and its
    storage.  ``questiondto`` is presumably a pytest fixture defined in a
    conftest elsewhere in the suite -- TODO confirm.
    """

    def test_question_create(self,questiondto):
        # Arrange: plain input values plus fully mocked collaborators.
        user_id=1
        short_title="hello"
        content_type="HTML"
        content="hi"
        storage=create_autospec(StorageInterface)
        presenter=create_autospec(PresenterInterface)
        interactor = QuestionCreateInteractor(storage=storage,presenter=presenter)
        # Act
        interactor.question_creation(user_id=user_id,short_title=short_title, \
            content_type=content_type, content=content)
        # Assert
        storage.question_creation.assert_called_once_with( \
            user_id=user_id,
            short_title=short_title,
            content_type=content_type,
            content=content
            )
        # NOTE(review): this line merely *calls* the mock -- it asserts
        # nothing.  It probably should read
        # ``presenter.get_question_dto_response.assert_called_once_with(...)``;
        # confirm against the interactor implementation.
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_update(self,questiondto):
        # Arrange
        user_id=1
        question_id=1
        short_title="hello"
        content_type="HTML"
        content="hi"
        storage=create_autospec(StorageInterface)
        presenter=create_autospec(PresenterInterface)
        interactor = QuestionUpdateInteractor(storage=storage,presenter=presenter)
        # Act
        interactor.question_updation(user_id=user_id,
            short_title=short_title,
            content_type=content_type,
            content=content,
            question_id=question_id
            )
        # Assert
        storage.question_updation.assert_called_once_with( \
            user_id=user_id,
            short_title=short_title,
            content_type=content_type,
            content=content,
            question_id=question_id
            )
        # NOTE(review): same as above -- this call is not an assertion.
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_deletion(self):
        # Arrange
        question_id=1
        storage=create_autospec(StorageInterface)
        interactor = QuestionDeletionInteractor(storage=storage)
        # Act
        interactor.question_deletion(question_id=question_id)
        # Assert
        storage.question_deletion.assert_called_once_with(question_id=question_id)
| [
"[email protected]"
] | |
6d2d789d8d362370753b294e203208f455a2619a | 8744fa436516097b7cc283ecc79faa1fe44ea513 | /test2.py | 0be4d1e9ea7d8b18e00bb40a412d14496732c7e4 | [] | no_license | SsomyaA/WebScrap_Naukri | ba9ece7cc139c821f4024fe9ae9620ad0e0a435e | 93f9a1a2c7142bd539c278e93424987910ac17d5 | refs/heads/master | 2022-02-22T02:31:14.237420 | 2019-08-06T14:27:28 | 2019-08-06T14:27:28 | 198,096,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | import xlsxwriter
# Write each row of `array` into the first worksheet of arrays1.xlsx.
workbook = xlsxwriter.Workbook('arrays' + str(1) +'.xlsx')
worksheet = workbook.add_worksheet()

array = [['a1', 'a2', 'a3'],
         ['a4', 'a5', 'a6'],
         ['a7', 'a8', 'a9'],
         ['a10', 'a11', 'a12', 'a13', 'a14']]

# One worksheet row per list entry, starting at column 0.
for index, values in enumerate(array):
    worksheet.write_row(index, 0, values)

workbook.close()
"[email protected]"
] | |
a337213d15591a67ac7e22f0bdfa4617c7dba0b6 | 9756229c6014524b7e32ceaa75be751d9c3c6f8b | /app/views/admin_view.py | 1770186c756317423f78815ab90f87f2b597c634 | [] | no_license | Flagchet/MSHP-CTF | bd83e72a2523120446b42a33a0cdd2e2d8cd4909 | 5abf4a9e78f16c401a19d1f057f758a0dc5151c6 | refs/heads/master | 2020-12-02T10:03:56.559182 | 2017-07-03T07:07:31 | 2017-07-03T07:07:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | from flask import redirect
from flask import session
from app import app, admin, db
from app.models.db_models import User, Task, SolvedTask, Contest, ContestTask
from flask_admin.contrib.sqla import ModelView
class MyModelView(ModelView):
    """Admin model view restricted to the 'admin' session user.

    Flask-Admin calls ``is_accessible`` before serving a view and
    ``inaccessible_callback`` when access is denied.
    """

    def is_accessible(self):
        """Allow access only when the session login is 'admin'.

        Uses ``session.get`` so a missing 'login' key simply denies access
        instead of raising ``KeyError`` for anonymous sessions.  The debug
        ``print(session)`` call was removed -- it leaked session contents
        to stdout.
        """
        return session.get('login') == 'admin'

    def inaccessible_callback(self, name, **kwargs):
        # redirect to login page if user doesn't have access
        return redirect('/')
# Register every model behind the access-controlled admin view.
admin.add_view(MyModelView(User, db.session))
admin.add_view(MyModelView(Task, db.session))
admin.add_view(MyModelView(SolvedTask, db.session))
admin.add_view(MyModelView(Contest, db.session))
admin.add_view(MyModelView(ContestTask, db.session))
| [
"[email protected]"
] | |
a1255a1d1a6c6b8b8eb660a4ddf801259e1f4fa5 | fe19923be5086e6ba68b7809ba77093e82fc271c | /p2.py | bbf7a5a8d8bae15bb4030c86997bed47b328559a | [] | no_license | ps-star-empire/jenkins_python | 44ab0405150b3f97274bb61e7d2f593997a3d1df | 23056e089d6959edcd7735ab10fa321c71d0c94f | refs/heads/master | 2023-01-05T11:52:41.731867 | 2020-10-27T14:10:58 | 2020-10-27T14:10:58 | 307,921,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | Welcome to the world of git and jenkins
| [
"[email protected]"
] | |
e6fb318626c221366a806c2cf1985fa2cb88e345 | a8378aa5cc4b706e058a9599ce89def6e6045e49 | /cash/urls.py | ae77c05f9b3dfedcac0be17cb2c55bf19ec44f0b | [] | no_license | khigor777/cash | cb7bf0d95478311d0b1a708be9ea5da44f719ece | 1f7b16021cfd8e2c68de37c20850d2531332847e | refs/heads/master | 2021-01-20T19:19:58.658497 | 2016-06-06T10:53:29 | 2016-06-06T10:53:29 | 60,447,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | """cash URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include, patterns
from django.contrib import admin
# NOTE(review): ``patterns`` is imported but never used (it was removed in
# Django 1.10) -- confirm before cleaning up.
# Route /admin/ to the Django admin; everything else to the ``home`` app.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('home.urls'))
]
| [
"[email protected]"
] | |
fea66616a93898ab80d6fdb57705917d225f9bce | 8900db5096dcf2f8a758f6792cc7c8f2bc1d7785 | /src/utils/cityscapes_gen_list.py | 6d719fa98920814b922988780f38591598cf6c6c | [] | no_license | PeterZhouSZ/seg2vid | ca5d649a19c47b769fc243ef5da4f95e965c42a9 | 306ca254ac30620afe48767d02b9cf3fecba1194 | refs/heads/master | 2020-06-01T19:50:22.737812 | 2019-03-26T11:37:58 | 2019-03-26T11:37:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py | import torch
from torch.utils.data import Dataset, DataLoader
import os
import numpy as np
import glob
from tqdm import tqdm
from multiprocessing.dummy import Pool as ThreadPool
# NOTE(review): torch/Dataset/DataLoader/numpy are imported but unused in
# this script -- presumably leftovers; confirm before removing.
# Earlier dataset roots kept for reference:
# image_root_dir = '/mnt/lustre/panjunting/video_generation/cityscapes/leftImg8bit/train_extra/*'
# image_root_dir = '/mnt/lustre/panjunting/video_generation/cityscapes/leftImg8bit/demoVideo/'
# Root of the frame sequences to index; the output list is opened in append
# mode, so re-running the script adds duplicate entries.
image_root_dir = '/mnt/lustrenew/DATAshare/leftImg8bit_sequence/val/'
listfile = open("cityscapes_val_sequence_full_18.txt", 'a')
# image_root_dir = '/mnt/lustrenew/DATAshare/gtFine/val/'
# listfile = open("cityscapes_val_sequence_w_mask_8.txt", 'a')
print (image_root_dir)
# max = [6299, 599, 4599]
# i = 0
# A frame qualifies only if the following num_frame_to_predict - 1
# consecutive frames also exist on disk (see gen_list_per_city).
num_frame_to_predict = 18
def gen_list_per_city(sub_dir):
    """Append to ``listfile`` every frame in one city folder that starts a
    complete ``num_frame_to_predict``-frame sequence.

    ``sub_dir`` is one directory under ``image_root_dir``.  File names are
    assumed to end with a 6-digit frame index followed by a fixed
    16-character suffix (e.g. ``_leftImg8bit.png``) -- TODO confirm for any
    new dataset layout.
    """
    # image_list = glob.glob(sub_dir + "/*_gtFine_labelIds.png")
    image_list = glob.glob(sub_dir + "/*.png")
    for image_dir in tqdm(image_list):
        flag = True
        # Require the next num_frame_to_predict - 1 consecutive frames.
        for j in range(1, num_frame_to_predict):
            # chars [-22:-16] hold the zero-padded frame number; bump it by j.
            new_dir = image_dir[0:-22] + str(int(image_dir[-22:-16]) + j).zfill(6) + image_dir[-16::]
            if not os.path.isfile(new_dir):
                flag = False
        if flag:
            # Replace mask suffix for image suffix
            # img_dir = image_dir.split(image_root_dir)[-1].split('_gtFine_labelIds.png')[0] + '_leftImg8bit.png'
            # Store the path relative to the dataset root.
            listfile.write(image_dir.split(image_root_dir)[-1] + "\n")
            # i += 1
    # new_dir = image_dir[0:-22] + str(int(image_dir[-22:-16]) + num_frame_to_predict).zfill(6) + image_dir[-16::]
    # if os.path.isfile(new_dir):
    #     listfile.write(image_dir.split(image_root_dir)[-1]+"\n")
    #     i += 1
    # print i
#25 26 27 28 29
# Every city sub-directory under the dataset root.
cities = [sub_dir for sub_dir in glob.glob(image_root_dir + '*')]
print (cities)
# for city in cities:
#     gen_list_per_city(city)
# # make the Pool of workers
# One worker thread per city (multiprocessing.dummy = thread pool).
pool = ThreadPool(len(cities))
# open the urls in their own threads
# and return the results
# map() blocks until every city has been processed.
results = pool.map(gen_list_per_city, cities)
listfile.close()
# close the pool and wait for the work to finish
pool.close()
pool.join()
"[email protected]"
] | |
41223899ee36d16c57acd6cffe8eb09edef53345 | b6389525ca18ed9d47f1ff7208469d2a105647a0 | /esp32_00_MicroPyhtonCode/GPIOs/deep_sleep_external_0_esp32.py | 4c97de91783d8f0709de0e0e4e68d4bff3357424 | [] | no_license | Qu3d45/MicroPython_ESP32 | f5d6f0587958b5bf3a78a106c5a085b17d847755 | 7f27fc2d6d5fcf46a45521366c43dd15e07a7aa0 | refs/heads/master | 2020-09-26T19:17:36.601210 | 2020-04-04T10:15:19 | 2020-04-04T10:15:19 | 226,323,418 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 481 | py |
# Button: ESP32:
# GND Add 10kOhm --> GND + GPIO4
# VCC --> 3V3
import machine
import esp32
from machine import Pin
from time import sleep
wake1 = Pin(14, mode=Pin.IN)
# level parameter can be: esp32.WAKEUP_ANY_HIGH or esp32.WAKEUP_ALL_LOW
esp32.wake_on_ext0(pin=wake1, level=esp32.WAKEUP_ANY_HIGH)
# your main code goes here to perform a task
print('Im awake. Going to sleep in 10 seconds')
sleep(10)
print('Going to sleep now')
machine.deepsleep()
| [
"[email protected]"
] | |
e0339dd4ba3994af18657657533435c000618e27 | f980929e94530c8276e24fb5be1b3d5fe2e4bc5a | /utility/drivermanager.py | ce2ea7009e3e862c4d8029798f542572a67a5d76 | [] | no_license | arokiaanburaj/python_UI_automation | ae9a55019b5f00339dce773d2cd95f3efc0ddb85 | cb57c61aa90c2fcf3651531a3ff298aec08d514d | refs/heads/master | 2020-08-01T12:36:14.254048 | 2019-09-26T06:12:06 | 2019-09-26T06:12:06 | 210,998,779 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,412 | py | """
@author: Arokia Anburaj
@email: [email protected]
@date: 12-Jul-2015
"""
import logging
import unittest
from selenium import webdriver
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.INFO)
import sys, os
class DriverManager(unittest.TestCase):
    """
    Base test case that creates and disposes of a Selenium WebDriver.

    Subclasses inherit a ready-to-use ``self.driver`` (Chrome) pointed at
    the local Sahi test page.
    """

    def setUp(self):
        """
        Instantiate the Chrome WebDriver and open the start page.

        The commented-out lines show the earlier Firefox-profile setup.
        NOTE(review): that call passed a profile path, but
        ``create_ffprofile`` below takes no arguments -- the two are out
        of sync.
        """
        logging.info("## SETUP METHOD ##")
        logging.info("# Initializing the webdriver.")
        #self.ffprofile = self.create_ffprofile("C:\\Users\\dell\\AppData\\Local\\Mozilla\\Firefox\\Profiles\\xjncngoo.seleniumProfile")
        #self.driver = webdriver.Firefox(self.ffprofile)
        # NOTE(review): the path mixes escaped and raw backslashes; '\c' is
        # literal by accident -- a raw string would be safer.
        self.driver = webdriver.Chrome(executable_path="D:\\webdrivers\chromedriver.exe")
        self.driver.maximize_window()
        self.driver.implicitly_wait(5)
        self.driver.get("file:///D:/sahitest/Sahi%20Tests.htm")

    def tearDown(self):
        """
        Capture a screenshot if the test raised, then quit the driver.

        NOTE(review): ``sys.exc_info()`` only reports an exception that is
        currently being handled; whether it reflects the test outcome here
        depends on the runner -- confirm.
        """
        logging.info("## TEARDOWN METHOD ##")
        if sys.exc_info()[0]:
            logging.info("# Taking screenshot.")
            test_method_name = self._testMethodName
            self.driver.save_screenshot("./../screenshots/%s.png" % test_method_name)
        if self.driver is not None:
            logging.info("# Removing the webdriver.")
            self.driver.quit()

    def create_ffprofile(self):
        """
        Build a Firefox profile that downloads common document types to the
        current working directory without a save dialog and disables the
        built-in PDF viewer.

        :return: configured ``webdriver.FirefoxProfile``.
        """
        logging.info("# Setting up firefox profile.")
        profile = webdriver.FirefoxProfile()
        profile.set_preference('browser.download.folderList', 2) # custom location
        profile.set_preference('browser.download.manager.showWhenStarting', False)
        profile.set_preference('browser.download.dir', os.getcwd())
        profile.set_preference('browser.helperApps.neverAsk.saveToDisk',
                               'text/csv,application/octet-stream,application/pdf,application/vnd.ms-excel')
        profile.set_preference("pdfjs.disabled", True)
        return profile
return profile
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1c9361f16d3fe65ca23f48b2d65c9d11236c073c | 985267e31099fd72d7f6bcc52435e266bec41c2b | /mikaponics/account/serializers/logout_serializers.py | c606706d27bf0461997158c8ca7c047c116772af | [
"BSD-3-Clause"
] | permissive | mikaponics/mikaponics-back | 32d2e9a159876592b1c437bc3118165b12478995 | 98e1ff8bab7dda3492e5ff637bf5aafd111c840c | refs/heads/master | 2022-12-09T23:59:11.612473 | 2019-07-13T01:16:32 | 2019-07-13T01:16:32 | 175,534,652 | 4 | 1 | BSD-3-Clause | 2022-11-22T03:45:09 | 2019-03-14T02:35:19 | Python | UTF-8 | Python | false | false | 1,090 | py | # -*- coding: utf-8 -*-
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions, serializers
from rest_framework.response import Response
from oauth2_provider.models import Application, AbstractApplication, AbstractAccessToken, AccessToken, RefreshToken
from foundation.models import User
class LogoutSerializer(serializers.Serializer):
    """Validate that the supplied token belongs to the requesting user.

    On success the matching ``AccessToken`` row is attached to the
    validated data under ``access_token`` so the view can revoke it.
    """
    token = serializers.CharField(required=True, allow_blank=False)

    def validate(self, attrs):
        requesting_user = self.context.get('authenticated_by', None)
        supplied_token = attrs.get('token', None)
        matching_token = AccessToken.objects.filter(
            token=supplied_token,
            user=requesting_user,
        ).first()
        if matching_token is None:
            raise exceptions.ValidationError(_('Authentication token is invalid.'))
        # Hand the resolved token object back to the caller.
        attrs['access_token'] = matching_token
        return attrs
| [
"[email protected]"
] | |
0380787ee55f00a58cad9c7029b5a1cb7924f779 | d9befc41646bbf05232d065a75ed711d5b7111a5 | /excercise/osmodules.py | 69677d50d831b4ed7b158e47c47b7390c1344708 | [] | no_license | sanketkothiya/python-programe | aae170720bfe0a80edd2c190ec01efd0f99b9e12 | e0638ae530e537a5771d98be93414086ac2a2b06 | refs/heads/master | 2023-04-18T16:01:34.476070 | 2021-05-06T10:56:40 | 2021-05-06T10:56:40 | 364,875,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | import os
# Scratch experiments with the ``os`` module; most lines are kept
# commented out as notes.
# print(dir(os))
# print(os.getcwd())
# os.chdir("C://")
# print(os.getcwd())
# f = open("harry.txt")
print(os.listdir(os.getcwd()))
# os.makedirs("This/that")
# os.rename("harry.txt", "codewithharry.txt")
# print(os.environ.get('Path'))
# print(os.path.join("C:/", "/harry.txt"))
# print(os.path.exists("C://Program Files2"))
# isfile() is False for directories and missing paths.
print(os.path.isfile("C://Program Files"))
"[email protected]"
] | |
958a92c3ed430284122abed4b7fbfff4ed8a891c | 9a000fd5ae262210277934b381639844a316933b | /setup.py | b2883d345eb08d4b2df84b22c118a6a312578813 | [
"Apache-2.0"
] | permissive | lelandlib/handout | 0f0fb0c8a890799a004b8c31a41acd24b7c57a0a | c0476b838087930c04c2e528e8c335085f1de442 | refs/heads/master | 2023-04-06T08:08:59.002099 | 2019-08-08T03:33:25 | 2019-08-08T03:33:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | import setuptools
# Package metadata for the ``handout`` library.
setuptools.setup(
    name='handout',
    version='1.0.0',
    description='Turn Python scripts into handouts with Markdown and figures',
    url='http://github.com/danijar/handout',
    install_requires=[],
    # Optional 'media' extra installs imageio.
    extras_require={'media': ['imageio']},
    packages=['handout'],
    # Ship the static assets bundled under handout/data/.
    package_data={'handout': ['data/*']},
    classifiers=[
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Visualization',
        'Topic :: Multimedia :: Graphics :: Presentation',
        'License :: OSI Approved :: Apache Software License',
    ],
)
| [
"[email protected]"
] | |
7b6b41631fdd479565383193200327c970580bcf | 0aad4ba48f2fcdfbfac68db1dcf83592ad014141 | /docker/yuanqu/Device/migrations/0035_project_users.py | d4d85e50d079e8966f8f0f47972a866741e9de6a | [] | no_license | cauckfgf/fullstack | d55f5640df4c8c5917f0b23b405422975f347bef | e398797d23b2ef1afd3605f5600a9c4dae8efec8 | refs/heads/master | 2021-07-17T10:49:21.211777 | 2020-04-09T14:16:54 | 2020-04-09T14:16:54 | 123,122,968 | 1 | 3 | null | 2020-03-23T13:55:05 | 2018-02-27T11:59:44 | JavaScript | UTF-8 | Python | false | false | 674 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2019-11-18 12:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the many-to-many ``Project.users`` field."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Device', '0034_auto_20191114_1015'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='users',
            field=models.ManyToManyField(blank=True, related_name='Project2Users', to=settings.AUTH_USER_MODEL, verbose_name='\u80fd\u770b\u5230\u8be5\u7ad9\u70b9\u7684\u4eba'),
        ),
    ]
| [
"[email protected]"
] | |
2de2ff7631315fd28b7856999a20223f01a447c8 | 910f1159bd4e5feff35c7145771d66b28f08036c | /apps/users/migrations/0002_banner_emailverifyrecord.py | 99eb4caaa8e44204319c3074eb704f740b619913 | [] | no_license | chenhuihaha/MxOnline | f66cb1502e4ad5f5d8fd9fb76c28c2ea8f68e183 | ebc9a141f1cd980fa2c78c0d1ab12748ee2f46ef | refs/heads/master | 2020-08-31T19:36:34.461419 | 2019-11-08T12:04:11 | 2019-11-08T12:04:11 | 218,767,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,858 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-11-01 02:12
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: create the ``Banner`` and ``EmailVerifyRecord`` tables."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Banner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='标题')),
                ('image', models.ImageField(upload_to='banner/%Y%m', verbose_name='轮播图')),
                ('url', models.URLField(verbose_name='访问地址')),
                ('index', models.IntegerField(default=100, verbose_name='顺序')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '轮播图',
                'verbose_name_plural': '轮播图',
            },
        ),
        migrations.CreateModel(
            name='EmailVerifyRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20, verbose_name='验证码')),
                ('email', models.EmailField(max_length=50, verbose_name='邮箱')),
                ('send_type', models.CharField(choices=[('register', '注册'), ('forget', '找回密码')], max_length=10)),
                ('send_time', models.DateTimeField(default=datetime.datetime.now)),
            ],
            options={
                'verbose_name': '邮箱验证码',
                'verbose_name_plural': '邮箱验证码',
            },
        ),
    ]
| [
"[email protected]"
] | |
0e6b31178c22e4725a9e8a5299e0bc30e50ab588 | af3ce59f21f13f8bec46fc28895313fde3c1c4f6 | /Task4.py | e9f002b519efc0b22e5fd04fcfaff215c42e1692 | [] | no_license | yichen123/Algorithm_p0 | 8965f141bc907378899ce27779f7e7e6cbe545d8 | 76b4152950e1948ace7c4ff04d6c75b576da5c69 | refs/heads/master | 2020-05-16T21:56:04.270904 | 2019-04-26T13:26:59 | 2019-04-26T13:26:59 | 183,321,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | """
Read file into texts and calls.
It's ok if you don't understand how to read files.
"""
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
"""
TASK 4:
The telephone company want to identify numbers that might be doing
telephone marketing. Create a set of possible telemarketers:
these are numbers that make outgoing calls but never send texts,
receive texts or receive incoming calls.
Print a message:
"These numbers could be telemarketers: "
<list of numbers>
The list of numbers should be print out one per line in lexicographic order with no duplicates.
"""
def lexSort(list):
    """Return the distinct elements of *list* in lexicographic order.

    Replaces a hand-rolled quicksort whose partitions kept only elements
    strictly less/greater than the pivot -- i.e. it silently removed
    duplicates.  ``sorted(set(...))`` produces the identical output
    (sorted, deduplicated -- which the task's "no duplicates" requirement
    relies on) and avoids the quicksort's O(n^2) worst case on
    already-sorted input.
    """
    return sorted(set(list))
# Classify every number by the role it plays in the records.  Sets give
# O(1) membership and free deduplication (the original used lists with
# linear `in` scans, making the classification O(n^2)).
nums = {'call_out': set(), 'call_in': set(), 'text_out': set(), 'text_in': set()}
for num1, num2, _, _ in calls:
    nums['call_out'].add(num1)
    nums['call_in'].add(num2)
for num1, num2, _ in texts:
    nums['text_out'].add(num1)
    nums['text_in'].add(num2)

# Telemarketers: numbers that place outgoing calls but never receive a
# call and never appear in any text message.
telemarketers = nums['call_out'] - nums['call_in'] - nums['text_in'] - nums['text_out']

# Lexicographic order, no duplicates (lexSort already deduplicates).
# The original bound this result to the name ``sorted``, shadowing the
# builtin -- fixed by using a distinct name.
ordered = lexSort(list(telemarketers))
print("These numbers could be telemarketers: ")
if ordered:
    for num in ordered:
        print(num)
else:
    print(None)
| [
"[email protected]"
] | |
7faf32885b34e41c6daad9a5f3c507c878277f42 | 773bb7e986841d68d0b61bd4310178c7d4bf9276 | /contacts/migrations/0003_alter_contact_owner.py | f2bb563a8c2ebf986b686d01fb082c430b60748b | [] | no_license | Retr02332/Agenda_Contactos | 4c01f2489ae522863c7718d7f8ff6e4ba334b484 | 45c167fb2cb9159509cb66e3e855eb745673e914 | refs/heads/master | 2023-05-08T13:42:22.034636 | 2021-06-03T02:58:17 | 2021-06-03T02:58:17 | 373,363,657 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # Generated by Django 3.2.2 on 2021-06-02 14:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: redefine ``Contact.owner`` as a cascading FK to the user model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contacts', '0002_auto_20210601_1855'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contact',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"[email protected]"
] | |
ee8f1edd0b62eb97599683f3f80c8eec5b03a92b | 8b901d1d77b6939b4582a11501b921e0bf6106e2 | /views.py | 406b1ef417681570993f005a7bd4117f943bc1dc | [] | no_license | hmetua/UA-BDR | 8db9a701b35d7f808f91811a79f5b2e42a7cc181 | 8ffec4f1eaec133584d754ea0eeab655be2b99f0 | refs/heads/master | 2023-04-30T15:48:44.710802 | 2021-05-16T22:27:56 | 2021-05-16T22:27:56 | 349,147,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,937 | py | from django.shortcuts import render
from django.http import HttpResponse
from monappli.models import MessageInfo,EmployeeEnron,DateEnvoi
from django.db import connection
# Create your views here.
#def index(request):
# return HttpResponse("<header> Accueil </header>")
def accueil(request):
    """Render the static home page."""
    return render(request,"accueil.html")
"""
def question1(request):
return render(request,"question1.html",{'resultat':DateEnvoi.objects.all()})
if request.method=="POST":
fromdate=request.POST.get('fromdate')
todate=request.POST.get('todate')
not_this_year = DateEnvoi.objects.filter(timestamp__lte="10-02-2002")
return render(request,"question1.html",{'resultat':not_this_year})
else:
"""
def question1a(request):
    """Rank employees by number of messages authored in a date range.

    POST (``fromdate``/``todate``): runs a parameterized raw query counting
    the messages each employee sent in the range (author/recipient id 0 is
    excluded) and renders the ranking.  GET: renders the empty form.
    """
    if request.method=="POST":
        fromdate=request.POST.get('fromdate')
        todate=request.POST.get('todate')
        # %s placeholders are bound by the driver -- safe from SQL injection.
        searchresult1=EmployeeEnron.objects.raw('SELECT id_mail,nom_prenom,status,number FROM employee_enron JOIN (SELECT auteur,COUNT(*) AS number FROM (SELECT auteur,destinataire FROM (SELECT id_message FROM date_envoi WHERE date_p BETWEEN %s and %s) AS att1,message_info AS m WHERE m.id_message=att1.id_message) AS att2 WHERE att2.auteur !=0 AND att2.destinataire!=0 GROUP BY att2.auteur ORDER BY number DESC) AS att ON id_mail=att.auteur ORDER BY number DESC',[fromdate,todate])
        return render(request,"question1a.html",{'resultat':searchresult1})
    else:
        return render(request,"question1a.html")
def question1b(request):
    """Rank employees by reply messages sent in a date range.

    Same query shape as ``question1a`` but counts only replies
    (``m.reponse=1``).
    """
    if request.method=="POST":
        fromdate=request.POST.get('fromdate')
        todate=request.POST.get('todate')
        searchresult=EmployeeEnron.objects.raw('select id_mail,nom_prenom,status,number from employee_enron join (select auteur,count(*) as number from (select auteur,destinataire from (select id_message from date_envoi where date_p between %s and %s) as att1,message_info as m where m.id_message=att1.id_message and m.reponse=1) as att2 where att2.auteur!=0 and att2.destinataire!=0 group by att2.auteur order by number desc) as att on id_mail=att.auteur order by number desc',[fromdate,todate])
        return render(request,"question1b.html",{'resultat':searchresult})
    else:
        return render(request,"question1b.html")
def question1c(request):
    """Rank employees by non-reply messages sent in a date range.

    Same query shape as ``question1a`` but counts only original (non-reply)
    messages (``m.reponse=0``).
    """
    if request.method=="POST":
        fromdate=request.POST.get('fromdate')
        todate=request.POST.get('todate')
        searchresult=EmployeeEnron.objects.raw('select id_mail,nom_prenom,status,number from employee_enron join (select auteur,count(*) as number from (select auteur,destinataire from (select id_message from date_envoi where date_p between %s and %s) as att1,message_info as m where m.id_message=att1.id_message and m.reponse=0) as att2 where att2.auteur!=0 and att2.destinataire!=0 group by att2.auteur order by number desc) as att on id_mail=att.auteur order by number desc',[fromdate,todate])
        return render(request,"question1c.html",{'resultat':searchresult})
    else:
        return render(request,"question1c.html")
def question2(request):
    """Rank pairs of employees by non-reply messages exchanged in a range.

    ``least``/``greatest`` normalize each (author, recipient) pair so both
    directions count toward the same couple; the result rows are
    (name1, name2, count) tuples.
    """
    if request.method=="POST":
        fromdate=request.POST.get('fromdate')
        todate=request.POST.get('todate')
        cursor = connection.cursor()
        cursor.execute('select c1.nom_prenom as mp,c2.nom_prenom as mo,number from employee_enron c1, employee_enron c2, (select least(tab.auteur,tab.destinataire) as n1,greatest(tab.destinataire,tab.auteur) as n2,count(*) as number from (select auteur,destinataire from (select id_message from date_envoi where date_p between %s and %s) as att1,message_info as m where m.id_message=att1.id_message and m.reponse=0) as tab where tab.auteur!=0 and tab.destinataire!=0 group by n1,n2 order by number desc) as tabf where tabf.n1=c1.id_mail and tabf.n2=c2.id_mail order by number desc',[fromdate,todate])
        searchresult=cursor.fetchall()
        return render(request,"question2.html",{'couple_tableau':searchresult})
    else:
        return render(request,"question2.html")
def question3(request):
    """Count messages per day within the requested date range,
    busiest days first."""
    if request.method=="POST":
        fromdate=request.POST.get('fromdate')
        todate=request.POST.get('todate')
        cursor = connection.cursor()
        cursor.execute('select DATE(date_p) as mp,count(*) as number from (select id_message,date_p from date_envoi where date_p between %s and %s) as att group by date_p order by number desc',[fromdate,todate])
        searchresult=cursor.fetchall()
        return render(request,"question3.html",{'date_tableau':searchresult})
    else:
        return render(request,"question3.html")
def question4(request):
    """Profile a single employee, looked up by exact name from the POST form.

    Gathers: a per-day reply-message average, the count of messages
    exchanged with external correspondents (id 0), the internal contacts
    (recipients of the employee's messages), the employee's name/status and
    e-mail addresses.
    """
    if request.method=="POST":
        nom=request.POST.get('nom')
        cursor = connection.cursor()
        # Average over days of the daily average of distinct reply-message
        # ids authored by the employee.  NOTE(review): the nested averaging
        # of message *ids* is unusual -- confirm intent.
        cursor.execute('select avg(tab11.ml) from (select date(att1.date_p) as mp, avg(att1.moy_mail) as ml from (select date_p,avg(att.mail) as moy_mail from date_envoi,(select distinct id_message as mail from message_info where auteur=(select id_mail from employee_enron where nom_prenom=%s) and reponse=1) as att where id_message=att.mail group by date_p order by date_p desc) as att1 group by date(att1.date_p) order by date(att1.date_p) desc) as tab11',[nom])
        searchresult4a = cursor.fetchall()
        cursor = connection.cursor()
        # Messages sent to or received from external correspondents (id 0).
        cursor.execute('select distinct count(*) from message_info where auteur=(select id_mail from employee_enron where nom_prenom=%s) and destinataire=0 or auteur=0 and destinataire=(select id_mail from employee_enron where nom_prenom=%s)',[nom,nom])
        searchresult4b = cursor.fetchall()
        cursor = connection.cursor() # the employee's internal contacts: the recipients of all their mails
        cursor.execute('select nom_prenom from employee_enron as att1,(select distinct destinataire from message_info where destinataire!=0 and auteur=(select id_mail from employee_enron where nom_prenom=%s) group by destinataire order by destinataire desc) as att where att.destinataire=att1.id_mail',[nom])
        searchresult4c = cursor.fetchall()
        cursor = connection.cursor() # the employee's full name and status
        cursor.execute('select nom_prenom,status from employee_enron where nom_prenom=%s',[nom])
        searchresult4 = cursor.fetchall()
        cursor = connection.cursor() # the employee's e-mail addresses
        cursor.execute('select email from email_employee_info where id_mail=(select id_mail from employee_enron where nom_prenom=%s)',[nom])
        searchresultemail = cursor.fetchall()
        #searchresult4a=DateEnvoi.objects.raw('select identity(integer,1,1) as id,date(att1.date_p) as mp, avg(att1.moy_mail) as ml into newtable from (select date_p,avg(att.mail) as moy_mail from date_envoi,(select distinct id_message as mail from message_info where auteur=(select id_mail from employee_enron where nom=%s)) as att where id_message=att.mail group by date_p order by date_p desc) as att1 group by date(att1.date_p) order by date(att1.date_p) desc',[nom])
        return render(request,"question4.html",{'quest4email':searchresultemail,'quest4a':searchresult4a,'quest4b':searchresult4b,'quest4c':searchresult4c,'quest4premier':searchresult4})
    else:
        return render(request,"question4.html")
def show(request):
    """Render the full employee list."""
    return render(request,"show.html",{'employees':EmployeeEnron.objects.all()})
"[email protected]"
] | |
3f17da05a8aece307fb7389e2f06458db2e2a458 | 67acb613d10fec7c66cfcfda98f849e81218a2dc | /Lib/site-packages/state_machine_crawler/logger.py | cdc1dea8c457942986020f850ec45fc3eafd5c3e | [
"MIT"
] | permissive | gallrobert/Pyweek32-neverending | 32e58ec63d8bd2e1a9d6166d078ddc4e6afcccf9 | 59659e0ded536e17b7450cbd234e6b50096c97b5 | refs/heads/main | 2023-08-01T04:46:03.437911 | 2021-09-20T13:48:36 | 2021-09-20T13:48:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | import sys
import traceback
class Color:
    """ANSI escape sequences for coloured terminal output."""
    RED = "\033[1;91m"
    GREEN = "\033[1;92m"
    BLUE = "\033[1;94m"
    NO_COLOR = "\033[0m"  # reset all attributes
class Symbol:
    """Status markers printed inside the bracketed status cell."""
    PASS = u"\u2713"  # check mark
    FAIL = u"\u2717"  # ballot X
    UNKNOWN = "?"
class StateLogger(object):
    """Console reporter for state-machine transitions.

    Nothing is printed unless debug mode is enabled; all output goes
    straight to stdout and is flushed immediately.
    """

    def __init__(self, debug=False):
        self._debug = debug

    def make_debug(self):
        """Turn debug output on."""
        self._debug = True

    def _pr(self, msg):
        # Single funnel for all output so the debug switch lives in one place.
        if not self._debug:
            return
        sys.stdout.write(msg)
        sys.stdout.flush()

    def _c(self, flag):
        # Colour-coded status cell: green tick / red cross / blue '?'.
        if flag is True:
            cell = Color.GREEN + Symbol.PASS
        elif flag is False:
            cell = Color.RED + Symbol.FAIL
        else:
            cell = Color.BLUE + Symbol.UNKNOWN
        self._pr("[" + cell + Color.NO_COLOR + "]")

    def msg(self, current_state, next_state):
        """Announce the transition about to be attempted."""
        self._pr("+ " + current_state.full_name + " -> " + next_state.full_name + "\n")

    def ok(self):
        self._c(True)
        self._pr("\n")

    def nok(self):
        self._c(False)
        self._pr("\n")

    def transition(self):
        self._pr("\tTransition ")

    def verification(self):
        self._pr("\tVerification ")

    def show_traceback(self):
        """Dump the current traceback between marker lines (debug only)."""
        if not self._debug:
            return
        self._pr("\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n")
        traceback.print_exc()
        self._pr("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n")

    def err(self, msg):
        self._pr(str(msg))
| [
"root@localhost"
] | root@localhost |
8a703d90bf6a6d184b2fbb50bde9b056ce571e27 | f5ff0f4d44c9473452a92a5f6b0fd502a0d61e2e | /main.py | 6179c28bc1f5a25471767e58005d23b58ed935d0 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | hutch-matt/var-MiT-TFKeras | 65a3de5dd405cd44b501de37a04f0469bf3d92b2 | 94e4fd5307f9dae085e408ff19aa4dc6781b17b5 | refs/heads/main | 2023-02-16T14:42:42.478666 | 2021-01-06T18:53:56 | 2021-01-06T18:53:56 | 321,402,057 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,110 | py | # Copyright 2020, MIT Lincoln Laboratory
# SPDX-License-Identifier: BSD-2-Clause
import argparse
import time
import os
import horovod.tensorflow.keras as hvd
from utils import *
if __name__ == '__main__':
    # Entry point for a (Horovod-distributed) Keras train/eval job.
    # Wall-clock timer for the whole job; reported by end_session at the end.
    time_start = time.time()
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--job-id', action='store', dest='job_id', type=int, default=None)
    parser.add_argument('--job-name', action='store', dest='job_name', type=str, default=None)
    parser.add_argument('--batch-size-per-gpu', action='store', dest='batch_size_per_gpu', type=int, default=32)
    parser.add_argument('--reports-folder', action='store', dest='reports_folder', type=str, default=os.getcwd() + '/reports')
    parser.add_argument('--train-only', action='store_true', dest='train_only', default=False)
    parser.add_argument('--eval-only', action='store_true', dest='eval_only', default=False)
    parser.add_argument('--input-model', action='store', dest='input_model', type=str, default=None)
    parser.add_argument('--verbose', action='store', dest='verbose', type=int, default=2)
    parser.add_argument('--random-seed', action='store', dest='random_seed', type=int, default=0)
    parser.add_argument('--loss', action='store', dest='loss', type=str, default='categorical_crossentropy')
    parser.add_argument('--data-transform', action='store', dest='data_transform', type=int, default=0)
    # Required for training:
    parser.add_argument('--epochs', action='store', dest='epochs', type=int, default=100)
    parser.add_argument('--initial-epoch', action='store', dest='initial_epoch', type=int, default=0)
    parser.add_argument('--training-set', action='store', dest='training_set', type=str, default='/home/gridsan/mshutch/Moments_in_Time/data-copy/data/parsed/TrainingBatch_90')
    parser.add_argument('--output-model', action='store', dest='output_model', type=str, default=None)
    parser.add_argument('--optimizer', action='store', dest='optimizer', type=str, default='sgd')
    parser.add_argument('--learning-rate', action='store', dest='learning_rate', type=float, default=0.1)
    parser.add_argument('--momentum', action='store', dest='momentum', type=float, default=0.9)
    parser.add_argument('--warmup-epochs', action='store', dest='warmup_epochs', type=int, default=5)
    parser.add_argument('--early-stopping', action='store_true', dest='early_stopping', default=False)
    # Required for validation:
    parser.add_argument('--validation-set', action='store', dest='validation_set', type=str, default='/home/gridsan/mshutch/Moments_in_Time/data-copy/data/parsed/ValidationBatch_90')
    # parse_known_args: silently ignores unrecognized flags (e.g. from a launcher).
    args, unknown = parser.parse_known_args()
    # Prepare session
    # NOTE(review): setup() presumably initializes Horovod (hvd.init) and
    # pins GPUs — confirm in utils before relying on hvd.rank() below.
    report = args.reports_folder + '/' + args.job_name + '-report.txt'
    setup(args, report)
    args_checker(args)
    write_args(args, report)
    # Build and compile the model once; the same compiled model is used for
    # training, evaluation, or both.
    model = get_model(args.input_model)
    callbacks = get_callbacks(args)
    opt = get_optimizer(optimizer=args.optimizer, learning_rate=args.learning_rate, momentum=args.momentum)
    m = get_metrics()
    model.compile(optimizer=opt, loss=args.loss, metrics=m)
    # Training Only
    if args.train_only:
        trn_generator = DataGenerator(args, report, subset='training')
        print('Starting training on rank ' + str(hvd.rank()))
        session_start = time.time()
        # See note below about fit_generator
        # Progress bars are printed on rank 0 only to keep logs readable.
        hist = model.fit_generator(trn_generator, epochs=args.epochs, verbose=args.verbose if hvd.rank()==0 else 0,
                                   callbacks=callbacks)
        session_end = time.time()
        save_model(model, args.output_model, report)
        save_stats(hist, session_end-session_start, report)
    # Validation Only
    elif args.eval_only:
        val_generator = DataGenerator(args, report, subset='validation')
        print('Starting validation on rank ' + str(hvd.rank()))
        session_start = time.time()
        hist = model.evaluate(x=val_generator, verbose=args.verbose if hvd.rank()==0 else 0, callbacks=callbacks)
        # Average the per-rank metric values across all Horovod workers.
        hist = hvd.allreduce(hist)
        session_end = time.time()
        save_stats(hist, session_end-session_start, report)
        if hvd.rank()==0:
            print('All reduced stats', hist)
    # Training and Validation
    else:
        trn_generator = DataGenerator(args, report, subset='training')
        val_generator = DataGenerator(args, report, subset='validation')
        print('Starting training with validation on rank ' + str(hvd.rank()))
        session_start = time.time()
        # fit_generator is neccessary because fit does not yet support validation generators
        hist = model.fit_generator(trn_generator, epochs=args.epochs, verbose=args.verbose if hvd.rank()==0 else 0,
                                   callbacks=callbacks, validation_data=val_generator, validation_freq=1)
        session_end = time.time()
        save_model(model, args.output_model, report)
        save_stats(hist, session_end-session_start, report)
    # End session
    time_end = time.time()
    end_session(time_end-time_start, report)
| [
"[email protected]"
] | |
f34a8d1493d1dccda6339e8c7dac90eeb8aa0ead | 17358332cab8c2b17078af53453008f11e5cdcc8 | /Code/Utils/fig_area.py | ba12f28f7c7f13fc2ab38173223ad5d5e7c542ad | [] | no_license | jakubwida/MasterThesis | 7857a684897f827a576a1092014d32d8ba317c5d | 8bc97cdb73fdbe3754beeddcc64a06870be48661 | refs/heads/master | 2020-06-16T11:36:51.531830 | 2019-09-13T06:43:56 | 2019-09-13T06:43:56 | 195,558,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,638 | py | import numpy as np
import shapely
from shapely.geometry import point
from shapely.geometry import linestring
from shapely import ops
import matplotlib.pyplot as plt
from descartes import PolygonPatch
def split_disjoint(radiuses, positions):
    """Partition circle indices into connected groups of overlapping circles.

    Two circles overlap when the distance between their centres is strictly
    smaller than the sum of their radii; the returned groups are the
    connected components of that overlap relation.

    Bug fix: the original pairwise merge replaced both circles' group sets
    with their union, which does not propagate transitively — for circles
    A-B overlapping and B-C overlapping, A's set never learned about C, so
    the output could contain two non-disjoint "groups" ({A,B} and {A,B,C}).
    A union-find over all pairs fixes that.

    Parameters
    ----------
    radiuses : array-like of float, shape (n,)
        Circle radii.
    positions : array-like of float, shape (n, 2)
        Circle centre coordinates (numpy array, rows are (x, y)).

    Returns
    -------
    list of list of int
        One list of circle indices per connected component.
    """
    size = len(radiuses)
    parent = list(range(size))

    def _find(i):
        # Find i's component representative with path halving.
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i

    for i in range(size):
        for j in range(i + 1, size):
            # Strict inequality: tangent circles are NOT considered overlapping,
            # matching the original comparison.
            if np.linalg.norm(positions[i] - positions[j]) < (radiuses[i] + radiuses[j]):
                parent[_find(i)] = _find(j)

    groups = {}
    for i in range(size):
        groups.setdefault(_find(i), []).append(i)
    return list(groups.values())
def circle_group_area(radiuses, positions):
    """Area of the largest merged region formed by a group of circles.

    Each circle is approximated by a shapely buffer polygon; the union of
    all circles is split into faces, and the face whose exterior ring
    encloses the biggest area is selected. That face's own area (holes
    excluded) is returned.
    """
    circles = [
        point.Point(x, y).buffer(r)
        for (x, y), r in zip(positions, radiuses)
    ]
    union = ops.unary_union(circles)
    faces = [geom for geom in ops.polygonize(union)]
    # Rank faces by the area enclosed by their outer boundary.
    outer_areas = [list(ops.polygonize(face.exterior))[0].area for face in faces]
    biggest = np.argmax(outer_areas)
    return faces[biggest].area
def single_circle_area(radius):
    """Analytic area of a single circle: pi * r**2."""
    return np.pi * radius ** 2
def fig_area(radiuses, positions):
    """Total area covered by a set of possibly-overlapping circles.

    The circles are first split into disjoint connected groups; an isolated
    circle contributes its analytic area (pi*r^2), while each overlapping
    group is measured geometrically via :func:`circle_group_area`.

    Bug fix: the overlapping branch previously built ``g_radiuses`` /
    ``g_positions`` for the group but then called
    ``circle_group_area(radiuses, positions)`` with the FULL arrays,
    counting every other circle again for each multi-circle group. It now
    passes only the group's own circles.

    Parameters
    ----------
    radiuses : numpy array of float, shape (n,)
    positions : numpy array of float, shape (n, 2)

    Returns
    -------
    float
        Covered area.
    """
    groups = split_disjoint(radiuses, positions)
    totalarea = 0.0
    for group in groups:
        if len(group) == 1:
            totalarea += single_circle_area(radiuses[group[0]])
        else:
            idx = np.array(group)
            totalarea += circle_group_area(radiuses[idx], positions[idx])
    return totalarea
# Smoke test: two well-separated circles, so the printed result should equal
# the sum of their individual areas (pi*0.5^2 + pi*5^2 ≈ 79.33).
radiuses = np.array([0.5,5.0])
positions = np.array([(0.0,0.0),(15.0,15.0)])
print(fig_area(radiuses,positions))
| [
"[email protected]"
] | |
b394e36719b9e55f7e706baf6fae30445a66fa3d | cc08f8eb47ef92839ba1cc0d04a7f6be6c06bd45 | /Personal/Developent/AjaxWithDjango/bin/django-admin.py | 4e1e59e580a78fff0c5e6e5df8d28ccb987f3e29 | [] | no_license | ProsenjitKumar/PycharmProjects | d90d0e7c2f4adc84e861c12a3fcb9174f15cde17 | 285692394581441ce7b706afa3b7af9e995f1c55 | refs/heads/master | 2022-12-13T01:09:55.408985 | 2019-05-08T02:21:47 | 2019-05-08T02:21:47 | 181,052,978 | 1 | 1 | null | 2022-12-08T02:31:17 | 2019-04-12T17:21:59 | null | UTF-8 | Python | false | false | 168 | py | #!/root/PycharmProjects/Developent/AjaxWithDjango/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
7b353bbb14a0e2e0939e80efd3d9aead6c7940a4 | b0de612c2f7d03399c0d02c5aaf858a72c9ad818 | /armi/cli/gridGui.py | a1fc5254b325be2ea49a6af8e2eab5f663a17409 | [
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | wangcj05/armi | 2007e7abf4b422caca0157fc4405b7f45fc6c118 | 8919afdfce75451b291e45ca1bc2e03c044c2090 | refs/heads/master | 2022-12-22T00:05:47.561722 | 2022-12-13T16:46:57 | 2022-12-13T16:46:57 | 277,868,987 | 0 | 0 | Apache-2.0 | 2020-07-07T16:32:40 | 2020-07-07T16:32:39 | null | UTF-8 | Python | false | false | 1,929 | py | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Grid editor GUI entry point.
CLI entry point to spin up the GridEditor GUI.
"""
from armi.cli import entryPoint
class GridGuiEntryPoint(entryPoint.EntryPoint):
    """
    Load the grid editor GUI
    """

    name = "grids"

    def addOptions(self):
        # Single optional positional argument: a blueprint file to preload.
        self.parser.add_argument(
            "blueprints",
            nargs="?",
            type=str,
            default=None,
            help="Optional path to a blueprint file to open",
        )

    def invoke(self):
        # wxPython is heavy and optional, so import it only when the GUI is
        # actually being launched.
        try:
            import wx
            from armi.utils import gridEditor
        except ImportError:
            raise RuntimeError(
                "wxPython is not installed in this "
                "environment, but is required for the Grid GUI. wxPython is not "
                "installed during the default ARMI installation process. Refer to "
                "installation instructions to install extras like wxPython."
            )

        app = wx.App()
        frame = wx.Frame(None, wx.ID_ANY, title="Grid Editor", size=(1000, 1000))
        editor = gridEditor.GridBlueprintControl(frame)
        frame.Show()

        blueprintPath = self.args.blueprints
        if blueprintPath is not None:
            editor.loadFile(blueprintPath)

        app.MainLoop()
| [
"[email protected]"
] | |
9a5efdde39fe2d4a9c80dde02c65c509f4afcf61 | b72a2589aacacb10921cb3bfcfb23b853f92286d | /single_jeans.py | a7632e76888f7600935751412d8937754a6266ab | [] | no_license | Jonathanfreundlich/CuspCore_NIHAOtest | 7d2257fec15eb983457302ae94d37ac7e39e9046 | 5d5bf6654e5e32cd5566b644023f38fb5a9e76f8 | refs/heads/main | 2023-03-27T23:20:35.479772 | 2021-03-30T12:28:01 | 2021-03-30T12:28:01 | 352,965,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,195 | py | # COMPARE (alpha+gamma-2beta)sigma_r^2 and Vc^2
from matplotlib.pylab import *
G = 4.499753324353496e-06 # gravitational constant [kpc^3 Gyr^-2 Msun^-1]
parsec=3.085677581e16 #m
year=3.1557600e7 #s
kms_to_kpcGyr=1/parsec*year*1e9
fontsize=20
legendsize=18
linewidth=2
linesize=5
component='d'
rcParams['axes.linewidth'] = linewidth
rcParams['xtick.major.size'] = linesize
rcParams['xtick.major.width'] = linewidth
rcParams['xtick.minor.size'] = linesize
rcParams['xtick.minor.width'] = linewidth
rcParams['ytick.major.size'] = linesize
rcParams['ytick.major.width'] = linewidth
rcParams['ytick.minor.size'] = linesize
rcParams['ytick.minor.width'] = linewidth
rcParams['xtick.labelsize'] = fontsize-4
rcParams['ytick.labelsize'] = fontsize-4
figure()
axhline(0,color='gray')
for (i,col,kstring,label) in zip((k,k+1),('blue','red'),('k','k+1'),('before','after')):
ss=gl[i]
r= ss[component]['r']
Rvir=ss['Rvir']
Mvir=ss[component]['Mvir']
Kvir=0.5*G*Mvir/Rvir
logr=log10(ss[component]['r']/ss['Rvir'])
t=ss['t']
logr=log10(r/Rvir)
# Term1 = (alpha+gamma-2beta)sigma_r^2
sigma2=(ss[component]['sigmar_smooth']/3.085677581*3.1556952)**2
alpha=ss[component]['alpha']
beta=ss[component]['beta_smooth']
gamma=ss[component]['gamma']
alpha_lin=linearize(alpha,logr,rlim)
beta_lin=linearize(beta,logr,rlim)
gamma_lin=linearize(gamma,logr,rlim)
Term1 = (alpha+gamma-2.*beta)*sigma2/Kvir
Term1_lin = (alpha_lin+gamma_lin-2.*beta_lin)*sigma2/Kvir
# Term2 = Vc2
vc2=G*ss[component]['Mall']/r
Term2=vc2/Kvir
line1,=plot(logr,Term1,color=col,linestyle='-',label=r'$\rm K_{1}=(\alpha+\gamma-2\beta)\sigma_r^2$')
line1lin,=plot(logr,Term1_lin,color=col,linestyle=':')
line2,=plot(logr,Term2,color=col,linestyle='--',label=r'$\rm K_{2} = V_c^2$')
xlabel(r'$\log(r/R_{\rm vir})$',fontsize=fontsize)
ylabel(r'$\rm K/K_{vir}$',fontsize=fontsize) #$\rm [kpc^2 Gyr^{-2}]$
legend((line1,line2),(r'$\rm K_{1}=(\alpha+\gamma-2\beta)\sigma_r^2$',r'$\rm K_{2} = V_c^2$'), fontsize=legendsize,frameon=False,loc='upper left')
ylim(-2,6)
xlim(rlim)
vlines=concatenate((linspace(0.01,0.1,10),linspace(0.2,1,9)))
for xv in vlines:
axvline(x=log10(xv),color='k',linestyle='-',alpha=0.2)
ax=gca()
ax.text(0.03,0.11, r'$\rm blue:$ $k=%i$'%k, transform=ax.transAxes, color='blue',fontsize=fontsize)
ax.text(0.03,0.04, r'$\rm red:$ $k+1=%i$'%(k+1), transform=ax.transAxes, color='red',fontsize=fontsize)
############################################
figure()
axhline(0,color='gray')
for (i,col,kstring,label) in zip((k,k+1),('blue','red'),('k','k+1'),('before','after')):
ss=gl[i]
r= ss[component]['r']
Rvir=ss['Rvir']
Mvir=ss[component]['Mvir']
Kvir=0.5*G*Mvir/Rvir
logr=log10(ss[component]['r']/ss['Rvir'])
t=ss['t']
logr=log10(r/Rvir)
# Term1 = (alpha+gamma-2beta)sigma_r^2
sigma2=(ss[component]['sigmar_smooth']/3.085677581*3.1556952)**2
alpha=ss[component]['alpha']
beta=ss[component]['beta_smooth']
gamma=ss[component]['gamma']
alpha_lin=linearize(alpha,logr,rlim)
beta_lin=linearize(beta,logr,rlim)
gamma_lin=linearize(gamma,logr,rlim)
Term1 = (alpha+gamma-2.*beta)*sigma2/Kvir
Term1_lin = (alpha_lin+gamma_lin-2.*beta_lin)*sigma2/Kvir
# Term2 = Vc2
vc2=G*ss[component]['Mall']/r
Term2=vc2/Kvir
DT=(Term1-Term2)/Term2
DT_lin=(Term1_lin-Term2)/Term2
line1,=plot(logr,DT,color=col,linestyle='-')
line1_lin,=plot(logr,DT_lin,color=col,linestyle=':')
axhline(0.5,color='gray')
axhline(-0.5,color='gray')
axhline(1,color='gray')
axhline(-1,color='gray')
vlines=concatenate((linspace(0.01,0.1,10),linspace(0.2,1,9)))
for xv in vlines:
axvline(x=log10(xv),color='k',linestyle='-',alpha=0.2)
xlabel(r'$\log(r/R_{\rm vir})$',fontsize=fontsize)
ylabel(r'$\rm (K_{1}-K_{2})/K_{2}$',fontsize=fontsize) #$\rm [kpc^2 Gyr^{-2}]$
legend(fontsize=legendsize,frameon=False,loc='upper left')
ylim(-2.,2.)
xlim(rlim)
ax=gca()
ax.text(0.05,0.9,r'Relative difference',fontsize=fontsize,transform=ax.transAxes)
ax.text(0.03,0.11, r'$\rm blue:$ $k=%i$'%k, transform=ax.transAxes, color='blue',fontsize=fontsize)
ax.text(0.03,0.04, r'$\rm red:$ $k+1=%i$'%(k+1), transform=ax.transAxes, color='red',fontsize=fontsize)
############################################
# COMPARE Kreal and Kjeans = 0.5 (3-2beta)/(alpha+gamma-2beta) Vc^2
figure()
axhline(0,color='gray')
for (i,col,kstring,label) in zip((k,k+1),('blue','red'),('k','k+1'),('before','after')):
ss=gl[i]
r= ss[component]['r']
Rvir=ss['Rvir']
Mvir=ss[component]['Mvir']
Kvir=0.5*G*Mvir/Rvir
logr=log10(ss[component]['r']/ss['Rvir'])
t=ss['t']
logr=log10(r/Rvir)
p = ss['d'][fitname]['p']
# Tjeans = 0.5 (3-2beta)/(alpha+gamma-2beta) Vc^2
vc2=G*ss[component]['Mall']/r
alpha=ss[component]['alpha']
beta=ss[component]['beta_smooth']
gamma=ss[component]['gamma']
alpha_lin=linearize(alpha,logr,rlim)
beta_lin=linearize(beta,logr,rlim)
gamma_lin=linearize(gamma,logr,rlim)
num=redress_denominator(3-2.*beta)
num_lin=redress_denominator(3-2.*beta_lin)
den=redress_denominator(alpha+gamma-2.*beta)
den_lin=redress_denominator(alpha_lin+gamma_lin-2.*beta_lin)
Tjeans=0.5*num/den*vc2/Kvir
Tjeans_lin=0.5*num_lin/den_lin*vc2/Kvir
# T real
T_real=treal[i][2]/Kvir
# T_evol
M_fit = prf.M(r, p)
M_real=ss[component]['Mall']
add_params=[]
if Ttype=='jeans-Mreal' or Ttype=='alpha-Mreal' or Ttype=='alpha-p-Mreal':
M = M_real
add_params=M_real
else:
M=M_fit
T_evol=get_T(r,[alpha,beta,gamma,p],m=0.,Ttype=Ttype,do_smooth=False,add_params=add_params)/Kvir
line1,=plot(logr,Tjeans,color=col,linestyle='-',label=r'$\rm K_{Jeans}=\frac{3-2\beta}{\alpha+\gamma-2\beta}\frac{GM(r)}{2r}$')
line1_evol,=plot(logr,T_evol,color=col,linestyle='--',label=r'$\rm K_{model}$')
line2,=plot(logr,T_real,color=col,linestyle=':',label=r'$\rm K_{real}$')
xlabel(r'$\log(r/R_{\rm vir})$',fontsize=fontsize)
ylabel(r'$\rm K/K_{vir}$',fontsize=fontsize) #$\rm [kpc^2 Gyr^{-2}]$
legend((line1,line1_evol,line2),(r'$\rm K_{Jeans}=\frac{3-2\beta}{\alpha+\gamma-2\beta}\frac{GM(r)}{2r}$',r'$\rm K_{model}$',r'$\rm K_{real}$'),fontsize=legendsize,frameon=False,loc='upper left')
ylim(-2,6)
xlim(rlim)
vlines=concatenate((linspace(0.01,0.1,10),linspace(0.2,1,9)))
for xv in vlines:
axvline(x=log10(xv),color='k',linestyle='-',alpha=0.2)
ax=gca()
ax.text(0.03,0.11, r'$\rm blue:$ $k=%i$'%k, transform=ax.transAxes, color='blue',fontsize=fontsize)
ax.text(0.03,0.04, r'$\rm red:$ $k+1=%i$'%(k+1), transform=ax.transAxes, color='red',fontsize=fontsize)
figure()
axhline(0,color='gray')
for (i,col,kstring,label) in zip((k,k+1),('blue','red'),('k','k+1'),('before','after')):
ss=gl[i]
r= ss[component]['r']
Rvir=ss['Rvir']
Mvir=ss[component]['Mvir']
Kvir=0.5*G*Mvir/Rvir
logr=log10(ss[component]['r']/ss['Rvir'])
t=ss['t']
logr=log10(r/Rvir)
# Tjeans = 0.5 (3-2beta)/(alpha+gamma-2beta) Vc^2
vc2=G*ss[component]['Mall']/r
alpha=ss[component]['alpha']
beta=ss[component]['beta_smooth']
gamma=ss[component]['gamma']
alpha_lin=linearize(alpha,logr,rlim)
beta_lin=linearize(beta,logr,rlim)
gamma_lin=linearize(gamma,logr,rlim)
num=redress_denominator(3-2.*beta)
num_lin=redress_denominator(3-2.*beta_lin)
den=redress_denominator(alpha+gamma-2.*beta)
den_lin=redress_denominator(alpha_lin+gamma_lin-2.*beta_lin)
Tjeans=0.5*num/den*vc2/Kvir
Tjeans_lin=0.5*num_lin/den_lin*vc2/Kvir
# T real
T_real=treal[i][2]/Kvir
# T_evol
M_fit = prf.M(r, p)
M_real=ss[component]['Mall']
add_params=[]
if Ttype=='jeans-Mreal' or Ttype=='alpha-Mreal' or Ttype=='alpha-p-Mreal':
M = M_real
add_params=M_real
else:
M=M_fit
T_evol=get_T(r,[alpha,beta,gamma,p],m=0.,Ttype=Ttype,do_smooth=False,add_params=add_params)/Kvir
# Relative difference
DT=(Tjeans-T_real)/T_real
DT_lin=(Tjeans_lin-T_real)/T_real
DT_evol=(T_evol-T_real)/T_real
line1,=plot(logr,DT,color=col,linestyle='-')
line1_lin,=plot(logr,DT_lin,color=col,linestyle=':')
line2,=plot(logr,DT_evol,color=col,linestyle='--')
xlabel(r'$\log(r/R_{\rm vir})$',fontsize=fontsize)
ylabel(r'$\rm (K_{Jeans}-K_{real})/K_{real}$',fontsize=fontsize) #$\rm [kpc^2 Gyr^{-2}]$
ylim(-2,2)
xlim(rlim)
axhline(0.5,color='gray')
axhline(-0.5,color='gray')
axhline(1,color='gray')
axhline(-1,color='gray')
vlines=concatenate((linspace(0.01,0.1,10),linspace(0.2,1,9)))
for xv in vlines:
axvline(x=log10(xv),color='k',linestyle='-',alpha=0.2)
ax=gca()
ax.text(0.55,0.9,r'Relative difference',fontsize=fontsize,transform=ax.transAxes)
ax.text(0.03,0.11, r'$\rm blue:$ $k=%i$'%k, transform=ax.transAxes, color='blue',fontsize=fontsize)
ax.text(0.03,0.04, r'$\rm red:$ $k+1=%i$'%(k+1), transform=ax.transAxes, color='red',fontsize=fontsize)
| [
"[email protected]"
] | |
3cc5a69c59fc3e2453bfc4a89635a7f329aff0e6 | 33a4dfa243eefe6f0b6c5cb522f77ff54b72060b | /website/settings.py | fcd0d89490e54cf1c9b0efdfcc3464e5b26e2c87 | [] | no_license | hussamjarrar/blog2 | ef7d86b10b7db51341dc46e5308f279d16c00150 | cb654a01f157b505394aa68703b84003cc4bb38a | refs/heads/master | 2021-05-15T11:38:45.541843 | 2017-10-25T13:07:45 | 2017-10-25T13:07:45 | 108,243,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,174 | py | """
Django settings for website project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,"template")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=7djf1syd5yw@rzgul+=art!kx%c%=37a-b_32skcy=8x%7g#a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apptwo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
5b017f7e867d09e7f141ac36f2038c60b4c5f071 | 5b392247faf8a59529bf4101c746883aa1bf58a5 | /src/extra_apps/xadmin/plugins/filters.py | 74fa41113e6d838ec2bb2400c031179d35c02f0a | [] | no_license | Kingvast/GmOnline | 5d18343d03a54721c955d32b5bdb0d78e4aaa544 | 34497e0857cc62a967c04ed4627fc51bc751bb07 | refs/heads/master | 2022-12-09T22:40:38.814639 | 2022-03-20T17:23:46 | 2022-03-20T17:23:46 | 150,425,272 | 1 | 0 | null | 2022-12-08T05:13:47 | 2018-09-26T12:45:09 | JavaScript | UTF-8 | Python | false | false | 10,772 | py | import operator
from future.utils import iteritems
from xadmin import widgets
from xadmin.plugins.utils import get_context_dict
from django.contrib.admin.utils import get_fields_from_path, lookup_needs_distinct
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured, ValidationError
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db.models.constants import LOOKUP_SEP
from django.db.models.sql.constants import QUERY_TERMS
from django.template import loader
from django.utils import six
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from xadmin.filters import manager as filter_manager, FILTER_PREFIX, SEARCH_VAR, DateFieldListFilter, \
RelatedFieldSearchFilter
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.util import is_related_field
from functools import reduce
class IncorrectLookupParameters(Exception):
    # Raised when query-string filter/search parameters cannot be applied to
    # the queryset (e.g. a lookup on a nonexistent field).
    pass
class FilterPlugin(BaseAdminPlugin):
list_filter = ()
search_fields = ()
free_query_filter = True
def lookup_allowed(self, lookup, value):
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
if len(parts) > 1 and parts[-1] in QUERY_TERMS:
parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
# if foo has been specificially included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existants fields are ok, since they're ignored
# later.
return True
if hasattr(field, 'rel'):
model = field.rel.to
rel_name = field.rel.get_related_field().name
elif is_related_field(field):
model = field.model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
return clean_lookup in self.list_filter
def get_list_queryset(self, queryset):
lookup_params = dict(
[(smart_str(k)[len(FILTER_PREFIX):], v)
for k, v in self.admin_view.params.items()
if smart_str(k).startswith(FILTER_PREFIX) and v != ''])
for p_key, p_val in iteritems(lookup_params):
if p_val == "False":
lookup_params[p_key] = False
use_distinct = False
# for clean filters
self.admin_view.has_query_param = bool(lookup_params)
self.admin_view.clean_query_url = self.admin_view.get_query_string(
remove=[
k for k in self.request.GET.keys()
if k.startswith(FILTER_PREFIX)
])
# Normalize the types of keys
if not self.free_query_filter:
for key, value in lookup_params.items():
if not self.lookup_allowed(key, value):
raise SuspiciousOperation(
"Filtering by %s not allowed" % key)
self.filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(self.request, lookup_params, self.model,
self)
else:
field_path = None
field_parts = []
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field, field_list_filter_class = list_filter, filter_manager.create
if not isinstance(field, models.Field):
field_path = field
field_parts = get_fields_from_path(
self.model, field_path)
field = field_parts[-1]
spec = field_list_filter_class(
field,
self.request,
lookup_params,
self.model,
self.admin_view,
field_path=field_path)
if len(field_parts) > 1:
# Add related model name to title
spec.title = "%s %s" % (field_parts[-2].name,
spec.title)
# Check if we need to use distinct()
use_distinct = (use_distinct or lookup_needs_distinct(
self.opts, field_path))
if spec and spec.has_output():
try:
new_qs = spec.do_filte(queryset)
except ValidationError as e:
new_qs = None
self.admin_view.message_user(
_("<b>Filtering error:</b> %s") % e.messages[0],
'error')
if new_qs is not None:
queryset = new_qs
self.filter_specs.append(spec)
self.has_filters = bool(self.filter_specs)
self.admin_view.filter_specs = self.filter_specs
obj = filter(lambda f: f.is_used, self.filter_specs)
if six.PY3:
obj = list(obj)
self.admin_view.used_filter_num = len(obj)
try:
for key, value in lookup_params.items():
use_distinct = (use_distinct
or lookup_needs_distinct(self.opts, key))
except FieldDoesNotExist as e:
raise IncorrectLookupParameters(e)
try:
# fix a bug by david: In demo, quick filter by IDC Name() cannot be used.
if isinstance(queryset, models.query.QuerySet) and lookup_params:
new_lookup_parames = dict()
for k, v in lookup_params.items():
list_v = v.split(',')
if len(list_v) > 0:
new_lookup_parames.update({k: list_v})
else:
new_lookup_parames.update({k: v})
queryset = queryset.filter(**new_lookup_parames)
except (SuspiciousOperation, ImproperlyConfigured):
raise
except Exception as e:
raise IncorrectLookupParameters(e)
else:
if not isinstance(queryset, models.query.QuerySet):
pass
query = self.request.GET.get(SEARCH_VAR, '')
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
if self.search_fields and query:
orm_lookups = [
construct_search(str(search_field))
for search_field in self.search_fields
]
for bit in query.split():
or_queries = [
models.Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups
]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
self.admin_view.search_query = query
if use_distinct:
return queryset.distinct()
else:
return queryset
# Media
def get_media(self, media):
arr = filter(lambda s: isinstance(s, DateFieldListFilter),
self.filter_specs)
if six.PY3:
arr = list(arr)
if bool(arr):
media = media + self.vendor('datepicker.css', 'datepicker.js',
'xadmin.widget.datetime.js')
arr = filter(lambda s: isinstance(s, RelatedFieldSearchFilter),
self.filter_specs)
if six.PY3:
arr = list(arr)
if bool(arr):
media = media + self.vendor('select.js', 'select.css',
'xadmin.widget.select.js')
return media + self.vendor('xadmin.plugin.filters.js')
# Block Views
def block_nav_menu(self, context, nodes):
if self.has_filters:
nodes.append(
loader.render_to_string(
'xadmin/blocks/model_list.nav_menu.filters.html',
context=get_context_dict(context)))
def block_nav_form(self, context, nodes):
if self.search_fields:
context = get_context_dict(context or {}) # no error!
context.update({
'search_var':
SEARCH_VAR,
'remove_search_url':
self.admin_view.get_query_string(remove=[SEARCH_VAR]),
'search_form_params':
self.admin_view.get_form_params(remove=[SEARCH_VAR])
})
nodes.append(
loader.render_to_string(
'xadmin/blocks/model_list.nav_form.search_form.html',
context=context))
site.register_plugin(FilterPlugin, ListAdminView)
| [
"[email protected]"
] | |
bfeb340af283eb04f5529820d56d98a9732a5174 | 3cd35c16392d865d448cc55ae4a28cc3f2011120 | /apimadetest/apitest/views.py | 1964a67caf76c2a27a870e6b69436f2ee0763acc | [] | no_license | testshiling/apimadetest | d91e37fe4856f9f05a7741b0fead161e77a5c18c | d2e0fd4d04557248de90b0981ff0b6c2b7e5b6ec | refs/heads/master | 2020-05-18T16:53:26.168897 | 2019-12-06T14:38:43 | 2019-12-06T14:38:43 | 184,537,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,230 | py | from django.shortcuts import render,HttpResponse
from apitest.models import *
import json
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.models import User
from django.contrib.auth.hashers import check_password, make_password
import sys
import datetime
import threading
import requests
# Demo endpoint: maps the posted ``data`` id to a canned message.
@csrf_exempt
@api_view(http_method_names=['POST'])
@permission_classes((permissions.AllowAny,))
def api_demo(request):
    """Return a fixed message for ids 1 and 2, a fallback otherwise."""
    # NOTE(review): "Thers is nothing" reproduces the original response text,
    # typo included, so existing clients keep matching it.
    messages = {
        1: 'There are three dogs',
        2: 'There are two dogs',
    }
    requested = request.data['data']
    data = messages.get(requested, 'Thers is nothing')
    return Response({'data': data})
# Login endpoint (POST, JSON body)
@csrf_exempt
@api_view(http_method_names=['POST'])
@permission_classes((permissions.AllowAny,))
def login_post(request):
    """Authenticate a user from a JSON body with ``username`` and ``password``."""
    payload = json.loads(request.body)
    try:
        user = User.objects.get(username=payload['username'])
    except User.DoesNotExist:
        return Response({"status_code": 400, 'msg': "用户不存在"})
    if check_password(payload['password'], user.password):
        return Response({"status_code": 200, 'msg': "登录成功"})
    return Response({"status_code": 400, 'msg': "密码错误"})
# Login endpoint (GET, query parameters)
@csrf_exempt
@api_view(http_method_names=['GET'])
@permission_classes((permissions.AllowAny,))
def login_get(request):
    """Authenticate a user from ``username``/``password`` query parameters."""
    username = request.GET.get('username')
    password = request.GET.get('password')
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        return Response({"status_code": 400, 'msg': "用户不存在"})
    if check_password(password, user.password):
        return Response({"status_code": 200, 'msg': "登录成功"})
    return Response({"status_code": 400, 'msg': "密码错误"})
# Registration endpoint
@csrf_exempt
@api_view(http_method_names=['POST'])
@permission_classes((permissions.AllowAny,))
def register(request):
    """
    Register a new account from a JSON body, e.g.::

        {"username": "luoshiling18", "password": "admin12345",
         "email": "user@example.com"}

    Returns 400 when any field is blank or the username is already taken,
    otherwise creates the user with a hashed password and returns 200.
    """
    data = json.loads(request.body)
    if not (data['username'] and data['password'] and data['email']):
        return Response({
            "status_code": 400,
            'msg': "信息错误"
        })
    # Robustness fix: a duplicate username previously crashed with an
    # IntegrityError (HTTP 500) instead of returning a clean 400 response.
    if User.objects.filter(username=data['username']).exists():
        return Response({
            "status_code": 400,
            'msg': "用户已存在"
        })
    user = User(email=data['email'],
                password=make_password(data['password']),
                username=data['username'])
    user.save()
    return Response({
        "status_code": 200,
        'msg': "注册成功"
    })
# Database field-name validation helper -- currently unused
def is_fields_error(_model, fields, ex_fields):
    """
    Check that every name in ``fields`` is a field of ``_model``.

    :param _model: Django model class to validate against.
    :param fields: list/tuple of candidate field names (mutated: entries in
        ``ex_fields`` are removed in place, matching the original behavior).
    :param ex_fields: names excluded from the check, e.g. foreign keys.
    :return: ``(True, '')`` when all names are valid, otherwise
        ``(False, first_bad_key)`` or ``(False, '参数有误')`` for bad arguments.
    """
    from django.db import models
    if ex_fields:
        for name in ex_fields:
            if name in fields:
                fields.remove(name)
    if not (issubclass(_model, models.Model) and isinstance(fields, (list, tuple))):
        return False, u'参数有误'
    # BUG FIX: _meta.get_fields() returns Field *objects*, not names; the
    # original compared strings against Field instances, so every lookup
    # failed and the function always reported the first key as invalid.
    all_field_names = {f.name for f in _model._meta.get_fields()}
    for key in fields:
        if key not in all_field_names:
            return False, key
    return True, ''
# Endpoint: add a lodging unit
@csrf_exempt
@api_view(http_method_names=['POST'])
@permission_classes((permissions.AllowAny,))
def add_lodgeinfo(request):
    """
    Add a lodging unit from a JSON body, e.g.::

        {"dayprice": 3, "estate": "valid", "minday": 1, "maxday": 2,
         "tel": "15901304864", "remarks": "", "address_id": "124253424342",
         "image_md5": "sfdgwet4husf98fwiuhfsjkdhwh"}

    Returns 200 on success, otherwise 400 with the exception description.
    The body keys are passed straight to the model constructor, so unknown
    keys surface as an error response rather than a crash.
    """
    info_dict = json.loads(request.body)
    try:
        lodgeunitinfo.objects.create(**info_dict)
        return Response({"status_code": 200, "msg": "房源添加成功"})
    except Exception as exc:
        # BUG FIX: the original did `sys.exc_info()[0] + ":" + sys.exc_info()[1]`,
        # which concatenates an exception *type* with a string and itself
        # raises TypeError, masking the real error.
        return Response({"status_code": 400,
                         "msg": "{}:{}".format(type(exc).__name__, exc)})
# Endpoint: create an order
@csrf_exempt
@api_view(http_method_names=['POST'])
@permission_classes((permissions.AllowAny,))
def create_order(request):
    """
    Create an order for a lodging unit from a JSON body, e.g.::

        {"luid": 1, "guestnum": 2,
         "checkinday": "2019-01-03", "checkoutday": "2019-01-04"}

    Returns 400 when the unit does not exist or the stay is shorter than
    one night; otherwise stores the order with a computed total price.
    """
    data = json.loads(request.body)
    luid = data['luid']
    # Length of stay in nights.
    daynum = (datetime.datetime.strptime(data['checkoutday'], '%Y-%m-%d')
              - datetime.datetime.strptime(data['checkinday'], '%Y-%m-%d'))
    # Existence check in a single query (the original loaded every id into
    # a Python list just to test membership).
    if not lodgeunitinfo.objects.filter(id=luid).exists():
        # BUG FIX: the original evaluated `luid + "不存在"`, which raises
        # TypeError for integer ids coming from JSON.
        return Response({"status_code": 400, "msg": "{}不存在".format(luid)})
    if daynum.days < 1:
        return Response({"status_code": 400, "msg": "入住时间不能晚于离开时间"})
    lodgeinfo = lodgeunitinfo.objects.filter(id=str(luid))
    dayprice = 0
    for i in lodgeinfo:
        dayprice = i.dayprice
    totalprice = int(daynum.days) * dayprice
    data["totalprice"] = totalprice
    order.objects.create(**data)
    return Response({"status_code": 200, "msg": "创建订单成功"})
# Payment callback endpoint -- example
@csrf_exempt
@api_view(http_method_names=['POST'])
@permission_classes((permissions.AllowAny,))
def payback_order(request):
    """Payment callback (example): mark the given order as consumed."""
    payload = json.loads(request.body)
    order_id = payload['order_id']
    try:
        order.objects.filter(id=order_id).update(estate='deleted')
    except Exception:
        return Response({"status_code": 400, "msg": "订单回调更新失败"})
    return Response({"status_code": 200, "msg": "支付成功"})
# Simulated third-party payment endpoint
@csrf_exempt
@api_view(http_method_names=['POST'])
@permission_classes((permissions.AllowAny,))
def others_pay_order(request):
    """Simulated third-party payment endpoint.

    Records the order on the third-party side (``others_order``), checks the
    submitted total price and order state against our ``order`` table, then
    fires two background threads: one updates the third-party record and one
    runs the payment callback. The HTTP response is returned immediately,
    before the threads finish.
    """
    data = json.loads(request.body)
    order_id = data['order_id']
    totalprice = data['totalprice']
    # Create the third-party record the first time this order id is seen.
    other_order_info = others_order.objects.filter(order_id=str(order_id))
    if other_order_info:
        pass
    else:
        others_order.objects.create(order_id=order_id)
    # In a real integration this lookup would query the third party's own
    # store, not our `order` table (translated from the original comment).
    # NOTE(review): QuerySet.filter() is lazy and does not raise for missing
    # rows, so this except branch likely never fires -- confirm intent.
    try:
        orderinfo = order.objects.filter(id=str(order_id))
    except Exception:
        return Response({"status_code": 400, "msg": "订单不存在"})
    # Only the first matching row is examined: every branch returns.
    for i in orderinfo:
        if totalprice == i.totalprice:
            if i.estate == 'valid':
                coo_others = threading.Thread(target=update_others_order, kwargs=({"order_id": order_id, "totalprice": totalprice, "estate": "yes"}))
                coo_others.start()
                coo_back = threading.Thread(target=payback_order_true, kwargs=({"order_id": order_id}))
                coo_back.start()
                return Response({"status_code": 200, "msg": "支付成功"})
            else:
                return Response({"status_code": 400, "msg": "订单状态不正确"})
        else:
            return Response({"status_code": 400, "msg": "订单总价不正确"})
# Payment callback -- used by the third-party flow
def payback_order_true(**data):
    """Payment callback for the third-party flow; runs in a worker thread.

    Marks the order identified by ``data['order_id']`` as consumed.
    """
    order_id = data['order_id']
    try:
        order.objects.filter(id=order_id).update(estate='deleted')
    except Exception:
        # NOTE(review): this Response is returned from a background thread,
        # so no client ever receives it; logging would be more useful here.
        return Response({"status_code": 400, "msg": "订单回调更新失败"})
# Spawn a background thread to update the database
def update_others_order(**data):
    """Thread target: persist third-party order fields.

    ``data`` is collected via ``**kwargs`` and is therefore always a dict,
    so the original ``isinstance`` check and its error branch (which
    returned a Response nobody could receive from a worker thread) were
    unreachable and have been removed.
    """
    others_order.objects.filter(order_id=data['order_id']).update(**data)
    print("更新内容:" + str(data))
# Payment endpoint
@csrf_exempt
@api_view(http_method_names=['POST'])
@permission_classes((permissions.AllowAny,))
def pay_order(request):
    """
    Validate an order before handing off to the third-party payment step.

    Expected JSON body, e.g.::

        {"order_id": 1, "luid": 1}

    Checks that the order references the given lodging unit and that both
    the order and the unit are still in the 'valid' state; the 200 response
    is where the real third-party payment call would be made.
    """
    data = json.loads(request.body)
    # Order parameter checks (translated from the original comment).
    order_id = data['order_id']
    luid = data['luid']
    # NOTE(review): QuerySet.filter() is lazy and does not raise for missing
    # rows, so these two except branches likely never fire -- confirm intent.
    try:
        orderinfo = order.objects.filter(id=str(order_id))
    except Exception:
        return Response({"status_code": 400, "msg": "订单不存在"})
    try:
        luinfo = lodgeunitinfo.objects.filter(id=str(luid))
    except Exception:
        return Response({"status_code": 400, "msg": "房源不存在"})
    # Only the first matching order row (and first unit row) is examined:
    # every branch returns on the first iteration.
    for i in orderinfo:
        if luid == i.luid:
            if i.estate == 'valid':
                for j in luinfo:
                    if j.estate == 'valid':
                        return Response({"status_code": 200, "msg": "这时要调第三方支付接口"})
                    else:
                        return Response({"status_code": 400, "msg": "房源已下线或已被预订"})
            else:
                return Response({"status_code": 400, "msg": "订单已失效"})
        else:
            return Response({"status_code": 400, "msg": "订单与房源不匹配"})
| [
"[email protected]"
] | |
b9ce345218a56559b1c0e2d0548939b30672a0b7 | 43b86533a02d9dc975808284bcf8662adf188738 | /catalog/admin.py | d4e88b9e200698ae60b30cf29f1695565ad2ac7a | [] | no_license | AltFlexe/Django_Website | 6248c441ad71fbb312ec77b31e99b4b495754fdd | 1dcd72dd9e414dc9e2f1c35304d1fbac9ca37cdd | refs/heads/master | 2023-05-30T02:33:32.503387 | 2021-06-20T18:18:16 | 2021-06-20T18:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | from django.contrib import admin
from .models import Author, Genre, Book, BookInstance
# admin.site.register(Book)
# admin.site.register(Author)
admin.site.register(Genre)
# admin.site.register(BookInstance)
# Register your models here.
# Define the admin class
class AuthorAdmin(admin.ModelAdmin):
    """Admin customization for Author."""
    # Columns shown on the Author change-list page.
    list_display = ('last_name', 'first_name', 'date_of_birth', 'date_of_death')
# Register the admin class with the associated model
admin.site.register(Author, AuthorAdmin)
# Register the Admin classes for Book using the decorator
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    """Admin customization for Book."""
    # Columns shown on the Book change-list page; 'display_genre' resolves
    # to the method below because ManyToMany fields cannot be listed directly.
    list_display = ('title', 'author', 'display_genre')

    def display_genre(self, obj):
        """Return a comma-separated string of the book's first three genres.

        BUG FIX: a list_display method defined on the ModelAdmin receives
        the model instance as ``obj``; the original was a zero-argument copy
        of the Book model's version and read ``self.genre``, which raised
        TypeError when the changelist rendered.
        """
        return ', '.join(genre.name for genre in obj.genre.all()[:3])

    display_genre.short_description = 'Genre'
# Register the Admin classes for BookInstance using the decorator
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
list_filter = ('status', 'due_back') | [
"[email protected]"
] | |
1c1378d18703f296f0dd3b18faca5c91b2c7ce25 | 0d6b7158479642b4e663bd68ef13d9e7d5dec528 | /E_Helth/E_Helth/settings.py | bcc2173b166f278daaa2ffafcbc5f906eca9dc11 | [] | no_license | GMurali2020/Python_projects | 994cd9972414a73b128774e96156586985bdbb8e | 20a24d1fd20149f41f388efb96fe2157eff62c16 | refs/heads/master | 2023-01-28T13:19:32.432190 | 2020-12-12T10:35:04 | 2020-12-12T10:35:04 | 285,795,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | """
Django settings for E_Helth project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '%y00$o6m%y2=p*we&zgep98)5yhrb!fh&nh9!px!6)+cbwfb^x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must be populated with real hostnames before deploying with DEBUG = False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'helth_admin.apps.HelthAdminConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'E_Helth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'E_Helth.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
19fa85186e9ff41bb0bc2d9c93867717f0417bdf | 47ece9868ababa1aa49adb529d305cf406ddcd1a | /STracker/wsgi.py | 5212c3a016a4b8a9d9fca26603ab15fdc5263a38 | [] | no_license | skonstantinov89/STracker | e6d1f3abc3cb1559c80602219ed93e807c532a28 | 6fb94347d476be7205845b66734490ee8c12a963 | refs/heads/master | 2021-01-10T03:22:31.747369 | 2016-01-25T14:59:55 | 2016-01-25T14:59:55 | 48,292,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for STracker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "STracker.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
5665f714e62a89b8c8766794c9e419937da7ce59 | ca9da73975aa686a5ec68b7b761e47da2070ee04 | /main.py | 3b333678a542433d219d08148910bd7ae1e13dd6 | [] | no_license | RajuDhl/iclan | 2977d8179f5d0a7003bcecc5dbbd8fa169934ab8 | 56f00a5acd05de51ccf14bd4458b9981921baadb | refs/heads/master | 2023-07-30T05:15:48.351495 | 2021-09-15T13:58:52 | 2021-09-15T13:58:52 | 399,783,110 | 0 | 0 | null | 2021-09-15T13:58:53 | 2021-08-25T10:46:24 | JavaScript | UTF-8 | Python | false | false | 441 | py | from flask import render_template, Flask, request
main = Flask(__name__)
@main.route('/')
def home():
    # Landing page.
    return render_template('index.html')
@main.route('/<url>')
def redirect(url):
    """Render the template matching the requested path, or fall back to index.

    NOTE(review): the name shadows ``flask.redirect`` (not imported in this
    module, so harmless today) -- consider renaming.
    """
    # request.path includes the leading '/'; strip it to get the page name
    # (the original used the redundant path[1:len(path)]).
    page = request.path[1:]
    template = f'{page}.html'
    try:
        return render_template(template)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; Exception still covers TemplateNotFound.
        return render_template('index.html', msg="URL does not exist")
if __name__ == '__main__':
main.run()
| [
"[email protected]"
] | |
bcae894964be340bd41e97249039c84ee1b2704f | 99a43a3eaeaa6d6f2175825ca7fac12d0832c0d5 | /src/experiments/base_trainer.py | f4b55885a2054868e0315623e7bd6a3de775baed | [
"MIT"
] | permissive | gorinars/VQ-VAE-Speech | 2a4eebc096fc8e78e831a4515591ceaa136a5453 | 60398f03eb129195bce402a423ace8cca8995f3c | refs/heads/master | 2020-12-09T23:59:22.565008 | 2019-11-17T02:13:19 | 2019-11-17T02:13:19 | 233,451,368 | 0 | 0 | null | 2020-01-12T20:06:43 | 2020-01-12T20:06:42 | null | UTF-8 | Python | false | false | 5,937 | py | #####################################################################################
# MIT License #
# #
# Copyright (C) 2019 Charly Lamothe #
# #
# This file is part of VQ-VAE-Speech. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
from error_handling.console_logger import ConsoleLogger
from evaluation.gradient_stats import GradientStats
import numpy as np
from tqdm import tqdm
import os
import pickle
class BaseTrainer(object):
    """Abstract epoch/iteration training loop.

    Subclasses implement ``iterate`` (one optimization step) and ``save``
    (checkpointing). At evenly spaced iterations the loop can dump codebook
    and gradient statistics to pickle files under ``experiments_path``.
    """

    def __init__(self, device, data_stream, configuration, experiments_path, experiment_name, iterations_to_record=10):
        # iterations_to_record: how many evenly spaced iterations per epoch
        # get codebook/gradient snapshots written to disk.
        self._device = device
        self._data_stream = data_stream
        self._configuration = configuration
        self._experiments_path = experiments_path
        self._experiment_name = experiment_name
        self._iterations_to_record = iterations_to_record

    def train(self):
        """Run epochs from configuration['start_epoch'] to ['num_epochs'],
        delegating each batch to ``iterate`` and checkpointing via ``save``."""
        ConsoleLogger.status('start epoch: {}'.format(self._configuration['start_epoch']))
        ConsoleLogger.status('num epoch: {}'.format(self._configuration['num_epochs']))
        for epoch in range(self._configuration['start_epoch'], self._configuration['num_epochs']):
            with tqdm(self._data_stream.training_loader) as train_bar:
                train_res_recon_error = list() # FIXME: record as a global metric
                train_res_perplexity = list() # FIXME: record as a global metric
                iteration = 0
                max_iterations_number = len(train_bar)
                # Evenly spaced iteration indices at which stats are recorded.
                iterations = list(np.arange(max_iterations_number, step=(max_iterations_number / self._iterations_to_record) - 1, dtype=int))
                for data in train_bar:
                    losses, perplexity_value = self.iterate(data, epoch, iteration, iterations, train_bar)
                    # A None pair signals a skipped batch.
                    if losses is None or perplexity_value is None:
                        continue
                    train_res_recon_error.append(losses)
                    train_res_perplexity.append(perplexity_value)
                    iteration += 1
            self.save(epoch, **{'train_res_recon_error': train_res_recon_error, 'train_res_perplexity': train_res_perplexity})

    def _record_codebook_stats(self, iteration, iterations, vq,
        concatenated_quantized, encoding_indices, speaker_id, epoch):
        # Dump the VQ codebook state for this iteration to a pickle file,
        # but only at the pre-selected iteration indices.
        if not self._configuration['record_codebook_stats'] or iteration not in iterations:
            return

        embedding = vq.embedding.weight.data.cpu().detach().numpy()
        codebook_stats_entry = {
            'concatenated_quantized': concatenated_quantized.detach().cpu().numpy(),
            'embedding': embedding,
            'n_embedding': embedding.shape[0],
            'encoding_indices': encoding_indices.detach().cpu().numpy(),
            'speaker_ids': speaker_id.to(self._device).detach().cpu().numpy(),
            'batch_size': self._data_stream.training_batch_size
        }
        codebook_stats_entry_path = self._experiments_path + os.sep + \
            self._experiment_name + '_' + str(epoch + 1) + '_' + \
            str(iteration) + '_codebook-stats.pickle'
        with open(codebook_stats_entry_path, 'wb') as file:
            pickle.dump(codebook_stats_entry, file)

    def _record_gradient_stats(self, modules, iteration, iterations, epoch):
        # Dump per-module gradient statistics for this iteration.
        # NOTE(review): this gates on 'record_codebook_stats', which looks
        # like a copy-paste from _record_codebook_stats -- confirm whether a
        # separate 'record_gradient_stats' flag was intended.
        if not self._configuration['record_codebook_stats'] or iteration not in iterations:
            return

        gradient_stats_entry = {
            name: GradientStats.build_gradient_entry(module.named_parameters()) \
            for name, module in modules.items()
        }
        gradient_stats_entry_path = self._experiments_path + os.sep + self._experiment_name + '_' + str(epoch + 1) + '_' + str(iteration) + '_gradient-stats.pickle'
        with open(gradient_stats_entry_path, 'wb') as file:
            pickle.dump(gradient_stats_entry, file)

    def iterate(self, data, epoch, iteration, iterations, train_bar):
        """Run one training step; subclasses must override."""
        raise NotImplementedError

    def save(self, epoch, **kwargs):
        """Persist a checkpoint for ``epoch``; subclasses must override."""
        raise NotImplementedError
| [
"[email protected]"
] | |
d9993d3d63705e88b6e3e42b91894d35e509f4d2 | e33fce460fb64b7271f85adcf6b08ba0e9c4d2b7 | /setup.py | 5e115a8985b58f1668ba68db35291eb658a71485 | [] | no_license | sabyasachi-biswas/vault_setup_file | fa5916c8bd00cd3ae9b5afd859efd85dcd439de6 | fbcba061ed4d3a1b80106cc68d274dd1c839ea32 | refs/heads/main | 2023-03-28T12:12:22.181648 | 2021-03-30T05:07:08 | 2021-03-30T05:07:08 | 352,876,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | import sqlite3
from tkinter import filedialog
import os
# Open (creating on first run) the SQLite database that backs the vault.
conn = sqlite3.connect('user_data.db')
c = conn.cursor()
# IF NOT EXISTS makes this setup script safe to re-run; the original
# CREATE statements raised sqlite3.OperationalError on a second run.
c.execute("""CREATE TABLE IF NOT EXISTS user (
uid integer NOT NULL PRIMARY KEY,
name string,
username string,
pwd string
)""")
c.execute("""CREATE TABLE IF NOT EXISTS vault_config (
uid integer NOT NULL PRIMARY KEY,
path string
)""")
c.execute("""CREATE TABLE IF NOT EXISTS vault_data (
fileid integer NOT NULL PRIMARY KEY,
uid integer NOT NULL,
state string,
algo string,
filename string,
filesize integer,
path string,
filetype string
)""")
c.execute("""CREATE TABLE IF NOT EXISTS vault_path (
path string
)""")
# Ask the user where the vault should live and remember the choice.
path = filedialog.askdirectory()
c.execute("INSERT INTO vault_path VALUES (:path)", {
    'path': path
})
# NOTE(review): shelling out to pip is fragile (may target the wrong
# interpreter); prefer `python -m pip install ...` via sys.executable or a
# requirements file.
os.system('pip install pillow')
os.system('pip install bcrypt')
conn.commit()
conn.close()
"[email protected]"
] | |
79f303f6fae45d041e55f7fec4c9a8757afb5b8c | b47c3cb57fcd195197605d6753dbbed216b71a78 | /ggd/finance/StkQuoteCollector.py | 953093934e7641a2390ae97f4d9253499c69a8de | [] | no_license | gauciouss/python-finance | b9c2aca0c5bef1d1505d97f0f79cd2d764403727 | d66627e4605d7815fdff77c5d9eb2fd20819e9a8 | refs/heads/master | 2021-09-16T00:20:41.891655 | 2018-06-13T18:51:34 | 2018-06-13T18:51:34 | 112,549,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # -*- encoding: utf8-*-
from ggd.log.logger import *
from ggd.net.httpUtil import *
from bs4 import BeautifulSoup
class StkQuoteCollector:
    """Collects daily stock quotes from the TWSE exchange-report endpoint."""

    # Format placeholders: (yyyymmdd date, stock number).
    url = "http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=json&date={}&stockNo={}"
    logger = None
    httpUtil = None

    def __init__(self):
        self.logger = LoggerFactory.getLogger(self)
        self.httpUtil = HttpUtil()

    def getData(self, *args):
        """Fetch raw quote data -- not implemented yet."""
        return None

    def doParser(self, html):
        """Parse a downloaded quote page -- not implemented yet."""
        return None
| [
"[email protected]"
] | |
78dab4d93bba9a1921cc5afc998d0db52088e38f | 9b846f4a692eb04ee9604d0c6ef66805f2dc8d47 | /7 kyu/square_every_digit.py | febf656b7f52103d4906f7f3d8e1410baebd24bd | [] | no_license | Juozapaitis/CodeWars | 93dc4b121d7b1b03650b8d52dc67775c29a2d87b | 3ebeb5a6124757c3248d686fdd8477f735c69098 | refs/heads/main | 2023-08-20T23:38:58.578269 | 2021-10-20T12:36:41 | 2021-10-20T12:36:41 | 381,331,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | def square_digits(num):
ans = [int(digit) ** 2 for digit in str(num)]
return int(''.join(str(v) for v in ans)) | [
"[email protected]"
] | |
306c316a327c0c4f171d3650f76eb33d56e2d348 | 280a23e1910f21f9ff203858db56530dc8a7d33a | /Code/Modules/plot_network.py | dec39cd864e2cd2014b811ea61ea4a92fe1d86fc | [] | no_license | culuc/Network-Science-Project | 08bac838d03c1d3054d9d28f5546e9f568e9abce | d63b689c471be0b0c730e3a6dd780703ba90a389 | refs/heads/master | 2023-04-10T12:58:40.002880 | 2021-04-09T16:04:08 | 2021-04-09T16:04:08 | 356,321,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | from matplotlib import pyplot as plt
from mpl_toolkits.basemap import Basemap
import networkx as nx
# Create basemap of region Germany to draw the network onto
def draw(G, path=None):
    """Plot a geo-referenced network on a Mercator basemap of Germany.

    Nodes need 'Longitude'/'Latitude' attributes (others are dropped) and a
    'Value' attribute used for coloring; edges need a 'Value' attribute too.
    If ``path`` is given the figure is also saved there at 300 dpi.

    NOTE(review): uses the ``G.node`` attribute API, which was removed in
    networkx 2.4 -- this function requires networkx < 2.4.
    """
    plt.figure(figsize=(6, 12))
    # Mercator projection clipped to roughly Germany's bounding box.
    m = Basemap(
        projection='merc',
        llcrnrlon=5,
        llcrnrlat=46,
        urcrnrlon=16,
        urcrnrlat=56,
        lat_ts=0,
        resolution='i',
        suppress_ticks=True)
    # map network nodes to position relative to basemap, remove nodes that don't have location
    G2 = G.copy()
    pos = {}
    for n in G.nodes():
        # remove nodes that do not have lat,long info
        if 'Longitude' not in G.node[n].keys():
            G2.remove_node(n)
            continue
        long = G.node[n]['Longitude']
        lat = G.node[n]['Latitude']
        # Basemap instances are callable: (lon, lat) -> map (x, y).
        pos[n] = m(long,lat)
    # get values for drawing nodes,edges in color
    values = [G2.node[n]['Value'] for n in G2.nodes()]
    values_edges = [G2.edges[e]['Value'] for e in G2.edges()]
    # draw network
    graph = G2
    nx.draw_networkx_nodes(G = graph, pos = pos, node_list = graph.nodes(), node_color = values, alpha = 0.4, node_size=10,cmap=plt.get_cmap('Pastel1'))
    # nx.draw_networkx_nodes(G = graph, pos = pos, node_list = nodelist['G2'], node_color = 'y', alpha = 0.2, node_size=10)
    nx.draw_networkx_edges(G = graph,
                           pos = pos,
                           edge_color=values_edges,
                           cmap=plt.get_cmap('Pastel1'),
                           arrows=False,
                           width=2
                           )
    # m.drawcoastlines(color='black',linewidth=0.5)
    m.drawcountries(color='black',linewidth=1)
    # m.fillcontinents(color="#FFDDCC", lake_color='#DDEEFF')
    # m.drawmapboundary(fill_color="#DDEEFF")
    m.shadedrelief()
    if path:
        plt.savefig(path,dpi=300,bbox_inches='tight')
    plt.show()
| [
"[email protected]"
] | |
1961cabeffd309cf7edbba6e5a9c0554c8869b31 | bcc2244d3115866a10d0fe8cdd4d1a2a6764a535 | /user/migrations/0010_acknowledgment.py | 13238d035b4b781ffe5764d286838a4e91913e7d | [] | no_license | Hardik-Dharmik/Online_Notice_Dashboard | 037b66bbce1d106ce70c3988f75ae334052431c3 | 313dd61e606917a4530b1232f81a173daeb84c2b | refs/heads/master | 2023-08-27T19:18:58.757947 | 2021-10-12T09:07:09 | 2021-10-12T09:07:09 | 397,969,375 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | # Generated by Django 3.2.5 on 2021-09-23 11:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the Acknowledgment table, which
    records whether a user profile has acknowledged a notice."""

    dependencies = [
        ('Admin', '0004_addnotice_year'),
        ('user', '0009_profile_notice'),
    ]

    operations = [
        migrations.CreateModel(
            name='Acknowledgment',
            fields=[
                ('ack_id', models.AutoField(primary_key=True, serialize=False)),
                ('is_acknowledged', models.BooleanField(default=False)),
                ('notice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Admin.addnotice')),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.profile')),
            ],
        ),
    ]
| [
"[email protected]"
] | |
6f390581974497e18baa65474194a2178635ca3c | c80804bc7ebb123dc3bb65c1dd5188274555819c | /workflow/parametersGridSearch.py | e612e092d512bffceaf0f011948cdad258bd57ec | [] | no_license | vagabundoo/predicting_financial_markets_deep_learning | 8c195e1ab6682ee2eae106df153acb3c3021d551 | 5dd4bc76167816fb8fc7fe9b8224711a7265e100 | refs/heads/master | 2021-02-15T08:34:49.289504 | 2020-03-12T22:43:34 | 2020-03-12T22:43:34 | 244,882,662 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | ltsm_layer1 = [50, 100, 150, 200]
dropout1 = [0.1, 0.2, 0.3, 0.4]
ltsm_layer2 = [25, 50, 100, 150]
dropout2 = [0.1, 0.2, 0.3, 0.4] | [
"[email protected]"
] | |
9fb9df422087935d4d24a18abd078bebe32de083 | 4ce4b441069a2420775d6e6f293fd2a208970e96 | /jibreel/orcrider.py | a0b385d2cb11d9b46b40b03cd55a114d94298a72 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmrcastillo/python-simple-game | 740358f1a6126b06a774013466dedb32b4291ee8 | 0f08781e2f8cb0b4a4cfbac32ac568c1a2bb88a9 | refs/heads/master | 2022-12-12T04:16:27.831556 | 2019-09-18T12:20:42 | 2019-09-18T12:20:42 | 209,301,943 | 0 | 0 | NOASSERTION | 2022-12-08T06:15:18 | 2019-09-18T12:23:06 | Python | UTF-8 | Python | false | false | 547 | py |
from __future__ import print_function
from gameunit import AbstractGameUnit
class OrcRider(AbstractGameUnit):
    """In-game enemy unit: an orc riding a wolf."""

    def __init__(self, name=' '):
        super().__init__(name=name)
        self.unit_type = 'enemy'
        self.max_hp = 30
        self.health_meter = self.max_hp
        self.hut_number = 0

    def info(self):
        """Print a short self-description of this character."""
        print("Grrr.. I'm an Orc Wolf Rider. Don't mess with me.")
| [
"[email protected]"
] | |
5c7264287aad4d842ddf8fb0386e9e321920d4af | 7e1cf6c9ebf64536e3b0a2cadc4964cba6c6426e | /searchdb_coocurence/migrations/0006_coocurence_data_base.py | 15c27f490be94ca0310880b5ce12d785e5522cef | [] | no_license | steven0seagal/PhD-project | afcacf01920c8f4ad3c986107776b86bc80976e7 | 6e4d30c03c499eed1ad227d9844ace3925c2f807 | refs/heads/master | 2022-12-01T21:38:57.085752 | 2020-06-09T23:13:42 | 2020-06-09T23:13:42 | 229,573,242 | 0 | 1 | null | 2022-11-22T04:38:09 | 2019-12-22T13:38:28 | JavaScript | UTF-8 | Python | false | false | 416 | py | # Generated by Django 2.1.7 on 2019-04-19 10:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the optional `data_base` CharField to
    the coocurence model."""

    dependencies = [
        ('searchdb_coocurence', '0005_auto_20190315_1900'),
    ]

    operations = [
        migrations.AddField(
            model_name='coocurence',
            name='data_base',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
| [
"[email protected]"
] | |
e8b0a577d9eae7c0d08d3202b3281bb0e0e3b37a | 9e85514daac857cdf3f44fae6b3aecc7b1b4e387 | /DJANGO_PROJ/mysite/mysite/urls.py | de6e4e5d27d356019e354820bca91341d56a2908 | [] | no_license | hstets/pyDjanHockeyApp | 56cee764050cee220b23db08a2dbb8b95ab223b3 | bf214426fc4f9aa20a9916e410ea7202e3d7c4e7 | refs/heads/master | 2020-04-11T23:49:11.899441 | 2018-12-17T19:41:50 | 2018-12-17T19:41:50 | 162,180,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# Route table: delegate /account/ to the accounts app, /admin/ to the admin site.
urlpatterns = [
    path('account/', include('accounts.urls')),
    path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
63e3f9b2072c80383c383c6d242276f41bb03b64 | 183898b5781b596c22ad8fd0dd922f3b4bac261f | /register/forms.py | 52497c113fd2d80e34fe05a4faff6841601af181 | [] | no_license | prithahowladar/Apply-Form | 10a910cc8ab3a1fded0d23c4b6c25b7dd17e094c | f5fe5ed7ff1784ce32175432ea53df4562a18958 | refs/heads/master | 2022-09-26T00:27:10.068054 | 2020-05-30T15:26:08 | 2020-05-30T15:26:08 | 255,154,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import UserInfo
class SignUpForm(UserCreationForm):
    """User sign-up form: username plus the two password-confirmation fields."""
    class Meta:
        model = User
        fields = ('username', 'password1', 'password2', )
class UserInfoForm(forms.ModelForm):
    """Profile-details form; the owning user is set by the view, not the form."""
    class Meta:
        model = UserInfo
        exclude = ["user",]
| [
"[email protected]"
] | |
ebb7e458bc8013bdf2ff470f71c02b21b7215cd5 | 115b8345fa6db4ee2df004427451ae1c60723f34 | /Registraion_testElastix.py | 711b1b7ab669428ea04a573af8da7b619b378a27 | [] | no_license | anouk610/CapitaSelecta_8DM20 | c3923ae4c74efc342e5c6c94a5b1ee7e9948b6ed | d131d9855a0e0c03cb1f30f0f5b13bc38eb0c06f | refs/heads/master | 2023-03-02T09:12:49.390989 | 2021-02-08T13:58:06 | 2021-02-08T13:58:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 11:37:07 2021
@author: s169369
"""
import elastix
import os
import imageio
import matplotlib.pyplot as plt
import SimpleITK as sitk
from cv2 import *
import numpy as np
from scrollview import ScrollView
import rawpy
import imageio
ELASTIX_PATH = os.path.join(r'C:\Users\s169369\Documents\studie\2020-2021\05-03\Capita Selecta\Project\elastix.exe')
TRANSFORMIX_PATH = os.path.join(r'C:\Users\s169369\Documents\studie\2020-2021\05-03\Capita Selecta\Project\transformix.exe')
if not os.path.exists(ELASTIX_PATH):
raise IOError('Elastix cannot be found, please set the correct ELASTIX_PATH.')
if not os.path.exists(TRANSFORMIX_PATH):
raise IOError('Transformix cannot be found, please set the correct TRANSFORMIX_PATH.')
# Make a results directory if non exists
if os.path.exists('results') is False:
os.mkdir('results')
fixed_path = r'TrainingData\p102\mr_bffe.mhd'
moving_path = r'TrainingData\p135\mr_bffe.mhd'
fixed_image = sitk.ReadImage(fixed_path)
fixed_im_array = sitk.GetArrayFromImage(fixed_image)
moving_image = sitk.ReadImage(moving_path)
moving_im_array = sitk.GetArrayFromImage(moving_image)
parameter_file_path = r'TrainingData\parameters.txt'
def Registration(fixed_image_path, atlas_path, ELASTIX_PATH, pnr ):
    """Register one atlas (moving image) onto a fixed image with Elastix.

    Output files are written to a per-patient directory 'results_<pnr>',
    which is created on first use.
    """
    parameter_file_path = r'TrainingData\parameters.txt'
    # Compute the per-patient output directory once instead of re-formatting it.
    output_dir = 'results_{}'.format(pnr)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    interface = elastix.ElastixInterface(elastix_path=ELASTIX_PATH)
    interface.register(
        fixed_image=fixed_image_path,
        moving_image=atlas_path,
        parameters=[parameter_file_path],
        output_dir=output_dir)
# Run a single registration (fixed: p102, moving: p135).
# NOTE(review): a previous comment here said "5 times", but only one call is made.
Registration(fixed_path, moving_path, ELASTIX_PATH, 'p102')
# # result jacobian:
# tr = elastix.TransformixInterface(parameters='res_chest/TransformParameters.0.txt',transformix_path=TRANSFORMIX_PATH)
# tr.jacobian_determinant('res_chest_p')
# jac_path = os.path.join('res_chest_p', 'spatialJacobian.mhd')
# jacobian = sitk.ReadImage(jac_path)
# jac_res = sitk.GetArrayFromImage(jacobian)
# jac_bin = jac_res > 0
# # now black spots in image are where folding occurs (negative jacobian)
# plt.figure()
# plt.imshow(jac_bin, cmap='gray')
# plt.title('Jacobian. black is negative = folding')
# # results image :
# result_path = os.path.join('res_chest_p', 'result.0.mhd')
# transformed_moving_image = sitk.ReadImage(result_path)
# tr_mov_im_array = sitk.GetArrayFromImage(transformed_moving_image)
# plt.figure()
# plt.imshow(tr_mov_im_array, cmap='gray')
# plt.title('transformed moving image')
# # Iteration_file_path_0 = 'res_ssp2/IterationInfo.0.R0.txt'
# # log0 = elastix.logfile(Iteration_file_path_0)
# # Iteration_file_path_1 = 'res_ssp2/IterationInfo.0.R1.txt'
# # log1 = elastix.logfile(Iteration_file_path_1)
# # Iteration_file_path_2 = 'res_ssp2/IterationInfo.0.R2.txt'
# # log2 = elastix.logfile(Iteration_file_path_2)
# # Iteration_file_path_3 = 'res_ssp2/IterationInfo.0.R3.txt'
# # log3 = elastix.logfile(Iteration_file_path_3)
# # Iteration_file_path_4 = 'res_ssp2/IterationInfo.0.R4.txt'
# # log4 = elastix.logfile(Iteration_file_path_4)
# # plt.figure()
# # plt.plot(log0['itnr'], log0['metric'], label = 'R0')
# # plt.plot(log1['itnr'], log1['metric'], label ='R1')
# # plt.plot(log2['itnr'], log2['metric'], label = 'R2')
# # plt.plot(log3['itnr'], log3['metric'], label = 'R3')
# # plt.plot(log4['itnr'], log4['metric'], label = 'R4')
# # plt.title('cost-functions')
# # plt.legend()
| [
"[email protected]"
] | |
af6d9f15c3dba17c199ca66803fe38480347b962 | cc096d321ab5c6abf54fdcea67f10e77cd02dfde | /flex-backend/pypy/translator/flex/examples/sound/sound.py | fb3dc79b00c8ad0b3b9b614b18195db2ea5b34c9 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | limweb/flex-pypy | 310bd8fcd6a9ddc01c0b14a92f0298d0ae3aabd2 | 05aeeda183babdac80f9c10fca41e3fb1a272ccb | refs/heads/master | 2021-01-19T22:10:56.654997 | 2008-03-19T23:51:59 | 2008-03-19T23:51:59 | 32,463,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from pypy.translator.flex.modules.flex import *
from pypy.rpython.ootypesystem.bltregistry import BasicExternal, MethodDesc
class Sonido():
    """Flash sound helper: loads a local mp3 file and plays it immediately."""
    def load_sound(self, w):
        # NOTE(review): `w` (the window) is accepted to match flash_main's calling
        # convention but is not used here — confirm whether it is needed.
        s = Sound()
        r = newURLRequest("sal.mp3")
        s.load(r)
        s.play()
def flash_main( x=1 ):
    """Flash entry point: wrap the raw handle in a window and play the sound."""
    w = castToWindow( x )
    o = Sonido()
    o.load_sound(w)
| [
"facundobatista@dbd81ab4-9648-0410-a770-9b81666e587d"
] | facundobatista@dbd81ab4-9648-0410-a770-9b81666e587d |
d13f51d530dfdbf41575380cd547755b2bc4a010 | 3656b052aba0c7673e200b286ea211a63ab90584 | /hw1/ab8541_hw1_q3.py | 13a84e82a14dcb8d3b8559e6fda3eb1914300e2c | [] | no_license | alisha-bhatia/1134 | 20bf6c3dd330faf42947899f0af72c719c310169 | 87c4b9178a475f60a84a81bfbb58838f3daf48f3 | refs/heads/master | 2022-11-10T21:22:22.011055 | 2020-07-03T21:06:06 | 2020-07-03T21:06:06 | 274,279,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | #3a:
def squares_sum(n):
    """Return 1**2 + 2**2 + ... + n**2 (0 when n < 1)."""
    return sum(k * k for k in range(1, n + 1))
#3b: sum of squares as a one-line generator expression (range(3) yields 0,1,2).
sum(k*k for k in range(3))
#3c
def odd_squares_sum(n):
    """Return the sum of the squares of the odd integers in 1..n.

    Bug fix: the original accumulated ``y`` itself rather than ``y**2``, so it
    returned the sum of the odd numbers instead of the sum of their squares
    (the intended computation, matching the part-3d expression
    ``sum(k*k for k in range(...) if k%2 == 1)``).
    """
    total = 0
    for y in range(1, n + 1):
        if y % 2 == 1:
            total = total + y ** 2
    return total
def main():
    """Smoke test: print the sum of squares for n = 3."""
    print(squares_sum(3))
main()
#3d: sum of squares of the odd numbers in range(4) (odd k are 1 and 3 → 10).
sum(k*k for k in range(4) if (k%2 == 1))
| [
"[email protected]"
] | |
7ba6a228cb2e5dfcd05ea45a34e1bf43bccabd08 | f811244ce15308f2e2bc4509e0fe18025a06ff87 | /tool/autoPlatform/tools/excel_parse_isp/rules.py | 8444c61832bed2a0f806bbb4537d762cb5a75819 | [] | no_license | autoCore/autoTest | cb21118374d69897ee9a363921fce4ff230bfb4c | 6b778aafca1fb170201813c4a1fda315fc6322ef | refs/heads/master | 2023-06-12T04:51:22.266677 | 2021-07-01T06:42:03 | 2021-07-01T06:42:03 | 112,465,116 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | #! /usr/bin/env python
class Rule:
    """Base class for block-classification rules: subclasses define a
    ``condition`` deciding whether the rule matches and a ``type`` tag."""
    def action(self, block, handler):
        # Default dispatch: forward only the rule's type tag to the handler.
        # NOTE(review): ``self.type`` is only defined by subclasses.
        handler.do(self.type)
        return True
class BaseRule(Rule):
    """Matches spreadsheet blocks containing a 'BASE:' (or 'BASE :') marker row."""
    type = 'capbase'
    def condition(self, block):
        for row in block:
            try:
                row[0] + ''  # duck-type probe: skip rows whose first cell is not a string
            except:  # NOTE(review): bare except also swallows IndexError/KeyboardInterrupt
                continue
            if 'BASE:' in row[0].upper() or 'BASE :' in row[0].upper():
                return True
        return False
    def action(self, block, handler, register, regfield):
        # Overrides Rule.action with the wider signature this parser uses.
        handler.do(self.type, block, register, regfield)
        return True
class RegisterRule(Rule):
    """Matches spreadsheet blocks containing an 'Offset:' marker row."""
    type = 'capregister'
    def condition(self, block):
        for row in block:
            try:
                row[0] + ''  # duck-type probe: skip rows whose first cell is not a string
            except:  # NOTE(review): bare except also swallows IndexError/KeyboardInterrupt
                continue
            if 'Offset:' in row[0]:
                return True
        return False
    def action(self, block, handler, register, regfield):
        # Overrides Rule.action with the wider signature this parser uses.
        handler.do(self.type, block, register, regfield)
        return True
class RegfieldRule(Rule):
    """Matches register-field tables whose first row holds the 'Bits' and
    'Field (Code)' column headers."""
    type = 'capregfield'
    def condition(self, block):
        if "Bits" in block[0][0] and 'Field (Code)' in block[0][2]:
            return True
        return False
    def action(self, block, handler, register, regfield):
        # Locate the header row; `i` deliberately leaks out of the loop below.
        for i, line in enumerate(block):
            if "Bits" in line[0] and 'Field (Code)' in line[2]:
                keys = [_str.strip() for _str in line]  # NOTE(review): computed but never used
                break
        # print keys
        for line in block[i+1:]:
            # NOTE(review): the handler is invoked once per data row but always
            # receives the whole block; `line` itself is unused — confirm intended.
            handler.do(self.type, block, register, regfield)
        return False
| [
"[email protected]"
] | |
cbc4517dfd261c43287ac8afc77191cb726c026f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02595/s629865346.py | d0a82f085823894f709114df5b0bba8496dd8d7c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | n,d = [int(x) for x in input().split()]
# Read n points and count how many lie within distance d of the origin.
x = []
y = []
for i in range(n):
    # NOTE: the comprehension variable x shadows the list x only inside the
    # comprehension's own scope (Python 3), so the list is unaffected.
    x1,y1 = [int(x) for x in input().split()]
    x.append(x1)
    y.append(y1)
c = 0
for i in range(n):
    s = x[i] ** 2 + y[i] ** 2
    # Compare squared magnitudes to avoid sqrt and floating-point error.
    if s <= d ** 2:
        c += 1
print(c) | [
"[email protected]"
] | |
77425464532646f7e160d2ca347be9551fc737b0 | c45b34c191c4f89710a937ddf82df063e701c752 | /apps/accounts/migrations/0001_initial.py | 961340272bb7bc485373f0e35c2a59216d3f3855 | [] | no_license | kzh3ka/junior_django_todo | 822aac5278304d300543fcfa5f05c1c43c0449d5 | deb5f17e3a428e2391e57f5d76ed0da5bb583dd6 | refs/heads/master | 2023-08-04T09:26:10.909727 | 2021-09-04T19:50:40 | 2021-09-10T13:59:21 | 403,142,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # Generated by Django 3.2.6 on 2021-09-04 19:34
from django.conf import settings
from django.db import migrations
def create_user(apps, schema):
    """Forward migration step: seed the 'admin' and 'user' superuser accounts."""
    user_model = apps.get_model(settings.AUTH_USER_MODEL)
    for username, email in (("admin", "[email protected]"), ("user", "[email protected]")):
        user_model.objects.create_superuser(username, email, "1")
class Migration(migrations.Migration):
    """Seed superusers right after the swappable user model is created.

    NOTE(review): the reverse operation is None, so this migration cannot be
    rolled back with ``migrate`` in reverse.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.RunPython(create_user, None)
    ]
| [
"[email protected]"
] | |
842c064847ec0ecf83a8ed3fdbe0570b18e385f0 | 6cf4783e44837b05163beed2ee10e19c71b51bce | /oddnumber.py | 6a13acb8c3556e1c76469028d18b09ee2031cb0c | [] | no_license | bindurathod/Assignment5 | 52bd5cf57fd4e47a0faae3b31fbf96724708838d | 7c37545b4113120ed5b2efdcd76edd153c9db7b9 | refs/heads/main | 2023-01-12T00:52:36.828637 | 2020-11-05T05:40:54 | 2020-11-05T05:40:54 | 310,200,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | def odd_numb(num):
    # NOTE(review): both branches print "odd" — the num%2==0 branch should
    # presumably print "even"; as written the function always reports "odd".
    if num%2==0:
        print(num, "odd")
    else:
        print(num, "odd")
odd_numb(13)
| [
"[email protected]"
] | |
d0a6b55a1a10b91a7089c1e11d5153948f254604 | f8f20f48de71862722615ad1ce4d40de4d4f9024 | /python/image_proc/calibrate_kinect_extrinsics.py | 67f885a0c2105c5bd6eebc98bcf1729a67e49302 | [] | no_license | rll/sushichallenge | 33cdc7b7bcdbc3a9b42e03eff9e46103ce58404b | 372345361dc44e785c9b98d538373b33e7174060 | refs/heads/master | 2016-09-05T12:53:16.754195 | 2012-05-21T23:54:36 | 2012-05-21T23:54:36 | 4,010,735 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,067 | py | import numpy as np
import cv2
import calibration
import chessboards
from rave.openrave_kinematics import RaveRobot
from os.path import join
import transformations
import roslib;roslib.load_manifest('sensor_msgs')
import pcl_utils
import cPickle
#rr = RaveRobot()
DATA_DIR = "/home/joschu/Data/calib"
bgr_kinects = np.load(join(DATA_DIR,'bgr_kinects.npy'),mmap_mode='r')
bgr_lefts = np.load(join(DATA_DIR,'bgr_lefts.npy'),mmap_mode='r')
CB_SHAPE = (5,4)
CB_SIZE = .0249
def get_trans_rot_corners(bgr, window_name, cam_matrix):
    """Detect the calibration chessboard in *bgr*.

    Returns (translation, rotation, corners) on success and (None, None, None)
    when no board is found; either way the (annotated) frame is displayed in
    *window_name*.  Python 2 code (print statements).
    """
    try:
        cam_trans, cam_rot, corners = chessboards.get_chessboard_pose(bgr, CB_SHAPE, CB_SIZE, cam_matrix)
        bgr_plot = bgr.copy()
        cv2.drawChessboardCorners(bgr_plot, CB_SHAPE, corners, True)
        # Mark the first corner so the board orientation is visible on screen.
        cv2.circle(img=bgr_plot, center=tuple(corners[0].flatten()), radius=5, color=(0,0,255), thickness=2)
        cv2.imshow(window_name,bgr_plot)
        cv2.waitKey(20)
        print "chessboard found"
    except ValueError:
        cam_trans, cam_rot, corners = None,None, None
        cv2.imshow(window_name,bgr.copy())
        cv2.waitKey(20)
        print "chessboard not found"
    return cam_trans, cam_rot, corners
# Collect matched chessboard translations seen by the Kinect and the left
# stereo camera, then solve for the rigid transform between the two frames.
cv2.namedWindow('kinect corners', cv2.cv.CV_WINDOW_NORMAL)
cv2.namedWindow('left corners', cv2.cv.CV_WINDOW_NORMAL)
kin_trans_list = []
cam_trans_list = []
with open('/home/joschu/Data/calib/info_left.pkl','r') as fh: cam_info=cPickle.load(fh)
left_cam_matrix = np.array(cam_info.P).reshape(3,4)[:3,:3]
kin_cam_matrix = pcl_utils.CAMERA_MATRIX
for (bgr_kin, bgr_left) in zip(bgr_kinects, bgr_lefts):
    kin_trans, kin_rot, kin_corn = get_trans_rot_corners(bgr_kin, 'kinect corners', kin_cam_matrix)
    cam_trans, cam_rot, cam_corn = get_trans_rot_corners(bgr_left, 'left corners', left_cam_matrix)
    # Only frames where BOTH cameras saw the board contribute correspondences.
    if kin_trans is not None and cam_trans is not None:
        kin_trans_list.append(kin_trans.flatten())
        cam_trans_list.append(cam_trans.flatten())
# Least-squares rigid transform mapping Kinect points onto left-camera points.
M = transformations.superimposition_matrix(np.array(kin_trans_list),np.array(cam_trans_list))
| [
"team2@prj1.(none)"
] | team2@prj1.(none) |
36b959d058b43bfcac23c454c8c2e6319756bb85 | 8b6139ff207c20dc9b3dc13d26f4cb73c54c59e7 | /Crud2/env/bin/django-admin | 68db5ad754532a5f2c4a997db3799dc314cdb0ca | [] | no_license | CarolinaPardoFuquen/Crud | 52ecef0353ed8c53087fb3e2cb8db37b3d83deae | beedf204fe7c5d7000c15e3c00e9cf2b5d4c6834 | refs/heads/master | 2023-04-28T01:42:00.088982 | 2020-01-22T23:30:05 | 2020-01-22T23:30:05 | 235,474,924 | 0 | 2 | null | 2023-04-21T20:45:29 | 2020-01-22T01:21:07 | JavaScript | UTF-8 | Python | false | false | 310 | #!/home/estudiante/Escritorio/PruebaIngreso/Crud2/env/bin/python3
# -*- coding: utf-8 -*-
# Virtualenv console-script stub for Django's command-line utility.
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Normalize argv[0]: strip the -script.pyw/.exe suffixes added by installers.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"[email protected]"
] | ||
764a1510383c384620c7766d9c7b9359b1433dc0 | 14828da26a31b618bc0e87b27853cc46f7474b4a | /mysite copy/polls/models.py | 3f2999a5ce99f8025786316ce9df14aaa395ee6d | [] | no_license | ayush-mayur/django-am | 089e680005bce2a066e0e9343c1ad936c26c35df | 245d32802dc4b34e8af10e3a3eed2cec1ee97b8c | refs/heads/main | 2023-03-06T22:11:22.844395 | 2021-02-21T09:00:41 | 2021-02-21T09:00:41 | 340,819,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
    """A poll question together with its publication timestamp."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        """True when pub_date lies within the last 24 hours and not in the future."""
        now = timezone.now()
        one_day_ago = now - datetime.timedelta(days=1)
        return one_day_ago <= self.pub_date <= now
class Choice(models.Model):
    """One answer option for a Question, with its running vote count."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __str__(self):
        return self.choice_text
class Thought(models.Model):
    """A free-form note consisting of a title and a body text."""
    thought_title = models.CharField(max_length=200)
    thought_text = models.TextField()
    def __str__(self):
return self.thought_title + ": " + self.thought_text | [
"[email protected]"
] | |
eb7c63fd6a53470763db9d871f3bc50924492016 | ba5a6f1b6776c3503a4e6f486795ffd62a11810b | /Loja/cliente/views.py | 5fbfdf60a07d138a5b63653a43a384160f5b37d5 | [] | no_license | AmaroCesa/intmed_test | 9e4e78a0dc7eb989b797ca9b5c56d94588d08193 | 1e59a365298ad33216aad5635b6df3fe98749c7c | refs/heads/master | 2021-06-13T06:44:29.943526 | 2019-05-27T02:08:47 | 2019-05-27T02:08:47 | 155,141,625 | 0 | 0 | null | 2021-06-10T20:56:24 | 2018-10-29T02:44:41 | JavaScript | UTF-8 | Python | false | false | 1,232 | py | from django.shortcuts import render
# Create your views here.
from rest_framework import generics
from rest_framework import viewsets
from rest_framework import serializers
from rest_framework.decorators import list_route
from rest_framework.decorators import permission_classes
from rest_framework.permissions import IsAdminUser
from rest_framework.permissions import IsAuthenticated
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework import status
from .models import Cliente
from .serializers import ClienteSerializer
from rest_framework import views
from django.contrib.auth import login, authenticate
class ClienteViewSet(viewsets.ModelViewSet):
    """Client registration endpoint with per-action access control."""
    queryset = Cliente.objects.all()
    serializer_class = ClienteSerializer

    def get_permissions(self):
        """Instantiate the permissions for the current action: anonymous
        access for 'list', authenticated access for everything else."""
        klasses = [AllowAny] if self.action == 'list' else [IsAuthenticated]
        return [klass() for klass in klasses]
| [
"[email protected]"
] | |
d32c8ea081b171bd68dfa57d848475b5c2f3164c | 426961e942392584e37e703246b14b2290248057 | /scikit_learn/train_test_split.py | fc799ca6e52bdad703c63afb47f92fe115151459 | [] | no_license | chingandy/Data-Science-with-Python | 34beb6e965feb98ef5f3626d1ba1d46faee91796 | 5d58c0d63231ad996ff500400a557f43a7fc6937 | refs/heads/master | 2021-01-24T16:39:42.873489 | 2018-03-20T09:48:53 | 2018-03-20T09:48:53 | 123,205,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | # Import necessary modules
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# Create training and test sets (70/30 split, fixed seed for reproducibility).
# NOTE(review): X, y and np are not defined in this snippet — they come from
# the surrounding exercise environment.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state= 42)
# Create the regressor: reg_all
reg_all = LinearRegression()
# Fit the regressor to the training data
reg_all.fit(X_train,y_train)
# Predict on the test data: y_pred
y_pred = reg_all.predict(X_test)
# Compute and print R^2 and RMSE
print("R^2: {}".format(reg_all.score(X_test, y_test)))
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: {}".format(rmse))
| [
"[email protected]"
] | |
32d50e2a5e620f4e704d3a8bc918a444a27385dc | d5e8b4b1e0b4ffecc248221cd15628e62ac65ef9 | /euclid.py | 86aa9ca5d9e866a811d909926e5dbb4503088eca | [] | no_license | thangvynam/simulation-math-python | 897ef9c5d3590b8f08d8d5496330961974cacb4a | 36ca6014f5fed8043dc1fcc4d09cd429e0f961f4 | refs/heads/master | 2023-02-07T10:49:05.283434 | 2018-05-26T08:24:27 | 2018-05-26T08:24:27 | 134,943,140 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | a = input('Enter a: ')
b = input('Enter b: ')
# Convert the raw input strings to integers for the demo call in __main__.
a=int(a)
b=int(b)
#Euclid: the helpers below implement plain/extended Euclid and modular inverse.
def ucln(a, b):
    """Greatest common divisor of a and b (iterative Euclidean algorithm)."""
    while b != 0:
        a, b = b, a % b
    return a
def gcd_extend(a,b):
    """Extended Euclid: print Bezout coefficients x, y with x*a + y*b == gcd(a, b)
    (for the original argument values)."""
    x, y = 1, 0    # coefficients expressing the current remainder
    x1, y1 = 0, 1  # coefficients expressing the next remainder
    while b != 0:
        q, r = divmod(a, b)
        a, b = b, r
        x, x1 = x1, x - q * x1
        y, y1 = y1, y - q * y1
    print("x = ", x)
    print("y = ", y)
def gcd(a,b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def modulo_inverse_euclidean(a,n):
    """Return [x, y] with x*a + y*n == 1 (so x is a**-1 modulo n), or [0, 0]
    when gcd(a, n) != 1 and no inverse exists."""
    if gcd(a, n) != 1:
        return [0, 0]
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, n
    while v3 != 0:
        q = u3 // v3
        u1, v1 = v1, u1 - q * v1
        u2, v2 = v2, u2 - q * v2
        u3, v3 = v3, u3 - q * v3
    return [u1, u2]
# Interactive driver: "Nhap" = "enter"; "Khong co nghich dao" = "no modular inverse".
print("Nhap n=")
n=int(input())
print("Nhap a=")
a=int(input())
res = modulo_inverse_euclidean(a,n)
if(a==0):
    print("Khong co nghich dao")
else:
    if(n%a==0):
        print("Khong co nghich dao")
    else:
        # NOTE(review): `&` is bitwise AND and binds tighter than `==`, so this
        # condition evaluates as res[0] == (0 & res[1]) == 0, i.e. just
        # res[0] == 0. Logical `and` was almost certainly intended.
        if(res[0] == 0 & res[1]==0):
            print("Khong co nghich dao")
        else:
            # Normalize a negative coefficient into the range [0, n).
            if(res[0]<0):
                c=res[0]+n
                print("a'=%d" % (c))
            else:
                print("a'=%d" % (res[0]))
if __name__ == '__main__':
gcd_extend(a,b) | [
"[email protected]"
] | |
ba35438e1d6b05abc413431836945ef3ca569972 | 9888a22651ceb0f36f987aa4f7c8ed4c02e1862d | /src/model/connection/server_connection.py | d0894937a0efbb617cb8e3e988fbf68ffd086680 | [
"MIT"
] | permissive | jtkorhonen/mtms-console | 7359be1db54e67b9fc604a0be16e5d66ad18c02c | 8861a8938ce0f1cc213ad1f85c5f5a6606e37f29 | refs/heads/main | 2023-03-18T22:02:27.746506 | 2021-03-10T12:29:46 | 2021-03-10T12:29:46 | 345,942,485 | 0 | 0 | MIT | 2021-03-10T12:28:16 | 2021-03-09T08:56:09 | Python | UTF-8 | Python | false | false | 4,982 | py | #!/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import logging
from enum import Enum
from typing import Optional, Callable
import asyncio
from .helpers import validate_url
logger = logging.getLogger(__name__)
class ConnectionStatus(Enum):
    """Lifecycle states of a ServerConnection."""
    UNDEFINED = -1
    DISCONNECTED = 0
    CONNECTED = 1
    DISCONNECTING = 2  # disconnect in progress (still counts as "connected")
    CONNECTING = 3     # connection attempt in progress
    CANCELLING = 4     # transient state while cancel() tears tasks down
class ServerConnection():
    """Bogus server connection model.
    """
    def __init__(self,
                 url: Optional[str] = None,
                 on_connection_status_changed: Callable[[ServerConnection, ConnectionStatus, ConnectionStatus], None] = None
                 ):
        """Create a disconnected connection; *url* may be supplied later via
        the ``url`` property. The optional callback is registered as a status
        observer and is invoked as cb(self, new_status, old_status)."""
        self._url = validate_url(url)
        # Set of observers notified on every connection_status change.
        self._on_connection_status_changed = set()
        if on_connection_status_changed:
            self._on_connection_status_changed.add(on_connection_status_changed)
        self._connection_status = ConnectionStatus.DISCONNECTED
        # In-flight asyncio tasks; None when no connect/disconnect is running.
        self._connect_task = None
        self._disconnect_task = None
        logger.debug(f"Initialized ServerConnection \"{self}\" with url=\"{self._url}\".")
    @property
    def url(self):
        """Server URL; validated by validate_url on every assignment."""
        return self._url
    @url.setter
    def url(self, value):
        self._url = validate_url(value)
    def _on_connection_status_changed_setter(self, value):
        # Body of the write-only property below: registers an extra observer.
        self._on_connection_status_changed.add(value)
    # Write-only property: assigning adds an observer; reading is unsupported.
    on_connection_status_changed = property(None, _on_connection_status_changed_setter)
    @property
    def connection_status(self):
        """Current ConnectionStatus; assigning notifies every observer."""
        return self._connection_status
    @connection_status.setter
    def connection_status(self, value: ConnectionStatus):
        old_value = self._connection_status
        self._connection_status = value
        for callback in self._on_connection_status_changed:
            callback(self, value, old_value)
    @property
    def connected(self):
        """True while connected, including a disconnect still in progress."""
        return self.connection_status in (ConnectionStatus.CONNECTED, ConnectionStatus.DISCONNECTING)
    @property
    def connecting(self):
        """True while a connection attempt is in progress."""
        return self.connection_status == ConnectionStatus.CONNECTING
        # return self._connect_task is not None
    async def connect(self) -> bool:
        """Attempt to connect; return True on success, False on failure or
        cancellation.

        Raises ConnectionError when no URL is set or when another connection
        attempt is already running.
        """
        logger.info(f"Connecting to server \"{self.url}\"...")
        self.connection_status = ConnectionStatus.CONNECTING
        if self.url is None:
            self.connection_status = ConnectionStatus.DISCONNECTED
            logger.error("Could not initiate connection. URL is not set.")
            raise ConnectionError("Could not initiate connection. URL is not set.")
        if self._connect_task is not None:
            # NOTE(review): status stays CONNECTING on this raise — confirm intended.
            logger.error("Could not initiate another connection. Connection process is already started.")
            raise ConnectionError("Please disconnect or cancel the current connection attempt first.")
        async def connect_coro():
            # Bogus connection method
            await asyncio.sleep(5)
            return True
        self._connect_task = asyncio.create_task(connect_coro())
        # NOTE(review): if the task is cancelled, this bare await raises
        # CancelledError *before* the try/except below can catch it — confirm.
        await self._connect_task
        try:
            if self._connect_task.result():
                logger.info("Connection success.")
                self.connection_status = ConnectionStatus.CONNECTED
                self._connect_task = None
                return True
            else:
                logger.error("Connection failed.")
                self.connection_status = ConnectionStatus.DISCONNECTED
                self._connect_task = None
                return False
        except asyncio.CancelledError:
            logger.info("Connection cancelled during the process.")
            self.connection_status = ConnectionStatus.DISCONNECTED
            self._connect_task = None
            return False
    async def disconnect(self) -> None:
        """Tear down the connection (no-op when not connected).

        Cancels any in-flight connect task first; raises ConnectionError when
        a disconnect is already running.
        """
        if not self.connected:
            return
        self.connection_status = ConnectionStatus.DISCONNECTING
        if self._disconnect_task is not None:
            raise ConnectionError("Disconnect is already started.")
        async def disconnect_coro():
            if self._connect_task is not None:
                self._connect_task.cancel()
                await asyncio.sleep(2)
                self._connect_task = None
            await asyncio.sleep(1)
        self._disconnect_task = asyncio.create_task(disconnect_coro())
        await self._disconnect_task
        self.connection_status = ConnectionStatus.DISCONNECTED
        self._disconnect_task = None
    def cancel(self) -> None:
        """Cancel whichever connect/disconnect task is currently running.

        Cancelling a connect leaves the connection DISCONNECTED; cancelling a
        disconnect reverts the status to CONNECTED.
        """
        logger.info("Cancelling connection requested.")
        self.connection_status = ConnectionStatus.CANCELLING
        if self._connect_task is not None:
            self._connect_task.cancel()
            self._connect_task = None
            self.connection_status = ConnectionStatus.DISCONNECTED
        if self._disconnect_task is not None:
            self._disconnect_task.cancel()
            self._disconnect_task = None
            self.connection_status = ConnectionStatus.CONNECTED
| [
"[email protected]"
] | |
8a2c72c03696bed7a71a0ccf77f70d54cf3b7c60 | e0332fff3931ada2a491990237d5721e04292cdb | /qiskit/circuit/library/template_circuits/toffoli/template_9d_9.py | 9a269ed985714606603b1a1b144ecfc6e5be81b3 | [
"Apache-2.0"
] | permissive | AustinGilliam/qiskit-terra | b5ab851cab94e1222c008bc3a5696a6981fd3f99 | a4caec885fe5ff013092979ee00d5cdd9c26e848 | refs/heads/master | 2022-12-21T04:47:01.370948 | 2020-09-21T09:17:22 | 2020-09-21T09:17:22 | 283,307,467 | 0 | 0 | Apache-2.0 | 2020-07-28T19:25:11 | 2020-07-28T19:25:11 | null | UTF-8 | Python | false | false | 1,641 | py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Template 9d_9:
.. parsed-literal::
q_0: ──■────■────■─────────■────■─────────■────■──
│ │ ┌─┴─┐ │ ┌─┴─┐ │ ┌─┴─┐
q_1: ──■────┼──┤ X ├───────■──┤ X ├───────■──┤ X ├
┌─┴─┐┌─┴─┐└─┬─┘┌───┐┌─┴─┐└─┬─┘┌───┐┌─┴─┐└─┬─┘
q_2: ┤ X ├┤ X ├──■──┤ X ├┤ X ├──■──┤ X ├┤ X ├──■──
└───┘└───┘ └───┘└───┘ └───┘└───┘
"""
from qiskit.circuit.quantumcircuit import QuantumCircuit
def template_9d_9():
    """
    Returns:
        QuantumCircuit: template as a quantum circuit.
    """
    qc = QuantumCircuit(3)
    qc.ccx(0, 1, 2)
    qc.cx(0, 2)
    # Remaining identity: (CCX(0,2,1), X(2), CCX(0,1,2)) twice, then a final CCX(0,2,1).
    for _ in range(2):
        qc.ccx(0, 2, 1)
        qc.x(2)
        qc.ccx(0, 1, 2)
    qc.ccx(0, 2, 1)
    return qc
| [
"[email protected]"
] | |
5918738529e12ee8429b809d031b11311b8647cb | bef1177d3c9f5a6484d80139365f6c994c985cee | /quantum classifier/cin/dataset/iris.py | a2007cada090242da36e38b5fb418a02a1a844e8 | [] | no_license | israelferrazaraujo/dcsp | 6c32272959d1823d13d38fc10779c706309907eb | 44177a73417f783a7ef9bebe916caf7db1fb8ef1 | refs/heads/master | 2023-02-27T04:53:43.727026 | 2021-02-06T10:06:08 | 2021-02-06T10:06:08 | 285,107,847 | 1 | 0 | null | 2020-08-04T21:49:02 | 2020-08-04T21:49:02 | null | UTF-8 | Python | false | false | 941 | py | import numpy as np
from sklearn import datasets
def load(classes):
    """Load Iris restricted to *classes*, relabel targets to -1/+1, and scale
    each feature column into [0, 1] by dividing by its maximum."""
    iris = datasets.load_iris()
    features = iris.data      # pylint: disable=no-member
    labels = iris.target      # pylint: disable=no-member
    # Keep only the samples whose label belongs to the requested classes.
    features = np.array([feat for feat, lab in zip(features, labels) if lab in classes])
    lowest = min(classes)
    span = max(classes) - lowest
    # Map the lower class to -1 and the upper class to +1.
    labels = np.array([2 * ((lab - lowest) // span) - 1 for lab in labels if lab in classes])
    # Rescale columns to [0, 1]; Iris features are all positive.
    scaled = features / features.max(axis=0)
    return scaled, labels
| [
"[email protected]"
] | |
bf1caeb574730c6e6c13e8c7fa72ae842a3a9dd5 | a6d61c8f5cf893213b5c3b47ae354b6e6bd2dd11 | /source/main.py | b27622fd6a13eb3b4eb2be2ba948ea714d93d3b7 | [] | no_license | mtaziz/anomaly-detection | aa9ddb61d156f393d4cdbba40cebea8404ee97f4 | 3cafa3cdf5b325b4804e3374bb1e77937e2b51dd | refs/heads/master | 2022-11-25T15:33:08.800914 | 2020-07-31T19:02:35 | 2020-07-31T19:02:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | from source.machine_learning.adquire_data import read_data
from source.machine_learning.preprocess import process
from source.machine_learning.train import train
from source.machine_learning.validate import validate
from cd4ml import tracking
from cd4ml.pipeline_params import pipeline_params
def run():
    """End-to-end pipeline: load the data, preprocess it, train the configured
    model and validate it, logging parameters and results to the tracker."""
    # get_file()
    df = read_data()
    x_normal, x_attack = process(df)
    # Model name and its hyper-parameters both come from pipeline_params.
    model_name = pipeline_params['model_name']
    params = pipeline_params['model_params'][model_name]
    with tracking.track() as track:
        model, x_normal_test = train(x_normal, params)
        track.log_ml_params(params)
        track.log_pipeline_params(pipeline_params)
        validate(model, x_normal, x_attack, x_normal_test, track)
| [
"[email protected]"
] | |
d8bf06539adc2d58a081df688a73c9b2aa5d8fdc | 3944261f92ad298450742fd2d55300e58ad4df38 | /apps/users/migrations/0002_banner_emailverifyrecord.py | 77cbc4577ec23ff9779676dcddbf0a320d5724b3 | [] | no_license | ybyangjian/moocdemo | f38124351bb2da5421768e3a2aa94dd81d281cd6 | 0ebbe7f7da44fbea1a08dbbe0142fd7394891ee6 | refs/heads/master | 2020-03-13T07:21:44.856961 | 2018-04-28T15:34:35 | 2018-04-28T15:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py | # Generated by Django 2.0.4 on 2018-04-25 14:50
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Banner (carousel image) and EmailVerifyRecord (email
    verification code) tables. verbose_name strings are user-facing Chinese
    labels and must not be altered."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Banner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='标题')),
                ('image', models.ImageField(upload_to='banner/%Y/%m', verbose_name='轮播图')),
                ('url', models.URLField(verbose_name='访问地址')),
                ('index', models.IntegerField(default=100, verbose_name='顺序')),
                # default is the callable datetime.now, evaluated per row insert.
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '轮播图',
                'verbose_name_plural': '轮播图',
            },
        ),
        migrations.CreateModel(
            name='EmailVerifyRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20, verbose_name='验证码')),
                ('email', models.EmailField(max_length=50, verbose_name='邮箱')),
                ('send_type', models.CharField(choices=[('register', '注册'), ('forget', '找回密码')], max_length=10, verbose_name='邮件类型')),
                ('send_time', models.DateTimeField(default=datetime.datetime.now)),
            ],
            options={
                'verbose_name': '邮箱验证码',
                'verbose_name_plural': '邮箱验证码',
            },
        ),
    ]
| [
"[email protected]"
] | |
063e327c3f613572963cbe12854b56e017fe62cb | 856f5d5482fd973f902afb13be5782c8fc57cca3 | /hw5/test.py | 986ed071f44f1123f9e6b8664cc13061235dcc8b | [] | no_license | wukm/579 | 021e12ada91923ae756bddfd18033c1da7e9174d | cfaa8f8fa782cb97d275daea552fe0682f40a90f | refs/heads/master | 2020-05-31T08:09:15.472410 | 2015-05-22T00:18:00 | 2015-05-22T00:18:00 | 29,958,134 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #!/usr/bin/env python3
"""
make this a unit test.
this should result in an x_est s.t.
nonzeros(x_est) = [a] where a is very close to 1 (i.e. .99)
and other items are 0 or very close
"""
import numpy
from numpy.random import randn
from scipy.linalg import norm
from LASSO import *
def nonzeros(a):
    """Return the nonzero entries of *a* as a 1-by-k row vector."""
    nz_index = a.nonzero()
    return a[nz_index].reshape((1, -1))
# Sparse-recovery sanity check: x has a single nonzero entry (index 2), A is a
# random 50x500 sensing matrix with unit-norm rows, and lasso() should recover x.
x = numpy.zeros((500,1))
x[2] = [1.]
A = randn(50,500)
# normalize by row
for row in A:
    row /= norm(row)
b = A.dot(x)
x_est = lasso(A,b,.001)
| [
"[email protected]"
] | |
f726fc166e76a8551aa2031ad6a49a81b90801fc | d4561571faf82f6b61fca851c42ceeea09df72eb | /commands/base.py | 1aab32f7545f1becc5b2fec37b65a430f7db7a38 | [
"MIT"
] | permissive | rafaelcassau/pycommands | 614659f005d6b7badd8be81bc00ddab957a7c442 | e94db33915463264539d49a1887d414fd0bbb9f5 | refs/heads/master | 2021-06-12T01:40:06.445078 | 2019-11-07T20:05:06 | 2019-11-07T20:05:06 | 198,507,558 | 2 | 2 | MIT | 2021-06-02T00:22:52 | 2019-07-23T21:00:53 | Python | UTF-8 | Python | false | false | 1,750 | py | import logging
import subprocess
import sys
from subprocess import CalledProcessError, TimeoutExpired
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
class BaseCommand:
    """Template for a shell command with optional undo support.

    Subclasses override ``build`` (and optionally ``build_undo`` /
    ``get_validators``); ``execute``/``undo`` validate, build the command
    line and run it through subprocess with a timeout.
    """

    def __init__(self, timeout=60):
        """Initialize an idle command; *timeout* is passed to subprocess.run."""
        self.command = None
        self.undo_command = None
        self.timeout = timeout

    def execute(self):
        """Validate, build and run the forward command."""
        self._validate()
        self.command = self.build()
        logger.info(f"running command: {self.command}")
        self._call(self.command.split())

    def build(self):
        """Subclasses must return the command line as a single string."""
        raise NotImplementedError()

    def get_validators(self):
        """Validators executed before execute()/undo(); none by default."""
        return []

    def undo(self):
        """Validate, build and run the undo command, when one is defined."""
        self._validate()
        self.undo_command = self.build_undo()
        logger.info(f"running undo command: {self.undo_command}")
        if self.undo_command:
            self._call(self.undo_command.split())

    def build_undo(self):
        """Subclasses may return an undo command line; None disables undo."""
        return None

    def _call(self, command):
        """Run *command* (argv list); log stdout on success, errors otherwise."""
        try:
            result = subprocess.run(
                command, capture_output=True, check=True, timeout=self.timeout
            )
            if result.stdout:
                logger.info(f"success: {result.stdout}")
        except CalledProcessError as error:
            logger.error(f"returned error: {error.stderr}")
        except TimeoutExpired as error:
            logger.error(f"returned error timeout: {error}")
        except Exception as error:
            # Catch-all so a failed command never propagates out of the runner.
            logger.error(f"returned error: {error}")

    def _validate(self):
        """Run every configured validator; each may raise to abort the command."""
        for validator in self.get_validators():
            validator.validate()
| [
"[email protected]"
] | |
6587b8bef4d0b5548dedf4bbbb41f0fd5f4dccc0 | 537b7b1d67f39b2c0351d58906e7b24125866d5f | /Restful_API_flask/bai2_my_app/__init__.py | 66289d19b1115fa30bd963c7bf351bffbe654531 | [] | no_license | PhungXuanAnh/python-note | 8836ca37d9254c504f9801acca3977e2c28a4f60 | 9181c4845af32c2148e65313cbb51c2837420078 | refs/heads/master | 2023-08-31T02:43:16.469562 | 2023-08-22T03:14:44 | 2023-08-22T03:14:44 | 94,281,269 | 16 | 5 | null | 2023-05-22T22:30:46 | 2017-06-14T02:46:49 | Python | UTF-8 | Python | false | false | 342 | py | # khoi tao app, database va ket noi dung voi nhau
# Initialize the Flask app and the SQLAlchemy database and wire them together.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
# Imported late on purpose: the views module needs `app`/`db` defined above.
from Restful_API_flask.bai2_my_app.catalog.views import catalog
app.register_blueprint(catalog)
db.create_all()
| [
"xuananh@viosoft-PC"
] | xuananh@viosoft-PC |
cc0c205a5eadcfb338aabc12159d5d53c11ec7dd | 115ff19cdc284d66bc58596a6da1550b59f395d7 | /TrainingApp/demo-service.py | 0a052883536ace06aaf063562e6e5ec91ae5c988 | [] | no_license | quentinPitalier/mappa | 9201cf30513ad29b2250a9d19dd4bcab5cc6e6e0 | 571b529c3869827cefc0022dbfcab8e461ba1962 | refs/heads/master | 2021-05-06T19:06:12.532721 | 2017-11-25T20:01:04 | 2017-11-25T20:01:04 | 112,008,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,616 | py | #!/usr/bin/env python
import qi
import sys
import os
import pyrebase
class DemoService:
    """NAOqi application service: connects ALMemory/ALDialog and reacts to a
    "DemoService/exit" memory event by stopping the application."""
    services_connected = None  # qi.Promise resolved once all services connect
    connected_signals = []     # NOTE(review): class-level mutable, shared across instances
def __init__(self, application):
# Getting a session that will be reused everywhere
self.application = application
self.session = application.session
self.service_name = self.__class__.__name__
# Getting a logger. Logs will be in /var/log/naoqi/servicemanager/{application id}.{service name}
self.logger = qi.Logger(self.service_name)
# Do some initializations before the service is registered to NAOqi
self.logger.info("Initializing...")
self.connect_services()
self.logger.info("Initialized!")
self.add_memory_subscriber("DemoService/exit", self.exit_service)
self.logger.info("Service started")
@qi.nobind
def exit_service(self,value):
self.logger.info("Calling stop service...")
self.stop_service(value)
@qi.nobind
def start_service(self):
self.logger.info("Starting service...")
self.load_dialog()
# do something when the service starts
self.logger.info("Started!")
@qi.nobind
def stop_service(self, value):
# probably useless, unless one method needs to stop the service from inside.
# external naoqi scripts should use ALServiceManager.stopService if they need to stop it.
self.logger.info("Stopping service...")
self.application.stop()
self.logger.info("Stopped!")
@qi.nobind
def connect_services(self):
# connect all services required by your module
# done in async way over 30s,
# so it works even if other services are not yet ready when you start your module
# this is required when the service is autorun as it may start before other modules...
self.logger.info('Connecting services...')
self.services_connected = qi.Promise()
services_connected_fut = self.services_connected.future()
def get_services():
try:
self.memory = self.session.service('ALMemory')
self.dialog = self.session.service('ALDialog')
# connect other services if needed...
self.logger.info('All services are now connected')
self.services_connected.setValue(True)
except RuntimeError as e:
self.logger.warning('Still missing some service:\n {}'.format(e))
get_services_task = qi.PeriodicTask()
get_services_task.setCallback(get_services)
get_services_task.setUsPeriod(int(2*1000000)) # check every 2s
get_services_task.start(True)
try:
services_connected_fut.value(30*1000) # timeout = 30s
get_services_task.stop()
except RuntimeError:
get_services_task.stop()
self.logger.error('Failed to reach all services after 30 seconds')
raise RuntimeError
### Utility functions ###
@qi.nobind
def load_dialog(self):
self.logger.info("Loading dialog")
dir_path = os.path.dirname(os.path.realpath(__file__))
lang = self.dialog.getLanguage()
self.logger.info(lang)
if (lang == 'English'):
topic_path = os.path.realpath(os.path.join(dir_path, "..", "trainingapp/dialog", "dialog_enu.top"))
else:
topic_path = os.path.realpath(os.path.join(dir_path, "..", "trainingapp/dialog", "dialog_fif.top"))
self.logger.info(topic_path)
try:
self.loadedTopic = self.dialog.loadTopic(topic_path)
self.dialog.activateTopic(self.loadedTopic)
self.dialog.subscribe(self.service_name)
self.logger.info("Dialog loaded...")
except Exception, e:
self.logger.info("Error while loading dialog: {}".format(e))
@qi.nobind
def unload_dialog(self):
# if needed, here is how to unload a dialog from Python
self.logger.info("Unloading dialog")
try:
#dialog = self.session.service("ALDialog")
self.dialog.unsubscribe(self.service_name)
self.dialog.deactivateTopic(self.loadedTopic)
self.dialog.unloadTopic(self.loadedTopic)
self.logger.info("Dialog unloaded.")
except Exception, e:
self.logger.info("Error while unloading dialog: {}".format(e))
@qi.nobind
def add_memory_subscriber(self, event, callback):
# add memory subscriber utility function
self.logger.info("Subscribing to {}".format(event))
try:
self.memory.declareEvent(event)
sub = self.memory.subscriber(event)
con = sub.signal.connect(callback)
self.connected_signals.append([sub, con])
except Exception, e:
self.logger.info("Error while subscribing: {}".format(e))
@qi.nobind
def remove_memory_subscribers(self):
# remove memory subscribers utility function
self.logger.info("unsubscribing to all signals...")
for sub, con in self.connected_signals:
try:
sub.signal.disconnect(con)
except Exception, e:
self.logger.info("Error while unsubscribing: {}".format(e))
@qi.nobind
def show_tablet(self):
# how to load and display the webpage on the tablet
dir_path = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
folder = os.path.basename(dir_path)
self.logger.info("Loading tablet page for app: {}".format(folder))
try:
ts = self.session.service("ALTabletService")
ts.loadApplication(folder)
ts.showWebview()
except Exception, e:
self.logger.info("Error while loading tablet: {}".format(e))
@qi.nobind
def on_disable_movements(self, useless_value):
# if needed, here is how to disable movements for a short time
try:
self.session.service("ALBasicAwareness").setEnabled(False)
# ALMotion.setBreathEnabled("Body", 0)
self.session.service("ALMotion").setIdlePostureEnabled("Head", 1)
# ALBackgroundMovement.setEnabled(0)
except Exception, e:
self.logger.info("Error while disabling movements: {}".format(e))
@qi.nobind
def on_enable_movements(self, useless_value):
# if needed, enable movements back
try:
self.session.service("ALBasicAwareness").setEnabled(True)
# ALMotion.setBreathEnabled("Body", 0)
self.session.service("ALMotion").setIdlePostureEnabled("Head", 0)
# ALBackgroundMovement.setEnabled(0)
except Exception, e:
self.logger.info("Error while enabling movements: {}".format(e))
### ################# ###
def cleanup(self):
# called when your module is stopped
self.logger.info("Cleaning...")
self.unload_dialog()
# do something
self.remove_memory_subscribers()
self.logger.info("End!")
if __name__ == "__main__":
    # with this you can run the script for tests on remote robots
    # run : python my_super_service.py --qi-url 10.0.137.169
    app = qi.Application(sys.argv)
    app.start()
    service_instance = DemoService(app)
    # Register under the class name so other modules can call it via NAOqi.
    service_id = app.session.registerService(service_instance.service_name, service_instance)
    service_instance.start_service()
    # Blocks until the application is stopped (e.g. via the exit event).
    app.run()
    service_instance.cleanup()
    app.session.unregisterService(service_id)
| [
"[email protected]"
] | |
1c82743c96d1940c061fc6d1386573e2303e61f5 | 832690986c5b97336616617231e10b9e1bd72502 | /permutation.py | 2d98a36ec1069c88937fb1a9ece5735a5d97555c | [] | no_license | digitallyamar/Python-Permutation | 63a185702d306c354a077c167aee9efa998bdb16 | 4d41da4b5e80bf42ac240ba0381f4f46fd201ee9 | refs/heads/main | 2023-01-19T14:15:26.469873 | 2020-11-29T11:25:47 | 2020-11-29T11:25:47 | 316,933,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | from itertools import permutations
# Print every permutation of the list, from full length down to single elements.
my_list = [1, 2, 3, 4, 5]
for size in range(len(my_list), 0, -1):
    for perm in permutations(my_list, size):
        print(perm)
"[email protected]"
] | |
ac0134ceac28d0816c485119d79811f97dd8fdb3 | d82a1781665d6caa1a5a2eff85b306eb19680f07 | /AoC21_reversed.py | 14adc0ab24348bf4e377ce2427049432a8b2bc24 | [] | no_license | Solaxun/aoc2016 | 91aedce86bb9abb8cc81cf896982b02c7606946a | 5d7084cd0d8fbfba32c70f844186ea3615e23d93 | refs/heads/master | 2021-01-12T00:57:05.451086 | 2017-01-19T04:58:54 | 2017-01-19T04:58:54 | 78,320,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | """
swap x y index
swap x y actual (even if not in string at least try)
rotate left or right x steps
rotate right based on index of letter x - right once, then right index num plus one more if index >= 4
reverse x through y inclusive (index)
move x to y means remove x, and insert so it ends at pos y
"""
from collections import deque
import re

# Read the scrambling instructions, one per line.
# Fix: use a context manager so the file handle is closed (the original
# ``open(...).read()`` leaked the handle).
with open('AoC21.txt') as _inst_file:
    directions = _inst_file.read().split('\n')
# hgdebafc wrong
def swap_index(text, x, y):
    """Return ``text`` with the characters at positions x and y exchanged."""
    chars = list(text)
    chars[x], chars[y] = chars[y], chars[x]
    return ''.join(chars)
def swap_letter(text, y, x):
    """Return ``text`` with every occurrence of letter ``x`` and letter ``y``
    swapped with each other.

    Fixes: drops the unused ``swapped`` list and the unused ``enumerate``
    index of the original, using a single translation table instead.
    """
    return text.translate(str.maketrans({x: y, y: x}))
def rotate_plain(text, right):
    """Rotate ``text`` left by ``right`` positions (negative rotates right)."""
    if not text:
        return text
    k = right % len(text)
    return text[k:] + text[:k]
def rotate_on_position_letter(text, letter):
    """Left-rotate ``text`` by 1 + index(letter), plus one extra step when
    the index is at least 4 (AoC day-21 'rotate based on position' rule)."""
    idx = text.index(letter)
    steps = 1 + idx + (1 if idx >= 4 else 0)
    k = steps % len(text)
    return text[k:] + text[:k]
def reverse_through_indexes(text, x, y):
    """Reverse the substring between positions x and y, inclusive.

    The two indexes may be given in either order.
    """
    lo, hi = sorted((x, y))
    return text[:lo] + text[lo:hi + 1][::-1] + text[hi + 1:]
def move_to(text, y, x):
    """Remove the character at position x and re-insert it at position y."""
    chars = list(text)
    chars.insert(y, chars.pop(x))
    return ''.join(chars)
def get_numbers(text):
    """Return all runs of digits in ``text`` as a list of ints.

    Fix: use a raw string for the regex (``'\\d+'`` as a plain literal is a
    deprecated escape in modern Python).
    """
    return [int(tok) for tok in re.findall(r'\d+', text)]
def parse_inst(inst, salt):
    """Dispatch one scramble instruction string to the matching operation
    and return the transformed ``salt`` (None if nothing matches)."""
    print(inst)
    if inst.startswith('rotate right'):
        amount = get_numbers(inst)[0]
        return rotate_plain(salt, amount)
    if inst.startswith('rotate left'):
        amount = get_numbers(inst)[0]
        return rotate_plain(salt, -amount)
    if inst.startswith('swap position'):
        i, j = get_numbers(inst)
        return swap_index(salt, i, j)
    if inst.startswith('reverse positions'):
        i, j = get_numbers(inst)
        return reverse_through_indexes(salt, i, j)
    if inst.startswith('move position'):
        i, j = get_numbers(inst)
        return move_to(salt, i, j)
    if inst.startswith('rotate based on'):
        # single-letter words are the operand letters
        target = re.findall(r'\b\w\b', inst)[0]
        return rotate_on_position_letter(salt, target)
    if inst.startswith('swap letter'):
        first, second = re.findall(r'\b\w\b', inst)
        return swap_letter(salt, first, second)
# NOTE(review): ``res`` appears unused in this script (likely the start string
# of the forward scrambler).
res = 'abcdefgh'
# Apply every instruction in order to the scrambled password.
scrambled = 'fbgdceah'
for d in directions:
    scrambled = parse_inst(d,scrambled)
print(scrambled)
| [
"[email protected]"
] | |
b7d5cb97979742b3898d60ce38760912032fb414 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03472/s684454987.py | b6b020b957450bcb071ab3ec20a7b70b19eedd33 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | import sys
mina=10**10
def waru(a,b):
if a%b==0:
return a//b
else:
return (a//b)+1
N,H=map(int,input().split())
A=list()
B=list()
for i in range(N):
a,b=map(int,input().split())
A.append(a)
B.append(b)
ma=max(A)
ind=A.index(ma)
B=[i for i in B if i>ma]
B=sorted(B,reverse=True)
cou=1
s=0
for i in range(len(B)):
s+=B[i]
if H<=s:
print(i+1)
sys.exit()
H=H-s
print(len(B)+waru(H,ma)) | [
"[email protected]"
] | |
d869081774173e11aebfc948ae74e1c95ef6ac88 | 4eb569fb25a5084dd94b65a23726976d907db9ba | /app/__init__.py | a1daead289cd1f93d6b03c77111ba8c015d186ca | [] | no_license | zhengxingliu/Cloud-Photo-Gallery-with-Text-Detection | 297402af0f3168ec0484fd32d8e8ac212cd582c3 | 43592dcb4d9cbc7fc41bcfe415c81d47ce9c809e | refs/heads/master | 2022-04-08T12:48:37.518693 | 2020-03-28T00:56:34 | 2020-03-28T00:56:34 | 217,774,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | from flask import Flask, session
from datetime import timedelta
webapp = Flask(__name__)
from app import user
from app import photo
from app import http_rate
# set session timeout, user login expires after 1 day
webapp.config['PERMANENT_SESSION_LIFETIME'] = timedelta(days=1)
webapp.config['SECRET_KEY'] = 'bf7\xf3MP\xe1\x00}\xaf\xffk5\xeb\xb7\xe7o\xda\x05\x10\xcb\x0b\xff\x03'
# restrict file size to 5 Mb
webapp.config['MAX_CONTENT_LENGTH'] = 5 * 1024 * 1024
| [
"[email protected]"
] | |
c470558508805f0829d0884a4351f44ba88dba99 | 31178640835955cff2dcac9fe5b61339bec62842 | /eval.py | 302506664c8e136fd8ed818978b99b172fc50880 | [] | no_license | JonyFaker/Segmentation-developing | 0215c80969e4b16c64cfeacf814595b96c3cba79 | 55579f318a7d3d26769ea73bca05eabee2698866 | refs/heads/master | 2020-03-18T17:50:54.812600 | 2019-01-03T12:08:08 | 2019-01-03T12:08:08 | 135,054,852 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,250 | py | # System libs
import os
import datetime
import argparse
from distutils.version import LooseVersion
# Numerical libs
import numpy as np
import torch
import torch.nn as nn
from scipy.io import loadmat
# Our libs
from dataset import ValDataset
from models import ModelBuilder, SegmentationModule
from utils import AverageMeter, colorEncode, accuracy, intersectionAndUnion
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy, mark_volatile
import lib.utils.data as torchdata
import cv2
import time
import timeit
import torch.backends.cudnn as cudnn
# Let cuDNN auto-tune convolution algorithms (fastest when input sizes are stable).
cudnn.benchmark = True
def visualize_result(data, preds, args):
    """Save an <image | ground truth | prediction> strip as a PNG in args.result."""
    img, seg, info = data
    # 150-class color palette loaded from data/color150.mat.
    palette = loadmat('data/color150.mat')['colors']
    panels = (img, colorEncode(seg, palette), colorEncode(preds, palette))
    im_vis = np.concatenate(panels, axis=1).astype(np.uint8)
    out_name = info.split('/')[-1].replace('.jpg', '.png')
    cv2.imwrite(os.path.join(args.result, out_name), im_vis)
def evaluate(segmentation_module, loader, args):
    """Run the model over the validation loader, averaging predictions over
    the scales in ``args.imgSize``, then print per-class IoU and accuracy."""
    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    segmentation_module.eval()
    forward_time = 0  # NOTE(review): never updated; only used in commented-out prints
    dur = 0           # NOTE(review): never updated
    for i, batch_data in enumerate(loader):
        # process data (batch_size is 1, so unwrap the single sample)
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']

        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            # Accumulator for the multi-scale average of class scores.
            pred = torch.zeros(1, args.num_class, segSize[0], segSize[1])
            # pred = torch.zeros(1, args.num_class, 480, 640)
            # print("pred: ", pred.size())
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, args.gpu_id)
                # forward pass (synchronize so the timing below is accurate)
                torch.cuda.synchronize()
                start_time = timeit.default_timer()
                # print("segSize = ", segSize)
                pred_tmp = segmentation_module(feed_dict, segSize=segSize)
                # print("pred_tmp: ", pred_tmp.size())
                torch.cuda.synchronize()
                elapsed = timeit.default_timer() - start_time
                print('inference time:{}, segSize:{}'.format(elapsed, segSize))
                # Average the scores over all evaluated scales.
                pred = pred + pred_tmp.cpu() / len(args.imgSize)

            # Per-pixel argmax over classes -> predicted label map.
            _, preds = torch.max(pred.data.cpu(), dim=1)
            preds = as_numpy(preds.squeeze(0))

        # calculate accuracy
        acc, pix = accuracy(preds, seg_label)
        intersection, union = intersectionAndUnion(preds, seg_label, args.num_class)
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        print('[{}] iter {}, accuracy: {}'
              .format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                      i, acc))

        # visualization
        if args.visualize:
            visualize_result(
                (batch_data['img_ori'], seg_label, batch_data['info']),
                preds, args)

    # print("time cost:", dur)
    # print("total forward time cost: {}, FPS: {}".format(forward_time, 5050//forward_time) )
    # Summary: IoU per class (epsilon avoids division by zero for absent classes).
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {}'.format(i, _iou))

    print('[Eval Summary]:')
    print('Mean IoU: {:.4}, Accuracy: {:.2f}%'
          .format(iou.mean(), acc_meter.average()*100))
    # print("time cost:", dur)
    # print("total forward time cost: {}, FPS: {}".format(forward_time, 2000//forward_time) )
def main(args):
    """Build encoder/decoder, wrap them into a SegmentationModule, create the
    validation loader, and run :func:`evaluate`.

    Fix: removed the stray semicolon after ``isMacNet = True``.
    """
    torch.cuda.set_device(args.gpu_id)

    # Network Builders
    builder = ModelBuilder()
    # NOTE(review): hard-coded flag; the else branch (encoder built without
    # use_softmax) is currently unreachable.
    isMacNet = True
    if isMacNet:
        net_encoder = builder.build_encoder(
            arch=args.arch_encoder,
            fc_dim=args.fc_dim,
            weights=args.weights_encoder,
            use_softmax=True)
    else:
        net_encoder = builder.build_encoder(
            arch=args.arch_encoder,
            fc_dim=args.fc_dim,
            weights=args.weights_encoder)
    net_decoder = builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        num_class=args.num_class,
        # weights=args.weights_decoder,
        use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and Loader
    dataset_val = ValDataset(
        args.list_val, args, max_sample=args.num_val)
    loader_val = torchdata.DataLoader(
        dataset_val,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()
    print(net_encoder)

    # Main loop
    evaluate(segmentation_module, loader_val, args)

    print('Evaluation Done!')
if __name__ == '__main__':
    assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
        'PyTorch>=0.4.0 is required'

    parser = argparse.ArgumentParser()
    # Model related arguments
    parser.add_argument('--id', required=True,
                        help="a name for identifying the model to load")
    parser.add_argument('--suffix', default='_epoch_20.pth',
                        help="which snapshot to load")
    parser.add_argument('--arch_encoder', default='resnet50_dilated8',
                        help="architecture of net_encoder")
    parser.add_argument('--arch_decoder', default='ppm_bilinear_deepsup',
                        help="architecture of net_decoder")
    parser.add_argument('--fc_dim', default=2048, type=int,
                        help='number of features between encoder and decoder')
    # Path related arguments
    parser.add_argument('--list_val',
                        default='./data/validation.odgt')
    parser.add_argument('--root_dataset',
                        default='./data/')
    # Data related arguments
    parser.add_argument('--num_val', default=-1, type=int,
                        help='number of images to evalutate')
    parser.add_argument('--num_class', default=150, type=int,
                        help='number of classes')
    parser.add_argument('--batch_size', default=1, type=int,
                        help='batchsize. current only supports 1')
    parser.add_argument('--imgSize', default=[450], nargs='+', type=int,
                        help='list of input image sizes.'
                             'for multiscale testing, e.g. 300 400 500 600')
    parser.add_argument('--imgMaxSize', default=1000, type=int,
                        help='maximum input image size of long edge')
    parser.add_argument('--padding_constant', default=8, type=int,
                        help='maxmimum downsampling rate of the network')
    # Misc arguments
    parser.add_argument('--ckpt', default='./ckpt',
                        help='folder to output checkpoints')
    parser.add_argument('--visualize', action='store_true', default=False,
                        help='output visualization?')
    parser.add_argument('--result', default='./result',
                        help='folder to output visualization results')
    parser.add_argument('--gpu_id', default=1, type=int,
                        help='gpu_id for evaluation')

    args = parser.parse_args()
    print(args)

    # absolute paths of model weights
    args.weights_encoder = os.path.join(args.ckpt, args.id,
                                        'encoder' + args.suffix)
    args.weights_decoder = os.path.join(args.ckpt, args.id,
                                        'decoder' + args.suffix)
    # BUG FIX: the original asserted os.path.exists(args.weights_encoder)
    # twice and never checked the decoder checkpoint.
    assert os.path.exists(args.weights_encoder) and \
        os.path.exists(args.weights_decoder), 'checkpoint does not exitst!'

    args.result = os.path.join(args.result, args.id)
    if not os.path.isdir(args.result):
        os.makedirs(args.result)

    main(args)
| [
"[email protected]"
] | |
08d82ea861ed6eee8f531713ffd5141071ed595d | 698a33fc5ba3814eae252b9c08db40befc3a301c | /klinurl_api/klinurl_api/settings.py | 76bce9f747d97584f1b136cf98dfd29878ba48a9 | [
"MIT"
] | permissive | HarrietAkot/klin-url | fb1f62e3f43c7fce40cdfcd43dcd6e48c3eac59f | 1b13291cf3f5f40a8d7b46c76f8a70695a55df32 | refs/heads/main | 2023-01-25T02:07:30.554527 | 2020-11-23T00:05:41 | 2020-11-23T00:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,787 | py | """
Django settings for klinurl_api project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env = environ.Env(
DEBUG=(bool, False)
)
env_file = os.path.join(BASE_DIR, ".env")
environ.Env.read_env(env_file)
DEBUG = env('DEBUG')
SECRET_KEY = env('SECRET_KEY')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#third party packages
'rest_framework',
'rest_framework_swagger',
'corsheaders',
#developer apps
'url_shortener'
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'klinurl_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
REST_FRAMEWORK = {
"DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.coreapi.AutoSchema",
}
WSGI_APPLICATION = 'klinurl_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
if 'DATABASE_URL' in os.environ:
import dj_database_url
DATABASES['default'] = dj_database_url.config(conn_max_age=600, ssl_require=True)
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
CORS_ORIGIN_ALLOW_ALL = True
| [
"[email protected]"
] | |
0ad7d9c088386520f67394b94a0cb1764d95f141 | 41a3caecc80a5e9da719ff906718297987c787ee | /lecture_3/tasks/views.py | ea5a629049b6f93d81a1332f8c9a871b4509228b | [] | no_license | astafanous/Django | 942af6455ceac29cc7b93b0bc1f7bde7dbd666c1 | 3b5fd918687c829cf09a7b8bdc60a7ec7ac35d86 | refs/heads/master | 2023-05-01T18:44:47.154421 | 2021-05-24T14:46:42 | 2021-05-24T14:46:42 | 369,448,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | from django import forms
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
# tasks = ["foo", "bar", "baz"]
# tasks = []
class NewTaskForm(forms.Form):
task = forms.CharField(label="New Task")
# priority = forms.IntegerField(label="Priority", min_value=1, max_value=5)
# Create your views here.
# def index(request):
# return render(request, "tasks/index.html", {
# "tasks": tasks
# })
def index(request):
    """Render the task list kept in the user's session, creating an empty
    list on the first visit."""
    tasks = request.session.setdefault("tasks", [])
    return render(request, "tasks/index.html", {"tasks": tasks})
# Add a new task:
def add(request):
    """Show the new-task form; on a valid POST, append the task to the
    session list and redirect back to the index."""
    if request.method != "POST":
        # Plain GET: render an empty form.
        return render(request, "tasks/add.html", {
            "form": NewTaskForm()
        })
    form = NewTaskForm(request.POST)
    if not form.is_valid():
        # Server-side validation failed: re-render with the submitted data.
        return render(request, "tasks/add.html", {
            "form": form
        })
    # Append the cleaned task and send the user back to the list.
    request.session["tasks"] += [form.cleaned_data["task"]]
    return HttpResponseRedirect(reverse("tasks:index"))
| [
"[email protected]"
] | |
39c54c0f3a91f4a6abf3b5eb069ff2fd357bce5b | 91e91449b2c554a82a0438995cb90f39cae4e537 | /issues/migrations/0002_issue.py | 1c9576ee3768ce3a9eacd11611f53304a027aac9 | [] | no_license | paul4000/github-issues-manager | 922e4875db0b7575d4898ae95703e64e66ccad6d | 7648971966fa1079abb6f256f0c6c0a26c2a3eb0 | refs/heads/master | 2021-07-18T02:12:28.067480 | 2020-06-02T19:36:08 | 2020-06-02T19:36:08 | 167,267,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | # Generated by Django 2.1.2 on 2019-02-05 20:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('issues', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Issue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('github_number', models.IntegerField()),
('github_html_url', models.TextField()),
('github_state', models.TextField()),
('github_title', models.TextField()),
('github_body', models.TextField()),
('label', models.TextField()),
('github_created_at', models.DateTimeField()),
('github_updated_at', models.DateTimeField()),
('github_comments_number', models.IntegerField()),
('priority', models.IntegerField(choices=[(1, '1 - HIGH'), (2, '2 - MEDIUM'), (3, '3 - LOW')], default=3)),
('deadline', models.DateTimeField()),
('github_assignee_login', models.TextField()),
('github_assignee_url_profile', models.TextField()),
('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='issues.Repository')),
],
),
]
| [
"[email protected]"
] | |
a9b9299e0daaa0d0fb5aa3c702fb8a4915ccfae9 | 586068e0de8011da1310c07834a068e441f1f3f2 | /graphql/execution/experimental/tests/test_mutations.py | 431073f4d376c7170da3d475db6d99efb38a9104 | [
"MIT"
] | permissive | yen223/graphql-core | 36603497b87af07b6f8e267c0cfdb22cf5f121fc | 431ea35d383e8064bb8d5829e7b3cb8d4f395cd9 | refs/heads/master | 2021-01-11T01:01:15.429546 | 2017-05-10T21:15:27 | 2017-05-10T21:15:27 | 90,983,518 | 0 | 1 | null | 2017-05-11T13:39:45 | 2017-05-11T13:39:45 | null | UTF-8 | Python | false | false | 4,263 | py | from graphql.language.parser import parse
from graphql.type import (GraphQLArgument, GraphQLField, GraphQLInt,
GraphQLList, GraphQLObjectType, GraphQLSchema,
GraphQLString)
from ..executor import execute
class NumberHolder(object):
    """Mutable wrapper holding one integer, exposed as ``theNumber``."""

    def __init__(self, n):
        self.theNumber = n
class Root(object):
def __init__(self, n):
self.numberHolder = NumberHolder(n)
def immediately_change_the_number(self, n):
self.numberHolder.theNumber = n
return self.numberHolder
def promise_to_change_the_number(self, n):
# TODO: async
return self.immediately_change_the_number(n)
def fail_to_change_the_number(self, n):
raise Exception('Cannot change the number')
def promise_and_fail_to_change_the_number(self, n):
# TODO: async
self.fail_to_change_the_number(n)
# GraphQL object type mirroring the NumberHolder Python class.
NumberHolderType = GraphQLObjectType('NumberHolder', {
    'theNumber': GraphQLField(GraphQLInt)
})

QueryType = GraphQLObjectType('Query', {
    'numberHolder': GraphQLField(NumberHolderType)
})

# Each mutation field resolves by calling the matching method on the Root object.
MutationType = GraphQLObjectType('Mutation', {
    'immediatelyChangeTheNumber': GraphQLField(
        NumberHolderType,
        args={'newNumber': GraphQLArgument(GraphQLInt)},
        resolver=lambda obj, args, *_:
        obj.immediately_change_the_number(args['newNumber'])),
    'promiseToChangeTheNumber': GraphQLField(
        NumberHolderType,
        args={'newNumber': GraphQLArgument(GraphQLInt)},
        resolver=lambda obj, args, *_:
        obj.promise_to_change_the_number(args['newNumber'])),
    'failToChangeTheNumber': GraphQLField(
        NumberHolderType,
        args={'newNumber': GraphQLArgument(GraphQLInt)},
        resolver=lambda obj, args, *_:
        obj.fail_to_change_the_number(args['newNumber'])),
    'promiseAndFailToChangeTheNumber': GraphQLField(
        NumberHolderType,
        args={'newNumber': GraphQLArgument(GraphQLInt)},
        resolver=lambda obj, args, *_:
        obj.promise_and_fail_to_change_the_number(args['newNumber'])),
})

schema = GraphQLSchema(QueryType, MutationType)
def assert_evaluate_mutations_serially(executor=None):
    """Run five aliased mutations and assert they executed in document order."""
    doc = '''mutation M {
      first: immediatelyChangeTheNumber(newNumber: 1) {
        theNumber
      },
      second: promiseToChangeTheNumber(newNumber: 2) {
        theNumber
      },
      third: immediatelyChangeTheNumber(newNumber: 3) {
        theNumber
      }
      fourth: promiseToChangeTheNumber(newNumber: 4) {
        theNumber
      },
      fifth: immediatelyChangeTheNumber(newNumber: 5) {
        theNumber
      }
    }'''
    ast = parse(doc)
    result = execute(schema, ast, Root(6), operation_name='M', executor=executor)
    assert not result.errors
    # Each alias must reflect the value set by *its own* mutation, proving
    # serial (not interleaved) execution.
    assert result.data == \
        {
            'first': {'theNumber': 1},
            'second': {'theNumber': 2},
            'third': {'theNumber': 3},
            'fourth': {'theNumber': 4},
            'fifth': {'theNumber': 5},
        }
def test_evaluates_mutations_serially():
    """Mutations must run serially with the default (synchronous) executor."""
    assert_evaluate_mutations_serially()
def test_evaluates_mutations_correctly_in_the_presense_of_a_failed_mutation():
    """Failing mutations yield ``None`` for their field and collect errors,
    while later mutations still run."""
    doc = '''mutation M {
      first: immediatelyChangeTheNumber(newNumber: 1) {
        theNumber
      },
      second: promiseToChangeTheNumber(newNumber: 2) {
        theNumber
      },
      third: failToChangeTheNumber(newNumber: 3) {
        theNumber
      }
      fourth: promiseToChangeTheNumber(newNumber: 4) {
        theNumber
      },
      fifth: immediatelyChangeTheNumber(newNumber: 5) {
        theNumber
      }
      sixth: promiseAndFailToChangeTheNumber(newNumber: 6) {
        theNumber
      }
    }'''
    ast = parse(doc)
    result = execute(schema, ast, Root(6), operation_name='M')
    # The two failing fields resolve to None; the rest keep their values.
    assert result.data == \
        {
            'first': {'theNumber': 1},
            'second': {'theNumber': 2},
            'third': None,
            'fourth': {'theNumber': 4},
            'fifth': {'theNumber': 5},
            'sixth': None,
        }
    assert len(result.errors) == 2
    # TODO: check error location
    assert result.errors[0].message == 'Cannot change the number'
    assert result.errors[1].message == 'Cannot change the number'
| [
"[email protected]"
] | |
d98c1414a799bd3ca5f26129831f1b0c15580229 | 9f1394dde9a51ba9cdeb6f83db82aa2f7b028e9f | /59.py | 7fbb3127552a6d9e99408ee93677fe6980e84e3b | [] | no_license | Yuvanshankar21/beginner-set-5 | 143c42c8a042106034d3279b89745a396751600a | 65f6e4ca5c3bcf30f1aa64d2c6bb6a0b0d7e8989 | refs/heads/master | 2020-06-21T05:53:13.593001 | 2019-08-04T06:07:02 | 2019-08-04T06:07:02 | 197,361,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | num=int(input())
count=0
while(0<num):
count=count+1
num//=10
print(count)
| [
"[email protected]"
] | |
42e006389fff8b08ff28973bbffa8caf713652c5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03402/s593504264.py | 64942787d3b18bc2ccc81dfb44fa5d990935a6e8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,212 | py | #!/usr/bin/env python3
import sys
# import time
# import math
# import numpy as np
# import scipy.sparse.csgraph as cs # csgraph_from_dense(ndarray, null_value=inf), bellman_ford(G, return_predecessors=True), dijkstra, floyd_warshall
# import random # random, uniform, randint, randrange, shuffle, sample
# import string # ascii_lowercase, ascii_uppercase, ascii_letters, digits, hexdigits
# import re # re.compile(pattern) => ptn obj; p.search(s), p.match(s), p.finditer(s) => match obj; p.sub(after, s)
# from bisect import bisect_left, bisect_right # bisect_left(a, x, lo=0, hi=len(a)) returns i such that all(val<x for val in a[lo:i]) and all(val>-=x for val in a[i:hi]).
# from collections import deque # deque class. deque(L): dq.append(x), dq.appendleft(x), dq.pop(), dq.popleft(), dq.rotate()
# from collections import defaultdict # subclass of dict. defaultdict(facroty)
# from collections import Counter # subclass of dict. Counter(iter): c.elements(), c.most_common(n), c.subtract(iter)
# from datetime import date, datetime # date.today(), date(year,month,day) => date obj; datetime.now(), datetime(year,month,day,hour,second,microsecond) => datetime obj; subtraction => timedelta obj
# from datetime.datetime import strptime # strptime('2019/01/01 10:05:20', '%Y/%m/%d/ %H:%M:%S') returns datetime obj
# from datetime import timedelta # td.days, td.seconds, td.microseconds, td.total_seconds(). abs function is also available.
# from copy import copy, deepcopy # use deepcopy to copy multi-dimentional matrix without reference
# from functools import reduce # reduce(f, iter[, init])
# from functools import lru_cache # @lrucache ...arguments of functions should be able to be keys of dict (e.g. list is not allowed)
# from heapq import heapify, heappush, heappop # built-in list. heapify(L) changes list in-place to min-heap in O(n), heappush(heapL, x) and heappop(heapL) in O(lgn).
# from heapq import nlargest, nsmallest # nlargest(n, iter[, key]) returns k-largest-list in O(n+klgn).
# from itertools import count, cycle, repeat # count(start[,step]), cycle(iter), repeat(elm[,n])
# from itertools import groupby # [(k, list(g)) for k, g in groupby('000112')] returns [('0',['0','0','0']), ('1',['1','1']), ('2',['2'])]
# from itertools import starmap # starmap(pow, [[2,5], [3,2]]) returns [32, 9]
# from itertools import product, permutations # product(iter, repeat=n), permutations(iter[,r])
# from itertools import combinations, combinations_with_replacement
# from itertools import accumulate # accumulate(iter[, f])
# from operator import itemgetter # itemgetter(1), itemgetter('key')
# from fractions import gcd # for Python 3.4 (previous contest @AtCoder)
def main():
mod = 1000000007 # 10^9+7
inf = float('inf') # sys.float_info.max = 1.79...e+308
# inf = 2 ** 64 - 1 # (for fast JIT compile in PyPy) 1.84...e+19
sys.setrecursionlimit(10**6) # 1000 -> 1000000
def input(): return sys.stdin.readline().rstrip()
def ii(): return int(input())
def mi(): return map(int, input().split())
def mi_0(): return map(lambda x: int(x)-1, input().split())
def lmi(): return list(map(int, input().split()))
def lmi_0(): return list(map(lambda x: int(x)-1, input().split()))
def li(): return list(input())
def increase_symbol(seq, sym, num):
for i in range(1, 48, 2):
for j in range(1, 98, 2):
if num > 0:
seq[i][j] = sym
num -= 1
else:
return
a, b = mi()
upper = [['#'] * 99 for _ in range(49)]
lower = [['.'] * 99 for _ in range(49)]
increase_symbol(upper, '.', a - 1)
increase_symbol(lower, '#', b - 1)
grid = upper + lower
print("98 99")
for line in grid:
print(''.join(line))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
1096376646272e6e4c6f694be7f696f06836c1b1 | fd070e08e244e98446107d21b133f8a70b4984a7 | /baseballcardproject/baseballcardapp/views/players/list.py | 5fa8747778536fb7a57ea2c5976a1f2d5d3d3d92 | [] | no_license | ChaseSully222/Backend-Capstone | a033397f82432074a3c96e0775064cb604c18ed2 | c3d5392c63dfc49396e1066f9c724400337063a2 | refs/heads/master | 2023-08-01T13:33:27.524060 | 2020-07-01T02:22:54 | 2020-07-01T02:22:54 | 271,038,506 | 0 | 0 | null | 2021-09-22T19:13:52 | 2020-06-09T15:21:40 | Python | UTF-8 | Python | false | false | 1,648 | py | import sqlite3
from django.shortcuts import render, redirect, reverse
from baseballcardapp.models import Player
from baseballcardapp.models import model_factory
from ..connection import Connection
from string import ascii_uppercase
playerCount = Player.objects.all().count()
def player_list(request):
if request.method == 'GET':
with sqlite3.connect(Connection.db_path) as conn:
conn.row_factory = model_factory(Player)
db_cursor = conn.cursor()
letter = '%'
if request.GET.get('letter', None) is not None:
letter = f'{request.GET.get("letter", None)}%'
else:
letter = '%'
db_cursor.execute("""
SELECT
p.firstName,
p.lastName,
p.id
FROM baseballcardapp_player p
WHERE lastName LIKE ?
""", (letter,))
all_players = db_cursor.fetchall()
template = 'players/list.html'
context = {
'all_players': all_players,
'alphabet': ascii_uppercase
}
return render(request, template, context)
elif request.method == 'POST':
form_data = request.POST
with sqlite3.connect(Connection.db_path) as conn:
db_cursor = conn.cursor()
db_cursor.execute("""
INSERT INTO baseballcardapp_player
(
firstName,lastName
)
VALUES (?, ?)
""",
(form_data['firstName'], form_data['lastName']))
return redirect(reverse('baseballcardapp:players'))
| [
"[email protected]"
] | |
2269f70b99adcbcb2b425bbb05b96c5e1f0382b1 | b7bb2c03fbb6a55270fa193eb80bad4388a06661 | /raw_pre/raw_lstm.py | 035f4a08f10a9cd1eec7366211199f7a40a8f586 | [] | no_license | voltelxu/notes | b50fa54583d789d7e363f7614838b5e89b0d7814 | 419db03a335e4c3da62806b7a9d311a604c323aa | refs/heads/master | 2022-01-09T18:30:59.867372 | 2019-06-21T10:13:32 | 2019-06-21T10:13:32 | 112,202,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,406 | py | import tensorflow as tf
import numpy as np
import random
'''
global variable
'''
train_rate = 0.7 #the percent of the train_data set
lr = 0.5 # the learning rate
batch_size = 2 # the batch size of model
embed_size = 128 # the word embedding size
voc_size = 0 # the vocabulary size
max_num_step = 0 # the max number step unroll of the model
num_layers = 2 # the layers of the model
hidden_size = 500 # the layer size of the model
num_epoch = 1 # the train times of model
#get the vocabulary
def wordid(filename):
dic = open(filename, 'r')
voc = dict()
for line in dic:
line = line.strip('\n')
lines = line.split(' ')
voc[lines[0]] = int(lines[1])
# lables
lable = dict()
lable['Z'] = 0
lable['B'] = 1
lable['E'] = 2
lable['M'] = 3
lable['S'] = 4
lable['A'] = 5
return voc, lable
word_ids, lable_ids = wordid('worddic')
voc_size = len(word_ids)
lable_size = 6
# get data set and chang to ids
# change the data set to train and test data set
def getdata(filename):
datafile = open(filename, 'r')
datas = list()
#the max length of the sentence
max_scelen = 0
for line in datafile:
words = line.strip('\n').split(" ")
tup = list()
data = list()
lable = list()
for word in words:
terms = word.split(":")
data.append(word_ids.get(terms[0], 0))
lable.append(lable_ids[terms[1]])
if len(data) > max_scelen:
max_scelen = len(data)
tup.append(data)
tup.append(lable)
datas.append(tup)
len_data = len(datas)
#shuffle the data set
random.shuffle(datas)
# datas length sentence
dataslen = [length for length in [len(x[0]) for x in datas]]
# padding data
for d in datas:
l = len(d[0])
d[0] += [0 for i in range(max_scelen - l)]
d[1] += [0 for i in range(max_scelen - l)]
#split position
split = int(len_data * train_rate)
#split dataset to train and test sets
train = datas[0 : split]
test = datas[split : len_data]
#train_ data
train_data = [t for t in [x[0] for x in train]]
#train_lable
train_lable = [t for t in [x[1] for x in train]]
# train lenght
train_len = dataslen[0 : split]
#test_data
test_data = [t for t in [x[0] for x in test]]
#test_lable
test_lable = [t for t in [x[1] for x in test]]
# test_data length0
test_len = dataslen[split : len_data]
#return train and test set
return train_data, train_lable, train_len, test_data, test_lable, test_len, max_scelen
train_data,train_lable, train_len, test_data, test_lable, test_len, max_len = getdata("test")
# max sequence length
max_num_step = max_len
# get batch input data
def getbatch(data, lable, lens, start, batch_size):
batch_data = data[start : start + batch_size]
batch_seqlen = lens[start : start + batch_size]
batch_lable = lable[start : start + batch_size]
return batch_data, batch_lable, batch_seqlen
# batch_data, batch_lable, batch_seqlen = getbatch(train_data, train_lable, 1, 8)
# print len(batch_data)
def dynamicRNN(inputs, seqlen, weights, bias):
#
# inputs = tf.unstack(inputs, max_num_step, 1)
#lstm cell
lstm_cell = tf.contrib.rnn.BasicLSTMCell(hidden_size)
#dropout
# cell = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=0.7)
#outputs and state
# outputs, state = tf.contrib.rnn.static_rnn(cell, inputs, dtype=tf.float32,
# sequence_length=seqlen)
outputs, state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32, sequence_length=seqlen)
# outputs shape [num_step, batch_size, hidden_size]
# outputs = tf.stack(outputs)
#last out puts
'''batch = tf.shape(outputs)[0]
outsize = tf.shape(outputs)[2]
index = [tf.range(0, batch),(seqlen - 1)]
outputs = tf.gather(tf.reshape(outputs, [-1, outsize]), index)'''
outputs = tf.reshape(outputs, [batch_size * max_num_step, hidden_size])
#outputs shape [batch_size, num_step, hidden_size]
outputs = tf.matmul(outputs, weights) + bias
# outputs = tf.reshape(outputs, [batch_size, max_num_step, lable_size])
# softmax = tf.nn.softmax(outputs)
# softmax = tf.reshape(softmax, [batch_size, max_num_step, lable_size])
return outputs
def model():
# input data
input_data = tf.placeholder(dtype=tf.int32, shape=[batch_size, max_num_step])
# targets lables
target = tf.placeholder(dtype=tf.int32, shape=[batch_size, max_num_step])
# true seq lenght
seqlen = tf.placeholder(dtype=tf.int32, shape=[None])
# seq mask [1, 0]
# selmask = tf.sequence_mask(seqlen, maxlen=max_num_step, dtype=tf.float32)
seqem = tf.constant(np.tril(np.ones([max_num_step + 1, max_num_step]), -1), dtype=tf.float32)
seqmask = tf.nn.embedding_lookup(seqem, seqlen)
# embeddings
embedding = tf.get_variable(name='embedding', shape=[voc_size, embed_size])
# change word to tensor
inputs = tf.nn.embedding_lookup(embedding, input_data)
# weights
weights = tf.get_variable("weights",[hidden_size, lable_size])
# bias
bias = tf.get_variable("bias", [lable_size])
# rnn cell
logits = dynamicRNN(inputs, seqlen, weights, bias)
#
pred = tf.nn.softmax(logits)
pred = tf.cast(tf.argmax(pred, 1), tf.int32)
# out = tf.argmax(pred,1)
correct = tf.cast(tf.equal(pred, tf.reshape(target, [-1])), tf.int32) * tf.cast(tf.reshape(seqmask, [-1]), tf.int32)
# accuracy
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32))/tf.reduce_sum(tf.cast(seqmask, tf.float32))
#
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=tf.reshape(target, [-1]))
loss = loss * tf.reshape(seqmask, [-1])
cost = tf.reduce_sum(loss) / tf.reduce_sum(seqmask)
#optimizer
opt = tf.train.AdamOptimizer(0.1).minimize(cost)
#
'''logits = tf.reshape(pred, [batch_size, max_num_step, lable_size])
loss = tf.contrib.seq2seq.sequence_loss(logits,
target,
tf.ones([batch_size, max_num_step]))
cost = tf.reduce_mean(loss)
softmax_out = tf.nn.softmax(tf.reshape(logits, [-1, lable_size]))0
predict = tf.cast(tf.argmax(softmax_out, axis=1),tf.int32)
correct = tf.equal(predict, tf.reshape(target,[-1]))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
tf.summary.scalar("accuracy", accuracy)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)
opt = tf.train.GradientDescentOptimizer(learning_rate=lr).apply_gradients(zip(grads,tvars))
'''
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
writer = tf.summary.FileWriter('./path', sess.graph)
merged = tf.summary.merge_all()
# train
epoch_size = len(train_data)/batch_size
testsize = len(test_data)/batch_size
'''
for s in range(20):
alls = 0.0
for i in range(epoch_size):
x, y, l = getbatch(train_data, train_lable, train_len, i * batch_size, batch_size)
_, a, c = sess.run([opt, accuracy, correct], feed_dict={input_data : x, target : y, seqlen : l})
alls += a
print alls/epoch_size
'''
for step in range(20000):
for i in xrange(0, epoch_size, 1):
x, y, l = getbatch(train_data, train_lable, train_len, i * batch_size, batch_size)
sess.run([opt], feed_dict={input_data : x, target : y, seqlen : l})
sum_loss = 0.0
sum_ac = 0.0
num = 0
for j in xrange(0, testsize, 1):
num = num + 1
tx, ty, tl = getbatch(test_data, test_lable, test_len, j * batch_size, batch_size)
ac, loss = sess.run([accuracy, cost], feed_dict={input_data : tx, target : ty, seqlen : tl})
sum_ac = sum_ac + ac
sum_loss = sum_loss + loss
sum_ac = sum_ac/num
print "step "+str(step)+", accuracy "+str(sum_ac)+", loss " + str(sum_loss)
model() | [
"[email protected]"
] | |
db5f0da3df6e5192b2a58b8ee7e3399d3a85be3d | 859e958d5397d0bd35adb3bcfc63261d893a4a54 | /functions/DICTIONARIES_AGNfitter.py | 6e395d0f2c93b226f0c9c716d751b8cc4d506f58 | [] | no_license | DLZRR/AGNfitterALESS | 205ca861b37a22eae2ae626d8a1879bb23908c02 | d9ddbe7c6a70a6962977dfd6a9019bd0629623b1 | refs/heads/master | 2020-03-17T21:49:29.175567 | 2018-07-06T11:41:31 | 2018-07-06T11:41:31 | 133,977,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,390 | py |
"""%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
DICTIONARIES_AGNFitter.py
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script contains all functions which are needed to construct the total model of AGN.
##For constructing a new dictionary,
(in cases: 1)add a filter which is not included,
2) need finer grid for better S/N data)
see DICTIONARIES_AGNfitter.py
"""
import sys,os
import numpy as np
import sys
from collections import defaultdict
import MODEL_AGNfitter as model
import FILTERS_AGNfitter as filterpy
from scipy.integrate import trapz
from scipy.interpolate import interp1d
import time
import cPickle
from astropy import units as u
class MODELSDICT:
"""
Class MODELSDICT
Builds a dictionary of model templates.
##input:
- filename of the dictionary you want to create
- the AGNfitter path is in your computer
- the filters settings (dictionary from the settings file)
- Also variables self.ebvgal_array,self.ebvbbb_array, self.z_array
can be changed by the user, for a finer grid in this parameters.
##bugs:
"""
def __init__(self, filename, path, filters, models):
self.filename = filename
self.path=path
self.modelsettings=models
## To be called form filters
self.z_array = filters['dict_zarray']
#print filters
a = dict.fromkeys(filters)
for i in range(len(a.keys())):
if a.keys()[i] == 'add_filters' or a.keys()[i] == 'dict_zarray' or a.keys()[i] == 'add_filters_dict' or a.keys()[i] == 'path':
a[a.keys()[i]] = filters[a.keys()[i]]
else:
a[a.keys()[i]] = filters[a.keys()[i]][0]
#print a
self.filters_list = a
if os.path.lexists(filename):
self.fo = cPickle.load(file(filename, 'rb'))
self.filters = self.fo.filternames
self.filterset_name = self.fo.name
else:
self.fo = filterpy.create_filtersets(a, path)
self.filters = self.fo.filternames
self.filterset_name = self.fo.name
def build(self):
Modelsdict = dict()
i=0
dictionary_progressbar(i, len(self.z_array), prefix = 'Dict:', suffix = 'Complete', barLength = 50)
for z in self.z_array:
i += 1
filterdict = [self.fo.central_nu_array, self.fo.lambdas_dict, self.fo.factors_dict]
dict_modelsfiltered = construct_dictionaryarray_filtered(z, filterdict, self.path, self.modelsettings)
Modelsdict[str(z)] = dict_modelsfiltered
time.sleep(0.01)
dictionary_progressbar(i, len(self.z_array), prefix = 'Dict:', suffix = 'Complete', barLength = 50)
self.MD = Modelsdict
def construct_dictionaryarray_filtered( z, filterdict,path, modelsettings):
"""
Construct the dictionaries of fluxes at bands (to compare to data),
and dictionaries of fluxes over the whole spectrum, for plotting.
All calculations are done at one given redshift.
"""
GALAXYFdict_filtered = dict()
STARBURSTFdict_filtered = dict()
BBBFdict_filtered = dict()
TORUSFdict_filtered = dict()
GALAXYFdict_4plot, GALAXY_SFRdict, galaxy_parnames = model.GALAXY(path, modelsettings)
for c in GALAXYFdict_4plot.keys():
gal_nu, gal_Fnu=GALAXYFdict_4plot[c]
bands, gal_Fnu_filtered = filtering_models(gal_nu, gal_Fnu, filterdict, z)
GALAXYFdict_filtered[c] = bands, gal_Fnu_filtered
STARBURSTFdict_4plot, starburst_parnames = model.STARBURST(path, modelsettings)
for c in STARBURSTFdict_4plot.keys():
sb_nu, sb_Fnu=STARBURSTFdict_4plot[c]
bands, sb_Fnu_filtered = filtering_models(sb_nu, sb_Fnu, filterdict, z)
STARBURSTFdict_filtered[c] = bands, sb_Fnu_filtered
BBBFdict_4plot, bbb_parnames = model.BBB(path, modelsettings)
for c in BBBFdict_4plot.keys():
bbb_nu, bbb_Fnu=BBBFdict_4plot[c]
bands, bbb_Fnu_filtered = filtering_models(bbb_nu, bbb_Fnu, filterdict, z)
BBBFdict_filtered[c] = bands, bbb_Fnu_filtered
TORUSFdict_4plot, torus_parnames = model.TORUS(path, modelsettings)
for c in TORUSFdict_4plot.keys():
tor_nu, tor_Fnu=TORUSFdict_4plot[c]
bands, tor_Fnu_filtered = filtering_models(tor_nu, tor_Fnu, filterdict, z)
TORUSFdict_filtered[c] = bands, tor_Fnu_filtered
norm_parnames = ['GA', 'SB', 'BB', 'TO' ]
all_parnames = [galaxy_parnames, starburst_parnames,torus_parnames, bbb_parnames, norm_parnames]
return STARBURSTFdict_filtered , BBBFdict_filtered, GALAXYFdict_filtered, TORUSFdict_filtered, \
STARBURSTFdict_4plot , BBBFdict_4plot, GALAXYFdict_4plot, TORUSFdict_4plot,GALAXY_SFRdict, all_parnames
def dictkey_arrays(MODELSdict):
"""
Summarizes the model dictionary keys and does the interpolation to nearest value in grid.
used to be transporte to data
##input:
##output:
"""
STARBURSTFdict , BBBFdict, GALAXYFdict, TORUSFdict, _,_,_,_,GALAXY_SFRdict, all_parnames= MODELSdict
galaxy_parkeys= np.array(list(GALAXYFdict.keys()))
starburst_parkeys = np.array(list(STARBURSTFdict.keys()))
torus_parkeys = np.array(list(TORUSFdict.keys()))
bbb_parkeys = np.array(list(BBBFdict.keys()))
class pick_obj:
def __init__(self, par_names,pars_modelkeys):
self.pars_modelkeys=pars_modelkeys.T
self.pars_modelkeys_float =self.pars_modelkeys.astype(float)
self.par_names = par_names
def pick_nD(self, pars_mcmc):
self.matched_parkeys = []
for i in range(len(pars_mcmc)):
matched_idx =np.abs(self.pars_modelkeys_float[i]-pars_mcmc[i]).argmin()
matched_parkey = self.pars_modelkeys[i][matched_idx]
self.matched_parkeys.append(matched_parkey)
def pick_1D(self, *pars_mcmc):
matched_idx =np.abs(self.pars_modelkeys_float-pars_mcmc).argmin()
self.matched_parkeys = self.pars_modelkeys[matched_idx]
galaxy_parnames, starburst_parnames,torus_parnames, bbb_parnames, norm_parnames = all_parnames
gal_obj =pick_obj(galaxy_parnames,galaxy_parkeys)
sb_obj =pick_obj(starburst_parnames,starburst_parkeys)
tor_obj=pick_obj(torus_parnames,torus_parkeys)
bbb_obj=pick_obj(bbb_parnames,bbb_parkeys)
return gal_obj,sb_obj,tor_obj, bbb_obj
def filtering_models( model_nus, model_fluxes, filterdict, z ):
"""
Projects the model SEDs into the filter curves of each photometric band.
##input:
- model_nus: template frequencies [log10(nu)]
- model_fluxes: template fluxes [F_nu]
- filterdict: dictionary with all band filter curves' information.
To change this, add one band and filter curve, etc,
look at DICTIONARIES_AGNfitter.py
- z: redshift
##output:
- bands [log10(nu)]
- Filtered fluxes at these bands [F_nu]
"""
bands, lambdas_dict, factors_dict = filterdict
filtered_model_Fnus = []
# Costumize model frequencies and fluxes [F_nu]
# to same units as filter curves (to wavelengths [angstrom] and F_lambda)
model_lambdas = nu2lambda_angstrom(model_nus) * (1+z)
model_lambdas = model_lambdas[::-1]
model_fluxes_nu = model_fluxes[::-1]
model_fluxes_lambda = fluxnu_2_fluxlambda(model_fluxes_nu, model_lambdas)
mod2filter_interpol = interp1d(model_lambdas, model_fluxes_lambda, kind = 'nearest', bounds_error=False, fill_value=0.)
# For filter curve at each band.
# (Vectorised integration was not possible -> different filter-curve-arrays' sizes)
for iband in bands:
# Read filter curves info for each data point
# (wavelengths [angstrom] and factors [non])
lambdas_filter = np.array(lambdas_dict[iband])
factors_filter = np.array(factors_dict[iband])
iband_angst = nu2lambda_angstrom(iband)
# Interpolate the model fluxes to
#the exact wavelengths of filter curves
modelfluxes_at_filterlambdas = mod2filter_interpol(lambdas_filter)
# Compute the flux ratios, equivalent to the filtered fluxes:
# F = int(model)/int(filter)
integral_model = trapz(modelfluxes_at_filterlambdas*factors_filter, x= lambdas_filter)
integral_filter = trapz(factors_filter, x= lambdas_filter)
filtered_modelF_lambda = (integral_model/integral_filter)
# Convert all from lambda, F_lambda to Fnu and nu
filtered_modelFnu_atfilter_i = fluxlambda_2_fluxnu(filtered_modelF_lambda, iband_angst)
filtered_model_Fnus.append(filtered_modelFnu_atfilter_i)
return bands, np.array(filtered_model_Fnus)
## ---------------------------------------------------
c = 2.997e8
Angstrom = 1.e10
def fluxlambda_2_fluxnu (flux_lambda, wl_angst):
"""
Calculate F_nu from F_lambda.
"""
flux_nu = flux_lambda * (wl_angst**2. ) / c /Angstrom
return flux_nu
def fluxnu_2_fluxlambda (flux_nu, wl_angst):
"""
Calculate F_lambda from F_nu.
"""
flux_lambda = flux_nu / wl_angst**2 *c * Angstrom
return flux_lambda #in angstrom
def nu2lambda_angstrom(nus):
"""
Calculate wavelength [angstrom] from frequency [log Hz].
"""
lambdas = c / (10**nus) * Angstrom
return lambdas
## ------------------------------------------------
def dictionary_progressbar (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):
"""
Print progress bar of dictionary construction
"""
filledLength = int(round(barLength * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
bar = '>' * filledLength + '-' * (barLength - filledLength)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
| [
"[email protected]"
] | |
d9cc495ec1b18f73a3ba8fbcaedd9a6744f5b454 | 59ef249574f087775ee9d21576fb962af7e284ce | /front_end_flask.py | 1513d17c249d76d169e3a70e2e4e4d2703ef8e7b | [] | no_license | djonvincent/DSCW | 8c69bc5dd511c1f9d48a54bdbad98c0b1dc89f14 | c5ab73f05f419a64c408228594414fc43679fa42 | refs/heads/master | 2020-04-26T10:41:07.285242 | 2019-03-08T01:04:03 | 2019-03-08T01:04:03 | 173,493,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,098 | py | import Pyro4
import json
import random
from flask import Flask, request, abort
app = Flask(__name__)
update_id = 0
ns = Pyro4.locateNS()
proxies = {}
servers = []
def refresh_servers():
global proxies
global servers
proxies = {}
servers = []
for k, v in ns.list(prefix='MovieRating').items():
proxies[k] = Pyro4.Proxy(v)
servers.append(k)
print(servers)
refresh_servers()
class NoServersOnlineError(Exception):
pass
def execute(func, *args, avoid=[]):
if len(servers) == 0:
raise NoServersOnlineError()
servers_copy = servers[:]
for s in avoid:
servers_copy.remove(s)
if len(servers_copy) == 0:
servers_copy = servers[:]
server = random.choice(servers_copy)
proxy = proxies[server]
try:
status = proxy.get_status()
if status == 'overloaded':
print(server + ' is overloaded, trying another')
avoid.append(server)
return execute(func, *args, avoid=avoid)
result = {}
result['result'] = getattr(proxy, func)(*args)
result['server'] = server
return result
except Pyro4.errors.CommunicationError:
ns.remove(server)
servers.remove(server)
return execute(func, *args)
@app.errorhandler(404)
def not_found(error):
return json.dumps({
'error': 'Movie not found'
}), 404
@app.route('/<movie_id>', methods=['GET'])
def get_movie(movie_id):
refresh_servers()
try:
result = execute('get_movie', movie_id, avoid=[])
return json.dumps(result)
except NoServersOnlineError:
abort(503)
except KeyError:
abort(404)
@app.route('/<movie_id>/rating', methods=['POST'])
def post_rating(movie_id):
refresh_servers()
global update_id
try:
rating = int(request.form['rating'])
execute('add_rating', movie_id, rating, update_id, avoid=[])
update_id += 1
return json.dumps({'status': 'updated'})
except NoServersOnlineError:
abort(503)
except KeyError:
abort(404)
print('Ready')
| [
"[email protected]"
] | |
b5dcc9981dbfed0cd29ed4ebd34415c2df8551d2 | 9b834b334447e72b73b277d121737eab046ab127 | /loops/Breaking-and-Continuing-in-Loops.py | 67a4c36abcc9a4f63f6f040482ef3c384f7d6586 | [] | no_license | ty1cube/Python-Is-Easy | 7a52ee9195eb240d45e9473feafe060edb848361 | 6c460db15dc4e22662da3dc44de0c8d259d476d4 | refs/heads/master | 2020-04-11T08:22:09.563781 | 2018-12-09T17:22:38 | 2018-12-09T17:22:38 | 161,640,660 | 2 | 0 | null | 2018-12-13T13:07:39 | 2018-12-13T13:07:39 | null | UTF-8 | Python | false | false | 2,775 | py | ##
# Breaking and Continuing in Loops Lecture
##
# -*- coding: utf-8 -*-
Participants = ["Jen", "Alex", "Tina", "Joe", "Ben"] #create a list of 5 elements.
position = 0 #set position is equal to 0
for name in Participants: #loop over each element of list
if name == "Tina": #check if the element of list matches to "Tina"
break #come outside of loop if the condition is met
position = position + 1 #increment variable position by 1
print(position) #print the value of position
position = 0 #set position is equal to 0
for name in Participants: #loop over each element of list
if name == "Tina": #check if the element of list matches to "Tina"
print("About to break") #print message
break #come outside of loop if the condition is met
print("About to increment") #print message
position = position + 1 #increment variable position by 1
print(position) #print the value of position
'''
finds the index of matched string from the list
'''
Index = 0 #set Index to 0
for currentIndex in range(len(Participants)): #loop over all elements in list
print(currentIndex) #print value of currentIndex
if Participants[currentIndex] == "Joe": #check if list element is equal to Joe
print("Have Breaked") #print message
break #come out of the loop
print("Not Breaked") #print message
print(currentIndex+1) #print currentIndex of matched element
for number in range(10): #loop from range of 0 to 10
if number%3 == 0: #check remainder is 0 if divided by 3
print(number) #print value of a number
print("Divisible by 3") #print message
continue #continue
print(number) #print value of number
print("Not Divisible by 3") #print message
| [
"[email protected]"
] | |
405b1afa2defd7618c5c3cba640e214cc724e058 | 1d18c82d6ad1ac7ebaa332a81c89a716b1dfdae8 | /remote-control-with-speech-recognition/public/chatbot_module/chatbot.py | f545cebbbf2d4a04f25cb7b2927e506c3e54879e | [] | no_license | roshandev77/personal-assistant | b32dc39f420b28270152d63b0c8e2f796c477573 | a280f6a7846df3a29734301141bd4dc04e128d32 | refs/heads/master | 2020-04-27T18:23:49.461063 | 2019-03-08T16:14:12 | 2019-03-08T16:14:12 | 174,567,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
def get_response(usrText):
bot = ChatBot('Bot',
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=[
{
'import_path': 'chatterbot.logic.BestMatch'
},
{
'import_path': 'chatterbot.logic.LowConfidenceAdapter',
'threshold': 0.70,
'default_response': 'I am sorry, this is out of my trained data please teach me first'
}
],
trainer='chatterbot.trainers.ListTrainer')
bot.set_trainer(ListTrainer)
while True:
if usrText.strip()!= 'Bye':
result = bot.get_response(usrText)
reply = str(result)
return(reply)
if usrText.strip() == 'Bye':
return('Bye')
break
| [
"[email protected]"
] | |
ac7ea011d20baa44d30192b324ebc9a79ac1974b | 61a591f2a8019be6abadf880be7c614b6e907835 | /servos/simple_servo_test.py | dabc36daf616b933583d7b43b6e1036acdb6194e | [] | no_license | sabatesduran/motioneyeos-telegram-bot | df6de9b0995230e2df0337196d3216e7261f78d4 | 3d66668951653a95702655a20f32bbea27a7a288 | refs/heads/master | 2023-05-27T13:38:41.074813 | 2020-05-03T13:07:08 | 2020-05-03T13:07:08 | 249,285,896 | 0 | 0 | null | 2023-05-22T21:39:06 | 2020-03-22T22:39:54 | Python | UTF-8 | Python | false | false | 476 | py | import sys
from gpiozero import AngularServo
from time import sleep
SERVO = sys.argv[1]
TO_POSITION = sys.argv[2]
V_SERVO_PIN = 12
H_SERVO_PIN = 13
def move(with_servo, to_position):
if to_position == 'mid':
with_servo.mid()
elif to_position == 'max':
with_servo.max()
elif to_position == 'min':
with_servo.min()
sleep(0.5)
if SERVO == 'v':
pin = V_SERVO_PIN
else:
pin = H_SERVO_PIN
move(AngularServo(pin), TO_POSITION)
| [
"[email protected]"
] | |
a84e11b9b91a2583243dd53c152683f6f6a05eee | bb67d55c64589c77e574529835dbf998461f2878 | /pattern/pattern53.py | e6053e06201aca0d44a3bc2881a0e0d6160071d5 | [] | no_license | kritikyadav/Practise_work_python | bf8eb42b03ee8f47a5021f7d3fbc68a02142ae8c | 22a24d18c7d0502f5896f3239cee7730192ea5c3 | refs/heads/main | 2023-07-24T05:47:45.886517 | 2021-08-26T14:47:23 | 2021-08-26T14:47:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | n=int(input('rows= '))
for i in range(1,n+1):
for j in range(1,n-i+1):
print(end=' ')
for k in range(1,i+1):
print(k,end='')
for l in range(i-1,0,-1):
print(l,end='')
print()
| [
"[email protected]"
] | |
9b7300e3043117c6eb3dc1adadad3d3652e6d3cf | 7dfdd69c2a698b1618cf62b98795f63d0fcb67bd | /even_odd_array.py | 09a0a90fc4dd26c3eea163d6dedb22fa33215f02 | [] | no_license | nestorghh/coding_interview | 437eb35da1e282c14a4706f5faa9b36a01a985c3 | f3df5520a7491b3e8d0c245036b1647302538716 | refs/heads/master | 2021-09-24T08:49:43.730747 | 2021-09-16T03:25:45 | 2021-09-16T03:25:45 | 150,933,465 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Your input is an array of integers, and ou have to reorder its entries
# so that the even entries appear first.
###########################################################################
def even_odd(nums):
even, odd = 0 , len(nums)-1
while even<odd:
if nums[even]%2==0:
even+=1
else:
nums[even], nums[odd] = nums[odd], nums[even]
odd-=1
return nums
print(even_odd([3,7,8,5,4,2]))
| [
"[email protected]"
] | |
924e7a0ee39250cf95e009cc76b177a51a18c0a3 | d37363bdbe10b426d57593138f20d80b355412c1 | /dealership/migrations/0014_auto_20180206_1817.py | 58694c9e2dcd9d6f3d6089814524c90dc8981af2 | [] | no_license | Nightcrawler2114/CarSupplyChain_ | 6f11dcbe52420bbccab1efb26eaf3afcbf0bd8a6 | ccbaf80890c1e2d1f89dad10167323df054c6f9c | refs/heads/master | 2021-05-03T12:43:09.188309 | 2018-02-08T16:29:19 | 2018-02-08T16:29:19 | 120,499,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # Generated by Django 2.0.1 on 2018-02-06 17:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dealership', '0013_auto_20180202_1522'),
]
operations = [
migrations.AlterField(
model_name='wholesaledeal',
name='car_name',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='manufacturing.WholesaleCar'),
),
]
| [
"[email protected]"
] | |
489cbbf423a18d45cf6f5b1ca64da3f7c45b2407 | 3825126997a40c42f56765d9c0dae2047e8b7297 | /energy/forms/orum_date_use_add_form.py | 242c048161e009255ab1692eb8e471cd7869b1e5 | [] | no_license | kirill1990/kalugaenergo | 34a87aec7104b8caef7b6b803ffa78f72e2f33f6 | 393e3cbcd6dcb78fb489408c8f94365e52606b89 | refs/heads/master | 2021-01-10T08:25:26.100346 | 2016-02-26T12:52:18 | 2016-02-26T12:52:18 | 47,502,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # coding:utf8
from django import forms
from energy.models import Orum
from energy.models import Period
__author__ = 'Demyanov Kirill'
class OrumDateUseAddForm(forms.Form):
orum = forms.ModelChoiceField(
queryset=Orum.objects.all(),
)
period = forms.ModelChoiceField(
queryset=Period.objects.all(),
)
date_use = forms.IntegerField(
min_value=0,
max_value=1000
)
| [
"[email protected]"
] | |
c70ddb74d036303d665a46c34a7b13569017ff7f | da9e4d611c36dc5beb24bc3ec844695713cd7565 | /Tests/AutoUITests/JuicyPeachInLinyi/__init__.py | 94e56cae34461002b2d94c1879237e3f4efffc50 | [] | no_license | yangjourney/AutoTestTaffy | e8128c78c0d0a515c5b4b0b459994fc71ea4da7c | 504c826813859800492c5034721856c4a534db3b | refs/heads/master | 2021-09-18T10:35:45.759077 | 2018-07-13T04:01:55 | 2018-07-13T04:01:55 | 107,228,091 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | # coding=utf-8
# 临沂蜜桃追溯系统@ | [
"[email protected]"
] | |
dd5b25af8252fced7e12f4465472f69156d301a7 | e9e0bc8adaa070b6506ed15adbac551f43082891 | /pandas-0.15.1/setup.py | d0741fce5bfd4dba784a0167d909ce0fadb457aa | [
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause"
] | permissive | Martbov/InformationRetrieval | 515bfa809676113080a3bf0dedb55e7469c1e8e6 | 9083b3a3e94d299beca7b6721f5886efcb854a23 | refs/heads/master | 2021-01-10T16:16:59.704048 | 2016-02-03T23:18:11 | 2016-02-03T23:18:11 | 49,127,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,689 | py | #!/usr/bin/env python
"""
Parts of this file were taken from the pyzmq project
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
import os
import sys
import shutil
import warnings
import re
# may need to work around setuptools bug by providing a fake Pyrex
try:
import Cython
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "fake_pyrex"))
except ImportError:
pass
# try bootstrapping setuptools if it doesn't exist
try:
import pkg_resources
try:
pkg_resources.require("setuptools>=0.6c5")
except pkg_resources.VersionConflict:
from ez_setup import use_setuptools
use_setuptools(version="0.6c5")
from setuptools import setup, Command
_have_setuptools = True
except ImportError:
# no setuptools installed
from distutils.core import setup, Command
_have_setuptools = False
setuptools_kwargs = {}
min_numpy_ver = '1.7.0'
if sys.version_info[0] >= 3:
setuptools_kwargs = {
'zip_safe': False,
'install_requires': ['python-dateutil >= 2',
'pytz >= 2011k',
'numpy >= %s' % min_numpy_ver],
'setup_requires': ['numpy >= %s' % min_numpy_ver],
}
if not _have_setuptools:
sys.exit("need setuptools/distribute for Py3k"
"\n$ pip install distribute")
else:
setuptools_kwargs = {
'install_requires': ['python-dateutil',
'pytz >= 2011k',
'numpy >= %s' % min_numpy_ver],
'setup_requires': ['numpy >= %s' % min_numpy_ver],
'zip_safe': False,
}
if not _have_setuptools:
try:
import numpy
import dateutil
setuptools_kwargs = {}
except ImportError:
sys.exit("install requires: 'python-dateutil < 2','numpy'."
" use pip or easy_install."
"\n $ pip install 'python-dateutil < 2' 'numpy'")
from distutils.extension import Extension
from distutils.command.build import build
from distutils.command.sdist import sdist
from distutils.command.build_ext import build_ext as _build_ext
try:
from Cython.Distutils import build_ext as _build_ext
# from Cython.Distutils import Extension # to get pyrex debugging symbols
cython = True
except ImportError:
cython = False
from os.path import join as pjoin
class build_ext(_build_ext):
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if hasattr(ext, 'include_dirs') and not numpy_incl in ext.include_dirs:
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
DESCRIPTION = ("Powerful data structures for data analysis, time series,"
"and statistics")
LONG_DESCRIPTION = """
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with structured (tabular, multidimensional,
potentially heterogeneous) and time series data both easy and intuitive. It
aims to be the fundamental high-level building block for doing practical,
**real world** data analysis in Python. Additionally, it has the broader goal
of becoming **the most powerful and flexible open source data analysis /
manipulation tool available in any language**. It is already well on its way
toward this goal.
pandas is well suited for many different kinds of data:
- Tabular data with heterogeneously-typed columns, as in an SQL table or
Excel spreadsheet
- Ordered and unordered (not necessarily fixed-frequency) time series data.
- Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
column labels
- Any other form of observational / statistical data sets. The data actually
need not be labeled at all to be placed into a pandas data structure
The two primary data structures of pandas, Series (1-dimensional) and DataFrame
(2-dimensional), handle the vast majority of typical use cases in finance,
statistics, social science, and many areas of engineering. For R users,
DataFrame provides everything that R's ``data.frame`` provides and much
more. pandas is built on top of `NumPy <http://www.numpy.org>`__ and is
intended to integrate well within a scientific computing environment with many
other 3rd party libraries.
Here are just a few of the things that pandas does well:
- Easy handling of **missing data** (represented as NaN) in floating point as
well as non-floating point data
- Size mutability: columns can be **inserted and deleted** from DataFrame and
higher dimensional objects
- Automatic and explicit **data alignment**: objects can be explicitly
aligned to a set of labels, or the user can simply ignore the labels and
let `Series`, `DataFrame`, etc. automatically align the data for you in
computations
- Powerful, flexible **group by** functionality to perform
split-apply-combine operations on data sets, for both aggregating and
transforming data
- Make it **easy to convert** ragged, differently-indexed data in other
Python and NumPy data structures into DataFrame objects
- Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
of large data sets
- Intuitive **merging** and **joining** data sets
- Flexible **reshaping** and pivoting of data sets
- **Hierarchical** labeling of axes (possible to have multiple labels per
tick)
- Robust IO tools for loading data from **flat files** (CSV and delimited),
Excel files, databases, and saving / loading data from the ultrafast **HDF5
format**
- **Time series**-specific functionality: date range generation and frequency
conversion, moving window statistics, moving window linear regressions,
date shifting and lagging, etc.
Many of these principles are here to address the shortcomings frequently
experienced using other languages / scientific research environments. For data
scientists, working with data is typically divided into multiple stages:
munging and cleaning data, analyzing / modeling it, then organizing the results
of the analysis into a form suitable for plotting or tabular display. pandas is
the ideal tool for all of these tasks.
Note
----
Windows binaries built against NumPy 1.8.1
"""
DISTNAME = 'pandas'
LICENSE = 'BSD'
AUTHOR = "The PyData Development Team"
EMAIL = "[email protected]"
URL = "http://pandas.pydata.org"
DOWNLOAD_URL = ''
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Cython',
'Topic :: Scientific/Engineering',
]
MAJOR = 0
MINOR = 15
MICRO = 1
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
write_version = True
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
pipe = None
for cmd in ['git','git.cmd']:
try:
pipe = subprocess.Popen([cmd, "describe", "--always", "--match", "v[0-9]*"],
stdout=subprocess.PIPE)
(so,serr) = pipe.communicate()
if pipe.returncode == 0:
break
except:
pass
if pipe is None or pipe.returncode != 0:
# no git, or not in git dir
if os.path.exists('pandas/version.py'):
warnings.warn("WARNING: Couldn't get git revision, using existing pandas/version.py")
write_version = False
else:
warnings.warn("WARNING: Couldn't get git revision, using generic version string")
else:
# have git, in git dir, but may have used a shallow clone (travis does this)
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}",rev):
# partial clone, manually construct version string
# this is the format before we started using git-describe
# to get an ordering on dev version strings.
rev ="v%s.dev-%s" % (VERSION, rev)
# Strip leading v from tags format "vx.y.z" to get th version string
FULLVERSION = rev.lstrip('v')
else:
FULLVERSION += QUALIFIER
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'pandas', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (FULLVERSION, VERSION))
finally:
a.close()
if write_version:
write_version_py()
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
self._clean_exclude = ['np_datetime.c',
'np_datetime_strings.c',
'period.c',
'tokenizer.c',
'io.c',
'ujson.c',
'objToJSON.c',
'JSONtoObj.c',
'ultrajsonenc.c',
'ultrajsondec.c',
]
for root, dirs, files in os.walk('pandas'):
for f in files:
if f in self._clean_exclude:
continue
# XXX
if 'ujson' in f:
continue
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
self._clean_me.append(pjoin(root, f))
for d in dirs:
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
for d in ('build', 'dist'):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except Exception:
pass
for clean_tree in self._clean_trees:
try:
shutil.rmtree(clean_tree)
except Exception:
pass
class CheckSDist(sdist):
"""Custom sdist that ensures Cython has compiled all pyx files to c."""
_pyxfiles = ['pandas/lib.pyx',
'pandas/hashtable.pyx',
'pandas/tslib.pyx',
'pandas/index.pyx',
'pandas/algos.pyx',
'pandas/parser.pyx',
'pandas/src/sparse.pyx',
'pandas/src/testing.pyx']
def initialize_options(self):
sdist.initialize_options(self)
'''
self._pyxfiles = []
for root, dirs, files in os.walk('pandas'):
for f in files:
if f.endswith('.pyx'):
self._pyxfiles.append(pjoin(root, f))
'''
def run(self):
if 'cython' in cmdclass:
self.run_command('cython')
else:
for pyxfile in self._pyxfiles:
cfile = pyxfile[:-3] + 'c'
msg = "C-source file '%s' not found." % (cfile) +\
" Run 'setup.py cython' before sdist."
assert os.path.isfile(cfile), msg
sdist.run(self)
class CheckingBuildExt(build_ext):
"""Subclass build_ext to get clearer report if Cython is necessary."""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
raise Exception("""Cython-generated file '%s' not found.
Cython is required to compile pandas from a development branch.
Please install Cython or download a release package of pandas.
""" % src)
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class CythonCommand(build_ext):
"""Custom distutils command subclassed from Cython.Distutils.build_ext
to compile pyx->c, and stop there. All this does is override the
C-compile method build_extension() with a no-op."""
def build_extension(self, ext):
pass
class DummyBuildSrc(Command):
""" numpy's build_src command interferes with Cython's build_ext.
"""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass = {'clean': CleanCommand,
'build': build,
'sdist': CheckSDist}
try:
from wheel.bdist_wheel import bdist_wheel
class BdistWheel(bdist_wheel):
def get_tag(self):
tag = bdist_wheel.get_tag(self)
repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64'
if tag[2] == 'macosx_10_6_intel':
tag = (tag[0], tag[1], repl)
return tag
cmdclass['bdist_wheel'] = BdistWheel
except ImportError:
pass
if cython:
suffix = '.pyx'
cmdclass['build_ext'] = CheckingBuildExt
cmdclass['cython'] = CythonCommand
else:
suffix = '.c'
cmdclass['build_src'] = DummyBuildSrc
cmdclass['build_ext'] = CheckingBuildExt
lib_depends = ['reduce', 'inference', 'properties']
def srcpath(name=None, suffix='.pyx', subdir='src'):
return pjoin('pandas', subdir, name + suffix)
if suffix == '.pyx':
lib_depends = [srcpath(f, suffix='.pyx') for f in lib_depends]
lib_depends.append('pandas/src/util.pxd')
else:
lib_depends = []
plib_depends = []
common_include = ['pandas/src/klib', 'pandas/src']
def pxd(name):
return os.path.abspath(pjoin('pandas', name + '.pxd'))
lib_depends = lib_depends + ['pandas/src/numpy_helper.h',
'pandas/src/parse_helper.h']
tseries_depends = ['pandas/src/datetime/np_datetime.h',
'pandas/src/datetime/np_datetime_strings.h',
'pandas/src/period.h']
# some linux distros require it
libraries = ['m'] if 'win32' not in sys.platform else []
ext_data = dict(
lib={'pyxfile': 'lib',
'pxdfiles': [],
'depends': lib_depends},
hashtable={'pyxfile': 'hashtable',
'pxdfiles': ['hashtable']},
tslib={'pyxfile': 'tslib',
'depends': tseries_depends,
'sources': ['pandas/src/datetime/np_datetime.c',
'pandas/src/datetime/np_datetime_strings.c',
'pandas/src/period.c']},
index={'pyxfile': 'index',
'sources': ['pandas/src/datetime/np_datetime.c',
'pandas/src/datetime/np_datetime_strings.c']},
algos={'pyxfile': 'algos',
'depends': [srcpath('generated', suffix='.pyx'),
srcpath('join', suffix='.pyx')]},
parser=dict(pyxfile='parser',
depends=['pandas/src/parser/tokenizer.h',
'pandas/src/parser/io.h',
'pandas/src/numpy_helper.h'],
sources=['pandas/src/parser/tokenizer.c',
'pandas/src/parser/io.c'])
)
extensions = []
for name, data in ext_data.items():
sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')]
pxds = [pxd(x) for x in data.get('pxdfiles', [])]
if suffix == '.pyx' and pxds:
sources.extend(pxds)
sources.extend(data.get('sources', []))
include = data.get('include', common_include)
obj = Extension('pandas.%s' % name,
sources=sources,
depends=data.get('depends', []),
include_dirs=include)
extensions.append(obj)
sparse_ext = Extension('pandas._sparse',
sources=[srcpath('sparse', suffix=suffix)],
include_dirs=[],
libraries=libraries)
extensions.extend([sparse_ext])
testing_ext = Extension('pandas._testing',
sources=[srcpath('testing', suffix=suffix)],
include_dirs=[],
libraries=libraries)
extensions.extend([testing_ext])
#----------------------------------------------------------------------
# msgpack stuff here
if sys.byteorder == 'big':
macros = [('__BIG_ENDIAN__', '1')]
else:
macros = [('__LITTLE_ENDIAN__', '1')]
msgpack_ext = Extension('pandas.msgpack',
sources = [srcpath('msgpack',
suffix=suffix if suffix == '.pyx' else '.cpp',
subdir='')],
language='c++',
include_dirs=common_include,
define_macros=macros)
extensions.append(msgpack_ext)
# if not ISRELEASED:
# extensions.extend([sandbox_ext])
if suffix == '.pyx' and 'setuptools' in sys.modules:
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
if ext.sources[0].endswith(('.c','.cpp')):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
ujson_ext = Extension('pandas.json',
depends=['pandas/src/ujson/lib/ultrajson.h',
'pandas/src/numpy_helper.h'],
sources=['pandas/src/ujson/python/ujson.c',
'pandas/src/ujson/python/objToJSON.c',
'pandas/src/ujson/python/JSONtoObj.c',
'pandas/src/ujson/lib/ultrajsonenc.c',
'pandas/src/ujson/lib/ultrajsondec.c',
'pandas/src/datetime/np_datetime.c',
'pandas/src/datetime/np_datetime_strings.c'],
include_dirs=['pandas/src/ujson/python',
'pandas/src/ujson/lib',
'pandas/src/datetime'] + common_include,
extra_compile_args=['-D_GNU_SOURCE'])
extensions.append(ujson_ext)
if _have_setuptools:
setuptools_kwargs["test_suite"] = "nose.collector"
# The build cache system does string matching below this point.
# if you change something, be careful.
setup(name=DISTNAME,
version=FULLVERSION,
maintainer=AUTHOR,
packages=['pandas',
'pandas.compat',
'pandas.computation',
'pandas.computation.tests',
'pandas.core',
'pandas.io',
'pandas.rpy',
'pandas.sandbox',
'pandas.sparse',
'pandas.sparse.tests',
'pandas.stats',
'pandas.util',
'pandas.tests',
'pandas.tests.test_msgpack',
'pandas.tools',
'pandas.tools.tests',
'pandas.tseries',
'pandas.tseries.tests',
'pandas.io.tests',
'pandas.io.tests.test_json',
'pandas.stats.tests',
],
package_data={'pandas.io': ['tests/data/legacy_hdf/*.h5',
'tests/data/legacy_pickle/0.10.1/*.pickle',
'tests/data/legacy_pickle/0.11.0/*.pickle',
'tests/data/legacy_pickle/0.12.0/*.pickle',
'tests/data/legacy_pickle/0.13.0/*.pickle',
'tests/data/legacy_pickle/0.14.0/*.pickle',
'tests/data/*.csv',
'tests/data/*.dta',
'tests/data/*.txt',
'tests/data/*.xls',
'tests/data/*.xlsx',
'tests/data/*.xlsm',
'tests/data/*.table',
'tests/data/*.html',
'tests/data/html_encoding/*.html',
'tests/test_json/data/*.json'],
'pandas.tools': ['tests/*.csv'],
'pandas.tests': ['data/*.pickle',
'data/*.csv'],
'pandas.tseries.tests': ['data/*.pickle',
'data/*.csv']
},
ext_modules=extensions,
maintainer_email=EMAIL,
description=DESCRIPTION,
license=LICENSE,
cmdclass=cmdclass,
url=URL,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any',
**setuptools_kwargs)
| [
"[email protected]"
] | |
beece128371755369309896016d68fd877323c91 | 43e5e3b0b95bfa5bfad2045badee7dd02b5cc887 | /comprenhension.py | 6d582da8944bcfe281dbb0fc13bc4de24635b436 | [
"MIT"
] | permissive | AmaroPy/python_koans | c87ec454c41d676ae86a16fd40b5f68325ce5159 | 4b80e7fce5c1140d47c4bc8e0e858db8b807ac16 | refs/heads/master | 2020-05-14T08:41:16.021875 | 2019-05-09T10:30:41 | 2019-05-09T10:30:41 | 181,727,510 | 0 | 0 | MIT | 2019-04-16T16:32:02 | 2019-04-16T16:32:02 | null | UTF-8 | Python | false | false | 572 | py | numbers = [1, 2, 3, 10, 11, 15, 99]
def algo(n):
if n % 2 != 0:
return True
return False
def both_requisites(n):
if algo(n) and n > 10:
return True
return False
higher_than_ten = [num for num in numbers if both_requisites(num)]
print(higher_than_ten)
nombres = ['Alicia', 'María', 'Amaro']
acciones = [' salta', ' llora', ' rasca']
for nombre in nombres:
for accion in acciones:
print(nombre + accion)
for num in range(1,11):
for s_num in range(1,11):
print('{} * {} = {}'.format(num, s_num, num * s_num))
| [
"[email protected]"
] | |
e70f666c2512f65e0f4bf7cad39667faf4acf374 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5670465267826688_1/Python/Yibo/program.py | 477ba3c31f713299617ea990c7476a0ebf64d07b | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | #!/usr/bin/python2.7
f = open('input.txt', 'r')
T = int(f.readline())
mult_table = [
[0, 0, 0, 0, 0],
[0, 1, 2, 3, 4],
[0, 2, -1, 4, -3],
[0, 3, -4, -1, 2],
[0, 4, 3, -2, -1]
]
dic = {
1: 1,
'i': 2,
'j': 3,
'k': 4
}
def mul(v1, v2):
# Returns the numerical value of v1 * v2
sign_val = v1 * v2
if sign_val >= 0:
return mult_table[abs(v1)][abs(v2)]
else:
return -mult_table[abs(v1)][abs(v2)]
def get_product_table(L, arr, start, factor):
# product[offset] will return products from start-th element
# up to (start + offset)-th element
product = [None] * L
curr = arr[start]
curr_val = mul(dic[curr], factor)
product[0] = curr_val
for offset in range(1, L):
curr = arr[(start + offset) % L]
curr_val = mul(curr_val, dic[curr])
product[offset] = curr_val
return product
def solve(L, X, arr):
product_table = get_product_table(L, arr, 0, 1)
loop_product = product_table[L - 1]
# First check if product of all of them is -1
all_product = 1
for l in range(X % 4):
all_product = mul(all_product, loop_product)
if all_product != -1:
print 'NO'
return
i_index = -1
for index in range(4 * L):
quotient = index / L
remainder = index % L
product = product_table[remainder]
for q in range(quotient):
product = mul(product, loop_product)
if product == dic['i']:
i_index = index
break
if i_index == -1:
print 'NO'
return
else:
quotient = (i_index / L) % 4
factor = 1
for q in range(quotient):
factor = mul(factor, loop_product)
start = (i_index + 1) % L
j_product_table = get_product_table(L, arr, start, factor)
j_index = -1
for index in range(min(4 * L, L * X - i_index)):
quotient = index / L
remainder = index % L
product = j_product_table[remainder]
for q in range(quotient):
product = mul(product, loop_product)
if product == dic['j']:
print 'YES'
return
print 'NO'
for t in range(T):
(L, X) = f.readline().rstrip().split(' ')
L = int(L)
X = int(X)
arr = f.readline().rstrip()
assert L == len(arr)
print "Case #" + str(t + 1) + ":",
solve(L, X, arr)
| [
"[email protected]"
] | |
98ddb19c7338d5c09a5e06f0bdcc344e8b8cd5e3 | 1d5832de0d4561ba0c42d7c8f86ca564e08f3112 | /flask_blog/wanghublog/controllers/Image.py | c5dc8239987111ff23578ce39f9e5b7570c5fe96 | [] | no_license | wanghublog/Flask_Blog_Linux | f4cec1afd5671ca5f875b9710c6716fd6d5ebdbd | 1c070edf85236962298194a495df00609d320f08 | refs/heads/master | 2022-12-14T15:29:48.487549 | 2019-11-04T12:40:31 | 2019-11-04T12:40:31 | 219,126,794 | 0 | 0 | null | 2022-12-08T06:15:09 | 2019-11-02T08:41:37 | Python | UTF-8 | Python | false | false | 1,281 | py | from PIL import Image, ImageDraw, ImageFont, ImageFilter
import random
# 随机字母:
def rndChar():
return chr(random.randint(65, 90))
# 随机颜色1:
def rndColor():
return (random.randint(64, 255), random.randint(64, 255), random.randint(64, 255))
# 随机颜色2:
def rndColor2():
return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))
def validate_picture():
# 130 x 50:
width = 130
height = 50
image = Image.new('RGB', (width, height), (255, 255, 255))
# 创建Font对象:
font = ImageFont.truetype('arial.ttf', 36)
# 创建Draw对象:
draw = ImageDraw.Draw(image)
# 填充每个像素:
for x in range(width):
for y in range(height):
draw.point((x, y), fill=rndColor())
# 输出文字:
strs = ''
for t in range(4):
str = rndChar()
draw.text((30 * t + 10, 10), str, font=font, fill=rndColor2())
str = strs + str
# 模糊:
im = image.filter(ImageFilter.BLUR)
#root_dir = os.path.abspath('.')
#img_path = root_dir + '\static' + '\image' + '\code.jpg'
#image.save('code.jpg', 'jpeg')
return im,str
if __name__ == '__main__':
creat_str_image()
| [
"[email protected]"
] | |
cbdbc3ce3b472c5817697f8da8d9f19a125c4872 | 08ec706df8afe5ed0a85c41c7ea47d83f6d59622 | /devel/lib/python2.7/dist-packages/vrx_gazebo/__init__.py | c471c9ae890c761c7b7ee01a08cdd222dd654d63 | [] | no_license | aahuhu/boatland | 8aff4f8b6cc1854370f142f1d7aa8415abbd239c | 2d66ec49afa932105aba4fc086100d94bb1874d1 | refs/heads/master | 2023-03-11T03:10:50.764343 | 2021-02-26T03:22:23 | 2021-02-26T03:22:23 | 342,447,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | # -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from pkgutil import extend_path
from sys import path as sys_path
__extended_path = '/home/hedy/autoland/src/vrx_gazebo/src'.split(';')
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
| [
"[email protected]"
] | |
0f38cbcc9e4a2dbb6b7ec9a17fa6bebb8943c405 | 04ed7d595f6a2e52455d22fc906570621c4a1288 | /app/migrations/0001_initial.py | 5e41f9ba4c32ae260dde42e1b9ef20798643ea21 | [] | no_license | mrclap/dodolist-public | a34b6569389a1491017c3d370fc52f32db820824 | 18ff53f7dc948c85fbbe75334d700ed05b74eb74 | refs/heads/master | 2020-05-25T02:20:55.037006 | 2019-05-20T05:57:40 | 2019-05-20T05:57:40 | 187,576,222 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,731 | py | # Generated by Django 2.2.1 on 2019-05-15 07:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('cdate', models.DateTimeField(blank=True, null=True)),
('udate', models.DateTimeField(blank=True, null=True)),
],
options={
'db_table': 'category',
'managed': False,
},
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_json', models.TextField()),
('cdate', models.DateTimeField(blank=True, null=True)),
],
options={
'db_table': 'order',
'managed': False,
},
),
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('detail', models.TextField()),
('due_date', models.DateTimeField(blank=True, null=True)),
('priority', models.IntegerField(blank=True, null=True)),
('category_id', models.IntegerField(blank=True, null=True)),
('cdate', models.DateTimeField(blank=True, null=True)),
('udate', models.DateTimeField(blank=True, null=True)),
],
options={
'db_table': 'todo',
'managed': False,
},
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.CharField(blank=True, max_length=255, null=True)),
('pw', models.CharField(blank=True, max_length=512, null=True)),
('username', models.CharField(blank=True, max_length=255, null=True)),
('cdate', models.DateTimeField(blank=True, null=True)),
('udate', models.DateTimeField(blank=True, null=True)),
],
options={
'db_table': 'user',
'managed': False,
},
),
]
| [
"[email protected]"
] | |
b05dc270889047f4ca8b6c93a64fa44312707097 | 1654ad47bd1de7b7fb05360ee6d47d4ebfe39f34 | /members/test_models.py | 2d74f0e667b8ffb2e647f580c099d22ad8b40533 | [
"BSD-3-Clause"
] | permissive | profbiyi/djangoproject.com | c710906be7c3673170614fb189111bde11bd5877 | 5bc906c7b12d470aa7523d2571c4796abd89ff4c | refs/heads/master | 2021-01-11T22:25:41.736810 | 2016-12-20T17:43:56 | 2017-01-13T14:08:28 | 78,961,009 | 1 | 0 | null | 2017-01-14T19:13:21 | 2017-01-14T19:13:21 | null | UTF-8 | Python | false | false | 3,520 | py | from datetime import date, timedelta
from django.test import TestCase
from members.models import (
GOLD_MEMBERSHIP, PLATINUM_MEMBERSHIP, SILVER_MEMBERSHIP, CorporateMember,
IndividualMember,
)
class IndividualMemberTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.member = IndividualMember.objects.create(
name='DjangoDeveloper',
email='[email protected]'
)
def setUp(self):
self.member.refresh_from_db()
def test_str(self):
self.assertEqual(str(self.member), 'DjangoDeveloper')
def test_member_since_should_have_default(self):
self.assertEqual(IndividualMember().member_since, date.today())
def test_is_active(self):
self.assertTrue(self.member.is_active)
self.member.member_until = date.today()
self.assertFalse(self.member.is_active)
class CorporateMemberTests(TestCase):
today = date.today()
tomorrow = today + timedelta(days=1)
@classmethod
def setUpTestData(cls):
cls.member = CorporateMember.objects.create(
display_name='Corporation',
billing_name='foo',
billing_email='[email protected]',
contact_email='[email protected]',
membership_level=SILVER_MEMBERSHIP,
)
def setUp(self):
self.member.refresh_from_db()
def test_str(self):
self.assertEqual(str(self.member), 'Corporation')
def test_is_invoiced(self):
# No invoices == not invoiced.
self.assertEqual(self.member.is_invoiced, False)
# Invoice but no sent_date == not invoiced.
invoice = self.member.invoice_set.create(amount=500)
self.assertEqual(self.member.is_invoiced, False)
# Invoice with an sent_date == invoiced.
invoice.sent_date = self.today
invoice.save()
self.assertEqual(self.member.is_invoiced, True)
def test_is_paid(self):
# No invoices == not paid.
self.assertEqual(self.member.is_paid, False)
# Invoice but no paid_date == not paid.
invoice = self.member.invoice_set.create(amount=500)
self.assertEqual(self.member.is_paid, False)
# Invoice with a paid_date == paid.
invoice.paid_date = self.today
invoice.save()
self.assertEqual(self.member.is_paid, True)
def test_get_expiry_date(self):
self.assertIsNone(self.member.get_expiry_date())
self.member.invoice_set.create(amount=500)
self.assertIsNone(self.member.get_expiry_date())
self.member.invoice_set.create(amount=500, expiration_date=self.today)
self.assertEqual(self.member.get_expiry_date(), self.today)
self.member.invoice_set.create(amount=500, expiration_date=self.tomorrow)
self.assertEqual(self.member.get_expiry_date(), self.tomorrow)
def test_manager_by_membership_level(self):
self.assertEqual(CorporateMember.objects.by_membership_level(), {})
self.member.invoice_set.create(amount=500, expiration_date=self.tomorrow)
self.assertEqual(CorporateMember.objects.by_membership_level(), {'silver': [self.member]})
self.member.membership_level = GOLD_MEMBERSHIP
self.member.save()
self.assertEqual(CorporateMember.objects.by_membership_level(), {'gold': [self.member]})
self.member.membership_level = PLATINUM_MEMBERSHIP
self.member.save()
self.assertEqual(CorporateMember.objects.by_membership_level(), {'platinum': [self.member]})
| [
"[email protected]"
] | |
07dff20a921241c2bc968135bade70efa18b081e | 9d3e3ee3f90398a825e9cac49593eaea3ade651c | /covid_19_data_collector/covid_19_data_collector/settings.py | 46eb77a32c8c9ee78aa97529318fa9130fc569e0 | [] | no_license | mmcgov/covid_19_tracker | b73017490b9909d36f0217e036336cf7a6bfabea | b16d863d86fca1c037204307718a08dd09e30cf4 | refs/heads/master | 2021-05-20T12:42:05.284199 | 2020-08-03T22:06:00 | 2020-08-03T22:06:00 | 252,301,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,197 | py | # -*- coding: utf-8 -*-
# Scrapy settings for covid_19_tracker project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'covid_19_data_collector'
SPIDER_MODULES = ['covid_19_data_collector.spiders']
NEWSPIDER_MODULE = 'covid_19_data_collector.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'covid_19_tracker (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'covid_19_tracker.middlewares.Covid19TrackerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'covid_19_tracker.middlewares.Covid19TrackerDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'covid_19_tracker.pipelines.Covid19TrackerPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
89e049e3ec9191b36e0a4997d35499d7c1f80446 | 62d1c7344691dd2513ca11c2c49282139782591b | /面向对象/demo_08_继承.py | 46c2484c679b8195124830a8299468f1cd9fee2d | [] | no_license | zhanglei12306/PythonProject | 5766ba38a54f9db43c089611d5189db9e86d56c7 | 07649f58f00825c0e833125092aec10b858ba95b | refs/heads/master | 2023-02-08T02:21:05.392537 | 2021-01-02T13:37:04 | 2021-01-02T13:37:04 | 323,079,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py |
# 继承:某个类直接具备另一个类的能力(属性和方法)
# 格式:
# class 子类名:(父类名)
# class Animal: 经典写法
class Animal(object):  # new-style class declaration
    """Base animal providing the eating and drinking behaviours."""

    def eat(self):
        """Announce that the animal is eating."""
        message = "-----吃-----"
        print(message)

    def drink(self):
        """Announce that the animal is drinking."""
        message = "-----喝-----"
        print(message)
class Dog(Animal):
    """A dog: inherits eat/drink from Animal and adds playing."""

    def playing(self):
        """Announce that the dog is playing."""
        message = "-----玩-----"
        print(message)
class Cat(Dog):
    """A cat: inherits the whole Dog/Animal chain and adds running."""

    def pao(self):
        """Announce that the cat is running."""
        message = "-----跑-----"
        print(message)
# Demo: a Cat instance can call methods from the whole inheritance
# chain (Cat -> Dog -> Animal).
wang_cai = Cat()
wang_cai.eat()      # inherited from Animal
wang_cai.drink()    # inherited from Animal
wang_cai.playing()  # inherited from Dog
wang_cai.pao()      # defined on Cat itself
| [
"[email protected]"
] | |
22d627417b77424db925df02aa33d9050035ffc0 | cb04879732abff760f572012cd03dd8e64c66b97 | /prob6.py | 14f8d70f1bf2e19fca12aa770f4729d5607e731a | [] | no_license | DarylWinslow/DT211-3-Cloud- | fb2c12ad97baa1b2406a5349f66b4466bf414ceb | 14292bdce3d41c900c3d72ca172ec422103ca8d9 | refs/heads/master | 2020-05-19T09:10:43.834750 | 2014-10-02T14:56:33 | 2014-10-02T14:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | def main():
n = 100
sum1, sum2 =0, 0
for i in range(1, n+1):
sum1 += 1
sum2 += pow(i,2)
return pow(sum1,2) -sum2
# Print the result only when executed as a script.
if '__main__' == __name__:
    print(main())
| [
"[email protected]"
] | |
257a9de7de8acf4c8dc41472a399bdf79329fae3 | 96d393cef29738f3d5eac0a2bea9711ee51d4c47 | /logicpy/result.py | 5ce5b00589491efdb07d3b3b0e32b7e1b5779fdc | [
"MIT"
] | permissive | evertheylen/logicpy | b01fae5088b30a41f37dcfb91fae563e9afe48bc | 18dac5b2b659d6598692a165b39dccbf413781bc | refs/heads/master | 2021-09-01T20:11:31.070883 | 2017-12-28T14:48:27 | 2017-12-28T14:48:27 | 109,205,315 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,070 | py |
from logicpy.data import Term, Variable, BasicTerm, has_occurence, replace
from logicpy.debug import NoDebugger
class ResultException(Exception):
    """Base class for errors raised while manipulating a Result."""
    pass
class UnificationFail(ResultException):
    """Raised when two terms cannot be unified (conflict/occurs check)."""
    pass
class Uninstantiated(ResultException):
    """Raised when a variable is looked up but has no binding."""
    pass
class Result:
    """An immutable collection of (left, right) identity pairs built up
    during unification.

    Behaves like a frozenset of pairs, with Prolog-specific helpers:
    get_var() looks up a variable's binding, and mgu() runs the
    Martelli-Montanari algorithm over the stored identities.
    """

    def __init__(self, it = None, var_cache = None):
        # var_cache maps Variable -> bound term; filled lazily by
        # get_var() and merged eagerly in __or__.
        self.var_cache = var_cache or {}
        if it:
            self.identities = frozenset(it)
        else:
            self.identities = frozenset()

    # Act like a proper set ...............................
    # Binary operations
    # no __r*__ versions, PyPy doesn't know those
    for fname in ['__and__', '__xor__', '__sub__',
                  'intersection', 'difference', 'symmetric_difference', 'union']:
        # Generate one forwarding method per frozenset operation: unwrap
        # a Result operand to its identities, apply the frozenset op,
        # and re-wrap the answer in the current Result subclass.
        def passthrough(self, other, f = getattr(frozenset, fname)):
            return type(self)(f(self.identities, other.identities if isinstance(other, Result) else other))
        locals()[fname] = passthrough

    def __or__(self, other):
        """Union of two results; detects cached-binding conflicts early."""
        if isinstance(other, Result):
            # Often used, so let's write a faster version using var_cache:
            # if both sides cached different values for the same
            # variable, the union can never unify.
            overlap = self.var_cache.keys() & other.var_cache.keys()
            for var in overlap:
                if self.var_cache[var] != other.var_cache[var]:
                    return FailResult()
            total_var_cache = {**self.var_cache, **other.var_cache}
            total_identities = self.identities | other.identities
            return type(self)(total_identities, total_var_cache)
        else:
            return type(self)(self.identities | other)

    def __len__(self):
        return len(self.identities)

    def __iter__(self):
        return iter(self.identities)

    def __contains__(self, obj):
        return obj in self.identities

    # Representation and easy usage ......................
    def __str__(self):
        if len(self) == 0:
            return 'ok'
        return '{' + ', '.join(f"{L} = {R}" for L, R in self.identities) + '}'

    def easy_dict(self):
        """Return {name: term} for top-level (scope 0) variables only."""
        return {L.name: R for L, R in self.identities if isinstance(L, Variable) and L.scope == 0}

    # Prolog additions ....................................
    def get_var(self, var):
        """Return the term bound to var, caching the lookup.

        Raises Uninstantiated if no identity mentions the variable.
        """
        try:
            return self.var_cache[var]
        except KeyError:
            for A, B in self.identities:
                if A == var:
                    self.var_cache[var] = B
                    return B
            raise Uninstantiated(f"Uninstantiated: {var}")

    def mgu(self):
        """Return a new Result holding the most general unifier of the
        current identities (may raise UnificationFail)."""
        return Result(Result.martelli_montanari(set(self.identities)))

    @staticmethod
    def martelli_montanari(E):
        """Run Martelli-Montanari unification in place on the set of
        pairs E and return it in solved form."""
        from logicpy.structure import Structure

        if len(E) == 0:
            return E

        did_a_thing = True
        # Pairs that produced no progress since the last change; once
        # every remaining pair is "tried", the algorithm terminates.
        tried = set()

        while True:
            untried = E - tried
            if len(untried) == 0: break
            (A, B) = untried.pop()
            E.remove((A, B))
            did_a_thing = True  # Assume and unset later

            if not isinstance(A, Variable) and isinstance(B, Variable):
                # switch: keep variables on the left-hand side
                E.add((B, A))
            elif isinstance(A, BasicTerm) and isinstance(B, BasicTerm):
                # peel: equal functors unify argument-wise
                if A.name == B.name and len(A.children) == len(B.children):
                    E.update(zip(A.children, B.children))
                else:
                    raise UnificationFail(f"Conflict {A}, {B}")
            elif isinstance(A, Variable) and (not isinstance(B, Variable) or not A.really_equal(B)):
                # substitute A := B throughout E
                if has_occurence(B, A):
                    raise UnificationFail(f"Occurs check {A}, {B}")
                # While not very elegant, this is substantially faster in PyPy
                # In CPython, it's about the same
                remove_from_E = set()
                add_to_E = list()
                did_a_thing = False
                for t in E:
                    nt = (replace(t[0], A, B), replace(t[1], A, B))
                    if t != nt:
                        did_a_thing = True
                        remove_from_E.add(t)
                        add_to_E.append(nt)
                if did_a_thing:
                    E -= remove_from_E
                    E.update(add_to_E)
                E.add((A, B))  # Add it back
            elif (not isinstance(A, (Structure, Term))) and (not isinstance(B, (Structure, Term))):
                # Both sides are plain constants: they must be equal.
                if A != B:
                    raise UnificationFail(f"Constant Conflict {A}, {B}")
                else:
                    did_a_thing = False
            else:
                did_a_thing = False

            if did_a_thing:
                # Progress was made, so previously stuck pairs may move.
                tried.clear()
            else:
                # Add it back
                E.add((A, B))
                tried.add((A, B))

        return E
class FailResult(Result):
    """A Result representing a unification that already failed; mgu()
    reports the failure and yields None instead of raising."""

    def mgu(self, dbg=NoDebugger()):
        # Failure was detected earlier (see Result.__or__), so just
        # report via the debugger and return no unifier.
        dbg.output("Failure to unify was already detected")
        return None
| [
"[email protected]"
] | |
bd00c13e9e1a5274d1c0d52f684814c023849b59 | 39d6a6eec662ad0b81f3f9f262646162daac59de | /WIDHYA DAILY SOLUTIONS/w2/notebook3a6b8eef1f (1).py | a7c39746029c06fa52465450f773ed7ca4f661e8 | [] | no_license | Hashmeet229/Widhya-Intern | c281554c98b72e2b89417ce4d59e0bcc8699f0dd | 56a74dcf54ba69b959ad9823e5b6c8f9f09ea14b | refs/heads/master | 2023-06-29T10:32:55.431187 | 2021-07-27T08:23:57 | 2021-07-27T08:23:57 | 389,902,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,874 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# In[1]:
# Load the ball-by-ball deliveries data for the RCB vs KKR analysis.
import pandas as pd
import numpy as np
df=pd.read_csv("../input/rcb-vs-kkr/deliveries.csv")
df.head()
df.info()
import os
print(os.listdir("../input"))

# In[2]:
# No-ball runs conceded by RCB while bowling to KKR.
a= df[['bowling_team','batting_team','noball_runs']]
b=a[(a['bowling_team']=='Royal Challengers Bangalore') & (a['batting_team'] == 'Kolkata Knight Riders') ]
b

# In[3]:
# No-ball runs conceded by KKR while bowling to RCB.
c= df[['bowling_team','batting_team','noball_runs']]
d=c[(c['bowling_team']=='Kolkata Knight Riders') & (c['batting_team'] == 'Royal Challengers Bangalore') ]
d

# In[4]:
# Both bowling directions combined into one frame.
e = pd.concat([b,d], axis=0)
e

# In[5]:
# Total no-ball runs per bowling side.
f=e.groupby('bowling_team').agg({'noball_runs':'sum'})
f

# In[ ]:
#total_noballs=14+9=23 AND total matches b/w these two are 24..
total_noballs=23
total_macthes_vs_kkr=24
noball_per_match=total_noballs/total_macthes_vs_kkr
noball_per_match

# In[6]:
# Match-level data: one row per IPL match.
df1=pd.read_csv("../input/rcb-vs-kkr/matches.csv")
df1.info()

# In[7]:
df2=df1[['team1','team2','winner','result','toss_winner','toss_decision']]
df2.head()

# In[8]:
# Head-to-head matches with RCB listed as team1.
df3 = df2[(df2['team1']=='Royal Challengers Bangalore') & (df2['team2'] == 'Kolkata Knight Riders')]
df3

# In[9]:
# Head-to-head matches with KKR listed as team1.
df4 = df2[(df2['team1']=='Kolkata Knight Riders') & (df2['team2'] == 'Royal Challengers Bangalore')]
df4

# In[10]:
# All head-to-head matches regardless of listing order.
df5 = pd.concat([df3,df4], axis=0)
df5

# In[11]:
df5['winner'].value_counts()

# In[12]:
df5['toss_decision'].value_counts()

# In[13]:
# Matches played and won per team across the whole dataset.
matches_played=pd.concat([df1['team1'],df1['team2']])
matches_played=matches_played.value_counts().reset_index()
matches_played.columns=['Team','Total Matches']
matches_played['wins']=df1['winner'].value_counts().reset_index()['winner']
matches_played.set_index('Team',inplace=True)
matches_played.reset_index().head(8)

# In[14]:
win_percentage = round(matches_played['wins']/matches_played['Total Matches'],3)*100
win_percentage.head(8)

# In[15]:
# KD Karthik's deliveries faced against RCB bowling.
df6= df[['bowling_team','batsman','batsman_runs','match_id']]
df6.head()
df7=df6[(df6['bowling_team']=='Royal Challengers Bangalore') & (df6['batsman'] == 'KD Karthik')]
df7['match_id'].value_counts()

# In[16]:
# Total runs divided by 20 — presumably the number of innings he
# played against RCB (see value_counts above); confirm before reuse.
df8=df7.groupby('batsman').agg({'batsman_runs':'sum'})
df8.div(20)

# In[17]:
# V Kohli's deliveries faced against KKR bowling.
df11=df6[(df6['bowling_team']=='Kolkata Knight Riders') & (df6['batsman'] == 'V Kohli')]
df11

# In[18]:
df12=df11.groupby('batsman').agg({'batsman_runs':'sum'})
df11['match_id'].value_counts()
df12.div(24)

# In[24]:
# Each row below is one boundary (4 runs) hit by Kohli against KKR.
df13= df[['batsman','batsman_runs','match_id','bowling_team']]
df13.head()
df14=df13[(df13['batsman'] == 'V Kohli') & (df13['batsman_runs'] == 4) & (df13['bowling_team'] == 'Kolkata Knight Riders')]
df14

# In[26]:
no_of_matches=df14['match_id'].value_counts()
no_of_matches

# In[29]:
#from above data summing all the values in match_id.Since each match_id represents no. of fours in that match.So after summing up ..
total_fours=58
total_macthes_vs_kkr=24
four_per_match=total_fours/total_macthes_vs_kkr
four_per_match
| [
"[email protected]"
] | |
d6937ef5a73d2d4f2380e8360ad1ef7346b7ba88 | 6f28494af3db4603d0f20ad8b1e7c57c1efc8acc | /traning/test_demo1.py | 6b6f6edc6f4dff4fb71197032682239ab4a2cc7a | [] | no_license | zhangwanli-marker/Lagouzuoye | e9351652cb95856cd3c2df139db2623d1b1fe061 | 26dd998c74a12a3c0c8e6da2ad433a9d81620cca | refs/heads/master | 2023-01-03T07:27:47.061687 | 2020-10-20T12:58:13 | 2020-10-20T12:58:13 | 269,674,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | import pytest
import yaml
class TestData:
    """Data-driven addition smoke test: each [x, y] pair is read from
    ./listdata.yaml once, at collection time."""

    @pytest.mark.parametrize("x, y", yaml.safe_load(open("./listdata.yaml")))
    def test_data(self, x, y):
        """Add the two values from the YAML row and show the result."""
        total = x + y
        print(total)
| [
"[email protected]"
] | |
387485152a5a4b081dd974bcc6df35d3c80b145c | 90291c34ad890fe1a8c926083c0e6a1fb9d73126 | /supervised_learning/0x05-regularization/4-dropout_forward_prop.py | e6728ddc034073c39f024de42bee3a8cf3d25df9 | [] | no_license | zahraaassaad/holbertonschool-machine_learning | 7ce800f3a5bd1ef4941e4dd632f22867e2c951a1 | 131be8fcf61aafb5a4ddc0b3853ba625560eb786 | refs/heads/main | 2023-05-15T01:48:04.687777 | 2021-06-09T19:07:41 | 2021-06-09T19:07:41 | 318,150,125 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | #!/usr/bin/env python3
"""conducts forward propagation
using Dropout:"""
import numpy as np
def dropout_forward_prop(X, weights, L, keep_prob):
    """Forward propagation with inverted dropout on the hidden layers.

    Hidden layers use a tanh activation followed by a dropout mask;
    the output layer uses softmax and is never dropped.

    Args:
        X: numpy.ndarray of shape (nx, m) with the input data.
        weights: dict holding the weights "W{l}" and biases "b{l}".
        L: number of layers in the network.
        keep_prob: probability that a node is kept.

    Returns:
        dict with the input ("A0"), the output of each layer ("A{l}")
        and the dropout mask applied to each hidden layer ("D{l}").
    """
    cache = {"A0": X}
    for layer in range(1, L + 1):
        W = weights["W" + str(layer)]
        b = weights["b" + str(layer)]
        A_prev = cache["A" + str(layer - 1)]
        Z = np.matmul(W, A_prev) + b
        if layer != L:
            # np.tanh avoids the overflow that the manual
            # (e^Z - e^-Z) / (e^Z + e^-Z) formulation hits for large |Z|.
            A = np.tanh(Z)
            # Binary keep-mask, stored as ints like the reference output.
            mask = (np.random.rand(A.shape[0], A.shape[1]) < keep_prob)
            cache["D" + str(layer)] = mask.astype(int)
            # Inverted dropout: rescale kept activations by 1/keep_prob
            # so the expected activation magnitude is unchanged.
            cache["A" + str(layer)] = (A * cache["D" + str(layer)]) / keep_prob
        else:
            # Softmax output layer (no dropout on the output).
            t = np.exp(Z)
            cache["A" + str(layer)] = t / t.sum(axis=0, keepdims=True)
    return cache
| [
"[email protected]"
] | |
5f0a1e189920fdab1d92c136932df084d672e547 | 1c2798a474f279e62eea0466654f6715ad90fb71 | /long_short_term_memory.py | 55e74b815f09a3b150fe1f612ea073df4371e007 | [] | no_license | gratrow96/ml_stock_market | 5822a115753fded4e56e63c2c87a7d3c710472ed | d74d8866ab3f47c7b263091dad2de22660e354e3 | refs/heads/main | 2023-02-21T05:23:29.591210 | 2021-01-21T23:08:02 | 2021-01-21T23:08:02 | 331,778,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from sklearn import utils
from math import sqrt
from scipy.stats import pearsonr, kendalltau
from sklearn.metrics import mean_squared_error as mse, accuracy_score
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler | [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.