Dataset columns (one record per source file):

| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | 40 chars |
| directory_id | string | 40 chars |
| path | string | 3–616 chars |
| content_id | string | 40 chars |
| detected_licenses | list | 0–112 entries |
| license_type | string | 2 classes |
| repo_name | string | 5–115 chars |
| snapshot_id | string | 40 chars |
| revision_id | string | 40 chars |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | 3 – 10.2M chars |
| authors | list | 1 entry |
| author_id | string | 1–132 chars |

Each record below lists its fields in this column order, separated by `|`, with the file content and author list inline.
2d74d3b980a16f4b736de35c0620c251cd4bd605
|
575d197af5bbc31b89df37f8733e81707294948c
|
/Python2/examples/Excel/14_numeric_format.py
|
533f0bc504b9462fadff2bd1f8029ac5a60655f1
|
[] |
no_license
|
tisnik/python-programming-courses
|
5c7f1ca9cae07a5f99dd8ade2311edb30dc3e088
|
4e61221b2a33c19fccb500eb5c8cdb49f5b603c6
|
refs/heads/master
| 2022-05-13T07:51:41.138030 | 2022-05-05T15:37:39 | 2022-05-05T15:37:39 | 135,132,128 | 3 | 2 | null | 2021-04-06T12:19:16 | 2018-05-28T08:27:19 |
Python
|
UTF-8
|
Python
| false | false | 1,247 |
py
|
#!/usr/bin/env python3
# vim: set fileencoding=utf-8
"""Create a workbook with a longer table; cells use an explicit numeric format."""
import xlsxwriter

# create the object representing the whole workbook
with xlsxwriter.Workbook("example14.xlsx") as workbook:
    # insert a new worksheet into the workbook
    worksheet = workbook.add_worksheet()

    # define a new style
    bold_style = workbook.add_format()
    bold_style.set_bold()
    bold_style.set_font_color("blue")

    # define another new style
    red_style = workbook.add_format()
    red_style.set_font_color("red")

    # define the numeric format
    numeric_format = workbook.add_format({"num_format": "0.0000"})

    # set column widths and styles
    worksheet.set_column("A:A", 8, red_style)
    worksheet.set_column("B:B", 14, numeric_format)
    worksheet.set_column("C:Z", 2)

    # style for the first row
    worksheet.set_row(0, 20, bold_style)

    # cells with text
    worksheet.write_string("A1", "x")
    worksheet.write_string("B1", "1/x")

    # cells with numeric values
    for x in range(1, 21):
        worksheet.write_number(x, 0, x)
        worksheet.write_number(x, 1, 1.0 / x)

# the workbook is closed automatically
|
[
"[email protected]"
] | |
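Editorial aside: as a condensed illustration of the `num_format` idea used in the record above, here is a minimal, hedged sketch. The file name `demo.xlsx` and the sample values are arbitrary assumptions; `Workbook.add_format`, `Worksheet.set_column`, and `Worksheet.write_number` are the same xlsxwriter calls the record already uses.

```python
import xlsxwriter

# Minimal sketch: one column rendered with four decimal places.
# "demo.xlsx" and the sample values are illustrative assumptions.
with xlsxwriter.Workbook("demo.xlsx") as workbook:
    worksheet = workbook.add_worksheet()
    four_decimals = workbook.add_format({"num_format": "0.0000"})
    worksheet.set_column("B:B", 14, four_decimals)
    for row, x in enumerate(range(1, 6), start=1):
        worksheet.write_number(row, 0, x)        # column A: x
        worksheet.write_number(row, 1, 1.0 / x)  # column B: 1/x, formatted
```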
ca0dfc4464e55ea5612d52669b8d134596d3404c
|
5a1a695829a2d1dbf4daa0736f0fbd6feffc7e63
|
/JUNGOL/1707(달팽이사각형).py
|
07bb09b825bfea02fae29ad62638f417bf139b21
|
[] |
no_license
|
juyi212/Algorithm_study
|
f5d263c5329c994a457bbe897e5e1405d2b1d67a
|
f225cc593a50b74686111f654f7133707a1d1310
|
refs/heads/master
| 2023-03-21T20:02:36.138688 | 2021-03-16T14:16:40 | 2021-03-16T14:16:40 | 325,008,034 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 661 |
py
|
n = int(input())
dir = [(0, 1), (1, 0), (0, -1), (-1, 0)]
matrix = [[-1]*n for _ in range(n)]
num = 1
visited = [[False]*n for _ in range(n)]
nr, nc = 0, 0
matrix[nr][nc] = 1
visited[nr][nc] = True
cnt = 0
while cnt <= (n*n):
    for r, c in dir:
        cnt += 1
        while True:
            nr += r
            nc += c
            if 0 <= nr < n and 0 <= nc < n and not visited[nr][nc]:
                num += 1
                visited[nr][nc] = True
                matrix[nr][nc] = num
            else:
                nr -= r
                nc -= c
                break
for i in matrix:
    for j in i:
        print(j, end=' ')
    print()
|
[
"[email protected]"
] | |
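Editorial aside: a hedged, hand-traced check of the snail-square program above. The expected matrix below was worked out by stepping through the loop for n = 3; it is not taken from the repository.

```python
# Hand-traced expected output of the program above for n = 3:
# the square is filled clockwise starting from the top-left corner.
expected_n3 = [
    [1, 2, 3],
    [8, 9, 4],
    [7, 6, 5],
]
```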
b7b17a2f843d0c6a17b1aa1e0dbc77074c1d8db7
|
4629bf721ff3d49f9f8e3e57babc325f38fa4b7e
|
/uliweb/utils/setup.py
|
4f740b75ed31de85e1ec0160e159648737b04e0b
|
[
"BSD-2-Clause"
] |
permissive
|
zhangchunlin/uliweb3
|
b3d331fd9b1738f0a38f007a0def240d85c31443
|
3c92763d3172b9f1041ea93816daf4224c8512c0
|
refs/heads/master
| 2023-03-07T01:49:35.779929 | 2018-09-26T05:51:14 | 2018-09-26T05:51:14 | 152,057,856 | 1 | 1 |
BSD-2-Clause
| 2018-10-08T10:02:56 | 2018-10-08T10:02:56 | null |
UTF-8
|
Python
| false | false | 5,765 |
py
|
from setuptools import setup
from setuptools.command import build_py as b
import os, sys
import glob

#remove build and dist directory
import shutil
#if os.path.exists('build'):
#    shutil.rmtree('build')
#if os.path.exists('dist'):
#    shutil.rmtree('dist')

def copy_dir(self, package, src, dst):
    self.mkpath(dst)
    for r in os.listdir(src):
        if r in ['.svn', '_svn']:
            continue
        fpath = os.path.join(src, r)
        if os.path.isdir(fpath):
            copy_dir(self, package + '.' + r, fpath, os.path.join(dst, r))
        else:
            ext = os.path.splitext(fpath)[1]
            if ext in ['.pyc', '.pyo', '.bak', '.tmp']:
                continue
            target = os.path.join(dst, r)
            self.copy_file(fpath, target)

def find_dir(self, package, src):
    for r in os.listdir(src):
        if r in ['.svn', '_svn']:
            continue
        fpath = os.path.join(src, r)
        if os.path.isdir(fpath):
            for f in find_dir(self, package + '.' + r, fpath):
                yield f
        else:
            ext = os.path.splitext(fpath)[1]
            if ext in ['.pyc', '.pyo', '.bak', '.tmp']:
                continue
            yield fpath

def build_package_data(self):
    for package in self.packages or ():
        src_dir = self.get_package_dir(package)
        build_dir = os.path.join(*([self.build_lib] + package.split('.')))
        copy_dir(self, package, src_dir, build_dir)

setattr(b.build_py, 'build_package_data', build_package_data)

def get_source_files(self):
    filenames = []
    for package in self.packages or ():
        src_dir = self.get_package_dir(package)
        filenames.extend(list(find_dir(self, package, src_dir)))
    return filenames

setattr(b.build_py, 'get_source_files', get_source_files)

from setuptools.command.develop import develop
from distutils import sysconfig

unlink = os.unlink

def rm(obj):
    import shutil
    if os.path.exists(obj):
        try:
            if os.path.isdir(obj):
                if os.path.islink(obj):
                    unlink(obj)
                else:
                    shutil.rmtree(obj)
            else:
                if os.path.islink(obj):
                    unlink(obj)
                else:
                    os.remove(obj)
        except:
            import traceback
            traceback.print_exc()
            raise

__CSL = None

def symlink(source, link_name):
    '''symlink(source, link_name)
    Creates a symbolic link pointing to source named link_name
    copied from http://stackoverflow.com/questions/1447575/symlinks-on-windows/7924557
    '''
    global __CSL
    if __CSL is None:
        import ctypes
        csl = ctypes.windll.kernel32.CreateSymbolicLinkW
        csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
        csl.restype = ctypes.c_ubyte
        __CSL = csl
    flags = 0
    if source is not None and os.path.isdir(source):
        flags = 1
    if __CSL(link_name, source, flags) == 0:
        raise ctypes.WinError()

def pre_run(func):
    def _f(self):
        global unlink
        if self.distribution.package_dir and sys.platform == 'win32':
            try:
                import ntfslink
            except:
                print('You need to install ntfslink package first in windows platform.')
                print('You can find it at https://github.com/juntalis/ntfslink-python')
                sys.exit(1)
            if not hasattr(os, 'symlink'):
                os.symlink = symlink
                os.path.islink = ntfslink.symlink.check
                unlink = ntfslink.symlink.unlink
        func(self)
    return _f

develop.run = pre_run(develop.run)

def post_install_for_development(func):
    def _f(self):
        func(self)
        packages = self.distribution.packages
        package_dir = self.distribution.package_dir
        libpath = sysconfig.get_python_lib()
        if not package_dir: return
        for p in sorted(packages):
            #if the package is something like 'x.y.z'
            #then create site-packages/x/y
            #then create symlink to z to src directory
            ps = p.split('.')
            if len(ps) > 1:
                path = libpath
                for x in ps[:-1]:
                    path = os.path.join(path, x)
                    if not os.path.exists(path):
                        os.makedirs(path)
                    inifile = os.path.join(path, '__init__.py')
                    if not os.path.exists(inifile):
                        with open(inifile, 'w') as f:
                            f.write('\n')
            pkg = os.path.join(libpath, *ps)
            d = package_dir.get(p, None)
            if d is None:
                print("Error: the package %s directory can't be found in package_dir, please config it first" % p)
                sys.exit(1)
            src = os.path.abspath(os.path.join(os.getcwd(), d))
            print('Linking ', src, 'to', pkg)
            rm(pkg)
            os.symlink(src, pkg)
    return _f

develop.install_for_development = post_install_for_development(develop.install_for_development)

def post_uninstall_link(func):
    def _f(self):
        func(self)
        packages = self.distribution.packages
        package_dir = self.distribution.package_dir
        if not package_dir: return
        libpath = sysconfig.get_python_lib()
        for p in sorted(packages, reverse=True):
            print('Unlink... %s' % p)
            pkg = os.path.join(libpath, p.replace('.', '/'))
            rm(pkg)
    return _f

develop.uninstall_link = post_uninstall_link(develop.uninstall_link)
|
[
"[email protected]"
] | |
055b87c0c32ecf5c0e0ae69c04812464e18972ed
|
c5a921726a3805663d26a2dbaa47e49497931d4e
|
/TDD_Python/superlists/functional_tests_v3.py
|
2bcdfbc7c2b2d6920b2cf500abc85118c4d25264
|
[] |
no_license
|
snowdj/cs_course
|
a50d07548198b4202e8abde01ec572e2cce38ab3
|
fa6504cb5145d10952f4615478fa745f4b35ba13
|
refs/heads/master
| 2020-03-17T15:18:52.190747 | 2018-05-13T08:08:51 | 2018-05-13T08:08:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,652 |
py
|
"""
TDD Python, chapter 4, page 38.
"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_can_start_a_list_and_retrieve_it_later(self):
# She goes to check out its homepage.
self.browser.get('http://localhost:8000')
# She notices the page title and header mention to-do lists.
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# She is invited to enter a to-do item straight away
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
# She types "Buy peacock feathers" into a text box.
inputbox.send_keys('Buy peacock feathers')
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feather" as an item in a to-do list table
inputbox.send_keys(Keys.ENTER)
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertTrue(
any(row.text == '1: Buy peacock feathers' for row in rows),
"New to-do item did not appear in table"
)
self.fail('Finish the test!')
if __name__ == '__main__':
unittest.main(warnings='ignore')
|
[
"[email protected]"
] | |
47b0c80ab9d652c8131826c08251e4b823d88274
|
d9d1a3ea9f67845e6bbaa97cda4a60a8fc776ce3
|
/galtrace/libs/crawler/__init__.py
|
fa75c505970cf8ee6c207b4c4273a10afc52e9b9
|
[] |
no_license
|
legnaleurc/galtrace
|
0340bfebd367e45c87eff8254f5cd58550f18721
|
27f88c5db28f197766cd3cc732b5e1eb921d74bf
|
refs/heads/master
| 2021-01-22T23:48:49.352510 | 2015-04-25T15:29:27 | 2015-04-25T15:29:27 | 3,678,797 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 177 |
py
|
from .sites import UnsupportedLinkError
from .sites import UnavailableLinkError
from .crawler import fetch
__all__ = ['fetch', 'UnsupportedLinkError', 'UnavailableLinkError']
|
[
"[email protected]"
] | |
cde5ef37b383f1c7e93382d8a1058c013371fafb
|
bb109bd629c67a30a57850ebc97f9a9625aa998f
|
/wmtexe/cmi/dup.py
|
26e81298bdb534a1b20a6862f4198083e7fdeabd
|
[
"MIT"
] |
permissive
|
csdms/wmt-exe
|
b0966f27792be853e8469f12a7e78aea24da6bfa
|
9f6e5a20e65765389682161b985cab186db88fce
|
refs/heads/master
| 2022-11-15T06:27:23.589160 | 2022-10-25T23:57:21 | 2022-10-25T23:57:21 | 22,662,428 | 0 | 2 |
MIT
| 2022-10-25T23:57:22 | 2014-08-05T23:04:09 |
Python
|
UTF-8
|
Python
| false | false | 353 |
py
|
import argparse
import yaml
from .bocca import dup_c_impl


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('path', help='Path to impl files')
    parser.add_argument('name', help='Name of new class')
    args = parser.parse_args()
    dup_c_impl(args.path, args.name, destdir='.')


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
59cac7d3b44940ed7343645d2d5df7b8ec308d3a
|
6b96a11195094a0023a059ba7d5df95ce58c56f1
|
/1527B.py
|
d2463a6c906bb932f57923f1d8afb8efbdbb7d93
|
[] |
no_license
|
ldfdev/CodeForces-Div2-Problems
|
d932b09ee14a430fd0054d5b295f6016553be2b7
|
d18824a4330a4593099d249496ae22f3f69d5f44
|
refs/heads/master
| 2021-08-11T03:29:18.772870 | 2021-07-25T07:21:09 | 2021-07-29T20:09:43 | 72,371,376 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 604 |
py
|
# explanation below:
#       1 0 0 0 0 1
# alice 1 1 0 0 0 1
# bob   1 1 0 0 1 0
# alice 1 1 0 1 1 1
# bob   1 1 1 0 1 1 (rev)
# alice 1 1 1 1 1 1
#       0 0 0
# alice 0 1 0
# bob   1 1 0
# alice 0 1 1 (rev)
# bob   1 1 1


def solve():
    n = int(input())
    palindrome = list(input())
    zeros = len([x for x in palindrome if x == '0'])
    if zeros == 1:
        return 'BOB'
    if zeros & 1:
        return 'ALICE'
    return 'BOB'


if __name__ == '__main__':
    for _ in range(int(input().strip())):
        print(solve())
|
[
"[email protected]"
] | |
29bd029d56f2d4656129ad4580be26f15d405eac
|
bbcba0bb02cc62c4d445582172605776ab1be8cb
|
/save_to_csv.py
|
52c9c1e2deb5ed1112c543a1a5324897d69ba610
|
[] |
no_license
|
wvuvl/GPND2
|
36f208a9d5cb35bf020c251fc226ce6dfe213187
|
b41dd8d662e11ff5999ac4e2392f536f4e62a50c
|
refs/heads/master
| 2023-02-08T09:30:40.795626 | 2020-12-07T03:34:29 | 2020-12-07T03:34:29 | 319,194,499 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 951 |
py
|
import csv
import numpy as np
import pickle
import pprint


def save_results(results, filename):
    with open(filename[:-4] + ".pkl", "wb") as f:
        pickle.dump(results, f)

    print(results)
    pp = pprint.PrettyPrinter()
    pp.pprint(results)

    percentages = list(results[0].keys())
    measures = list(list(results[0].values())[0].keys())

    f = open(filename, 'wt')
    writer = csv.writer(f)
    for m in measures:
        writer.writerow((m,))
        header = ['Percentage %d' % x for x in percentages]
        writer.writerow(header)
        for r in results:
            row = []
            for p in percentages:
                if p in r:
                    row.append(r[p][m])
            writer.writerow(tuple(row))
    f.close()

    mean_f1 = np.asarray([r[50]['f1'] for r in results]).mean()
    f = open(filename[:-4] + "_%.3f" % mean_f1, 'w')
    f.close()
    print('Mean F1 at 50%%: %.3f' % mean_f1)
|
[
"[email protected]"
] | |
43b225614126bfa4e218f3018c9185630319aeb4
|
c2eba49f66ee0948c0ab089475b02f3a641fafb1
|
/xfc_control/migrations/0001_initial.py
|
9d000c9fdbc5537eca92dc318a1642a5f7b7b9c4
|
[] |
no_license
|
cedadev/django-xfc_control
|
d4dcb7205c889443c0adba423a095b8b9ba68ffd
|
63a792214f267f2beb975d7138c46b449f560dbf
|
refs/heads/master
| 2021-07-15T05:15:02.873093 | 2020-04-21T11:35:46 | 2020-04-21T11:35:46 | 91,581,627 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,616 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-03 12:57
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import sizefield.models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CachedFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('path', models.CharField(help_text='Relative path to the file', max_length=2024)),
                ('size', sizefield.models.FileSizeField(default=0, help_text='Size of the file')),
                ('first_seen', models.DateTimeField(blank=True, help_text='Date the file was first scanned by the cache_manager', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='CacheDisk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mountpoint', models.CharField(blank=True, help_text='Root directory of cache area', max_length=1024, unique=True)),
                ('size_bytes', sizefield.models.FileSizeField(default=0, help_text='Maximum size on the disk that can be allocated to the cache area')),
                ('allocated_bytes', sizefield.models.FileSizeField(default=0, help_text='Amount of space allocated to users')),
                ('used_bytes', sizefield.models.FileSizeField(default=0, help_text='Used value calculated by update daemon')),
            ],
        ),
        migrations.CreateModel(
            name='ScheduledDeletion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time_entered', models.DateTimeField(blank=True, help_text='Date the deletion was entered into the scheduler', null=True)),
                ('time_delete', models.DateTimeField(blank=True, help_text='Time the deletion will take place', null=True)),
                ('delete_files', models.ManyToManyField(default=None, help_text='The list of files to be deleted in this schedule', to='xfc_control.CachedFile')),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Name of user - should be same as JASMIN user name', max_length=254)),
                ('email', models.EmailField(help_text='Email of user', max_length=254)),
                ('notify', models.BooleanField(default=False, help_text='Switch notifications on / off')),
                ('quota_size', sizefield.models.FileSizeField(default=0, help_text='Size of quota allocated to user, in (bytes day)')),
                ('quota_used', sizefield.models.FileSizeField(default=0, help_text='Size of quota allocated to user, in (bytes day)')),
                ('hard_limit_size', sizefield.models.FileSizeField(default=0, help_text='Upper limit allocated to user, in bytes. This limit cannot be exceeded.')),
                ('total_used', sizefield.models.FileSizeField(default=0, help_text='Total size of all files owned by the user.')),
                ('cache_path', models.CharField(help_text='Relative path to cache area', max_length=2024)),
                ('cache_disk', models.ForeignKey(help_text='Cache disk allocated to the user', on_delete=django.db.models.deletion.CASCADE, to='xfc_control.CacheDisk')),
            ],
        ),
        migrations.CreateModel(
            name='UserLock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_lock', models.ForeignKey(blank=True, help_text='User that is locked', on_delete=django.db.models.deletion.CASCADE, to='xfc_control.User')),
            ],
        ),
        migrations.AddField(
            model_name='scheduleddeletion',
            name='user',
            field=models.ForeignKey(help_text='User that the ScheduledDeletion belongs to', on_delete=django.db.models.deletion.CASCADE, to='xfc_control.User'),
        ),
        migrations.AddField(
            model_name='cachedfile',
            name='user',
            field=models.ForeignKey(help_text='User that owns the file', null=True, on_delete=django.db.models.deletion.CASCADE, to='xfc_control.User'),
        ),
    ]
|
[
"[email protected]"
] | |
2c01a97f078d58f8355d548b0c2dc3c1a4e8250e
|
0c6832a2534ad92fa9c0b3f8c38588d05bf7cdac
|
/myjson/json_byteified.py
|
4c22d6de876a04aaacda398ba15b06cba9383732
|
[] |
no_license
|
rlaneyjr/myutils
|
62690f932e642e8025f1abbaf871890d2df38aaa
|
1966fe15c1e28725486c3286113722bd109d8bbf
|
refs/heads/master
| 2021-07-14T19:15:28.020853 | 2020-06-12T20:51:37 | 2020-06-12T20:51:37 | 163,216,692 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,089 |
py
|
#!/usr/bin/env python
# title          :json_byteified.py
# description    :Json Byteified using json_hook
# author         :Ricky Laney
# date           :20170814
# version        :1
# usage          :python json_byteified.py or ./json_byteified.py
# notes          :
# python_version :2.7.13
'''
Example usage:
>>> json_loads_byteified('{"Hello": "World"}')
{'Hello': 'World'}
>>> json_loads_byteified('"I am a top-level string"')
'I am a top-level string'
>>> json_loads_byteified('7')
7
>>> json_loads_byteified('["I am inside a list"]')
['I am inside a list']
>>> json_loads_byteified('[[[[[["I am inside a big nest of lists"]]]]]]')
[[[[[['I am inside a big nest of lists']]]]]]
>>> json_loads_byteified('{"foo": "bar",
                           "things": [7, {"qux": "baz",
                                          "moo": {"cow": ["milk"]}}]}')
{'things': [7, {'qux': 'baz', 'moo': {'cow': ['milk']}}], 'foo': 'bar'}
>>> json_load_byteified(open('somefile.json'))
{'more json': 'from a file'}
'''
import json


def json_load_byteified(file_handle):
    return _byteify(
        json.load(file_handle, object_hook=_byteify),
        ignore_dicts=True
    )


def json_loads_byteified(json_text):
    return _byteify(
        json.loads(json_text, object_hook=_byteify),
        ignore_dicts=True
    )


def _byteify(data, ignore_dicts=False):
    # if this is a unicode string, return its string representation
    if isinstance(data, unicode):
        return data.encode('utf-8')
    # if this is a list of values, return list of byteified values
    if isinstance(data, list):
        return [_byteify(item, ignore_dicts=True) for item in data]
    # if this is a dictionary, return dictionary of byteified keys and values
    # but only if we haven't already byteified it
    if isinstance(data, dict) and not ignore_dicts:
        return {
            _byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
            for key, value in data.iteritems()
        }
    # if it's anything else, return it in its original form
    return data
|
[
"[email protected]"
] | |
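Editorial aside: the helper above is Python 2 only (`unicode`, `dict.iteritems`). Below is a hedged sketch of the equivalent idea on Python 3, where `json.loads` already returns `str` and only an explicit encode step is needed; the function name `loads_as_bytes` is my own, not part of the record.

```python
import json

def loads_as_bytes(json_text):
    """Parse JSON text and encode every string as UTF-8 bytes (Python 3 sketch)."""
    def _to_bytes(value):
        if isinstance(value, str):
            return value.encode('utf-8')
        if isinstance(value, list):
            return [_to_bytes(item) for item in value]
        if isinstance(value, dict):
            return {_to_bytes(k): _to_bytes(v) for k, v in value.items()}
        return value
    return _to_bytes(json.loads(json_text))

# e.g. loads_as_bytes('{"Hello": "World"}') == {b'Hello': b'World'}
```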
34b8fa298e9825147350ab807b8c4e738840a108
|
833ef0efdcac57f8bc585263fdd303edc06a5caa
|
/sc-kpm/sc-python/services/common/sc_log.py
|
885f80bd62050788b0fd1293fbdad4fe7081c7ae
|
[
"MIT"
] |
permissive
|
PogudoTanya/sc-machine
|
f77d5965b9f81cf4852afe0e4d5394f869be44d5
|
ffa65770b457968f4e6f39a6d2f2513e1ab9462a
|
refs/heads/master
| 2022-11-10T05:33:22.903073 | 2020-02-22T21:06:54 | 2020-02-22T21:06:54 | 271,785,773 | 0 | 0 |
NOASSERTION
| 2020-06-12T11:50:20 | 2020-06-12T11:50:19 | null |
UTF-8
|
Python
| false | false | 663 |
py
|
from termcolor import colored, cprint


class Log:

    def __init__(self, subsystem_name):
        self.name = subsystem_name

    def __print_colored(self, level, level_color, message, message_color):
        print(colored('[' + self.name + '][' + level + ']', level_color) + colored(': ' + message, message_color))

    def debug(self, message):
        self.__print_colored('debug', 'grey', message, 'grey')

    def info(self, message):
        self.__print_colored('info', 'white', message, 'white')

    def error(self, message):
        self.__print_colored('error', 'red', message, 'red')

    def warning(self, message):
        self.__print_colored('warning', 'yellow', message, 'yellow')
|
[
"[email protected]"
] | |
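Editorial aside: a minimal usage sketch for the `Log` class above. The import path and the subsystem name `"demo"` are assumptions for illustration; `termcolor` is the only external dependency.

```python
from sc_log import Log  # import path is an assumption; adjust to your layout

log = Log("demo")           # arbitrary subsystem name
log.info("service started")
log.warning("low disk space")
log.error("connection refused")
```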
7508a95e0bacdf8bef69e846404a88d24fcc3681
|
2df21455b93cf15328cda87de9831bd23f9d8343
|
/GTFparse.py
|
88ee302fa60d28a34dbbe6d303f093d08b82178b
|
[] |
no_license
|
RagnarDanneskjold/ProcessSingleCell
|
d2e8f07f485319dea6df6d58e5f6cc93662cc983
|
bc6e7e4eca5ad77f6b15ead6fc7badaa4f7a7996
|
refs/heads/master
| 2021-06-15T03:45:09.984440 | 2017-03-21T20:07:56 | 2017-03-21T20:07:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,146 |
py
|
# Aparna Rajpurkar
# This is the GTFparse module of the FastCount.py program

# imports
import re
import operator
from Tree import GeneTree  # my Object Oriented classes


def parseValidLine(line):
    """Function that parses a line of a GTF file and returns a useful \
data structure of its fields"""
    # split line into array
    line_ar = line.rstrip('\n\r').split('\t')
    # if line is too short, return nothing
    if len(line_ar) < 9:
        return {}
    # Grab the gene ID from the line using regular expressions
    id_string = line_ar[8]
    gene_id = re.search(r'gene_id \"(.+?)\";', id_string).group(1)
    # construct the results dictionary for this line
    result = {
        'chrom': line_ar[0],
        'feature': line_ar[2],
        'start': int(line_ar[3]),
        'end': int(line_ar[4]),
        'strand': line_ar[6],
        'gene_id': gene_id
    }
    # We are only interested in gene and exon features, so return
    # nothing if not gene or exon
    if result['feature'] != "gene" and result['feature'] != "exon":
        return {}
    # return the result dictionary
    return result


def parseGTFFile(gtffile, bam_num):
    """Function that handles parsing the GTF file and initializing the GeneTree\
 Objects for each chromosome and strand"""
    # open the GTF file and initialize local variables
    gtf_fp = open(gtffile, "r")
    parsedData = dict()
    curr_gene = 0
    # iterate over every line in the GTF file
    for line in gtf_fp:
        # skip if this is a header line
        if line.startswith('#'):
            continue
        # parse line into fields dictionary
        fields = parseValidLine(line)
        # skip if we could not parse, or feature is not of interest
        if not fields:
            continue
        # if we're on a new chromosome, initialize its GeneTree objects
        if fields['chrom'] not in parsedData:
            # set this chromosome's strand dictionary
            parsedData[fields['chrom']] = dict()
            # for each strand, initialize a GeneTree object
            # which will store all entries for its genes
            parsedData[fields['chrom']]['+'] = GeneTree(fields['chrom'], '+')
            parsedData[fields['chrom']]['-'] = GeneTree(fields['chrom'], '-')
        # if this feature is a gene, add it to the GeneTree
        if fields['feature'] == 'gene':
            # call the addNode method of the GeneTree object on this gene
            curr_gene = parsedData[fields['chrom']][fields['strand']].addNode(fields, fields['gene_id'], bam_num)
        else:  # exon
            # if this is an exon, add it to the current gene's Tree
            parsedData[fields['chrom']][fields['strand']].addExon(fields, curr_gene)
    # close the GTF file
    gtf_fp.close()
    # for each chromosome and strand, call the GeneTree object's balance method
    # to ensure optimally efficient find() operations later
    for chrom in parsedData:
        for strand in parsedData[chrom]:
            parsedData[chrom][strand].balanceAll()
    # return our data structure
    return parsedData
|
[
"[email protected]"
] | |
3f59c1fe83aa3ac391b2850beb2b625538a1e3cc
|
8076124f4087781e0513dbe09c0f43dc6a861ab0
|
/tests/sentry/tasks/test_sentry_apps.py
|
f9edb169a457931d2d87503086d0f2e3d4efa913
|
[
"BSD-2-Clause"
] |
permissive
|
sharmapacific/sentry
|
75e3356f87cb5a1e812e0974b081fd47852dfe33
|
fceabe7cb84de587fe05b2c36edc013058e7e55a
|
refs/heads/master
| 2020-08-19T00:13:48.748983 | 2019-10-17T17:09:06 | 2019-10-17T17:09:06 | 215,851,537 | 1 | 0 |
BSD-3-Clause
| 2019-10-17T17:43:49 | 2019-10-17T17:43:49 | null |
UTF-8
|
Python
| false | false | 15,367 |
py
|
from __future__ import absolute_import

import six

from celery import Task
from collections import namedtuple
from django.core.urlresolvers import reverse
from mock import patch

from sentry.models import Rule, SentryApp, SentryAppInstallation, SentryAppWebhookError
from sentry.testutils import TestCase
from sentry.testutils.helpers import with_feature
from sentry.testutils.helpers.faux import faux
from sentry.testutils.helpers.datetime import iso_format, before_now
from sentry.utils.http import absolute_uri
from sentry.receivers.sentry_apps import *  # NOQA
from sentry.utils import json
from sentry.tasks.post_process import post_process_group
from sentry.api.serializers import serialize
from sentry.tasks.sentry_apps import (
    send_alert_event,
    notify_sentry_app,
    process_resource_change,
    process_resource_change_bound,
    installation_webhook,
    workflow_notification,
    send_webhooks,
)

RuleFuture = namedtuple("RuleFuture", ["rule", "kwargs"])
MockResponse = namedtuple("MockResponse", ["headers", "content", "ok", "status_code"])
MockResponseInstance = MockResponse({}, {}, True, 200)
MockFailureResponseInstance = MockResponse({}, {}, False, 400)


class DictContaining(object):
    def __init__(self, *args, **kwargs):
        if len(args) == 1 and isinstance(args[0], dict):
            self.args = []
            self.kwargs = args[0]
        else:
            self.args = args
            self.kwargs = kwargs

    def __eq__(self, other):
        return self._args_match(other) and self._kwargs_match(other)

    def _args_match(self, other):
        for key in self.args:
            if key not in other.keys():
                return False
        return True

    def _kwargs_match(self, other):
        for key, value in six.iteritems(self.kwargs):
            if self.kwargs[key] != other[key]:
                return False
        return True


class TestSendAlertEvent(TestCase):
    def setUp(self):
        self.organization = self.create_organization(slug="foo")
        self.sentry_app = self.create_sentry_app(organization=self.organization)
        self.project = self.create_project(organization=self.organization)
        self.rule = Rule.objects.create(project=self.project, label="Issa Rule")
        self.install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=self.sentry_app.slug
        )

    @patch("sentry.tasks.sentry_apps.safe_urlopen")
    def test_no_sentry_app(self, safe_urlopen):
        group = self.create_group(project=self.project)
        event = self.create_event(group=group)
        send_alert_event(event, self.rule, 9999)
        assert not safe_urlopen.called

    @patch("sentry.tasks.sentry_apps.safe_urlopen")
    def test_no_sentry_app_in_future(self, safe_urlopen):
        group = self.create_group(project=self.project)
        event = self.create_event(group=group)
        rule_future = RuleFuture(rule=self.rule, kwargs={})
        with self.tasks():
            notify_sentry_app(event, [rule_future])
        assert not safe_urlopen.called

    @patch("sentry.tasks.sentry_apps.safe_urlopen")
    def test_no_installation(self, safe_urlopen):
        sentry_app = self.create_sentry_app(organization=self.organization)
        group = self.create_group(project=self.project)
        event = self.create_event(group=group)
        rule_future = RuleFuture(rule=self.rule, kwargs={"sentry_app": sentry_app})
        with self.tasks():
            notify_sentry_app(event, [rule_future])
        assert not safe_urlopen.called

    @patch("sentry.tasks.sentry_apps.safe_urlopen")
    def test_send_alert_event(self, safe_urlopen):
        group = self.create_group(project=self.project)
        event = self.create_event(group=group)
        rule_future = RuleFuture(rule=self.rule, kwargs={"sentry_app": self.sentry_app})
        with self.tasks():
            notify_sentry_app(event, [rule_future])

        data = json.loads(faux(safe_urlopen).kwargs["data"])
        assert data == {
            "action": "triggered",
            "installation": {"uuid": self.install.uuid},
            "data": {
                "event": DictContaining(
                    event_id=event.event_id,
                    url=absolute_uri(
                        reverse(
                            "sentry-api-0-project-event-details",
                            args=[self.organization.slug, self.project.slug, event.event_id],
                        )
                    ),
                    web_url=absolute_uri(
                        reverse(
                            "sentry-organization-event-detail",
                            args=[self.organization.slug, group.id, event.event_id],
                        )
                    ),
                    issue_url=absolute_uri("/api/0/issues/{}/".format(group.id)),
                ),
                "triggered_rule": self.rule.label,
            },
            "actor": {"type": "application", "id": "sentry", "name": "Sentry"},
        }
        assert faux(safe_urlopen).kwarg_equals(
            "headers",
            DictContaining(
                "Content-Type",
                "Request-ID",
                "Sentry-Hook-Resource",
                "Sentry-Hook-Timestamp",
                "Sentry-Hook-Signature",
            ),
        )


@patch("sentry.tasks.sentry_apps.safe_urlopen", return_value=MockResponseInstance)
class TestProcessResourceChange(TestCase):
    def setUp(self):
        self.project = self.create_project()
        self.sentry_app = self.create_sentry_app(
            organization=self.project.organization, events=["issue.created"]
        )
        self.install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=self.sentry_app.slug
        )

    def test_group_created_sends_webhook(self, safe_urlopen):
        issue = self.create_group(project=self.project)
        event = self.create_event(group=issue)
        with self.tasks():
            post_process_group(
                event=event, is_new=True, is_regression=False, is_new_group_environment=False
            )

        data = json.loads(faux(safe_urlopen).kwargs["data"])
        assert data["action"] == "created"
        assert data["installation"]["uuid"] == self.install.uuid
        assert data["data"]["issue"]["id"] == six.text_type(issue.id)
        assert faux(safe_urlopen).kwargs_contain("headers.Content-Type")
        assert faux(safe_urlopen).kwargs_contain("headers.Request-ID")
        assert faux(safe_urlopen).kwargs_contain("headers.Sentry-Hook-Resource")
        assert faux(safe_urlopen).kwargs_contain("headers.Sentry-Hook-Timestamp")
        assert faux(safe_urlopen).kwargs_contain("headers.Sentry-Hook-Signature")

    def test_does_not_process_disallowed_event(self, safe_urlopen):
        process_resource_change("delete", "Group", self.create_group().id)
        assert len(safe_urlopen.mock_calls) == 0

    def test_does_not_process_sentry_apps_without_issue_webhooks(self, safe_urlopen):
        SentryAppInstallation.objects.all().delete()
        SentryApp.objects.all().delete()
        # DOES NOT subscribe to Issue events
        self.create_sentry_app_installation(organization=self.organization)
        process_resource_change("created", "Group", self.create_group().id)
        assert len(safe_urlopen.mock_calls) == 0

    @patch("sentry.tasks.sentry_apps._process_resource_change")
    def test_process_resource_change_bound_passes_retry_object(self, process, safe_urlopen):
        group = self.create_group(project=self.project)
        process_resource_change_bound("created", "Group", group.id)
        task = faux(process).kwargs["retryer"]
        assert isinstance(task, Task)

    @with_feature("organizations:integrations-event-hooks")
    def test_error_created_sends_webhook(self, safe_urlopen):
        sentry_app = self.create_sentry_app(
            organization=self.project.organization, events=["error.created"]
        )
        install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=sentry_app.slug
        )
        one_min_ago = iso_format(before_now(minutes=1))
        event = self.store_event(
            data={
                "message": "Foo bar",
                "exception": {"type": "Foo", "value": "shits on fiah yo"},
                "level": "error",
                "timestamp": one_min_ago,
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        with self.tasks():
            post_process_group(
                event=event, is_new=False, is_regression=False, is_new_group_environment=False
            )

        data = json.loads(faux(safe_urlopen).kwargs["data"])
        assert data["action"] == "created"
        assert data["installation"]["uuid"] == install.uuid
        assert data["data"]["error"]["event_id"] == event.event_id
        assert faux(safe_urlopen).kwargs_contain("headers.Content-Type")
        assert faux(safe_urlopen).kwargs_contain("headers.Request-ID")
        assert faux(safe_urlopen).kwargs_contain("headers.Sentry-Hook-Resource")
        assert faux(safe_urlopen).kwargs_contain("headers.Sentry-Hook-Timestamp")
        assert faux(safe_urlopen).kwargs_contain("headers.Sentry-Hook-Signature")


@patch("sentry.mediators.sentry_app_installations.InstallationNotifier.run")
class TestInstallationWebhook(TestCase):
    def setUp(self):
        self.project = self.create_project()
        self.user = self.create_user()
        self.sentry_app = self.create_sentry_app(organization=self.project.organization)
        self.install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=self.sentry_app.slug
        )

    def test_sends_installation_notification(self, run):
        installation_webhook(self.install.id, self.user.id)
        run.assert_called_with(install=self.install, user=self.user, action="created")

    def test_gracefully_handles_missing_install(self, run):
        installation_webhook(999, self.user.id)
        assert len(run.mock_calls) == 0

    def test_gracefully_handles_missing_user(self, run):
        installation_webhook(self.install.id, 999)
        assert len(run.mock_calls) == 0


@patch("sentry.tasks.sentry_apps.safe_urlopen", return_value=MockResponseInstance)
class TestWorkflowNotification(TestCase):
    def setUp(self):
        self.project = self.create_project()
        self.user = self.create_user()
        self.sentry_app = self.create_sentry_app(
            organization=self.project.organization,
            events=["issue.resolved", "issue.ignored", "issue.assigned"],
        )
        self.install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=self.sentry_app.slug
        )
        self.issue = self.create_group(project=self.project)

    def test_sends_resolved_webhook(self, safe_urlopen):
        workflow_notification(self.install.id, self.issue.id, "resolved", self.user.id)
        assert faux(safe_urlopen).kwarg_equals("url", self.sentry_app.webhook_url)
        assert faux(safe_urlopen).kwarg_equals("data.action", "resolved", format="json")
        assert faux(safe_urlopen).kwarg_equals("headers.Sentry-Hook-Resource", "issue")
        assert faux(safe_urlopen).kwarg_equals(
            "data.data.issue.id", six.binary_type(self.issue.id), format="json"
        )

    def test_sends_resolved_webhook_as_Sentry_without_user(self, safe_urlopen):
        workflow_notification(self.install.id, self.issue.id, "resolved", None)
        assert faux(safe_urlopen).kwarg_equals("data.actor.type", "application", format="json")
        assert faux(safe_urlopen).kwarg_equals("data.actor.id", "sentry", format="json")
        assert faux(safe_urlopen).kwarg_equals("data.actor.name", "Sentry", format="json")

    def test_does_not_send_if_no_service_hook_exists(self, safe_urlopen):
        sentry_app = self.create_sentry_app(
            name="Another App", organization=self.project.organization, events=[]
        )
        install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=sentry_app.slug
        )
        workflow_notification(install.id, self.issue.id, "assigned", self.user.id)
        assert not safe_urlopen.called

    def test_does_not_send_if_event_not_in_app_events(self, safe_urlopen):
        sentry_app = self.create_sentry_app(
            name="Another App",
            organization=self.project.organization,
            events=["issue.resolved", "issue.ignored"],
        )
        install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=sentry_app.slug
        )
        workflow_notification(install.id, self.issue.id, "assigned", self.user.id)
        assert not safe_urlopen.called


@patch("sentry.tasks.sentry_apps.safe_urlopen", return_value=MockFailureResponseInstance)
class TestWebhookErrors(TestCase):
    def setUp(self):
        self.project = self.create_project()
        self.user = self.create_user()
        self.sentry_app = self.create_sentry_app(
            organization=self.project.organization,
            events=["issue.resolved", "issue.ignored", "issue.assigned"],
        )
        self.install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=self.sentry_app.slug
        )
        self.issue = self.create_group(project=self.project)

    def test_saves_error_if_workflow_webhook_request_fails(self, safe_urlopen):
        sentry_app = self.create_sentry_app(
            name="Test App",
            organization=self.project.organization,
            events=["issue.resolved", "issue.ignored", "issue.assigned"],
        )
        install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=sentry_app.slug
        )
        data = {"issue": serialize(self.issue)}
        send_webhooks(installation=install, event="issue.assigned", data=data, actor=self.user)

        error_count = SentryAppWebhookError.objects.count()
        error = SentryAppWebhookError.objects.first()
        assert safe_urlopen.called
        assert error_count == 1
        assert error.sentry_app.id == install.sentry_app.id
        assert error.organization.id == install.organization.id

    def test_does_not_save_error_if_nonworkflow_request_fails(self, safe_urlopen):
        sentry_app = self.create_sentry_app(
            name="Test App",
            organization=self.project.organization,
            events=[
                "issue.resolved",
                "issue.ignored",
                "issue.assigned",
                "issue.created",
                "error.created",
            ],
        )
        install = self.create_sentry_app_installation(
            organization=self.project.organization, slug=sentry_app.slug
        )
        data = {"issue": serialize(self.issue)}
        send_webhooks(installation=install, event="issue.created", data=data)
        send_webhooks(installation=install, event="error.created", data=data)

        error_count = SentryAppWebhookError.objects.count()
        assert safe_urlopen.called
        assert error_count == 0
|
[
"[email protected]"
] | |
6fc2b00a799a1bc7eac3492402f25eef3d1aabc9
|
8e225d87038bdca1e5c82b4a875a1c2d25dced0c
|
/setup.py
|
cd2dfa11505b41434faed9d03535b1b083d27d7d
|
[
"MIT"
] |
permissive
|
feifzhou/fortpy
|
c65b486f942db8cdfa325f1d11dbd37e60b7a0d0
|
dc926c9169033ea59d31ea7df7bbe5373633aeb1
|
refs/heads/master
| 2021-01-15T23:53:18.793577 | 2015-08-26T06:04:48 | 2015-08-26T06:04:48 | 41,407,308 | 0 | 0 | null | 2015-08-26T05:58:56 | 2015-08-26T05:58:56 | null |
UTF-8
|
Python
| false | false | 2,050 |
py
|
#!/usr/bin/env python
try:
    from setuptools import setup
    args = {}
except ImportError:
    from distutils.core import setup
    print("""\
*** WARNING: setuptools is not found. Using distutils...
""")

from setuptools import setup

try:
    from pypandoc import convert
    read_md = lambda f: convert(f, 'rst')
except ImportError:
    print("warning: pypandoc module not found, could not convert Markdown to RST")
    read_md = lambda f: open(f, 'r').read()

setup(name='Fortpy',
      version='1.6.1',
      description='Fortran Parsing, Unit Testing and Intellisense',
      long_description=read_md('README.md'),
      author='Conrad W Rosenbrock',
      author_email='[email protected]',
      url='https://github.com/rosenbrockc/fortpy',
      license='MIT',
      install_requires=[
          "argparse",
          "pyparsing",
          "python-dateutil",
          "paramiko",
          "termcolor",
          "numpy",
          "matplotlib",
          "scipy",
      ],
      packages=['fortpy', 'fortpy.parsers', 'fortpy.isense', 'fortpy.testing',
                'fortpy.templates', 'fortpy.interop',
                'fortpy.printing'],
      scripts=['fortpy/scripts/compare.py', 'fortpy/scripts/convert.py', 'fortpy/scripts/runtests.py',
               'fortpy/scripts/analyze.py', 'fortpy/scripts/parse.py', 'fortpy/scripts/ftypes.py'],
      package_data={'fortpy': ['isense/builtin.xml']},
      include_package_data=True,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Natural Language :: English',
          'License :: OSI Approved :: MIT License',
          'Operating System :: MacOS',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.4',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      )
|
[
"[email protected]"
] | |
c8d103b20e2d4b98c3aaf390b547b1ce8ff564c1
|
c2281d55883a51b2698119e3aeb843df9c8c885b
|
/Thesis ch 2/ClusteringBuckets/GenericModels/LogisticRegression/12.PredictProb.py
|
b05ad079af41b543da0b9691ac6c46327c6c7b1c
|
[] |
no_license
|
akshitasawhney3008/Thesis-Final
|
1c004ffc6c2dd6ec711b212f9a35e46ea067c9c7
|
10865bab16bcc2ca4a5d4af345ffb4f2f7222104
|
refs/heads/master
| 2023-02-01T20:56:43.763024 | 2020-12-10T09:28:45 | 2020-12-10T09:28:45 | 320,037,411 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,897 |
py
|
import numpy as np
from sklearn.metrics import precision_score, recall_score, roc_curve, auc, accuracy_score, matthews_corrcoef, f1_score
import pickle
from sklearn.preprocessing import normalize

iterations = 5
# threshold = 0.5


def getPredictionsGivenThreshold(myMatrix, th):
    myList = []
    for i in range(myMatrix.shape[0]):
        p1 = myMatrix[i, 1]
        if p1 >= th:
            myList.append(1)
        else:
            myList.append(0)
    return np.asarray(myList)


def getResults(predProb, labels):
    thresholdList = []
    precisionList = []
    recallList = []
    aucList = []
    accuracyListtr = []
    accuracyList = []
    mcList = []
    f1scoreList = []
    for threshold in thresholdRange:
        matrixPredictions = getPredictionsGivenThreshold(predProb, threshold)
        precision = precision_score(labels, matrixPredictions)
        recall = recall_score(labels, matrixPredictions)
        fpr, tpr, thresholds = roc_curve(labels, matrixPredictions, pos_label=1)
        auroc = auc(fpr, tpr)
        accuracy = accuracy_score(labels, matrixPredictions)
        matthewsCoeff = matthews_corrcoef(labels, matrixPredictions)
        f1score = f1_score(labels, matrixPredictions)
        thresholdList.append(threshold)
        precisionList.append(precision)
        recallList.append(recall)
        aucList.append(auroc)
        accuracyList.append(accuracy)
        mcList.append(matthewsCoeff)
        f1scoreList.append(f1score)
    print(max(accuracyList))
    ind = accuracyList.index((max(accuracyList)))
    print('Threshold: ' + str(thresholdList[ind]))
    print('Precision: ' + str(precisionList[ind]))
    print('Recall: ' + str(recallList[ind]))
    print('F1: ' + str(f1scoreList[ind]))
    print('Accuracy: ' + str(accuracyList[ind]))
    print('AUROC: ' + str(aucList[ind]))
    print('MCC: ' + str(mcList[ind]) + '\n')
    return max(accuracyList), precisionList[ind], recallList[ind], f1scoreList[ind], aucList[ind], mcList[ind]


path = "C://Users//Arushi//PycharmProjects//ThesisChap2//ClusteringBuckets//"
listOfPredictionProbabilities = []
actualpredictions = []

# genenamesFile = open("transformedColumnNames221.txt",'r').readline().rstrip('\n').split(',')
# selectedFeaturesfile = open('SelectedFeatures.csv').readlines()
# flag = 0
#
# list_of_gene_numbers = []
# for line in selectedFeaturesfile:
#
#
#     list_of_gene_names = line.rstrip('\n').split(',')
#     if len(list_of_gene_names) == 55:
#         for gene in list_of_gene_names:
#             list_of_gene_numbers.append(genenamesFile.index(gene))
#         flag = 1

finacclist = []
finpre = []
finrec = []
finf1 = []
finauc = []
finmcc = []
thresholdRange = np.linspace(start=0.40, stop=0.60, num=500)

for i in range(iterations):
    X_test = np.load(path + 'final_test_binarydata_' + str(i) + '.npy')
    Y_test = np.load(path + 'final_test_labels_' + str(i) + '.npy')
    # X_test = X_test[:, list_of_gene_numbers]
    X_test = X_test.astype('float')
    X_test = normalize(X_test)
    Y_test = Y_test.astype('float')
    Y_test = Y_test.astype(int)
    with open('Model_ism_lr' + str(i) + '.pkl', 'rb') as f:
        model = pickle.load(f)
    predictionsProb_file = open("predictionsProb_ism_lr" + str(i) + ".csv", 'w')
    predictionProbabilities = model.predict_proba(X_test)
    for prob in predictionProbabilities:
        for pr in prob:
            predictionsProb_file.write(str(pr) + ',')
        predictionsProb_file.write('\n')
    acc, pre, rec, f1, au, mcc = getResults(predictionProbabilities, Y_test)
    finacclist.append(acc)
    finpre.append(pre)
    finrec.append(rec)
    finf1.append(f1)
    finauc.append(au)
    finmcc.append(mcc)

print(sum(finacclist)/iterations)
print(sum(finpre)/iterations)
print(sum(finrec)/iterations)
print(sum(finf1)/iterations)
print(sum(finauc)/iterations)
print(sum(finmcc)/iterations)
print('Done')
|
[
"[email protected]"
] | |
1f1da02090abd16cccd2e361003b7c7c5129c546
|
045cb1a5638c3575296f83471758dc09a8065725
|
/addons/account/tests/test_reconciliation_heavy_load.py
|
076ed17493b8981e32bae3e3e33fa1a0c0f50dbb
|
[] |
no_license
|
marionumza/saas
|
7236842b0db98d1a0d0c3c88df32d268509629cb
|
148dd95d991a348ebbaff9396759a7dd1fe6e101
|
refs/heads/main
| 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,848 |
py
|
from harpiya import api, fields
from harpiya.tests import tagged
from harpiya.addons.account.tests.account_test_classes import AccountingTestCase


@tagged('post_install', '-at_install')
class TestReconciliationHeavyLoad(AccountingTestCase):
    """Check that reconciliation can be done for a move with many lines
    """

    def _create_move(self, journal):
        values = {
            'ref': "Test reconcile - Auto-generated by script",
            'journal_id': journal.id,
            'state': 'draft',
            'company_id': journal.env.user.company_id.id,
        }
        return journal.env['account.move'].create(values)

    def _get_values_account_move_line(
            self, account, journal, name, move,
            credit=0, debit=0, date=fields.Date.today()):
        return {
            'journal_id': journal.id,
            'name': name,
            'account_id': account.id,
            'move_id': move.id,
            'quantity': 1,
            'credit': credit,
            'debit': debit,
            'date': date,
        }

    def setUp(self):
        super(TestReconciliationHeavyLoad, self).setUp()
        self.account_type = self.env.ref('account.data_account_type_receivable')
        self.journal = self.env['account.journal'].search([
            ('type', '=', 'bank'),
            ('company_id', '=', self.env.user.company_id.id),
        ], limit=1)
        self.account = self.env['account.account'].search([
            ('user_type_id', '=', self.account_type.id),
            ('company_id', '=', self.env.user.company_id.id),
        ], limit=1)

    def test_heavy_load_reconciliation(self):
        """Does reconciliation on a move with nb_lines lines.
        To avoid burdening uselessly the runbot, we only set nb_lines to 10,
        but it should be of order 10^3 to be meaningful.
        The day we manage to use system build settings to execute tests
        this could be done automatically for "heavy load builds",
        but for now this should be changed manually.
        """
        total = 0
        line_ids = []
        amount_per_line = 1
        nb_lines = 10  # change this to 1000 or more
        move = self._create_move(self.journal)
        for i in range(nb_lines):
            name = "Move line credit #%s" % i
            total += amount_per_line
            values = self._get_values_account_move_line(
                self.account, self.journal, name, move, credit=amount_per_line)
            line_ids.append((0, False, values))
        values = self._get_values_account_move_line(
            self.account, self.journal, "Move line Debit", move, debit=total)
        line_ids.append((0, False, values))
        move.write({'line_ids': line_ids})
        move.line_ids.reconcile()
        self.assertTrue(all(move.line_ids.mapped('reconciled')))
|
[
"[email protected]"
] | |
92a9c983b89ac8d402b925347d860d62f034a371
|
f50f1aa1f8f139d546db3230a1cb1f53043fd9e6
|
/system/base/net-tools/actions.py
|
837eeac22cd5a7fb9a0f4446547b1c64394aa1b4
|
[] |
no_license
|
pars-linux/corporate2
|
7887961d1552d39bc3b0bef4a60fd3413d9b82bb
|
14d1eacfc824fb8d0bff8173e7ac06b36b88d10d
|
refs/heads/master
| 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 932 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt

from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get


def setup():
    pisitools.dosed("Makefile", "(?m)^(COPTS =.*)", "COPTS = %s -fPIE" % get.CFLAGS())
    pisitools.dosed("Makefile", "(?m)^(LOPTS =.*)", "LOPTS = %s -pie" % get.LDFLAGS())


def build():
    shelltools.export("CC", get.CC())
    autotools.make("libdir")
    autotools.make()
    autotools.make("ether-wake")
    autotools.make("i18ndir")


def install():
    autotools.rawInstall("BASEDIR=%s" % get.installDIR())
    pisitools.dosbin("ether-wake")
    pisitools.dosym("/bin/hostname", "/usr/bin/hostname")
    pisitools.dodoc("README", "README.ipv6", "TODO")
|
[
"eki@420bcd57-4a62-4fd6-832e-5ede16c90cc9"
] |
eki@420bcd57-4a62-4fd6-832e-5ede16c90cc9
|
8ee6b1e2088ea8fcf36bf7ea351d8bec16454b2f
|
b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4
|
/toontown/src/parties/activityFSMs.py
|
07ecce6deaf62a4abdea57d2c165f9c32a26ee27
|
[] |
no_license
|
satire6/Anesidora
|
da3a44e2a49b85252b87b612b435fb4970469583
|
0e7bfc1fe29fd595df0b982e40f94c30befb1ec7
|
refs/heads/master
| 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,359 |
py
|
#-------------------------------------------------------------------------------
# Contact: Rob Gordon
# Created: Oct 2008
#
# Purpose: Individual Activity FSMs
#-------------------------------------------------------------------------------

# Panda Imports
from direct.directnotify import DirectNotifyGlobal

# parties imports
from BaseActivityFSM import BaseActivityFSM
from activityFSMMixins import IdleMixin
from activityFSMMixins import RulesMixin
from activityFSMMixins import ActiveMixin
from activityFSMMixins import DisabledMixin
from activityFSMMixins import ConclusionMixin
from activityFSMMixins import WaitForEnoughMixin
from activityFSMMixins import WaitToStartMixin
from activityFSMMixins import WaitClientsReadyMixin
from activityFSMMixins import WaitForServerMixin


class FireworksActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, DisabledMixin):
    notify = DirectNotifyGlobal.directNotify.newCategory("FireworksActivityFSM")

    def __init__(self, activity):
        FireworksActivityFSM.notify.debug("__init__")
        BaseActivityFSM.__init__(self, activity)
        self.defaultTransitions = {
            "Idle": ["Active", "Disabled"],
            "Active": ["Disabled"],
            "Disabled": [],
        }


class CatchActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, ConclusionMixin):
    notify = DirectNotifyGlobal.directNotify.newCategory("CatchActivityFSM")

    def __init__(self, activity):
        CatchActivityFSM.notify.debug("__init__")
        BaseActivityFSM.__init__(self, activity)
        self.defaultTransitions = {
            "Idle": ["Active", "Conclusion"],
            "Active": ["Conclusion"],
            "Conclusion": ["Idle"],
        }


class TrampolineActivityFSM(BaseActivityFSM, IdleMixin, RulesMixin, ActiveMixin):
    notify = DirectNotifyGlobal.directNotify.newCategory("TrampolineActivityFSM")

    def __init__(self, activity):
        TrampolineActivityFSM.notify.debug("__init__")
        BaseActivityFSM.__init__(self, activity)
        self.defaultTransitions = {
            "Idle": ["Rules", "Active"],  # added Active to this list as the fsm will sometimes get set directly to this from idle when a toon comes late to a party
            "Rules": ["Active", "Idle"],
            "Active": ["Idle"],
        }


class DanceActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, DisabledMixin):
    notify = DirectNotifyGlobal.directNotify.newCategory("DanceActivityFSM")

    def __init__(self, activity):
        DanceActivityFSM.notify.debug("__init__")
        BaseActivityFSM.__init__(self, activity)
        self.defaultTransitions = {
            "Active": ["Disabled"],
            "Disabled": ["Active"],
        }


class TeamActivityAIFSM(BaseActivityFSM, WaitForEnoughMixin, WaitToStartMixin, WaitClientsReadyMixin, ActiveMixin, ConclusionMixin):
    notify = DirectNotifyGlobal.directNotify.newCategory("TeamActivityAIFSM")

    def __init__(self, activity):
        BaseActivityFSM.__init__(self, activity)
        self.notify.debug("__init__")
        self.defaultTransitions = {
            "WaitForEnough": ["WaitToStart"],
            "WaitToStart": ["WaitForEnough", "WaitClientsReady"],
            "WaitClientsReady": ["WaitForEnough", "Active"],
            "Active": ["WaitForEnough", "Conclusion"],
            "Conclusion": ["WaitForEnough"],
        }


class TeamActivityFSM(BaseActivityFSM, WaitForEnoughMixin, WaitToStartMixin, RulesMixin, WaitForServerMixin, ActiveMixin, ConclusionMixin):
    notify = DirectNotifyGlobal.directNotify.newCategory("TeamActivityFSM")

    def __init__(self, activity):
        BaseActivityFSM.__init__(self, activity)
        assert(self.notify.debug("__init__"))
        self.defaultTransitions = {
            "WaitForEnough": ["WaitToStart"],
            "WaitToStart": ["WaitForEnough", "Rules"],
            # Instances without the local toon in the activity will go from Rules directly to Active.
            # If a toon drops unexpectedly, the game will revert back to WaitForEnough
            "Rules": ["WaitForServer", "Active", "WaitForEnough"],
            "WaitForServer": ["Active", "WaitForEnough"],
            "Active": ["Conclusion", "WaitForEnough"],
            "Conclusion": ["WaitForEnough"],
        }
|
[
"[email protected]"
] | |
bf434a2dde5c1b7b25c30d1f2b90a45984deffc7
|
463c053bcf3f4a7337b634890720ea9467f14c87
|
/python/ray/ml/__init__.py
|
4778a644583fac1d6217ccdfc7faeb8972300648
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
pdames/ray
|
e8faddc4440976211a6bcead8f8b6e62c1dcda01
|
918d3601c6519d333f10910dc75eb549cbb82afa
|
refs/heads/master
| 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 |
Apache-2.0
| 2023-01-14T08:02:21 | 2020-03-06T20:59:04 |
Python
|
UTF-8
|
Python
| false | false | 209 |
py
|
from ray.ml.checkpoint import Checkpoint
from ray.ml.config import RunConfig, ScalingConfig
from ray.ml.preprocessor import Preprocessor
__all__ = ["Checkpoint", "Preprocessor", "RunConfig", "ScalingConfig"]
|
[
"[email protected]"
] | |
1a994d033d63f26689a109eacd06972b120aeb9f
|
128adc6237ecbcb493f927a5de7990be21542526
|
/data.py
|
684fa7d99b74eca739dd448959e48c1028646882
|
[] |
no_license
|
mwitiderrick/Determined-DVC
|
529f53cc1959a53b8081d1a0d40665ed95be9265
|
2bbce280f9c955cb76843671a8a9da3e1824c8e0
|
refs/heads/master
| 2023-02-19T11:58:46.848293 | 2021-01-22T12:26:53 | 2021-01-22T12:26:53 | 331,883,888 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
import tensorflow
import pandas as pd
import numpy as np


def load_training_data():
    df = pd.read_csv("data/mnist_train.csv")
    x_train = df.drop("label", axis=1)
    x_train = x_train.values
    y_train = df["label"].values
    return x_train, y_train


def load_validation_data():
    df = pd.read_csv("data/mnist_test.csv")
    x_test = df.drop("label", axis=1)
    x_test = x_test.values
    y_test = df["label"].values
    return x_test, y_test
|
[
"[email protected]"
] | |
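Editorial aside: a hedged usage sketch for the loaders above. It assumes data/mnist_train.csv and data/mnist_test.csv exist with a "label" column, exactly as data.py expects; the printed shapes are the usual MNIST sizes, not values taken from the record.

```python
# Hedged usage sketch: assumes the MNIST CSVs referenced by data.py are present.
from data import load_training_data, load_validation_data

x_train, y_train = load_training_data()
x_test, y_test = load_validation_data()
print(x_train.shape, y_train.shape)  # e.g. (60000, 784) (60000,)
print(x_test.shape, y_test.shape)    # e.g. (10000, 784) (10000,)
```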
54f04994ca4be43c62c02c025f8ead0f23b0e48d
|
4c6a5e64f1b530e7fd2a7596b0567db0cc8d45f3
|
/Tools/Scripts/libraries/webkitscmpy/webkitscmpy/remote/git_hub.py
|
1b2006afa72b7d9fa1b98256e2e18fb0ac339dde
|
[] |
no_license
|
heihuhei2013/WebKit
|
44a4814f89d6ab0051a4c0eb77d8ce830095bb67
|
65e89098d4437bafe28a0f8d3e7996aab02d212f
|
refs/heads/main
| 2023-02-28T22:38:36.033928 | 2021-01-31T17:16:58 | 2021-01-31T17:16:58 | 334,744,997 | 1 | 0 | null | 2021-01-31T20:00:48 | 2021-01-31T20:00:48 | null |
UTF-8
|
Python
| false | false | 16,077 |
py
|
# Copyright (C) 2020 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import calendar
import getpass
import os
import re
import requests
import six
import sys
from datetime import datetime
from requests.auth import HTTPBasicAuth
from subprocess import CalledProcessError
from webkitcorepy import OutputCapture, decorators
from webkitscmpy import Commit, Contributor
from webkitscmpy.remote.scm import Scm
from xml.dom import minidom
class GitHub(Scm):
URL_RE = re.compile(r'\Ahttps?://github.(?P<domain>\S+)/(?P<owner>\S+)/(?P<repository>\S+)\Z')
EMAIL_RE = re.compile(r'(?P<email>[^@]+@[^@]+)(@.*)?')
@classmethod
def is_webserver(cls, url):
return True if cls.URL_RE.match(url) else False
def __init__(self, url, dev_branches=None, prod_branches=None, contributors=None):
match = self.URL_RE.match(url)
if not match:
raise self.Exception("'{}' is not a valid GitHub project".format(url))
self.api_url = 'https://api.github.{}'.format(match.group('domain'))
self.owner = match.group('owner')
self.name = match.group('repository')
self._hash_link_re = re.compile(r'/{owner}/{name}/[^/]*commit[^/]*/(?P<hash>[0-9a-f]+)'.format(
owner=self.owner,
name=self.name,
))
self._cached_credentials = None
super(GitHub, self).__init__(url, dev_branches=dev_branches, prod_branches=prod_branches, contributors=contributors)
def credentials(self, required=True):
if self._cached_credentials:
return self._cached_credentials
prefix = self.url.split('/')[2].replace('.', '_').upper()
username = os.environ.get('{}_USERNAME'.format(prefix))
access_token = os.environ.get('{}_ACCESS_TOKEN'.format(prefix))
if username and access_token:
self._cached_credentials = (username, access_token)
return username, access_token
with OutputCapture():
try:
import keyring
except (CalledProcessError, ImportError):
keyring = None
username_prompted = False
password_prompted = False
if not username:
try:
if keyring:
username = keyring.get_password(self.api_url, 'username')
except RuntimeError:
pass
if not username and required:
if not sys.stderr.isatty() or not sys.stdin.isatty():
raise OSError('No tty to prompt user for username')
sys.stderr.write("Authentication required to use GitHub's API\n")
sys.stderr.write("Please generate a 'Personal access token' via 'Developer settings' for your user\n")
sys.stderr.write('Username: ')
username = (input if sys.version_info > (3, 0) else raw_input)()
username_prompted = True
if not access_token and required:
try:
if keyring:
access_token = keyring.get_password(self.api_url, username)
except RuntimeError:
pass
if not access_token:
if not sys.stderr.isatty() or not sys.stdin.isatty():
raise OSError('No tty to prompt user for username')
access_token = getpass.getpass('API key: ')
password_prompted = True
if username and access_token:
self._cached_credentials = (username, access_token)
if keyring and (username_prompted or password_prompted):
sys.stderr.write('Store username and access token in system keyring for {}? (Y/N): '.format(self.api_url))
response = (input if sys.version_info > (3, 0) else raw_input)()
if response.lower() in ['y', 'yes', 'ok']:
sys.stderr.write('Storing credentials...\n')
keyring.set_password(self.api_url, 'username', username)
keyring.set_password(self.api_url, username, access_token)
else:
sys.stderr.write('Credentials cached in process.\n')
return username, access_token
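    # --- Hedged usage note (not part of the original file) ---
    # Given the lookup above, interactive prompting can be avoided entirely by
    # exporting environment variables whose prefix is derived from the host
    # name; for a https://github.com/... remote the prefix is GITHUB_COM:
    #
    #     GITHUB_COM_USERNAME=<user> GITHUB_COM_ACCESS_TOKEN=<token>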
@property
def is_git(self):
return True
def request(self, path=None, params=None, headers=None, authenticated=None):
headers = {key: value for key, value in headers.items()} if headers else dict()
headers['Accept'] = headers.get('Accept', 'application/vnd.github.v3+json')
username, access_token = self.credentials(required=bool(authenticated))
auth = HTTPBasicAuth(username, access_token) if username and access_token else None
if authenticated is False:
auth = None
if authenticated and not auth:
raise self.Exception('Request requires authentication, none provided')
params = {key: value for key, value in params.items()} if params else dict()
params['per_page'] = params.get('per_page', 100)
params['page'] = params.get('page', 1)
url = '{api_url}/repos/{owner}/{name}{path}'.format(
api_url=self.api_url,
owner=self.owner,
name=self.name,
path='/{}'.format(path) if path else '',
)
response = requests.get(url, params=params, headers=headers, auth=auth)
if response.status_code != 200:
return None
result = response.json()
while isinstance(response.json(), list) and len(response.json()) == params['per_page']:
params['page'] += 1
response = requests.get(url, params=params, headers=headers, auth=auth)
if response.status_code != 200:
raise self.Exception("Failed to assemble pagination requests for '{}', failed on page {}".format(url, params['page']))
result += response.json()
return result
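    # --- Hedged note (not part of the original file) ---
    # request() follows GitHub's page-based pagination transparently: list
    # responses are re-fetched with an incrementing 'page' parameter until a
    # page shorter than 'per_page' comes back, so a call like
    # self.request('branches') returns the complete list rather than only the
    # first 100 entries.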
def _count_for_ref(self, ref=None):
ref = ref or self.default_branch
# We need the number of parents a commit has to construct identifiers, which is not something GitHub's
# API lets us find, although the UI does have the information
response = requests.get('{}/tree/{}'.format(self.url, ref))
if response.status_code != 200:
raise self.Exception("Failed to query {}'s UI to find the number of parents {} has".format(self.url, ref))
# This parsing is pretty brittle, but the webpage may not be valid xml
count = None
hash = None
previous = None
for line in response.text.splitlines():
match = self._hash_link_re.search(line)
if match:
hash = match.group('hash')
elif 'aria-label="Commits on ' in line:
count = int(minidom.parseString(previous).documentElement.childNodes[0].data.replace(',', ''))
break
previous = line
if not hash or not count:
raise self.Exception("Failed to compute the number of parents to '{}'".format(ref))
return count, hash
def _difference(self, reference, head):
response = self.request('compare/{}...{}'.format(reference, head), headers=dict(Accept='application/vnd.github.VERSION.sha'))
if not response or not response.get('status') in ['diverged', 'ahead', 'behind']:
raise self.Exception('Failed to request the difference between {} and {}'.format(reference, head))
return int(response['behind_by' if response.get('status') == 'behind' else 'ahead_by'])
def _branches_for(self, hash):
# We need to find the branch that a commit is on. GitHub's UI provides this information, but the only way to
# retrieve this information via the API would be to check all branches for the commit, so we scrape the UI.
response = requests.get('{}/branch_commits/{}'.format(self.url, hash))
if response.status_code != 200:
return []
result = []
for line in response.text.splitlines():
if 'class="branch"' not in line:
continue
result.append(minidom.parseString(line).documentElement.childNodes[0].childNodes[0].data)
return result
@property
@decorators.Memoize()
def default_branch(self):
response = self.request()
if not response:
raise self.Exception("Failed to query {} for {}'s default branch".format(self.url, self.name))
return response.get('default_branch', 'master')
@property
def branches(self):
response = self.request('branches')
if not response:
return [self.default_branch]
return sorted([details.get('name') for details in response if details.get('name')])
@property
def tags(self):
response = self.request('tags')
if not response:
return []
return sorted([details.get('name') for details in response if details.get('name')])
def commit(self, hash=None, revision=None, identifier=None, branch=None, tag=None, include_log=True):
if revision:
raise self.Exception('Cannot map revisions to commits on GitHub')
if identifier is not None:
if revision:
raise ValueError('Cannot define both revision and identifier')
if hash:
raise ValueError('Cannot define both hash and identifier')
if tag:
raise ValueError('Cannot define both tag and identifier')
parsed_branch_point, identifier, parsed_branch = Commit._parse_identifier(identifier, do_assert=True)
if parsed_branch:
if branch and branch != parsed_branch:
raise ValueError(
"Caller passed both 'branch' and 'identifier', but specified different branches ({} and {})".format(
branch, parsed_branch,
),
)
branch = parsed_branch
branch = branch or self.default_branch
is_default = branch == self.default_branch
if is_default and parsed_branch_point:
raise self.Exception('Cannot provide a branch point for a commit on the default branch')
if is_default:
base_count, base_ref = self._count_for_ref(ref=self.default_branch)
else:
_, base_ref = self._count_for_ref(ref=branch)
base_count = self._difference(self.default_branch, base_ref)
if identifier > base_count:
raise self.Exception('Identifier {} cannot be found on {}'.format(identifier, branch))
# Negative identifiers are actually commits on the default branch, we will need to re-compute the identifier
if identifier < 0 and is_default:
raise self.Exception('Illegal negative identifier on the default branch')
commit_data = self.request('commits/{}~{}'.format(base_ref, base_count - identifier))
if not commit_data:
raise self.Exception("Failed to retrieve commit information for '{}@{}'".format(identifier, branch or 'HEAD'))
# If an identifier is negative, unset it so we re-compute before constructing the commit.
if identifier <= 0:
identifier = None
elif branch or tag:
if hash:
raise ValueError('Cannot define both tag/branch and hash')
if branch and tag:
raise ValueError('Cannot define both tag and branch')
commit_data = self.request('commits/{}'.format(branch or tag))
if not commit_data:
raise self.Exception("Failed to retrieve commit information for '{}'".format(branch or tag))
else:
hash = Commit._parse_hash(hash, do_assert=True)
commit_data = self.request('commits/{}'.format(hash or self.default_branch))
if not commit_data:
raise self.Exception("Failed to retrieve commit information for '{}'".format(hash or 'HEAD'))
branches = self._branches_for(commit_data['sha'])
if branches:
branch = self.prioritize_branches(branches)
else:
# A commit not on any branches cannot have an identifier
identifier = None
branch = None
branch_point = None
if branch and branch == self.default_branch:
if not identifier:
result = self._count_for_ref(ref=commit_data['sha'])
if not result:
raise Exception('{} {}'.format(result, commit_data['sha']))
identifier, _ = result
elif branch:
if not identifier:
identifier = self._difference(self.default_branch, commit_data['sha'])
branch_point = self._count_for_ref(ref=commit_data['sha'])[0] - identifier
match = self.GIT_SVN_REVISION.search(commit_data['commit']['message'])
revision = int(match.group('revision')) if match else None
date = datetime.strptime(commit_data['commit']['committer']['date'], '%Y-%m-%dT%H:%M:%SZ')
email_match = self.EMAIL_RE.match(commit_data['commit']['author']['email'])
return Commit(
hash=commit_data['sha'],
revision=revision,
branch_point=branch_point,
identifier=identifier,
branch=branch,
timestamp=int(calendar.timegm(date.timetuple())),
author=self.contributors.create(
commit_data['commit']['author']['name'],
email_match.group('email') if email_match else None,
), message=commit_data['commit']['message'] if include_log else None,
)
def find(self, argument, include_log=True):
if not isinstance(argument, six.string_types):
raise ValueError("Expected 'argument' to be a string, not '{}'".format(type(argument)))
if argument in self.DEFAULT_BRANCHES:
argument = self.default_branch
parsed_commit = Commit.parse(argument, do_assert=False)
if parsed_commit:
if parsed_commit.branch in self.DEFAULT_BRANCHES:
parsed_commit.branch = self.default_branch
return self.commit(
hash=parsed_commit.hash,
revision=parsed_commit.revision,
identifier=parsed_commit.identifier,
branch=parsed_commit.branch,
include_log=include_log,
)
commit_data = self.request('commits/{}'.format(argument))
if not commit_data:
raise ValueError("'{}' is not an argument recognized by git".format(argument))
return self.commit(hash=commit_data['sha'], include_log=include_log)
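

# --- Hedged usage sketch (not part of the original file) ---
# Based on the class above, resolving the head of the default branch might
# look like the following (network access, and credentials for some endpoints,
# are required; the repository URL is purely illustrative):
#
#     remote = GitHub('https://github.com/WebKit/WebKit')
#     if GitHub.is_webserver(remote.url):
#         head = remote.commit(branch=remote.default_branch)
#         print(head.hash, head.identifier, head.branch)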
|
[
"[email protected]"
] | |
432948d68cc903561aac0a61cf7ff79ac3a11e08
|
5a281cb78335e06c631181720546f6876005d4e5
|
/swift-2.21.0/test/unit/obj/test_ssync_receiver.py
|
1818cb6453b73672d8745a8c404da09e16536dc6
|
[
"Apache-2.0"
] |
permissive
|
scottwedge/OpenStack-Stein
|
d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8
|
7077d1f602031dace92916f14e36b124f474de15
|
refs/heads/master
| 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 |
Apache-2.0
| 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null |
UTF-8
|
Python
| false | false | 100,872 |
py
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import shutil
import tempfile
import unittest
import eventlet
import mock
import six
from swift.common import bufferedhttp
from swift.common import exceptions
from swift.common import swob
from swift.common.storage_policy import POLICIES
from swift.common import utils
from swift.common.swob import HTTPException
from swift.obj import diskfile
from swift.obj import server
from swift.obj import ssync_receiver, ssync_sender
from swift.obj.reconstructor import ObjectReconstructor
from test import listen_zero, unit
from test.unit import (debug_logger, patch_policies, make_timestamp_iter,
mock_check_drive, skip_if_no_xattrs)
from test.unit.obj.common import write_diskfile
@unit.patch_policies()
class TestReceiver(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b'startcap'
# Not sure why the test.unit stuff isn't taking effect here; so I'm
# reinforcing it.
self.testdir = os.path.join(
tempfile.mkdtemp(), 'tmp_test_ssync_receiver')
utils.mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
self.conf = {
'devices': self.testdir,
'mount_check': 'false',
'replication_concurrency_per_device': '0',
'log_requests': 'false'}
utils.mkdirs(os.path.join(self.testdir, 'device', 'partition'))
self.controller = server.ObjectController(self.conf)
self.controller.bytes_per_sync = 1
self.account1 = 'a'
self.container1 = 'c'
self.object1 = 'o1'
self.name1 = '/' + '/'.join((
self.account1, self.container1, self.object1))
self.hash1 = utils.hash_path(
self.account1, self.container1, self.object1)
self.ts1 = '1372800001.00000'
self.metadata1 = {
'name': self.name1,
'X-Timestamp': self.ts1,
'Content-Length': '0'}
self.account2 = 'a'
self.container2 = 'c'
self.object2 = 'o2'
self.name2 = '/' + '/'.join((
self.account2, self.container2, self.object2))
self.hash2 = utils.hash_path(
self.account2, self.container2, self.object2)
self.ts2 = '1372800002.00000'
self.metadata2 = {
'name': self.name2,
'X-Timestamp': self.ts2,
'Content-Length': '0'}
def tearDown(self):
shutil.rmtree(os.path.dirname(self.testdir))
def body_lines(self, body):
lines = []
for line in body.split('\n'):
line = line.strip()
if line:
lines.append(line)
return lines
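    # --- Hedged note (not part of the original file) ---
    # The tests below exercise the SSYNC wire protocol: a sender streams a
    # ':MISSING_CHECK: START/END' section of '<hash> <timestamp>' lines
    # followed by an ':UPDATES: START/END' section of PUT/DELETE subrequests,
    # and the receiver replies with the hashes it still wants (flagged 'd'
    # and/or 'm' for data/meta) or with an ':ERROR:' line.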
def test_SSYNC_semaphore_locked(self):
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
self.controller.logger = mock.MagicMock()
mocked_replication_semaphore.acquire.return_value = False
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[":ERROR: 503 '<html><h1>Service Unavailable</h1><p>The "
"server is currently unavailable. Please try again at a "
"later time.</p></html>'"])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_SSYNC_calls_replication_lock(self):
with mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'replication_lock') as mocked_replication_lock:
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
mocked_replication_lock.assert_called_once_with('sda1',
POLICIES.legacy,
'1')
def test_Receiver_with_default_storage_policy(self):
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[0])
def test_Receiver_with_storage_policy_index_header(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
self.assertIsNone(rcvr.frag_index)
def test_Receiver_with_bad_storage_policy_index_header(self):
valid_indices = sorted([int(policy) for policy in POLICIES])
bad_index = valid_indices[-1] + 1
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '0',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': bad_index},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
self.controller.logger = mock.MagicMock()
try:
ssync_receiver.Receiver(self.controller, req)
self.fail('Expected HTTPException to be raised.')
except HTTPException as err:
self.assertEqual('503 Service Unavailable', err.status)
self.assertEqual('No policy with index 2', err.body)
@unit.patch_policies()
def test_Receiver_with_only_frag_index_header(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
self.assertEqual(rcvr.frag_index, 7)
@unit.patch_policies()
def test_Receiver_with_only_node_index_header(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
# we used to require the reconstructor to send the frag_index twice as
# two different headers because of evolutionary reasons, now we ignore
# node_index
self.assertEqual(rcvr.frag_index, None)
@unit.patch_policies()
def test_Receiver_with_matched_indexes(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '7',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
self.assertEqual(rcvr.frag_index, 7)
@unit.patch_policies()
def test_Receiver_with_invalid_indexes(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': 'None',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': 'None',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
@unit.patch_policies()
def test_Receiver_with_mismatched_indexes(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '6',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
# node_index if provided should always match frag_index; but if they
# differ, frag_index takes precedence
self.assertEqual(rcvr.frag_index, 7)
def test_SSYNC_replication_lock_fail(self):
def _mock(path, policy, partition):
with exceptions.ReplicationLockTimeout(0.01, '/somewhere/' + path):
eventlet.sleep(0.05)
with mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'replication_lock', _mock):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[":ERROR: 0 '0.01 seconds: /somewhere/sda1'"])
self.controller.logger.debug.assert_called_once_with(
'None/sda1/1 SSYNC LOCK TIMEOUT: 0.01 seconds: '
'/somewhere/sda1')
def test_SSYNC_replication_lock_per_partition(self):
def _concurrent_ssync(path1, path2):
env = {'REQUEST_METHOD': 'SSYNC'}
body = ':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n' \
':UPDATES: START\r\n:UPDATES: END\r\n'
req1 = swob.Request.blank(path1, environ=env, body=body)
req2 = swob.Request.blank(path2, environ=env, body=body)
rcvr1 = ssync_receiver.Receiver(self.controller, req1)
rcvr2 = ssync_receiver.Receiver(self.controller, req2)
body_lines1 = []
body_lines2 = []
for chunk1, chunk2 in itertools.izip_longest(rcvr1(), rcvr2()):
if chunk1 and chunk1.strip():
body_lines1.append(chunk1.strip())
if chunk2 and chunk2.strip():
body_lines2.append(chunk2.strip())
return body_lines1, body_lines2
self.controller._diskfile_router[POLICIES[0]]\
.replication_lock_timeout = 0.01
self.controller._diskfile_router[POLICIES[0]]\
.replication_concurrency_per_device = 2
# It should be possible to lock two different partitions
body_lines1, body_lines2 = _concurrent_ssync('/sda1/1', '/sda1/2')
self.assertEqual(
body_lines1,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(
body_lines2,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
# It should not be possible to lock the same partition twice
body_lines1, body_lines2 = _concurrent_ssync('/sda1/1', '/sda1/1')
self.assertEqual(
body_lines1,
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertRegexpMatches(
''.join(body_lines2),
"^:ERROR: 0 '0\.0[0-9]+ seconds: "
"/.+/sda1/objects/1/.lock-replication'$")
def test_SSYNC_initial_path(self):
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
'/device', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
["Invalid path: /device"])
self.assertEqual(resp.status_int, 400)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
'/device/', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
["Invalid path: /device/"])
self.assertEqual(resp.status_int, 400)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':ERROR: 0 "Looking for :MISSING_CHECK: START got \'\'"'])
self.assertEqual(resp.status_int, 200)
mocked_replication_semaphore.acquire.assert_called_once_with(0)
mocked_replication_semaphore.release.assert_called_once_with()
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
'/device/partition/junk',
environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
["Invalid path: /device/partition/junk"])
self.assertEqual(resp.status_int, 400)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
def test_SSYNC_mount_check(self):
with mock.patch.object(self.controller, 'replication_semaphore'), \
mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'mount_check', False), \
mock_check_drive(isdir=True) as mocks:
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':ERROR: 0 "Looking for :MISSING_CHECK: START got \'\'"'])
self.assertEqual(resp.status_int, 200)
self.assertEqual([], mocks['ismount'].call_args_list)
with mock.patch.object(self.controller, 'replication_semaphore'), \
mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'mount_check', True), \
mock_check_drive(ismount=False) as mocks:
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
["<html><h1>Insufficient Storage</h1><p>There "
"was not enough space to save the resource. Drive: "
"device</p></html>"])
self.assertEqual(resp.status_int, 507)
self.assertEqual([mock.call(os.path.join(
self.controller._diskfile_router[POLICIES.legacy].devices,
'device'))], mocks['ismount'].call_args_list)
mocks['ismount'].reset_mock()
mocks['ismount'].return_value = True
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':ERROR: 0 "Looking for :MISSING_CHECK: START got \'\'"'])
self.assertEqual(resp.status_int, 200)
self.assertEqual([mock.call(os.path.join(
self.controller._diskfile_router[POLICIES.legacy].devices,
'device'))], mocks['ismount'].call_args_list)
def test_SSYNC_Exception(self):
class _Wrapper(six.StringIO):
def __init__(self, value):
six.StringIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def get_socket(self):
return self.mock_socket
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\nBad content is here')
req.remote_addr = '1.2.3.4'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'Got no headers for Bad content is here'"])
self.assertEqual(resp.status_int, 200)
mock_shutdown_safe.assert_called_once_with(
mock_wsgi_input.mock_socket)
mock_wsgi_input.mock_socket.close.assert_called_once_with()
self.controller.logger.exception.assert_called_once_with(
'1.2.3.4/device/partition EXCEPTION in ssync.Receiver')
def test_SSYNC_Exception_Exception(self):
class _Wrapper(six.StringIO):
def __init__(self, value):
six.StringIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def get_socket(self):
return self.mock_socket
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\nBad content is here')
req.remote_addr = mock.MagicMock()
req.remote_addr.__str__ = mock.Mock(
side_effect=Exception("can't stringify this"))
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END'])
self.assertEqual(resp.status_int, 200)
mock_shutdown_safe.assert_called_once_with(
mock_wsgi_input.mock_socket)
mock_wsgi_input.mock_socket.close.assert_called_once_with()
self.controller.logger.exception.assert_called_once_with(
'EXCEPTION in ssync.Receiver')
def test_MISSING_CHECK_timeout(self):
class _Wrapper(six.StringIO):
def __init__(self, value):
six.StringIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def readline(self, sizehint=-1):
line = six.StringIO.readline(self)
if line.startswith('hash'):
eventlet.sleep(0.1)
return line
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
'hash ts\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
req.remote_addr = '2.3.4.5'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[":ERROR: 408 '0.01 seconds: missing_check line'"])
self.assertEqual(resp.status_int, 200)
self.assertTrue(mock_shutdown_safe.called)
self.controller.logger.error.assert_called_once_with(
'2.3.4.5/sda1/1 TIMEOUT in ssync.Receiver: '
'0.01 seconds: missing_check line')
def test_MISSING_CHECK_other_exception(self):
class _Wrapper(six.StringIO):
def __init__(self, value):
six.StringIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def readline(self, sizehint=-1):
line = six.StringIO.readline(self)
if line.startswith('hash'):
raise Exception('test exception')
return line
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
'hash ts\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
req.remote_addr = '3.4.5.6'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[":ERROR: 0 'test exception'"])
self.assertEqual(resp.status_int, 200)
self.assertTrue(mock_shutdown_safe.called)
self.controller.logger.exception.assert_called_once_with(
'3.4.5.6/sda1/1 EXCEPTION in ssync.Receiver')
def test_MISSING_CHECK_empty_list(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_none(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
self.hash1 + ' dm',
self.hash2 + ' dm',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_extra_line_parts(self):
# check that rx tolerates extra parts in missing check lines to
# allow for protocol upgrades
extra_1 = 'extra'
extra_2 = 'multiple extra parts'
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + ' ' + extra_1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + ' ' + extra_2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
self.hash1 + ' dm',
self.hash2 + ' dm',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_one_exact(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, self.ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
self.hash2 + ' dm',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_missing_meta_expired_data(self):
# verify that even when rx disk file has expired x-delete-at, it will
# still be opened and checked for missing meta
self.controller.logger = mock.MagicMock()
ts1 = next(make_timestamp_iter())
df = self.controller.get_diskfile(
'sda1', '1', self.account1, self.container1, self.object1,
POLICIES[0])
write_diskfile(df, ts1, extra_metadata={'X-Delete-At': 0})
# make a request - expect newer metadata to be wanted
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts1.internal + ' m:30d40\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
'c2519f265f9633e74f9b2fe3b9bec27d m',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
@patch_policies(with_ec_default=True)
def test_MISSING_CHECK_missing_durable(self):
self.controller.logger = mock.MagicMock()
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
# make rx disk file but don't commit it, so durable state is missing
ts1 = next(make_timestamp_iter()).internal
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, ts1 + '#2.data'), 'w+')
fp.write('1')
fp.flush()
metadata1 = {
'name': self.name1,
'X-Timestamp': ts1,
'Content-Length': '1'}
diskfile.write_metadata(fp, metadata1)
# make a request - expect no data to be wanted
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts1 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
@patch_policies(with_ec_default=True)
@mock.patch('swift.obj.diskfile.ECDiskFileWriter.commit')
def test_MISSING_CHECK_missing_durable_but_commit_fails(self, mock_commit):
self.controller.logger = mock.MagicMock()
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
# make rx disk file but don't commit it, so durable state is missing
ts1 = next(make_timestamp_iter()).internal
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, ts1 + '#2.data'), 'w+')
fp.write('1')
fp.flush()
metadata1 = {
'name': self.name1,
'X-Timestamp': ts1,
'Content-Length': '1'}
diskfile.write_metadata(fp, metadata1)
# make a request with commit disabled - expect data to be wanted
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts1 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
self.hash1 + ' dm',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
# make a request with commit raising error - expect data to be wanted
mock_commit.side_effect = Exception
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts1 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
self.hash1 + ' dm',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.exception.called)
self.assertIn(
'EXCEPTION in ssync.Receiver while attempting commit of',
self.controller.logger.exception.call_args[0][0])
def test_MISSING_CHECK_storage_policy(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[1])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, self.ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
self.hash2 + ' dm',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_one_newer(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
newer_ts1 = utils.normalize_timestamp(float(self.ts1) + 1)
self.metadata1['X-Timestamp'] = newer_ts1
fp = open(os.path.join(object_dir, newer_ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
self.hash2 + ' dm',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_newer_meta(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
older_ts1 = utils.normalize_timestamp(float(self.ts1) - 1)
self.metadata1['X-Timestamp'] = older_ts1
fp = open(os.path.join(object_dir, older_ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
# write newer .meta file
metadata = {'name': self.name1, 'X-Timestamp': self.ts2,
'X-Object-Meta-Test': 'test'}
fp = open(os.path.join(object_dir, self.ts2 + '.meta'), 'w+')
diskfile.write_metadata(fp, metadata)
# receiver has .data at older_ts, .meta at ts2
# sender has .data at ts1
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
self.hash1 + ' d',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_older_meta(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
older_ts1 = utils.normalize_timestamp(float(self.ts1) - 1)
self.metadata1['X-Timestamp'] = older_ts1
fp = open(os.path.join(object_dir, older_ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
# write .meta file at ts1
metadata = {'name': self.name1, 'X-Timestamp': self.ts1,
'X-Object-Meta-Test': 'test'}
fp = open(os.path.join(object_dir, self.ts1 + '.meta'), 'w+')
diskfile.write_metadata(fp, metadata)
# receiver has .data at older_ts, .meta at ts1
# sender has .data at older_ts, .meta at ts2
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + older_ts1 + ' m:30d40\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START',
self.hash1 + ' m',
':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_UPDATES_timeout(self):
class _Wrapper(six.StringIO):
def __init__(self, value):
six.StringIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def readline(self, sizehint=-1):
line = six.StringIO.readline(self)
if line.startswith('DELETE'):
eventlet.sleep(0.1)
return line
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n'
':UPDATES: END\r\n')
req.remote_addr = '2.3.4.5'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 408 '0.01 seconds: updates line'"])
self.assertEqual(resp.status_int, 200)
mock_shutdown_safe.assert_called_once_with(
mock_wsgi_input.mock_socket)
mock_wsgi_input.mock_socket.close.assert_called_once_with()
self.controller.logger.error.assert_called_once_with(
'2.3.4.5/device/partition TIMEOUT in ssync.Receiver: '
'0.01 seconds: updates line')
def test_UPDATES_other_exception(self):
class _Wrapper(six.StringIO):
def __init__(self, value):
six.StringIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def readline(self, sizehint=-1):
line = six.StringIO.readline(self)
if line.startswith('DELETE'):
raise Exception('test exception')
return line
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n'
':UPDATES: END\r\n')
req.remote_addr = '3.4.5.6'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'test exception'"])
self.assertEqual(resp.status_int, 200)
mock_shutdown_safe.assert_called_once_with(
mock_wsgi_input.mock_socket)
mock_wsgi_input.mock_socket.close.assert_called_once_with()
self.controller.logger.exception.assert_called_once_with(
'3.4.5.6/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_no_problems_no_hard_disconnect(self):
class _Wrapper(six.StringIO):
def __init__(self, value):
six.StringIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(ssync_receiver.eventlet.greenio,
'shutdown_safe') as mock_shutdown_safe, \
mock.patch.object(
self.controller, 'DELETE',
return_value=swob.HTTPNoContent()):
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n'
':UPDATES: END\r\n')
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(mock_shutdown_safe.called)
self.assertFalse(mock_wsgi_input.mock_socket.close.called)
def test_UPDATES_bad_subrequest_line(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'bad_subrequest_line\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'need more than 1 value to unpack'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
with mock.patch.object(
self.controller, 'DELETE',
return_value=swob.HTTPNoContent()):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n'
'bad_subrequest_line2')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'need more than 1 value to unpack'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_no_headers(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'Got no headers for DELETE /a/c/o'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_bad_headers(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'Bad-Header Test\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'need more than 1 value to unpack'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'Good-Header: Test\r\n'
'Bad-Header Test\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'need more than 1 value to unpack'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_bad_content_length(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
'Content-Length: a\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':ERROR: 0 "invalid literal for int() with base 10: \'a\'"'])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_content_length_with_DELETE(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'Content-Length: 1\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'DELETE subrequest with content-length /a/c/o'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_no_content_length_with_PUT(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'No content-length sent for PUT /a/c/o'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_early_termination(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
'Content-Length: 1\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'Early termination for PUT /a/c/o'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_failures(self):
@server.public
def _DELETE(request):
if request.path == '/device/partition/a/c/works':
return swob.HTTPNoContent()
else:
return swob.HTTPInternalServerError()
# failures never hit threshold
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.replication_failure_threshold = 4
self.controller.replication_failure_ratio = 1.5
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 500 'ERROR: With :UPDATES: 3 failures to 0 "
"successes'"])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(3, self.controller.logger.warning.call_count)
self.controller.logger.clear()
# failures hit threshold and no successes, so ratio is like infinity
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.replication_failure_threshold = 4
self.controller.replication_failure_ratio = 1.5
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
':UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'Too many 4 failures to 0 successes'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(4, self.controller.logger.warning.call_count)
self.controller.logger.clear()
# failures hit threshold and ratio hits 1.33333333333
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.replication_failure_threshold = 4
self.controller.replication_failure_ratio = 1.5
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
':UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 500 'ERROR: With :UPDATES: 4 failures to 3 "
"successes'"])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(4, self.controller.logger.warning.call_count)
self.controller.logger.clear()
# failures hit threshold and ratio hits 2.0
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.replication_failure_threshold = 4
self.controller.replication_failure_ratio = 1.5
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
':UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'Too many 4 failures to 2 successes'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(4, self.controller.logger.warning.call_count)
self.controller.logger.clear()
def test_UPDATES_PUT(self):
_PUT_request = [None]
@server.public
def _PUT(request):
_PUT_request[0] = request
request.read_body = request.environ['wsgi.input'].read()
return swob.HTTPCreated()
with mock.patch.object(self.controller, 'PUT', _PUT):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
'Content-Length: 1\r\n'
'Etag: c4ca4238a0b923820dcc509a6f75849b\r\n'
'X-Timestamp: 1364456113.12344\r\n'
'X-Object-Meta-Test1: one\r\n'
'Content-Encoding: gzip\r\n'
'Specialty-Header: value\r\n'
'\r\n'
'1')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_PUT_request), 1) # sanity
req = _PUT_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertEqual(req.content_length, 1)
self.assertEqual(req.headers, {
'Etag': 'c4ca4238a0b923820dcc509a6f75849b',
'Content-Length': '1',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp x-object-meta-test1 '
'content-encoding specialty-header')})
def test_UPDATES_PUT_replication_headers(self):
self.controller.logger = mock.MagicMock()
# sanity check - regular PUT will not persist Specialty-Header
req = swob.Request.blank(
'/sda1/0/a/c/o1', body='1',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '1',
'Content-Type': 'text/plain',
'Etag': 'c4ca4238a0b923820dcc509a6f75849b',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
df = self.controller.get_diskfile(
'sda1', '0', 'a', 'c', 'o1', POLICIES.default)
df.open()
self.assertFalse('Specialty-Header' in df.get_metadata())
# an SSYNC request can override PUT header filtering...
req = swob.Request.blank(
'/sda1/0',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o2\r\n'
'Content-Length: 1\r\n'
'Content-Type: text/plain\r\n'
'Etag: c4ca4238a0b923820dcc509a6f75849b\r\n'
'X-Timestamp: 1364456113.12344\r\n'
'X-Object-Meta-Test1: one\r\n'
'Content-Encoding: gzip\r\n'
'Specialty-Header: value\r\n'
'\r\n'
'1')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
# verify diskfile has metadata permitted by replication headers
# including Specialty-Header
df = self.controller.get_diskfile(
'sda1', '0', 'a', 'c', 'o2', POLICIES.default)
df.open()
for chunk in df.reader():
self.assertEqual('1', chunk)
expected = {'ETag': 'c4ca4238a0b923820dcc509a6f75849b',
'Content-Length': '1',
'Content-Type': 'text/plain',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'name': '/a/c/o2'}
actual = df.get_metadata()
self.assertEqual(expected, actual)
def test_UPDATES_POST(self):
_POST_request = [None]
@server.public
def _POST(request):
_POST_request[0] = request
return swob.HTTPAccepted()
with mock.patch.object(self.controller, 'POST', _POST):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'POST /a/c/o\r\n'
'X-Timestamp: 1364456113.12344\r\n'
'X-Object-Meta-Test1: one\r\n'
'Specialty-Header: value\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
req = _POST_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertIsNone(req.content_length)
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Specialty-Header': 'value',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'x-timestamp x-object-meta-test1 specialty-header')})
def test_UPDATES_with_storage_policy(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
_PUT_request = [None]
@server.public
def _PUT(request):
_PUT_request[0] = request
request.read_body = request.environ['wsgi.input'].read()
return swob.HTTPCreated()
with mock.patch.object(self.controller, 'PUT', _PUT):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
'Content-Length: 1\r\n'
'X-Timestamp: 1364456113.12344\r\n'
'X-Object-Meta-Test1: one\r\n'
'Content-Encoding: gzip\r\n'
'Specialty-Header: value\r\n'
'\r\n'
'1')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_PUT_request), 1) # sanity
req = _PUT_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertEqual(req.content_length, 1)
self.assertEqual(req.headers, {
'Content-Length': '1',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp x-object-meta-test1 '
'content-encoding specialty-header')})
self.assertEqual(req.read_body, '1')
def test_UPDATES_PUT_with_storage_policy_and_node_index(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
_PUT_request = [None]
@server.public
def _PUT(request):
_PUT_request[0] = request
request.read_body = request.environ['wsgi.input'].read()
return swob.HTTPCreated()
with mock.patch.object(self.controller, 'PUT', _PUT):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '7',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
'Content-Length: 1\r\n'
'X-Timestamp: 1364456113.12344\r\n'
'X-Object-Meta-Test1: one\r\n'
'Content-Encoding: gzip\r\n'
'Specialty-Header: value\r\n'
'\r\n'
'1')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_PUT_request), 1) # sanity
req = _PUT_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertEqual(req.content_length, 1)
self.assertEqual(req.headers, {
'Content-Length': '1',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Ssync-Frag-Index': '7',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp x-object-meta-test1 '
'content-encoding specialty-header')})
self.assertEqual(req.read_body, '1')
def test_UPDATES_DELETE(self):
_DELETE_request = [None]
@server.public
def _DELETE(request):
_DELETE_request[0] = request
return swob.HTTPNoContent()
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_DELETE_request), 1) # sanity
req = _DELETE_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.76334',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': 'x-timestamp'})
def test_UPDATES_BONK(self):
_BONK_request = [None]
@server.public
def _BONK(request):
_BONK_request[0] = request
return swob.HTTPOk()
self.controller.BONK = _BONK
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'BONK /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 0 'Invalid subrequest method BONK'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
self.assertEqual(len(_BONK_request), 1) # sanity
self.assertIsNone(_BONK_request[0])
def test_UPDATES_multiple(self):
_requests = []
@server.public
def _PUT(request):
_requests.append(request)
request.read_body = request.environ['wsgi.input'].read()
return swob.HTTPCreated()
@server.public
def _POST(request):
_requests.append(request)
return swob.HTTPOk()
@server.public
def _DELETE(request):
_requests.append(request)
return swob.HTTPNoContent()
with mock.patch.object(self.controller, 'PUT', _PUT), \
mock.patch.object(self.controller, 'POST', _POST), \
mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o1\r\n'
'Content-Length: 1\r\n'
'X-Timestamp: 1364456113.00001\r\n'
'X-Object-Meta-Test1: one\r\n'
'Content-Encoding: gzip\r\n'
'Specialty-Header: value\r\n'
'\r\n'
'1'
'DELETE /a/c/o2\r\n'
'X-Timestamp: 1364456113.00002\r\n'
'\r\n'
'PUT /a/c/o3\r\n'
'Content-Length: 3\r\n'
'X-Timestamp: 1364456113.00003\r\n'
'\r\n'
'123'
'PUT /a/c/o4\r\n'
'Content-Length: 4\r\n'
'X-Timestamp: 1364456113.00004\r\n'
'\r\n'
'1\r\n4'
'DELETE /a/c/o5\r\n'
'X-Timestamp: 1364456113.00005\r\n'
'\r\n'
'DELETE /a/c/o6\r\n'
'X-Timestamp: 1364456113.00006\r\n'
'\r\n'
'PUT /a/c/o7\r\n'
'Content-Length: 7\r\n'
'X-Timestamp: 1364456113.00007\r\n'
'\r\n'
'1234567'
'POST /a/c/o7\r\n'
'X-Object-Meta-Test-User: user_meta\r\n'
'X-Timestamp: 1364456113.00008\r\n'
'\r\n'
)
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_requests), 8) # sanity
req = _requests.pop(0)
self.assertEqual(req.method, 'PUT')
self.assertEqual(req.path, '/device/partition/a/c/o1')
self.assertEqual(req.content_length, 1)
self.assertEqual(req.headers, {
'Content-Length': '1',
'X-Timestamp': '1364456113.00001',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp x-object-meta-test1 '
'content-encoding specialty-header')})
self.assertEqual(req.read_body, '1')
req = _requests.pop(0)
self.assertEqual(req.method, 'DELETE')
self.assertEqual(req.path, '/device/partition/a/c/o2')
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.00002',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': 'x-timestamp'})
req = _requests.pop(0)
self.assertEqual(req.method, 'PUT')
self.assertEqual(req.path, '/device/partition/a/c/o3')
self.assertEqual(req.content_length, 3)
self.assertEqual(req.headers, {
'Content-Length': '3',
'X-Timestamp': '1364456113.00003',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, '123')
req = _requests.pop(0)
self.assertEqual(req.method, 'PUT')
self.assertEqual(req.path, '/device/partition/a/c/o4')
self.assertEqual(req.content_length, 4)
self.assertEqual(req.headers, {
'Content-Length': '4',
'X-Timestamp': '1364456113.00004',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, '1\r\n4')
req = _requests.pop(0)
self.assertEqual(req.method, 'DELETE')
self.assertEqual(req.path, '/device/partition/a/c/o5')
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.00005',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': 'x-timestamp'})
req = _requests.pop(0)
self.assertEqual(req.method, 'DELETE')
self.assertEqual(req.path, '/device/partition/a/c/o6')
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.00006',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': 'x-timestamp'})
req = _requests.pop(0)
self.assertEqual(req.method, 'PUT')
self.assertEqual(req.path, '/device/partition/a/c/o7')
self.assertEqual(req.content_length, 7)
self.assertEqual(req.headers, {
'Content-Length': '7',
'X-Timestamp': '1364456113.00007',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, '1234567')
req = _requests.pop(0)
self.assertEqual(req.method, 'POST')
self.assertEqual(req.path, '/device/partition/a/c/o7')
self.assertIsNone(req.content_length)
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.00008',
'X-Object-Meta-Test-User': 'user_meta',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'x-object-meta-test-user x-timestamp')})
self.assertEqual(_requests, [])
def test_UPDATES_subreq_does_not_read_all(self):
# This tests that if a SSYNC subrequest fails and doesn't read
# all the subrequest body that it will read and throw away the rest of
# the body before moving on to the next subrequest.
# If you comment out the part in ssync_receiver where it does:
# for junk in subreq.environ['wsgi.input']:
# pass
# You can then see this test fail.
_requests = []
@server.public
def _PUT(request):
_requests.append(request)
# Deliberately just reading up to first 2 bytes.
request.read_body = request.environ['wsgi.input'].read(2)
return swob.HTTPInternalServerError()
class _IgnoreReadlineHint(six.StringIO):
def __init__(self, value):
six.StringIO.__init__(self, value)
def readline(self, hint=-1):
return six.StringIO.readline(self)
self.controller.PUT = _PUT
self.controller.network_chunk_size = 2
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o1\r\n'
'Content-Length: 3\r\n'
'X-Timestamp: 1364456113.00001\r\n'
'\r\n'
'123'
'PUT /a/c/o2\r\n'
'Content-Length: 1\r\n'
'X-Timestamp: 1364456113.00002\r\n'
'\r\n'
'1')
req.environ['wsgi.input'] = _IgnoreReadlineHint(req.body)
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[':MISSING_CHECK: START', ':MISSING_CHECK: END',
":ERROR: 500 'ERROR: With :UPDATES: 2 failures to 0 successes'"])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(2, self.controller.logger.warning.call_count)
self.assertEqual(len(_requests), 2) # sanity
req = _requests.pop(0)
self.assertEqual(req.path, '/device/partition/a/c/o1')
self.assertEqual(req.content_length, 3)
self.assertEqual(req.headers, {
'Content-Length': '3',
'X-Timestamp': '1364456113.00001',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, '12')
req = _requests.pop(0)
self.assertEqual(req.path, '/device/partition/a/c/o2')
self.assertEqual(req.content_length, 1)
self.assertEqual(req.headers, {
'Content-Length': '1',
'X-Timestamp': '1364456113.00002',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, '1')
self.assertEqual(_requests, [])
@patch_policies(with_ec_default=True)
class TestSsyncRxServer(unittest.TestCase):
# Tests to verify behavior of SSYNC requests sent to an object
# server socket.
def setUp(self):
skip_if_no_xattrs()
# dirs
self.tmpdir = tempfile.mkdtemp()
self.tempdir = os.path.join(self.tmpdir, 'tmp_test_obj_server')
self.devices = os.path.join(self.tempdir, 'srv/node')
for device in ('sda1', 'sdb1'):
os.makedirs(os.path.join(self.devices, device))
self.conf = {
'devices': self.devices,
'swift_dir': self.tempdir,
}
self.rx_logger = debug_logger('test-object-server')
rx_server = server.ObjectController(self.conf, logger=self.rx_logger)
self.rx_ip = '127.0.0.1'
self.sock = listen_zero()
self.rx_server = eventlet.spawn(
eventlet.wsgi.server, self.sock, rx_server, utils.NullLogger())
self.rx_port = self.sock.getsockname()[1]
self.tx_logger = debug_logger('test-reconstructor')
self.daemon = ObjectReconstructor(self.conf, self.tx_logger)
self.daemon._diskfile_mgr = self.daemon._df_router[POLICIES[0]]
def tearDown(self):
self.rx_server.kill()
self.sock.close()
eventlet.sleep(0)
shutil.rmtree(self.tmpdir)
def test_SSYNC_disconnect(self):
node = {
'replication_ip': '127.0.0.1',
'replication_port': self.rx_port,
'device': 'sdb1',
}
job = {
'partition': 0,
'policy': POLICIES[0],
'device': 'sdb1',
}
sender = ssync_sender.Sender(self.daemon, node, job, ['abc'])
# kick off the sender and let the error trigger failure
with mock.patch(
'swift.obj.ssync_receiver.Receiver.initialize_request') \
as mock_initialize_request:
mock_initialize_request.side_effect = \
swob.HTTPInternalServerError()
success, _ = sender()
self.assertFalse(success)
stderr = six.StringIO()
with mock.patch('sys.stderr', stderr):
# let gc and eventlet spin a bit
del sender
for i in range(3):
eventlet.sleep(0)
self.assertNotIn('ValueError: invalid literal for int() with base 16',
stderr.getvalue())
def test_SSYNC_device_not_available(self):
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\
as mock_missing_check:
self.connection = bufferedhttp.BufferedHTTPConnection(
'127.0.0.1:%s' % self.rx_port)
self.connection.putrequest('SSYNC', '/sdc1/0')
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Storage-Policy-Index',
int(POLICIES[0]))
self.connection.endheaders()
resp = self.connection.getresponse()
self.assertEqual(507, resp.status)
resp.read()
resp.close()
# sanity check that the receiver did not proceed to missing_check
self.assertFalse(mock_missing_check.called)
def test_SSYNC_invalid_policy(self):
valid_indices = sorted([int(policy) for policy in POLICIES])
bad_index = valid_indices[-1] + 1
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\
as mock_missing_check:
self.connection = bufferedhttp.BufferedHTTPConnection(
'127.0.0.1:%s' % self.rx_port)
self.connection.putrequest('SSYNC', '/sda1/0')
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Storage-Policy-Index',
bad_index)
self.connection.endheaders()
resp = self.connection.getresponse()
self.assertEqual(503, resp.status)
resp.read()
resp.close()
# sanity check that the receiver did not proceed to missing_check
self.assertFalse(mock_missing_check.called)
def test_bad_request_invalid_frag_index(self):
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\
as mock_missing_check:
self.connection = bufferedhttp.BufferedHTTPConnection(
'127.0.0.1:%s' % self.rx_port)
self.connection.putrequest('SSYNC', '/sda1/0')
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Ssync-Frag-Index',
'None')
self.connection.endheaders()
resp = self.connection.getresponse()
self.assertEqual(400, resp.status)
error_msg = resp.read()
self.assertIn("Invalid X-Backend-Ssync-Frag-Index 'None'", error_msg)
resp.close()
# sanity check that the receiver did not proceed to missing_check
self.assertFalse(mock_missing_check.called)
class TestModuleMethods(unittest.TestCase):
def test_decode_missing(self):
object_hash = '9d41d8cd98f00b204e9800998ecf0abc'
ts_iter = make_timestamp_iter()
t_data = next(ts_iter)
t_meta = next(ts_iter)
t_ctype = next(ts_iter)
d_meta_data = t_meta.raw - t_data.raw
d_ctype_data = t_ctype.raw - t_data.raw
# legacy single timestamp string
msg = '%s %s' % (object_hash, t_data.internal)
expected = dict(object_hash=object_hash,
ts_meta=t_data,
ts_data=t_data,
ts_ctype=t_data)
self.assertEqual(expected, ssync_receiver.decode_missing(msg))
# hex meta delta encoded as extra message part
msg = '%s %s m:%x' % (object_hash, t_data.internal, d_meta_data)
expected = dict(object_hash=object_hash,
ts_data=t_data,
ts_meta=t_meta,
ts_ctype=t_data)
self.assertEqual(expected, ssync_receiver.decode_missing(msg))
# hex content type delta encoded in extra message part
msg = '%s %s t:%x,m:%x' % (object_hash, t_data.internal,
d_ctype_data, d_meta_data)
expected = dict(object_hash=object_hash,
ts_data=t_data,
ts_meta=t_meta,
ts_ctype=t_ctype)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg))
# order of subparts does not matter
msg = '%s %s m:%x,t:%x' % (object_hash, t_data.internal,
d_meta_data, d_ctype_data)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg))
# hex content type delta may be zero
msg = '%s %s t:0,m:%x' % (object_hash, t_data.internal, d_meta_data)
expected = dict(object_hash=object_hash,
ts_data=t_data,
ts_meta=t_meta,
ts_ctype=t_data)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg))
# unexpected zero delta is tolerated
msg = '%s %s m:0' % (object_hash, t_data.internal)
expected = dict(object_hash=object_hash,
ts_meta=t_data,
ts_data=t_data,
ts_ctype=t_data)
self.assertEqual(expected, ssync_receiver.decode_missing(msg))
# unexpected subparts in timestamp delta part are tolerated
msg = '%s %s c:12345,m:%x,junk' % (object_hash,
t_data.internal,
d_meta_data)
expected = dict(object_hash=object_hash,
ts_meta=t_meta,
ts_data=t_data,
ts_ctype=t_data)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg))
# extra message parts tolerated
msg = '%s %s m:%x future parts' % (object_hash,
t_data.internal,
d_meta_data)
expected = dict(object_hash=object_hash,
ts_meta=t_meta,
ts_data=t_data,
ts_ctype=t_data)
self.assertEqual(expected, ssync_receiver.decode_missing(msg))
def test_encode_wanted(self):
ts_iter = make_timestamp_iter()
old_t_data = next(ts_iter)
t_data = next(ts_iter)
old_t_meta = next(ts_iter)
t_meta = next(ts_iter)
remote = {
'object_hash': 'theremotehash',
'ts_data': t_data,
'ts_meta': t_meta,
}
# missing
local = {}
expected = 'theremotehash dm'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# in-sync
local = {
'ts_data': t_data,
'ts_meta': t_meta,
}
expected = None
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# out-of-sync
local = {
'ts_data': old_t_data,
'ts_meta': old_t_meta,
}
expected = 'theremotehash dm'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# old data
local = {
'ts_data': old_t_data,
'ts_meta': t_meta,
}
expected = 'theremotehash d'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# old metadata
local = {
'ts_data': t_data,
'ts_meta': old_t_meta,
}
expected = 'theremotehash m'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# in-sync tombstone
local = {
'ts_data': t_data,
}
expected = None
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# old tombstone
local = {
'ts_data': old_t_data,
}
expected = 'theremotehash d'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
if __name__ == '__main__':
unittest.main()
|
[
"Wayne [email protected]"
] |
Wayne [email protected]
|
0dfe69892b820556fd59cbf745f85f75f750a462
|
4fd5b888aff049ecf84fac6969e2d6950c8bf683
|
/pyjob/tests/test_cexec.py
|
636eb93da237f009416b54d5176ddd4f1318b582
|
[
"MIT"
] |
permissive
|
FilomenoSanchez/pyjob
|
1d705fa9af4f8ba8827743f6b0c21a2ff9500ff2
|
b8dac5e53570f44370c222f97f063d666eeb0d64
|
refs/heads/master
| 2020-07-18T22:24:02.147767 | 2020-03-03T09:51:29 | 2020-03-03T09:51:29 | 206,324,268 | 0 | 0 |
MIT
| 2019-09-04T13:20:52 | 2019-09-04T13:20:51 | null |
UTF-8
|
Python
| false | false | 2,326 |
py
|
__author__ = 'Felix Simkovic'
import os
import pytest
import sys
from pyjob.cexec import cexec
from pyjob.exception import PyJobExecutableNotFoundError, PyJobExecutionError
class TestCexec(object):
def test_1(self):
stdout = cexec([sys.executable, '-c', 'import sys; print("hello"); sys.exit(0)'])
assert stdout == 'hello'
def test_2(self):
with pytest.raises(PyJobExecutionError):
cexec([sys.executable, '-c', 'import sys; sys.exit(1)'])
def test_3(self):
cmd = [sys.executable, '-c', 'import sys; print("hello"); sys.exit(1)']
stdout = cexec(cmd, permit_nonzero=True)
assert stdout == 'hello'
def test_4(self):
if sys.version_info < (3, 0):
cmd = [sys.executable, '-c', 'import sys; print(raw_input()); sys.exit(0)']
else:
cmd = [sys.executable, '-c', 'import sys; print(input()); sys.exit(0)']
stdout = cexec(cmd, stdin='hello')
assert stdout == 'hello'
def test_5(self):
cmd = [sys.executable, '-c', 'import os, sys; print(os.getcwd()); sys.exit(0)']
directory = os.path.join(os.getcwd())
stdout = cexec(cmd, cwd=directory)
assert stdout == directory
def test_6(self):
cmd = [sys.executable, '-c', 'import sys; print("hello"); sys.exit(0)']
fname = 'test.log'
with open(fname, 'w') as f:
stdout = cexec(cmd, stdout=f)
assert stdout is None
with open(fname, 'r') as f:
assert f.read().strip() == 'hello'
pytest.helpers.unlink([fname])
def test_7(self):
cmd = [sys.executable, '-c', 'import os, sys; print(os.getcwd()); sys.exit("error message")']
directory = os.path.join(os.getcwd())
with open('stdout.log', 'w') as fstdout, open('stderr.log', 'w') as fstderr:
stdout = cexec(cmd, stdout=fstdout, stderr=fstderr, permit_nonzero=True)
assert stdout is None
with open('stdout.log', 'r') as f:
assert f.read().strip() == directory
with open('stderr.log', 'r') as f:
assert f.read().strip() == 'error message'
pytest.helpers.unlink(['stdout.log', 'stderr.log'])
def test_8(self):
with pytest.raises(PyJobExecutableNotFoundError):
cexec(['fjezfsdkj'])
|
[
"[email protected]"
] | |
34b81f7e23b59b2b431174df1e4acd91d52f4fd2
|
a1d30d667cbf814db1809c31cf68ba75c01f819c
|
/Google/2. medium/274. H-Index.py
|
86a0a49e17a3c9ca641aab1bfbb569642dbecf80
|
[] |
no_license
|
yemao616/summer18
|
adb5f0e04e6f1e1da6894b0b99a61da3c5cba8ee
|
8bb17099be02d997d554519be360ef4aa1c028e3
|
refs/heads/master
| 2021-06-02T04:32:07.703198 | 2020-01-09T17:45:29 | 2020-01-09T17:45:29 | 110,744,323 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,651 |
py
|
# Given an array of citations (each citation is a non-negative integer) of a researcher, write a function to compute the researcher's h-index.
# According to the definition of h-index on Wikipedia: "A scientist has index h if h of his/her N papers have at least h citations each, and the other N − h papers have no more than h citations each."
# For example, given citations = [3, 0, 6, 1, 5], which means the researcher has 5 papers in total and each of them had received 3, 0, 6, 1, 5 citations respectively. Since the researcher has 3 papers with at least 3 citations each and the remaining two with no more than 3 citations each, his h-index is 3.
# Note: If there are several possible values for h, the maximum one is taken as the h-index.
class Solution(object):
def hIndex(self, citations): # O(nlgn)
citations.sort()
n = len(citations)
for i in xrange(n):
if citations[i] >= n-i:
return n-i
return 0
def hIndex(self, citations): # O(n) space, O(n) time
n = len(citations)
citeCount = [0] * (n+1)
for c in citations:
if c >= n:
citeCount[n] += 1
else:
citeCount[c] += 1
i = n-1
while i >= 0:
citeCount[i] += citeCount[i+1]
if citeCount[i+1] >= i+1:
return i+1
i -= 1
return 0
# Further Thoughts
# Is it possible to have multiple h-values?
# The answer is NO. One can find this intuitively from Figure 1: the dashed line y = x crosses the
# histogram once and only once, because the sorted bars are monotonic. It can also be proven from
# the definition of the h-index.
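# A minimal usage sketch (not part of the original solution): it runs the worked
# example from the problem statement above; the counting-sort variant defined
# last is the one that gets called, and it should print 3.
if __name__ == '__main__':
    print(Solution().hIndex([3, 0, 6, 1, 5]))  # expected output: 3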
|
[
"[email protected]"
] | |
8104d13b788a66efc1a7bcff6fd2911d319a2e9c
|
159bd4c0274271aae7cf2d42bc6819957ee626c9
|
/viz.py
|
5f91147e0c9a32109927e7d3d101b098362d385e
|
[] |
no_license
|
Schuck9/UG-in-Weighted-Network
|
aaa9810e8806d6130ec87c275a169009da460abc
|
8e2a6ebde2ed4b9e2f6d2a2ca9d84140c2c5e792
|
refs/heads/master
| 2021-03-01T04:03:05.983146 | 2020-04-24T02:51:34 | 2020-04-24T02:51:34 | 245,752,196 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,801 |
py
|
"""
Ultimatum Game in complex network Visualization
@date: 2020.3.19
@author: Tingyu Mo
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from img_concat import image_compose
def pq_distribution(value_list):
    x_axis = np.arange(0,1.05,1/20) # 21 discrete points, range 0~1, step size 0.05
y_axis = np.zeros(x_axis.size)
for v in value_list:
for i in range(x_axis.size):
if abs(v-x_axis[i]) < 0.05:
y_axis[i] += 1
return y_axis
def pq_distribution_viz(RecordName,time_option = "all"):
# Epoch_list = ['1','100','1000','20000']
Epoch_list = ['100','1000','20000']
result_dir = "./result"
record_dir = os.path.join(result_dir,RecordName)
checkpoint_list = os.listdir(record_dir)
parse_str = checkpoint_list[0].split("_")
del(parse_str[-1])
info_str = '_'.join(parse_str)
save_path =os.path.join(record_dir, info_str+'.jpg')
y_axis_plist = []
y_axis_qlist = []
for Epoch in Epoch_list:
info_e = info_str+"_"+Epoch
Epoch_dir = os.path.join(record_dir,info_e )
strategy_path = os.path.join(Epoch_dir,info_e+"_strategy.csv")
strategy = pd.read_csv(strategy_path)
# strategy.reset_index(drop = True)
pq_array = strategy.values
# np.delete(pq_array,1,axis=1)
p = pq_array[0][1:]
q = pq_array[1][1:]
# del(p[0])
# del(q[0])
p = pq_distribution(p)
q = pq_distribution(q)
y_axis_plist.append(p/10000)
y_axis_qlist.append(q/10000)
plt.figure()
x_axis = np.arange(0,1.05,1/20)
# plt.rcParams['font.sans-serif']=['SimHei']
# plt.rcParams['axes.unicode_minus'] = False
# # plt.title("")
plt.xlabel("p")#x轴p上的名字
plt.ylabel("D(p)")#y轴上的名字
plt.plot(x_axis, y_axis_plist[0] ,marker='^',linestyle='-',color='skyblue', label='t = 100')
plt.plot(x_axis, y_axis_plist[1], marker='s',linestyle='-',color='green', label='t = 1000')
plt.plot(x_axis, y_axis_plist[2], marker='*',linestyle='-',color='red', label='t = 20000')
# plt.plot(x_axis, thresholds, color='blue', label='threshold')
    plt.legend(loc = 'upper right')  # show the legend
plt.savefig(save_path)
print("Figure has been saved to: ",save_path)
plt.show()
def avg_pq_viz():
'''
Figure 2 like
'''
u = 0.1
info = 'RG_Weighted_0.4'
save_path = "./result/{}_u_{}.jpg".format(info,u)
x_label = [0.001,0.01,0.1,1,10]
x_axis = np.log10(x_label)
avg_list = [ (0.5,0.5),
(0.494011917,0.496625418),(0.498278643,0.471188505),
(0.341997159,0.261274376),(0.124914813,0.115971024),
]
p_axis = list()
q_axis = list()
for stg in avg_list:
p,q = stg
p_axis.append(p)
q_axis.append(q)
plt.figure()
# plt.rcParams['font.family'] = ['sans-serif']
# plt.rcParams['font.sans-serif'] = ['SimHei']
# plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
plt.title(" {} u={}".format(info,u))
plt.xlabel("Selection strength(w)")#x轴p上的名字
plt.ylabel("Mean")#y轴上的名字
plt.xticks(x_axis,x_label,fontsize=16)
plt.plot(x_axis, p_axis,marker='^',linestyle='-',color='skyblue', label='Offer (p)')
plt.plot(x_axis, q_axis, marker='s',linestyle='-',color='red', label='Demand (q)')
# plt.plot(x_axis, thresholds, color='blue', label='threshold')
    plt.legend(loc = 'upper right')  # show the legend
plt.savefig(save_path)
print("Figure has been saved to: ",save_path)
plt.show()
def data_loader(data_path):
weight_axis = [0.25, 0.3, 0.35, 0.4, 0.55, 0.7, 0.85]
weight_axis = [str(x )for x in weight_axis]
u = [0.001, 0.01, 0.1]
u = [str(i) for i in u]
w = [0.001, 0.01, 0.1, 1, 10]
w = [str(i) for i in w]
data = pd.read_excel(data_path)
data_dict = dict()
for weight_key in weight_axis:
data_dict[weight_key] = dict()
for u_key in u:
data_dict[weight_key][u_key] =dict()
# for w_key in w:
# data_dict[weight_key][u_key][w_key] = np.zeros([1,2])
pq_data =data[['p','q']].dropna()
for i,weight_key in enumerate(weight_axis):
weight_data = pq_data.iloc[15*i:15*(i+1)]
for j,u_key in enumerate(u):
u_data = weight_data.iloc[5*j:5*(j+1)].values
for k,w_key in enumerate(w):
# print(u_data[k])
data_dict[weight_key][u_key][w_key] = u_data[k]
print("data loaded!")
return data_dict
def weighted_graph_viz():
data_path ='./result/Result_data.xlsx'
data_dict= data_loader(data_path)
weight_axis = [0.25, 0.3, 0.35, 0.4, 0.55, 0.7, 0.85]
weight_axis_str = [str(x )for x in weight_axis]
x_axis = weight_axis
pq = ['Offer(p)','Demond(q)']
u = [0.001, 0.01, 0.1]
u = [str(i) for i in u]
w = [0.001, 0.01, 0.1, 1, 10]
w = [str(i) for i in w]
index = np.array([[0,2,4],[1,3,5]])
for k ,role in enumerate(pq):
for j,u_ in enumerate(u):
y_list = []
for w_ in w:
ls = []
for i,weight_key in enumerate(weight_axis_str):
ls.append(data_dict[weight_key][u_][w_][k])
y_list.append(ls)
# print("y_axis done!")
info_str = role+"_"+u_
save_path = './result/Fig/{}_{}.jpg'.format(index[k][j],info_str)
plt.figure()
# plt.rcParams['font.sans-serif']=['SimHei']
# plt.rcParams['axes.unicode_minus'] = False
plt.title(info_str)
plt.xlabel("weight")#x轴p上的名字
plt.ylabel("{}".format(role))#y轴上的名字
plt.plot(x_axis, y_list[0] ,marker='>',linestyle='-',color='purple', label='w = 0.001')
plt.plot(x_axis, y_list[1] ,marker='^',linestyle='-',color='skyblue', label='w = 0.01')
plt.plot(x_axis, y_list[2], marker='s',linestyle='-',color='green', label='w = 0.1')
plt.plot(x_axis, y_list[3], marker='*',linestyle='-',color='red', label='w = 1')
plt.plot(x_axis, y_list[4], marker='x',linestyle='-',color='black', label='w = 10')
# plt.plot(x_axis, thresholds, color='blue', label='threshold')
            plt.legend(loc = 'upper right')  # show the legend
plt.savefig(save_path)
print("Figure has been saved to: ",save_path)
# plt.show()
if __name__ == '__main__':
# RecordName ='2020-03-03-09-14-20'
# time_option = "all"
# pq_distribution_viz(RecordName,time_option)
# avg_pq_viz()
weighted_graph_viz()
image_compose("./result/Fig/")
|
[
"[email protected]"
] | |
7ef8c7d8fbcd5f4846d737fcee832ff469bab76b
|
7dc502a62dcc4ff39f572040ba180315981e3ba8
|
/src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2021_11_01_preview/operations/_operations.py
|
d2af43806a36b987abf97f3c948a353bae6592c7
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
dbradish-microsoft/azure-cli-extensions
|
2bec15b90666fee7a0a833b407ca2619e25fed86
|
fe44a1bb123a58b7e8248850bdc20555ca893406
|
refs/heads/master
| 2023-08-31T15:19:35.673988 | 2022-02-09T08:50:18 | 2022-02-09T08:50:18 | 252,317,425 | 0 | 0 |
MIT
| 2020-04-02T00:29:14 | 2020-04-02T00:29:13 | null |
UTF-8
|
Python
| false | false | 5,575 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.ContainerService/operations')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2021_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Gets a list of operations.
Gets a list of operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_11_01_preview.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerService/operations'} # type: ignore
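# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the generated file).  As the class
# docstring notes, Operations is reached through a generated client rather
# than instantiated directly.  The client and credential classes named below
# come from the public azure-mgmt-containerservice / azure-identity packages
# and are an assumption here; the vendored aks-preview copy wires this
# operation group up internally in the same way.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.containerservice import ContainerServiceClient
#
#     client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
#     for op in client.operations.list():   # ItemPaged over OperationListResult.value
#         print(op)
# ---------------------------------------------------------------------------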
|
[
"[email protected]"
] | |
f33f85ac4aba4e4d933e8040620605b393102a38
|
02bdaef6edebcfc1c46bb62dbc79a3a805946ee7
|
/ns/scheduler/sp.py
|
954d9e976b5227933b1360fdf17c012f18aff21f
|
[
"Apache-2.0"
] |
permissive
|
chapter09/ns.py
|
707ea084306ff04d606a25635d80cfe741183df8
|
d8fb5f838e8d163e9b5a872326282dac2238b9c5
|
refs/heads/main
| 2023-06-25T08:47:05.855105 | 2021-07-30T06:49:19 | 2021-07-30T06:49:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,781 |
py
|
"""
Implements a Static Priority (SP) server.
"""
import uuid
from collections import defaultdict as dd
from collections.abc import Callable
import simpy
from ns.packet.packet import Packet
class SPServer:
"""
Parameters
----------
env: simpy.Environment
The simulation environment.
rate: float
The bit rate of the port.
priorities: list or dict
This can be either a list or a dictionary. If it is a list, it uses the flow_id ---
or class_id, if class-based static priority scheduling is activated using the
`flow_classes' parameter below --- as its index to look for the flow (or class)'s
corresponding priority. If it is a dictionary, it contains (flow_id or class_id
-> priority) pairs for each possible flow_id or class_id.
flow_classes: function
This is a function that matches flow_id's to class_ids, used to implement class-based
static priority scheduling. The default is an identity lambda function, which is
equivalent to flow-based WFQ.
zero_buffer: bool
Does this server have a zero-length buffer? This is useful when multiple
basic elements need to be put together to construct a more complex element
with a unified buffer.
zero_downstream_buffer: bool
Does this server's downstream element has a zero-length buffer? If so, packets
may queue up in this element's own buffer rather than be forwarded to the
next-hop element.
debug: bool
If True, prints more verbose debug information.
"""
def __init__(self,
env,
rate,
priorities,
flow_classes: Callable = lambda x: x,
zero_buffer=False,
zero_downstream_buffer=False,
debug=False) -> None:
self.env = env
self.rate = rate
self.prio = priorities
self.flow_classes = flow_classes
self.element_id = uuid.uuid1()
self.stores = {}
self.prio_queue_count = {}
if isinstance(priorities, list):
priorities_list = priorities
elif isinstance(priorities, dict):
priorities_list = priorities.values()
else:
raise ValueError(
'Priorities must be either a list or a dictionary.')
for prio in priorities_list:
if prio not in self.prio_queue_count:
self.prio_queue_count[prio] = 0
self.priorities_list = sorted(self.prio_queue_count, reverse=True)
self.packets_available = simpy.Store(self.env)
self.current_packet = None
self.byte_sizes = dd(lambda: 0)
self.packets_received = 0
self.out = None
self.upstream_updates = {}
self.upstream_stores = {}
self.zero_buffer = zero_buffer
self.zero_downstream_buffer = zero_downstream_buffer
if self.zero_downstream_buffer:
self.downstream_stores = {}
self.debug = debug
self.action = env.process(self.run())
def update(self, packet):
"""The packet has just been retrieved from this element's own buffer, so
update internal housekeeping states accordingly."""
if self.zero_buffer:
self.upstream_stores[packet].get()
del self.upstream_stores[packet]
self.upstream_updates[packet](packet)
del self.upstream_updates[packet]
if self.debug:
print(
f"Sent out packet {packet.packet_id} from flow {packet.flow_id} "
f"belonging to class {self.flow_classes(packet.packet_id)} "
f"of priority {packet.prio[self.element_id]}")
self.prio_queue_count[packet.prio[self.element_id]] -= 1
if self.flow_classes(packet.flow_id) in self.byte_sizes:
self.byte_sizes[self.flow_classes(packet.flow_id)] -= packet.size
else:
raise ValueError("Error: the packet is from an unrecorded flow.")
def packet_in_service(self) -> Packet:
"""
Returns the packet that is currently being sent to the downstream element.
Used by a ServerMonitor.
"""
return self.current_packet
def byte_size(self, queue_id) -> int:
"""
Returns the size of the queue for a particular queue_id, in bytes.
Used by a ServerMonitor.
"""
if queue_id in self.byte_sizes:
return self.byte_sizes[queue_id]
return 0
def size(self, queue_id) -> int:
"""
Returns the size of the queue for a particular queue_id, in the
number of packets. Used by a ServerMonitor.
"""
if queue_id in self.stores:
return len(self.stores[queue_id].items)
return 0
def all_flows(self) -> list:
"""
Returns a list containing all the flow IDs.
"""
return self.byte_sizes.keys()
def total_packets(self) -> int:
"""
Returns the total number of packets currently in the queues.
"""
return sum(self.prio_queue_count.values())
def run(self):
"""The generator function used in simulations."""
while True:
for prio in self.priorities_list:
if self.prio_queue_count[prio] > 0:
if self.zero_downstream_buffer:
ds_store = self.downstream_stores[prio]
packet = yield ds_store.get()
packet.prio[self.element_id] = prio
self.current_packet = packet
yield self.env.timeout(packet.size * 8.0 / self.rate)
self.out.put(packet,
upstream_update=self.update,
upstream_store=self.stores[prio])
self.current_packet = None
else:
store = self.stores[prio]
packet = yield store.get()
packet.prio[self.element_id] = prio
self.update(packet)
self.current_packet = packet
yield self.env.timeout(packet.size * 8.0 / self.rate)
self.out.put(packet)
self.current_packet = None
break
if self.total_packets() == 0:
yield self.packets_available.get()
def put(self, packet, upstream_update=None, upstream_store=None):
""" Sends a packet to this element. """
self.packets_received += 1
flow_id = packet.flow_id
self.byte_sizes[self.flow_classes(flow_id)] += packet.size
if self.total_packets() == 0:
self.packets_available.put(True)
prio = self.prio[self.flow_classes(flow_id)]
self.prio_queue_count[prio] += 1
if self.debug:
print(
"At time {:.2f}: received packet {:d} from flow {} belonging to class {}"
.format(self.env.now, packet.packet_id, flow_id,
self.flow_classes(flow_id)))
if not prio in self.stores:
self.stores[prio] = simpy.Store(self.env)
if self.zero_downstream_buffer:
self.downstream_stores[prio] = simpy.Store(self.env)
if self.zero_buffer and upstream_update is not None and upstream_store is not None:
self.upstream_stores[packet] = upstream_store
self.upstream_updates[packet] = upstream_update
if self.zero_downstream_buffer:
self.downstream_stores[prio].put(packet)
return self.stores[prio].put(packet)
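# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It wires an SPServer to a trivial sink and pushes two stand-in packets
# through it; the stand-in classes below are hypothetical and only mimic the
# attributes this scheduler actually touches (flow_id, packet_id, size and
# the per-element `prio` dict).  A real simulation would use the packet
# generator and sink elements shipped with ns.py instead.
# ---------------------------------------------------------------------------
if __name__ == '__main__':

    class _SinkStandIn:
        """Accepts packets handed over by SPServer.out."""
        def put(self, packet, upstream_update=None, upstream_store=None):
            print(f"sink received packet {packet.packet_id} from flow {packet.flow_id}")

    class _PacketStandIn:
        """Carries only the fields SPServer reads."""
        def __init__(self, flow_id, packet_id, size):
            self.flow_id = flow_id
            self.packet_id = packet_id
            self.size = size          # bytes
            self.prio = {}            # filled in per scheduling element

    env = simpy.Environment()
    # flow 0 maps to priority 1 (served first), flow 1 to priority 0
    server = SPServer(env, rate=8000.0, priorities={0: 1, 1: 0})
    server.out = _SinkStandIn()
    server.put(_PacketStandIn(flow_id=1, packet_id=1, size=100))
    server.put(_PacketStandIn(flow_id=0, packet_id=2, size=100))
    # packet 2 (higher static priority) is transmitted before packet 1
    env.run(until=1.0)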
|
[
"[email protected]"
] | |
1a87fb2af2a759146329e7e2d18173418219e3fb
|
e0045eec29aab56212c00f9293a21eb3b4b9fe53
|
/project/report/__init__.py
|
9758b46594df454fc0784fccf9aba297bf09cd45
|
[] |
no_license
|
tamam001/ALWAFI_P1
|
a3a9268081b9befc668a5f51c29ce5119434cc21
|
402ea8687c607fbcb5ba762c2020ebc4ee98e705
|
refs/heads/master
| 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 131 |
py
|
# -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from . import project_report
|
[
"[email protected]"
] | |
9e67469d766ce79591979d03e6ccc685e0a7c7b3
|
0d347de6f7fb39ddd3f16905056f95d8397d0f72
|
/app/main/views.py
|
fe26140f3727e94324c4fff33dd4e6932ab2f8eb
|
[] |
no_license
|
vincentmuya/Movie_review
|
83564d36a5e76a49ccebb2c28f633a68f3c050b0
|
5cb1c0c49e790d27086acbb7de1342c5cc3eed60
|
refs/heads/master
| 2021-05-16T14:38:38.566432 | 2018-02-03T08:43:49 | 2018-02-03T08:43:49 | 118,459,602 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,295 |
py
|
from flask import render_template,request,redirect,url_for,abort
from . import main
from ..requests import get_movies,get_movie,search_movie
from ..models import Review,User
from .forms import ReviewForm,UpdateProfile
from flask_login import login_required
from .. import db,photos
#views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
# Getting popular movie
popular_movies = get_movies('popular')
upcoming_movie = get_movies('upcoming')
now_showing_movie = get_movies('now_playing')
title = 'Home - Welcome to The best Movie Review Website Online'
search_movie = request.args.get('movie_query')
if search_movie:
return redirect(url_for('.search', movie_name = search_movie))
else:
return render_template('index.html', title = title, popular = popular_movies, upcoming = upcoming_movie, now_showing = now_showing_movie )
@main.route('/movie/<int:id>')
def movie(id):
'''
view movie page function that returns the movies details page and its data
'''
movie = get_movie(id)
title = f'{movie.title}'
reviews = Review.get_reviews(movie.id)
return render_template('movie.html',title = title, movie = movie, reviews = reviews)
@main.route('/search/<movie_name>')
def search(movie_name):
'''
view function to display search results
'''
movie_name_list = movie_name.split(" ")
movie_name_format = "+".join(movie_name_list)
searched_movies = search_movie(movie_name_format)
title = f'search results for {movie_name}'
return render_template('search.html',movies = searched_movies)
@main.route('/movie/review/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_review(id):
form = ReviewForm()
movie = get_movie(id)
if form.validate_on_submit():
title = form.title.data
review = form.review.data
new_review = Review(movie.id,title,movie.poster,review)
new_review.save_review()
return redirect(url_for('.movie',id=movie.id))
title = f'{movie.title} review'
return render_template('new_review.html',title = title, review_form = form, movie = movie)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
|
[
"[email protected]"
] | |
0e1b6253f6161cf18f67b4792dc671a337070222
|
0a9d9477edb5068bd2b8d31e8f6c69f34b885be7
|
/toto/toto/middlewares.py
|
d76f5dbd13814ea6a61996ab33be2eee5f2b36da
|
[] |
no_license
|
brady-wang/python
|
ddb534c07ed9d988ae6ad132b1ab088f47e563e9
|
06afcc55b4a9e68e3d91ea7bf67b07bb415b24f1
|
refs/heads/master
| 2021-09-06T00:41:51.814355 | 2018-02-01T01:27:23 | 2018-02-01T01:27:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,593 |
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class TotoSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class TotoDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
[
"[email protected]"
] | |
452f4c80e4947afd07246f7965349ef536026b55
|
5b3d8f56f4d18dc8809f9f5aa7d2a7089cdbf489
|
/TablesRedo/RParagraphs.py
|
69dc4358f20ceaff1920f26f5bb299ac9d843406
|
[] |
no_license
|
heyliljill/edpsych-cloned
|
89ba1a827ed66651b7387b25bc2c188ff344e8d1
|
ba02e4789e390bb6488b11608b994ee5678a4b30
|
refs/heads/master
| 2020-07-26T00:51:41.004018 | 2019-09-14T17:26:45 | 2019-09-14T17:26:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,032 |
py
|
# 5. (Table 7) Mean number of paragraphs represented in reading, per passage (3 paragraphs) (mrimpara,mriipara...4 total)
# a. 3 paragraphs (=3), 2 paragraphs (=2), 1 paragraph (=1), topic only (=4), topic related (=5)
# b. Break up into interest, difficulty, gender
# c. By overall, WDI - Reading subsample, LWDI - Reading subsample
f = open('manova-paragraphs','w+')
def main():
GLMtext = """
GLM mrimpara mriipara mrbmpara mrbipara BY sex
/WSFACTOR=interest 2 Polynomial diff 2 Polynomial
  /METHOD=SSTYPE(3)
/EMMEANS=TABLES(sex) COMPARE ADJ(LSD)
/EMMEANS=TABLES(interest) COMPARE ADJ(LSD)
/EMMEANS=TABLES(diff) COMPARE ADJ(LSD)
/EMMEANS=TABLES(sex*interest)
/EMMEANS=TABLES(sex*diff)
/EMMEANS=TABLES(interest*diff)
/EMMEANS=TABLES(sex*interest*diff)
/CRITERIA=ALPHA(.05)
/WSDESIGN=interest diff interest*diff
/DESIGN=sex.
"""
switchtext = "DATASET ACTIVATE main.\n"
f.write("DATASET NAME main.\n")
text1 = """DATASET COPY mainCopy1 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy1.\n
recode mrimpara mriipara mrbmpara mrbipara (1=1) (else = 0)."""
f.write(text1)
f.write(GLMtext)
f.write(switchtext)
text2 = """DATASET COPY mainCopy2 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy2.\n
recode mrimpara mriipara mrbmpara mrbipara (2=1) (else = 0)."""
f.write(text2)
f.write(GLMtext)
f.write(switchtext)
text3 = """DATASET COPY mainCopy3 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy3.\n
recode mrimpara mriipara mrbmpara mrbipara (3=1) (else = 0)."""
f.write(text3)
f.write(GLMtext)
f.write(switchtext)
text4 = """DATASET COPY mainCopy4 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy4.\n
recode mrimpara mriipara mrbmpara mrbipara (4=1) (else = 0)."""
f.write(text4)
f.write(GLMtext)
f.write(switchtext)
text5 = """DATASET COPY mainCopy5 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy5.\n
recode mrimpara mriipara mrbmpara mrbipara (5=1) (else = 0)."""
f.write(text5)
f.write(GLMtext)
f.write(switchtext)
main()
f.close()
|
[
"[email protected]"
] | |
afcb238fc31c3171d21a8cf02075b81d5fbe3ba5
|
115d4be6df61f1e555826f49c2fd605ae83107bd
|
/solutions/217_contains-duplicate.py
|
2241ba5c5b67ae9984a66fbc4398eb9e2eefeaf8
|
[] |
no_license
|
ipudu/leetcode
|
82dd12236b31b5fc48e20b8cccadc2392bce7b52
|
0e4b0b83c8d3fb50b7db1dc0e1bc55942e91d811
|
refs/heads/master
| 2021-06-07T14:04:04.114618 | 2020-02-27T21:52:54 | 2020-02-27T21:52:54 | 108,054,385 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 740 |
py
|
"""
Given an array of integers, find if the array contains any duplicates.
Your function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.
Example 1:
Input: [1,2,3,1]
Output: true
Example 2:
Input: [1,2,3,4]
Output: false
Example 3:
Input: [1,1,1,3,3,4,3,2,4,2]
Output: true
"""
class Solution:
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
d = {}
for n in nums:
            if n in d:
d[n] += 1
else:
d[n] = 1
for key, value in d.items():
if value >= 2:
return True
return False
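# Quick sanity check (usage sketch) against the examples in the docstring above:
if __name__ == "__main__":
    s = Solution()
    print(s.containsDuplicate([1, 2, 3, 1]))                    # True
    print(s.containsDuplicate([1, 2, 3, 4]))                    # False
    print(s.containsDuplicate([1, 1, 1, 3, 3, 4, 3, 2, 4, 2]))  # True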
|
[
"[email protected]"
] | |
5ed045ae7d418ee7f741a74b81d0d00af2b6e967
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/4131.py
|
82826060d0b03ea8b01335c5cf9ea63f6c7224ac
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 818 |
py
|
#! /usr/bin/python
import sets
def readInt():
return int(raw_input())
def readInts():
return map(int, raw_input().split())
def main():
t = readInt()
for i in range(t):
op = 'Case #' + str(i+1) + ': '
r1 = readInt()
for i1 in range(4):
if i1 == r1-1:
g1 = set(readInts())
else:
raw_input()
r2 = readInt()
for i1 in range(4):
if i1 == r2-1:
g2 = set(readInts())
else:
raw_input()
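        # the numbers present in both chosen rows are the candidates:
        # exactly one -> that is the answer; none -> the volunteer cheated;
        # more than one -> the magician cannot know which number was chosen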
c = g1.intersection(g2)
if len(c) == 1:
op = op + str(c.pop())
elif len(c) == 0:
op = op + 'Volunteer cheated!'
else:
op = op + 'Bad magician!'
print op
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
16f8201b5bb7154c572064a4886dac1f421c6458
|
8a4f5627b58aa54fd6a549f90b3c79b6e285c638
|
/Python/Fibonacci SubProblems/tiling_problem.py
|
a6972bca002d507375da62c6b6a8d8a24a3ccb11
|
[] |
no_license
|
MrChepe09/Dynamic-Programming-Series
|
334f24af4f834f88840bf5222746d2b7452a33ee
|
d49e5bd7cb329b0b0f1382eb8627ba0427383499
|
refs/heads/master
| 2022-11-29T09:40:01.065561 | 2020-08-07T05:15:21 | 2020-08-07T05:15:21 | 283,384,811 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 328 |
py
|
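# Bottom-up DP for the classic tiling recurrence (description inferred from the
# code below): a[i] = a[i-1] + a[i-m], with a[i] = 1 for i < m and a[m] = 2.
# For example, tiling(4, 2) builds a = [1, 1, 2, 3, 5] and returns 5.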
def tiling(n, m):
a = [0 for _ in range(n+1)]
for i in range(n+1):
if(i<m):
a[i] = 1
elif(i==m):
a[i] = 2
else:
a[i] = a[i-1] + a[i-m]
return a[n]
test = int(input())
for i in range(test):
n = int(input())
m = int(input())
print(tiling(n, m))
|
[
"[email protected]"
] | |
310e48bb177cd293890bd09abeb7ff05b2c2c63c
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/AtlasTest/DatabaseTest/AthenaPoolTest/share/LArCellContReader_jobOptionsReg.py
|
d8f78625d4d299d402886e24b26a21d14024b1d1
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,239 |
py
|
###############################################################
#
# Job options file
#
## @file LArCellContReader_jobOptionsReg.py
##
## @brief For Athena POOL test: read in LArCellContainers via explicit collections
##
## @author RD Schaffer <[email protected]>
#
#==============================================================
## basic job configuration
import AthenaCommon.AtlasUnixStandardJob
## get a handle to the default top-level algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
## get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
## get a handle to the ApplicationManager
from AthenaCommon.AppMgr import theApp
#--------------------------------------------------------------
# Load POOL support
#--------------------------------------------------------------
import AthenaPoolCnvSvc.ReadAthenaPool
#--------------------------------------------------------------
# Set flags and load det descr
#--------------------------------------------------------------
# For general flags
doAOD = False
doTrigger = False
DetDescrVersion = "ATLAS-CSC-01-02-00"
include( "RecExCond/RecExCommon_flags.py" )
# Set local flags - only need LAr DetDescr
DetFlags.detdescr.ID_setOff()
DetFlags.detdescr.Tile_setOff()
DetFlags.detdescr.Muon_setOff()
# set up all detector description description
include ("RecExCond/AllDet_detDescr.py")
# the correct tag should be specified
svcMgr.IOVDbSvc.GlobalTag="OFLCOND-CSC-00-01-00"
#--------------------------------------------------------------
# Define the output Db parameters (the default value are shown)
#--------------------------------------------------------------
#svcMgr.EventSelector.CollectionType = "ExplicitROOT"
svcMgr.EventSelector.InputCollections = [ "NewEventCollection" ]
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
theApp.EvtMax = 20
#--------------------------------------------------------------
# Application: AthenaPoolTest options
#--------------------------------------------------------------
from AthenaPoolTest.AthenaPoolTestConf import LArCellContFakeReader
topSequence += LArCellContFakeReader( "LArCellContFakeReader" )
from AthenaPoolTest.AthenaPoolTestConf import AthenaPoolTestAttrReader
topSequence += AthenaPoolTestAttrReader( "AthenaPoolTestAttrReader" )
#--------------------------------------------------------------
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
#--------------------------------------------------------------
svcMgr.MessageSvc.OutputLevel = INFO
svcMgr.MessageSvc.debugLimit = 100000
svcMgr.ClassIDSvc.OutputLevel = INFO
svcMgr.AthenaSealSvc.OutputLevel = INFO
svcMgr.MetaDataSvc.OutputLevel = DEBUG
#AthenaPoolTestAttrReader.OutputLevel = DEBUG
LArCellContFakeReader.OutputLevel = DEBUG
#StoreGateSvc = Service( "StoreGateSvc" )
#StoreGateSvc.Dump = TRUE
# No stats printout
include( "AthenaPoolTest/NoStats_jobOptions.py" )
#==============================================================
#
# End of job options file
#
###############################################################
|
[
"[email protected]"
] | |
fc792d3b088a28cbf5de771a55b42ee3a71883f2
|
409c4d0dce72de987dff7c76857499fba8f8b7a0
|
/fastset.py
|
4d0505c494e2d9af7fc369ae1897fd10bb9681e0
|
[] |
no_license
|
crystaleone/test
|
b4fece7fbc4e8ddd6186ea13245c62970c6d7038
|
4af3964bf6a657e888c7850f07a031440ba29e7a
|
refs/heads/master
| 2021-01-18T19:17:36.924170 | 2017-09-19T03:37:01 | 2017-09-19T03:37:01 | 86,895,858 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 567 |
py
|
import set
class Set(set.Set):
def __init__(self, value = []):
self.data = {}
self.concat(value)
def intersect(self, other):
res = {}
for x in other:
if x in self.data:
res[x] = None
return Set(res.keys())
def union(self, other):
res = {}
for x in other:
res[x] = None
for x in self.data.keys():
res[x] = None
return Set(res.keys())
def concat(self, value):
for x in value: self.data[x] = None
def __getitem__(self, ix):
return list(self.data.keys())[ix]
def __repr__(self):
return '<Set:%r>' % list(self.data.keys())
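# Usage sketch (assumes the companion `set` module providing the base Set class
# is importable; element ordering in the repr follows dict insertion order):
#   x = Set([1, 3, 5, 7])
#   y = Set([2, 1, 4, 5, 6])
#   x.intersect(y)   # -> <Set:[1, 5]>
#   x.union(y)       # -> a Set holding 1 through 7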
|
[
"[email protected]"
] | |
5c666cd09a963110ce134af62eb48ad855c72adb
|
673f9b85708affe260b892a4eb3b1f6a0bd39d44
|
/Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/sklearn/feature_extraction/tests/test_text.py
|
f8f741a862594e938c553e625f070ef9c599b4ea
|
[
"MIT"
] |
permissive
|
i2tResearch/Ciberseguridad_web
|
feee3fe299029bef96b158d173ce2d28ef1418e4
|
e6cccba69335816442c515d65d9aedea9e7dc58b
|
refs/heads/master
| 2023-07-06T00:43:51.126684 | 2023-06-26T00:53:53 | 2023-06-26T00:53:53 | 94,152,032 | 14 | 0 |
MIT
| 2023-09-04T02:53:29 | 2017-06-13T00:21:00 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 48,733 |
py
|
# -*- coding: utf-8 -*-
from collections.abc import Mapping
import re
import warnings
import pytest
from scipy import sparse
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import VectorizerMixin
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import IS_PYPY
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.utils._testing import (assert_almost_equal,
assert_warns_message, assert_raise_message,
clean_warning_registry,
SkipTest, assert_no_warnings,
fails_if_pypy, assert_allclose_dense_sparse,
skip_if_32bit)
from collections import defaultdict
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('é', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = 'àáâãäåçèéêë'
expected = 'aaaaaaceeee'
assert strip_accents_unicode(a) == expected
a = 'ìíîïñòóôõöùúûüý'
expected = 'iiiinooooouuuuy'
assert strip_accents_unicode(a) == expected
# check some arabic
a = '\u0625' # alef with a hamza below: إ
expected = '\u0627' # simple alef: ا
assert strip_accents_unicode(a) == expected
# mix letters accentuated and not
a = "this is à test"
expected = 'this is a test'
assert strip_accents_unicode(a) == expected
# strings that are already decomposed
a = "o\u0308" # o with diaresis
expected = "o"
assert strip_accents_unicode(a) == expected
# combining marks by themselves
a = "\u0300\u0301\u0302\u0303"
expected = ""
assert strip_accents_unicode(a) == expected
# Multiple combining marks on one character
a = "o\u0308\u0304"
expected = "o"
assert strip_accents_unicode(a) == expected
def test_to_ascii():
# check some classical latin accentuated symbols
a = 'àáâãäåçèéêë'
expected = 'aaaaaaceeee'
assert strip_accents_ascii(a) == expected
a = "ìíîïñòóôõöùúûüý"
expected = 'iiiinooooouuuuy'
assert strip_accents_ascii(a) == expected
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert strip_accents_ascii(a) == expected
# mix letters accentuated and not
a = "this is à test"
expected = 'this is a test'
assert strip_accents_ascii(a) == expected
@pytest.mark.parametrize('Vectorizer', (CountVectorizer, HashingVectorizer))
def test_word_analyzer_unigrams(Vectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mangé du kangourou ce midi, "
"c'était pas très bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert wa(text) == expected
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert wa(text) == expected
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert wa(text) == expected
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mangé du kangourou ce midi, "
" c'était pas très bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert wa(text) == expected
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mangé du kangourou ce midi, "
"c'était pas très bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert wa(text) == expected
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert wa(text) == expected
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
with pytest.raises(UnicodeDecodeError):
wa(text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
with pytest.raises(UnicodeDecodeError):
ca(text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert cnga(text)[:5] == expected
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert cnga(text)[-5:] == expected
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert cnga(text)[:5] == expected
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert cnga(text)[-5:] == expected
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert cnga(text)[:5] == expected
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert cnga(text)[:5] == expected
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert cnga(text)[-5:] == expected
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert cnga(text)[:6] == expected
def test_word_ngram_analyzer():
cnga = CountVectorizer(analyzer='word', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['this is test', 'is test really', 'test really met']
assert cnga(text)[:3] == expected
expected = ['test really met harry yesterday',
'this is test really met harry',
'is test really met harry yesterday']
assert cnga(text)[-3:] == expected
cnga_file = CountVectorizer(input='file', analyzer='word',
ngram_range=(3, 6)).build_analyzer()
file = StringIO(text)
assert cnga_file(file) == cnga(text)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert vect.vocabulary_ == vocab
else:
assert set(vect.vocabulary_) == terms
X = vect.transform(JUNK_FOOD_DOCS)
assert X.shape[1] == len(terms)
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
inv = vect.inverse_transform(X)
assert len(inv) == X.shape[0]
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert (set(pipe.named_steps['count'].vocabulary_) ==
set(what_we_like))
assert X.shape[1] == len(what_we_like)
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert "vocabulary contains repeated indices" in str(e).lower()
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert "doesn't contain index" in str(e).lower()
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert cv.get_stop_words() == ENGLISH_STOP_WORDS
cv.set_params(stop_words='_bad_str_stop_')
with pytest.raises(ValueError):
cv.get_stop_words()
cv.set_params(stop_words='_bad_unicode_stop_')
with pytest.raises(ValueError):
cv.get_stop_words()
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert cv.get_stop_words() == set(stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert "empty vocabulary" in str(e).lower()
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert "empty vocabulary" in str(e).lower()
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert X1.shape[1] != X2.shape[1]
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert (tfidf >= 0).all()
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert (tfidf >= 0).all()
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert (tfidf >= 0).all()
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert tfidf[0] == 1
assert tfidf[1] > tfidf[0]
assert tfidf[2] > tfidf[1]
assert tfidf[1] < 2
assert tfidf[2] < 3
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert counts_train[0, v1.vocabulary_["pizza"]] == 2
# build a vectorizer v1 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert counts_test[0, vocabulary["salad"]] == 1
assert counts_test[0, vocabulary["tomato"]] == 1
assert counts_test[0, vocabulary["water"]] == 1
# stop word from the fixed list
assert "the" not in vocabulary
# stop word found automatically by the vectorizer DF thresholding
# words that are high frequent across the complete corpus are likely
# to be not informative (either real stop words of extraction
# artifacts)
assert "copyright" not in vocabulary
# not present in the sample
assert counts_test[0, vocabulary["coke"]] == 0
assert counts_test[0, vocabulary["burger"]] == 0
assert counts_test[0, vocabulary["beer"]] == 0
assert counts_test[0, vocabulary["pizza"]] == 0
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert len(t1.idf_) == len(v1.vocabulary_)
assert tfidf.shape == (n_train, len(v1.vocabulary_))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert tfidf_test.shape == (len(test_data), len(v1.vocabulary_))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert not hasattr(t2, "idf_")
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
with pytest.raises(ValueError):
t3.transform(counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
with pytest.raises(ValueError):
t3.transform(X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert not tv.fixed_vocabulary_
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
with pytest.raises(ValueError):
v3.transform(train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
processor = v3.build_preprocessor()
text = ("J'ai mangé du kangourou ce midi, "
"c'était pas très bon.")
expected = strip_accents_ascii(text)
result = processor(text)
assert expected == result
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
with pytest.raises(ValueError):
v3.build_preprocessor()
# error with bad analyzer type
v3.set_params = '_invalid_analyzer_type_'
with pytest.raises(ValueError):
v3.build_analyzer()
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert tv._tfidf.norm == 'l1'
tv.use_idf = True
assert tv._tfidf.use_idf
tv.smooth_idf = True
assert tv._tfidf.smooth_idf
tv.sublinear_tf = True
assert tv._tfidf.sublinear_tf
# FIXME Remove copy parameter support in 0.24
def test_tfidf_vectorizer_deprecationwarning():
msg = ("'copy' param is unused and has been deprecated since "
"version 0.22. Backward compatibility for 'copy' will "
"be removed in 0.24.")
with pytest.warns(FutureWarning, match=msg):
tv = TfidfVectorizer()
train_data = JUNK_FOOD_DOCS
tv.fit(train_data)
tv.transform(train_data, copy=True)
@fails_if_pypy
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
assert X.dtype == v.dtype
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert np.min(X.data) > -1
assert np.min(X.data) < 0
assert np.max(X.data) > 0
assert np.max(X.data) < 1
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
assert X.dtype == v.dtype
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert ngrams_nnz > token_nnz
assert ngrams_nnz < 2 * token_nnz
# makes the feature values bounded
assert np.min(X.data) > -1
assert np.max(X.data) < 1
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
with pytest.raises(ValueError):
cv.get_feature_names()
assert not cv.fixed_vocabulary_
# test for vocabulary learned from data
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert len(cv.vocabulary_) == n_features
feature_names = cv.get_feature_names()
assert len(feature_names) == n_features
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert idx == cv.vocabulary_.get(name)
# test for custom vocabulary
vocab = ['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water']
cv = CountVectorizer(vocabulary=vocab)
feature_names = cv.get_feature_names()
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad',
'sparkling', 'tomato', 'water'], feature_names)
assert cv.fixed_vocabulary_
for idx, name in enumerate(feature_names):
assert idx == cv.vocabulary_.get(name)
@pytest.mark.parametrize('Vectorizer', (CountVectorizer, TfidfVectorizer))
def test_vectorizer_max_features(Vectorizer):
expected_vocabulary = {'burger', 'beer', 'salad', 'pizza'}
expected_stop_words = {'celeri', 'tomato', 'copyright', 'coke',
'sparkling', 'water', 'the'}
# test bounded number of extracted features
vectorizer = Vectorizer(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert set(vectorizer.vocabulary_) == expected_vocabulary
assert vectorizer.stop_words_ == expected_stop_words
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert 7 == counts_1.max()
assert 7 == counts_3.max()
assert 7 == counts_None.max()
# The most common feature should be the same
assert "the" == features_1[np.argmax(counts_1)]
assert "the" == features_3[np.argmax(counts_3)]
assert "the" == features_None[np.argmax(counts_None)]
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert 'a' in vect.vocabulary_.keys()
assert len(vect.vocabulary_.keys()) == 6
assert len(vect.stop_words_) == 0
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert 'a' not in vect.vocabulary_.keys() # {ae} ignored
assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain
assert 'a' in vect.stop_words_
assert len(vect.stop_words_) == 2
vect.max_df = 1
vect.fit(test_data)
assert 'a' not in vect.vocabulary_.keys() # {ae} ignored
assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain
assert 'a' in vect.stop_words_
assert len(vect.stop_words_) == 2
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert 'a' in vect.vocabulary_.keys()
assert len(vect.vocabulary_.keys()) == 6
assert len(vect.stop_words_) == 0
vect.min_df = 2
vect.fit(test_data)
assert 'c' not in vect.vocabulary_.keys() # {bcdt} ignored
assert len(vect.vocabulary_.keys()) == 2 # {ae} remain
assert 'c' in vect.stop_words_
assert len(vect.stop_words_) == 4
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert 'c' not in vect.vocabulary_.keys() # {bcdet} ignored
assert len(vect.vocabulary_.keys()) == 1 # {a} remains
assert 'c' in vect.stop_words_
assert len(vect.stop_words_) == 5
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert X_sparse.dtype == np.float32
@fails_if_pypy
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(alternate_sign=False, analyzer='char', norm=None)
X = vect.transform(test_data)
assert np.max(X[0:1].data) == 3
assert np.max(X[1:2].data) == 2
assert X.dtype == np.float64
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', alternate_sign=False,
binary=True, norm=None)
X = vect.transform(test_data)
assert np.max(X.data) == 1
assert X.dtype == np.float64
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', alternate_sign=False,
binary=True, norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert X.dtype == np.float64
@pytest.mark.parametrize('Vectorizer', (CountVectorizer, TfidfVectorizer))
def test_vectorizer_inverse_transform(Vectorizer):
# raw documents
data = ALL_FOOD_DOCS
vectorizer = Vectorizer()
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1, cv=3)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert grid_search.best_score_ == 1.0
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert best_vectorizer.ngram_range == (1, 1)
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert grid_search.best_score_ == 1.0
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert best_vectorizer.ngram_range == (1, 1)
assert best_vectorizer.norm == 'l2'
assert not best_vectorizer.fixed_vocabulary_
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
@fails_if_pypy
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"Машинное обучение — обширный подраздел искусственного "
"интеллекта, изучающий методы построения алгоритмов, "
"способных обучаться."
)
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert X_counted.shape == (1, 12)
vect = HashingVectorizer(norm=None, alternate_sign=False)
X_hashed = vect.transform([document])
assert X_hashed.shape == (1, 2 ** 20)
# No collisions on such a small dataset
assert X_counted.nnz == X_hashed.nnz
# When norm is None and not alternate_sign, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert vect.fixed_vocabulary_
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert type(copy) == orig.__class__
assert copy.get_params() == orig.get_params()
if IS_PYPY and isinstance(orig, HashingVectorizer):
continue
else:
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
@pytest.mark.parametrize('factory', [
CountVectorizer.build_analyzer,
CountVectorizer.build_preprocessor,
CountVectorizer.build_tokenizer,
])
def test_pickling_built_processors(factory):
"""Tokenizers cannot be pickled
https://github.com/scikit-learn/scikit-learn/issues/12833
"""
vec = CountVectorizer()
function = factory(vec)
text = ("J'ai mangé du kangourou ce midi, "
"c'était pas très bon.")
roundtripped_function = pickle.loads(pickle.dumps(function))
expected = function(text)
result = roundtripped_function(text)
assert result == expected
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert cv.get_feature_names() == unpickled_cv.get_feature_names()
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = rng.choice(vocab_words, size=5, replace=False)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert cv.get_feature_names() == unpickled_cv.get_feature_names()
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert type(copy) == orig.__class__
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_transformer_idf_setter():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
copy = TfidfTransformer()
copy.idf_ = orig.idf_
assert_array_equal(
copy.transform(X).toarray(),
orig.transform(X).toarray())
def test_tfidf_vectorizer_setter():
orig = TfidfVectorizer(use_idf=True)
orig.fit(JUNK_FOOD_DOCS)
copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True)
copy.idf_ = orig.idf_
assert_array_equal(
copy.transform(JUNK_FOOD_DOCS).toarray(),
orig.transform(JUNK_FOOD_DOCS).toarray())
def test_tfidfvectorizer_invalid_idf_attr():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
copy = TfidfVectorizer(vocabulary=vect.vocabulary_, use_idf=True)
expected_idf_len = len(vect.idf_)
invalid_idf = [1.0] * (expected_idf_len + 1)
with pytest.raises(ValueError):
setattr(copy, 'idf_', invalid_idf)
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
with pytest.raises(ValueError):
vect.fit([])
@fails_if_pypy
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert v.binary
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert vect_vocab_clone.vocabulary_ == vect_vocab.vocabulary_
@pytest.mark.parametrize('Vectorizer',
(CountVectorizer, TfidfVectorizer, HashingVectorizer))
def test_vectorizer_string_object_as_input(Vectorizer):
message = ("Iterable over raw text documents expected, "
"string object received.")
vec = Vectorizer()
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(ValueError, message, vec.fit, "hello world!")
vec.fit(["some text", "some other text"])
assert_raise_message(ValueError, message, vec.transform, "hello world!")
@pytest.mark.parametrize("X_dtype", [np.float32, np.float64])
def test_tfidf_transformer_type(X_dtype):
X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42)
X_trans = TfidfTransformer().fit_transform(X)
assert X_trans.dtype == X.dtype
def test_tfidf_transformer_sparse():
X = sparse.rand(10, 20000, dtype=np.float64, random_state=42)
X_csc = sparse.csc_matrix(X)
X_csr = sparse.csr_matrix(X)
X_trans_csc = TfidfTransformer().fit_transform(X_csc)
X_trans_csr = TfidfTransformer().fit_transform(X_csr)
assert_allclose_dense_sparse(X_trans_csc, X_trans_csr)
assert X_trans_csc.format == X_trans_csr.format
@pytest.mark.parametrize(
"vectorizer_dtype, output_dtype, warning_expected",
[(np.int32, np.float64, True),
(np.int64, np.float64, True),
(np.float32, np.float32, False),
(np.float64, np.float64, False)]
)
def test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype,
warning_expected):
X = np.array(["numpy", "scipy", "sklearn"])
vectorizer = TfidfVectorizer(dtype=vectorizer_dtype)
warning_msg_match = "'dtype' should be used."
warning_cls = UserWarning
expected_warning_cls = warning_cls if warning_expected else None
with pytest.warns(expected_warning_cls,
match=warning_msg_match) as record:
X_idf = vectorizer.fit_transform(X)
if expected_warning_cls is None:
relevant_warnings = [w for w in record
if isinstance(w, warning_cls)]
assert len(relevant_warnings) == 0
assert X_idf.dtype == output_dtype
@pytest.mark.parametrize("vec", [
HashingVectorizer(ngram_range=(2, 1)),
CountVectorizer(ngram_range=(2, 1)),
TfidfVectorizer(ngram_range=(2, 1))
])
def test_vectorizers_invalid_ngram_range(vec):
# vectorizers could be initialized with invalid ngram range
# test for raising error message
invalid_range = vec.ngram_range
message = ("Invalid value for ngram_range=%s "
"lower boundary larger than the upper boundary."
% str(invalid_range))
if isinstance(vec, HashingVectorizer):
pytest.xfail(reason='HashingVectorizer is not supported on PyPy')
assert_raise_message(
ValueError, message, vec.fit, ["good news everyone"])
assert_raise_message(
ValueError, message, vec.fit_transform, ["good news everyone"])
if isinstance(vec, HashingVectorizer):
assert_raise_message(
ValueError, message, vec.transform, ["good news everyone"])
def _check_stop_words_consistency(estimator):
stop_words = estimator.get_stop_words()
tokenize = estimator.build_tokenizer()
preprocess = estimator.build_preprocessor()
return estimator._check_stop_words_consistency(stop_words, preprocess,
tokenize)
@fails_if_pypy
def test_vectorizer_stop_words_inconsistent():
lstr = "['and', 'll', 've']"
message = ('Your stop_words may be inconsistent with your '
'preprocessing. Tokenizing the stop words generated '
'tokens %s not in stop_words.' % lstr)
for vec in [CountVectorizer(),
TfidfVectorizer(), HashingVectorizer()]:
vec.set_params(stop_words=["you've", "you", "you'll", 'AND'])
assert_warns_message(UserWarning, message, vec.fit_transform,
['hello world'])
# reset stop word validation
del vec._stop_words_id
assert _check_stop_words_consistency(vec) is False
# Only one warning per stop list
assert_no_warnings(vec.fit_transform, ['hello world'])
assert _check_stop_words_consistency(vec) is None
# Test caching of inconsistency assessment
vec.set_params(stop_words=["you've", "you", "you'll", 'blah', 'AND'])
assert_warns_message(UserWarning, message, vec.fit_transform,
['hello world'])
@skip_if_32bit
def test_countvectorizer_sort_features_64bit_sparse_indices():
"""
Check that CountVectorizer._sort_features preserves the dtype of its sparse
feature matrix.
This test is skipped on 32bit platforms, see:
https://github.com/scikit-learn/scikit-learn/pull/11295
for more details.
"""
X = sparse.csr_matrix((5, 5), dtype=np.int64)
# force indices and indptr to int64.
INDICES_DTYPE = np.int64
X.indices = X.indices.astype(INDICES_DTYPE)
X.indptr = X.indptr.astype(INDICES_DTYPE)
vocabulary = {
"scikit-learn": 0,
"is": 1,
"great!": 2
}
Xs = CountVectorizer()._sort_features(X, vocabulary)
assert INDICES_DTYPE == Xs.indices.dtype
@fails_if_pypy
@pytest.mark.parametrize('Estimator',
[CountVectorizer, TfidfVectorizer, HashingVectorizer])
def test_stop_word_validation_custom_preprocessor(Estimator):
data = [{'text': 'some text'}]
vec = Estimator()
assert _check_stop_words_consistency(vec) is True
vec = Estimator(preprocessor=lambda x: x['text'],
stop_words=['and'])
assert _check_stop_words_consistency(vec) == 'error'
# checks are cached
assert _check_stop_words_consistency(vec) is None
vec.fit_transform(data)
class CustomEstimator(Estimator):
def build_preprocessor(self):
return lambda x: x['text']
vec = CustomEstimator(stop_words=['and'])
assert _check_stop_words_consistency(vec) == 'error'
vec = Estimator(tokenizer=lambda doc: re.compile(r'\w{1,}')
.findall(doc),
stop_words=['and'])
assert _check_stop_words_consistency(vec) is True
@pytest.mark.parametrize(
'Estimator',
[CountVectorizer,
TfidfVectorizer,
HashingVectorizer]
)
@pytest.mark.parametrize(
'input_type, err_type, err_msg',
[('filename', FileNotFoundError, ''),
('file', AttributeError, "'str' object has no attribute 'read'")]
)
def test_callable_analyzer_error(Estimator, input_type, err_type, err_msg):
if issubclass(Estimator, HashingVectorizer):
pytest.xfail('HashingVectorizer is not supported on PyPy')
data = ['this is text, not file or filename']
with pytest.raises(err_type, match=err_msg):
Estimator(analyzer=lambda x: x.split(),
input=input_type).fit_transform(data)
@pytest.mark.parametrize(
'Estimator',
[CountVectorizer,
TfidfVectorizer,
pytest.param(HashingVectorizer, marks=fails_if_pypy)]
)
@pytest.mark.parametrize(
'analyzer', [lambda doc: open(doc, 'r'), lambda doc: doc.read()]
)
@pytest.mark.parametrize('input_type', ['file', 'filename'])
def test_callable_analyzer_change_behavior(Estimator, analyzer, input_type):
data = ['this is text, not file or filename']
warn_msg = 'Since v0.21, vectorizer'
with pytest.raises((FileNotFoundError, AttributeError)):
with pytest.warns(ChangedBehaviorWarning, match=warn_msg) as records:
Estimator(analyzer=analyzer, input=input_type).fit_transform(data)
assert len(records) == 1
assert warn_msg in str(records[0])
@pytest.mark.parametrize(
'Estimator',
[CountVectorizer,
TfidfVectorizer,
HashingVectorizer]
)
def test_callable_analyzer_reraise_error(tmpdir, Estimator):
# check if a custom exception from the analyzer is shown to the user
def analyzer(doc):
raise Exception("testing")
if issubclass(Estimator, HashingVectorizer):
pytest.xfail('HashingVectorizer is not supported on PyPy')
f = tmpdir.join("file.txt")
f.write("sample content\n")
with pytest.raises(Exception, match="testing"):
Estimator(analyzer=analyzer, input='file').fit_transform([f])
@pytest.mark.parametrize(
'Vectorizer',
[CountVectorizer, HashingVectorizer, TfidfVectorizer]
)
@pytest.mark.parametrize(
'stop_words, tokenizer, preprocessor, ngram_range, token_pattern,'
'analyzer, unused_name, ovrd_name, ovrd_msg',
[(["you've", "you'll"], None, None, (1, 1), None, 'char',
"'stop_words'", "'analyzer'", "!= 'word'"),
(None, lambda s: s.split(), None, (1, 1), None, 'char',
"'tokenizer'", "'analyzer'", "!= 'word'"),
(None, lambda s: s.split(), None, (1, 1), r'\w+', 'word',
"'token_pattern'", "'tokenizer'", "is not None"),
(None, None, lambda s:s.upper(), (1, 1), r'\w+', lambda s:s.upper(),
"'preprocessor'", "'analyzer'", "is callable"),
(None, None, None, (1, 2), None, lambda s:s.upper(),
"'ngram_range'", "'analyzer'", "is callable"),
(None, None, None, (1, 1), r'\w+', 'char',
"'token_pattern'", "'analyzer'", "!= 'word'")]
)
def test_unused_parameters_warn(Vectorizer, stop_words,
tokenizer, preprocessor,
ngram_range, token_pattern,
analyzer, unused_name, ovrd_name,
ovrd_msg):
train_data = JUNK_FOOD_DOCS
# setting parameter and checking for corresponding warning messages
vect = Vectorizer()
vect.set_params(stop_words=stop_words, tokenizer=tokenizer,
preprocessor=preprocessor, ngram_range=ngram_range,
token_pattern=token_pattern, analyzer=analyzer)
msg = ("The parameter %s will not be used"
" since %s %s" % (unused_name, ovrd_name, ovrd_msg)
)
with pytest.warns(UserWarning, match=msg):
vect.fit(train_data)
# TODO: Remove in 0.24
def test_vectorizermixin_is_deprecated():
class MyVectorizer(VectorizerMixin):
pass
msg = ("VectorizerMixin is deprecated in version 0.22 and will be removed "
"in version 0.24.")
with pytest.warns(FutureWarning, match=msg):
MyVectorizer()
|
[
"[email protected]"
] | |
91eab4fa8185c49b8477407a722f1c6715895fb2
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/playground/memre/armv7l/obsolete/corp2/x11/util/xorg-util/actions.py
|
5173c2317879124ca4f74274e77256d2c546affd
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,710 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006-2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import crosstools as autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
WorkDir = "."
Skip = ("patches", "pisiBuildState", ".")
def setup():
for package in shelltools.ls("."):
if package.startswith(Skip):
continue
shelltools.cd(package)
if package.startswith("xorg-cf-files"):
pisitools.dosed("host.def", "_PARDUS_CC_", "%(CC)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_CXX_", "%(CXX)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_AS_", "%(AS)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_LD_", "%(LD)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_CFLAGS_", "%(CFLAGS)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_LDFLAGS_", "%(LDFLAGS)s" % autotools.environment)
autotools.configure("--with-config-dir=/usr/share/X11/config")
shelltools.cd("../")
def build():
for package in shelltools.ls("."):
if package.startswith(Skip):
continue
shelltools.cd(package)
autotools.make()
shelltools.cd("../")
def install():
for package in shelltools.ls("."):
if package.startswith(Skip):
continue
shelltools.cd(package)
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
shelltools.cd("../")
|
[
"[email protected]"
] | |
1376cb7097bd78ec34020abe5909b6c7788177ca
|
c609730a43596a2d3303f072fc97d9cf681fac7b
|
/cagey/autohome_newcar/autohome_newcar/pipelines.py
|
c3d57bcd32af5c7814ca4370dea1aa74f9349e84
|
[] |
no_license
|
sinnettluo/ChenProject
|
5403311c0c7b78c484145e16d692abff00d2a110
|
0e33ecf1683afb22f1deb4bd54294c41aed8a46b
|
refs/heads/master
| 2023-03-22T23:48:08.430178 | 2020-09-02T15:05:02 | 2020-09-02T15:05:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,213 |
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import logging
import pymongo
from scrapy.utils.project import get_project_settings
from scrapy.exceptions import DropItem
settings = get_project_settings()
class AutohomeNewcarPipeline(object):
def __init__(self):
self.mongocounts = 0
self.connection = pymongo.MongoClient(
settings['MONGODB_SERVER'],
settings['MONGODB_PORT']
)
db = self.connection[settings['MONGODB_DB_SAVE']]
self.collection = db[settings['MONGODB_WRITE_COLLECTION']]
def process_item(self, item, spider):
logging.log(msg="Car added to MongoDB database!", level=logging.INFO)
self.mongocounts += 1
logging.log(msg=f"scrapy {self.mongocounts} items", level=logging.INFO)
if len(item["carinfo"]) == 0:
raise DropItem(f"Unqualified data! --> {item['url']}")
else:
self.collection.insert(item)
return item
def close_spider(self, spider):
self.connection.close()
|
[
"[email protected]"
] | |
862086ef086ece1194fb916bde5ba9f0315ac214
|
ac64fda7f1bfc92f7897efd60b8f3f0aeb22b4d7
|
/syntactic_mutations/udacity/mutants/mutant51.py
|
a96c3a128cbcd12bf06722bf26693aee24cb014f
|
[] |
no_license
|
dlfaults/mutation_operators_evaluation
|
ea7f33459ba7bcf7d70092d9db8b40f9b338d516
|
7d1ff30e901931a46bf8908e9bb05cae3daa5f0f
|
refs/heads/master
| 2020-12-27T15:45:07.262012 | 2020-02-03T12:22:01 | 2020-02-03T12:22:01 | 237,955,342 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,742 |
py
|
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten
from batch_generator import Generator
from utils import INPUT_SHAPE, batch_generator, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS
from keras import backend as K
from PIL import Image
import numpy as np
def build_model(args):
'''
Modified NVIDIA model
'''
pass
model.add(Lambda((lambda x: ((x / 127.5) - 1.0)), input_shape=INPUT_SHAPE))
model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))
model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))
model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='elu'))
model.add(Conv2D(64, (3, 3), activation='elu'))
model.add(Dropout(args.keep_prob))
model.add(Flatten())
model.add(Dense(100, activation='elu'))
model.add(Dense(50, activation='elu'))
model.add(Dense(10, activation='elu'))
model.add(Dense(1))
return model
def train_model(x_train, x_valid, y_train, y_valid, model_name, args):
'''
Train the model
'''
model = build_model(args)
model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))
train_generator = Generator(x_train, y_train, True, args)
validation_generator = Generator(x_valid, y_valid, False, args)
model.fit_generator(train_generator, validation_data=\
validation_generator, epochs=\
args.nb_epoch, use_multiprocessing=\
False, max_queue_size=\
10, workers=\
4)
model.save(model_name)
|
[
"[email protected]"
] | |
3e4c15f5894ce5582ec1c8f2b54085c0fbfeb742
|
06cf972369c30da9d98b296bcbc26a826aa98126
|
/aloisioimoveis/locations/tests/serializers/test_serializer_neighborhood.py
|
35cc0545c519585355f8815f4d1e162ca82666f7
|
[] |
no_license
|
thiagorossener/aloisioimoveis
|
2597422af6ac058ed3b8aa6e58f0f8913488a7fe
|
f9d974440f9a8cc875da8a1d4a5c885429563c1b
|
refs/heads/master
| 2021-06-16T23:02:11.193518 | 2021-02-01T14:17:10 | 2021-02-01T14:17:10 | 94,144,023 | 18 | 17 | null | 2021-06-10T20:35:48 | 2017-06-12T21:55:18 |
JavaScript
|
UTF-8
|
Python
| false | false | 654 |
py
|
from django.test import TestCase
from model_mommy import mommy
from aloisioimoveis.locations.models import City, Neighborhood
from aloisioimoveis.locations.serializers import NeighborhoodSerializer
class NeighborhoodSerializerTest(TestCase):
def test_serializer(self):
"""Neighborhood serialization should return dict with id and name"""
city = mommy.make(City, name='Taubaté')
neighborhood = mommy.make(Neighborhood, name='Independência', city=city)
serializer = NeighborhoodSerializer(neighborhood)
self.assertDictEqual({'id': 1, 'name': 'Independência'},
serializer.data)
|
[
"[email protected]"
] | |
79957b5682bbec421f84acc58f582cf4bee98906
|
b1c7a768f38e2e987a112da6170f49503b9db05f
|
/accounts/migrations/0001_initial.py
|
16cd2e320b91ce057984484170e3cb744ebb5223
|
[] |
no_license
|
Niladrykar/bracketerp
|
8b7491aa319f60ec3dcb5077258d75b0394db374
|
ca4ee60c2254c6c132a38ce52410059cc6b19cae
|
refs/heads/master
| 2022-12-11T04:23:07.504966 | 2019-03-18T06:58:13 | 2019-03-18T06:58:13 | 176,218,029 | 1 | 0 | null | 2022-12-08T03:01:46 | 2019-03-18T06:27:37 |
JavaScript
|
UTF-8
|
Python
| false | false | 981 |
py
|
# Generated by Django 2.0.5 on 2018-09-07 10:46
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=('auth.user', models.Model),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
[
"[email protected]"
] | |
025dd83f0936e55ee882cf696aa5f658a0d79663
|
fd19962d7c1f37e8bdabf7946c48516f640e2ff3
|
/product/management/commands/mycommand.py
|
bf55951d9ca2bb8991c5b28074bbba8a7932a5d5
|
[] |
no_license
|
amurakho/django_ready_to_delete
|
fe71bb727ad20ef134d3752568a043614acb4c64
|
0fed1890ce556bac301278e444426619dd0f2903
|
refs/heads/master
| 2022-12-05T12:01:02.412789 | 2020-08-20T12:30:31 | 2020-08-20T12:30:31 | 289,000,995 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 174 |
py
|
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Creates groups'
def handle(self, *args, **options):
print('Done')
|
[
"[email protected]"
] | |
a6f56459317ed81b634e5e6f5ac92dd207e7ed70
|
f84540a209490c4d3ee7583c4668fe1c8b1c230e
|
/Graph/TopologicalSort/CourseScheduleII.py
|
b261982bba05af554e26c9924d34c7ae784eb183
|
[] |
no_license
|
TimMKChang/AlgorithmSampleCode
|
9e08a3a88f24b9645ca70f834970650d400fd259
|
d5bcdce147bd6c3f05648962ca2096f79e4f003f
|
refs/heads/master
| 2023-07-19T15:28:27.722181 | 2021-09-09T07:02:22 | 2021-09-09T07:02:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,458 |
py
|
from typing import List
from collections import defaultdict
class TopologicalSort: # by DFS (recursive)
def __init__(self):
self.ans = None
self.index = None
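    # dfs() performs a postorder depth-first search: `visited` records every node
    # seen so far, `curVisited` records only the nodes on the current recursion
    # path (meeting one of those again means a cycle), and each finished node is
    # written into self.ans from the back, which yields a topological order.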
def dfs(self, node, edges, visited, curVisited):
for v in edges[node]:
if v in curVisited:
return False
if v not in visited:
visited.add(v)
curVisited.add(v)
if not self.dfs(v, edges, visited, curVisited):
return False
curVisited.remove(v)
self.ans[self.index] = node
self.index -= 1
return True
def sort(self, N, edges):
self.ans = [None for _ in range(N)]
self.index = N - 1
visited = set()
curVisited = set()
for n in range(N):
if n not in visited:
visited.add(n)
curVisited.add(n)
if not self.dfs(n, edges, visited, curVisited):
return []
curVisited.remove(n)
return self.ans
class Solution:
    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
edges = defaultdict(list)
for v, u in prerequisites:
edges[u].append(v)
return TopologicalSort().sort(numCourses, edges)
s = Solution()
numCourses = 4
prerequisites = [[1,0],[2,0],[3,1],[3,2]]
print(s.findOrder(numCourses, prerequisites))
|
[
"[email protected]"
] | |
8706de911986f56f365524ecf0b45299673202ac
|
f13c586b82224c07f28f7bb7d9dd503e64eb5cb2
|
/tests/drawer/test_drawer_utils.py
|
dbb661b0f0863c546d53e12356b0e139287236e5
|
[
"Apache-2.0",
"MPL-1.1"
] |
permissive
|
therooler/pennylane
|
095f104e40254be2ed3050bc7be9ea9d2ee11ebd
|
fde1f24bd784d6ee2af5c980c2d5010b4c2bbe54
|
refs/heads/master
| 2023-04-29T13:32:43.115108 | 2023-04-18T09:41:42 | 2023-04-18T09:41:42 | 202,356,685 | 0 | 0 |
Apache-2.0
| 2019-08-14T13:30:39 | 2019-08-14T13:30:38 | null |
UTF-8
|
Python
| false | false | 5,547 |
py
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the ``pennylane.drawer.utils`` module.
"""
import pytest
import pennylane as qml
from pennylane.drawer.utils import default_wire_map, convert_wire_order, unwrap_controls
from pennylane.wires import Wires
class TestDefaultWireMap:
"""Tests ``_default_wire_map`` helper function."""
def test_empty(self):
"""Test creating an empty wire map"""
wire_map = default_wire_map([])
assert wire_map == {}
def test_simple(self):
"""Test creating a wire map with wires that do not have successive ordering"""
ops = [qml.PauliX(0), qml.PauliX(2), qml.PauliX(1)]
wire_map = default_wire_map(ops)
assert wire_map == {0: 0, 2: 1, 1: 2}
def test_string_wires(self):
"""Test wire map works with string labelled wires."""
ops = [qml.PauliY("a"), qml.CNOT(wires=("b", "c"))]
wire_map = default_wire_map(ops)
assert wire_map == {"a": 0, "b": 1, "c": 2}
class TestConvertWireOrder:
"""Tests the ``convert_wire_order`` utility function."""
def test_no_wire_order(self):
"""Test that a wire map is produced if no wire order is passed."""
ops = [qml.PauliX(0), qml.PauliX(2), qml.PauliX(1)]
wire_map = convert_wire_order(ops)
assert wire_map == {0: 0, 2: 1, 1: 2}
def test_wire_order_ints(self):
"""Tests wire map produced when initial wires are integers."""
ops = [qml.PauliX(0), qml.PauliX(2), qml.PauliX(1)]
wire_order = [2, 1, 0]
wire_map = convert_wire_order(ops, wire_order)
assert wire_map == {2: 0, 1: 1, 0: 2}
def test_wire_order_str(self):
"""Test wire map produced when initial wires are strings."""
ops = [qml.CNOT(wires=("a", "b")), qml.PauliX("c")]
wire_order = ("c", "b", "a")
wire_map = convert_wire_order(ops, wire_order)
assert wire_map == {"c": 0, "b": 1, "a": 2}
def test_show_all_wires_false(self):
"""Test when `show_all_wires` is set to `False` only used wires are in the map."""
ops = [qml.PauliX("a"), qml.PauliY("c")]
wire_order = ["a", "b", "c", "d"]
wire_map = convert_wire_order(ops, wire_order, show_all_wires=False)
assert wire_map == {"a": 0, "c": 1}
def test_show_all_wires_true(self):
"""Test when `show_all_wires` is set to `True` everything in ``wire_order`` is included."""
ops = [qml.PauliX("a"), qml.PauliY("c")]
wire_order = ["a", "b", "c", "d"]
wire_map = convert_wire_order(ops, wire_order, show_all_wires=True)
assert wire_map == {"a": 0, "b": 1, "c": 2, "d": 3}
class TestUnwrapControls:
"""Tests the ``unwrap_controls`` utility function."""
# pylint:disable=too-few-public-methods
@pytest.mark.parametrize(
"op,expected_control_wires,expected_control_values",
[
(qml.PauliX(wires="a"), Wires([]), None),
(qml.CNOT(wires=["a", "b"]), Wires("a"), None),
(qml.ctrl(qml.PauliX(wires="b"), control="a"), Wires("a"), [True]),
(
qml.ctrl(qml.PauliX(wires="b"), control=["a", "c", "d"]),
Wires(["a", "c", "d"]),
[True, True, True],
),
(
qml.ctrl(qml.PauliZ(wires="c"), control=["a", "d"], control_values=[True, False]),
Wires(["a", "d"]),
[True, False],
),
(
qml.ctrl(
qml.CRX(0.3, wires=["c", "e"]),
control=["a", "b", "d"],
control_values=[True, False, False],
),
Wires(["a", "b", "d", "c"]),
[True, False, False, True],
),
(
qml.ctrl(qml.CNOT(wires=["c", "d"]), control=["a", "b"]),
Wires(["a", "b", "c"]),
[True, True, True],
),
(
qml.ctrl(qml.ctrl(qml.CNOT(wires=["c", "d"]), control=["a", "b"]), control=["e"]),
Wires(["e", "a", "b", "c"]),
[True, True, True, True],
),
(
qml.ctrl(
qml.ctrl(
qml.CNOT(wires=["c", "d"]), control=["a", "b"], control_values=[False, True]
),
control=["e"],
control_values=[False],
),
Wires(["e", "a", "b", "c"]),
[False, False, True, True],
),
],
)
def test_multi_defined_control_values(
self, op, expected_control_wires, expected_control_values
):
"""Test a multi-controlled single-qubit operation with defined control values."""
control_wires, control_values = unwrap_controls(op)
assert control_wires == expected_control_wires
assert control_values == expected_control_values
|
[
"[email protected]"
] | |
c63cd03fad1c0eed11bee4803c0caf81814cb90c
|
f9e53b4e1c25adbf36fc19fa945173172da43048
|
/vspk/v5_0/nuingressaclentrytemplate.py
|
e5d8e31df2bb04bec70b6c0f92f2c96b65ff386f
|
[
"BSD-3-Clause"
] |
permissive
|
mohaimenhasan/vspk-python
|
42145f098ac0c837073280a65fd759650225ed0b
|
4c7b297427048340b250cc3c74d9214dc0d4bde1
|
refs/heads/master
| 2020-04-17T01:56:52.644618 | 2018-11-27T17:31:22 | 2018-11-27T17:31:44 | 166,114,832 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 37,867 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUStatisticsFetcher
from bambou import NURESTObject
class NUIngressACLEntryTemplate(NURESTObject):
""" Represents a IngressACLEntryTemplate in the VSD
Notes:
Defines the template of Ingress ACL entries
"""
__rest_name__ = "ingressaclentrytemplate"
__resource_name__ = "ingressaclentrytemplates"
## Constants
CONST_NETWORK_TYPE_NETWORK_MACRO_GROUP = "NETWORK_MACRO_GROUP"
CONST_ACTION_DROP = "DROP"
CONST_LOCATION_TYPE_ZONE = "ZONE"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_NETWORK_TYPE_PUBLIC_NETWORK = "PUBLIC_NETWORK"
CONST_ACTION_FORWARD = "FORWARD"
CONST_NETWORK_TYPE_POLICYGROUP = "POLICYGROUP"
CONST_LOCATION_TYPE_ANY = "ANY"
CONST_NETWORK_TYPE_ENDPOINT_DOMAIN = "ENDPOINT_DOMAIN"
CONST_LOCATION_TYPE_PGEXPRESSION = "PGEXPRESSION"
CONST_NETWORK_TYPE_ENTERPRISE_NETWORK = "ENTERPRISE_NETWORK"
CONST_NETWORK_TYPE_ANY = "ANY"
CONST_LOCATION_TYPE_POLICYGROUP = "POLICYGROUP"
CONST_NETWORK_TYPE_SUBNET = "SUBNET"
CONST_NETWORK_TYPE_ZONE = "ZONE"
CONST_ASSOCIATED_TRAFFIC_TYPE_L4_SERVICE_GROUP = "L4_SERVICE_GROUP"
CONST_NETWORK_TYPE_UNDERLAY_INTERNET_POLICYGROUP = "UNDERLAY_INTERNET_POLICYGROUP"
CONST_NETWORK_TYPE_SAAS_APPLICATION_GROUP = "SAAS_APPLICATION_GROUP"
CONST_NETWORK_TYPE_ENDPOINT_SUBNET = "ENDPOINT_SUBNET"
CONST_LOCATION_TYPE_SUBNET = "SUBNET"
CONST_POLICY_STATE_DRAFT = "DRAFT"
CONST_ASSOCIATED_TRAFFIC_TYPE_L4_SERVICE = "L4_SERVICE"
CONST_POLICY_STATE_LIVE = "LIVE"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_NETWORK_TYPE_PGEXPRESSION = "PGEXPRESSION"
CONST_NETWORK_TYPE_ENDPOINT_ZONE = "ENDPOINT_ZONE"
def __init__(self, **kwargs):
""" Initializes a IngressACLEntryTemplate instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> ingressaclentrytemplate = NUIngressACLEntryTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'IngressACLEntryTemplate')
>>> ingressaclentrytemplate = NUIngressACLEntryTemplate(data=my_dict)
"""
super(NUIngressACLEntryTemplate, self).__init__()
# Read/Write Attributes
self._acl_template_name = None
self._icmp_code = None
self._icmp_type = None
self._ipv6_address_override = None
self._dscp = None
self._last_updated_by = None
self._action = None
self._address_override = None
self._description = None
self._destination_port = None
self._network_id = None
self._network_type = None
self._mirror_destination_id = None
self._flow_logging_enabled = None
self._enterprise_name = None
self._entity_scope = None
self._location_id = None
self._location_type = None
self._policy_state = None
self._domain_name = None
self._source_port = None
self._priority = None
self._protocol = None
self._associated_l7_application_signature_id = None
self._associated_live_entity_id = None
self._associated_live_template_id = None
self._associated_traffic_type = None
self._associated_traffic_type_id = None
self._associated_virtual_firewall_rule_id = None
self._stateful = None
self._stats_id = None
self._stats_logging_enabled = None
self._ether_type = None
self._overlay_mirror_destination_id = None
self._external_id = None
self.expose_attribute(local_name="acl_template_name", remote_name="ACLTemplateName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="icmp_code", remote_name="ICMPCode", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="icmp_type", remote_name="ICMPType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ipv6_address_override", remote_name="IPv6AddressOverride", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="dscp", remote_name="DSCP", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="action", remote_name="action", attribute_type=str, is_required=True, is_unique=False, choices=[u'DROP', u'FORWARD'])
self.expose_attribute(local_name="address_override", remote_name="addressOverride", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="destination_port", remote_name="destinationPort", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="network_id", remote_name="networkID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="network_type", remote_name="networkType", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'ENDPOINT_DOMAIN', u'ENDPOINT_SUBNET', u'ENDPOINT_ZONE', u'ENTERPRISE_NETWORK', u'NETWORK_MACRO_GROUP', u'PGEXPRESSION', u'POLICYGROUP', u'PUBLIC_NETWORK', u'SAAS_APPLICATION_GROUP', u'SUBNET', u'UNDERLAY_INTERNET_POLICYGROUP', u'ZONE'])
self.expose_attribute(local_name="mirror_destination_id", remote_name="mirrorDestinationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="flow_logging_enabled", remote_name="flowLoggingEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_name", remote_name="enterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="location_id", remote_name="locationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="location_type", remote_name="locationType", attribute_type=str, is_required=True, is_unique=False, choices=[u'ANY', u'PGEXPRESSION', u'POLICYGROUP', u'SUBNET', u'ZONE'])
self.expose_attribute(local_name="policy_state", remote_name="policyState", attribute_type=str, is_required=False, is_unique=False, choices=[u'DRAFT', u'LIVE'])
self.expose_attribute(local_name="domain_name", remote_name="domainName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="source_port", remote_name="sourcePort", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="protocol", remote_name="protocol", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_l7_application_signature_id", remote_name="associatedL7ApplicationSignatureID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_live_entity_id", remote_name="associatedLiveEntityID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_live_template_id", remote_name="associatedLiveTemplateID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_traffic_type", remote_name="associatedTrafficType", attribute_type=str, is_required=False, is_unique=False, choices=[u'L4_SERVICE', u'L4_SERVICE_GROUP'])
self.expose_attribute(local_name="associated_traffic_type_id", remote_name="associatedTrafficTypeID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_virtual_firewall_rule_id", remote_name="associatedVirtualFirewallRuleID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="stateful", remote_name="stateful", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="stats_id", remote_name="statsID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="stats_logging_enabled", remote_name="statsLoggingEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="ether_type", remote_name="etherType", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="overlay_mirror_destination_id", remote_name="overlayMirrorDestinationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.statistics = NUStatisticsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def acl_template_name(self):
""" Get acl_template_name value.
Notes:
The name of the parent Template for this acl entry
This attribute is named `ACLTemplateName` in VSD API.
"""
return self._acl_template_name
@acl_template_name.setter
def acl_template_name(self, value):
""" Set acl_template_name value.
Notes:
The name of the parent Template for this acl entry
This attribute is named `ACLTemplateName` in VSD API.
"""
self._acl_template_name = value
@property
def icmp_code(self):
""" Get icmp_code value.
Notes:
The ICMP Code when protocol selected is ICMP.
This attribute is named `ICMPCode` in VSD API.
"""
return self._icmp_code
@icmp_code.setter
def icmp_code(self, value):
""" Set icmp_code value.
Notes:
The ICMP Code when protocol selected is ICMP.
This attribute is named `ICMPCode` in VSD API.
"""
self._icmp_code = value
@property
def icmp_type(self):
""" Get icmp_type value.
Notes:
The ICMP Type when protocol selected is ICMP.
This attribute is named `ICMPType` in VSD API.
"""
return self._icmp_type
@icmp_type.setter
def icmp_type(self, value):
""" Set icmp_type value.
Notes:
The ICMP Type when protocol selected is ICMP.
This attribute is named `ICMPType` in VSD API.
"""
self._icmp_type = value
@property
def ipv6_address_override(self):
""" Get ipv6_address_override value.
Notes:
Overrides the source IPv6 for Ingress and destination IPv6 for Egress, MAC entries will use this address as the match criteria.
This attribute is named `IPv6AddressOverride` in VSD API.
"""
return self._ipv6_address_override
@ipv6_address_override.setter
def ipv6_address_override(self, value):
""" Set ipv6_address_override value.
Notes:
Overrides the source IPv6 for Ingress and destination IPv6 for Egress, MAC entries will use this address as the match criteria.
This attribute is named `IPv6AddressOverride` in VSD API.
"""
self._ipv6_address_override = value
@property
def dscp(self):
""" Get dscp value.
Notes:
DSCP match condition to be set in the rule. It is either * or from 0-63
This attribute is named `DSCP` in VSD API.
"""
return self._dscp
@dscp.setter
def dscp(self, value):
""" Set dscp value.
Notes:
DSCP match condition to be set in the rule. It is either * or from 0-63
This attribute is named `DSCP` in VSD API.
"""
self._dscp = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def action(self):
""" Get action value.
Notes:
The action of the ACL entry DROP or FORWARD or REDIRECT. Action REDIRECT is allowed only for IngressAdvancedForwardingEntry Possible values are DROP, FORWARD, REDIRECT, .
"""
return self._action
@action.setter
def action(self, value):
""" Set action value.
Notes:
The action of the ACL entry DROP or FORWARD or REDIRECT. Action REDIRECT is allowed only for IngressAdvancedForwardingEntry Possible values are DROP, FORWARD, REDIRECT, .
"""
self._action = value
@property
def address_override(self):
""" Get address_override value.
Notes:
Overrides the source IP for Ingress and destination IP for Egress, MAC entries will use this address as the match criteria.
This attribute is named `addressOverride` in VSD API.
"""
return self._address_override
@address_override.setter
def address_override(self, value):
""" Set address_override value.
Notes:
Overrides the source IP for Ingress and destination IP for Egress, MAC entries will use this address as the match criteria.
This attribute is named `addressOverride` in VSD API.
"""
self._address_override = value
@property
def description(self):
""" Get description value.
Notes:
Description of the ACL entry
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of the ACL entry
"""
self._description = value
@property
def destination_port(self):
""" Get destination_port value.
Notes:
The destination port to be matched if protocol is UDP or TCP. Value should be either * or single port number or a port range
This attribute is named `destinationPort` in VSD API.
"""
return self._destination_port
@destination_port.setter
def destination_port(self, value):
""" Set destination_port value.
Notes:
The destination port to be matched if protocol is UDP or TCP. Value should be either * or single port number or a port range
This attribute is named `destinationPort` in VSD API.
"""
self._destination_port = value
@property
def network_id(self):
""" Get network_id value.
Notes:
The ID of the destination endpoint (Subnet/Zone/Macro/MacroGroup/PolicyGroup/PolicyGroupExpression)
This attribute is named `networkID` in VSD API.
"""
return self._network_id
@network_id.setter
def network_id(self, value):
""" Set network_id value.
Notes:
The ID of the destination endpoint (Subnet/Zone/Macro/MacroGroup/PolicyGroup/PolicyGroupExpression)
This attribute is named `networkID` in VSD API.
"""
self._network_id = value
@property
def network_type(self):
""" Get network_type value.
Notes:
Type of the destination endpoint (Subnet/Zone/Macro/MacroGroup/PolicyGroup/PolicyGroupExpression)
This attribute is named `networkType` in VSD API.
"""
return self._network_type
@network_type.setter
def network_type(self, value):
""" Set network_type value.
Notes:
Type of the destination endpoint (Subnet/Zone/Macro/MacroGroup/PolicyGroup/PolicyGroupExpression)
This attribute is named `networkType` in VSD API.
"""
self._network_type = value
@property
def mirror_destination_id(self):
""" Get mirror_destination_id value.
Notes:
Destination ID of the mirror destination object.
This attribute is named `mirrorDestinationID` in VSD API.
"""
return self._mirror_destination_id
@mirror_destination_id.setter
def mirror_destination_id(self, value):
""" Set mirror_destination_id value.
Notes:
Destination ID of the mirror destination object.
This attribute is named `mirrorDestinationID` in VSD API.
"""
self._mirror_destination_id = value
@property
def flow_logging_enabled(self):
""" Get flow_logging_enabled value.
Notes:
Is flow logging enabled for this particular template
This attribute is named `flowLoggingEnabled` in VSD API.
"""
return self._flow_logging_enabled
@flow_logging_enabled.setter
def flow_logging_enabled(self, value):
""" Set flow_logging_enabled value.
Notes:
Is flow logging enabled for this particular template
This attribute is named `flowLoggingEnabled` in VSD API.
"""
self._flow_logging_enabled = value
@property
def enterprise_name(self):
""" Get enterprise_name value.
Notes:
The name of the enterprise for the domains parent
This attribute is named `enterpriseName` in VSD API.
"""
return self._enterprise_name
@enterprise_name.setter
def enterprise_name(self, value):
""" Set enterprise_name value.
Notes:
The name of the enterprise for the domains parent
This attribute is named `enterpriseName` in VSD API.
"""
self._enterprise_name = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def location_id(self):
""" Get location_id value.
Notes:
The ID of the source endpoint (Subnet/Zone/VportTag/PortGroup/PolicyGroupExpression)
This attribute is named `locationID` in VSD API.
"""
return self._location_id
@location_id.setter
def location_id(self, value):
""" Set location_id value.
Notes:
The ID of the source endpoint (Subnet/Zone/VportTag/PortGroup/PolicyGroupExpression)
This attribute is named `locationID` in VSD API.
"""
self._location_id = value
@property
def location_type(self):
""" Get location_type value.
Notes:
Type of the source endpoint (Subnet/Zone/VportTag/PortGroup/PolicyGroupExpression)
This attribute is named `locationType` in VSD API.
"""
return self._location_type
@location_type.setter
def location_type(self, value):
""" Set location_type value.
Notes:
Type of the source endpoint (Subnet/Zone/VportTag/PortGroup/PolicyGroupExpression)
This attribute is named `locationType` in VSD API.
"""
self._location_type = value
@property
def policy_state(self):
""" Get policy_state value.
Notes:
State of the policy.
This attribute is named `policyState` in VSD API.
"""
return self._policy_state
@policy_state.setter
def policy_state(self, value):
""" Set policy_state value.
Notes:
State of the policy.
This attribute is named `policyState` in VSD API.
"""
self._policy_state = value
@property
def domain_name(self):
""" Get domain_name value.
Notes:
The name of the domain/domain template for the aclTemplateNames parent
This attribute is named `domainName` in VSD API.
"""
return self._domain_name
@domain_name.setter
def domain_name(self, value):
""" Set domain_name value.
Notes:
The name of the domain/domain template for the aclTemplateNames parent
This attribute is named `domainName` in VSD API.
"""
self._domain_name = value
@property
def source_port(self):
""" Get source_port value.
Notes:
Source port to be matched if protocol is UDP or TCP. Value can be either * or single port number or a port range
This attribute is named `sourcePort` in VSD API.
"""
return self._source_port
@source_port.setter
def source_port(self, value):
""" Set source_port value.
Notes:
Source port to be matched if protocol is UDP or TCP. Value can be either * or single port number or a port range
This attribute is named `sourcePort` in VSD API.
"""
self._source_port = value
@property
def priority(self):
""" Get priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
return self._priority
@priority.setter
def priority(self, value):
""" Set priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
self._priority = value
@property
def protocol(self):
""" Get protocol value.
Notes:
Protocol number that must be matched
"""
return self._protocol
@protocol.setter
def protocol(self, value):
""" Set protocol value.
Notes:
Protocol number that must be matched
"""
self._protocol = value
@property
def associated_l7_application_signature_id(self):
""" Get associated_l7_application_signature_id value.
Notes:
The UUID of the associated L7 Application signature
This attribute is named `associatedL7ApplicationSignatureID` in VSD API.
"""
return self._associated_l7_application_signature_id
@associated_l7_application_signature_id.setter
def associated_l7_application_signature_id(self, value):
""" Set associated_l7_application_signature_id value.
Notes:
The UUID of the associated L7 Application signature
This attribute is named `associatedL7ApplicationSignatureID` in VSD API.
"""
self._associated_l7_application_signature_id = value
@property
def associated_live_entity_id(self):
""" Get associated_live_entity_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
return self._associated_live_entity_id
@associated_live_entity_id.setter
def associated_live_entity_id(self, value):
""" Set associated_live_entity_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
self._associated_live_entity_id = value
@property
def associated_live_template_id(self):
""" Get associated_live_template_id value.
Notes:
In the draft mode, the ACL entity refers to this live entity parent. In non-drafted mode, this is null
This attribute is named `associatedLiveTemplateID` in VSD API.
"""
return self._associated_live_template_id
@associated_live_template_id.setter
def associated_live_template_id(self, value):
""" Set associated_live_template_id value.
Notes:
In the draft mode, the ACL entity refers to this live entity parent. In non-drafted mode, this is null
This attribute is named `associatedLiveTemplateID` in VSD API.
"""
self._associated_live_template_id = value
@property
def associated_traffic_type(self):
""" Get associated_traffic_type value.
Notes:
This property reflects the type of traffic in case an ACL entry is created using an Service or Service Group. In case a protocol and port are specified for the ACL entry, this property has to be empty (null). Supported values are L4_SERVICE, L4_SERVICE_GROUP and empty.
This attribute is named `associatedTrafficType` in VSD API.
"""
return self._associated_traffic_type
@associated_traffic_type.setter
def associated_traffic_type(self, value):
""" Set associated_traffic_type value.
Notes:
This property reflects the type of traffic in case an ACL entry is created using an Service or Service Group. In case a protocol and port are specified for the ACL entry, this property has to be empty (null). Supported values are L4_SERVICE, L4_SERVICE_GROUP and empty.
This attribute is named `associatedTrafficType` in VSD API.
"""
self._associated_traffic_type = value
@property
def associated_traffic_type_id(self):
""" Get associated_traffic_type_id value.
Notes:
If a traffic type is specified as Service or Service Group, then the associated Id of Service / Service Group should be specifed here
This attribute is named `associatedTrafficTypeID` in VSD API.
"""
return self._associated_traffic_type_id
@associated_traffic_type_id.setter
def associated_traffic_type_id(self, value):
""" Set associated_traffic_type_id value.
Notes:
If a traffic type is specified as Service or Service Group, then the associated Id of Service / Service Group should be specifed here
This attribute is named `associatedTrafficTypeID` in VSD API.
"""
self._associated_traffic_type_id = value
@property
def associated_virtual_firewall_rule_id(self):
""" Get associated_virtual_firewall_rule_id value.
Notes:
The ID of the Virtual Firewall Rule, if this was derived as part of the Virtual Firewall Rule creation
This attribute is named `associatedVirtualFirewallRuleID` in VSD API.
"""
return self._associated_virtual_firewall_rule_id
@associated_virtual_firewall_rule_id.setter
def associated_virtual_firewall_rule_id(self, value):
""" Set associated_virtual_firewall_rule_id value.
Notes:
The ID of the Virtual Firewall Rule, if this was derived as part of the Virtual Firewall Rule creation
This attribute is named `associatedVirtualFirewallRuleID` in VSD API.
"""
self._associated_virtual_firewall_rule_id = value
@property
def stateful(self):
""" Get stateful value.
Notes:
True means that this ACL entry is stateful, so there will be a corresponding rule that will be created by OVS in the network. False means that there is no corresponding rule created by OVS in the network.
"""
return self._stateful
@stateful.setter
def stateful(self, value):
""" Set stateful value.
Notes:
True means that this ACL entry is stateful, so there will be a corresponding rule that will be created by OVS in the network. False means that there is no corresponding rule created by OVS in the network.
"""
self._stateful = value
@property
def stats_id(self):
""" Get stats_id value.
Notes:
The statsID that is created in the VSD and identifies this ACL Template Entry. This is auto-generated by VSD
This attribute is named `statsID` in VSD API.
"""
return self._stats_id
@stats_id.setter
def stats_id(self, value):
""" Set stats_id value.
Notes:
The statsID that is created in the VSD and identifies this ACL Template Entry. This is auto-generated by VSD
This attribute is named `statsID` in VSD API.
"""
self._stats_id = value
@property
def stats_logging_enabled(self):
""" Get stats_logging_enabled value.
Notes:
Is stats logging enabled for this particular template
This attribute is named `statsLoggingEnabled` in VSD API.
"""
return self._stats_logging_enabled
@stats_logging_enabled.setter
def stats_logging_enabled(self, value):
""" Set stats_logging_enabled value.
Notes:
Is stats logging enabled for this particular template
This attribute is named `statsLoggingEnabled` in VSD API.
"""
self._stats_logging_enabled = value
@property
def ether_type(self):
""" Get ether_type value.
Notes:
Ether type of the packet to be matched. etherType can be * or a valid hexadecimal value
This attribute is named `etherType` in VSD API.
"""
return self._ether_type
@ether_type.setter
def ether_type(self, value):
""" Set ether_type value.
Notes:
Ether type of the packet to be matched. etherType can be * or a valid hexadecimal value
This attribute is named `etherType` in VSD API.
"""
self._ether_type = value
@property
def overlay_mirror_destination_id(self):
""" Get overlay_mirror_destination_id value.
Notes:
ID of the overlay mirror destination
This attribute is named `overlayMirrorDestinationID` in VSD API.
"""
return self._overlay_mirror_destination_id
@overlay_mirror_destination_id.setter
def overlay_mirror_destination_id(self, value):
""" Set overlay_mirror_destination_id value.
Notes:
ID of the overlay mirror destination
This attribute is named `overlayMirrorDestinationID` in VSD API.
"""
self._overlay_mirror_destination_id = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
## Custom methods
def is_template(self):
""" Verify that the object is a template
Returns:
(bool): True if the object is a template
"""
return True
def is_from_template(self):
""" Verify if the object has been instantiated from a template
Note:
The object has to be fetched. Otherwise, it does not
have information from its parent
Returns:
(bool): True if the object is a template
"""
return self.parent and self.rest_name != self.parent_type
|
[
"[email protected]"
] | |
1b40eb68b83d8a1ec91107ae2cbc6b3056e9faa8
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/flask-mail-labs/.venv/lib/python2.7/site-packages/simplekv/_compat.py
|
e6366d3e29f008720a7016d76636ecbd2a50fdef
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null |
UTF-8
|
Python
| false | false | 824 |
py
|
"""Helpers for python 2/3 compatibility"""
import sys
PY2 = sys.version_info[0] == 2
if not PY2:
from configparser import ConfigParser
else:
from ConfigParser import ConfigParser
if not PY2:
from urllib.parse import quote as url_quote, unquote as url_unquote
else:
from urllib import quote as url_quote
from urllib import unquote as url_unquote
if not PY2:
from urllib.parse import urlparse
else:
from urlparse import urlparse
if not PY2:
imap = map
else:
from itertools import imap
if not PY2:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
if not PY2:
import pickle
else:
try:
import cPickle as pickle
except ImportError:
import pickle
xrange = range if not PY2 else xrange
|
[
"[email protected]"
] | |
91b587a8dbdd479e11e2f57a8ed843c45a664fcd
|
b5454f3c7fcaa8cac63b2d6ac261f4edbb5ac216
|
/env/bin/wheel
|
625950aae7b8958916fe933e337043702a3f99eb
|
[] |
no_license
|
rasalt/bqsnippets
|
f3cb45f3b2fc50ebb73872ae3e56a5a4b7fc2f67
|
075cb01b4cf67b2dcd74c2c492ad8654484b670e
|
refs/heads/master
| 2020-04-01T19:07:40.506676 | 2018-10-17T23:33:17 | 2018-10-17T23:33:17 | 153,536,445 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
#!/home/rkharwar/bigq/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
9728ac841e892564c6b2a480d4bea9f443d747fc
|
42652e0f4025eed896fbe91667bd495523fd4c3b
|
/app/config/__init__.py
|
6c813f40f8bab2bce4dae58abbd9b637d3b88743
|
[] |
no_license
|
g10guang/whoyoungblog
|
47ef0a8ae1e75e422618fd59f4666287a09c4ec2
|
a3fd93bd7591700f492ae8806f7f1f2c32643b27
|
refs/heads/master
| 2022-12-15T14:44:45.476103 | 2017-11-23T11:55:41 | 2017-11-23T11:55:41 | 106,388,722 | 3 | 0 | null | 2022-12-08T00:39:06 | 2017-10-10T08:19:56 |
Python
|
UTF-8
|
Python
| false | false | 745 |
py
|
#!/usr/bin/env python3
# coding=utf-8
# author: Xiguang Liu<[email protected]>
# 2017-09-10 18:11
import os
def load_config():
"""
    Load the configuration class for the current environment.
:return:
"""
MODE = os.environ.get('BLOG_MODE')
try:
if MODE == 'PRODUCT':
            # In production, redirect the standard output and error streams to a file
import sys
f = open('std.txt', 'w')
sys.stderr = f
sys.stdout = f
from app.config.product import ProductConfig
return ProductConfig
else:
from app.config.develop import DevelopConfig
return DevelopConfig
except ImportError:
from app.config.default import Config
return Config
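# Illustrative usage (an assumption; the call site is not shown in this module):
#     config_class = load_config()
#     app.config.from_object(config_class)   # e.g. in a Flask app factory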
|
[
"[email protected]"
] | |
484c2ced2ec5392403047eef0cd58b15067fc246
|
2d6557110f59d1c28bc83d58e60bd8de8e64d693
|
/formatloader.py
|
9cec2d59dace465a76001e164fcec1ddf444695a
|
[
"MIT"
] |
permissive
|
restful-open-annotation/oa-adapter
|
8067df0227e3820ef84c493f518c80c463c2e431
|
d40f935cdef380675e1efa051ad2e1f4b8c8ae7a
|
refs/heads/master
| 2021-01-23T22:39:05.060992 | 2015-03-18T14:19:26 | 2015-03-18T14:19:26 | 31,967,902 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,097 |
py
|
#!/usr/bin/env python
"""File format support module loader.
The import and export formats supported by the adapter are determined
at runtime based on the format support "modules" found in the formats/
directory.
"""
import sys
import os
# Directory containing format modules.
FORMAT_DIRECTORY = 'formats'
# Attributes that every module should have.
# format_name: string giving the short name of the format.
# mimetypes: list of MIME types that should be associated with the format.
# from_jsonld: function rendering JSON-LD to string in the format.
# to_jsonld: function parsing string in the format to JSON-LD.
REQUIRED_ATTRIBUTES = [
'format_name',
'mimetypes',
'from_jsonld',
'to_jsonld',
]
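# Illustrative sketch of a conforming module (hypothetical example; the exact
# function signatures are an assumption based on the attribute descriptions
# above, not part of the adapter):
#
#     # formats/example_format.py
#     format_name = 'example'
#     mimetypes = ['application/x-example']
#
#     def from_jsonld(document):
#         # render a JSON-LD dict to a string in this format
#         return str(document)
#
#     def to_jsonld(text):
#         # parse a string in this format back into a JSON-LD dict
#         return {'text': text}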
def _is_valid(m, err=None):
"""Returns if the given module has all required attributes."""
if err is None:
err = sys.stderr
for a in REQUIRED_ATTRIBUTES:
try:
getattr(m, a)
except AttributeError, e:
print >> err, 'Module %s is not valid: %s' % (m.__name__, e)
return False
return True
def _is_format_module(fn):
if fn == '__init__.py':
return False
return fn.endswith('_format.py')
def _load_format_module(dir, mn):
if mn.endswith('.py'):
mn = mn[:-3]
try:
mod = __import__(dir, fromlist=[mn])
except:
raise
return getattr(mod, mn)
def load(dir=FORMAT_DIRECTORY):
"""Load format processing modules."""
# Load everything matching the naming conventions.
modules = []
for fn in (f for f in os.listdir(dir) if _is_format_module(f)):
module = _load_format_module(dir, fn)
if module is None:
continue
modules.append(module)
# Filter to exclude modules that don't have the required attributes.
valid = []
seen = set()
for module in (m for m in modules if _is_valid(m)):
if module.format_name in seen:
print >> sys.stderr, 'Duplicate format %s' % module.format_name
else:
valid.append(module)
            seen.add(module.format_name)
return valid
|
[
"[email protected]"
] | |
65dbc73ef2e946e62d80b18d9cbf68805e58637a
|
abc422f58ad053bcbb6653ba15b66e46d220a199
|
/scripts/performance/sanity.py
|
330359f2b4452e9d5401bc95c45d30a88e4ebebb
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
tungstenfabric/tf-test
|
d3efff59bca931b614d0008260b2c0881d1fc009
|
4b9eca7eb182e5530223131ecab09d3bdf366407
|
refs/heads/master
| 2023-02-26T19:14:34.345423 | 2023-01-11T08:45:18 | 2023-01-11T10:37:25 | 265,231,958 | 8 | 22 | null | 2023-02-08T00:53:29 | 2020-05-19T11:46:12 |
Python
|
UTF-8
|
Python
| false | false | 3,973 |
py
|
import os
import unittest
import fixtures
import testtools
from common.connections import ContrailConnections
from tcutils.wrappers import preposttest_wrapper
from common.contrail_test_init import ContrailTestInit
from performance.verify import PerformanceTest
class PerformanceSanity(testtools.TestCase, PerformanceTest):
def setUp(self):
super(PerformanceSanity, self).setUp()
if 'TEST_CONFIG_FILE' in os.environ:
self.input_file = os.environ.get('TEST_CONFIG_FILE')
else:
self.input_file = 'params.ini'
self.inputs = ContrailTestInit(self.input_file)
self.connections = ContrailConnections(self.inputs)
self.agent_inspect = self.connections.agent_inspect
self.quantum_h = self.connections.quantum_h
self.nova_h = self.connections.nova_h
self.vnc_lib = self.connections.vnc_lib
self.logger = self.inputs.logger
self.analytics_obj = self.connections.analytics_obj
def cleanUp(self):
self.logger.info("Cleaning up")
super(PerformanceSanity, self).cleanUp()
@preposttest_wrapper
def test_performance_netperf_within_vn_TCP_STREAM(self):
"""Check the throughput between the VM's within the same VN for TCP_STREAM"""
return self.test_check_netperf_within_vn(duration=60)
@preposttest_wrapper
def test_performance_netperf_within_vn_TCP_STREAM_with_MPLSoGRE(self):
"""Check the throughput between the VM's within the same VN for TCP_STREAM using MPLSoGRE"""
return self.test_check_netperf_within_vn(encap='MPLSoGRE', duration=60)
@preposttest_wrapper
def test_performance_netperf_within_vn_TCP_RR(self):
"""TCP Request/Response test between the VM's within the same VN"""
return self.test_check_netperf_within_vn(test_name='TCP_RR')
@preposttest_wrapper
def test_performance_netperf_within_vn_with_UDP_STREAM(self):
"""Check the throughput between the VM's within the same VN for UDP_STREAM"""
return self.test_check_netperf_within_vn(test_name='UDP_STREAM', duration=60)
@preposttest_wrapper
def test_performance_netperf_within_vn_UDP_STREAM_with_MPLSoGRE(self):
"""Check the throughput between the VM's within the same VN for UDP_STREAM using MPLSoGRE"""
return self.test_check_netperf_within_vn(encap='MPLSoGRE', duration=60)
@preposttest_wrapper
def test_performance_netperf_within_vn_UDP_RR(self):
"""UDP Request/Response test between the VM's within the same VN"""
return self.test_check_netperf_within_vn(test_name='UDP_RR')
@preposttest_wrapper
def test_performance_netperf_in_diff_vn(self):
"""Check the throughput between the VM's different VN"""
return self.test_check_netperf_within_vn(no_of_vn=2)
@preposttest_wrapper
def test_performance_ping_latency_within_vn(self):
"""Check the ping latency between the VM's within the same VN"""
return self.test_ping_latency()
@preposttest_wrapper
def test_performance_ping_latency_within_vn_icmp_flood(self):
"""Check the ping latency between the VM's within the same VN"""
return self.test_ping_latency(no_of_pkt=20)
@preposttest_wrapper
def test_flow_setup_within_vn_1000_flows(self):
"""Check the flow setup rate between the VM's within the same VN"""
return self.test_check_flow_setup_within_vn(no_of_flows=1000, dst_port_min=1000, dst_port_max=2001,
src_port_min=10000, src_port_max=10000)
@preposttest_wrapper
def test_flow_setup_within_vn_20000_flows(self):
"""Check the flow setup rate between the VM's within the same VN"""
return self.test_check_flow_setup_within_vn(no_of_flows=20000, dst_port_min=1000, dst_port_max=21000,
src_port_min=10000, src_port_max=10000)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
489a11f287a6771c194f8f5dd5b9cd086e85815e
|
c676bf5e77ba43639faa6f17646245f9d55d8687
|
/tests/ut/python/optimizer/test_debug_location.py
|
80793f37a130c8579a93c052e7e05c095ab1897f
|
[
"Apache-2.0",
"BSD-3-Clause-Open-MPI",
"MPL-2.0-no-copyleft-exception",
"LGPL-2.1-only",
"BSD-3-Clause",
"MPL-2.0",
"MPL-1.0",
"Libpng",
"AGPL-3.0-only",
"MPL-1.1",
"LicenseRef-scancode-proprietary-license",
"MIT",
"IJG",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"GPL-2.0-only",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
zhengnengjin/mindspore
|
1e2644e311f54a8bd17010180198a46499e9c88f
|
544b859bb5f46611882749088b44c5aebae0fba1
|
refs/heads/master
| 2022-05-13T05:34:21.658335 | 2020-04-28T06:39:53 | 2020-04-28T06:39:53 | 259,522,589 | 2 | 0 |
Apache-2.0
| 2020-04-28T03:35:33 | 2020-04-28T03:35:33 | null |
UTF-8
|
Python
| false | false | 5,973 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import pytest
from mindspore import context
from mindspore import Tensor, Parameter
from mindspore.nn.wrap.cell_wrapper import WithLossCell
from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager
from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell
from mindspore.ops import operations as P
from mindspore.nn.optim import Momentum
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
from mindspore.train import Model
from ....dataset_mock import MindData
from mindspore.nn.optim import Lamb
from mindspore.ops._utils import _get_broadcast_shape
from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, prim_attr_register
from mindspore.ops._grad.grad_base import bprop_getters
from mindspore.ops._grad.grad_math_ops import binop_grad_common
context.set_context(mode=context.GRAPH_MODE)
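# MockNeg.infer_dtype deliberately raises TypeError, so any graph that uses the
# operator (directly, or through MockSub's bprop during backpropagation) fails
# type inference; the tests below expect that error to surface at compile time.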
class MockNeg(PrimitiveWithInfer):
@prim_attr_register
def __init__(self):
"""init MockNeg"""
self.init_prim_io_names(inputs=['x'], outputs=['y'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
raise TypeError("InferError")
return input_x
class MockSub(PrimitiveWithInfer):
@prim_attr_register
def __init__(self):
"""init MockSub"""
self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
def infer_shape(self, x_shape, y_shape):
return _get_broadcast_shape(x_shape, y_shape)
def infer_dtype(self, x_dtype, y_dtype):
return x_dtype
@bprop_getters.register(MockSub)
def get_bprop_mock_sub(self):
"""Grad definition for `MockSub` operation."""
neg_func = MockNeg()
def bprop(x, y, out, dout):
return binop_grad_common(x, y, dout, neg_func(dout))
return bprop
class Net(nn.Cell):
def __init__(self, in_features, out_features):
super(Net, self).__init__()
self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight")
self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias")
self.matmul = P.MatMul()
self.add = P.TensorAdd()
def construct(self, input):
output = self.add(self.matmul(input, self.weight), self.bias)
return output
class NetFP16(nn.Cell):
def __init__(self, in_features, out_features):
super(NetFP16, self).__init__()
self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight")
self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias")
self.matmul = P.MatMul()
self.add = P.TensorAdd()
self.cast = P.Cast()
def construct(self, input):
output = self.cast(self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
self.cast(self.bias, mstype.float16)), mstype.float32)
return output
def get_axis(x):
shape = F.shape(x)
length = F.tuple_len(shape)
perm = F.make_range(0, length)
return perm
class MSELoss(nn.Cell):
def __init__(self):
super(MSELoss, self).__init__()
self.reduce_sum = P.ReduceSum()
self.square = P.Square()
self.reduce_mean = P.ReduceMean()
self.sub = MockSub()
def construct(self, data, label):
diff = self.sub(data, label)
return self.reduce_mean(self.square(diff), get_axis(diff))
class NegCell(nn.Cell):
def __init__(self):
super(NegCell, self).__init__()
self.neg = MockNeg()
def construct(self, x):
return self.neg(x)
class Net3(nn.Cell):
def __init__(self):
super().__init__()
self.tuple = (NegCell(), nn.ReLU())
def construct(self, x):
for op in self.tuple:
x = op(x)
return x
def test_op_forward_infererror():
input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
input_me = Tensor(input_np)
net = Net3()
with pytest.raises(TypeError) as e:
net(input_me)
class SequenceNet(nn.Cell):
def __init__(self):
super().__init__()
self.seq = nn.SequentialCell([nn.AvgPool2d(3, 1), nn.ReLU(), nn.Flatten()])
def construct(self, x):
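        # "bbb" is intentionally left undefined: resolving it raises the RuntimeError
        # checked in test_sequential_resolve_error.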
x = self.seq(x) + bbb
return x
def test_sequential_resolve_error():
input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
input_me = Tensor(input_np)
net = SequenceNet()
with pytest.raises(RuntimeError) as e:
net(input_me)
def test_compile_grad_error():
inputs = Tensor(np.ones([16, 16]).astype(np.float32))
label = Tensor(np.zeros([16, 16]).astype(np.float32))
lr = Tensor(np.ones([1], np.float32) * 0.1)
net = NetFP16(16, 16)
loss = MSELoss()
optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)
net_with_loss = WithLossCell(net, loss)
scale_manager = DynamicLossScaleManager()
update_cell = scale_manager.get_update_cell()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell)
train_network.set_train()
with pytest.raises(TypeError) as e:
train_network(inputs, label)
    print(e)
|
[
"[email protected]"
] | |
dbe7062912a362af2c02a691b4f36f395063b5d0
|
b45d66c2c009d74b4925f07d0d9e779c99ffbf28
|
/tests/integration_tests/retail_tests/implementation/retail_client_controller_test_collection.py
|
332c22c2cefad9e1f013767370ee271605807543
|
[] |
no_license
|
erezrubinstein/aa
|
d96c0e39762fe7aaeeadebbd51c80b5e58576565
|
a3f59ba59519183257ed9a731e8a1516a4c54b48
|
refs/heads/master
| 2021-03-12T23:44:56.319721 | 2016-09-18T23:01:17 | 2016-09-18T23:01:17 | 22,665,501 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,726 |
py
|
from __future__ import division
from tests.integration_tests.framework.svc_test_collection import ServiceTestCollection
from retail.v010.data_access.controllers.client_controller import ClientController
from retail.v010.data_access.controllers.user_controller import UserController
from retail.v010.data_access.retail_data_helper import RetailDataHelper
class RetailClientControllerTestCollection(ServiceTestCollection):
test_user_start = 456
test_client_start = 456
test_user_counter = 0
test_client_counter = 0
def initialize(self):
self.user_controller = UserController()
self.client_controller = ClientController()
self.retail_data_helper = RetailDataHelper(self.config, self.user_controller)
self.retail_data_helper.add_default_data()
def setUp(self):
self.__get_default_users()
def tearDown(self):
pass
@classmethod
def increment_test_user_counter(cls):
cls.test_user_counter += 1
@classmethod
def increment_test_client_counter(cls):
cls.test_client_counter += 1
##------------------------------------ Tests --------------------------------------##
def test_create_client(self):
client = self.__create_test_client()
correct_dict = {
'user_emails': [],
'description': u'company set out to take over the world',
'contact_email': u'[email protected]',
'contact_name': u'Thomas Aquinas',
'contact_phone': u'555-123-1234'
}
self.test_case.assertDictContainsSubset(correct_dict, client)
def test_get_client(self):
client = self.user_controller.Client.get('Signal Data')
self.test_case.assertDictContainsSubset(self.client_signal, client.serialize())
def test_find_client(self):
client = self.user_controller.Client.find(name='Signal Data')
self.test_case.assertDictContainsSubset(self.client_signal, client.serialize())
def test_find_clients(self):
client = self.__create_test_client()
clients = self.user_controller.Client.find_all(name=client["name"])
self.test_case.assertEqual(len(clients), 1)
def test_update_client(self):
client = self.__create_test_client(serialize=False)
update_dict = {
'name': 'Arnold Schwarzenegger',
'description': "Oh, you think you're bad, huh? You're a ******* choir boy compared to me! A CHOIR BOY!",
'contact_name': 'Jericho Cane',
'contact_email': '[email protected]',
'contact_phone': '555-9922342342342343242313'
}
self.client_controller.update_client('[email protected]', client["name"], update_dict)
updated_client = self.client_controller.Client.get(update_dict['name'])
self.test_case.assertDictContainsSubset(update_dict, updated_client.serialize())
def test_delete_client(self):
# create blue shift client
client = self.__create_test_client()
# create user to add to client so we can test that deleting a client doesn't delete the users in its list
ali_g = self.__create_test_user(client_name=client["name"])
# delete client, make sure the user for ali g still exists
self.client_controller.delete_client(self.user_admin['email'], client["name"])
client = self.client_controller.Client.get(client["name"])
self.test_case.assertIsNone(client)
ali_g = self.client_controller.User.get(ali_g["email"])
self.test_case.assertIsNone(ali_g)
##------------------------------------ Private helpers --------------------------------------##
def __get_default_users(self):
self.user_admin = self.user_controller.User.get("[email protected]", serialize=True)
self.client_signal = self.user_controller.Client.get("Signal Data", serialize=True)
self.role_user = self.user_controller.Role.get('user', serialize=True)
self.role_client_support = self.user_controller.Role.get('client_support', serialize=True)
def __create_test_user(self, client_name, actor_email='[email protected]', serialize=True):
password = 'yoyoyoyo%s' % (self.test_user_counter + self.test_user_start)
user_dict = {
'name': "test_user_%s" % (self.test_user_counter + self.test_user_start),
'email': "test_email_%[email protected]" % (self.test_user_counter + self.test_user_start),
'password': password,
'active': True,
'client': client_name,
'retail_access': True,
'retailer_access': False,
'roles': ['user']
}
user = self.user_controller.create_user(actor_email, user_dict, serialize=False)
user.update(active=True, password=user_dict["password"])
updated_user = self.user_controller.User.get(user.email, serialize=False)
self.increment_test_user_counter()
# Return unhashed password separately, because it's not returned in user object
return updated_user.serialize() if updated_user and serialize else updated_user
def __create_test_client(self, actor_email='[email protected]', serialize=True):
client_dict = {
'name': 'test_client_%s' % (self.test_client_counter + self.test_client_start),
'description': 'company set out to take over the world',
'contact_name': 'Thomas Aquinas',
'contact_email': '[email protected]',
'contact_phone': '555-123-1234'
}
client = self.client_controller.create_client(actor_email, client_dict, serialize=serialize)
self.increment_test_client_counter()
return client
|
[
"[email protected]"
] | |
91d65a5caf20739fb4868145dd7e221f99f2e082
|
0eb599c3bbfa6e5b31516913b88cc9db3a1311ce
|
/ARC/arc097c.py
|
33bff188d24ca2f5e1af4718c9b4e6b11b22207b
|
[] |
no_license
|
Linus-MK/AtCoder
|
5b84dc88c2d2773d0f97ed18265d303290da7879
|
a587e89a9e0c2ab4d36b09176bcc95e901e14326
|
refs/heads/master
| 2022-11-25T05:37:12.148722 | 2022-11-17T16:04:10 | 2022-11-17T16:04:10 | 169,840,698 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
s = input()
k = int(input())
substring_list = []
for i in range(1, 5+1):
for start in range(len(s) - i + 1):
substring_list.append(s[start:start+i])
# print(substring_list)
substring_list = sorted(set(substring_list))
print(substring_list[k-1])
# https://stackoverflow.com/questions/2931672/what-is-the-cleanest-way-to-do-a-sort-plus-uniq-on-a-python-list
# sortしてuniqueする場合は一回setを使うとよい
|
[
"[email protected]"
] | |
4698b4a9a39667193ded9513a42affd938a64226
|
7b0ede7aa01e12a1bbdb1c51caef934005e91bc2
|
/02: Python Packages for Pyramid Applications/package/setup.py
|
785885b93c11cade184223e5b40393a38cb2a3ee
|
[] |
no_license
|
werberth/pyramid-tutorial
|
0a21e124b025a6e2e3ddc8c0ef4444d61ca13a0e
|
1379adf7a16691c6aa28fa6206bbaf0d6eaf9924
|
refs/heads/master
| 2021-05-01T12:21:45.222909 | 2018-02-12T05:52:13 | 2018-02-12T05:52:13 | 121,064,440 | 0 | 0 | null | 2018-02-12T05:52:14 | 2018-02-10T23:40:33 |
Python
|
UTF-8
|
Python
| false | false | 122 |
py
|
from setuptools import setup
requires = [
'pyramid',
]
setup(
name='tutorial',
install_requires=requires,
)
|
[
"[email protected]"
] | |
8f3ea69126b695c4a7334a63b869e3810b969865
|
fcd64a87118a8c1e060449d8fd5b02034ac3dea7
|
/test/test_payments_search_body.py
|
c4e5d43d4fe46a88ccc618f278f0274b2083b92f
|
[] |
no_license
|
carlosgalvez-tiendeo/python-paycomet_client
|
2b68e4e1f7cfbab81d50357513f79753cf8c2f0e
|
71f1fe29495ce67e37aaed4ecc9acf5994de011a
|
refs/heads/master
| 2023-08-03T02:27:50.857164 | 2021-06-16T13:04:46 | 2021-06-16T13:04:46 | 377,492,186 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 937 |
py
|
# coding: utf-8
"""
PAYCOMET REST API
PAYCOMET API REST for customers. # noqa: E501
OpenAPI spec version: 2.28.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import paycomet_client
from paycomet_client.models.payments_search_body import PaymentsSearchBody # noqa: E501
from paycomet_client.rest import ApiException
class TestPaymentsSearchBody(unittest.TestCase):
"""PaymentsSearchBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPaymentsSearchBody(self):
"""Test PaymentsSearchBody"""
# FIXME: construct object with mandatory attributes with example values
# model = paycomet_client.models.payments_search_body.PaymentsSearchBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
d774292b3bb342a201bdfb191b2b2bb3c62edcb1
|
f216fec073bcb94d34fadf3b149f6ad8e0541198
|
/scorestatistics_ui.py
|
d815d8c4a97c198a142806d8765979e797604d6d
|
[] |
no_license
|
liuyuhang791034063/ScoreSystem
|
44a6742a72a34e1673c46f3b00e2cdfd6553979b
|
ecb53a6dc2ae490ddc1028aa67b99a187fe366d9
|
refs/heads/master
| 2020-03-24T20:43:21.746962 | 2018-09-20T09:49:58 | 2018-09-20T09:49:58 | 142,993,767 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,276 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'scorestatistics.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Score(object):
def setupUi(self, Score):
Score.setObjectName("Score")
Score.resize(537, 412)
self.tableWidget = QtWidgets.QTableWidget(Score)
self.tableWidget.setGeometry(QtCore.QRect(50, 110, 421, 241))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.label = QtWidgets.QLabel(Score)
self.label.setGeometry(QtCore.QRect(40, 50, 461, 31))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.retranslateUi(Score)
QtCore.QMetaObject.connectSlotsByName(Score)
def retranslateUi(self, Score):
_translate = QtCore.QCoreApplication.translate
Score.setWindowTitle(_translate("Score", "成绩统计"))
self.label.setText(_translate("Score", "TextLabel"))
|
[
"[email protected]"
] | |
45625ca3a9f5576911b84937df1bdec8576d9c17
|
a80874300e561174068bf510608465bb318a35f2
|
/guhaisong/bidding/bidding/028_fujian_gov_buy.py
|
5b933d388245a24483c0b86af9b54deb92f63cb0
|
[] |
no_license
|
lemonbiz/guhaisong
|
effa8af4b679511e4fa8017d71fe26ab2ce51392
|
029890f8e3c6954efdefb184fa077f2ce646d1df
|
refs/heads/master
| 2022-12-13T08:21:37.911535 | 2020-09-15T16:15:10 | 2020-09-15T16:15:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,584 |
py
|
import gevent
from gevent import monkey; monkey.patch_all()
import requests
from lxml import etree
import threading
import datetime
import hashlib
import pymongo
from utils.zb_storage_setting import StorageSetting
from utils.redis_tool import Rdis_Queue
import re
from utils.cpca import *
class GovBuy(object):
    '''Spider for the Fujian provincial government procurement website (fjzfcg.gov.cn).'''
def __init__(self,source,base_url, all_page):
name = 'fujian_cz_fjzfcg_gov_cn'
self.coll = StorageSetting(name)
self.collection = self.coll.find_collection
self.headers = {
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Referer': 'http://cz.fjzfcg.gov.cn/3500/noticelist/d03180adb4de41acbb063875889f9af1/?page=1',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh,zh-CN;q=0.9',
}
self.session = requests.session()
self.source = source
self.base_url = base_url
self._all_page = all_page
self.rq = Rdis_Queue(host='localhost', dblist='fujian_list1', dbset='fujian_set1')
def is_running(self):
is_runing = True
if self.rq.r_len() == 0 and len (self.rq.rset_info()) > 0:
return False
else:
return is_runing
def hash_to_md5(self, sign_str):
m = hashlib.md5()
sign_str = sign_str.encode('utf-8')
m.update(sign_str)
sign = m.hexdigest()
return sign
def now_time(self):
time_stamp = datetime.datetime.now()
return time_stamp.strftime('%Y-%m-%d %H:%M:%S')
def save_to_mongo(self,result_dic):
self.coll.saves(result_dic)
self.is_running()
def get_area(self,pro, strs):
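        # Use the cpca library to extract a "province-city" string from free text;
        # fall back to the given province name when nothing can be parsed.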
location_str = [strs]
try:
df = transform(location_str, umap={})
            area_str = re.sub(r'省|市', '-', re.sub(r'省市区0', '', re.sub(r'\r|\n|\s', '', str(df))))
except:
pass
else:
if area_str == '':
area_li = [pro]
else:
area_li = (area_str.split('-'))
if len(area_li) >=2 and area_li[1] !='':
return '-'.join(area_li[:2])
else:
return area_li[0]
def load_get_html(self,tr):
if tr == None:
return
try:
selector_tr = etree.HTML(str(tr))
url = self.source + selector_tr.xpath('//tr/td[4]/a/@href')[0]
# print(url)
response = requests.get(url=url, headers=self.headers).text
selector = etree.HTML(response)
except Exception as e:
print('laod_get_html error:{}'.format(e))
else:
title = selector_tr.xpath('//tr/td[4]/a/text()')
if title != []:
title = title[0]
# try:
# status = re.search(r'[\u4e00-\u9fa5]{2}公告$', title).group()
# except:
# status = '公告'
else:
title = None
# status = '公告'
status = selector_tr.xpath('//tr/td[2]/text()')
if status != []:
status = status[0]
else:
status =None
# print(title)
# print(status)
_id = self.hash_to_md5(url)
publish_date = selector_tr.xpath('//tr/td[5]/text()')
if publish_date != []:
publish_date = publish_date[0]
# publish_date = re.search(r'(\d{4}\-\d+\-\d+)',''.join(publish_date)).group()
else:
publish_date = None
# print(publish_date)
aaa = selector_tr.xpath('//tr/td[1]/text()')
if aaa != []:
aaa = aaa[0]
else:
aaa = '福建'
area_name = self.get_area('福建',aaa )
print(area_name)
source = self.source
table = selector.xpath('//*[@id="print-content"]')[0]
content_html = etree.tostring(table, encoding="utf-8", pretty_print=True, method="html").decode('utf-8')
retult_dict = dict()
retult_dict['_id'] = _id
retult_dict['title'] = title
retult_dict['status'] = status
retult_dict['area_name'] = area_name
retult_dict['source'] = 'http://117.27.88.250:9306/'
retult_dict['publish_date'] = publish_date
retult_dict['detail_url'] = url
retult_dict['content_html'] = str(content_html)
retult_dict['create_time'] = self.now_time()
retult_dict['zh_name'] = '福建省政府采购网'
retult_dict['en_name'] = 'Fujian Province Government Procurement'
# print(retult_dict)
# print('列表长度为={}'.format(self.rq.r_len()))
self.save_to_mongo(retult_dict)
def load_get(self, page):
try:
params = {
'page':str(page),
}
url = self.base_url + 'noticelist/d03180adb4de41acbb063875889f9af1/'
print(url)
response = requests.get(url=url, headers=self.headers,params=params).text
selector = etree.HTML(response)
except:
print('load_post error')
# self.load_get(page)
else:
            print('Page {}'.format(page))
tr_ele_li = selector.xpath('//div[@class="wrapTable"]/table/tbody/tr')
for tr_ele in tr_ele_li:
tr = etree.tostring(tr_ele, pretty_print=True,encoding='utf-8',method='html').decode('utf-8')
self.load_get_html(tr)
# if not self.rq.in_rset(urls):
# self.rq.add_to_rset(urls)
# self.rq.pull_to_rlist(urls)
def init(self):
count = 8
while self.is_running():
if self.rq.r_len() <= count:
count = 1
try:
spawns = [gevent.spawn(self.load_get_html, self.rq.get_to_rlist()) for i in range(count)]
gevent.joinall(spawns)
except Exception as e:
print(e)
def run(self):
# threading.Thread(target=self.init).start()
task_li = [
# {'all_page': 9111},
{'all_page': self._all_page},
]
count = 4
for task in task_li:
for page in range(1, task['all_page'] + 1, count):
try:
# self.load_get(page)
spawns = [gevent.spawn(self.load_get, page + i) for i in range(count)]
gevent.joinall(spawns)
# print('第{}页'.format(page))
except Exception as e:
print(e)
def main(self):
self.run()
if __name__ == '__main__':
task_url_li = [
# 福建
{'source': 'http://cz.fjzfcg.gov.cn/', 'base_url':'http://cz.fjzfcg.gov.cn/3500/','all_page':3},
# 福州
{'source': 'http://117.27.88.250:9306/','base_url': 'http://117.27.88.250:9306/350100/', 'all_page':3},
# 厦门
{'source': 'http://202.109.244.105:8090/','base_url': 'http://202.109.244.105:8090/350200/', 'all_page':3},
# 莆田
{'source': 'http://27.155.99.14:9090/', 'base_url': 'http://27.155.99.14:9090/350300/', 'all_page': 3},
# 三明市
{'source': 'http://test.smzfcg.gov.cn:8090/', 'base_url': 'http://test.smzfcg.gov.cn:8090/350400/', 'all_page': 3},
# 泉州
{'source': 'http://61.131.58.48/', 'base_url': 'http://61.131.58.48/350500/', 'all_page': 3},
# 漳州
{'source': 'http://zz.fjzfcg.gov.cn/', 'base_url': 'http://zz.fjzfcg.gov.cn/350600/', 'all_page': 3},
# 南平市
{'source': 'http://np.fjzfcg.gov.cn:8090/', 'base_url': 'http://np.fjzfcg.gov.cn:8090/350700/', 'all_page': 3},
# 龙岩市
{'source': 'http://222.78.94.11/', 'base_url': 'http://222.78.94.11/350800/', 'all_page': 3},
# 宁德
{'source': 'http://218.5.222.40:8090/', 'base_url': 'http://218.5.222.40:8090/350900/', 'all_page': 3},
]
for task_city in task_url_li:
gb = GovBuy(task_city['source'], task_city['base_url'], task_city['all_page'])
gb.main()
|
[
"[email protected]"
] | |
d5f632db1dc076d3677f639263dbe4cc575ad8da
|
a62fdd0beb6c47cc704c1192b68b0bcfcd024304
|
/Python/I/07-CURSORS/1/form.py
|
12e820ffa975daf039e565eaa2572d6436bb302e
|
[] |
no_license
|
a6461/Qt-PyQt
|
da1895b4faccda80b8079ecdca79f1ea525daa0a
|
404bd7fbbc432ebeaa1a486fc8e005d47aed9cfd
|
refs/heads/master
| 2020-03-14T22:16:48.714825 | 2018-06-12T20:45:58 | 2018-06-12T20:45:58 | 131,817,506 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,292 |
py
|
from ui_form import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class Form(Ui_Form, QWidget):
names = []
cursors = []
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
self.setFixedSize(self.size())
self.names = self.enumToStr(Qt, Qt.CursorShape)
self.cursors = [QCursor(Qt.CursorShape(i))
for i in range(len(self.names))]
self.pushButton.setProperty(
'tag', self.names.index('ArrowCursor'))
def enumToStr(self, namespace, enum):
names = {}
for value in dir(namespace):
key = getattr(namespace, value)
if isinstance(key, enum):
names[key] = value
names = [value for (key, value) in sorted(names.items())]
return names
def on_pushButton_mousePressed(self, event):
k = self.pushButton.property('tag')
c = len(self.names)
if event.buttons() == Qt.LeftButton:
k = (k + 1) % c
elif event.buttons() == Qt.RightButton:
k = (k - 1 + c) % c
self.pushButton.setText(self.names[k])
self.pushButton.setCursor(self.cursors[k])
self.pushButton.setProperty('tag', k)
|
[
"[email protected]"
] | |
f18e9d3496ede63fe418afc7bf811b6426910845
|
74dff48428867fd99ab780a96357619285cadf73
|
/finch-collector-master/finchcollector/settings.py
|
93fd1a44be8f4df50c9d949892b975b5e8579703
|
[] |
no_license
|
HermanSidhu96/finchcollector
|
28ed65455dc139a23e27eeddd9939fa58d6c69ea
|
561751bdbe77537cf341bbbb7d180ac327428e83
|
refs/heads/master
| 2022-12-15T10:00:03.121582 | 2020-09-16T02:50:18 | 2020-09-16T02:50:18 | 295,905,995 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,087 |
py
|
"""
Django settings for finchcollector project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wn^p5^p@1=1$t2l0aywg2&ay-&-mwjqwtjm)83w@b^ojs!_%)('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'main_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'finchcollector.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'finchcollector.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'finchcollector',
'USER': 'postgres',
'PASSWORD' : ''
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
88ce8bab5ece0b9764f01300ba6eab12ebc66fd0
|
e8db7be2278994660a1cb1cde3ac479d802b5b05
|
/my_datasets/zsre_with_description.py
|
e6687188be4bc399c3ea178667e0d23a9d40a016
|
[
"MIT"
] |
permissive
|
INK-USC/hypter
|
4cbf8929bc49d01e648197381c94c22fea233b95
|
732551e1e717b66ad26ba538593ed184957ecdea
|
refs/heads/main
| 2023-03-28T00:27:53.887999 | 2021-04-02T04:37:52 | 2021-04-02T04:37:52 | 318,406,884 | 13 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,781 |
py
|
import os
import json
import re
import string
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from .utils import MyQADataset, MyDataLoader
from .zsre import ZSREData
from .zsre_relations import ZSRE_RELATIONS
class ZSREWithDescriptionData(ZSREData):
def load_dataset(self, tokenizer, do_return=False):
self.tokenizer = tokenizer
postfix = 'Withdescription-' + tokenizer.__class__.__name__.replace("zer", "zed")
preprocessed_path = os.path.join(
"/".join(self.data_path.split("/")[:-1]),
self.data_path.split("/")[-1].replace(".json", "-{}.json".format(postfix)))
if self.load and os.path.exists(preprocessed_path):
# load preprocessed input
self.logger.info("Loading pre-tokenized data from {}".format(preprocessed_path))
with open(preprocessed_path, "r") as f:
input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, \
metadata = json.load(f)
else:
print("Start tokenizing ... {} instances".format(len(self.data)))
questions = [add_description(d["input"]) for d in self.data]
if self.data_type != "test":
answers = [[item["answer"] for item in d["output"]] for d in self.data]
else:
answers = [['TEST_NO_ANSWER'] for d in self.data]
answers, metadata = self.flatten(answers)
if self.args.do_lowercase:
questions = [question.lower() for question in questions]
answers = [answer.lower() for answer in answers]
if self.args.append_another_bos:
questions = ["<s> "+question for question in questions]
answers = ["<s> " +answer for answer in answers]
print(questions[:10])
print(answers[:10])
print("Tokenizing Input ...")
question_input = tokenizer.batch_encode_plus(questions,
pad_to_max_length=True,
max_length=self.args.max_input_length)
print("Tokenizing Output ...")
answer_input = tokenizer.batch_encode_plus(answers,
pad_to_max_length=True)
input_ids, attention_mask = question_input["input_ids"], question_input["attention_mask"]
decoder_input_ids, decoder_attention_mask = answer_input["input_ids"], answer_input["attention_mask"]
if self.load:
preprocessed_data = [input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
metadata]
with open(preprocessed_path, "w") as f:
json.dump([input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
metadata], f)
self.dataset = MyQADataset(input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
in_metadata=None, out_metadata=metadata,
is_training=self.is_training)
self.logger.info("Loaded {} examples from {} data".format(len(self.dataset), self.data_type))
if do_return:
return self.dataset
def add_description(input_str):
split_idx = input_str.index('[SEP]')
rel_name = input_str[split_idx+6:]
description = ZSRE_RELATIONS[rel_name]["description"]
return "{} [SEP] description: {}".format(input_str, description)
def get_accuracy(prediction, groundtruth):
if type(groundtruth)==list:
if len(groundtruth)==0:
return 0
return np.max([int(prediction==gt) for gt in groundtruth])
    return int(prediction==groundtruth)
def get_exact_match(prediction, groundtruth):
if type(groundtruth)==list:
if len(groundtruth)==0:
return 0
return np.max([get_exact_match(prediction, gt) for gt in groundtruth])
return (normalize_answer(prediction) == normalize_answer(groundtruth))
def normalize_answer(s):
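    # SQuAD-style answer normalization: lowercase, drop punctuation and English articles, collapse whitespace.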
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
|
[
"[email protected]"
] | |
5d86d8330729cfa6f01aaf6860470f7b01e20f0b
|
2bd3b981412f81ff6d5085394aad71258bed3c1b
|
/hw3/scripts/validate_svm_fusion.py
|
fdd4ac3584a4ca43f66baa2ed4b2acc84beaf37a
|
[] |
no_license
|
JayC1208/11-775-Large-Scale-Multimedia-Analysis
|
e4d8e685cca437a6431d2f531e25d80bb1ceeeb9
|
ca88fa2c0701477e3fdb424374ba6cb1b9126c43
|
refs/heads/master
| 2023-08-30T21:19:09.110017 | 2017-03-16T02:52:38 | 2017-03-16T02:52:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,947 |
py
|
from sklearn.metrics import roc_curve
from sklearn.calibration import CalibratedClassifierCV
from sklearn import preprocessing
import commands
import numpy as np
import os
from sklearn.svm.classes import SVC
from sklearn.svm import LinearSVC
import cPickle
import sys
import pickle
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
import itertools
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from brew.base import Ensemble, EnsembleClassifier
from brew.stacking.stacker import EnsembleStack, EnsembleStackClassifier
from brew.combination.combiner import Combiner
def import_imtraj_txt(file_path):
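    # Expand one line of sparse "index:value" features into a dense 32768-dimensional vector.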
imtraj_line = (open(file_path,"r").readlines()[0]).strip()
fields = imtraj_line.split(' ')
field_tuples = [x.split(':') for x in fields]
# The sparse vector position is 1 based
num_fields = [ (int(x[0])-1, np.float(x[1])) for x in field_tuples]
new_vect = np.zeros(32768)
for field_id, field_val in num_fields:
new_vect[field_id] = field_val
return new_vect
if __name__ == '__main__':
if len(sys.argv) != 6:
print "Usage: {0} model_file feat_dir event_name round_num output_file".format(sys.argv[0])
print "output_file path to save the prediction score"
print "event_name P001/2/3"
print "round_num 0,1,2?"
print "feat_dir -- dir of feature files"
print "model_file reads from the svm model train_file_P00X_Xround_imtraj/SIFT/CNN"
exit(1)
model_file = sys.argv[1]
feat_dir = sys.argv[2]
event_name=sys.argv[3]
round_num = int(sys.argv[4])
output_file = sys.argv[5]
pipe_lrSVC=pickle.load(open(model_file+'.pickle','rb'))
test_list="list/"+event_name+"_validation_"+str(round_num)
X=np.asarray([])
count=0
for line in open(test_list,"r"):
count=count+1
# if count%100==0:
# print count
audio_name=line.split(" ")[0]
label=line.split(" ")[1].split("\n")[0]
if "imtraj" in feat_dir:
feat_vec=import_imtraj_txt(feat_dir+audio_name+".spbof")
else:
feat_vec=np.genfromtxt(feat_dir+audio_name,delimiter=";")
if len(X)==0:
X=[feat_vec]
else:
X=np.append(X,[feat_vec],axis=0)
Y=pipe_lrSVC.predict_proba(preprocessing.scale(X))
groundtruth_label="list/"+event_name+"_validation_label_"+str(round_num)
Y_truth=[]
for line in open(groundtruth_label,"r"):
Y_truth+=[int(line.strip())]
fclassification_write=open(output_file.replace("pred/","classification/"),"w")
Y_discrete=pipe_lrSVC.predict(preprocessing.scale(X))
#print Y_discrete
#Y_discrete=[1 if y[1]>y[0] else 0 for y in Y_discrete]
for i in range(len(Y_discrete)):
fclassification_write.write(str(Y_discrete[i])+"\n")
fclassification_write.close()
fwrite=open(output_file,"w")
for i in range(len(Y)):
fwrite.write(str(Y[i][1])+"\n")
fwrite.close()
ap_output=commands.getstatusoutput("ap "+groundtruth_label+" "+output_file)
print model_file.split(".")[1]+" 3 FOLD ROUND "+str(round_num)+" CROSS VALIDATION RESULT MAP: "+ap_output[1].split(": ")[1]
print model_file.split(".")[1]+" 3 FOLD ROUND "+str(round_num)+" CROSS VALIDATION RESULT CLASS ACCURACY: "+str(accuracy_score(Y_truth,Y_discrete))
print model_file.split(".")[1]+" 3 FOLD ROUND "+str(round_num)+" CROSS VALIDATION RESULT TRUE POSITIVE RATE: "+str(recall_score(Y_truth,Y_discrete))
CM=confusion_matrix(Y_truth,Y_discrete)
# print str(CM[1][1]*1.0/(CM[1][1]+CM[1][0]))
print model_file.split(".")[1]+" 3 FOLD ROUND "+str(round_num)+" CROSS VALIDATION RESULT TRUE NEGATIVE RATE: "+str(CM[0][0]*1.0/(CM[0][0]+CM[0][1]))
|
[
"[email protected]"
] | |
9e5ee3e62bcf6a844290cd40365ca82a1e9a8db3
|
ba6d11be23574d98210a6ea8df02cbc8afae325e
|
/tokenizers/javalex/javasmartlex.py
|
36664977def81a3d4d1d13a1ef928550377cb5f4
|
[
"MIT"
] |
permissive
|
sayon/ignoreme
|
921596af5645731fa66a7ec31c11992deecd28e2
|
d3d40c2eb3b36d1ea209abb9a710effba3d921c3
|
refs/heads/master
| 2020-11-26T15:27:41.567436 | 2014-11-07T14:59:06 | 2014-11-07T14:59:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,336 |
py
|
__author__ = 'novokonst'
import ply.lex as lex
JAVA_KEYWORDS = [
'abstract'
, 'assert'
, 'boolean'
, 'break'
, 'byte'
, 'case'
, 'catch'
, 'char'
, 'class'
, 'const'
, 'continue'
, 'default'
, 'do'
, 'double'
, 'else'
, 'enum'
, 'extends'
, 'final'
, 'finally'
, 'float'
, 'for'
, 'goto'
, 'if'
, 'implements'
, 'import'
, 'instanceof'
, 'int'
, 'interface'
, 'long'
, 'native'
, 'new'
, 'package'
, 'private'
, 'protected'
, 'public'
, 'return'
, 'short'
, 'static'
, 'strictfp'
, 'super'
, 'switch'
, 'synchronized'
, 'this'
, 'throw'
, 'throws'
, 'transient'
, 'try'
, 'void'
, 'volatile'
, 'while'
]
class JavaTokenizer:
MY_KEYWORDS = JAVA_KEYWORDS
RESERVED = {kw: kw for kw in MY_KEYWORDS}
tokens = RESERVED.values() + [
'ID'
, 'STRING_LITERAL'
, 'NUMBER'
, 'ANNOTATION'
, 'COMMENT'
, 'LINE_COMMENT'
, 'MULTI_COMMENT_LEFT'
, 'MULTI_COMMENT_RIGHT'
]
def check_comment(fn):
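        # Decorator: while inside an unterminated /* ... */ block, report every token as COMMENT
        # instead of its normal type.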
def wrapped(self, t):
if self.nested_comment:
t.type = 'COMMENT'
return t
else:
return fn(self, t)
wrapped.__doc__ = fn.__doc__
return wrapped
@check_comment
def t_ID(self, t):
t.type = self.__class__.RESERVED.get(t.value, 'ID')
return t
@check_comment
def t_STRING_LITERAL(self, t):
return t
@check_comment
def t_NUMBER(self, t):
return t
@check_comment
def t_ANNOTATION(self, t):
return t
def t_LINE_COMMENT(self, t):
t.type = 'COMMENT'
return t
def t_MULTI_COMMENT_LEFT(self, t):
self.nested_comment += 1
t.type = 'COMMENT'
return t
def t_MULTI_COMMENT_RIGHT(self, t):
self.nested_comment -= 1
t.type = 'COMMENT'
return t
t_ignore = ' \t'
def t_error(self, t):
# self.skipped.append(t.value)
t.lexer.skip(1)
def __init__(self, **kwargs):
        self.t_ID.__func__.__doc__ = r'[a-zA-Z_][a-zA-Z0-9_]*'
        self.t_STRING_LITERAL.__func__.__doc__ = r'\'.*\''
        self.t_NUMBER.__func__.__doc__ = r'\d+'
        self.t_ANNOTATION.__func__.__doc__ = r'@[a-zA-Z_][a-zA-Z0-9_]*'
self.t_LINE_COMMENT.__func__.__doc__ = r'//.*'
self.t_MULTI_COMMENT_LEFT.__func__.__doc__ = r'/\*.*'
self.t_MULTI_COMMENT_RIGHT.__func__.__doc__ = r'.*\*/'
self.skipped = []
self.nested_comment = 0
self.lexer = lex.lex(module=self, **kwargs)
def refresh(self):
self.skipped = []
self.nested_comment = 0
def tokenize(self, data):
self.lexer.input(data)
self.out_token_dict = {}
while True:
tok = self.lexer.token()
if not tok: break
self.out_token_dict[tok.type] = self.out_token_dict.get(tok.type, [])
self.out_token_dict[tok.type].append(tok)
return self.out_token_dict
def keywords_ex_stats(self, extra_type_list=[]):
keys = JavaTokenizer.MY_KEYWORDS + extra_type_list
return {k: self.out_token_dict.get(k, []) for k in keys}
|
[
"[email protected]"
] | |
7ff4e3512240a49138a5b303e8e9525d6c638e6d
|
d47067156da51bfed44ae4e465f9ac9831c4138a
|
/app.py
|
2ca5a247d1c78747c6151593d111101b0d8f4b31
|
[] |
no_license
|
sylvaus/datawebserver
|
264398a1324fafef78980c7c0f85f4c5f4ae37b0
|
13cdb31302b7c0a7ff1e9cb7a3c0709caeaf166e
|
refs/heads/master
| 2020-04-28T00:59:16.070904 | 2019-03-10T21:16:45 | 2019-03-10T21:16:45 | 174,837,189 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,340 |
py
|
import atexit
import configparser
import socket
from multiprocessing import Queue
from threading import Thread
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
from data_server.data_dispatcher import DataDispatcher
from data_server.data_server import DataServer, ThreadedTCPRequestHandler
from data_server.rolling_data_base import RollingDataBase
config = configparser.ConfigParser()
config.read("app.ini")
app = Flask(__name__)
app.config['SECRET_KEY'] = config.get("server.graphs", "secret_key")
socket_io = SocketIO(app)
# Configuring and loading database
database = RollingDataBase(config.get("database", "db_folder"),
auto_save_s=config.getint("database", "auto_save_s"))
database.load()
# Configuring DataServer
if config.getboolean("server.data", "auto_host"):
data_server_host = socket.gethostbyname(socket.gethostname())
else:
    data_server_host = config.get("server.data", "host")  # host is a string option, not a boolean
data_server_port = config.getint("server.data", "port")
data_queue = Queue()
data_server = DataServer((data_server_host, data_server_port),
ThreadedTCPRequestHandler,
data_queue=data_queue)
def dispatch_update(data):
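    # Push each (name, (x, y)) sample to connected web clients and persist it in the rolling database.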
name, x_y = data
socket_io.emit("data_update", [name, x_y])
database.add(name, x_y)
data_dispatcher = DataDispatcher(data_queue, dispatch_update)
@socket_io.on('request_initial_values')
def give_initial_params():
emit("initial_values", database.get_all())
@app.route('/')
def main_page():
return render_template('index.html')
@atexit.register
def closing_resources():
print("Closing resources")
data_server.shutdown()
data_dispatcher.stop()
database.stop_auto_save()
database.save()
if __name__ == '__main__':
data_server_thread = Thread(target=data_server.serve_forever)
data_server_thread.start()
data_dispatcher_thread = Thread(target=data_dispatcher.run)
data_dispatcher_thread.start()
if config.getboolean("server.graphs", "auto_host"):
graph_server_host = socket.gethostbyname(socket.gethostname())
else:
        graph_server_host = config.get("server.graphs", "host")  # host is a string option, not a boolean
graph_server_port = config.getint("server.graphs", "port")
socket_io.run(app, host=graph_server_host, port=graph_server_port)
|
[
"[email protected]"
] | |
7075ce150d7cf38c81d8f6f634467432440a3571
|
70e970ce9ec131449b0888388f65f0bb55f098cd
|
/SignalMC/python/pythia8/AMSB_gluinoToChargino_M-1000GeV_M-700GeV_CTau-100cm_TuneCP5_13TeV_pythia8_cff.py
|
d064fbc412960620a79e425873c0269cded2752f
|
[] |
no_license
|
OSU-CMS/DisappTrks
|
53b790cc05cc8fe3a9f7fbd097284c5663e1421d
|
1d1c076863a9f8dbd3f0c077d5821a8333fc5196
|
refs/heads/master
| 2023-09-03T15:10:16.269126 | 2023-05-25T18:37:40 | 2023-05-25T18:37:40 | 13,272,469 | 5 | 12 | null | 2023-09-13T12:15:49 | 2013-10-02T13:58:51 |
Python
|
UTF-8
|
Python
| false | false | 8,096 |
py
|
COM_ENERGY = 13000.
MGLU = 1000 # GeV
MCHI = 700 # GeV
CTAU = 1000 # mm
CROSS_SECTION = 0.385 # pb
SLHA_TABLE="""
# ISAJET SUSY parameters in SUSY Les Houches Accord 2 format
# Created by ISALHA 2.0 Last revision: C. Balazs 21 Apr 2009
Block SPINFO # Program information
1 ISASUGRA from ISAJET # Spectrum Calculator
2 7.80 29-OCT-2009 12:50:36 # Version number
Block MODSEL # Model selection
1 3 # Minimal anomaly mediated (AMSB) model
Block SMINPUTS # Standard Model inputs
1 1.27842453E+02 # alpha_em^(-1)
2 1.16570000E-05 # G_Fermi
3 1.17200002E-01 # alpha_s(M_Z)
4 9.11699982E+01 # m_{Z}(pole)
5 4.19999981E+00 # m_{b}(m_{b})
6 1.73070007E+02 # m_{top}(pole)
7 1.77699995E+00 # m_{tau}(pole)
Block MINPAR # SUSY breaking input parameters
1 1.50000000E+03 # m_0
2 2.46440000E+05 # m_{3/2}
3 5.00000000E+00 # tan(beta)
4 1.00000000E+00 # sign(mu)
Block EXTPAR # Non-universal SUSY breaking parameters
0 1.04228903E+16 # Input scale
Block MASS # Scalar and gaugino mass spectrum
# PDG code mass particle
24 8.04229965E+01 # W^+
25 1.16918777E+02 # h^0
35 4.13995459E+03 # H^0
36 4.11271240E+03 # A^0
37 4.12772119E+03 # H^+
1000001 4.68634814E+03 # dnl
1000002 4.68567432E+03 # upl
1000003 4.68634814E+03 # stl
1000004 4.68567480E+03 # chl
1000005 4.09400562E+03 # b1
1000006 3.40991528E+03 # t1
1000011 1.14678894E+03 # el-
1000012 1.12562231E+03 # nuel
1000013 1.14678894E+03 # mul-
1000014 1.12562231E+03 # numl
1000015 1.02227649E+03 # tau1
1000016 1.11225781E+03 # nutl
1000021 %.9g # glss
1000022 6.99874146E+02 # z1ss
1000023 2.26904956E+03 # z2ss
1000024 7.00047607E+02 # w1ss
1000025 -3.87153369E+03 # z3ss
1000035 3.87282349E+03 # z4ss
1000037 3.87772314E+03 # w2ss
2000001 4.76078076E+03 # dnr
2000002 4.71648975E+03 # upr
2000003 4.76078076E+03 # str
2000004 4.71649023E+03 # chr
2000005 4.72474414E+03 # b2
2000006 4.13260303E+03 # t2
2000011 1.02800623E+03 # er-
2000013 1.02800623E+03 # mur-
2000015 1.12574829E+03 # tau2
Block ALPHA # Effective Higgs mixing parameter
-1.97664991E-01 # alpha
Block STOPMIX # stop mixing matrix
1 1 8.36024433E-02 # O_{11}
1 2 -9.96499181E-01 # O_{12}
2 1 9.96499181E-01 # O_{21}
2 2 8.36024433E-02 # O_{22}
Block SBOTMIX # sbottom mixing matrix
1 1 9.99983907E-01 # O_{11}
1 2 5.66892792E-03 # O_{12}
2 1 -5.66892792E-03 # O_{21}
2 2 9.99983907E-01 # O_{22}
Block STAUMIX # stau mixing matrix
1 1 1.32659495E-01 # O_{11}
1 2 9.91161644E-01 # O_{12}
2 1 -9.91161644E-01 # O_{21}
2 2 1.32659495E-01 # O_{22}
Block NMIX # neutralino mixing matrix
1 1 -8.25339637E-04 #
1 2 9.99776781E-01 #
1 3 -2.02405099E-02 #
1 4 6.01018919E-03 #
2 1 9.99794424E-01 #
2 2 1.23403966E-03 #
2 3 1.68632567E-02 #
2 4 -1.11932158E-02 #
3 1 -4.01982665E-03 #
3 2 1.00584431E-02 #
3 3 7.06979156E-01 #
3 4 7.07151294E-01 #
4 1 1.98580157E-02 #
4 2 -1.85414888E-02 #
4 3 -7.06743419E-01 #
4 4 7.06947982E-01 #
Block UMIX # chargino U mixing matrix
1 1 -9.99564528E-01 # U_{11}
1 2 2.95085218E-02 # U_{12}
2 1 -2.95085218E-02 # U_{21}
2 2 -9.99564528E-01 # U_{22}
Block VMIX # chargino V mixing matrix
1 1 -9.99936998E-01 # V_{11}
1 2 1.12252701E-02 # V_{12}
2 1 -1.12252701E-02 # V_{21}
2 2 -9.99936998E-01 # V_{22}
Block GAUGE Q= 3.58269727E+03 #
1 3.57497722E-01 # g`
2 6.52475953E-01 # g_2
3 1.22070026E+00 # g_3
Block YU Q= 3.58269727E+03 #
3 3 8.38887691E-01 # y_t
Block YD Q= 3.58269727E+03 #
3 3 6.52210116E-02 # y_b
Block YE Q= 3.58269727E+03 #
3 3 5.15824445E-02 # y_tau
Block HMIX Q= 3.58269727E+03 # Higgs mixing parameters
1 3.87514209E+03 # mu(Q)
2 5.00000000E+00 # tan(beta)(M_GUT)
3 2.51709106E+02 # Higgs vev at Q
4 1.69144040E+07 # m_A^2(Q)
Block MSOFT Q= 3.58269727E+03 # DRbar SUSY breaking parameters
1 2.30335156E+03 # M_1(Q)
2 6.64254944E+02 # M_2(Q)
3 -4.50376855E+03 # M_3(Q)
31 1.12926123E+03 # MeL(Q)
32 1.12926123E+03 # MmuL(Q)
33 1.11625525E+03 # MtauL(Q)
34 1.03541077E+03 # MeR(Q)
35 1.03541077E+03 # MmuR(Q)
36 9.99967957E+02 # MtauR(Q)
41 4.45722266E+03 # MqL1(Q)
42 4.45722266E+03 # MqL2(Q)
43 3.91252832E+03 # MqL3(Q)
44 4.48730469E+03 # MuR(Q)
45 4.48730469E+03 # McR(Q)
46 3.28067163E+03 # MtR(Q)
47 4.53066406E+03 # MdR(Q)
48 4.53066406E+03 # MsR(Q)
49 4.55108252E+03 # MbR(Q)
Block AU Q= 3.58269727E+03 #
1 1 3.86256177E+03 # A_u
2 2 3.86256177E+03 # A_c
3 3 3.86256177E+03 # A_t
Block AD Q= 3.58269727E+03 #
1 1 9.22079785E+03 # A_d
2 2 9.22079785E+03 # A_s
3 3 9.22079785E+03 # A_b
Block AE Q= 3.58269727E+03 #
1 1 2.57661255E+03 # A_e
2 2 2.57661255E+03 # A_mu
3 3 2.57661255E+03 # A_tau
#
#
#
# =================
# |The decay table|
# =================
#
# PDG Width
DECAY 1000021 5.50675438E+00 # gluino decay
# BR NDA ID1 ID2 ID3
2.50000000E-01 3 1 -1 1000022
2.50000000E-01 3 2 -2 1000022
2.50000000E-01 3 1 -2 1000024
2.50000000E-01 3 -1 2 -1000024
#
# PDG Width
DECAY 1000024 %.9g # chargino decay
#
""" % (MGLU, (1.97326979e-13 / CTAU))
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(-1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
processParameters = cms.vstring(
'SUSY:all = off',
'SUSY:gg2gluinogluino = on',
'SUSY:qqbar2gluinogluino = on',
'1000024:isResonance = false',
'1000024:oneChannel = 1 1.0 100 1000022 211',
'1000024:tau0 = %.1f' % CTAU,
'ParticleDecays:tau0Max = %.1f' % (CTAU * 10),
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CP5Settings',
'processParameters')
),
# The following parameters are required by Exotica_HSCP_SIM_cfi:
slhaFile = cms.untracked.string(''), # value not used
processFile = cms.untracked.string('SimG4Core/CustomPhysics/data/RhadronProcessList.txt'),
useregge = cms.bool(False),
hscpFlavor = cms.untracked.string('stau'),
massPoint = cms.untracked.int32(MCHI), # value not used
particleFile = cms.untracked.string('Configuration/GenProduction/python/ThirteenTeV/DisappTrksAMSBCascade/test/geant4_AMSB_chargino_%sGeV_ctau%scm.slha' % (MCHI, CTAU/10))
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"[email protected]"
] | |
f8bcf1d6c11905d75cef6915a64cabd14eac0127
|
2ed86a79d0fcd299ad4a01310954c5eddcf01edf
|
/homeassistant/components/goodwe/__init__.py
|
b5872ed3deaa396f83867e451c4d61a51cafba2e
|
[
"Apache-2.0"
] |
permissive
|
konnected-io/home-assistant
|
037f12c87bb79e19220192eb918e49db1b1a8b3e
|
2e65b77b2b5c17919939481f327963abdfdc53f0
|
refs/heads/dev
| 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 |
Apache-2.0
| 2023-02-22T06:24:01 | 2017-11-08T05:27:21 |
Python
|
UTF-8
|
Python
| false | false | 2,370 |
py
|
"""The Goodwe inverter component."""
from goodwe import InverterError, connect
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.entity import DeviceInfo
from .const import (
CONF_MODEL_FAMILY,
DOMAIN,
KEY_COORDINATOR,
KEY_DEVICE_INFO,
KEY_INVERTER,
PLATFORMS,
)
from .coordinator import GoodweUpdateCoordinator
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the Goodwe components from a config entry."""
hass.data.setdefault(DOMAIN, {})
host = entry.data[CONF_HOST]
model_family = entry.data[CONF_MODEL_FAMILY]
# Connect to Goodwe inverter
try:
inverter = await connect(
host=host,
family=model_family,
retries=10,
)
except InverterError as err:
raise ConfigEntryNotReady from err
device_info = DeviceInfo(
configuration_url="https://www.semsportal.com",
identifiers={(DOMAIN, inverter.serial_number)},
name=entry.title,
manufacturer="GoodWe",
model=inverter.model_name,
sw_version=f"{inverter.firmware} / {inverter.arm_firmware}",
)
# Create update coordinator
coordinator = GoodweUpdateCoordinator(hass, entry, inverter)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = {
KEY_INVERTER: inverter,
KEY_COORDINATOR: coordinator,
KEY_DEVICE_INFO: device_info,
}
entry.async_on_unload(entry.add_update_listener(update_listener))
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
async def update_listener(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
|
[
"[email protected]"
] | |
2f2e5e0161230e936733e0a3bf0fbc147785f021
|
a2f63e108409adabc88aaa4e5eee9603d4085f3d
|
/vea-plaza1/farmacia/apps/compras/forms.py
|
fa43c3492e6f904b4fe4feb009e9ff87dff88592
|
[] |
no_license
|
armandohuarcaya/Sistema-Union
|
098569606a8259d76a42f2508f59dbf33ddb2e31
|
9f093e8952008fffcbb86ecf1e49f7c86184557e
|
refs/heads/master
| 2021-08-22T23:37:12.758059 | 2017-12-01T12:53:38 | 2017-12-01T12:53:38 | 112,769,764 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,248 |
py
|
from .models import * # Change as necessary
from django.forms import ModelForm
from django import forms
from django.utils.text import capfirst
class TodoListForm(ModelForm):
class Meta:
model = Cabecera
exclude =('trabajador',)
widgets = {
'codigo': forms.TextInput(attrs={'class': 'form-control'}),
'distribuidor': forms.Select(attrs={'class': 'form-control'}),
'laboratorio': forms.Select(attrs={'class': 'form-control'}),
}
class TodoItemForm(forms.ModelForm):
class Meta:
model = DetalleCompra
exclude = ('list',)
widgets = {
'medicamento': forms.Select(attrs={'class': 'form-control'}),
'cantidad': forms.NumberInput(attrs={'class': 'form-control'}),
}
# def __init__(self, *args, **kwargs):
# super(TodoItemForm, self).__init__(*args, **kwargs)
# self.fields['medicamento'] = forms.CharField(
# label=capfirst(u'Producto')
# )
class RangoForm (forms.Form):
fecha_i = forms.DateField(widget = forms.TextInput(attrs={'class':'form-control', 'id':'Fecha_i', 'data-date-format':'dd/mm/yyyy'}))
fecha_f = forms.DateField(widget = forms.TextInput(attrs={'class':'form-control', 'id':'Fecha_f', 'data-date-format':'dd/mm/yyyy'}))
|
[
"[email protected]"
] | |
8bd1377691cf724779af709f24241b528b2a1b8a
|
aef69557d8960205a780e61b7c2dfbb1d7733449
|
/Code/Theo/labs/peaksandvalleys.py
|
cb64dea931ac33118b64d0a46eff3440cbc5c589
|
[] |
no_license
|
sbtries/class_pandaaaa
|
579d6be89a511bdc36b0ce8c95545b9b704a734a
|
bbf9c419a00879118a55c2c19e5b46b08af806bc
|
refs/heads/master
| 2023-07-18T14:18:25.881333 | 2021-09-02T22:48:29 | 2021-09-02T22:48:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,271 |
py
|
def main():
    data = [1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 5, 6, 7, 8, 9, 8, 7, 6, 7, 8, 9]
peak_indices = peaks(data)
valley_indices = valleys(data)
print(valley_indices)
print(peak_indices)
print(peaks_and_valleys(peak_indices,valley_indices))
visualization(data)
exit()
def peaks(data):
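    # A peak is an interior element strictly greater than both of its neighbours.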
peak_indices = []
i = 1
n = len(data)
while i < n-1:
if data[i-1] < data[i] and data[i+1] < data[i]:
peak_indices.append(i)
i += 1
return peak_indices
def valleys(data):
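    # A valley is an interior element strictly less than both of its neighbours.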
valley_indices = []
i = 1
n = len(data)
while i < n-1:
if data[i-1] > data[i] and data[i+1] > data[i]:
valley_indices.append(i)
i += 1
return valley_indices
def peaks_and_valleys(peaks,valleys):
p_v = []
p_v.extend(peaks)
p_v.extend(valleys)
p_v.sort()
return p_v
def visualization(data):
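    # Print a crude character chart: one row per height level (from the maximum down),
    # with 'X' wherever the series reaches that height.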
line = []
i = 0
j = 0
max = 0
for val in data:
if val > max:
max = val
n = len(data)
for i in range(max):
for j in range(n):
if data[j] >= max-i:
line.append('X')
else:
line.append(' ')
print(line)
line.clear()
main()
|
[
"[email protected]"
] | |
67066240d43e45422d72fc3cfb0f6f4f40c47a7f
|
612592a63c9c7b46f4b84f6b2ea6a18187e4838e
|
/TWOSC_autotest/TWOSC_autotest_web/PO/plan/fahuodan/fahuodan_editPlan.py
|
fa45dfb9161e946dbd0c086f78e57ef5d0c8b0a8
|
[] |
no_license
|
xiaoxiangLiu/test_gitbase
|
5c624175b58897d3314c178924fd79fc48925a96
|
da1b9c82b8501f838892a3a5089392311603309e
|
refs/heads/master
| 2021-05-08T14:56:34.612042 | 2018-03-08T16:32:39 | 2018-03-08T16:32:39 | 120,100,809 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,898 |
py
|
__author__ = '38720'
# coding=utf-8
from base_page.base import Page
from selenium.webdriver.common.by import By
class EditPlan(Page):
    '''Page object for the edit-shipping-order (发货单) information dialog.'''
    # outbound (warehouse-out) time input
chuku_time_loc = (By.CSS_SELECTOR, '#wareHouseTimeStr')
    # month-switch button in the outbound-time picker
month_time_loc = (By.CSS_SELECTOR, 'body > div:nth-child(11) > div.datetimepicker-days > table > thead > tr:nth-child(1) > th.switch')
    # outbound-time picker: January
january_time_loc = (By.CSS_SELECTOR, 'body > div:nth-child(11) > div.datetimepicker-months > table > tbody > tr > td > span:nth-child(1)')
    # outbound-time picker: 1st day
day_time_loc = (By.CSS_SELECTOR, 'body > div:nth-child(11) > div.datetimepicker-days > table > tbody > tr:nth-child(2) > td:nth-child(1)')
    # outbound-time picker: 13:00
hour_time_loc = (By.CSS_SELECTOR, 'body > div:nth-child(11) > div.datetimepicker-hours > table > tbody > tr > td > span:nth-child(14)')
    # arrival time input
daohuo_time_loc = (By.CSS_SELECTOR, '#arriveTimeStr')
    # month-switch button in the arrival-time picker
month_time_daohuo_loc = (By.XPATH, '/html/body/div[8]/div[3]/table/thead/tr[1]/th[2]')
    # arrival-time picker: January
january_time_daohuo_loc = (By.XPATH, '/html/body/div[8]/div[4]/table/tbody/tr/td/span[1]')
    # arrival-time picker: 12th day
day_time_daohuo_loc = (By.CSS_SELECTOR, 'body > div:nth-child(12) > div.datetimepicker-days > table > tbody > tr:nth-child(3) > td:nth-child(5)')
    # arrival-time picker: 10:00
hour_time_daohuo_loc = (By.XPATH, '/html/body/div[8]/div[2]/table/tbody/tr/td/span[11]')
    # shipping-order name label
fahuodan_lable_loc = (By.CSS_SELECTOR, '#editInfoForm > div > div:nth-child(1) > label')
    # submit button
submit_loc = (By.CSS_SELECTOR, '#GT_CommonModal > div > div > div.box-footer > div > button.btn.btn-primary')
    # strip the readonly attribute from the outbound-time widget
    def remove_time_js(self):
        js = "$('input[id=wareHouseTimeStr]').removeAttr('readonly')"
        return self.script(js)
    # enter the outbound time
    def send_chuku_time(self):
        js = "$('input[id=wareHouseTimeStr]').removeAttr('readonly')"
        self.script(js)
        return self.find_element(*self.chuku_time_loc).send_keys('2016-10-10-22:00')
    # strip the readonly attribute from the arrival-time widget and enter the time
    def send_daohuo_time(self):
        js = "$('input[id=arriveTimeStr]').removeAttr('readonly')"
        self.script(js)
        return self.find_element(*self.daohuo_time_loc).send_keys('2018-10-10-22:00')
    # click the label
    def click_lable(self):
        return self.find_element(*self.fahuodan_lable_loc).click()
    # click submit
    def click_submit(self):
        return self.find_element(*self.submit_loc).click()
    # click the outbound-time input
    def click_chuku_time(self):
        return self.find_element(*self.chuku_time_loc).click()
    # click the month switcher
    def click_month_time(self):
        return self.find_element(*self.month_time_loc).click()
    # click January
    def click_january_time(self):
        return self.find_element(*self.january_time_loc).click()
    # click the 1st day
    def click_day_time(self):
        return self.find_element(*self.day_time_loc).click()
    # click 13:00
    def click_hour_time(self):
        return self.find_element(*self.hour_time_loc).click()
    # click the arrival-time input
    def click_daohuo_time(self):
        return self.find_element(*self.daohuo_time_loc).click()
    # click the month switcher of the arrival-time picker
    def click_month_daohuo(self):
        return self.find_element(*self.month_time_daohuo_loc).click()
    # click January in the arrival-time picker
    def click_january_daohuo(self):
        return self.find_element(*self.january_time_daohuo_loc).click()
    # click the 12th day
    def click_day_daohuo(self):
        return self.find_element(*self.day_time_daohuo_loc).click()
    # click 10:00
    def click_hour_daohuo(self):
        return self.find_element(*self.hour_time_daohuo_loc).click()
|
[
"[email protected]"
] | |
2999639695e22cf860ff0c331939e7a44e1660da
|
fcdfe976c9ed60b18def889692a17dc18a8dd6d7
|
/ros/py_ros/baxter/sonar1.py
|
05674e6f2009f101a7a4af39626441ee58c5c8ee
|
[] |
no_license
|
akihikoy/ay_test
|
4907470889c9bda11cdc84e8231ef3156fda8bd7
|
a24dfb720960bfedb94be3b4d147e37616e7f39a
|
refs/heads/master
| 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 899 |
py
|
#!/usr/bin/python
#\file sonar1.py
#\brief Baxter: getting data from sonor sensor
#\author Akihiko Yamaguchi, [email protected]
#\version 0.1
#\date Oct.29, 2015
import roslib
import rospy
import sensor_msgs.msg
import baxter_interface
import time
import math
def IsPointInFront(points, max_angle, max_dist):
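  # Return True if any point lies within max_angle radians of straight ahead and closer than max_dist.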
for p in points:
angle= math.atan2(p.y,p.x)
dist= math.sqrt(p.x*p.x+p.y*p.y)
#print (abs(angle),dist),
if abs(angle)<max_angle and dist<max_dist:
return True
#print ''
return False
def CallBack(msg):
#print '----------------'
#print msg
if IsPointInFront(msg.points,30.0/180.0*math.pi,1.1): print 'Found a near point!',msg.header.seq
if __name__=='__main__':
rospy.init_node('baxter_test')
sub_msg= rospy.Subscriber('/robot/sonar/head_sonar/state', sensor_msgs.msg.PointCloud, CallBack)
rospy.spin()
#rospy.signal_shutdown('Done.')
|
[
"[email protected]"
] | |
207b528aeee4bce98be4895e5753fe054b2d10d8
|
2b42b40ae2e84b438146003bf231532973f1081d
|
/spec/mgm4458607.3.spec
|
5f6a8ec7319a623523c4ab26f7e21f624256f1f7
|
[] |
no_license
|
MG-RAST/mtf
|
0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a
|
e2ddb3b145068f22808ef43e2bbbbaeec7abccff
|
refs/heads/master
| 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,314 |
spec
|
{
"id": "mgm4458607.3",
"metadata": {
"mgm4458607.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 181929,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 3798,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 303,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 448,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1848,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 119933,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 483,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 20236,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 306,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 217573,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 161493,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 41929,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 27362,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 289036,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 15289,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 10957,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 20733,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 31499,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 2126370,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 121,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 939,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 29,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 3144,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 4053,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 1600,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 420,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22465,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 11861,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458607.3/file/999.done.species.stats"
}
},
"id": "mgm4458607.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4458607.3"
}
},
"raw": {
"mgm4458607.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4458607.3"
}
}
}
|
[
"[email protected]"
] | |
8debe55a0166fa775d3843b6dc1d86d2907efc04
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/aqA6KSHRCwfE44Q9m_5.py
|
99fb26451b467fde54932142a65767af8a9f7fe5
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 803 |
py
|
"""
October 22nd is CAPS LOCK DAY. Apart from that day, every sentence should be
lowercase, so write a function to **normalize** a sentence.
Create a function that takes a string. If the string is **all uppercase
characters** , convert it to **lowercase** and add an **exclamation mark** at
the end.
### Examples
normalize("CAPS LOCK DAY IS OVER") ➞ "Caps lock day is over!"
normalize("Today is not caps lock day.") ➞ "Today is not caps lock day."
normalize("Let us stay calm, no need to panic.") ➞ "Let us stay calm, no need to panic."
### Notes
Each string is a sentence and should start with an uppercase character.
"""
def normalize(txt):
if all(all(i.isupper() for i in x) for x in txt.split(' ')):
return (txt.lower()).capitalize() + '!'
return txt
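# A quick sanity check against the examples in the docstring above
# (illustrative only, not part of the original submission).
if __name__ == "__main__":
    assert normalize("CAPS LOCK DAY IS OVER") == "Caps lock day is over!"
    assert normalize("Today is not caps lock day.") == "Today is not caps lock day."
    assert normalize("Let us stay calm, no need to panic.") == "Let us stay calm, no need to panic."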
|
[
"[email protected]"
] | |
a8d4b395303242fdc048b7b4a0099f1f74d70414
|
12a0b2172cb480d08406c1302652b078615e5747
|
/plut/table.py
|
ab41b0dce88a7cd8f0f97277f552101289ac7843
|
[] |
no_license
|
kuntzer/plut
|
60bb59f21e5952935e471916954b6ab48d98794b
|
2924f36dc3b1c8ca1449cd666ca6c1577b03a77a
|
refs/heads/master
| 2021-01-19T09:58:19.432997 | 2017-04-20T15:16:33 | 2017-04-20T15:16:33 | 87,803,091 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,325 |
py
|
"""
Helpers for astropy.table arrays
"""
import numpy as np
import astropy.table
import datetime
import copy
import logging
logger = logging.getLogger(__name__)
def info(cat, txt=True):
"""
Returns a new table "describing" the content of the table cat.
"""
colnames = cat.colnames
dtypes = [cat[colname].dtype for colname in colnames]
ndims = [cat[colname].ndim for colname in colnames]
shapes = [cat[colname].shape for colname in colnames]
infotable = astropy.table.Table([colnames, dtypes, ndims, shapes], names=("colname", "dtype", "ndim", "shape"))
infotable.sort("colname")
infotable.meta = cat.meta
if txt:
lines = infotable.pformat(max_lines=-1, max_width=-1)
lines.append("")
lines.append("Number of rows: {}".format(len(cat)))
lines.append("Number of columns: {}".format(len(cat.colnames)))
lines.append("Metadata: {}".format(str(infotable.meta.items())))
return "\n".join(lines)
else:
return infotable
class Selector:
"""
Provides a simple way of getting "configurable" sub-selections of rows from a table.
"""
def __init__(self, name, criteria):
"""
:param name: a short string describing this selector (like "star", "low_snr", ...)
:param criteria: a list of tuples describing the criteria. Each of these tuples starts
with a string giving its type, followed by some arguments.
Illustration of the available criteria (all limits are inclusive):
- ``("in", "tru_rad", 0.5, 0.6)`` : ``"tru_rad"`` is between 0.5 and 0.6 ("in" stands for *interval*) and *not* masked
- ``("max", "snr", 10.0)`` : ``"snr"`` is below 10.0 and *not* masked
- ``("min", "adamom_flux", 10.0)`` : ``"adamom_flux"`` is above 10.0 and *not* masked
- ``("inlist", "subfield", (1, 2, 3))`` : ``subfield`` is among the elements in the tuple or list (1,2,3) and *not* masked.
- ``("is", "Flag", 2)`` : ``"Flag"`` is exactly 2 and *not* masked
- ``("nomask", "pre_g1")`` : ``"pre_g1"`` is not masked
- ``("mask", "snr")`` : ``"snr"`` is masked
"""
self.name = name
self.criteria = criteria
def __str__(self):
"""
A string describing the selector
"""
return "'%s' %s" % (self.name, repr(self.criteria))
def combine(self, *others):
"""
Returns a new selector obtained by merging the current one with one or more others.
:param others: provide one or several other selectors as arguments.
.. note:: This does **not** modify the current selector in place! It returns a new one!
"""
combiname = "&".join([self.name] + [other.name for other in others])
combicriteria = self.criteria
for other in others:
combicriteria.extend(other.criteria)
return Selector(combiname, combicriteria)
def select(self, cat):
"""
Returns a copy of cat with those rows that satisfy all criteria.
:param cat: an astropy table
"""
if len(self.criteria) == 0:
logger.warning("Selector %s has no criteria!" % (self.name))
return copy.deepcopy(cat)
passmasks = []
for crit in self.criteria:
if cat[crit[1]].ndim != 1:
logger.warning("Selecting with multidimensional column ('{}', shape={})... hopefully you know what you are doing.".format(crit[1], cat[crit[1]].shape))
if crit[0] == "in":
if len(crit) != 4: raise RuntimeError("Expected 4 elements in criterion %s" % (str(crit)))
passmask = np.logical_and(cat[crit[1]] >= crit[2], cat[crit[1]] <= crit[3])
if np.ma.is_masked(passmask):
passmask = passmask.filled(fill_value=False)
# Note about the "filled": if crit[2] or crit[3] englobe the values "underneath" the mask,
# some masked crit[1] will result in a masked "passmask"!
# But we implicitly want to reject masked values here, hence the filled.
elif crit[0] == "max":
if len(crit) != 3: raise RuntimeError("Expected 3 elements in criterion %s" % (str(crit)))
passmask = (cat[crit[1]] <= crit[2])
if np.ma.is_masked(passmask):
passmask = passmask.filled(fill_value=False)
elif crit[0] == "min":
if len(crit) != 3: raise RuntimeError("Expected 3 elements in criterion %s" % (str(crit)))
passmask = (cat[crit[1]] >= crit[2])
if np.ma.is_masked(passmask):
passmask = passmask.filled(fill_value=False)
elif crit[0] == "inlist":
if len(crit) != 3: raise RuntimeError("Expected 3 elements in criterion %s" % (str(crit)))
passmask = np.in1d(np.asarray(cat[crit[1]]), crit[2]) # This ignores any mask
if np.ma.is_masked(passmask): # As the mask is ignored by in1d, this is probably worthless and will never happen
passmask = passmask.filled(fill_value=False)
# So we need to deal with masked elements manually:
if hasattr(cat[crit[1]], "mask"): # i.e., if this column is masked:
passmask = np.logical_and(passmask, np.logical_not(cat[crit[1]].mask))
elif crit[0] == "is":
if len(crit) != 3: raise RuntimeError("Expected 3 elements in criterion %s" % (str(crit)))
passmask = (cat[crit[1]] == crit[2])
if np.ma.is_masked(passmask):
passmask = passmask.filled(fill_value=False)
elif (crit[0] == "nomask") or (crit[0] == "mask"):
if len(crit) != 2: raise RuntimeError("Expected 2 elements in criterion %s" % (str(crit)))
if hasattr(cat[crit[1]], "mask"): # i.e., if this column is masked:
if crit[0] == "nomask":
passmask = np.logical_not(cat[crit[1]].mask)
else:
passmask = cat[crit[1]].mask
else:
logger.warning("Criterion %s is facing an unmasked column!" % (str(crit)))
passmask = np.ones(len(cat), dtype=bool)
else:
raise RuntimeError("Unknown criterion %s" % (crit))
logger.debug("Criterion %s of '%s' selects %i/%i rows (%.2f %%)" %
(crit, self.name, np.sum(passmask), len(cat), 100.0 * float(np.sum(passmask))/float(len(cat))))
assert len(passmask) == len(cat)
passmasks.append(passmask) # "True" means "pass" == "keep this"
# Combining the passmasks:
passmasks = np.logical_not(np.column_stack(passmasks)) # "True" means "reject"
combimask = np.logical_not(np.sum(passmasks, axis=1).astype(bool)) # ... and "True" means "keep this" again.
logger.info("Selector '%s' selects %i/%i rows (%.2f %%)" %
(self.name, np.sum(combimask), len(cat), 100.0 * float(np.sum(combimask))/float(len(cat))))
return cat[combimask]
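# Minimal usage sketch (illustrative only, not part of the original module).
# The table and its column names "snr" and "tru_rad" are made up for this
# example; any astropy table with matching columns would behave the same way.
if __name__ == "__main__":
    demo_cat = astropy.table.Table({"snr": [5.0, 12.0, 20.0], "tru_rad": [0.55, 0.58, 0.90]})
    demo_sel = Selector("demo", [("min", "snr", 10.0), ("in", "tru_rad", 0.5, 0.6)])
    print(info(demo_cat))             # textual summary of the table's columns
    print(demo_sel.select(demo_cat))  # keeps only the row with snr=12.0, tru_rad=0.58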
|
[
"[email protected]"
] | |
f2c61f6ea2a8a16d27c3a861620bfe4d898bc0d0
|
55b84573458f72618c9d285fd449e8328b75af4f
|
/test.py
|
642d7e9940b463fad5b34d1e231bd1568e731442
|
[
"MIT"
] |
permissive
|
obs145628/py-linear-regression
|
2984749d8b2295a044fff92904ae7f3daec98b07
|
8dafcfa0c1b889177afa61629540cacf8dcc0080
|
refs/heads/master
| 2021-04-27T17:37:10.715036 | 2018-02-21T11:00:54 | 2018-02-21T11:00:54 | 122,325,238 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
import numpy as np
import dataset_boston
import lineareg
X, y = dataset_boston.load_boston()
lineareg.train(X, y, 1000, 1e-8)
|
[
"[email protected]"
] | |
12afd11a82d7eedeaa2bf128ce1a1df056ad69bb
|
88023c9a62994e91291c67088156a2894cc26e9e
|
/tests/test_core.py
|
7a80ecc593d53ee3f44a38022cb4ff9fe2622c00
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
toros-astro/corral
|
41e9d0224d734c4268bf5161d472b3c0375842f0
|
75474b38ff366330d33644461a902d07374a5bbc
|
refs/heads/master
| 2023-06-10T15:56:12.264725 | 2018-09-03T17:59:41 | 2018-09-03T17:59:41 | 44,282,921 | 6 | 5 |
BSD-3-Clause
| 2023-03-24T12:03:17 | 2015-10-14T23:56:40 |
Python
|
UTF-8
|
Python
| false | false | 2,715 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# DOCS
# =============================================================================
"""All core functions functionalities tests"""
# =============================================================================
# IMPORTS
# =============================================================================
from corral import core, VERSION
import mock
from .base import BaseTest
# =============================================================================
# BASE CLASS
# =============================================================================
class TestCore(BaseTest):
def test_get_version(self):
actual = core.get_version()
expected = VERSION
self.assertEqual(actual, expected)
def test_setup_environment(self):
with mock.patch("corral.db.setup") as setup:
with mock.patch("corral.db.load_models_module") as load_mm:
core.setup_environment()
self.assertTrue(setup.called)
self.assertTrue(load_mm.called)
|
[
"[email protected]"
] | |
095fea23bcf8497a6557e3979f106a29b0294879
|
53c4ec58760768fc9073793cf17cd8c55978c3af
|
/annotator/uniformer/mmcv/runner/utils.py
|
c5befb8e56ece50b5fecfd007b26f8a29124c0bd
|
[
"Apache-2.0"
] |
permissive
|
HighCWu/ControlLoRA
|
0b6cab829134ed8377f22800b0e1d648ddf573b0
|
3b8481950867f61b2cf072b1f156d84f3363ac20
|
refs/heads/main
| 2023-08-05T08:51:25.864774 | 2023-02-28T13:06:24 | 2023-02-28T13:06:24 | 603,359,062 | 421 | 20 |
Apache-2.0
| 2023-08-02T02:14:40 | 2023-02-18T09:12:15 |
Python
|
UTF-8
|
Python
| false | false | 2,936 |
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import random
import sys
import time
import warnings
from getpass import getuser
from socket import gethostname
import numpy as np
import torch
import annotator.uniformer.mmcv as mmcv
def get_host_info():
"""Get hostname and username.
Return empty string if exception raised, e.g. ``getpass.getuser()`` will
lead to error in docker container
"""
host = ''
try:
host = f'{getuser()}@{gethostname()}'
except Exception as e:
warnings.warn(f'Host or user not found: {str(e)}')
finally:
return host
def get_time_str():
return time.strftime('%Y%m%d_%H%M%S', time.localtime())
def obj_from_dict(info, parent=None, default_args=None):
"""Initialize an object from dict.
The dict must contain the key "type", which indicates the object type, it
can be either a string or type, such as "list" or ``list``. Remaining
fields are treated as the arguments for constructing the object.
Args:
info (dict): Object types and arguments.
parent (:class:`module`): Module which may containing expected object
classes.
default_args (dict, optional): Default arguments for initializing the
object.
Returns:
any type: Object built from the dict.
"""
assert isinstance(info, dict) and 'type' in info
assert isinstance(default_args, dict) or default_args is None
args = info.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
if parent is not None:
obj_type = getattr(parent, obj_type)
else:
obj_type = sys.modules[obj_type]
elif not isinstance(obj_type, type):
raise TypeError('type must be a str or valid type, but '
f'got {type(obj_type)}')
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
return obj_type(**args)
def set_random_seed(seed, deterministic=False, use_rank_shift=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
use_rank_shift (bool): Whether to add rank number to the random seed to
have different random seed in different threads. Default: False.
"""
if use_rank_shift:
rank, _ = mmcv.runner.get_dist_info()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
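# Illustrative sketch (not part of mmcv): obj_from_dict resolves the "type" key
# against a parent module and instantiates it with the remaining fields.
# The datetime example below is an assumption made purely for demonstration.
if __name__ == '__main__':
    import datetime
    delta = obj_from_dict({'type': 'timedelta', 'seconds': 30}, parent=datetime)
    print(delta)  # 0:00:30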
|
[
"[email protected]"
] | |
b0eab014e58477ee27d3b7011ca54aee830dd3b6
|
a2777caa6247ab826c4902cc5a14bacfd3507215
|
/eggify/migrations/0045_auto_20190608_0634.py
|
436444884d5ad10093458da13388b39dd1036e00
|
[] |
no_license
|
ad-egg/eggify
|
2c7aa96f1588aefa94236f3a39693c5b9a1f931d
|
a7d1cbc319ca52fc9e14f574cadebfdad5bad3e3
|
refs/heads/master
| 2022-05-05T22:54:45.942426 | 2020-10-21T20:03:32 | 2020-10-21T20:03:32 | 190,267,814 | 3 | 6 | null | 2022-04-22T21:25:19 | 2019-06-04T19:38:53 |
Python
|
UTF-8
|
Python
| false | false | 968 |
py
|
# Generated by Django 2.2.2 on 2019-06-08 06:34
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('eggify', '0044_merge_20190608_0634'),
]
operations = [
migrations.AlterField(
model_name='eggnt',
name='id',
field=models.CharField(default='ce821d7f-ad2d-4bb7-9399-d73887475548', editable=False, max_length=50, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='eggnt',
name='updated_at',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 8, 6, 34, 39, 904850, tzinfo=utc), editable=False, verbose_name='updated at'),
),
migrations.AlterField(
model_name='eggnt',
name='words',
field=models.TextField(help_text='Enter text to be eggified!'),
),
]
|
[
"[email protected]"
] | |
9f3d40b3b0bbb876016bce103fec482a6d366115
|
887bcf643eb6ef28c9f2f36a5e8c572a7e00f2ab
|
/api/base/__init__.py
|
1465c19b06734b74fe63674d54f8fb6c492420da
|
[
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/SHARE
|
78e867de79c51751dc922e2e5729fbf64b10ab52
|
880fa0248fde111eafc8be7507dc5a5098ec433a
|
refs/heads/develop
| 2023-08-11T22:08:11.408053 | 2023-05-11T17:40:47 | 2023-05-11T17:40:47 | 21,280,081 | 96 | 70 |
Apache-2.0
| 2023-09-12T13:27:11 | 2014-06-27T15:19:21 |
Python
|
UTF-8
|
Python
| false | false | 80 |
py
|
from api.base.serializers import * # noqa
from api.base.views import * # noqa
|
[
"[email protected]"
] | |
286984d3003df45e062240f9a4fd9db5bedd3497
|
89260668655a46278e8f22a5807b1f640fd1490c
|
/mySite/records/get_person.py
|
6c0e2075d500dfb86e34d2cbb2b5d26de09a12f3
|
[] |
no_license
|
happychallenge/mySite
|
926136859c5b49b7fd8baff09b26f375b425ab30
|
ddbda42d5d3b9c380a594d81a27875b4ad10358b
|
refs/heads/master
| 2020-12-30T13:28:31.018611 | 2017-11-08T02:52:18 | 2017-11-08T02:52:18 | 91,218,922 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,258 |
py
|
import re
import json
import requests
from bs4 import BeautifulSoup
def get_person_info(person):
search_url = 'http://people.search.naver.com/search.naver' # Search URL
params = { 'where':'nexearch' , 'query': person } # Search Parameters
html = requests.get(search_url, params=params).text # fetch the search result HTML
pattern = re.compile('(?<=oAPIResponse :)(.*?)(?=, sAPIURL)', re.DOTALL)
matches = pattern.search(html)
if matches == None:
return None
data = matches[0]
result = json.loads(data) # Json Data Load from Javascript
listPerson = result['data']['result']['itemList'] # Get person info
result = {} # initialize the return value
for index, item in enumerate(listPerson):
sub = {}
sub['id'] = item['object_id'] # ID Read
sub['name'] = item['m_name'] # name
sub['birth_year'] = item['birth_year'] # Birth Year
job = []
for jobs in item['job_info']:
job.append(jobs['job_name'])
sub['job'] = job
result[index] = sub
if index == 1:
break
return result
if __name__ == '__main__':
print(get_person_info('최경환'))
|
[
"[email protected]"
] | |
e89bf4569b09f51daffc997e16d3331e722abc6d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_truism.py
|
d1c7cd25dfb6033280337472ff04169465e52272
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 354 |
py
|
#calss header
class _TRUISM():
def __init__(self,):
self.name = "TRUISM"
self.definitions = [u'a statement that is so obviously true that it is almost not worth saying: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
a487c20c44f8eaa2fbeb1421daea26dca65cdb0b
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/E/eldang/noaa_historical_weather_data_csv_converter.py
|
6598a5b7b4c3aaeb5b344ed8a639dfce5f2d4ad8
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 26,768 |
py
|
#! /usr/bin/env python
# Downloader for NOAA historical weather data
# Written by Eldan Goldenberg, Sep-Oct 2012
# http://eldan.co.uk/ ~ @eldang ~ [email protected]
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# The licence text is available online at:
# http://www.gnu.org/licenses/gpl-2.0.html
# Background:
# NOAA publishes an amazing trove of historical weather data from stations all
# over the world - http://www.ncdc.noaa.gov/cgi-bin/res40.pl - but in a rather
# awkward format: each year is a separate folder, in which each station
# is one file, identified by two arbitrary ref numbers. Each file is gzipped
# text in a rather non-standard format with some inconvenient features like
# NULL tokens varying per field, and numbers being concatenated together
# with flags that give interpretation information:
# ftp://ftp.ncdc.noaa.gov/pub/data/gsod/readme.txt
# This script lets the user specify a station and a number of years to download.
# It then iterates through, downloading enough years, parsing them into a much
# more standard CSV format ready for use in (e.g.) Excel or Tableau, and
# concatenating them into one continuous file per station.
# USE:
# Simply call this script, with the optional argument --verbose if you want
# debug output. It will prompt you for everything else it needs interactively.
# TODO short term: make it take a list of stations at the beginning, so it can
# be left running unattended after that.
# TODO long term: replace manual USAF & WBAN code input with a lookup that lets
# users just enter a station name, and gets the two codes from that file.
# TODO total pipedream: let user give a location, and automagically find the
# nearest station based on lat & long.
# TODO for scraperwiki: have it load in the whole list of stations and just
# iterate over them.
import datetime
import urllib
import os
import gzip
import time
import csv
import sys
# Check for --verbose argument. You'll get more feedback if you use this.
if len(sys.argv) == 1:
# then default to non-verbose
verbose = False
elif sys.argv[1] == "--verbose":
verbose = True
else:
verbose = False
# I've assumed you'll want the same number of years for every station you
# download in one session, so we ask this before going into the main loop.
maxyears = int(raw_input("How many years of data would you like to download " \
"for each station?\n"))
# This function goes through each downloaded file line by line, and translates
# it from NOAA's idiosyncratic format to CSV with all the fields separated
# out rationally.
def parsefile(f_in, f_out, stationname):
# Set up connections to input and output files. The CSV library also helps
# with reading the input file, because we can treat it as space separated
# with consecutive spaces being collapsed together
reader = csv.reader(f_in, delimiter=' ', quoting=csv.QUOTE_NONE, skipinitialspace=True)
writer = csv.writer(f_out, dialect=csv.excel)
for row in reader:
if (row[0] != 'STN---'):
# If it's the header row, just skip; otherwise process
outrow = [stationname] # the station name is not in the input file
# skipping first 2 cols of the input file as they are the USAF & WBAN codes
# which we're replacing with the actual station name.
# expanding col 3 into separate Y, M & D fields makes them easier to work
# with in Tableau
outrow.append((row[2])[:4]) # first 4 digits are the year
outrow.append((row[2])[4:6]) # next 2 are the month
outrow.append((row[2])[-2:]) # final 2 are the day
# now we can use a loop to get through a bunch of field pairs that all
# work the same:
# MeanTemp, NTempObs, DewPoint, NDewPointObs, SeaLevelPressure,
# NSeaLevPressObs, StationPressure, NStatPressObs
for i in range(3, 11, 2):
# for each of these, 9999.9 means NULL, and the number of observations
# follows the value
if (row[i+1] == "0") or (row[i] == "9999.9"):
outrow.append("NULL")
outrow.append(0)
else:
outrow.append(row[i])
outrow.append(row[i+1])
# Now the same principle for Visibility, which uses a different NULL token
# Visibility, NVisibilityObs
if (row[12] == "0") or (row[11] == "999.9"):
outrow.append("NULL")
outrow.append(0)
else:
outrow.append(row[11])
outrow.append(row[12])
# Now for wind data, which is 4 fields of which the second is the number
# of observations from which the other 3 values were determined
# MeanWindSpeed, NWindObs, MaxSustWindSpeed, MaxWindGust
if row[14] == "0":
# if there are 0 observations, then set a bunch of nulls
outrow.append("NULL")
outrow.append("0")
outrow.append("NULL")
outrow.append("NULL")
else:
for i in range(13, 17, 1):
if row[i] == "999.9": outrow.append("NULL")
else: outrow.append(row[i])
# Temp fields may or may not have a "*" appended after the number, so we
# handle these by first checking what the last character is:
# "MaxTemp", "MaxTempSource", "MinTemp", "MinTempSource"
for i in range(17, 19, 1):
if (row[i])[-1] == "*":
# then the flag is present, indicating the source was derived
# indirectly from hourly data
outrow.append((row[i])[:-1])
outrow.append("hourly")
else:
# if it's not present then this was an explicit max/min reading
outrow.append(row[i])
outrow.append("explicit")
# Precipitation has its own extra special flag source and NULL placeholder
# PrecipAmount, NPrecipReportHours, PrecipFlag
if row[19] == "99.99":
# then it's null, so:
outrow.append("NULL")
outrow.append("NULL")
outrow.append("NULL")
else:
outrow.append((row[19])[:-1])
# translations of the flag, as per
# ftp://ftp.ncdc.noaa.gov/pub/data/gsod/readme.txt
if (row[19])[-1] == "A": outrow.append("6")
elif (row[19])[-1] == "B": outrow.append("12")
elif (row[19])[-1] == "C": outrow.append("18")
elif (row[19])[-1] == "D": outrow.append("24")
elif (row[19])[-1] == "E": outrow.append("12")
elif (row[19])[-1] == "F": outrow.append("24")
elif (row[19])[-1] == "G": outrow.append("24")
elif (row[19])[-1] == "H": outrow.append("0")
elif (row[19])[-1] == "I": outrow.append("0")
else: outrow.append("ERR")
outrow.append((row[19])[-1])
# SnowDepth is relatively straightforward
if row[20] == "999.9":
outrow.append("NULL")
else:
outrow.append(row[20])
# Fog, Rain, Snow, Hail, Thunder, Tornado
# these are stored as one six-bit binary string, so we unpack it here
for i in range(0, 6, 1):
outrow.append((row[21])[i])
# And we're done! Now write the row to the output file
writer.writerow(outrow)
if verbose:
sys.stdout.write("parsed.\n")
else:
# even if not verbose, we say something so the user can see it's working
sys.stdout.write(".")
sys.stdout.flush() # need to flush the output buffer to show progress live
# This is the main control function. Each pass gets the user's input to pick a
# station, and then loops over years to download the relevant files, calling
# parsefile() to parse each one into standard CSV
def downloadfiles(maxyears):
# get parameters for and start constructing filenames
URLroot = "ftp://ftp.ncdc.noaa.gov/pub/data/gsod/" # base URL for all files
filesuffix = ".op.gz" # suffix for all the raw files
firstyear = 1928 # this is the first year available for any station
USAFcode = raw_input("Please enter the USAF code for the station you want " \
"data for (first column of " \
"ftp://ftp.ncdc.noaa.gov/pub/data/inventories/ISH-HISTORY.TXT )\n")
WBANcode = raw_input("Please enter the WBAN code for the station you want " \
"data for (second column of " \
"ftp://ftp.ncdc.noaa.gov/pub/data/inventories/ISH-HISTORY.TXT )\n")
# e.g. Seattle (SEA) is USAF 727930 WBAN 24233
# Portland, OR is USAF 726980 WBAN 24229
# LHR is USAF 037720 WBAN 99999
stationname = raw_input("What would you like to call this station?\n")
stationcode = str(USAFcode) + '-' + str(WBANcode)
yearsdownloaded = 0
for year in range(datetime.datetime.now().year-1, firstyear, -1):
# stopping before the current year because it's necessarily incomplete, and
# looping back from last year, on the assumption that more recent years
# are of greater interest and have higher quality data.
# First we assemble the URL for the year of interest
fullURL = (URLroot + str(year) + '/' + stationcode + '-' +
str(year) + filesuffix)
if verbose:
sys.stdout.write("Trying " + fullURL + " ... ")
sys.stdout.flush()
# Now we try to download the file, with very basic error handling if verbose
try:
urllib.urlretrieve(fullURL,str(year)+filesuffix)
if verbose: sys.stdout.write("retrieved ... ")
yearsdownloaded += 1
except IOError as e:
if verbose: print(" ")
print(e)
else: # if we got the file without any errors, then
# uncompress the file
f_in = gzip.open(str(year)+filesuffix)
if verbose: sys.stdout.write("decompressed ... ")
# and start writing the output
if yearsdownloaded == 1:
# since it's the first year, open the file and write the header row
firstyear = year
f_out = open(stationname+'.csv','w')
csv.writer(f_out).writerow(["Station", "Year", "Month", "Day", \
"MeanTemp", "NTempObs", "DewPoint", "NDewPointObs", \
"SeaLevelPressure", "NSeaLevPressObs", "StationPressure", \
"NStatPressObs", "Visibility", "NVisibilityObs", "MeanWindSpeed", \
"NWindObs", "MaxSustWindSpeed", "MaxWindGust", "MaxTemp", \
"MaxTempSource", "MinTemp", "MinTempSource", "PrecipAmount", \
"NPrecipReportHours", "PrecipFlag", "SnowDepth", "Fog", "Rain", \
"Snow", "Hail", "Thunder", "Tornado"])
# This function does the actual ETL
parsefile(f_in, f_out, stationname)
# clean up after ourselves
f_in.close()
os.remove(str(year)+filesuffix)
urllib.urlcleanup()
if yearsdownloaded == maxyears:
break # if we have enough years, then end this loop
else:
time.sleep(5) # slow down here to stop the server locking us out
time.sleep(1)
print("Successfully downloaded " + str(yearsdownloaded) + " years between " +
str(year) + " and " + str(firstyear) + " for station " + stationname)
if yearsdownloaded < maxyears:
# If we didn't get as many years as requested, alert the user
print("No more years are available at the NOAA website for this station.")
f_out.close()
# This is the main control loop. It repeatedly asks the user for station codes
# and calls downloadfiles() to download the requested data, until it's told
# to stop.
goagain = "Y"
while not (goagain.startswith('N') or goagain.startswith('n')):
downloadfiles(maxyears)
goagain = raw_input("Would you like to download another station (Y/N)?\n")
while not (goagain.startswith('N') or goagain.startswith('n') or
goagain.startswith('y') or goagain.startswith('Y')):
goagain = raw_input("Please help me, I am but a stupid computer. " \
"I can only understand Y or N as responses to this prompt. "
"Would you like to download another station (Y/N)?\n")
|
[
"[email protected]"
] | |
7773438bf26b8b815417e140e5cc64fe0116bb7a
|
5f0089466266e5a118b8257a49623b095f972737
|
/leetcode/offerIsComing/动态规划/剑指 Offer 48. 最长不含重复字符的子字符串 .py
|
62b7b1a158f640bc6440bb1ed463dced37400b0d
|
[
"Apache-2.0"
] |
permissive
|
wenhaoliang/leetcode
|
983526484b67ee58656c8b0fd68483de82112a78
|
b19233521c4e9a08cba6267eda743935e1fb06a6
|
refs/heads/main
| 2023-07-19T04:31:00.214433 | 2021-09-04T10:54:17 | 2021-09-04T10:54:17 | 401,963,789 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,092 |
py
|
"""
Given a string, find the longest substring that contains no repeated characters and return its length.
Example 1:
Input: "abcabcbb"
Output: 3
Explanation: The longest substring without repeating characters is "abc", so its length is 3.
Example 2:
Input: "bbbbb"
Output: 1
Explanation: The longest substring without repeating characters is "b", so its length is 1.
Example 3:
Input: "pwwkew"
Output: 3
Explanation: The longest substring without repeating characters is "wke", so its length is 3.
Note that the answer must be the length of a substring: "pwke" is a subsequence, not a substring.
Link: https://leetcode-cn.com/problems/zui-chang-bu-han-zhong-fu-zi-fu-de-zi-zi-fu-chuan-lcof
"""
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
"""
State definition: dp[j] is the length of the longest substring without repeated characters ending at index j.
Transition: fix the right boundary j and scan left for the character s[i] that equals s[j].
1. i < 0: no character to the left equals s[j], so dp[j] = dp[j-1] + 1.
2. dp[j-1] < j - i: here j - i is the distance from the right boundary to that equal character;
   if dp[j-1] is smaller, the equal character lies outside the current longest substring,
   so the current character can simply be appended, i.e. dp[j] = dp[j-1] + 1.
3. dp[j-1] >= j - i: the equal character lies inside the current longest substring,
   so the length restarts from it, i.e. dp[j] = j - i.
Initial state: dp[0] = 0.
Return value: keep a running maximum in res.
"""
length = len(s)
dp = [0] * length
res = 0
for j in range(length):
i = j - 1
while i >= 0 and s[i] != s[j]:
i -= 1 # linear scan for the matching index i
if dp[j - 1] < j - i:
dp[j] = dp[j - 1] + 1
else:
dp[j] = j - i
res = max(res, dp[j])
return res
class Solution1:
def lengthOfLongestSubstring(self, s: str) -> int:
"""
State definition: dp[j] is the length of the longest substring without repeated characters ending at index j.
Transition: compare dp[j-1] against j - i, where
s[i] is the character found by scanning left that equals s[j] (i < 0 means no such match exists)
and j - i is the distance from s[j] back to that matching character.
1. dp[j-1] >= j - i  =>  dp[j] = j - i
   dp[j-1] already covers the matching s[i], so the count restarts from it and the length becomes j - i.
2. dp[j-1] < j - i   =>  dp[j] = dp[j-1] + 1
   dp[j-1] is shorter than the distance to the match, so the current character is simply appended.
Initial state: dp[0] = 0.
Result: keep a running maximum in res.
"""
n = len(s)
dp = [0] * n
res = 0
for j in range(n):
i = j - 1
while i >= 0 and s[i] != s[j]:
i -= 1
if dp[j-1] < j - i:
dp[j] = dp[j - 1] + 1
else:
dp[j] = j - i
res = max(res, dp[j])
return res
if __name__ == "__main__":
A = Solution()
n = "abcabcbb"
print(A.lengthOfLongestSubstring(n))
A = Solution1()
n = "abcabcbb"
print(A.lengthOfLongestSubstring(n))
"""
Worked example: take "abcdbaa" with 0-based indices.
At j = 4, the longest substring ending at s[4] is sub[4] = "cdb", so dp[4] = 3.
Now consider j + 1. By definition the characters in sub[4] are all distinct,
so at j = 5 the nearest repeated character 'a' to the left of s[5] is at index i = 0,
which lies outside sub[4]; the substring ending at s[5] is simply sub[4] plus s[5],
so dp[5] = dp[4] + 1. Next, at j = 6 the repeated character 'a' to the left of s[6]
is at index i = 5, which lies inside sub[5], so the new longest substring sub[6]
starts just after index i and dp[6] = j - i = 1.
"""
|
[
"[email protected]"
] | |
e344a5b4330e8d47e8503070ac4ef83d4fca5268
|
7246faf9a222269ce2612613f58dc5ff19091f10
|
/baekjoon/1000~2999/1371_가장 많은 글자.py
|
0254885023ff1b56daf37d238d999f47f0496e46
|
[] |
no_license
|
gusdn3477/Algorithm_Study
|
87a2eb72a8488d9263a86db70dadc7944434d41d
|
3fefe1dcb40122157845ffc542f41cb097711cc8
|
refs/heads/main
| 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 415 |
py
|
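# Baekjoon 1371: read lines until EOF, count every non-space character,
# then print all characters whose count equals the maximum, in ascending order.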
dic = {}
while True:
try:
a = input()
for i in a:
if i == ' ':
continue
if i not in dic:
dic[i] = 1
else:
dic[i] += 1
except:
break
ar = list(dic.items())
ar = sorted(ar, key=lambda x: (-x[1], x[0]))
M = ar[0][1]
for i in range(len(ar)):
if ar[i][1] == M:
print(ar[i][0], end='')
|
[
"[email protected]"
] | |
6ad57d1eb19e11eb751d93b7746d619a3a66884e
|
88b4b883c1a262b5f9ca2c97bf1835d6d73d9f0b
|
/src/api/python/hce/dc_db/FieldRecalculator.py
|
17b54851ece91f315cf2d8b5be50cf8186b57a42
|
[] |
no_license
|
hce-project/hce-bundle
|
2f93dc219d717b9983c4bb534884e4a4b95e9b7b
|
856a6df2acccd67d7af640ed09f05b2c99895f2e
|
refs/heads/master
| 2021-09-07T22:55:20.964266 | 2018-03-02T12:00:42 | 2018-03-02T12:00:42 | 104,993,955 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,498 |
py
|
'''
@package: dc
@author scorp
@link: http://hierarchical-cluster-engine.com/
@copyright: Copyright © 2013-2014 IOIX Ukraine
@license: http://hierarchical-cluster-engine.com/license/
@since: 0.1
'''
import dc_db.Constants as Constants
import dc_db.FieldRecalculatorDefaultCriterions as DefCriterions
import dc.EventObjects
import app.Utils as Utils # pylint: disable=F0401
logger = Utils.MPLogger().getLogger()
# #FieldRecalculator class makes come common processing of databse fields recalculation (using in Task classes)
class FieldRecalculator(object):
def __init__(self):
pass
# #commonSiteRecalculate - common recalculate method
#
# @param queryCallback function for queries execution
# @param additionCause additional SQL cause
# @param fieldName - processing field name (of `sites` tables)
# @param siteId - site id
def commonSiteRecalculate(self, defaultCritName, fieldName, siteId, queryCallback):
UPDATE_SQL_TEMPLATE = "UPDATE `sites` SET `%s`=(SELECT COUNT(*) FROM dc_urls.%s %s) WHERE `id` = '%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(defaultCritName, siteId, queryCallback)
query = UPDATE_SQL_TEMPLATE % (fieldName, tableName, criterionsString, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# #siteResourcesRecalculate - recalculate sites.Resources field
#
def siteResourcesRecalculate(self, siteId, queryCallback):
# self.commonSiteRecalculate(queryCallback, "State>3 AND Crawled>0", "Resources", siteId)
# self.commonSiteRecalculate("Crawled>0 AND Size>0", "Resources", siteId, queryCallback)
self.commonSiteRecalculate(DefCriterions.CRIT_RESOURCES, "Resources", siteId, queryCallback)
# #siteContentsRecalculate - recalculate sites.Contents field
#
def siteContentsRecalculate(self, siteId, queryCallback):
# self.commonSiteRecalculate(queryCallback, "State=7 AND Crawled>0 AND Processed>0", "Contents", siteId)
self.commonSiteRecalculate(DefCriterions.CRIT_CONTENTS, "Contents", siteId, queryCallback)
# updateCollectedURLs updates sites.CollectedURLs field
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateCollectedURLs(self, siteId, queryCallback):
QUERY_TEMPLATE = "UPDATE `sites` SET `CollectedURLs`=(SELECT count(*) FROM dc_urls.%s %s) WHERE `Id`='%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(DefCriterions.CRIT_CLURLS, siteId, queryCallback)
query = QUERY_TEMPLATE % (tableName, criterionsString, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# updateNewURLs updates sites.newURLs field
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateNewURLs(self, siteId, queryCallback):
QUERY_TEMPLATE = "UPDATE `sites` SET `NewURLs`=(SELECT count(*) FROM dc_urls.%s %s) WHERE `Id`='%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(DefCriterions.CRIT_NURLS, siteId, queryCallback)
query = QUERY_TEMPLATE % (tableName, criterionsString, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# updateErrors updates sites.Errors field
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateErrors(self, siteId, queryCallback):
QUERY_TEMPLATE = "UPDATE `sites` SET `Errors`=(SELECT count(*) FROM dc_urls.%s %s) WHERE `Id`='%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(DefCriterions.CRIT_ERRORS, siteId, queryCallback)
query = QUERY_TEMPLATE % (tableName, criterionsString, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# updateDeletedURLs updates sites.deletedURLs field
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateDeletedURLs(self, siteId, queryCallback):
QUERY_TEMPLATE_SELECT = "SELECT count(*) FROM %s %s"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(DefCriterions.CRIT_DURLS, siteId, queryCallback)
query = QUERY_TEMPLATE_SELECT % (tableName, criterionsString)
res = queryCallback(query, Constants.FOURTH_DB_ID, Constants.EXEC_INDEX, True)
if res is not None and len(res) > 0 and len(res[0]) > 0:
count = res[0][0]
QUERY_TEMPLATE_UPDATE = "UPDATE `sites` SET `DeletedURLs`=%s WHERE `Id`='%s'"
query = QUERY_TEMPLATE_UPDATE % (str(count), siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# commonRecalc method makes all recalculations
#
# @param siteId - siteId
# @param queryCallback - callback sql function
# @param recalcType - full or partial recalculating
def commonRecalc(self, siteId, queryCallback, recalcType=dc.EventObjects.FieldRecalculatorObj.FULL_RECALC):
self.siteResourcesRecalculate(siteId, queryCallback)
self.siteContentsRecalculate(siteId, queryCallback)
if recalcType == dc.EventObjects.FieldRecalculatorObj.FULL_RECALC:
self.updateCollectedURLs(siteId, queryCallback)
self.updateNewURLs(siteId, queryCallback)
self.updateDeletedURLs(siteId, queryCallback)
self.updateSiteCleanupFields(siteId, queryCallback)
# updateSiteCleanupFields recalculates some site's fields in SiteCleanUpTask operation
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateSiteCleanupFields(self, siteId, queryCallback):
QUERY_TEMPLATE = "UPDATE `sites` SET `Size`=%s, `Errors`=%s, `ErrorMask`=%s, `AVGSpeed`=%s WHERE `Id`='%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
localSize = "`Size`"
localErrors = "`Errors`"
localErrorMask = "`ErrorMask`"
localSpeed = "`AVGSpeed`"
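        # Defaults are the column names themselves, so the UPDATE below keeps the
        # current value for any statistic whose SELECT returns no usable result.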
TMP_QUERY_TEMPLATE = "SELECT SUM(`Size`) FROM %s WHERE " + DefCriterions.CRIT_CRAWLED_THIS_NODE
query = TMP_QUERY_TEMPLATE % tableName
res = queryCallback(query, Constants.SECONDARY_DB_ID)
if res is not None and len(res) > 0 and res[0] is not None and len(res[0]) > 0 and res[0][0] is not None:
localSize = str(res[0][0])
TMP_QUERY_TEMPLATE = "SELECT COUNT(*) FROM %s WHERE `errorMask` > 0 AND " + DefCriterions.CRIT_CRAWLED_THIS_NODE
query = TMP_QUERY_TEMPLATE % tableName
res = queryCallback(query, Constants.SECONDARY_DB_ID)
if res is not None and len(res) > 0 and res[0] is not None and len(res[0]) > 0 and res[0][0] is not None:
localErrors = str(res[0][0])
TMP_QUERY_TEMPLATE = "SELECT BIT_OR(`errorMask`) FROM %s WHERE " + DefCriterions.CRIT_CRAWLED_THIS_NODE
query = TMP_QUERY_TEMPLATE % tableName
res = queryCallback(query, Constants.SECONDARY_DB_ID)
if res is not None and len(res) > 0 and res[0] is not None and len(res[0]) > 0 and res[0][0] is not None:
localErrorMask = str(res[0][0])
TMP_QUERY_TEMPLATE = "SELECT AVG(`size`/`crawlingTime`*1000) FROM %s WHERE `crawlingTime` > 0 AND " + \
DefCriterions.CRIT_CRAWLED_THIS_NODE
query = TMP_QUERY_TEMPLATE % tableName
res = queryCallback(query, Constants.SECONDARY_DB_ID)
if res is not None and len(res) > 0 and res[0] is not None and len(res[0]) > 0 and res[0][0] is not None:
localSpeed = str(res[0][0])
query = QUERY_TEMPLATE % (localSize, localErrors, localErrorMask, localSpeed, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
|
[
"bgv@bgv-d9"
] |
bgv@bgv-d9
|
d4c7f258b88d04c56e29c54bb385cb510d252186
|
7f6b06334e6556ac91a19d410149129217070d5e
|
/cuda/cython/naive_add/setup.py
|
22ef269ec790486bdb8db5710a18aeb9b8ce4ced
|
[] |
no_license
|
saullocastro/programming
|
a402e5b7c34c80f0ce22e8a29ce7975b263f19c3
|
499938a566348649218dc3c0ec594a4babe4f1a4
|
refs/heads/master
| 2021-01-20T10:42:50.840178 | 2020-08-26T07:56:35 | 2020-08-26T07:56:35 | 21,904,820 | 11 | 2 | null | 2020-08-26T07:56:36 | 2014-07-16T14:47:32 |
Python
|
UTF-8
|
Python
| false | false | 625 |
py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [Extension('my_cuda',
sources=['my_cuda.pyx'],
libraries=['cuda_blockadd', 'cuda_threadadd',
'cuda_btadd', 'cuda_longadd'],
language='c',
extra_compile_args=['/openmp',
'/O2', '/favor:INTEL64', '/fp:fast'],
extra_link_args=[],
)]
setup(name = 'my_cuda',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules)
|
[
"[email protected]"
] | |
0ca3723e3a18f9b95623bfa22b6bc0ea65396537
|
deafd775f238b2836f77b9140f4d6e14a3f3c06d
|
/python/ABC/ABC097/ABC097_B.py
|
33adf33d1f0b2d4a6326a31c043fcede83728e3d
|
[] |
no_license
|
knakajima3027/Atcoder
|
ab8e2bf912173b7523fddbb11b38abd7e296762e
|
64cb32fcc4b99501f2f84496e5535e1e7b14c467
|
refs/heads/master
| 2021-06-22T03:58:03.777001 | 2020-12-19T11:23:49 | 2020-12-19T11:23:49 | 135,173,223 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 201 |
py
|
X = int(input())
max1 = 1
max2 = 0
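# Search for the largest perfect power b**q (q >= 2) that does not exceed X.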
for b in range(1, X+1):
for q in range(2, X+1):
if b**q > X:
break
max1 = b**q
if max1 > max2:
max2 = max1
print(max2)
|
[
"[email protected]"
] | |
96c79e41e1b372df9a1a4032bb02e97a2ae2b108
|
1959b8a6fb4bd61a6f87cd3affe39f2e3bdbf962
|
/{{cookiecutter.repo_name}}/data_prep/wham/scripts/constants.py
|
cca06053bada8df02ed0092e6deb6aa1bafb6822
|
[
"CC-BY-NC-4.0",
"MIT"
] |
permissive
|
pseeth/cookiecutter-nussl
|
a85d61d00b2352d105a8f68aa66aab6ae670894b
|
fab73e7b1f3d393409360d31662600d6fe434c37
|
refs/heads/master
| 2023-01-05T00:35:17.528898 | 2020-02-27T17:21:51 | 2020-02-27T17:21:51 | 222,519,043 | 6 | 1 |
MIT
| 2022-12-26T21:00:36 | 2019-11-18T18:42:37 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 179 |
py
|
NUM_BANDS = 4
SNR_THRESH = -6.
PRE_NOISE_SECONDS = 2.0
SAMPLERATE = 16000
MAX_SAMPLE_AMP = 0.95
MIN_SNR_DB = -3.
MAX_SNR_DB = 6.
PRE_NOISE_SAMPLES = PRE_NOISE_SECONDS * SAMPLERATE
|
[
"[email protected]"
] | |
7616992a58654839cafd96f3f3cecb93b25ed1eb
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/raw/EGL/NV/coverage_sample.py
|
0bfd3b6a9c0e69a1160f97d03cc07bb73773a2a0
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 536 |
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# Code generation uses this
# End users want this...
from OpenGL.raw.EGL import _errors
_EXTENSION_NAME = 'EGL_NV_coverage_sample'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.EGL, 'EGL_NV_coverage_sample', error_checker=_errors._error_checker)
EGL_COVERAGE_BUFFERS_NV = _C('EGL_COVERAGE_BUFFERS_NV', 0x30E0)
EGL_COVERAGE_SAMPLES_NV = _C('EGL_COVERAGE_SAMPLES_NV', 0x30E1)
|
[
"[email protected]"
] | |
68a134c008ed3034618e4f6e7ed24250bea0cd2b
|
7d85c42e99e8009f63eade5aa54979abbbe4c350
|
/game/lib/coginvasion/suit/DroppableCollectableJellybeans.py
|
39de5dba276a764c3073b5510817ddd5fdc69b91
|
[] |
no_license
|
ToontownServerArchive/Cog-Invasion-Online-Alpha
|
19c0454da87e47f864c0a5cb8c6835bca6923f0e
|
40498d115ed716f1dec12cf40144015c806cc21f
|
refs/heads/master
| 2023-03-25T08:49:40.878384 | 2016-07-05T07:09:36 | 2016-07-05T07:09:36 | 348,172,701 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 508 |
py
|
"""
Filename: DroppableCollectableJellybeans.py
Created by: blach (22Mar15)
"""
from direct.directnotify.DirectNotifyGlobal import directNotify
from DroppableCollectableObject import DroppableCollectableObject
class DroppableCollectableJellybeans(DroppableCollectableObject):
notify = directNotify.newCategory("DroppableCollectableJellybeans")
def __init__(self):
DroppableCollectableObject.__init__(self)
def unload(self):
self.collectSfx = None
DroppableCollectableObject.unload(self)
|
[
"[email protected]"
] | |
edae3b83d112182e5fd0828eeaf173b4a0a018e3
|
35b3e64d760b7091859a40f17c24c00681f116fa
|
/python两颗吃一颗/keras卷积神经网络2.py
|
34c1b01a6fd1a853dca5236f12701dc8676872b0
|
[] |
no_license
|
weiyinfu/TwoEatOne
|
8b9d27a28ba8469fc96fb9790cec04b30a19e21f
|
d87dfbbab550fc8bd0da3fec076034494da23bdc
|
refs/heads/master
| 2022-12-27T18:44:58.028285 | 2020-10-13T14:18:49 | 2020-10-13T14:18:49 | 79,469,916 | 1 | 0 | null | 2020-10-13T14:18:50 | 2017-01-19T16:04:02 |
Python
|
UTF-8
|
Python
| false | false | 1,080 |
py
|
import keras
from keras.layers import *
x_input = Input((4, 4, 1))
x_flat = Flatten()(x_input)
"""
Measure each piece's mobility, using 2x2 convolution kernels.
Several layers are stacked because the weights need to be accumulated.
"""
free_space = Conv2D(2, (2, 2), padding='SAME', activation='sigmoid')(x_input)
free_space = Conv2D(2, (2, 2), padding='VALID')(free_space)
"""
Capture perspective: 2x3 and 3x2 convolution kernels
"""
eat1 = Conv2D(2, (2, 3), padding='VALID', activation='sigmoid')(x_input)
eat2 = Conv2D(2, (3, 2), padding='VALID', activation='sigmoid')(x_input)
m = Concatenate()([Flatten()(i) for i in (eat1, eat2, free_space)] + [x_flat])
"""
Hand-written ResNet
"""
m = Dense(300, activation='relu')(Concatenate()([m, x_flat]))
m = Dense(16, activation='relu')(Concatenate()([m, x_flat]))
logits = Dense(3, activation='softmax')(m)
m = keras.models.Model(inputs=x_input, outputs=logits)
m.compile(keras.optimizers.RMSprop(0.01), "categorical_crossentropy", metrics=['accuracy'])
import data
x, y = data.get_data(True, True)
x = x.reshape(-1, 4, 4, 1)
batch_size = 120
m.fit(x, y, batch_size=batch_size, epochs=1000)
|
[
"[email protected]"
] | |
0f38bffb0639fb960bbd57073d3df069ab0bc9b8
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/misericordia.py
|
af153cf9f38085cb86112249126c670301afc267
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 138 |
py
|
ii = [('FitzRNS3.py', 1), ('SeniNSP.py', 8), ('ClarGE.py', 1), ('MereHHB.py', 1), ('FitzRNS.py', 2), ('FitzRNS2.py', 1), ('TaylIF.py', 1)]
|
[
"[email protected]"
] | |
4b208ef5abfe9ce4582acf2547252620f895832c
|
675cdd4d9d2d5b6f8e1383d1e60c9f758322981f
|
/pipeline/0x00-pandas/14-visualize.py
|
3a39452384e56b070eccd726adcc09dee9cb1d2a
|
[] |
no_license
|
AndresSern/holbertonschool-machine_learning-1
|
5c4a8db28438d818b6b37725ff95681c4757fd9f
|
7dafc37d306fcf2ea0f5af5bd97dfd78d388100c
|
refs/heads/main
| 2023-07-11T04:47:01.565852 | 2021-08-03T04:22:38 | 2021-08-03T04:22:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,356 |
py
|
#!/usr/bin/env python3
from datetime import date
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from_file = __import__('2-from_file').from_file
df = from_file('coinbaseUSD_1-min_data_2014-12-01_to_2019-01-09.csv', ',')
df = df.drop('Weighted_Price', axis=1)
df = df.rename(columns={'Timestamp': 'Date'})
df['Date'] = pd.to_datetime(df['Date'], unit='s')
df = df[df['Date'] >= '2017-01-01']
df = df.set_index('Date')
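# Forward-fill missing price columns; missing volume columns default to 0 below.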
df['Close'].fillna(method='ffill', inplace=True)
df["High"].fillna(method="ffill", inplace=True)
df["Low"].fillna(method="ffill", inplace=True)
df["Open"].fillna(method="ffill", inplace=True)
df.fillna(method='ffill')
df['Volume_(BTC)'].fillna(value=0, inplace=True)
df['Volume_(Currency)'].fillna(value=0, inplace=True)
high = df['High'].groupby(pd.Grouper(freq='D')).max()
low = df['Low'].groupby(pd.Grouper(freq='D')).min()
open = df['Open'].groupby(pd.Grouper(freq='D')).mean()
close = df['Close'].groupby(pd.Grouper(freq='D')).mean()
volume_btc = df['Volume_(BTC)'].groupby(pd.Grouper(freq='D')).sum()
volume_currency = df['Volume_(Currency)'].groupby(pd.Grouper(freq='D')).sum()
plt.plot(open)
plt.plot(high)
plt.plot(low)
plt.plot(close)
plt.plot(volume_btc)
plt.plot(volume_currency)
plt.legend()
plt.xlabel('Date')
# plt.ylim([0, 3000000])
# plt.yticks(np.arange(0, 3000000, 500000))
plt.show()
|
[
"[email protected]"
] | |
f24bb3459c66b3776bb45d37e75802752e515c55
|
17f527d6936397270183a35d7097e0a99de16cb5
|
/utokyo_ist_pastexam/2011_w/6.py
|
4829c80cda541037aa901a1d74f2f2bf80003ab2
|
[] |
no_license
|
ryosuke071111/algorithms
|
e942f043d08c7c7e2c926ed332ee2b8c44bdf0c5
|
867764450cc0f2a709fa2f743d9a0d95001e9296
|
refs/heads/master
| 2020-05-14T17:14:39.314064 | 2019-04-17T12:58:12 | 2019-04-17T12:58:12 | 181,888,623 | 11 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 300 |
py
|
from uti import *
f1=open('s3.txt')
f1=f1.read().strip('\n')
ls=[]
while len(f1)>0:
ls.append(f1[:1000])
f1=f1[1000:]
# # Compression
strings=""
dics=[]
for i in ls:
string,dic=compression(i)
strings+=string
# dics.append(dic)
print(strings)
# Decompression
print()
ans=decompression(string)
print(ans)
|
[
"[email protected]"
] | |
6e31a4079ade55066b95c9d5ff511edb6da190a9
|
3e9766f25777f7695247a45dd730ae60fd0a6c73
|
/main.py
|
168d4d1412a91d65894e4f0180c9cb890d2b537f
|
[] |
no_license
|
ivanmarkov97/RaseTransportManager
|
69de4599e3195b1945c8595a99a687e235e0891c
|
d8275d75e94efdbba76a63e557d33eff49b7ce4e
|
refs/heads/master
| 2020-03-20T02:44:11.515512 | 2018-06-12T20:17:11 | 2018-06-12T20:17:11 | 137,122,970 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,575 |
py
|
import copy
import tkinter as tk
from service import *
from DBhelper import DBhelper
def click_event():
global my_rases
global all_rases
global yscroll
global db_helper
print("click")
db_helper = DBhelper()
prev_all_rases = db_helper.get("rase")
print(time_away.get())
print(city_from.get(city_from.curselection()))
print(city_to.get(city_to.curselection()))
print(company.get())
print(plane.get())
t_away = time_away.get()
c_from = city_from.get(city_from.curselection())
c_to = city_to.get(city_to.curselection())
com = company.get()
pl = plane.get()
main(t_away, c_from, c_to, com, pl)
all_rases = tk.Listbox(root, width=90, height=10)
all_rases.grid(row=6, column=1)
yscroll = tk.Scrollbar(command=all_rases.yview, orient=tk.VERTICAL)
yscroll.grid(row=6, column=2, sticky='ns')
all_rases.configure(yscrollcommand=yscroll.set)
db_helper = DBhelper()
my_rases = db_helper.get("rase")
print(my_rases)
i = 0
while i < len(my_rases) - 1:
item_rase = []
for it in my_rases[i]:
item_rase.append(copy.deepcopy(str(it)))
if prev_all_rases[i][1] != my_rases[i][1]:
all_rases.insert(tk.END, str(item_rase))
all_rases.itemconfig(i, {'bg': 'yellow'})
else:
all_rases.insert(tk.END, str(item_rase))
i += 1
item_rase = []
for it in my_rases[i]:
item_rase.append(copy.deepcopy(str(it)))
all_rases.insert(tk.END, str(item_rase))
all_rases.itemconfig(i, {'bg': 'light green'})
lines = len(my_rases)
all_rases.yview_scroll(lines, 'units')
root = tk.Tk()
tk.Label(root, text="Время вылета", relief=tk.RIDGE, anchor='s', width=12).grid(row=0)
tk.Label(root, text="Точка вылета", relief=tk.RIDGE, anchor='s', width=12).grid(row=1)
tk.Label(root, text="Точка прилета", relief=tk.RIDGE, anchor='s', width=12).grid(row=2)
tk.Label(root, text="Компания", relief=tk.RIDGE, anchor='s', width=12).grid(row=3)
tk.Label(root, text="Самолет", relief=tk.RIDGE, anchor='s', width=12).grid(row=4)
tk.Label(root, text="Рейсы", relief=tk.RIDGE, anchor='s', width=12).grid(row=6)
time_away = tk.Entry(root, width=50)
time_away.insert(0, '2018-05-05 12:30:00')
time_away.grid(row=0, column=1)
city_from = tk.Listbox(root, width=50, height=2, exportselection=0)
city_from.grid(row=1, column=1)
company = tk.Entry(root, width=50)
company.grid(row=3, column=1)
plane = tk.Entry(root, width=50)
plane.grid(row=4, column=1)
b1 = tk.Button(root, text='Add', command=click_event)
b1.grid(row=5, column=1)
for item in ["St.Petersberg, Pulkovo", "Moscow, Sheremetyevo"]:
city_from.insert(tk.END, item)
city_to = tk.Listbox(root, width=50, height=2, exportselection=0)
city_to.grid(row=2, column=1)
for item in ["St.Petersberg, Pulkovo", "Moscow, Sheremetyevo"]:
city_to.insert(tk.END, item)
all_rases = tk.Listbox(root, width=90, height=10)
all_rases.grid(row=6, column=1)
yscroll = tk.Scrollbar(command=all_rases.yview, orient=tk.VERTICAL)
yscroll.grid(row=6, column=2, sticky='ns')
all_rases.configure(yscrollcommand=yscroll.set)
db_helper = DBhelper()
my_rases = db_helper.get("rase")
print(my_rases)
for rase in my_rases:
item_rase = []
for it in rase:
item_rase.append(copy.deepcopy(str(it)))
all_rases.insert(tk.END, str(item_rase))
lines = len(my_rases)
all_rases.yview_scroll(lines, 'units')
root.geometry("650x350")
root.resizable(width=False, height=False)
root.mainloop()
|
[
"[email protected]"
] | |
f00209d2c8adb18f187d9a07b18295d08eb5a998
|
84b0c9adeeba03e8dadf20346aae61e5a343d8c6
|
/tf_agents/bandits/networks/global_and_arm_feature_network_test.py
|
f4c51f215542a38f20c67391e6012069a7d7b8ba
|
[
"Apache-2.0"
] |
permissive
|
trinity77/agents
|
fa8e0a31898f4cc5178d8108c86ede95d2f36aa3
|
baf18f275294e902f462404d21168ca4697e2f6f
|
refs/heads/master
| 2022-10-19T05:14:01.172058 | 2020-06-03T19:50:57 | 2020-06-03T19:50:57 | 269,180,393 | 0 | 0 |
Apache-2.0
| 2020-06-03T19:50:58 | 2020-06-03T19:50:02 | null |
UTF-8
|
Python
| false | false | 5,364 |
py
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.networks.global_and_arm_feature_network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.networks import global_and_arm_feature_network as gafn
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.specs import tensor_spec
from tf_agents.utils import test_utils
parameters = parameterized.named_parameters(
{
'testcase_name': 'batch2feat4act3',
'batch_size': 2,
'feature_dim': 4,
'num_actions': 3
}, {
'testcase_name': 'batch1feat7act9',
'batch_size': 1,
'feature_dim': 7,
'num_actions': 9
})
class GlobalAndArmFeatureNetworkTest(parameterized.TestCase,
test_utils.TestCase):
@parameters
def testCreateFeedForwardCommonTowerNetwork(self, batch_size, feature_dim,
num_actions):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, feature_dim, num_actions)
net = gafn.create_feed_forward_common_tower_network(obs_spec, (4, 3, 2),
(6, 5, 4), (7, 6, 5))
input_nest = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=(batch_size,))
output, _ = net(input_nest)
self.evaluate(tf.compat.v1.global_variables_initializer())
output = self.evaluate(output)
self.assertAllEqual(output.shape, (batch_size, num_actions))
@parameters
def testCreateFeedForwardDotProductNetwork(self, batch_size, feature_dim,
num_actions):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, feature_dim, num_actions)
net = gafn.create_feed_forward_dot_product_network(obs_spec, (4, 3, 4),
(6, 5, 4))
input_nest = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=(batch_size,))
output, _ = net(input_nest)
self.evaluate(tf.compat.v1.global_variables_initializer())
output = self.evaluate(output)
self.assertAllEqual(output.shape, (batch_size, num_actions))
def testCreateFeedForwardCommonTowerNetworkWithFeatureColumns(
self, batch_size=2, feature_dim=4, num_actions=3):
obs_spec = {
'global': {
'dense':
tensor_spec.TensorSpec(shape=(feature_dim,), dtype=tf.float32),
'composer':
tensor_spec.TensorSpec((), tf.string)
},
'per_arm': {
'name': tensor_spec.TensorSpec((num_actions,), tf.string),
'fruit': tensor_spec.TensorSpec((num_actions,), tf.string)
}
}
columns_dense = tf.feature_column.numeric_column(
'dense', shape=(feature_dim,))
columns_composer = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'composer', ['wolfgang', 'amadeus', 'mozart']))
columns_name = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'name', ['bob', 'george', 'wanda']))
columns_fruit = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'fruit', ['banana', 'kiwi', 'pear']))
net = gafn.create_feed_forward_common_tower_network(
observation_spec=obs_spec,
global_layers=(4, 3, 2),
arm_layers=(6, 5, 4),
common_layers=(7, 6, 5),
global_preprocessing_combiner=tf.compat.v2.keras.layers.DenseFeatures(
[columns_dense, columns_composer]),
arm_preprocessing_combiner=tf.compat.v2.keras.layers.DenseFeatures(
[columns_name, columns_fruit]))
input_nest = {
'global': {
'dense': tf.constant(np.random.rand(batch_size, feature_dim)),
'composer': tf.constant(['wolfgang', 'mozart'])
},
'per_arm': {
'name':
tf.constant([[['george'], ['george'], ['george']],
[['bob'], ['bob'], ['bob']]]),
'fruit':
tf.constant([[['banana'], ['banana'], ['banana']],
[['kiwi'], ['kiwi'], ['kiwi']]])
}
}
output, _ = net(input_nest)
self.evaluate([
tf.compat.v1.global_variables_initializer(),
tf.compat.v1.tables_initializer()
])
output = self.evaluate(output)
self.assertAllEqual(output.shape, (batch_size, num_actions))
if __name__ == '__main__':
tf.test.main()
|
[
"[email protected]"
] | |
f0016ddc6d958b94d872ad1f44a38ef76afa0387
|
c7511b81624a556978550341fa7a885022ab45e9
|
/tree/balanced-BT.py
|
546737be9291e23df2da610d6df391f4738da25a
|
[] |
no_license
|
sandipan898/ds-algo-python-set-2
|
372de5e01aeda1edf7289cd784195480ca0a3696
|
53859d71d980dc08de8bd51acc049537082df0c9
|
refs/heads/main
| 2023-07-06T11:26:34.819144 | 2021-08-09T19:26:16 | 2021-08-09T19:26:16 | 378,245,185 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,137 |
py
|
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def find_height(root):
if root is None:
return 0
return max(find_height(root.left), find_height(root.right)) + 1
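# Naive balance check: subtree heights are recomputed at every node, O(n^2) in the worst case.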
def is_balanced_tree(root):
if root is None:
return True
left_height = find_height(root.left)
right_height = find_height(root.right)
print("Node {} have Balance Factor of {}".format(root.data, left_height - right_height))
if abs(left_height - right_height) > 1:
return False
return (is_balanced_tree(root.left) and is_balanced_tree(root.right))
if __name__ == '__main__':
root = Node(2)
root.left = Node(3)
root.right = Node(4)
root.left.left = Node(5)
root.left.right = Node(6)
root.right.left = Node(7)
root.right.left.left = Node(8)
root.right.left.right = Node(9)
'''
2
/ \
3 4
/ \ /
5 6 7
/ \
8 9
'''
if is_balanced_tree(root):
print("This is a Balanced Binary Tree!")
else:
print("This is Not a Balanced Binary Tree!")
|
[
"[email protected]"
] | |
c818fe7898cfed2b1e0843d46953deed4b626626
|
68002ae9f124d808395b51944b616da298068b62
|
/Data_Visualization/Plot/Matplotlib/FT748/Ch12/Ch12_4_1.py
|
8d7d43c678747a8b7b8ba1d85dcf9604d5aeb10e
|
[] |
no_license
|
supergravity/PythonLearning-
|
b4dd51205470f27e1ba92ec19b1fa1c967101545
|
eb53b048650a7272819b45943d3dd40fa91d2297
|
refs/heads/master
| 2023-01-04T23:15:59.945385 | 2020-11-06T13:35:15 | 2020-11-06T13:35:15 | 82,797,936 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
import pandas as pd
df = pd.read_csv("products.csv", encoding="utf8")
df.columns = ["type", "name", "price"]
ordinals = ["A", "B", "C", "D", "E", "F"]
df.index = ordinals
print(df.head(2))
# Get and update a single scalar value
print(df.loc[ordinals[0], "price"])
df.loc[ordinals[0], "price"] = 21.6
print(df.iloc[1,2])
df.iloc[1,2] = 46.3
print(df.head(2))
df.head(2).to_html("Ch12_4_1.html")
|
[
"[email protected]"
] | |
58c61840621f2e94fb6ad4e5358755b34cb76928
|
e058d8501ba8fa70c4e7a60b669e92bab1044f03
|
/apps/postman/admin.py
|
2b4d854053c1d7da4248b86b3e97d79ef0786d34
|
[] |
no_license
|
wd5/7-byhands
|
f1f237f5dc896ce804d91e958793c074ab4f3a14
|
d8a29e77f21e2947bed8414dc6ae0144798bd5a3
|
refs/heads/master
| 2021-01-18T07:19:46.447486 | 2013-01-28T21:37:55 | 2013-01-28T21:37:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 374 |
py
|
from django.contrib import admin
from django.conf import settings
from models import *
class SubscriberAdmin(admin.ModelAdmin):
pass
class MailAdmin(admin.ModelAdmin):
pass
class SentLogAdmin(admin.ModelAdmin):
pass
admin.site.register(Subscriber, SubscriberAdmin)
admin.site.register(SentLog , SentLogAdmin)
admin.site.register(Mail , MailAdmin)
|
[
"[email protected]"
] | |
08fe3bebae5f5c5f51f87adafb3f87e9659b1c8b
|
38b9706d8aea076c453e83a7ab2becc8bc2e57ed
|
/Array/39.py
|
f786f36930ae7d4ad6cddd537a6cc28e85505aa6
|
[] |
no_license
|
sathish0706/guvi-codekata
|
e564e18bc174c94eb22a8ed483a9eb93a85fecab
|
cf4a508beaccfe46de4b34ffd2ec1e126a9e9807
|
refs/heads/main
| 2023-03-19T19:18:19.070994 | 2021-01-11T16:18:00 | 2021-01-11T16:18:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 253 |
py
|
from collections import defaultdict
n = int(input())
m = list(map(int,input().split()))
d = defaultdict(int)
for i in m:
if i in d:
d[i] += 1
else:
d[i] = 1
count = 0
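# Report a value that occurs exactly once (the last such key if there are several).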
for i in d:
if d[i] == 1:
count = i
print(count)
|
[
"[email protected]"
] | |
530d39a7ad9a99f45a07382ab008cbebb4e6f6e7
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_289/ch67_2020_04_27_14_46_16_989502.py
|
05a10228396c4bc4074e1e5a2d4230107ae953a5
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 200 |
py
|
def alunos_impares(lista_nomes):
lista_impares = []
    for e in range(len(lista_nomes)):
        if lista_nomes[e] % 2 != 0:
lista_impares.append(lista_nomes[e])
return lista_impares
|
[
"[email protected]"
] |