hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
sequencelengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
sequencelengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
sequencelengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
813cfc21850f486d6ac29f7b86826c89d492a555 | 41,687 | py | Python | core/models.py | uktrade/great-cms | f13fa335ddcb925bc33a5fa096fe73ef7bdd351a | [
"MIT"
] | 10 | 2020-04-30T12:04:35.000Z | 2021-07-21T12:48:55.000Z | core/models.py | uktrade/great-cms | f13fa335ddcb925bc33a5fa096fe73ef7bdd351a | [
"MIT"
] | 1,461 | 2020-01-23T18:20:26.000Z | 2022-03-31T08:05:56.000Z | core/models.py | uktrade/great-cms | f13fa335ddcb925bc33a5fa096fe73ef7bdd351a | [
"MIT"
] | 3 | 2020-04-07T20:11:36.000Z | 2020-10-16T16:22:59.000Z | import hashlib
import mimetypes
from urllib.parse import unquote
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.http import HttpResponseRedirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField
from great_components.mixins import GA360Mixin
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.models import ClusterableModel, ParentalKey
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase, TaggedItemBase
from wagtail.admin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
ObjectList,
PageChooserPanel,
StreamFieldPanel,
TabbedInterface,
)
from wagtail.contrib.redirects.models import Redirect
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.core import blocks
from wagtail.core.blocks.stream_block import StreamBlockValidationError
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page
from wagtail.images import get_image_model_string
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.images.models import AbstractImage, AbstractRendition, Image
from wagtail.snippets.models import register_snippet
from wagtail.utils.decorators import cached_classmethod
from wagtailmedia.models import Media
from core import blocks as core_blocks, mixins
from core.case_study_index import delete_cs_index, update_cs_index
from core.constants import BACKLINK_QUERYSTRING_NAME, RICHTEXT_FEATURES__MINIMAL
from core.context import get_context_provider
from core.utils import PageTopicHelper, get_first_lesson
from exportplan.core.data import (
SECTION_SLUGS as EXPORTPLAN_SLUGS,
SECTIONS as EXPORTPLAN_URL_MAP,
)
# If we make a Redirect appear as a Snippet, we can sync it via Wagtail-Transfer
register_snippet(Redirect)
class TimeStampedModel(models.Model):
    """Modified version of django_extensions.db.models.TimeStampedModel
    Unfortunately, because null=True needed to be added to create and
    modified fields, inheritance causes issues with field clash.
    """
    # Set when the row is first created; nullable (see docstring above).
    created = CreationDateTimeField('created', null=True)
    # Refreshed automatically on every save; nullable for the same reason.
    modified = ModificationDateTimeField('modified', null=True)
# Content models
def hero_singular_validation(value):
    """StreamField validator: allow at most one item in a Hero section.

    Raises ``StreamBlockValidationError`` when *value* contains more than
    one image/video block; otherwise returns ``None``.
    """
    if not value or len(value) <= 1:
        return None
    raise StreamBlockValidationError(
        non_block_errors=ValidationError('Only one image or video allowed in Hero section', code='invalid'),
    )
def _get_backlink_title(self, backlink_path):
"""For a given backlink, see if we can get a title that goes with it.
For now, this is limited only to Export Plan pages/links.
"""
# We have to re-arrange EXPORT_PLAN_SECTION_TITLES_URLS after import
# because it features lazily-evaluated URLs that aren't ready when
# models are imported
if backlink_path and len(backlink_path.split('/')) > 3:
_path = backlink_path.split('/')[3]
return self._export_plan_url_map.get(_path)
class PageView(TimeStampedModel):
    """A recorded view of a DetailPage, keyed by SSO user id.

    NOTE(review): semantics inferred from field names -- confirm against
    the code that creates these records.
    """
    # Cascade: deleting the page also removes its recorded views.
    page = models.ForeignKey(DetailPage, on_delete=models.CASCADE, related_name='page_views')
    list_page = models.ForeignKey(ListPage, on_delete=models.CASCADE, related_name='page_views_list')
    # Identifier of the viewing user, stored as free text.
    sso_id = models.TextField()
# TODO: deprecate and remove
# TODO: deprecate and remove
# If you're wondering what's going on here:
# https://docs.wagtail.io/en/stable/reference/pages/model_recipes.html#custom-tag-models
def _high_level_validation(value, error_messages):
TEXT_BLOCK = 'text' # noqa N806
MEDIA_BLOCK = 'media' # noqa N806
QUOTE_BLOCK = 'quote' # noqa N806
# we need to be strict about presence and ordering of these nodes
if [node.block_type for node in value if node.block_type != QUOTE_BLOCK] != [MEDIA_BLOCK, TEXT_BLOCK]:
error_messages.append(
(
'This block must contain one Media section (with one or '
'two items in it) and/or a Quote section, then one Text section following it.'
)
)
return error_messages
def _low_level_validation(value, error_messages):
# Check content of media node, which should be present here
MEDIA_BLOCK = 'media' # noqa N806
VIDEO_BLOCK = 'video' # noqa N806
for node in value:
if node.block_type == MEDIA_BLOCK:
subnode_block_types = [subnode.block_type for subnode in node.value]
if len(subnode_block_types) == 2:
if set(subnode_block_types) == {VIDEO_BLOCK}:
# Two videos: not allowed
error_messages.append('Only one video may be used in a case study.')
elif subnode_block_types[1] == VIDEO_BLOCK:
# implicitly, [0] must be an image
# video after image: not allowed
error_messages.append('The video must come before a still image.')
return error_messages
def case_study_body_validation(value):
    """Validate the structure of a case-study body stream.

    The stream must contain exactly one Media node followed by one Text
    node (Quote nodes are permitted), and the media node may hold:

    * One image, only
    * One video, only
    * One video + One image (the video must come first so it displays first)
    * Two images
    """
    if not value:
        return None
    problems = _high_level_validation(value, [])
    problems = _low_level_validation(value, problems)
    if problems:
        raise StreamBlockValidationError(
            non_block_errors=ValidationError('; '.join(problems), code='invalid'),
        )
    return None
| 32.491816 | 120 | 0.614052 |
813ec18cfeb4f9f63d67da715da440d160d1cd07 | 9,860 | py | Python | CV/Effective Transformer-based Solution for RSNA Intracranial Hemorrhage Detection/easymia/transforms/transforms.py | dumpmemory/Research | 30fd70ff331b3d9aeede0b71e7a691ed6c2b87b3 | [
"Apache-2.0"
] | null | null | null | CV/Effective Transformer-based Solution for RSNA Intracranial Hemorrhage Detection/easymia/transforms/transforms.py | dumpmemory/Research | 30fd70ff331b3d9aeede0b71e7a691ed6c2b87b3 | [
"Apache-2.0"
] | null | null | null | CV/Effective Transformer-based Solution for RSNA Intracranial Hemorrhage Detection/easymia/transforms/transforms.py | dumpmemory/Research | 30fd70ff331b3d9aeede0b71e7a691ed6c2b87b3 | [
"Apache-2.0"
] | null | null | null | # -*-coding utf-8 -*-
##########################################################################
#
# Copyright (c) 2022 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
"""
"""
import numpy as np
import numbers
import collections
import random
import math
import cv2
from . import functional as F
from easymia.core.abstract_transforms import AbstractTransform
from easymia.libs import manager
| 34.840989 | 136 | 0.596349 |
813efba40d450227c03f83890923f36f0af07beb | 1,370 | py | Python | tests/ui/terms/test_views.py | galterlibrary/InvenioRDM-at-NU | 5aff6ac7c428c9a61bdf221627bfc05f2280d1a3 | [
"MIT"
] | 6 | 2019-09-02T00:01:50.000Z | 2021-11-04T08:23:40.000Z | tests/ui/terms/test_views.py | galterlibrary/InvenioRDM-at-NU | 5aff6ac7c428c9a61bdf221627bfc05f2280d1a3 | [
"MIT"
] | 72 | 2019-09-04T18:52:35.000Z | 2020-07-21T19:58:15.000Z | tests/ui/terms/test_views.py | galterlibrary/InvenioRDM-at-NU | 5aff6ac7c428c9a61bdf221627bfc05f2280d1a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of menRva.
# Copyright (C) 2018-present NU,FSM,GHSL.
#
# menRva is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test terms views.py"""
from cd2h_repo_project.modules.terms.views import serialize_terms_for_edit_ui
| 28.541667 | 77 | 0.642336 |
813f340b009c015cf7a900f2f532f4b131c3414d | 1,766 | py | Python | main.py | alamin3637k/Searcher | bb948b373d1bd1261930a47c37fa9210a98e9ef3 | [
"MIT"
] | 1 | 2021-12-13T06:30:54.000Z | 2021-12-13T06:30:54.000Z | main.py | alamin3637k/Searcher | bb948b373d1bd1261930a47c37fa9210a98e9ef3 | [
"MIT"
] | null | null | null | main.py | alamin3637k/Searcher | bb948b373d1bd1261930a47c37fa9210a98e9ef3 | [
"MIT"
] | null | null | null | import webbrowser
import wikipedia
import requests
def test_site(search: str):
    """Check whether *search* is reachable over HTTP.

    The argument must include the scheme, e.g. ``http://example.com``.
    Returns "site working" for an HTTP 200 response and
    "site not working" for connection errors or any other status code.
    """
    # NOTE(review): consider passing a timeout to requests.get so a hung
    # server cannot block this call indefinitely.
    try:
        r = requests.get(search)
    except Exception as error:
        print(error)
        return "site not working"
    if r.status_code == 200:
        print("site working")
        return "site working"
    # Previously a non-200 response fell through and returned None;
    # report it explicitly so callers always get a status string.
    print("site not working")
    return "site not working"
| 30.448276 | 85 | 0.701586 |
813ffa71bdba0211d608c2b11546d97e7ed15b73 | 9,307 | py | Python | hw1.py | ptsurko/coursera_crypt | ec952800c441a9b07ac427045851285fee8c6543 | [
"MIT"
] | null | null | null | hw1.py | ptsurko/coursera_crypt | ec952800c441a9b07ac427045851285fee8c6543 | [
"MIT"
] | null | null | null | hw1.py | ptsurko/coursera_crypt | ec952800c441a9b07ac427045851285fee8c6543 | [
"MIT"
] | null | null | null | import string
from timeit import itertools
s1 = '315c4eeaa8b5f8aaf9174145bf43e1784b8fa00dc71d885a804e5ee9fa40b16349c146fb778cdf2d3aff021dfff5b403b510d0d0455468aeb98622b137dae857553ccd8883a7bc37520e06e515d22c954eba5025b8cc57ee59418ce7dc6bc41556bdb36bbca3e8774301fbcaa3b83b220809560987815f65286764703de0f3d524400a19b159610b11ef3e'
s2 = '234c02ecbbfbafa3ed18510abd11fa724fcda2018a1a8342cf064bbde548b12b07df44ba7191d9606ef4081ffde5ad46a5069d9f7f543bedb9c861bf29c7e205132eda9382b0bc2c5c4b45f919cf3a9f1cb74151f6d551f4480c82b2cb24cc5b028aa76eb7b4ab24171ab3cdadb8356f'
s3 = '32510ba9a7b2bba9b8005d43a304b5714cc0bb0c8a34884dd91304b8ad40b62b07df44ba6e9d8a2368e51d04e0e7b207b70b9b8261112bacb6c866a232dfe257527dc29398f5f3251a0d47e503c66e935de81230b59b7afb5f41afa8d661cb'
s4 = '32510ba9aab2a8a4fd06414fb517b5605cc0aa0dc91a8908c2064ba8ad5ea06a029056f47a8ad3306ef5021eafe1ac01a81197847a5c68a1b78769a37bc8f4575432c198ccb4ef63590256e305cd3a9544ee4160ead45aef520489e7da7d835402bca670bda8eb775200b8dabbba246b130f040d8ec6447e2c767f3d30ed81ea2e4c1404e1315a1010e7229be6636aaa'
s5 = '3f561ba9adb4b6ebec54424ba317b564418fac0dd35f8c08d31a1fe9e24fe56808c213f17c81d9607cee021dafe1e001b21ade877a5e68bea88d61b93ac5ee0d562e8e9582f5ef375f0a4ae20ed86e935de81230b59b73fb4302cd95d770c65b40aaa065f2a5e33a5a0bb5dcaba43722130f042f8ec85b7c2070'
s6 = '32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd2061bbde24eb76a19d84aba34d8de287be84d07e7e9a30ee714979c7e1123a8bd9822a33ecaf512472e8e8f8db3f9635c1949e640c621854eba0d79eccf52ff111284b4cc61d11902aebc66f2b2e436434eacc0aba938220b084800c2ca4e693522643573b2c4ce35050b0cf774201f0fe52ac9f26d71b6cf61a711cc229f77ace7aa88a2f19983122b11be87a59c355d25f8e4'
s7 = '32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd90f1fa6ea5ba47b01c909ba7696cf606ef40c04afe1ac0aa8148dd066592ded9f8774b529c7ea125d298e8883f5e9305f4b44f915cb2bd05af51373fd9b4af511039fa2d96f83414aaaf261bda2e97b170fb5cce2a53e675c154c0d9681596934777e2275b381ce2e40582afe67650b13e72287ff2270abcf73bb028932836fbdecfecee0a3b894473c1bbeb6b4913a536ce4f9b13f1efff71ea313c8661dd9a4ce'
s8 = '315c4eeaa8b5f8bffd11155ea506b56041c6a00c8a08854dd21a4bbde54ce56801d943ba708b8a3574f40c00fff9e00fa1439fd0654327a3bfc860b92f89ee04132ecb9298f5fd2d5e4b45e40ecc3b9d59e9417df7c95bba410e9aa2ca24c5474da2f276baa3ac325918b2daada43d6712150441c2e04f6565517f317da9d3'
s9 = '271946f9bbb2aeadec111841a81abc300ecaa01bd8069d5cc91005e9fe4aad6e04d513e96d99de2569bc5e50eeeca709b50a8a987f4264edb6896fb537d0a716132ddc938fb0f836480e06ed0fcd6e9759f40462f9cf57f4564186a2c1778f1543efa270bda5e933421cbe88a4a52222190f471e9bd15f652b653b7071aec59a2705081ffe72651d08f822c9ed6d76e48b63ab15d0208573a7eef027'
s10 = '466d06ece998b7a2fb1d464fed2ced7641ddaa3cc31c9941cf110abbf409ed39598005b3399ccfafb61d0315fca0a314be138a9f32503bedac8067f03adbf3575c3b8edc9ba7f537530541ab0f9f3cd04ff50d66f1d559ba520e89a2cb2a83'
s11 = '32510ba9babebbbefd001547a810e67149caee11d945cd7fc81a05e9f85aac650e9052ba6a8cd8257bf14d13e6f0a803b54fde9e77472dbff89d71b57bddef121336cb85ccb8f3315f4b52e301d16e9f52f904'
MSGS = (s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11)
MSGS_DECODED = [s.decode('hex') for s in MSGS]
# def main():
# for c in combinations('ABCD', 2):
# print c
def output_combinations_table():
  """Write combinations.html: a table of the XOR of every ciphertext pair.

  Each row is one pair from MSGS; each cell shows the XORed byte as a
  decimal value plus, for printable bytes, an HTML character reference.
  Python 2 code (str.decode('hex'), xrange).
  """
  # NOTE(review): combinations(MSGS, 2) yields 2-tuples, so unpacking into
  # `i1, i2, (s1,s2)` raises ValueError as written -- this was probably
  # `combinations(enumerate(MSGS), 2)` originally; confirm against upstream.
  # `strxor` is also not defined in this file as shown.
  comb = [(i1, i2, strxor(s1.decode('hex'), s2.decode('hex'))) for i1, i2, (s1,s2) in combinations(MSGS, 2)]
  html = '<html><body>'
  html += '<table style="white-space:nowrap" border="1">'
  html += '<thead>'
  html += '<tr>'
  # Find the longest XOR string so the header can span every column.
  # max_len = max(combinations, key=lambda x: len(x))
  max_len = 0
  for i1, i2, c in comb:
    if len(c) > max_len:
      max_len = len(c)
  # print max_len
  html += '<th></th>'
  for i in xrange(max_len):
    html += '<th>' + str(i) + '</th>'
  html += '</tr>'
  html += '</thead>'
  # One row per ciphertext pair (1-based indices in the first cell).
  for i1, i2, c in comb:
    html += '<tr>'
    html += '<td>(%s, %s)</td>' % (i1 + 1, i2 + 1)
    for ch in c:
      html += '<td>'
      html += '%02d' % ord(ch)
      if ch in string.printable:
        html += '<br />'
        html += '&#%d;' % ord(ch)
      html += '</td>'
    html += '</tr>'
  # Repeat the column-index header as a final row for readability.
  html += '<tr>'
  html += '<th></th>'
  for i in xrange(max_len):
    html += '<th>' + str(i) + '</th>'
  html += '</tr>'
  html += '</table>'
  html += '</body>'
  html += '</html>'
  with open('combinations.html', 'w') as f:
    f.write(html)
if __name__ == "__main__":
  # `main` exists only in the commented-out sketch above, so calling it
  # raised NameError. Run the real entry point instead.
  output_combinations_table()
81411abc782bf9b1f6f3f22e5119bf12fc73f345 | 5,777 | py | Python | moe/bandit/ucb/ucb_interface.py | dstoeckel/MOE | 5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c | [
"Apache-2.0"
] | 966 | 2015-01-10T05:27:30.000Z | 2022-03-26T21:04:36.000Z | moe/bandit/ucb/ucb_interface.py | dstoeckel/MOE | 5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c | [
"Apache-2.0"
] | 46 | 2015-01-16T22:33:08.000Z | 2019-09-04T16:33:27.000Z | moe/bandit/ucb/ucb_interface.py | dstoeckel/MOE | 5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c | [
"Apache-2.0"
] | 143 | 2015-01-07T03:57:19.000Z | 2022-02-28T01:10:45.000Z | # -*- coding: utf-8 -*-
"""Classes (Python) to compute the Bandit UCB (Upper Confidence Bound) arm allocation and choosing the arm to pull next.
See :mod:`moe.bandit.bandit_interface` for further details on bandit.
"""
import copy
from abc import abstractmethod
from moe.bandit.bandit_interface import BanditInterface
from moe.bandit.utils import get_winning_arm_names_from_payoff_arm_name_list, get_equal_arm_allocations
def get_winning_arm_names(self, arms_sampled):
    r"""Compute the set of winning arm names based on the given ``arms_sampled``..
    Throws an exception when arms_sampled is empty.
    :param arms_sampled: a dictionary of arm name to :class:`moe.bandit.data_containers.SampleArm`
    :type arms_sampled: dictionary of (str, SampleArm()) pairs
    :return: set of names of the winning arms
    :rtype: frozenset(str)
    :raise: ValueError when ``arms_sampled`` are empty.
    """
    if not arms_sampled:
        raise ValueError('arms_sampled is empty!')
    # If there exists an unsampled arm, return the names of the unsampled arms
    # first -- presumably so every arm is pulled at least once before UCB
    # payoffs are compared (standard UCB behaviour; confirm in subclasses).
    unsampled_arm_names = self.get_unsampled_arm_names(arms_sampled)
    if unsampled_arm_names:
        return unsampled_arm_names
    # Total number of pulls across all arms; passed to get_ucb_payoff.
    number_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.itervalues()])
    # Python 2 dict iteration (itervalues/iteritems) -- this module predates Python 3.
    ucb_payoff_arm_name_list = [(self.get_ucb_payoff(sampled_arm, number_sampled), arm_name) for arm_name, sampled_arm in arms_sampled.iteritems()]
    return get_winning_arm_names_from_payoff_arm_name_list(ucb_payoff_arm_name_list)
| 41.561151 | 171 | 0.701229 |
8141278e8aec7ffc16f0909af9f0862c9b9fc0df | 296 | py | Python | Hedge/Shell.py | RonaldoAPSD/Hedge | 2a1550ea38a0384f39ed3541c8a91f9ca57f5a64 | [
"Apache-2.0"
] | 2 | 2020-08-16T01:42:32.000Z | 2020-08-28T21:10:03.000Z | Hedge/Shell.py | RonaldoAPSD/Hedge | 2a1550ea38a0384f39ed3541c8a91f9ca57f5a64 | [
"Apache-2.0"
] | null | null | null | Hedge/Shell.py | RonaldoAPSD/Hedge | 2a1550ea38a0384f39ed3541c8a91f9ca57f5a64 | [
"Apache-2.0"
] | null | null | null | import Hedge
# Simple REPL: read a line, run it through the Hedge interpreter and
# print either the error or the result.
while True:
  text = input('Hedge > ')
  if text.strip() == "":
    continue
  result, error = Hedge.run('<stdin>', text)
  if error:
    print(error.asString())
  elif result:
    # Single-element results are unwrapped for friendlier display.
    if len(result.elements) == 1:
      print(repr(result.elements[0]))
    else:
      print(repr(result))
81433f45286c6ca7869898f63194549b86792d2f | 14,420 | py | Python | yt/frontends/enzo/io.py | Xarthisius/yt | 321643c3abff64a6f132d98d0747f3558f7552a3 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-05-20T13:03:57.000Z | 2021-05-20T13:03:57.000Z | yt/frontends/enzo/io.py | Xarthisius/yt | 321643c3abff64a6f132d98d0747f3558f7552a3 | [
"BSD-3-Clause-Clear"
] | 31 | 2017-04-19T21:07:18.000Z | 2017-04-20T01:08:43.000Z | yt/frontends/enzo/io.py | Xarthisius/yt | 321643c3abff64a6f132d98d0747f3558f7552a3 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-04-21T07:01:51.000Z | 2021-04-21T07:01:51.000Z | import numpy as np
from yt.geometry.selection_routines import GridSelector
from yt.utilities.io_handler import BaseIOHandler
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _h5py as h5py
_convert_mass = ("particle_mass", "mass")
_particle_position_names = {}
| 38.867925 | 85 | 0.499792 |
81434230700195b62a622200418ac9737e7bcf37 | 1,275 | py | Python | cidr_enum.py | arisada/cidr_enum | 1908f20ac15a83738fc1ff74ff17a7280bec769f | [
"BSD-2-Clause"
] | null | null | null | cidr_enum.py | arisada/cidr_enum | 1908f20ac15a83738fc1ff74ff17a7280bec769f | [
"BSD-2-Clause"
] | null | null | null | cidr_enum.py | arisada/cidr_enum | 1908f20ac15a83738fc1ff74ff17a7280bec769f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
cidr_enum.py is a very simple tool to help enumerate IP ranges when being used with other tools
"""
import argparse
import netaddr
if __name__ == '__main__':
main()
| 25 | 95 | 0.677647 |
81434e0f75802811d789efae93fbec2c949725b8 | 7,469 | py | Python | configs/k400-fixmatch-tg-alignment-videos-ptv-simclr/8gpu/r3d_r18_8x8x1_45e_k400_rgb_offlinetg_1percent_align0123_1clip_no_contrast_precisebn_ptv.py | lambert-x/video_semisup | 8ff44343bb34485f8ad08d50ca4d8de22e122c1d | [
"Apache-2.0"
] | null | null | null | configs/k400-fixmatch-tg-alignment-videos-ptv-simclr/8gpu/r3d_r18_8x8x1_45e_k400_rgb_offlinetg_1percent_align0123_1clip_no_contrast_precisebn_ptv.py | lambert-x/video_semisup | 8ff44343bb34485f8ad08d50ca4d8de22e122c1d | [
"Apache-2.0"
] | null | null | null | configs/k400-fixmatch-tg-alignment-videos-ptv-simclr/8gpu/r3d_r18_8x8x1_45e_k400_rgb_offlinetg_1percent_align0123_1clip_no_contrast_precisebn_ptv.py | lambert-x/video_semisup | 8ff44343bb34485f8ad08d50ca4d8de22e122c1d | [
"Apache-2.0"
] | null | null | null | # model settings
# Semi-supervised 3D-ResNet-18 recognizer combining appearance supervision,
# a temporal branch and a FixMatch-style pseudo-labelling scheme.
model = dict(
    type='Semi_AppSup_TempSup_SimCLR_Crossclip_PTV_Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        depth=18,
        pretrained=None,  # trained from scratch
        pretrained2d=False,
        norm_eval=False,
        conv_cfg=dict(type='Conv3d'),
        norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3),
        act_cfg=dict(type='ReLU'),
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(1, 1, 1, 1),
        spatial_strides=(1, 2, 2, 2),
        temporal_strides=(1, 2, 2, 2),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,  # Kinetics-400
        in_channels=512,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01),
    cls_head_temp=None,
    # 'same' -> the temporal branch reuses the main backbone / sup head.
    temp_backbone='same',
    temp_sup_head='same',
    train_cfg=dict(
        warmup_epoch=10,  # epochs before the semi-supervised losses kick in -- presumably; confirm in the recognizer
        fixmatch_threshold=0.3,  # confidence threshold for FixMatch pseudo-labels
        temp_align_indices=(0, 1, 2, 3),  # indices aligned between branches (matches 'align0123' in the filename)
        align_loss_func='Cosine',
        pseudo_label_metric='avg',
        crossclip_contrast_loss=[],  # empty lists: cross-clip contrastive loss disabled
        crossclip_contrast_range=[],
    ),
    test_cfg=dict(average_clips='score'))
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_labeled = 'VideoDataset_Contrastive'
dataset_type_unlabeled = 'UnlabeledVideoDataset_MultiView_Contrastive'
# dataset_type_appearance = 'RawframeDataset_withAPP'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
# Percent of Kinetics-400 training videos used with labels (1% split).
labeled_percentage = 1
ann_file_train_labeled = f'data/kinetics400/videossl_splits/kinetics400_train_{labeled_percentage}_percent_labeled_videos.txt'
ann_file_train_unlabeled = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# Labeled branch: one 8x8 clip plus a 'tempgrad' (temporal gradient)
# counterpart collected as 'imgs_diff'.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames_Custom', clip_len=8, frame_interval=8, num_clips=1,
         total_frames_offset=-1),
    dict(type='DecordDecode_Custom',
         extra_modalities=['tempgrad']),
    dict(type='Resize', scale=(-1, 256), lazy=True),
    dict(type='RandomResizedCrop', lazy=True),
    dict(type='Resize', scale=(224, 224), keep_ratio=False, lazy=True),
    dict(type='Flip', flip_ratio=0.5, lazy=True),
    dict(type='Fuse_WithDiff'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Normalize_Diff', **img_norm_cfg, raw_to_diff=False, redist_to_rgb=False),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='FormatShape_Diff', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label', 'imgs_diff'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label', 'imgs_diff'])
]
# Get the frame and resize, shared by both weak and strong
train_pipeline_weak = [
    dict(type='DecordInit'),
    dict(type='SampleFrames_Custom', clip_len=8, frame_interval=8, num_clips=1,
         total_frames_offset=-1),
    dict(type='DecordDecode_Custom',
         extra_modalities=['tempgrad']),
    dict(type='Resize', scale=(-1, 256), lazy=True),
    dict(type='RandomResizedCrop', lazy=True),
    dict(type='Resize', scale=(224, 224), keep_ratio=False, lazy=True),
    dict(type='Flip', flip_ratio=0.5, lazy=True),
    dict(type='Fuse_WithDiff'),
]
# Only used for strong augmentation
train_pipeline_strong = [
    dict(type='Imgaug', transforms='default'),
    dict(type='Imgaug_Custom', transforms='default', modality='imgs_diff')
]
# Formating the input tensors, shared by both weak and strong
train_pipeline_format = [
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Normalize_Diff', **img_norm_cfg, raw_to_diff=False, redist_to_rgb=False),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='FormatShape_Diff', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label', 'imgs_diff'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label', 'imgs_diff'])
]
# Validation: single centre-cropped clip, no temporal-gradient modality.
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256), lazy=True),
    dict(type='CenterCrop', crop_size=224, lazy=True),
    dict(type='Flip', flip_ratio=0, lazy=True),
    dict(type='Fuse'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
# Test: 10 clips x ThreeCrop (30 views per video).
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
# Separate labeled and unlabeled loaders for semi-supervised training.
data = dict(
    videos_per_gpu=8,  # NOTE: Need to reduce batch size. 16 -> 5
    workers_per_gpu=4,  # Default: 4
    train_dataloader=dict(drop_last=True, pin_memory=True),
    train_labeled=dict(
        type=dataset_type_labeled,
        ann_file=ann_file_train_labeled,
        data_prefix=data_root,
        pipeline=train_pipeline,
        contrast_clip_num=1
    ),
    train_unlabeled=dict(
        type=dataset_type_unlabeled,
        ann_file=ann_file_train_unlabeled,
        data_prefix=data_root,
        pipeline_weak=train_pipeline_weak,
        pipeline_strong=train_pipeline_strong,
        pipeline_format=train_pipeline_format,
        contrast_clip_num=1
    ),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline,
        test_mode=True),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline,
        test_mode=True),
    # Dataset used only to recompute precise BN statistics (full train list).
    precise_bn=dict(
        type=dataset_type,
        ann_file=ann_file_train_unlabeled,
        data_prefix=data_root,
        pipeline=val_pipeline),
    videos_per_gpu_precise_bn=5
)
# optimizer
optimizer = dict(
    type='SGD', lr=0.2, momentum=0.9,
    weight_decay=0.0001)  # this lr 0.2 is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy: cosine decay with a 10-epoch linear warmup
lr_config = dict(policy='CosineAnnealing',
                 min_lr=0,
                 warmup='linear',
                 warmup_ratio=0.1,
                 warmup_by_epoch=True,
                 warmup_iters=10)
total_epochs = 45  # Might need to increase this number for different splits. Default: 180
checkpoint_config = dict(interval=5, max_keep_ckpts=3)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'], topk=(1, 5))  # Default: 5
log_config = dict(
    interval=20,  # Default: 20
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# Recompute precise BN statistics every 5 epochs over 200 iterations.
precise_bn = dict(num_iters=200, interval=5,
                  bn_range=['backbone', 'cls_head'])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = None
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
| 33.95 | 126 | 0.664078 |
8143df98ebce82100584c4d53ea2d04b4dccafa6 | 3,351 | py | Python | experiments/rpi/gertboard/dtoa.py | willingc/pingo | 0890bf5ed763e9061320093fc3fb5f7543c5cc2c | [
"MIT"
] | null | null | null | experiments/rpi/gertboard/dtoa.py | willingc/pingo | 0890bf5ed763e9061320093fc3fb5f7543c5cc2c | [
"MIT"
] | 1 | 2021-03-20T05:17:03.000Z | 2021-03-20T05:17:03.000Z | experiments/rpi/gertboard/dtoa.py | willingc/pingo | 0890bf5ed763e9061320093fc3fb5f7543c5cc2c | [
"MIT"
] | null | null | null | #!/usr/bin/python2.7
# Python 2.7 version by Alex Eames of http://RasPi.TV
# functionally equivalent to the Gertboard dtoa test by Gert Jan van Loo & Myra VanInwegen
# Use at your own risk - I'm pretty sure the code is harmless, but check it yourself.
# This will not work unless you have installed py-spidev as in the README.txt file
# spi must also be enabled on your system
import spidev
import sys
from time import sleep
board_type = sys.argv[-1]  # last CLI arg; "m" selects the D/A-header wiring text below
# reload spi drivers to prevent spi failures
import subprocess
unload_spi = subprocess.Popen('sudo rmmod spi_bcm2708', shell=True, stdout=subprocess.PIPE)
start_spi = subprocess.Popen('sudo modprobe spi_bcm2708', shell=True, stdout=subprocess.PIPE)
sleep(3)  # give the kernel a moment to re-register the SPI device
spi = spidev.SpiDev()
spi.open(0,1)  # The Gertboard DAC is on SPI channel 1 (CE1 - aka GPIO7)
channel = 3   # set initial value to force user selection
common = [0,0,0,160,240]  # 2nd byte common to both channels
voltages = [0.0,0.5,1.02,1.36,2.04]  # voltages for display
# NOTE(review): which_channel() is not defined in this file as shown --
# presumably a prompt helper lost in extraction; it must return "0" or "1".
while not (channel == 1 or channel == 0):  # channel is set by user input
    channel = int(which_channel())  # continue asking until answer 0 or 1 given
if channel == 1:  # once proper answer given, carry on
    num_list = [176,180,184,186,191]  # set correct channel-dependent list for byte 1
else:
    num_list = [48,52,56,58,63]
print "These are the connections for the digital to analogue test:"
if board_type == "m":
    print "jumper connecting GPIO 7 to CSB"
    print "Multimeter connections (set your meter to read V DC):"
    print "  connect black probe to GND"
    print "  connect red probe to DA%d on D/A header" % channel
else:
    print "jumper connecting GP11 to SCLK"
    print "jumper connecting GP10 to MOSI"
    print "jumper connecting GP9 to MISO"
    print "jumper connecting GP7 to CSnB"
    print "Multimeter connections (set your meter to read V DC):"
    print "  connect black probe to GND"
    print "  connect red probe to DA%d on J29" % channel
raw_input("When ready hit enter.\n")
# Step through the five test levels; each 2-byte transfer sets the DAC output.
for i in range(5):
    r = spi.xfer2([num_list[i],common[i]]) #write the two bytes to the DAC
    print "Your meter should read about %.2fV" % voltages[i]
    raw_input("When ready hit enter.\n")
r = spi.xfer2([16,0]) # switch off channel A = 00010000 00000000 [16,0]
r = spi.xfer2([144,0]) # switch off channel B = 10010000 00000000 [144,0]
# The DAC is controlled by writing 2 bytes (16 bits) to it.
# So we need to write a 16 bit word to DAC
# bit 15 = channel, bit 14 = ignored, bit 13 =gain, bit 12 = shutdown, bits 11-4 data, bits 3-0 ignored
# You feed spidev a decimal number and it converts it to 8 bit binary
# each argument is a byte (8 bits), so we need two arguments, which together make 16 bits.
# that's what spidev sends to the DAC. If you need to delve further, have a look at the datasheet. :)
| 45.90411 | 110 | 0.664279 |
d48ba98f343e96c0da8c5db735d6d98bd7a3e3d3 | 5,370 | py | Python | modules/statusbar.py | themilkman/GitGutter | 355b4480e7e1507fe1f9ae1ad9eca9649400a76c | [
"MIT"
] | null | null | null | modules/statusbar.py | themilkman/GitGutter | 355b4480e7e1507fe1f9ae1ad9eca9649400a76c | [
"MIT"
] | null | null | null | modules/statusbar.py | themilkman/GitGutter | 355b4480e7e1507fe1f9ae1ad9eca9649400a76c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sublime
from . import blame
from . import templates
| 33.354037 | 78 | 0.570577 |
d48c84bf13aa3330a9778d95947b20e6d95dfadf | 194 | py | Python | polls/tests.py | bunya017/Django-Polls-App | 7b71ac9d1ffb66518e1d0345bc0f11ee5907c1be | [
"MIT"
] | null | null | null | polls/tests.py | bunya017/Django-Polls-App | 7b71ac9d1ffb66518e1d0345bc0f11ee5907c1be | [
"MIT"
] | 4 | 2020-06-05T18:14:33.000Z | 2022-01-13T00:45:05.000Z | polls/tests.py | bunya017/Django-Polls-App | 7b71ac9d1ffb66518e1d0345bc0f11ee5907c1be | [
"MIT"
] | 1 | 2018-05-23T11:36:36.000Z | 2018-05-23T11:36:36.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
| 14.923077 | 39 | 0.742268 |
d48e8d3a34a96d0df0efeeb8e07e14864978dc32 | 1,115 | py | Python | test.py | LeonHodgesAustin/video_stream_processor | 8014705edc37599716eb1320d46c99136fe3e262 | [
"BSD-3-Clause"
] | null | null | null | test.py | LeonHodgesAustin/video_stream_processor | 8014705edc37599716eb1320d46c99136fe3e262 | [
"BSD-3-Clause"
] | null | null | null | test.py | LeonHodgesAustin/video_stream_processor | 8014705edc37599716eb1320d46c99136fe3e262 | [
"BSD-3-Clause"
] | null | null | null | # import logging
# import hercules.lib.util.hercules_logging as l
# from hercules.lib.util import sso as sso
import opencv2 as cv2
import urllib
import numpy as np
# log = l.setup_logging(__name__)
if __name__ == "__main__":
main()
| 27.875 | 102 | 0.6287 |
d48ee17b3f638f1522292d248a4e2094be89792e | 1,244 | py | Python | ribbon/exceptions.py | cloutiertyler/RibbonGraph | 000864dd0ee33da4ed44af2f4bd1f1a83d5a1ba4 | [
"MIT"
] | 2 | 2017-09-20T17:49:09.000Z | 2017-09-20T17:55:43.000Z | ribbon/exceptions.py | cloutiertyler/RibbonGraph | 000864dd0ee33da4ed44af2f4bd1f1a83d5a1ba4 | [
"MIT"
] | null | null | null | ribbon/exceptions.py | cloutiertyler/RibbonGraph | 000864dd0ee33da4ed44af2f4bd1f1a83d5a1ba4 | [
"MIT"
] | null | null | null | from rest_framework.exceptions import APIException
from rest_framework import status
| 28.272727 | 103 | 0.762058 |
d48f61239e116e08f567623063b6adca1886ef91 | 3,792 | py | Python | kobe-trading-bot/app.py | LeonardoM011/kobe-trading-bot | 83a84ee0fb8dab3d9ae174be91e96de6d5f2d823 | [
"MIT"
] | null | null | null | kobe-trading-bot/app.py | LeonardoM011/kobe-trading-bot | 83a84ee0fb8dab3d9ae174be91e96de6d5f2d823 | [
"MIT"
] | null | null | null | kobe-trading-bot/app.py | LeonardoM011/kobe-trading-bot | 83a84ee0fb8dab3d9ae174be91e96de6d5f2d823 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Crypto trading bot using binance api
# Author: LeonardoM011<[email protected]>
# Created on 2021-02-05 21:56
# Set constants here:
DELTA_TIME = 300 # How long can we check for setting up new trade (in seconds)
# ----------------------
# Imports:
import os
import sys
import time as t
import datetime
# Adding python-binance to path and importing python-binance
sys.path.insert(1, "../deps/binance")
from binance.client import Client
from fun import *
import candles as can
# Globals:
client = None
# Main program loop
if __name__ == "__main__":
main() | 36.461538 | 219 | 0.620781 |
d4910ca755a73b263041c7cd3c681f6108d61901 | 13,061 | py | Python | imported_files/plotting_edh01.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | imported_files/plotting_edh01.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | imported_files/plotting_edh01.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Plotting.py for notebook 01_Exploring_DM_Halos
This python file contains all the functions used for plotting graphs and maps in the 1st notebook (.ipynb) of the repository: 01. Exploring parameters in DM halos and sub-halos
Script written by: Soumya Shreeram
Project supervised by Johan Comparat
Date created: 23rd February 2021
Last updated on 30th March 2021
"""
# astropy modules
import astropy.units as u
import astropy.io.fits as fits
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM, z_at_value
import numpy as np
# scipy modules
from scipy.spatial import KDTree
from scipy.interpolate import interp1d
import os
import importlib
# plotting imports
import matplotlib
from mpl_toolkits import axes_grid1
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import Exploring_DM_Haloes as edh
def setLabel(ax, xlabel, ylabel, title, xlim, ylim, legend=True):
"""
Function defining plot properties
@param ax :: axes to be held
@param xlabel, ylabel :: labels of the x-y axis
@param title :: title of the plot
@param xlim, ylim :: x-y limits for the axis
"""
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if xlim != 'default':
ax.set_xlim(xlim)
if ylim != 'default':
ax.set_ylim(ylim)
if legend:
l = ax.legend(loc='best', fontsize=14)
for legend_handle in l.legendHandles:
legend_handle._legmarker.set_markersize(12)
ax.grid(False)
ax.set_title(title, fontsize=18)
return
def plotAgnClusterDistribution(pos_z_clu, pos_z_AGN, pos_z_halo, cluster_params):
"""
Function to plot the AGN cluster distribution
@pos_z_clu :: postion and redshifts of all the selected 'clusters'
@pos_z_AGN :: postion and redshifts of all the selected AGNs
@pos_z_gal :: postion and redshifts of all the selected galaxies
"""
halo_m_500c = cluster_params[0]
fig, ax = plt.subplots(1,1,figsize=(9,8))
# plotting halos
halos = ax.plot(pos_z_halo[0], pos_z_halo[1], '.', color='#fcd16d', markersize=0.2, label=r'All DM Halos', alpha=0.2)
# plotting clusters
cluster = ax.plot(pos_z_clu[0], pos_z_clu[1], 'o', color= '#03a351', markersize=3, label=r'Clusters $M_{500c}> 10^{%.1f} M_\odot$ '%(np.log10(halo_m_500c)))
# plotting AGNs
agn = ax.plot(pos_z_AGN[0], pos_z_AGN[1], '*', color='k', markersize=3.5, label=r'AGN', alpha=0.7)
# labeling axes and defining limits
xlim = [np.min(pos_z_halo[0]), np.max(pos_z_halo[0])]
ylim = [np.min(pos_z_halo[1]), np.max(pos_z_halo[1])]
setLabel(ax, 'R.A. (deg)', 'Dec (deg)', '', xlim, ylim, legend=True)
print('Redshift z<%.2f'%(np.max(pos_z_clu[2])))
return
def plotHostSubHalos(pos_z_cen_halo, pos_z_sat_halo, pos_z_AGN):
"""
Function to plot the host and satellite halo distribution
@hd_halo :: table with all relevant info on halos, clusters, and galaxies within them
--> divided into 3 because each hd_halo holds info on 1000 halos alone
@pos_z_AGN :: postion and redshifts of all the selected AGNs
"""
ra_cen, dec_cen = pos_z_cen_halo[0], pos_z_cen_halo[1]
ra_sat, dec_sat = pos_z_sat_halo[0], pos_z_sat_halo[1]
fig, ax = plt.subplots(1,1,figsize=(9,8))
# plotting host halos
host_halos = ax.plot(ra_cen, dec_cen, '.', color= 'k', markersize=0.06, label=r'Host-halos $P_{id}=-1$', alpha=0.4)
# plotting sat halos
sat_halos = ax.plot(ra_sat, dec_sat, 'o', color='#07d9f5', markersize=0.07, label=r'Satellite halos $P_{id} \neq -1$', alpha=0.7)
# plotting AGNs
agn = ax.plot(pos_z_AGN[0], pos_z_AGN[1], '*', color='#fff717', markersize=6.5, label=r'AGN', markeredgecolor='w', markeredgewidth=0.4)
# labeling axes and defining limits
xlim = [np.min(pos_z_AGN[0]), np.max(pos_z_AGN[0])]
ylim = [np.min(pos_z_AGN[1]), np.max(pos_z_AGN[1])]
setLabel(ax, 'R.A. (deg)', 'Dec (deg)', '', xlim, ylim, legend=True)
print('AGNs: %d, Host (central) halos: %.2e, Sattelite halos: %.2e'%(len(pos_z_AGN[0]), len(ra_cen), len(ra_sat)))
return
def plotAGNfraction(pos_z_AGN, pos_z_gal, redshift_limit_agn, bin_size):
"""
Function to plot the agn fraction in the given pixel
@pos_z_AGN :: postion and redshifts of all the selected AGNs
@pos_z_gal :: postion and redshifts of all the selected galaxies
@redshift_limit_agn :: upper limit on redshift based on the clusters found
"""
fig, ax = plt.subplots(1,2,figsize=(19,7))
# getting the useful histogram properties
counts_agn, redshift_bins_agn = np.histogram(pos_z_AGN[2], bins = bin_size)
counts_gal, redshift_bins_gal = np.histogram(pos_z_gal[2], bins = bin_size)
# plotting the galaxy and agn distribution as a function of redshift
ax[0].plot(redshift_bins_gal[1:], counts_gal, 'ks', ms=4, label=r'DM Halos')
ax[0].plot(redshift_bins_agn[1:], counts_agn, 'bs', ms=4, label=r'AGNs')
# axis properties - 0
xlim = [np.min(redshift_bins_agn[1:]), np.max(redshift_bins_agn[1:])]
setLabel(ax[0], r'Redshift$_R$', 'Counts','', xlim, 'default', legend=True)
ax[0].set_yscale("log")
# agn fraction as a function of redshift
f_agn, idx = [], []
for c, c_gal in enumerate(counts_gal):
if c_gal != 0:
f_agn.append(((counts_agn[c]*100)/c_gal))
idx.append(c)
z_bin_modified = redshift_bins_gal[1:][np.array(idx)]
# plot agn fraction
ax[1].plot(z_bin_modified, f_agn, 's', color='#6b0385', ms=4)
# axis properties - 1
xlim = [np.min(redshift_bins_agn[1:])-0.02, np.max(redshift_bins_agn[1:])]
setLabel(ax[1], r'Redshift$_R$', r'$f_{AGN}$ (%s)'%"%", '', xlim, 'default', legend=False)
ax[1].set_yscale("log")
plt.savefig('figures/agn_frac.pdf', facecolor='w', edgecolor='w')
print( 'Reddhift z<%.2f'%redshift_limit_agn )
return redshift_bins_gal[1:]
def plotRedshiftComovingDistance(cosmo, redshift_limit, resolution = 0.0001):
"""Function to plot the relation between redshift and the comoving distance
@cosmo :: cosmology package loaded
@redshift_limit :: upper limit in redshift --> end point for interpolation
@resolution :: resolution of time steps (set to e-4 based of simulation resolution)
@Returns :: plot showing the dependence of redshift on comoving distance
"""
fig, ax = plt.subplots(1,1,figsize=(7,6))
distance_Mpc = cosmo.comoving_distance(np.arange(0,redshift_limit, resolution))
redshifts = np.arange(0,redshift_limit, resolution)
ax.plot(redshifts, distance_Mpc, 'k.', ms=1)
setLabel(ax, 'Redshift (z)', 'Comoving distance (Mpc)', '', 'default', 'default', legend=False)
print('Redshift-Comoving distance relationship')
return
def plotMergerDistribution(merger_val_gal, counts_gal, merger_val_agn, counts_agn, cosmo, redshift_limit):
"""
Function to plot the distribution (counts) of the merger scale factor/redshift
"""
fig, ax = plt.subplots(1,1,figsize=(7,6))
ax1 = plt.gca()
ax2 = ax1.twiny()
# plot the merger distribution for galaxies and agns
ax1.plot(merger_val_gal, counts_gal, 'kx', label='DM Halos')
ax1.plot(merger_val_agn, counts_agn, 'bx', label='AGNs')
setLabel(ax1, r'Scale, $a(t)$, of last Major Merger', 'Counts', '', 'default', 'default', legend=True)
ax.set_yscale("log")
# setting the x-label on top (converting a to redshift)
a_min, a_max = np.min(merger_val_gal), np.max(merger_val_gal)
scale_factor_arr = [a_max, a_min*4, a_min*2, a_min]
ax2.set_xticks([(1/a) -1 for a in scale_factor_arr])
ax2.invert_xaxis()
ax2.set_xlabel('Redshift (z)')
ax2.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
print("Objects with merger redshifts z < %.2f"%z_at_value(cosmo.scale_factor, a_min))
plt.savefig('figures/merger_distribution_z%.2f.pdf'%redshift_limit, facecolor='w', edgecolor='w')
return
def plotCentralSatelliteScaleMergers(cen_sat_AGN, cen_sat_halo, redshift_limit):
"""
Function to plot the central and sattelite scale factors for mergers
"""
fig, ax = plt.subplots(1,1,figsize=(7,6))
labels = [r'central AGNs', r'satellite AGNs', 'central DM halos', 'satellite DM halos']
c, m, ms = ['b', '#38cee8', 'k', 'grey'], ['^', '*', '^', '*'], [9, 15, 5, 9]
mec, mew = ['w', 'k', 'k', '#abaeb3'], [0.7, 0.4, 1, 0.7]
for i in [0, 1]:
s_m_agn, c_agn = np.unique(cen_sat_AGN[i]['HALO_scale_of_last_MM'], return_counts=True)
s_m_gal, c_gal = np.unique(cen_sat_halo[i]['HALO_scale_of_last_MM'], return_counts=True)
# agns
ax.plot(s_m_agn, c_agn, color=c[i], marker=m[i], ls='', ms=ms[i], label=labels[i], markeredgecolor=mec[i], markeredgewidth=mew[i])
# DM halos
j = i + 2
ax.plot(s_m_gal, c_gal, color=c[j], marker=m[j], ls='', ms=ms[j], label=labels[j], markeredgecolor=mec[j], markeredgewidth=mew[j])
# set label
setLabel(ax, r'Scale, $a(t)$, of last Major Merger', 'Counts', '', 'default', 'default', legend=True)
ax.set_yscale("log")
plt.savefig('figures/merger_dist_cenAndsat_z%.2f.pdf'%redshift_limit, facecolor='w', edgecolor='w')
print('Objects below z: ', redshift_limit)
return [labels, c, m, ms, mec, mew]
def plotTimeSinceMergerDist(scale_merger_AGN, scale_merger_gal, z_AGN, z_gal, cosmo, bin_size, redshift_limit):
"""
Plot the distribution of halos with respective galaxies & agns given the time since merger
"""
# get the time difference since merger events in the halos
t_merger_agn = edh.getMergerTimeDifference(scale_merger_AGN, z_AGN, cosmo)
t_merger_gal = edh.getMergerTimeDifference(scale_merger_gal, z_gal, cosmo)
# get the t since merger bins and counts
if bin_size[0]:
c_t_agn, merger_bins_agn = np.histogram(np.array(t_merger_agn), bins = bin_size[1])
c_t_gal, merger_bins_gal = np.histogram(np.array(t_merger_gal), bins = bin_size[1])
merger_bins_agn = merger_bins_agn[:-1]
merger_bins_gal = merger_bins_gal[:-1]
else:
merger_bins_agn, c_t_agn = np.unique(t_merger_agn, return_counts=True)
merger_bins_gal, c_t_gal = np.unique(t_merger_gal, return_counts=True)
fig, ax = plt.subplots(1,1,figsize=(7,6))
# plot the time since merger distribution for galaxies and agns
ax.plot(merger_bins_gal, np.cumsum(c_t_gal), 'k^', label='DM Halos', ms=4)
ax.plot(merger_bins_agn, np.cumsum(c_t_agn), 'b^', label='AGNs', ms=4)
# set labels/legends
setLabel(ax, r'$\Delta t_{merger} = t(z_{merger})-t(z_{current})$ [Gyr]', 'Cumulative counts', '', 'default', 'default', legend=False)
ax.legend(loc='lower left', fontsize=14)
ax.set_yscale("log")
ax.set_xscale("log")
return ax, fig, t_merger_agn, t_merger_gal
def mergerRedshiftPlot(cen_sat_AGN, cen_sat_halo, dt_m, plot_params, redshift_limit):
"""
Function to plot the time since merger as a function of the redshift
@cen_sat_AGN(gal) :: handels to access the central and satellite AGNs(galaxies)
@dt_m :: time difference after merger for cen/sat AGNs(galaxies)
@plot_params :: to keep consistency between plots, array containing [labels, c, m, ms]
"""
fig, ax = plt.subplots(1,1,figsize=(7,6))
# change marker size for central DM halos
plot_params[3][1] = 9
z_R = [cen_sat_AGN[0]['redshift_R'], cen_sat_AGN[1]['redshift_R'], cen_sat_halo[0]['redshift_R'], cen_sat_halo[1]['redshift_R']]
# plot central, satellite merger distributions as per visual preference
for i in [2, 3, 0, 1]:
ax.plot(dt_m[i], z_R[i], plot_params[2][i], color=plot_params[1][i], ms=plot_params[3][i], label=plot_params[0][i], markeredgecolor=plot_params[4][i], markeredgewidth=plot_params[5][i])
# set labels/legends
setLabel(ax, r'$\Delta t_{merger} = t(z_{merger})-t(z_{current})$ [Gyr]', r'Redshift$_R$', '', 'default', 'default', legend=True)
ax.set_xscale("log")
plt.savefig('figures/t_since_merger_z_plot_%.2f.pdf'%redshift_limit, facecolor='w', edgecolor='w')
return ax
def plotMergerTimeCuts(ax, t_merger_cut_arr, l):
"""
Function to plot the defined cuts in merger times within the concerned plot
@t_merger_cut_arr :: array that defines the cuts in the merger times
@l :: array that defines the linestyles used to denote these cuts (refer to the initial codeblock in the notebook)
"""
for i, t_m_cut in enumerate(t_merger_cut_arr):
ax.axvline(x=t_m_cut, color='r', linestyle= l[i], label='%.1f Gyr'%t_m_cut)
ax.legend(fontsize=14, loc='lower left')
return | 41.996785 | 193 | 0.674298 |
d49130f40117c9ae1a6661a583616d08186beb75 | 2,239 | py | Python | asv_bench/benchmarks/omnisci/io.py | Rubtsowa/modin | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | asv_bench/benchmarks/omnisci/io.py | Rubtsowa/modin | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | asv_bench/benchmarks/omnisci/io.py | Rubtsowa/modin | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""IO Modin on OmniSci storage format benchmarks."""
import modin.pandas as pd
from ..utils import (
generate_dataframe,
RAND_LOW,
RAND_HIGH,
ASV_USE_IMPL,
IMPL,
get_shape_id,
trigger_import,
get_benchmark_shapes,
)
from ..io.csv import TimeReadCsvTrueFalseValues # noqa: F401
| 33.924242 | 87 | 0.663243 |
d4913a27e63bc4d452b162e06717cf43b3cf28c7 | 7,730 | py | Python | benchmarks/rotation/rotated_cifar.py | ypeng22/ProgLearn | 671ff6a03c156bab3eedbd9e112705eeabd59da7 | [
"MIT"
] | 1 | 2021-02-02T03:18:46.000Z | 2021-02-02T03:18:46.000Z | benchmarks/rotation/rotated_cifar.py | ypeng22/ProgLearn | 671ff6a03c156bab3eedbd9e112705eeabd59da7 | [
"MIT"
] | null | null | null | benchmarks/rotation/rotated_cifar.py | ypeng22/ProgLearn | 671ff6a03c156bab3eedbd9e112705eeabd59da7 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import random
import pickle
from skimage.transform import rotate
from scipy import ndimage
from skimage.util import img_as_ubyte
from joblib import Parallel, delayed
from sklearn.ensemble.forest import _generate_unsampled_indices
from sklearn.ensemble.forest import _generate_sample_indices
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from itertools import product
import keras
from keras import layers
from joblib import Parallel, delayed
from multiprocessing import Pool
import tensorflow as tf
from numba import cuda
import sys
sys.path.append("../../proglearn/")
from progressive_learner import ProgressiveLearner
from deciders import SimpleArgmaxAverage
from transformers import TreeClassificationTransformer, NeuralClassificationTransformer
from voters import TreeClassificationVoter, KNNClassificationVoter
### MAIN HYPERPARAMS ###
model = "dnn"
granularity = 2
reps = 4
########################
(X_train, y_train), (X_test, y_test) = keras.datasets.cifar100.load_data()
data_x = np.concatenate([X_train, X_test])
data_y = np.concatenate([y_train, y_test])
data_y = data_y[:, 0]
if model == "dnn":
for angle_adder in range(30, 180, granularity * 4):
angles = angle_adder + np.arange(0, granularity * 4, granularity)
with Pool(4) as p:
p.map(perform_angle, angles)
elif model == "uf":
angles = np.arange(30,180,2)
Parallel(n_jobs=-1)(delayed(LF_experiment)(data_x, data_y, angle, model, granularity, reps=20, ntrees=16, acorn=1) for angle in angles)
| 40.684211 | 139 | 0.625356 |
d4925b374376cf8c3d1b5d0d5ddbaf90cc28fafd | 3,763 | py | Python | sklearn_pandas/transformers/monitor.py | toddbenanzer/sklearn_pandas | 36e24c55ef4829aa261963201c346869097d4931 | [
"MIT"
] | null | null | null | sklearn_pandas/transformers/monitor.py | toddbenanzer/sklearn_pandas | 36e24c55ef4829aa261963201c346869097d4931 | [
"MIT"
] | null | null | null | sklearn_pandas/transformers/monitor.py | toddbenanzer/sklearn_pandas | 36e24c55ef4829aa261963201c346869097d4931 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn_pandas.util import validate_dataframe
| 38.010101 | 140 | 0.543981 |
d4928bbc94c4225d834897ba151f5d1146c73aa7 | 10,842 | py | Python | Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse_test.py | cbrake1/content | 5b031129f98935c492056675eeee0fefcacbd87b | [
"MIT"
] | 1 | 2020-11-25T00:42:27.000Z | 2020-11-25T00:42:27.000Z | Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse_test.py | cbrake1/content | 5b031129f98935c492056675eeee0fefcacbd87b | [
"MIT"
] | 22 | 2022-03-23T10:39:16.000Z | 2022-03-31T11:31:37.000Z | Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse_test.py | cbrake1/content | 5b031129f98935c492056675eeee0fefcacbd87b | [
"MIT"
] | null | null | null | import pytest
from CommonServerPython import *
from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, \
pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, \
get_incidents_batch_by_time_request, get_new_incidents, get_time_delta
MOCK_INCIDENT = {
"id": 1,
"type": "Malware",
"summary": "Unsolicited Bulk Email",
"description": "EvilScheme test message",
"score": 4200,
"state": "Open",
"created_at": "2018-05-26T21:07:17Z",
"event_count": 3,
"event_sources": [
"Proofpoint TAP"
],
"users": [
""
],
"assignee": "Unassigned",
"team": "Unassigned",
"hosts": {
"attacker": [
""
],
"forensics": [
"",
]
},
"incident_field_values": [
{
"name": "Attack Vector",
"value": "Email"
},
{
"name": "Classification",
"value": "Spam"
},
{
"name": "Severity",
"value": "Critical"
},
{
"name": "Abuse Disposition",
"value": "Unknown"
}
],
"events": [
{
"id": 3,
"category": "malware",
"severity": "Info",
"source": "Proofpoint TAP",
"threatname": "",
"state": "Linked",
"description": "",
"attackDirection": "inbound",
"received": "2018-05-26T21:07:17Z",
"malwareName": "",
"emails": [
{
"sender": {
"email": "test"
},
"recipient": {
"email": "test"
},
"subject": "test",
"messageId": "test",
"messageDeliveryTime": {
"chronology": {
"zone": {
"id": "UTC"
}
},
"millis": 1544640072000,
},
"abuseCopy": "false",
"body": "test",
'bodyType': "test",
'headers': "test",
'urls': "test"
}
],
}
],
"quarantine_results": [],
"successful_quarantines": 0,
"failed_quarantines": 0,
"pending_quarantines": 0
}
INCIDENT_FIELD_CONTEXT = {
"Attack_Vector": "Email",
"Classification": "Spam",
"Severity": "Critical",
"Abuse_Disposition": "Unknown"
}
INCIDENT_FIELD_INPUT = [
(MOCK_INCIDENT, INCIDENT_FIELD_CONTEXT)
]
FETCH_RESPONSE = get_fetch_data()
EMAIL_RESULT = [
{
'sender': "test",
'recipient': "test",
'subject': "test",
'message_id': "test",
'message_delivery_time': 1544640072000,
'body': "test",
'body_type': "test",
'headers': "test",
'urls': "test"
}
]
EMAILS_CONTEXT_INPUT = [
(MOCK_INCIDENT['events'][0], EMAIL_RESULT)
]
SOURCE_LIST_INPUT = [
(["Proofpoint TAP"], True),
([], True),
(["No such source"], False),
(["No such source", "Proofpoint TAP"], True)
]
ABUSE_DISPOSITION_INPUT = [
(["Unknown"], True),
([], True),
(["No such value"], False),
(["No such value", "Unknown"], True)
]
DEMISTO_PARAMS = [({'event_sources': "No such source, Proofpoint TAP", 'abuse_disposition': "No such value, Unknown"},
[MOCK_INCIDENT]), ({'event_sources': "", 'abuse_disposition': ""}, [MOCK_INCIDENT]),
({'event_sources': "No such source", 'abuse_disposition': "No such value, Unknown"}, []),
({'event_sources': "No such source, Proofpoint TAP", 'abuse_disposition': "No such value"}, []),
({'event_sources': "No such source", 'abuse_disposition': "No such value"}, [])]
INGEST_ALERT_ARGS = {
"attacker": "{\"attacker\":{\"key\":\"value\"}}",
"cnc_host": "{\"cnc_host\":{\"key\":\"value\"}}",
"detector": "{\"detector\":{\"key\":\"value\"}}",
"email": "{\"email\":{\"key\":\"value\"}}",
"forensics_hosts": "{\"forensics_hosts\":{\"key\":\"value\"}}",
"target": "{\"target\":{\"key\":\"value\"}}",
"threat_info": "{\"threat_info\":{\"key\":\"value\"}}",
"custom_fields": "{\"custom_fields\":{\"key\":\"value\"}}",
"post_url_id": "value",
"json_version": "value",
"summary": "value"
}
EXPECTED_RESULT = {
"attacker": {"key": "value"},
"cnc_host": {"key": "value"},
"detector": {"key": "value"},
"email": {"key": "value"},
"forensics_hosts": {"key": "value"},
"target": {"key": "value"},
"threat_info": {"key": "value"},
"custom_fields": {"key": "value"},
"post_url_id": "value",
"json_version": "value",
"summary": "value"
}
def test_fetch_incidents_limit_exceed(mocker):
"""
Given
- a dict of params given to the function which is gathered originally from demisto.params()
The dict includes the relevant params for the fetch e.g. fetch_delta, fetch_limit, created_after, state.
- response of the api
When
- a single iteration of the fetch is activated with a fetch limit set to 5
Then
- validate that the number or incidents that is returned is equal to the limit when the api returned more.
"""
params = {
'fetch_delta': '6 hours',
'fetch_limit': ' 5',
'created_after': '2021-03-30T11:44:24Z',
'state': 'closed'
}
mocker.patch('ProofpointThreatResponse.get_incidents_request', return_value=FETCH_RESPONSE)
incidents_list = get_incidents_batch_by_time_request(params)
assert len(incidents_list) == 5
def test_fetch_incidents_with_same_created_time(mocker):
"""
Given
- a dict of params given to the function which is gathered originally from demisto.params()
The dict includes the relevant params for the fetch e.g. fetch_delta, fetch_limit, created_after, state and
last_fetched_id.
- response of the api
When
- when a fetch occurs and the last fetched incident has exactly the same time of the next incident.
Then
- validate that only one of the incidents appear as to the fetch limit.
- validate that the next incident whose time is exactly the same is brought in the next fetch loop.
( e.g. 3057 and 3058)
"""
expected_ids_to_fetch_first = [3055, 3056, 3057]
expected_ids_to_fetch_second = [3058, 3059, 3060]
params = {
'fetch_delta': '2 hours',
'fetch_limit': '3',
'created_after': '2021-03-30T10:44:24Z',
'state': 'closed'
}
mocker.patch('ProofpointThreatResponse.get_incidents_request', return_value=FETCH_RESPONSE)
new_fetched_first = get_incidents_batch_by_time_request(params)
for incident in new_fetched_first:
assert incident.get('id') in expected_ids_to_fetch_first
params = {
'fetch_delta': '2 hour',
'fetch_limit': '3',
'created_after': '2021-03-30T11:21:24Z',
'last_fetched_id': '3057',
'state': 'closed'
}
new_fetched_second = get_incidents_batch_by_time_request(params)
for incident in new_fetched_second:
assert incident.get('id') in expected_ids_to_fetch_second
def test_get_new_incidents(mocker):
"""
Given
- a dict of request_params to the api.
- The last fetched incident id.
When
- Get new incidents is called during the fetch process.
Then
- validate that the number of expected incidents return.
- validate that all of the returned incident have a bigger id then the last fetched incident.
"""
last_incident_fetched = 3057
request_params = {
'state': 'closed',
'created_after': '2021-03-30T10:21:24Z',
'created_before': '2021-03-31T11:21:24Z',
}
mocker.patch('ProofpointThreatResponse.get_incidents_request', return_value=FETCH_RESPONSE)
new_incidnets = get_new_incidents(request_params, last_incident_fetched)
assert len(new_incidnets) == 14
for incident in new_incidnets:
assert incident.get('id') > 3057
def test_get_time_delta():
"""
Given
- input to the get_time_delta function which is valid and invalid
When
- run the get_time_delta function.
Then
- validate that on invalid input such as days or no units relevant errors are raised.
- validate that on valid inputs the return value is as expected.
"""
time_delta = get_time_delta('1 minute')
assert str(time_delta) == '0:01:00'
time_delta = get_time_delta('2 hours')
assert str(time_delta) == '2:00:00'
try:
get_time_delta('2')
except Exception as ex:
assert 'The fetch_delta is invalid. Please make sure to insert both the number and the unit of the fetch delta.' in str(
ex)
try:
get_time_delta('2 days')
except Exception as ex:
assert 'The unit of fetch_delta is invalid. Possible values are "minutes" or "hours' in str(ex)
| 32.558559 | 128 | 0.603394 |
d492fd9d00437e877a4501964cd431bb0546c438 | 3,522 | py | Python | macholib/macho_methname.py | l1haoyuan/macholib | 48c59841e2ca5aa308eab67f72faed384a2c0723 | [
"MIT"
] | null | null | null | macholib/macho_methname.py | l1haoyuan/macholib | 48c59841e2ca5aa308eab67f72faed384a2c0723 | [
"MIT"
] | null | null | null | macholib/macho_methname.py | l1haoyuan/macholib | 48c59841e2ca5aa308eab67f72faed384a2c0723 | [
"MIT"
] | null | null | null | import sys
import os
import json
from enum import Enum
from .mach_o import LC_SYMTAB
from macholib import MachO
from macholib import mach_o
from shutil import copy2
from shutil import SameFileError
def replace_methname(macho_file, methname_json, output_dir):
"""
Map method names in Mach-O file with the JSON file
"""
if not os.path.isfile(macho_file):
raise("passing not exist file " + macho_file)
if not os.path.isfile(methname_json):
raise("passing not exist file " + methname_json)
if output_dir is not None and not os.path.isdir(output_dir):
raise("passing not exist dir " + output_dir)
macho = MachO.MachO(macho_file)
name_dict = None
with open(methname_json) as json_file:
name_dict = json.load(json_file)
for header in macho.headers:
ch_methname_sect(header, name_dict)
ch_symtab(header, name_dict)
ori_dir, filename = os.path.split(macho_file)
if output_dir is None:
output_dir = ori_dir
output = os.path.join(output_dir, filename)
try:
copy2(macho_file, output_dir)
except SameFileError:
pass
with open(output, 'r+b') as fp:
macho.write(fp)
os.chmod(output, 0o755)
if __name__ == '__main__':
main()
| 30.102564 | 111 | 0.635434 |
d493c88653dfc14a4b19dd601e82fe9c227bb1db | 123 | py | Python | archive/data-processing/archive/features/sd1.py | FloFincke/affective-chat | 241c2b555541968f7e5e70b022fdb71102aed510 | [
"MIT"
] | null | null | null | archive/data-processing/archive/features/sd1.py | FloFincke/affective-chat | 241c2b555541968f7e5e70b022fdb71102aed510 | [
"MIT"
] | 10 | 2020-01-28T22:17:46.000Z | 2022-02-09T23:30:57.000Z | archive/data-processing/archive/features/sd1.py | FloFincke/affective-chat | 241c2b555541968f7e5e70b022fdb71102aed510 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import math
import numpy as np | 15.375 | 36 | 0.674797 |
d493cf85a9cb37a46e9d38eab9f5e238cbe228b0 | 1,515 | py | Python | forms/snippets/delete_watch.py | soheilv/python-samples | 4443431261dbcd88408dcc89d5702eeb1ac18ffd | [
"Apache-2.0"
] | 255 | 2020-10-16T16:27:54.000Z | 2022-03-31T14:26:29.000Z | forms/snippets/delete_watch.py | soheilv/python-samples | 4443431261dbcd88408dcc89d5702eeb1ac18ffd | [
"Apache-2.0"
] | 58 | 2020-10-16T14:24:27.000Z | 2022-03-19T13:27:27.000Z | forms/snippets/delete_watch.py | soheilv/python-samples | 4443431261dbcd88408dcc89d5702eeb1ac18ffd | [
"Apache-2.0"
] | 316 | 2020-10-16T17:06:00.000Z | 2022-03-30T19:18:31.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START forms_delete_watch]
from __future__ import print_function
from apiclient import discovery
from httplib2 import Http
from oauth2client import client, file, tools
SCOPES = "https://www.googleapis.com/auth/drive"
API_KEY = "<YOUR_API_KEY>"
DISCOVERY_DOC = f"https://forms.googleapis.com/$discovery/rest?version=v1beta&key={API_KEY}&labels=FORMS_BETA_TESTERS"
store = file.Storage('credentials.json')
creds = None
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
creds = tools.run_flow(flow, store)
service = discovery.build('forms', 'v1beta', http=creds.authorize(
Http()), discoveryServiceUrl=DISCOVERY_DOC, static_discovery=False)
form_id = '<YOUR_FORM_ID>'
watch_id = '<YOUR_WATCH_ID>'
# Print JSON response after deleting a form watch
result = service.forms().watches().delete(formId=form_id, watchId=watch_id).execute()
print(result)
# [END forms_delete_watch]
| 36.95122 | 118 | 0.770297 |
d49496c9213106a0918889d0e3a6aa3992ff1641 | 1,829 | py | Python | data_structures/disjoint_set/disjoint_set.py | egagraha/python-algorithm | 07a6a745b4ebddc93ab7c10b205c75b2427ac1fb | [
"MIT"
] | null | null | null | data_structures/disjoint_set/disjoint_set.py | egagraha/python-algorithm | 07a6a745b4ebddc93ab7c10b205c75b2427ac1fb | [
"MIT"
] | null | null | null | data_structures/disjoint_set/disjoint_set.py | egagraha/python-algorithm | 07a6a745b4ebddc93ab7c10b205c75b2427ac1fb | [
"MIT"
] | null | null | null | """
Disjoint set.
Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure
"""
def make_set(x: Node) -> None:
"""
Make x as a set.
"""
# rank is the distance from x to its' parent
# root's rank is 0
x.rank = 0
x.parent = x
def union_set(x: Node, y: Node) -> None:
    """Merge the sets containing x and y using union by rank.

    The root with the larger rank adopts the other so the set tree stays
    shallow; on a rank tie the surviving root's rank grows by one.
    """
    root_x, root_y = find_set(x), find_set(y)
    if root_x == root_y:
        return  # already in the same set; nothing to merge
    if root_x.rank > root_y.rank:
        root_y.parent = root_x
    else:
        root_x.parent = root_y
        if root_x.rank == root_y.rank:
            root_y.rank += 1
def find_set(x: Node) -> Node:
    """Return the root of x's set, compressing the path along the way."""
    # Pass 1: walk up to the root.
    root = x
    while root != root.parent:
        root = root.parent
    # Pass 2: re-point every node on the walked path directly at the root
    # (same final state as the recursive path compression it replaces).
    while x != root:
        x.parent, x = root, x.parent
    return root
def find_python_set(node: Node) -> set:
    """Return the hard-coded Python set ({0,1,2} or {3,4,5}) holding node.data.

    Raises ValueError when node.data is in neither set.
    """
    sets = ({0, 1, 2}, {3, 4, 5})
    for candidate in sets:
        if node.data in candidate:
            return candidate
    raise ValueError(f"{node.data} is not in {sets}")
def test_disjoint_set() -> None:
    """
    >>> test_disjoint_set()
    """
    vertex = [Node(i) for i in range(6)]
    for node in vertex:
        make_set(node)
    # Build two groups, {0, 1, 2} and {3, 4, 5}, mirroring find_python_set.
    for a, b in ((0, 1), (1, 2), (3, 4), (3, 5)):
        union_set(vertex[a], vertex[b])
    # Every pair must agree with the reference partition: same group ->
    # same root, disjoint groups -> different roots.
    for node0 in vertex:
        for node1 in vertex:
            if find_python_set(node0).isdisjoint(find_python_set(node1)):
                assert find_set(node0) != find_set(node1)
            else:
                assert find_set(node0) == find_set(node1)


if __name__ == "__main__":
    test_disjoint_set()
| 21.517647 | 73 | 0.556042 |
d494b4ecc12674b178766fec7fe530877b75b17d | 1,391 | py | Python | cw_EPR.py | tkeller12/spin_physics | 271f3081bc8ca87b159ed3e3494dbd0ffdea8fa5 | [
"MIT"
] | null | null | null | cw_EPR.py | tkeller12/spin_physics | 271f3081bc8ca87b159ed3e3494dbd0ffdea8fa5 | [
"MIT"
] | null | null | null | cw_EPR.py | tkeller12/spin_physics | 271f3081bc8ca87b159ed3e3494dbd0ffdea8fa5 | [
"MIT"
] | null | null | null | # Timothy Keller
# S = 1/2, I = 1/2
# Spin 1/2 electron coupled to spin 1/2 nuclei
import numpy as np
from scipy.linalg import expm
from matplotlib.pylab import *
from matplotlib import cm
# Single-spin-1/2 operators (Pauli matrices / 2), hbar = 1 units.
sigma_x = 0.5*np.r_[[[0, 1],[1, 0]]]
sigma_y = 0.5*np.r_[[[0,-1j],[1j, 0]]]
sigma_z = 0.5*np.r_[[[1, 0],[0, -1]]]
Identity = np.eye(2)
# Two-spin product-space (4x4) operators: electron (S) in the first
# Kronecker slot, nucleus (I) in the second.
Sx = np.kron(sigma_x, Identity)
Sy = np.kron(sigma_y, Identity)
Sz = np.kron(sigma_z, Identity)
Ix = np.kron(Identity, sigma_x)
Iy = np.kron(Identity, sigma_y)
Iz = np.kron(Identity, sigma_z)
# Sanity check: sigma_x (x) sigma_z equals Sx.Iz in the product space.
# NOTE(review): the name "SxIx" is misleading — it is built from sigma_z,
# i.e. it is actually Sx*Iz, as the allclose() check below confirms.
SxIx = np.kron(sigma_x,sigma_z)
SxIx2 = np.dot(Sx,Iz)
print(SxIx)
print(SxIx2)
print(np.allclose(SxIx,SxIx2))
# Gyromagnetic ratios and coupling constants.
omega_S = 1.76e11 # rad / (s * T)
omega_I = 267.522e6 # rad / (s * T)
Aiso = 2*np.pi * 50.e6 # Isotropic Hyperfine coupling rad / s
B0 = 0.35# T
# Hamiltonian (in Hz): electron Zeeman + nuclear Zeeman + secular
# (Sz.Iz-only) hyperfine term. The commented line keeps the full
# isotropic hyperfine (Sx.Ix + Sy.Iy + Sz.Iz) alternative.
H = omega_S/(2.*np.pi)*B0*Sz + omega_I/(2.*np.pi)*B0*Iz + Aiso * np.dot(Sz,Iz)
#H = omega_S/(2.*np.pi)*B0*Sz + omega_I/(2.*np.pi)*B0*Iz + Aiso * (np.dot(Sx,Ix) + np.dot(Sy,Iy) + np.dot(Sz,Iz))
print('Hamiltonian:')
print(H)
# Eigenvalues give the four energy levels; differences are the
# transition frequencies printed below.
out = np.linalg.eig(H)
E = out[0]
print(E)
# NOTE(review): np.linalg.eig does not sort eigenvalues, so the level
# pairings below assume a particular output order — confirm with eigh.
E12 = E[0] - E[1]
E34 = E[2] - E[3]
E13 = E[0] - E[2]
E24 = E[1] - E[3]
print(E12)
print(E34)
print(E13)
print(E24)
print('Nuclear')
print('%0.05f MHz'%(E12 / 1e6))
print('%0.05f MHz'%(E34 / 1e6))
print('Electron')
print('%0.05f GHz'%(E13 / 1e9))
print('%0.05f GHz'%(E24 / 1e9))
# Visualize the magnitude of the Hamiltonian matrix.
matshow(abs(H), cmap = cm.jet)
title('Hamiltonian')
show()
| 21.075758 | 113 | 0.62473 |
d494b73023a37a848160341332c0ded7a2a24518 | 1,787 | py | Python | V2RaycSpider0825/MiddleKey/VMes_IO.py | TOMJERRY23333/V2RayCloudSpider | 0647db8c7b67e4393d1f65dadc08d7e16c1dc324 | [
"MIT"
] | 1 | 2020-09-16T12:59:32.000Z | 2020-09-16T12:59:32.000Z | V2RaycSpider0825/MiddleKey/VMes_IO.py | TOMJERRY23333/V2RayCloudSpider | 0647db8c7b67e4393d1f65dadc08d7e16c1dc324 | [
"MIT"
] | null | null | null | V2RaycSpider0825/MiddleKey/VMes_IO.py | TOMJERRY23333/V2RayCloudSpider | 0647db8c7b67e4393d1f65dadc08d7e16c1dc324 | [
"MIT"
] | null | null | null | from spiderNest.preIntro import *
# CSV login log lives two directory levels above this module, in
# dataBase/log_information.csv; shared by save_login_info and vmess_IO.
path_ = os.path.dirname(os.path.dirname(__file__)) + '/dataBase/log_information.csv'
def save_login_info(VMess, class_):
    """Append one login record to the CSV log.

    VMess: the credential/share payload to record.
    class_: subscription type, 'ssr' or 'v2ray'.

    Each row is [timestamp, VMess, class_, used-flag]; the trailing '0'
    marks the record as not yet consumed.
    """
    # Timestamp without the microsecond fraction.
    timestamp = str(datetime.now()).split('.')[0]
    with open(path_, 'a', encoding='utf-8', newline='') as log_file:
        csv.writer(log_file).writerow([timestamp, str(VMess), class_, '0'])
def vmess_IO(class_):
    """Pop the newest unused record of type `class_` from the CSV log.

    class_: 'ssr' or 'v2ray'.

    Scans the log from newest to oldest for a row whose last column
    (used-flag) is '0' and whose type column matches `class_`, marks it
    used ('1'), rewrites the log via refresh_log(), and returns the
    record payload (column 1). Returns '' when no unused record exists.
    """
    try:
        with open(path_, 'r', encoding='utf-8') as f:
            reader = csv.reader(f)
            vm_q = [vm for vm in reader]
        # new_q is an alias of vm_q (no copy): mutating new_q edits the
        # same row objects that get written back below.
        new_q = vm_q
        for i, value in enumerate(reversed(vm_q)):
            if value[-1] == '0' and value[-2] == class_:
                vm = value[1]
                # reversed() index i maps back to list position -(i + 1).
                new_q[-(i + 1)][-1] = '1'
                break
        refresh_log(new_q)
        return vm
    except UnboundLocalError:
        # `vm` is only bound when a matching row was found; the
        # UnboundLocalError raised by `return vm` doubles as the
        # "nothing left" signal.
        return ''
| 28.365079 | 84 | 0.525462 |
d494cc4fdc66704176b1bdb14e2b8bf08f6d120c | 29,585 | py | Python | paddlespeech/s2t/frontend/audio.py | AK391/PaddleSpeech | 8cdbe3a6c0fe447e54cfbcfd82139d2869f5fc49 | [
"Apache-2.0"
] | null | null | null | paddlespeech/s2t/frontend/audio.py | AK391/PaddleSpeech | 8cdbe3a6c0fe447e54cfbcfd82139d2869f5fc49 | [
"Apache-2.0"
] | null | null | null | paddlespeech/s2t/frontend/audio.py | AK391/PaddleSpeech | 8cdbe3a6c0fe447e54cfbcfd82139d2869f5fc49 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the audio segment class."""
import copy
import io
import random
import re
import struct
import numpy as np
import resampy
import soundfile
from scipy import signal
from .utility import convert_samples_from_float32
from .utility import convert_samples_to_float32
from .utility import subfile_from_tar
def superimpose(self, other):
"""Add samples from another segment to those of this segment
(sample-wise addition, not segment concatenation).
Note that this is an in-place transformation.
:param other: Segment containing samples to be added in.
:type other: AudioSegments
:raise TypeError: If type of two segments don't match.
:raise ValueError: If the sample rates of the two segments are not
equal, or if the lengths of segments don't match.
"""
if isinstance(other, type(self)):
raise TypeError("Cannot add segments of different types: %s "
"and %s." % (type(self), type(other)))
if self._sample_rate != other._sample_rate:
raise ValueError("Sample rates must match to add segments.")
if len(self._samples) != len(other._samples):
raise ValueError("Segment lengths must match to add segments.")
self._samples += other._samples
def to_bytes(self, dtype='float32'):
"""Create a byte string containing the audio content.
:param dtype: Data type for export samples. Options: 'int16', 'int32',
'float32', 'float64'. Default is 'float32'.
:type dtype: str
:return: Byte string containing audio content.
:rtype: str
"""
samples = self._convert_samples_from_float32(self._samples, dtype)
return samples.tostring()
def to(self, dtype='int16'):
"""Create a `dtype` audio content.
:param dtype: Data type for export samples. Options: 'int16', 'int32',
'float32', 'float64'. Default is 'float32'.
:type dtype: str
:return: np.ndarray containing `dtype` audio content.
:rtype: str
"""
samples = self._convert_samples_from_float32(self._samples, dtype)
return samples
def gain_db(self, gain):
"""Apply gain in decibels to samples.
Note that this is an in-place transformation.
:param gain: Gain in decibels to apply to samples.
:type gain: float|1darray
"""
self._samples *= 10.**(gain / 20.)
    def change_speed(self, speed_rate):
        """Change the audio speed (via sox's tempo/speed effect), in place.

        :param speed_rate: Rate of speed change:
                           speed_rate > 1.0, speed up the audio;
                           speed_rate = 1.0, unchanged;
                           speed_rate < 1.0, slow down the audio;
                           speed_rate <= 0.0, not allowed, raise ValueError.
        :type speed_rate: float
        :raises ValueError: If speed_rate <= 0.0.
        :raises RuntimeError: If soxbindings cannot be imported or installed.
        """
        if speed_rate == 1.0:
            return
        if speed_rate <= 0:
            raise ValueError("speed_rate should be greater than zero.")

        # Disabled numpy linear-interpolation implementation kept for
        # reference (slower quality, no dependency):
        # numpy
        # old_length = self._samples.shape[0]
        # new_length = int(old_length / speed_rate)
        # old_indices = np.arange(old_length)
        # new_indices = np.linspace(start=0, stop=old_length, num=new_length)
        # self._samples = np.interp(new_indices, old_indices, self._samples)

        # sox, slow
        # NOTE(review): this falls back to installing packages at runtime
        # via pip, and uses bare `except:` clauses that also swallow
        # KeyboardInterrupt/SystemExit — consider narrowing to ImportError.
        try:
            import soxbindings as sox
        except:
            try:
                from paddlespeech.s2t.utils import dynamic_pip_install
                package = "sox"
                dynamic_pip_install.install(package)
                package = "soxbindings"
                dynamic_pip_install.install(package)
                import soxbindings as sox
            except:
                raise RuntimeError("Can not install soxbindings on your system." )
        tfm = sox.Transformer()
        tfm.set_globals(multithread=False)
        tfm.speed(speed_rate)
        # build_array returns shape (n, 1); squeeze to 1-D float32 and copy
        # so self._samples owns its buffer.
        self._samples = tfm.build_array(
            input_array=self._samples,
            sample_rate_in=self._sample_rate).squeeze(-1).astype(
                np.float32).copy()
    def normalize(self, target_db=-20, max_gain_db=300.0):
        """Normalize audio to be of the desired RMS value in decibels.

        Note that this is an in-place transformation.

        :param target_db: Target RMS value in decibels. This value should be
                          less than 0.0 as 0.0 is full-scale audio.
        :type target_db: float
        :param max_gain_db: Max amount of gain in dB that can be applied for
                            normalization. This is to prevent nans when
                            attempting to normalize a signal consisting of
                            all zeros.
        :type max_gain_db: float
        :raises ValueError: If the required gain to normalize the segment to
                            the target_db value exceeds max_gain_db.
        """
        gain = target_db - self.rms_db
        if gain > max_gain_db:
            # NOTE(review): message grammar is off ("the the ... have
            # exceeds"); left unchanged in case callers match on it.
            raise ValueError(
                "Unable to normalize segment to %f dB because the "
                "the probable gain have exceeds max_gain_db (%f dB)" %
                (target_db, max_gain_db))
        # rms_db is read a second time here instead of reusing `gain`;
        # assumes the property is stable between reads — TODO confirm.
        self.gain_db(min(max_gain_db, target_db - self.rms_db))
    def normalize_online_bayesian(self,
                                  target_db,
                                  prior_db,
                                  prior_samples,
                                  startup_delay=0.0):
        """Normalize audio using a production-compatible online/causal
        algorithm. This uses an exponential likelihood and gamma prior to
        make online estimates of the RMS even when there are very few samples.

        Note that this is an in-place transformation.

        :param target_db: Target RMS value in decibels.
        :type target_db: float
        :param prior_db: Prior RMS estimate in decibels.
        :type prior_db: float
        :param prior_samples: Prior strength in number of samples.
        :type prior_samples: float
        :param startup_delay: Default 0.0s. If provided, this function will
                              accrue statistics for the first startup_delay
                              seconds before applying online normalization.
        :type startup_delay: float
        """
        # Estimate total RMS online.
        startup_sample_idx = min(self.num_samples - 1,
                                 int(self.sample_rate * startup_delay))
        prior_mean_squared = 10.**(prior_db / 10.)
        prior_sum_of_squares = prior_mean_squared * prior_samples
        cumsum_of_squares = np.cumsum(self.samples**2)
        sample_count = np.arange(self.num_samples) + 1
        if startup_sample_idx > 0:
            # Freeze statistics over the startup window: the early gain is
            # based on the estimate at the end of the delay instead of the
            # tiny sample counts at the start.
            cumsum_of_squares[:startup_sample_idx] = \
                cumsum_of_squares[startup_sample_idx]
            sample_count[:startup_sample_idx] = \
                sample_count[startup_sample_idx]
        # Posterior mean-square: the prior acts as `prior_samples`
        # pseudo-observations of power `prior_mean_squared`.
        mean_squared_estimate = ((cumsum_of_squares + prior_sum_of_squares) /
                                 (sample_count + prior_samples))
        rms_estimate_db = 10 * np.log10(mean_squared_estimate)
        # Compute required time-varying gain.
        gain_db = target_db - rms_estimate_db
        self.gain_db(gain_db)
    def resample(self, target_sample_rate, filter='kaiser_best'):
        """Resample the audio to a target sample rate, in place.

        :param target_sample_rate: Target sample rate.
        :type target_sample_rate: int
        :param filter: The resampling filter to use one of {'kaiser_best',
                       'kaiser_fast'}.
        :type filter: str
        """
        # Delegates to resampy; note `filter` deliberately shadows the
        # builtin to mirror resampy's own keyword name.
        self._samples = resampy.resample(
            self.samples, self.sample_rate, target_sample_rate, filter=filter)
        self._sample_rate = target_sample_rate
    def pad_silence(self, duration, sides='both'):
        """Pad this audio sample with a period of silence, in place.

        :param duration: Length of silence in seconds to pad.
        :type duration: float
        :param sides: Position for padding:
                      'beginning' - adds silence in the beginning;
                      'end' - adds silence in the end;
                      'both' - adds silence in both the beginning and the end.
        :type sides: str
        :raises ValueError: If sides is not supported.
        """
        if duration == 0.0:
            # NOTE(review): only this early-exit path returns self; all
            # other paths return None — confirm no caller chains on it.
            return self
        cls = type(self)
        # make_silence / concatenate are class-level helpers defined
        # elsewhere in this class.
        silence = self.make_silence(duration, self._sample_rate)
        if sides == "beginning":
            padded = cls.concatenate(silence, self)
        elif sides == "end":
            padded = cls.concatenate(self, silence)
        elif sides == "both":
            padded = cls.concatenate(silence, self, silence)
        else:
            raise ValueError("Unknown value for the sides %s" % sides)
        self._samples = padded._samples
def shift(self, shift_ms):
"""Shift the audio in time. If `shift_ms` is positive, shift with time
advance; if negative, shift with time delay. Silence are padded to
keep the duration unchanged.
Note that this is an in-place transformation.
:param shift_ms: Shift time in millseconds. If positive, shift with
time advance; if negative; shift with time delay.
:type shift_ms: float
:raises ValueError: If shift_ms is longer than audio duration.
"""
if abs(shift_ms) / 1000.0 > self.duration:
raise ValueError("Absolute value of shift_ms should be smaller "
"than audio duration.")
shift_samples = int(shift_ms * self._sample_rate / 1000)
if shift_samples > 0:
# time advance
self._samples[:-shift_samples] = self._samples[shift_samples:]
self._samples[-shift_samples:] = 0
elif shift_samples < 0:
# time delay
self._samples[-shift_samples:] = self._samples[:shift_samples]
self._samples[:-shift_samples] = 0
    def subsegment(self, start_sec=None, end_sec=None):
        """Cut the AudioSegment between given boundaries, in place.

        :param start_sec: Beginning of subsegment in seconds (defaults to
                          0.0; negative values count back from the end).
        :type start_sec: float
        :param end_sec: End of subsegment in seconds (defaults to the full
                        duration; negative values count back from the end).
        :type end_sec: float
        :raise ValueError: If start_sec or end_sec is incorrectly set, e.g. out
                           of bounds in time.
        """
        start_sec = 0.0 if start_sec is None else start_sec
        end_sec = self.duration if end_sec is None else end_sec
        # Negative offsets are interpreted relative to the end of the clip.
        if start_sec < 0.0:
            start_sec = self.duration + start_sec
        if end_sec < 0.0:
            end_sec = self.duration + end_sec
        # After normalization the boundaries must lie within [0, duration]
        # and be correctly ordered.
        if start_sec < 0.0:
            raise ValueError("The slice start position (%f s) is out of "
                             "bounds." % start_sec)
        if end_sec < 0.0:
            raise ValueError("The slice end position (%f s) is out of bounds." %
                             end_sec)
        if start_sec > end_sec:
            raise ValueError("The slice start position (%f s) is later than "
                             "the end position (%f s)." % (start_sec, end_sec))
        if end_sec > self.duration:
            raise ValueError("The slice end position (%f s) is out of bounds "
                             "(> %f s)" % (end_sec, self.duration))
        start_sample = int(round(start_sec * self._sample_rate))
        end_sample = int(round(end_sec * self._sample_rate))
        self._samples = self._samples[start_sample:end_sample]
    def random_subsegment(self, subsegment_length, rng=None):
        """Cut a randomly-positioned subsegment of the given length, in place.

        :param subsegment_length: Subsegment length in seconds.
        :type subsegment_length: float
        :param rng: Random number generator state (a fresh random.Random()
                    is used when omitted).
        :type rng: random.Random
        :raises ValueError: If the length of subsegment is greater than
                            the original segment.
        """
        rng = random.Random() if rng is None else rng
        if subsegment_length > self.duration:
            raise ValueError("Length of subsegment must not be greater "
                             "than original segment.")
        # Pick a uniform start so the subsegment fits entirely in bounds.
        start_time = rng.uniform(0.0, self.duration - subsegment_length)
        self.subsegment(start_time, start_time + subsegment_length)
def convolve(self, impulse_segment, allow_resample=False):
"""Convolve this audio segment with the given impulse segment.
Note that this is an in-place transformation.
:param impulse_segment: Impulse response segments.
:type impulse_segment: AudioSegment
:param allow_resample: Indicates whether resampling is allowed when
the impulse_segment has a different sample
rate from this signal.
:type allow_resample: bool
:raises ValueError: If the sample rate is not match between two
audio segments when resample is not allowed.
"""
if allow_resample and self.sample_rate != impulse_segment.sample_rate:
impulse_segment.resample(self.sample_rate)
if self.sample_rate != impulse_segment.sample_rate:
raise ValueError("Impulse segment's sample rate (%d Hz) is not "
"equal to base signal sample rate (%d Hz)." %
(impulse_segment.sample_rate, self.sample_rate))
samples = signal.fftconvolve(self.samples, impulse_segment.samples,
"full")
self._samples = samples
def convolve_and_normalize(self, impulse_segment, allow_resample=False):
"""Convolve and normalize the resulting audio segment so that it
has the same average power as the input signal.
Note that this is an in-place transformation.
:param impulse_segment: Impulse response segments.
:type impulse_segment: AudioSegment
:param allow_resample: Indicates whether resampling is allowed when
the impulse_segment has a different sample
rate from this signal.
:type allow_resample: bool
"""
target_db = self.rms_db
self.convolve(impulse_segment, allow_resample=allow_resample)
self.normalize(target_db)
    def add_noise(self,
                  noise,
                  snr_dB,
                  allow_downsampling=False,
                  max_gain_db=300.0,
                  rng=None):
        """Add the given noise segment at a specific signal-to-noise ratio.
        If the noise segment is longer than this segment, a random subsegment
        of matching length is sampled from it and used instead.

        Note that this is an in-place transformation.

        :param noise: Noise signal to add.
        :type noise: AudioSegment
        :param snr_dB: Signal-to-Noise Ratio, in decibels.
        :type snr_dB: float
        :param allow_downsampling: Whether to allow the noise signal to be
                                   downsampled to match the base signal sample
                                   rate.
        :type allow_downsampling: bool
        :param max_gain_db: Maximum amount of gain to apply to noise signal
                            before adding it in. This is to prevent attempting
                            to apply infinite gain to a zero signal.
        :type max_gain_db: float
        :param rng: Random number generator state.
        :type rng: None|random.Random
        :raises ValueError: If the sample rate does not match between the two
                            audio segments when downsampling is not allowed, or
                            if the duration of noise segments is shorter than
                            original audio segments.
        """
        rng = random.Random() if rng is None else rng
        if allow_downsampling and noise.sample_rate > self.sample_rate:
            # NOTE(review): resample() mutates in place and returns None,
            # so rebinding `noise` here looks wrong — confirm upstream.
            noise = noise.resample(self.sample_rate)
        if noise.sample_rate != self.sample_rate:
            raise ValueError("Noise sample rate (%d Hz) is not equal to base "
                             "signal sample rate (%d Hz)." % (noise.sample_rate,
                                                              self.sample_rate))
        if noise.duration < self.duration:
            raise ValueError("Noise signal (%f sec) must be at least as long as"
                             " base signal (%f sec)." %
                             (noise.duration, self.duration))
        # Gain needed on the noise so that signal-RMS minus noise-RMS
        # equals the requested SNR, capped at max_gain_db.
        noise_gain_db = min(self.rms_db - noise.rms_db - snr_dB, max_gain_db)
        # Work on a deep copy so the caller's noise segment is untouched.
        noise_new = copy.deepcopy(noise)
        noise_new.random_subsegment(self.duration, rng=rng)
        noise_new.gain_db(noise_gain_db)
        self.superimpose(noise_new)
    def _convert_samples_to_float32(self, samples):
        """Convert sample type to float32.

        Audio sample type is usually integer or float-point.
        Integers will be scaled to [-1, 1] in float32.
        """
        # Thin wrapper over the module-level helper imported from .utility.
        return convert_samples_to_float32(samples)
    def _convert_samples_from_float32(self, samples, dtype):
        """Convert sample type from float32 to dtype.

        Audio sample type is usually integer or float-point. For integer
        type, float32 will be rescaled from [-1, 1] to the maximum range
        supported by the integer type.

        This is for writing a audio file.
        """
        # Thin wrapper over the module-level helper imported from .utility.
        return convert_samples_from_float32(samples, dtype)
| 41.204735 | 84 | 0.598378 |
d49613fe0b2e81e10d722fc25f0c3fd9aa1b0a51 | 4,119 | py | Python | tornado_debugger/debug.py | bhch/tornado-debugger | 4adeead7a45506eda34fc8d1e91dd32acc8cfe4e | [
"BSD-3-Clause"
] | 1 | 2022-03-21T11:52:30.000Z | 2022-03-21T11:52:30.000Z | tornado_debugger/debug.py | bhch/tornado-debugger | 4adeead7a45506eda34fc8d1e91dd32acc8cfe4e | [
"BSD-3-Clause"
] | null | null | null | tornado_debugger/debug.py | bhch/tornado-debugger | 4adeead7a45506eda34fc8d1e91dd32acc8cfe4e | [
"BSD-3-Clause"
] | null | null | null | import os.path
import re
import sys
import traceback
from pprint import pformat
import tornado
from tornado import template
# Case-insensitive pattern matching substrings that mark a setting name as
# sensitive (api keys, passwords, secrets, tokens, ...). Presumably used to
# mask such values in the debugger output — confirm against call sites.
SENSITIVE_SETTINGS_RE = re.compile(
    'api|key|pass|salt|secret|signature|token',
    flags=re.IGNORECASE
)
| 31.442748 | 77 | 0.571255 |
d496568fcdd0e4278b5c17076444af1d96c25b39 | 2,426 | py | Python | base/pylib/seq_iter.py | jpolitz/lambda-py-paper | 746ef63fc1123714b4adaf78119028afbea7bd76 | [
"Apache-2.0"
] | 1 | 2017-12-10T00:05:54.000Z | 2017-12-10T00:05:54.000Z | base/pylib/seq_iter.py | jpolitz/lambda-py-paper | 746ef63fc1123714b4adaf78119028afbea7bd76 | [
"Apache-2.0"
] | null | null | null | base/pylib/seq_iter.py | jpolitz/lambda-py-paper | 746ef63fc1123714b4adaf78119028afbea7bd76 | [
"Apache-2.0"
] | null | null | null |
___assign("%SeqIter", SeqIter)
___assign("%iter", iter)
___assign("%next", next)
___assign("%FuncIter", FuncIter)
| 22.462963 | 58 | 0.490107 |
d496c50445b160bee65444aedffd5152e26bcfa5 | 1,542 | py | Python | caseworker/open_general_licences/enums.py | code-review-doctor/lite-frontend-1 | cb3b885bb389ea33ef003c916bea7b03a36d86bb | [
"MIT"
] | null | null | null | caseworker/open_general_licences/enums.py | code-review-doctor/lite-frontend-1 | cb3b885bb389ea33ef003c916bea7b03a36d86bb | [
"MIT"
] | null | null | null | caseworker/open_general_licences/enums.py | code-review-doctor/lite-frontend-1 | cb3b885bb389ea33ef003c916bea7b03a36d86bb | [
"MIT"
] | null | null | null | from lite_content.lite_internal_frontend.open_general_licences import (
OGEL_DESCRIPTION,
OGTCL_DESCRIPTION,
OGTL_DESCRIPTION,
)
from lite_forms.components import Option
| 29.09434 | 117 | 0.647211 |
d496c9cfdd316aad01a20acdae3c9c7e998fb11f | 887 | py | Python | Matrix/Python/rotatematrix.py | pratika1505/DSA-Path-And-Important-Questions | a86a0774f0abf5151c852afd2bbf67a5368125c8 | [
"MIT"
] | 26 | 2021-08-04T17:03:26.000Z | 2022-03-08T08:43:44.000Z | Matrix/Python/rotatematrix.py | pratika1505/DSA-Path-And-Important-Questions | a86a0774f0abf5151c852afd2bbf67a5368125c8 | [
"MIT"
] | 25 | 2021-08-04T16:58:33.000Z | 2021-11-01T05:26:19.000Z | Matrix/Python/rotatematrix.py | pratika1505/DSA-Path-And-Important-Questions | a86a0774f0abf5151c852afd2bbf67a5368125c8 | [
"MIT"
] | 16 | 2021-08-14T20:15:24.000Z | 2022-02-23T11:04:06.000Z | # -*- coding: utf-8 -*-
"""RotateMatrix.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1LX-dZFuQCyBXDNVosTp0MHaZZxoc5T4I
"""
# Function to rotate a square matrix by 90 degrees.
def rotate(mat):
    """Rotate square matrix `mat` by 90 degrees clockwise, in place.

    NOTE(review): the original function body was lost during extraction
    (only this header comment and the caller survived); reconstructed as
    the conventional transpose-then-reverse clockwise rotation — confirm
    against the expected output.
    """
    n = len(mat)
    # Transpose in place.
    for i in range(n):
        for j in range(i + 1, n):
            mat[i][j], mat[j][i] = mat[j][i], mat[i][j]
    # Reverse each row to complete the clockwise rotation.
    for row in mat:
        row.reverse()


if __name__ == '__main__':
    # Declaring matrix
    mat = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [13, 14, 15, 16]
    ]
    rotate(mat)
    # Printing the rotated matrix, one row per line
    for i in mat:
        print(i)
| 19.282609 | 77 | 0.500564 |
d49731577779af0d944350934f9656734de31c66 | 319 | py | Python | sort.py | EYH0602/FP_Workshop | 866b180b411c1ef439e1a2d039c6d6333e91cd39 | [
"MIT"
] | 1 | 2021-10-21T02:15:03.000Z | 2021-10-21T02:15:03.000Z | sort.py | EYH0602/FP_Workshop | 866b180b411c1ef439e1a2d039c6d6333e91cd39 | [
"MIT"
] | null | null | null | sort.py | EYH0602/FP_Workshop | 866b180b411c1ef439e1a2d039c6d6333e91cd39 | [
"MIT"
] | null | null | null |
xs = [1, 3, 2, 4, 5, 2]
sorted_xs = quicksort(xs)
| 17.722222 | 40 | 0.526646 |
d49737aed7a2d03e7911f282302b8766a0010d5f | 9,372 | py | Python | bddtests/steps/bdd_test_util.py | TarantulaTechnology/fabric5 | 6da971177ab7d74f1e1cfa6f7fc73e75768e5686 | [
"Apache-2.0"
] | 4 | 2018-01-02T04:26:16.000Z | 2018-10-25T08:51:06.000Z | bddtests/steps/bdd_test_util.py | TarantulaTechnology/fabric5 | 6da971177ab7d74f1e1cfa6f7fc73e75768e5686 | [
"Apache-2.0"
] | null | null | null | bddtests/steps/bdd_test_util.py | TarantulaTechnology/fabric5 | 6da971177ab7d74f1e1cfa6f7fc73e75768e5686 | [
"Apache-2.0"
] | 9 | 2016-11-17T07:40:04.000Z | 2020-03-16T16:11:39.000Z |
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
import devops_pb2
import fabric_pb2
import chaincode_pb2
from grpc.beta import implementations
def cli_call(context, arg_list, expect_success=True):
    """Executes a CLI command in a subprocess and return the results.
    @param context: the behave context (unused here, kept for signature parity)
    @param arg_list: a list command arguments
    @param expect_success: use False to return even if an error occurred when executing the command
    @return: (string, string, int) output message, error message, return code
    """
    proc = subprocess.Popen(arg_list,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, error = proc.communicate()
    # On failure, echo whatever the command produced before (optionally)
    # raising.
    if proc.returncode != 0:
        if output is not None:
            print("Output:\n" + output)
        if error is not None:
            print("Error Message:\n" + error)
        if expect_success:
            raise subprocess.CalledProcessError(proc.returncode, arg_list,
                                                output)
    return output, error, proc.returncode
# Registers a user on a specific composeService
def registerUser(context, secretMsg, composeService):
    """Store a UserRegistration for secretMsg['enrollId'] on the behave
    context, lazily initializing context.users on first use.

    :raises Exception: if the enrollId was already registered.
    """
    userName = secretMsg['enrollId']
    # Idiom fix: replaces the original `if 'users' in context: pass / else`.
    if 'users' not in context:
        context.users = {}
    if userName in context.users:
        raise Exception("User already registered: {0}".format(userName))
    context.users[userName] = UserRegistration(secretMsg, composeService)
# Resolves a container's IP address from a fragment of its compose name.
def ipFromContainerNamePart(namePart, containerDataList):
    """Return the IP address of the container whose name starts with the
    compose project prefix (basename of cwd + '_') followed by namePart.

    If several containers match, the LAST match wins (behavior preserved
    from the original implementation).

    :raises Exception: if no container name matches.
    """
    ip = None
    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
    for containerData in containerDataList:
        if containerData.containerName.startswith(containerNamePrefix + namePart):
            ip = containerData.ipAddress
    # Idiom fix: `is None` instead of `== None`.
    if ip is None:
        raise Exception("Could not find container with namePart = {0}".format(namePart))
    return ip
def getTxResult(context, enrollId):
    '''Returns the TransactionResult using the enrollId supplied.

    Opens a gRPC channel to the compose service the user registered with,
    queries the Devops service for context.transactionID, and parses the
    TransactionResult protobuf out of the response message bytes.
    '''
    assert 'users' in context, "users not found in context. Did you register a user?"
    assert 'compose_containers' in context, "compose_containers not found in context"
    (channel, userRegistration) = getGRPCChannelAndUser(context, enrollId)
    stub = devops_pb2.beta_create_Devops_stub(channel)
    txRequest = devops_pb2.TransactionRequest(transactionUuid = context.transactionID)
    # The second argument (2) is the gRPC call timeout in seconds.
    response = stub.GetTransactionResult(txRequest, 2)
    assert response.status == fabric_pb2.Response.SUCCESS, 'Failure getting Transaction Result from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
    # Now grab the TransactionResult from the Msg bytes
    txResult = fabric_pb2.TransactionResult()
    txResult.ParseFromString(response.msg)
    return txResult
def getGRPCChannelAndUser(context, enrollId):
    '''Returns a tuple of GRPC channel and UserRegistration instance. The channel is open to the composeService that the user registered with.'''
    userRegistration = getUserRegistration(context, enrollId)
    # Get the IP address of the server that the user registered on
    ipAddress = ipFromContainerNamePart(userRegistration.composeService, context.compose_containers)
    channel = getGRPCChannel(ipAddress)
    return (channel, userRegistration)
def getDeployment(context, ccAlias):
    '''Return the chaincode spec deployed under alias `ccAlias`, or None if
    no such deployment exists. Lazily initializes context.deployments on
    first use.'''
    # Idiom fix: replaces `if ... in context: pass / else` and the manual
    # lookup-with-default; also drops the dead commented-out raise.
    if 'deployments' not in context:
        context.deployments = {}
    return context.deployments.get(ccAlias)
def deployChaincode(context, enrollId, chaincodePath, ccAlias, ctor):
    '''Deploy a GOLANG chaincode with the specified alias for the specified
    enrollId, recording the resulting spec in context.deployments.'''
    (channel, userRegistration) = getGRPCChannelAndUser(context, enrollId)
    stub = devops_pb2.beta_create_Devops_stub(channel)
    # Make sure deployment alias does NOT already exist
    assert getDeployment(context, ccAlias) == None, "Deployment alias already exists: '{0}'.".format(ccAlias)
    args = getArgsFromContextForUser(context, enrollId)
    # Build the deployment spec; the chaincode name is filled in from the
    # Deploy response below.
    ccSpec = chaincode_pb2.ChaincodeSpec(type = chaincode_pb2.ChaincodeSpec.GOLANG,
                                         chaincodeID = chaincode_pb2.ChaincodeID(name="",path=chaincodePath),
                                         ctorMsg = chaincode_pb2.ChaincodeInput(function = ctor, args = args))
    ccSpec.secureContext = userRegistration.getUserName()
    if 'metadata' in context:
        ccSpec.metadata = context.metadata
    try:
        # The second argument (60) is the gRPC call timeout in seconds.
        ccDeploymentSpec = stub.Deploy(ccSpec, 60)
        ccSpec.chaincodeID.name = ccDeploymentSpec.chaincodeSpec.chaincodeID.name
        context.grpcChaincodeSpec = ccSpec
        context.deployments[ccAlias] = ccSpec
    except:
        # Drop the stub reference before re-raising; the bare except is
        # intentional here so any failure propagates unchanged.
        del stub
        raise
def getContainerDataValuesFromContext(context, aliases, callback):
    """Collect callback(containerData) for the first container whose name
    matches each alias (compose project prefix + alias); aliases with no
    matching container are skipped."""
    assert 'compose_containers' in context, "compose_containers not found in context"
    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
    values = []
    for alias in aliases:
        fullPrefix = containerNamePrefix + alias
        for containerData in context.compose_containers:
            if containerData.containerName.startswith(fullPrefix):
                values.append(callback(containerData))
                break  # first match per alias only
    return values
| 40.747826 | 189 | 0.714789 |
d4973b8aa4822ac46365e7bcf3331ae6bf592f03 | 13,868 | py | Python | 1.0.0/hp/dict.py | cefect/SOFDA0 | 62c5566d0f388a5fd76a070ceb5ee3e38b0d7463 | [
"MIT"
] | null | null | null | 1.0.0/hp/dict.py | cefect/SOFDA0 | 62c5566d0f388a5fd76a070ceb5ee3e38b0d7463 | [
"MIT"
] | null | null | null | 1.0.0/hp/dict.py | cefect/SOFDA0 | 62c5566d0f388a5fd76a070ceb5ee3e38b0d7463 | [
"MIT"
] | null | null | null | '''
Created on Mar 6, 2018
@author: cef
hp functions for workign with dictionaries
'''
import logging, os, sys, math, copy, inspect
from collections import OrderedDict
from weakref import WeakValueDictionary as wdict
import numpy as np
import hp.basic
mod_logger = logging.getLogger(__name__) #creates a child logger of the root
def subset(d_big, l, #get a dictionary subset using standard user inputs
           #ordered = False, using containers instead
           set_type = 'sub',
           method = 'exact',
           container = dict,
           logger = mod_logger,
           *search_args):
    """Return a subset of d_big keyed by the entries of l.

    NOTE(review): Python-2 code (`basestring`); port to `str` for py3.

    :param d_big: source dictionary
    :param l: key (str) or list of keys to extract; None returns an empty
        container
    :param set_type: how to treat the set:
        'sub'       raise IOError if any entry of l is missing from d_big
        'intersect' silently keep only the common keys
    :param method: key-matching strategy; 'search' falls back to
        value_by_ksearch (substring-style lookup, defined elsewhere in
        this package) when the exact key is absent
    :param container: mapping type used for the result (e.g. dict,
        OrderedDict)
    :param search_args: extra positional args forwarded to value_by_ksearch
    """
    logger = logger.getChild('subset')
    #===========================================================================
    # setup
    #==========================================================================
    d = container()
    """
    #dictionary setup
    if ordered: d = OrderedDict()
    else: d = dict()"""
    #input list setup: normalize a bare string to a one-element list
    if isinstance(l, list): pass
    elif isinstance(l, basestring): l = [l]
    elif l is None: return d
    else: raise IOError
    nofnd_l = []
    #===========================================================================
    # determine subset by kwarg
    #===========================================================================
    for k in l:
        try: #attempt the direct match
            d[k] = d_big[k]
        except:
            #===================================================================
            # try again using search functions
            #===================================================================
            # NOTE(review): bare `except` clauses here swallow all errors,
            # not just KeyError — consider narrowing.
            try:
                if method == 'search':
                    #search and return this value
                    v = value_by_ksearch(k, d_big, logger=logger, *search_args)
                    if not v is None:
                        d[k] = v
                        continue #not sure this is needed
                    else: raise ValueError
                else: raise ValueError
            #===================================================================
            # nothing found. proceed based on set_type
            #===================================================================
            except:
                logger.debug('unable to find \'%s\' in the dict with method \'%s\''%(k, method))
                if set_type == 'sub':
                    boolar = hp.basic.bool_list_in_list(d_big.keys(), l)
                    if not np.all(boolar):
                        logger.error('%i entries in list not found in big_d'%(len(l) - boolar.sum()))
                        raise IOError
                elif set_type == 'intersect': nofnd_l.append(k)
                else: raise IOError
    #===========================================================================
    # wrap up
    #===========================================================================
    if len(nofnd_l) >0:
        logger.debug('%i of %i list entries DO NOT intersect: %s'%(len(nofnd_l), len(l), nofnd_l))
        if set_type == 'sub': raise IOError
    #===========================================================================
    # check
    #===========================================================================
    if len(d) == 0:
        logger.warning('0 common values between d(%i) and l(%i)'%(len(d), len(l)))
    logger.debug('returning d with %i entries: %s \n'%(len(d), d.keys()))
    return container(d)
#===============================================================================
# def subset(d_big, l, #get a dictionary subset using standard user inputs
# ordered = False, set_type = 'sub', search = 'search',
# logger = mod_logger):
# """
# #===========================================================================
# # INPUTS
# #===========================================================================
# l: list of keys (within d_big) on which to erturn the sutset
#
# set_type: how to treat the set
# intersect: returna dictionary with only the common keys
# sub: raise a flag if not every item in 'l' is found in d_big.keys()
#
# search: what type of key search to perform (re.function)
# """
# logger = logger.getChild('subset')
#
# #===========================================================================
# # setup[]
# #==========================================================================
# #dictionary setup
# if ordered: d = OrderedDict()
# else: d = dict()
#
# #input list setup
# if isinstance(l, list): pass
# elif isinstance(l, basestring): l = [l]
# elif l is None: return None
# else: raise IOError
#
# #===========================================================================
# # determine subset by kwarg
# #===========================================================================
# if set_type == 'sub':
# try:
# for k in l:
# d[k] = d_big[k]
#
# except:
# boolar = hp.basic.bool_list_in_list(d_big.keys(), l)
#
# if not np.all(boolar):
# logger.error('%i entries in list not found in big_d'%(len(l) - boolar.sum()))
#
# raise IOError
#
# if len(d) == 0: raise IOError
#
# elif set_type == 'intersect':
# nofnd_l = []
# for k in l:
# try:
# d[k] = d_big[k]
# except:
# nofnd_l.append(k)
#
# if len(nofnd_l) >0:
# logger.debug('%i of %i list entries DO NOT intersect: %s'%(len(nofnd_l), len(l), nofnd_l))
#
# #===========================================================================
# # check
# #===========================================================================
# if len(d) == 0: logger.warning('0 common values between d(%i) and l(%i)'%
# (len(d), len(l)))
#
# return d
#===============================================================================
from collections import OrderedDict
| 36.687831 | 113 | 0.391477 |
d49b6df009b775a63c890cb5c9656357e0580e52 | 3,532 | py | Python | Core/pre.py | Cyber-Dioxide/CyberPhish | bc2e39d8612ef657d481cdd40d676983f7bf190c | [
"Apache-2.0"
] | 9 | 2021-12-28T08:17:41.000Z | 2022-03-20T17:49:21.000Z | Core/pre.py | Cyber-Dioxide/CyberPhish | bc2e39d8612ef657d481cdd40d676983f7bf190c | [
"Apache-2.0"
] | null | null | null | Core/pre.py | Cyber-Dioxide/CyberPhish | bc2e39d8612ef657d481cdd40d676983f7bf190c | [
"Apache-2.0"
] | 1 | 2021-12-27T08:13:50.000Z | 2021-12-27T08:13:50.000Z | import os
import random

# Third-party dependency: install on the fly if it is missing, then import it.
try:
    from colorama import Fore, Style
except ModuleNotFoundError:
    os.system("pip install colorama")
    # BUGFIX: the original fell through without importing colorama after
    # installing it, so Fore/Style stayed undefined and every later use
    # raised NameError.
    from colorama import Fore, Style

from urllib.request import urlopen

from Core.helper.color import green, white, blue, red, start, alert

# Tool version string displayed in the interface.
Version = "2.2"
# ANSI escape: bright yellow text on a black background.
yellow = ("\033[1;33;40m")

# Bright color palette; one entry is picked at random to colorize the banner.
all_col = [Style.BRIGHT + Fore.RED, Style.BRIGHT + Fore.CYAN, Style.BRIGHT + Fore.LIGHTCYAN_EX,
           Style.BRIGHT + Fore.LIGHTBLUE_EX, Style.BRIGHT + Fore.LIGHTCYAN_EX, Style.BRIGHT + Fore.LIGHTMAGENTA_EX,
           Style.BRIGHT + Fore.LIGHTYELLOW_EX]
ran = random.choice(all_col)

# NOTE(review): banner is not defined in this section -- presumably defined
# elsewhere in this module; confirm before relying on it.
banner()
| 49.746479 | 144 | 0.473386 |
d49bc7fba6d65f4ec2d4a29ecf9e4f75e3ad24d1 | 10,163 | py | Python | automatoes/authorize.py | candango/automatoes | fbfd01cfaa2c36e23a7251e333ef3fa86ef4bff9 | [
"Apache-2.0"
] | 13 | 2019-10-08T14:57:19.000Z | 2022-01-12T10:01:30.000Z | automatoes/authorize.py | piraz/automatoes | fc6a20c317a8ac863bfb054c9541e310e0431e5f | [
"Apache-2.0"
] | 125 | 2019-10-08T15:04:17.000Z | 2022-03-29T19:27:12.000Z | automatoes/authorize.py | candango/automatoes | fbfd01cfaa2c36e23a7251e333ef3fa86ef4bff9 | [
"Apache-2.0"
] | 8 | 2019-10-14T15:18:57.000Z | 2021-04-21T10:41:08.000Z | #!/usr/bin/env python
#
# Copyright 2019-2020 Flavio Garcia
# Copyright 2016-2017 Veeti Paananen under MIT License
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The domain authorization command.
"""
from . import get_version
from .acme import AcmeV2
from .crypto import generate_jwk_thumbprint
from .errors import AutomatoesError
from .model import Order
from cartola import fs, sysexits
import hashlib
import os
import sys
| 39.239382 | 79 | 0.546394 |
d49c1e0bb83e7c39fdece7542b9e2c9d25d03288 | 5,832 | py | Python | rllib/agents/dqn/simple_q_torch_policy.py | jamesliu/ray | 11ab412db1fa3603a3006e8ed414e80dd1f11c0c | [
"Apache-2.0"
] | 3 | 2020-12-12T05:10:44.000Z | 2021-04-12T21:52:47.000Z | rllib/agents/dqn/simple_q_torch_policy.py | jamesliu/ray | 11ab412db1fa3603a3006e8ed414e80dd1f11c0c | [
"Apache-2.0"
] | 227 | 2021-10-01T08:00:01.000Z | 2021-12-28T16:47:26.000Z | rllib/agents/dqn/simple_q_torch_policy.py | gramhagen/ray | c18caa4db36d466718bdbcb2229aa0b2dc03da1f | [
"Apache-2.0"
] | 1 | 2020-12-02T06:26:20.000Z | 2020-12-02T06:26:20.000Z | """PyTorch policy class used for Simple Q-Learning"""
import logging
from typing import Dict, Tuple
import gym
import ray
from ray.rllib.agents.dqn.simple_q_tf_policy import (
build_q_models, compute_q_values, get_distribution_inputs_and_class)
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import TorchCategorical, \
TorchDistributionWrapper
from ray.rllib.policy import Policy
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy import TorchPolicy
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_utils import concat_multi_gpu_td_errors, huber_loss
from ray.rllib.utils.typing import TensorType, TrainerConfigDict
torch, nn = try_import_torch()

# Convenience alias for ``torch.nn.functional``; stays None when torch could
# not be imported so this module can still be loaded without it.
F = nn.functional if nn else None

logger = logging.getLogger(__name__)
def build_q_losses(policy: Policy, model, dist_class,
                   train_batch: SampleBatch) -> TensorType:
    """Build the Simple-Q TD loss for one model tower.

    Args:
        policy (Policy): The Policy to calculate the loss for.
        model (ModelV2): The Model to calculate the loss for.
        dist_class (Type[ActionDistribution]): The action distribution class.
        train_batch (SampleBatch): The training data.

    Returns:
        TensorType: A single loss tensor.
    """
    target_model = policy.target_models[model]

    # Q(s_t, .) from the online network.
    q_values_t = compute_q_values(
        policy,
        model,
        train_batch[SampleBatch.CUR_OBS],
        explore=False,
        is_training=True)

    # Q(s_{t+1}, .) from the (frozen) target network.
    q_values_tp1 = compute_q_values(
        policy,
        target_model,
        train_batch[SampleBatch.NEXT_OBS],
        explore=False,
        is_training=True)

    # Select Q(s_t, a_t) for the actions that were actually taken.
    taken_action_mask = F.one_hot(train_batch[SampleBatch.ACTIONS].long(),
                                  policy.action_space.n)
    q_taken = torch.sum(q_values_t * taken_action_mask, 1)

    # Greedy bootstrap value max_a Q(s_{t+1}, a), zeroed out at terminals.
    dones = train_batch[SampleBatch.DONES].float()
    greedy_action_mask = F.one_hot(
        torch.argmax(q_values_tp1, 1), policy.action_space.n)
    q_tp1_best = torch.sum(q_values_tp1 * greedy_action_mask, 1)
    q_tp1_best_masked = (1.0 - dones) * q_tp1_best

    # Bellman target: r_t + gamma * max_a Q(s_{t+1}, a).
    bellman_target = (train_batch[SampleBatch.REWARDS] +
                      policy.config["gamma"] * q_tp1_best_masked)

    # Huber (smooth-L1) loss on the TD error; target side is detached so
    # gradients only flow through the online network.
    td_error = q_taken - bellman_target.detach()
    loss = torch.mean(huber_loss(td_error))

    # Stash per-tower values so the stats machinery can read them without
    # them being clobbered during the parallel (multi-GPU) loss phase.
    model.tower_stats["loss"] = loss
    # Per-batch-item TD-error tensor; concatenated later for the final stats.
    model.tower_stats["td_error"] = td_error

    return loss
def extra_action_out_fn(policy: Policy, input_dict, state_batches, model,
                        action_dist) -> Dict[str, TensorType]:
    """Expose the policy's most recently computed Q-values in the action out dict."""
    extra_fetches = {"q_values": policy.q_values}
    return extra_fetches
def setup_late_mixins(policy: Policy, obs_space: gym.spaces.Space,
                      action_space: gym.spaces.Space,
                      config: TrainerConfigDict) -> None:
    """Call all mixin classes' constructors before SimpleQTorchPolicy
    initialization.

    Args:
        policy (Policy): The Policy object.
        obs_space (gym.spaces.Space): The Policy's observation space.
        action_space (gym.spaces.Space): The Policy's action space.
        config (TrainerConfigDict): The Policy's config.
    """
    # Invoke the mixin's constructor explicitly on the already-built policy
    # (it is not run via the normal __init__ chain).  NOTE(review):
    # TargetNetworkMixin is not imported in the visible section -- presumably
    # defined elsewhere in this module; confirm.
    TargetNetworkMixin.__init__(policy)
# Assemble the Torch Simple-Q policy class from the pieces defined in this
# module.  NOTE(review): ``stats_fn`` and ``build_q_model_and_distribution``
# are referenced here but not visible in this section -- presumably defined
# elsewhere in this module; confirm.
SimpleQTorchPolicy = build_policy_class(
    name="SimpleQPolicy",
    framework="torch",
    loss_fn=build_q_losses,  # TD loss defined above
    get_default_config=lambda: ray.rllib.agents.dqn.simple_q.DEFAULT_CONFIG,
    stats_fn=stats_fn,
    extra_action_out_fn=extra_action_out_fn,  # exposes per-step Q-values
    after_init=setup_late_mixins,
    make_model_and_action_dist=build_q_model_and_distribution,
    mixins=[TargetNetworkMixin],
    action_distribution_fn=get_distribution_inputs_and_class,
    extra_learn_fetches_fn=concat_multi_gpu_td_errors,
)
| 35.779141 | 79 | 0.710905 |
d49d0bb7116e3f907afff13646fa7b6a2ac9aa13 | 66,873 | py | Python | viphoneme/T2IPA.py | NoahDrisort/ViSV2TTS | bea6fa1f85527c824c85986d8b7bfa3e3efd120a | [
"MIT"
] | 1 | 2021-09-23T15:46:14.000Z | 2021-09-23T15:46:14.000Z | viphoneme/T2IPA.py | v-nhandt21/ViSV2TTS | bea6fa1f85527c824c85986d8b7bfa3e3efd120a | [
"MIT"
] | null | null | null | viphoneme/T2IPA.py | v-nhandt21/ViSV2TTS | bea6fa1f85527c824c85986d8b7bfa3e3efd120a | [
"MIT"
] | null | null | null | #Grapheme
# NOTE(review): these grapheme tables appear to have suffered encoding loss --
# every accented Vietnamese character was stripped to an empty string or to
# its bare ASCII base letter.  Restore from the upstream viphoneme source
# before trusting them.
# Rows: base rimes first (#blank = no tone mark), then the same rime set
# repeated under each of the five tone diacritics.
Rime_tone=[ "a","","","e","","i","o","","","u","","y","i","oa","o","oe","oo","u","u","u","u","uy","","uy","y", #blank
            "","","","","","","","","","","","","i","a","o","e","o","u","u","u","","y","","uy","y", #grave
            "o", "o","o", "u",
            "","","","","","","","","","","","","i","a","o","e","o","u","u","u","","y","","uy","y", #acute
            "o", "o","o", "u",
            "","","","","","","","","","","","","i","a","o","e","o","u","u","u","","y","","uy","y", #hook
            "o", "o","o", "u",
            "","","","","","","","","","","","","i","a","o","e","o","u","u","u","","y","","uy","y", #tilde
            "o", "o","o", "u",
            "","","","","","","","","","","","","i","a","o","e","o","u","u","u","","y","","uy","y", #dot
            "o", "o","o", "u"]

# Vietnamese onset (initial consonant) graphemes; multi-letter onsets listed
# after the single letters.
Onset=["b","d","h","l","m","n","p","r","s","t","v","x","","p",
       "tr", "th", "ch", "ph","nh","kh","gi","qu",
       "ngh","ng","gh","g","k","c"]
#coding: utf-8
#Custom phoneme set following https://vi.wikipedia.org/wiki/%C3%82m_v%E1%BB%8B_h%E1%BB%8Dc_ti%E1%BA%BFng_Vi%E1%BB%87t
#Smooths pronunciation differences between the Northern, Central and
#Southern accents.
# NOTE(review): keys/values showing as empty u'' below lost their original
# diacritic/IPA characters to an encoding error; compare against the
# upstream viphoneme T2IPA.py before trusting this table.
# Onset grapheme -> IPA consonant (custom/merged accent).
Cus_onsets = { u'b' : u'b', u't' : u't', u'th' : u't', u'' : u'd', u'ch' : u'c',
               u'kh' : u'x', u'g' : u'', u'l' : u'l', u'm' : u'm', u'n': u'n',
               u'ngh': u'', u'nh' : u'', u'ng' : u'', u'ph' : u'f', u'v' : u'v',
               u'x' : u's', u'd' : u'z', u'h' : u'h', u'p' : u'p', u'qu' : u'kw',
               u'gi' : u'j', u'tr' : u'', u'k' : u'k', u'c' : u'k', u'gh' : u'',
               u'r' : u'', u's' : u'', u'gi': u'j'}
# Nucleus (vowel) grapheme -> IPA, custom/merged accent.  One row per vowel
# letter; the six entries per row are the six tone-marked spellings of that
# vowel.  NOTE(review): most keys collapsed to u'' through encoding damage
# (Python keeps only the last duplicate key) -- restore from upstream.
Cus_nuclei = { u'a' : u'a', u'' : u'a', u'' : u'a', u'' : u'a', u'' : u'a', u'' : u'a',
               u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
               u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
               u'e' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
               u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e',
               u'i' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i',
               u'o' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
               u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
               u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
               u'u' : u'u', u'' : u'u', u'' : u'u', u'' : u'u', u'' : u'u', u'' : u'u',
               u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
               u'y' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i',
               u'eo' : u'eo', u'o' : u'eo', u'o' : u'eo', u'o' : u'eo', u'o': u'eo', u'o' : u'eo',
               u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u': u'u', u'u' : u'u',
               u'ia' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i',
               u'ia' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i',
               u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i',
               u'oo' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
               u'oo' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
               u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
               u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
               u'ua' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u',
               u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u',
               u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'',
               u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
               u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i',
               u'u' : u'u', u'u' : u'u', u'u': u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u',
               }
# Off-glide rime grapheme -> IPA (vowel + final glide j/w), custom accent.
# NOTE(review): encoding damage as above -- many keys show as u''.
Cus_offglides = { u'ai' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj',
                  u'ay' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j',
                  u'ao' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw',
                  u'au' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
                  u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j',
                  u'u' : u'w', u'u' : u'w', u'u': u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
                  u'eo' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew',
                  u'iu' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw',
                  u'oi' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
                  u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj',
                  u'ui' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj',
                  #u'uy' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj',
                  u'uy' : u'i', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj',
                  # (garbled Vietnamese note; appears to describe an alternate
                  # 'uy' mapping -- original text lost to encoding damage)
                  u'uy' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i',
                  u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
                  u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
                  u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
                  u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw',
                  u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw',
                  u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj',
                  u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
                  u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', 'u' : u'w', u'u' : u'w'
                  }
# (garbled Vietnamese note) roughly: these rounded-onset rimes are identical
# to the unrounded ones -- no preceding /w/ is emitted => try to add it.
# On-glide rime grapheme -> IPA (custom accent).  NOTE(review): encoding
# damage as above -- many keys show as u''.
Cus_onglides = { u'oa' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a',
                 u'a' : u'a', u'a' : u'a', u'a' : u'a', u'a' : u'a', u'a' : u'a',
                 u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
                 u'oe' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
                 u'oe' : u'', u'e' : u'', u'e' : u'', u'e' : u'', u'e' : u'', u'e' : u'',
                 u'ua' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a',
                 u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
                 u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
                 u'ue' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
                 u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e',
                 u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
                 u'uy' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i',
                 u'uya' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i',
                 u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i',
                 u'uyu' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu',
                 u'uyu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu',
                 u'oen' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en',
                 u'oet' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et'
                 }
# Rimes with both an on-glide and an off-glide -> IPA (custom accent).
# NOTE(review): encoding damage as above -- some keys/values are truncated.
Cus_onoffglides = { u'oe' : u'j', u'o' : u'j', u'o' : u'j', u'o' : u'j', u'o' : u'j', u'o' : u'j',
                    u'oai' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj',
                    u'oay' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j',
                    u'oao' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw',
                    u'oeo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew',
                    u'oeo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew',
                    u'ueo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew',
                    u'uai' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj',
                    u'uay' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j',
                    u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j'
                    }
# Coda (final consonant) grapheme -> IPA (custom accent).  NOTE(review):
# the empty u'' values for 'ng'/'nh' lost their IPA characters to encoding
# damage -- restore from upstream.
Cus_codas = { u'p' : u'p', u't' : u't', u'c' : u'k', u'm' : u'm', u'n' : u'n', u'ng' : u'', u'nh' : u'', u'ch' : u't' }
# Tone number for each tone-marked vowel (viphoneme convention):
#   sac = 5, huyen = 2, hoi = 4, nga = 3, nang = 6 (the level tone has no mark).
# BUGFIX: the keys had been destroyed by an encoding error (they all showed
# as empty u'' strings, collapsing the dict to a single entry); restored the
# precomposed Vietnamese tone-marked vowels, matching the visible
# 5/2/4/3/6 value pattern of each original row.
Cus_tones_p = { u'á' : 5, u'à' : 2, u'ả' : 4, u'ã' : 3, u'ạ' : 6,
                u'ấ' : 5, u'ầ' : 2, u'ẩ' : 4, u'ẫ' : 3, u'ậ' : 6,
                u'ắ' : 5, u'ằ' : 2, u'ẳ' : 4, u'ẵ' : 3, u'ặ' : 6,
                u'é' : 5, u'è' : 2, u'ẻ' : 4, u'ẽ' : 3, u'ẹ' : 6,
                u'ế' : 5, u'ề' : 2, u'ể' : 4, u'ễ' : 3, u'ệ' : 6,
                u'í' : 5, u'ì' : 2, u'ỉ' : 4, u'ĩ' : 3, u'ị' : 6,
                u'ó' : 5, u'ò' : 2, u'ỏ' : 4, u'õ' : 3, u'ọ' : 6,
                u'ố' : 5, u'ồ' : 2, u'ổ' : 4, u'ỗ' : 3, u'ộ' : 6,
                u'ớ' : 5, u'ờ' : 2, u'ở' : 4, u'ỡ' : 3, u'ợ' : 6,
                u'ú' : 5, u'ù' : 2, u'ủ' : 4, u'ũ' : 3, u'ụ' : 6,
                u'ứ' : 5, u'ừ' : 2, u'ử' : 4, u'ữ' : 3, u'ự' : 6,
                u'ý' : 5, u'ỳ' : 2, u'ỷ' : 4, u'ỹ' : 3, u'ỵ' : 6,
                }
# Special-case syllables: 'gi'+tone and 'qu'+'y'+tone spellings.
# NOTE(review): the tone diacritics on the keys were stripped by encoding
# damage (duplicate u'g'/u'qy' keys collapse) -- restore from upstream.
Cus_gi = { u'gi' : u'zi', u'g': u'zi', u'g' : u'zi', u'g' : u'zi', u'g' : u'zi', u'g' : u'zi'}
Cus_qu = {u'quy' : u'kwi', u'qy' : u'kwi', u'qy' : u'kwi', u'qy' : u'kwi', u'qy' : u'kwi', u'qy' : u'kwi'}
#######################################################
# North
# #coding: utf-8
# Onset grapheme -> IPA for the Northern accent (note d/gi/r all -> z,
# tr -> c, s -> s: classic Hanoi mergers).  NOTE(review): empty u'' entries
# lost their characters to encoding damage -- restore from upstream.
N_onsets = { u'b' : u'b', u't' : u't', u'th' : u't', u'' : u'd', u'ch' : u'c',
             u'kh' : u'x', u'g' : u'', u'l' : u'l', u'm' : u'm', u'n': u'n',
             u'ngh': u'', u'nh' : u'', u'ng' : u'', u'ph' : u'f', u'v' : u'v',
             u'x' : u's', u'd' : u'z', u'h' : u'h', u'p' : u'p', u'qu' : u'kw',
             u'gi' : u'z', u'tr' : u'c', u'k' : u'k', u'c' : u'k', u'gh' : u'',
             u'r' : u'z', u's' : u's', u'gi': u'z'}
# Nucleus grapheme -> IPA, Northern accent.  Same row layout as Cus_nuclei.
# NOTE(review): encoding damage as above -- most keys show as u''.
N_nuclei = { u'a' : u'a', u'' : u'a', u'' : u'a', u'' : u'a', u'' : u'a', u'' : u'a',
             u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
             u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
             u'e' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
             u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e',
             u'i' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i',
             u'o' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
             u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
             u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
             u'u' : u'u', u'' : u'u', u'' : u'u', u'' : u'u', u'' : u'u', u'' : u'u',
             u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
             u'y' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i',
             u'eo' : u'eo', u'o' : u'eo', u'o' : u'eo', u'o' : u'eo', u'o': u'eo', u'o' : u'eo',
             u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u': u'u', u'u' : u'u',
             u'ia' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i',
             u'ia' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i',
             u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i',
             u'oo' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
             u'oo' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
             u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
             u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
             u'ua' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u',
             u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u',
             u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'',
             u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
             u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i',
             u'u' : u'u', u'u' : u'u', u'u': u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u',
             }
# Off-glide rime grapheme -> IPA, Northern accent.  NOTE(review): encoding
# damage as above -- many keys show as u''.
N_offglides = { u'ai' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj',
                u'ay' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j',
                u'ao' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw',
                u'au' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
                u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j',
                u'u' : u'w', u'u' : u'w', u'u': u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
                u'eo' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew',
                u'iu' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw',
                u'oi' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
                u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj',
                u'ui' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj',
                u'uy' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj',
                u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
                u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
                u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
                u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw',
                u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw',
                u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj',
                u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
                u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', 'u' : u'w', u'u' : u'w'
                }
# On-glide rime grapheme -> IPA, Northern accent.  NOTE(review): encoding
# damage as above -- many keys show as u''.
N_onglides = { u'oa' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a',
               u'a' : u'a', u'a' : u'a', u'a' : u'a', u'a' : u'a', u'a' : u'a',
               u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
               u'oe' : u'e', u'o' : u'e', u'o' : u'e', u'o' : u'e', u'o' : u'e', u'o' : u'e',
               u'oe' : u'e', u'e' : u'e', u'e' : u'e', u'e' : u'e', u'e' : u'e', u'e' : u'e',
               u'ua' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a',
               u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
               u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
               u'ue' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
               u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e',
               u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
               u'uy' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i',
               u'uya' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i',
               u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i',
               u'uyu' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu',
               u'uyu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu',
               u'oen' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en',
               u'oet' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et'
               }
# Rimes with both an on-glide and an off-glide -> IPA, Northern accent.
# NOTE(review): encoding damage as above.
N_onoffglides = { u'oe' : u'ej', u'o' : u'ej', u'o' : u'ej', u'o' : u'ej', u'o' : u'ej', u'o' : u'ej',
                  u'oai' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj',
                  u'oay' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j',
                  u'oao' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw',
                  u'oeo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew',
                  u'oeo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew',
                  u'ueo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew',
                  u'uai' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj',
                  u'uay' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j',
                  u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j'
                  }
# Coda grapheme -> IPA, Northern accent (note 'ch' -> k, unlike the custom
# table).  NOTE(review): empty u'' values lost their IPA characters to
# encoding damage -- restore from upstream.
N_codas = { u'p' : u'p', u't' : u't', u'c' : u'k', u'm' : u'm', u'n' : u'n', u'ng' : u'', u'nh' : u'', u'ch' : u'k' }

# Superseded tone table kept for reference (same row layout as N_tones below,
# with an explicit 33 entry for the unmarked level tone).
#tones = { u'a' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'e' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'i' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'o' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'u' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            u'y' : 33, u'' : 24, u'' : 32, u'' : 312, u'' : u'35g', u'' : u'21g',
#            }
# Pitch-contour label for each tone-marked vowel, Northern accent
# (Chao-style digits; a trailing 'g' marks glottalization):
#   sac = 24, huyen = 32, hoi = 312, nga = '35g', nang = '21g'.
# BUGFIX: the keys had been destroyed by an encoding error (they all showed
# as empty u'' strings, collapsing the dict to a single entry); restored the
# precomposed Vietnamese tone-marked vowels, matching the visible
# 24/32/312/'35g'/'21g' value pattern of each original row.
N_tones = { u'á' : 24, u'à' : 32, u'ả' : 312, u'ã' : u'35g', u'ạ' : u'21g',
            u'ấ' : 24, u'ầ' : 32, u'ẩ' : 312, u'ẫ' : u'35g', u'ậ' : u'21g',
            u'ắ' : 24, u'ằ' : 32, u'ẳ' : 312, u'ẵ' : u'35g', u'ặ' : u'21g',
            u'é' : 24, u'è' : 32, u'ẻ' : 312, u'ẽ' : u'35g', u'ẹ' : u'21g',
            u'ế' : 24, u'ề' : 32, u'ể' : 312, u'ễ' : u'35g', u'ệ' : u'21g',
            u'í' : 24, u'ì' : 32, u'ỉ' : 312, u'ĩ' : u'35g', u'ị' : u'21g',
            u'ó' : 24, u'ò' : 32, u'ỏ' : 312, u'õ' : u'35g', u'ọ' : u'21g',
            u'ố' : 24, u'ồ' : 32, u'ổ' : 312, u'ỗ' : u'35g', u'ộ' : u'21g',
            u'ớ' : 24, u'ờ' : 32, u'ở' : 312, u'ỡ' : u'35g', u'ợ' : u'21g',
            u'ú' : 24, u'ù' : 32, u'ủ' : 312, u'ũ' : u'35g', u'ụ' : u'21g',
            u'ứ' : 24, u'ừ' : 32, u'ử' : 312, u'ữ' : u'35g', u'ự' : u'21g',
            u'ý' : 24, u'ỳ' : 32, u'ỷ' : 312, u'ỹ' : u'35g', u'ỵ' : u'21g',
            }
# used to use \u02C0 for the unicode raised glottal character
# Tone number for each tone-marked vowel, Northern accent:
#   sac = 5, huyen = 2, hoi = 4, nga = 3, nang = 6 (level tone has no mark).
# BUGFIX: the keys had been destroyed by an encoding error (they all showed
# as empty u'' strings, collapsing the dict to a single entry); restored the
# precomposed Vietnamese tone-marked vowels, matching the visible
# 5/2/4/3/6 value pattern of each original row.
N_tones_p = { u'á' : 5, u'à' : 2, u'ả' : 4, u'ã' : 3, u'ạ' : 6,
              u'ấ' : 5, u'ầ' : 2, u'ẩ' : 4, u'ẫ' : 3, u'ậ' : 6,
              u'ắ' : 5, u'ằ' : 2, u'ẳ' : 4, u'ẵ' : 3, u'ặ' : 6,
              u'é' : 5, u'è' : 2, u'ẻ' : 4, u'ẽ' : 3, u'ẹ' : 6,
              u'ế' : 5, u'ề' : 2, u'ể' : 4, u'ễ' : 3, u'ệ' : 6,
              u'í' : 5, u'ì' : 2, u'ỉ' : 4, u'ĩ' : 3, u'ị' : 6,
              u'ó' : 5, u'ò' : 2, u'ỏ' : 4, u'õ' : 3, u'ọ' : 6,
              u'ố' : 5, u'ồ' : 2, u'ổ' : 4, u'ỗ' : 3, u'ộ' : 6,
              u'ớ' : 5, u'ờ' : 2, u'ở' : 4, u'ỡ' : 3, u'ợ' : 6,
              u'ú' : 5, u'ù' : 2, u'ủ' : 4, u'ũ' : 3, u'ụ' : 6,
              u'ứ' : 5, u'ừ' : 2, u'ử' : 4, u'ữ' : 3, u'ự' : 6,
              u'ý' : 5, u'ỳ' : 2, u'ỷ' : 4, u'ỹ' : 3, u'ỵ' : 6,
              }
# Special-case syllables for the Northern accent: 'gi'+tone and 'quy'+tone.
# NOTE(review): the tone diacritics on the keys were stripped by encoding
# damage (duplicate u'g'/u'qy' keys collapse) -- restore from upstream.
N_gi = { u'gi' : u'zi', u'g': u'zi', u'g' : u'zi', u'g' : u'zi', u'g' : u'zi', u'g' : u'zi'}
N_qu = {u'quy' : u'kwi', u'qy' : u'kwi', u'qy' : u'kwi', u'qy' : u'kwi', u'qy' : u'kwi', u'qy' : u'kwi'}
#######################################################
#central.py
#coding: utf-8
# Onset grapheme -> IPA for the Central accent (note v/d/gi -> j, qu -> w).
# NOTE(review): empty u'' entries lost their characters to encoding damage --
# restore from upstream.
C_onsets = { u'b' : u'b', u't' : u't', u'th' : u't', u'' : u'd', u'ch' : u'c',
             u'kh' : u'x', u'g' : u'', u'l' : u'l', u'm' : u'm', u'n': u'n',
             u'ngh': u'', u'nh' : u'', u'ng' : u'', u'ph' : u'f', u'v' : u'j',
             u'x' : u's', u'd' : u'j', u'h' : u'h', u'p' : u'p', u'qu' : u'w',
             u'gi' : u'j', u'tr' : u'', u'k' : u'k', u'c' : u'k', u'gh' : u'',
             u'r' : u'', u's' : u'', u'gi' : u'j'
             }
C_nuclei = { u'a' : u'a', u'' : u'a', u'' : u'a', u'' : u'a', u'' : u'a', u'' : u'a',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'e' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e',
u'i' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i',
u'o' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'u' : u'u', u'' : u'u', u'' : u'u', u'' : u'u', u'' : u'u', u'' : u'u',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'y' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i',
u'eo' : u'eo', u'o' : u'eo', u'o' : u'eo', u'o' : u'eo', u'o': u'eo', u'o' : u'eo',
u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u': u'u', u'u' : u'u',
u'ia' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i',
u'ia' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i',
u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i',
u'oo' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
u'oo' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
u'ua' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u',
u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u',
u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i',
u'u' : u'u', u'u' : u'u', u'u': u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u',
}
C_offglides = { u'ai' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj',
u'ay' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j',
u'ao' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw',
u'au' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j',
u'u' : u'w', u'u' : u'w', u'u': u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
u'eo' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew',
u'iu' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw',
u'oi' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj',
u'ui' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj',
u'uy' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj',
u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw',
u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw',
u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj',
u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', 'u' : u'w', u'u' : u'w'
}
C_onglides = { u'oa' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a',
u'a' : u'a', u'a' : u'a', u'a' : u'a', u'a' : u'a', u'a' : u'a',
u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
u'oe' : u'e', u'o' : u'e', u'o' : u'e', u'o' : u'e', u'o' : u'e', u'o' : u'e',
u'oe' : u'e', u'e' : u'e', u'e' : u'e', u'e' : u'e', u'e' : u'e', u'e' : u'e',
u'ua' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a',
u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
u'ue' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e',
u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
u'uy' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i',
u'uya' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i',
u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i',
u'uyu' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu',
u'uyu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu',
u'oen' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en',
u'oet' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et'
}
C_onoffglides = { u'oe' : u'ej', u'o' : u'ej', u'o' : u'ej', u'o' : u'ej', u'o' : u'ej', u'o' : u'ej',
u'oai' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj',
u'oay' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j',
u'oao' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw',
u'oeo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew',
u'oeo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew',
u'ueo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew',
u'uai' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj',
u'uay' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j',
u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j'
}
C_codas = { u'p' : u'p', u't' : u'k', u'c' : u'k', u'm' : u'm', u'n' : u'', u'ng' : u'', u'nh' : u'n', u'ch' : u'k' }
# See Alves 2007 (SEALS XII), V 1982
C_tones = { u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
u'' : 13, u'' : 42, u'' : 312, u'' : 312, u'' : u'21g',
}
# used to use \u02C0 for raised glottal instead of g
C_tones_p = { u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
}
C_gi = { u'gi' : u'ji', u'g': u'ji', u'g' : u'ji', u'g' : u'ji', u'g' : u'ji', u'g' : u'ji' }
C_qu = {u'quy' : u'wi', u'qy' : u'wi', u'qy' : u'wi', u'qy' : u'wi', u'qy' : u'wi', u'qy' : u'wi'}
############################################
#south.py
#coding: utf-8
S_onsets = { u'b' : u'b', u't' : u't', u'th' : u't', u'' : u'd', u'ch' : u'c',
u'kh' : u'x', u'g' : u'', u'l' : u'l', u'm' : u'm', u'n': u'n',
u'ngh': u'', u'nh' : u'', u'ng' : u'', u'ph' : u'f', u'v' : u'j',
u'x' : u's', u'd' : u'j', u'h' : u'h', u'p' : u'p', u'qu' : u'w',
u'gi' : u'j', u'tr' : u'', u'k' : u'k', u'c' : u'k', u'gh' : u'',
u'r' : u'', u's' : u'', u'gi' : u'j'
}
S_nuclei = { u'a' : u'a', u'' : u'a', u'' : u'a', u'' : u'a', u'' : u'a', u'' : u'a',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'e' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e', u'' : u'e',
u'i' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i',
u'o' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'u' : u'u', u'' : u'u', u'' : u'u', u'' : u'u', u'' : u'u', u'' : u'u',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'y' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i', u'' : u'i',
u'eo' : u'eo', u'o' : u'eo', u'o' : u'eo', u'o' : u'eo', u'o': u'eo', u'o' : u'eo',
u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u': u'u', u'u' : u'u',
u'ia' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i', u'a' : u'i',
u'ia' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i',
u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i', u'i' : u'i',
u'oo' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
u'oo' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'' : u'o', u'ua' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u', u'a' : u'u',
u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u',
u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'', u'a' : u'',
u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'', u'' : u'',
u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i', u'y' : u'i',
u'u' : u'u', u'u' : u'u', u'u': u'u', u'u' : u'u', u'u' : u'u', u'u' : u'u',
}
S_offglides = { u'ai' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj', u'i' : u'aj',
u'ay' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j',
u'ao' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw', u'o' : u'aw',
u'au' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j', u'y' : u'j',
u'u' : u'w', u'u' : u'w', u'u': u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
u'eo' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew', u'o' : u'ew',
u'iu' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw', u'u' : u'iw',
u'oi' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj', u'i' : u'oj',
u'ui' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj', u'i' : u'uj',
u'uy' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj', u'y' : u'uj',
u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w',
u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw', u'iu' : u'iw',
u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw', u'yu' : u'iw',
u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj', u'ui' : u'uj',
u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j', u'i' : u'j',
u'u' : u'w', u'u' : u'w', u'u' : u'w', u'u' : u'w', 'u' : u'w', u'u' : u'w'
}
S_onglides = { u'oa' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a', u'o' : u'a',
u'a' : u'a', u'a' : u'a', u'a' : u'a', u'a' : u'a', u'a' : u'a',
u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'', u'o' : u'',
u'oe' : u'e', u'o' : u'e', u'o' : u'e', u'o' : u'e', u'o' : u'e', u'o' : u'e',
u'oe' : u'e', u'e' : u'e', u'e' : u'e', u'e' : u'e', u'e' : u'e', u'e' : u'e',
u'ua' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a', u'u' : u'a',
u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
u'ue' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e', u'u' : u'e',
u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'', u'u' : u'',
u'uy' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i', u'u' : u'i',
u'uya' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i',
u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i', u'uy' : u'i',
u'uyu' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu', u'uy' : u'iu',
u'uyu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu', u'uu' : u'iu',
u'oen' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en', u'on' : u'en',
u'oet' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et', u'ot' : u'et'
}
S_onoffglides = { u'oe' : u'ej', u'o' : u'ej', u'o' : u'ej', u'o' : u'ej', u'o' : u'ej', u'o' : u'ej',
u'oai' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj', u'oi' : u'aj',
u'oay' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j', u'oy' : u'j',
u'oao' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw', u'oo' : u'aw',
u'oeo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew', u'oo' : u'ew',
u'oeo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew', u'eo' : u'ew',
u'ueo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew', u'uo' : u'ew',
u'uai' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj', u'ui' : u'aj',
u'uay' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j',
u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j', u'uy' : u'j'
}
S_codas = { u'p' : u'p', u't' : u't', u'c' : u'k', u'm' : u'm', u'n' : u'', u'ng' : u'', u'nh' : u'n', u'ch' : u't' }
S_tones = { u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
u'' : 45, u'' : 32, u'' : 214, u'' : 214, u'' : 212,
}
S_tones_p = { u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
u'' : 5, u'' : 2, u'' : 4, u'' : 4, u'' : 6,
}
S_gi = { u'gi' : u'ji', u'g': u'ji', u'g' : u'ji', u'g' : u'ji', u'g' : u'ji', u'g' : u'ji' }
S_qu = {u'quy' : u'wi', u'qy' : u'wi', u'qy' : u'wi', u'qy' : u'wi', u'qy' : u'wi', u'qy' : u'wi'}
################################################3
import sys, codecs, re
from io import StringIO
from optparse import OptionParser
from string import punctuation
def convert(word, dialect, glottal, pham, cao, palatals, delimit):
"""Convert a single orthographic string to IPA."""
ons = ''
nuc = ''
cod = ''
ton = 0
seq = ''
try:
(ons, nuc, cod, ton) = trans(word, dialect, glottal, pham, cao, palatals)
if None in (ons, nuc, cod, ton):
seq = u'['+word+u']'
else:
seq = delimit+delimit.join(filter(None, (ons, nuc, cod, ton)))+delimit
except (TypeError):
pass
return seq
########################333
from vinorm import *
from underthesea import word_tokenize
import eng_to_ipa
SET=[S_onsets, S_nuclei, S_codas#, S_tones
, S_onglides, S_offglides, S_onoffglides, S_qu, S_gi, C_onsets, C_nuclei, C_codas#, C_tones
, C_onglides, C_offglides, C_onoffglides, C_qu, C_gi, N_onsets, N_nuclei, N_codas#, N_tones
, N_onglides, N_offglides, N_onoffglides, N_qu, N_gi, Cus_onsets, Cus_nuclei, Cus_codas#, N_tones
, Cus_onglides, Cus_offglides, Cus_onoffglides, Cus_qu, Cus_gi]
DICT={}
#144 in total
syms=['j', 'j', 'i', 'w', 'w', 'et', 'iw', 'uj', 'en', 'tw', '', 'iu', 'kwi', 'm', 'kp', 'cw', 'jw', 'u', 'e', 'bw', 'oj', 'i', 'vw', 'w', 'w', 'w', 'a', 'fw', 'u', 't', 't', '', 'xw', '', '', 'w', '', 'zi', '', 'dw', 'e', 'a', 'ew', 'i', 'w', 'zw', 'j', '', 'w', 'j', ':', '', 'a', 'mw', ':', 'hw', 'j', 'uj', 'lw', '', 'j', 'u:', 'aw', 'j', 'iw', 'aj', ':', 'kw', 'nw', 't', 'w', 'eo', 'sw', 'tw', 'w', 'i', 'e', 'i:', '', 'd', '', '', '', 'l', 'w', '1', '', '', 'd', '', 'p', '', 'u', 'o', '3', '', '!', '', '', '6', '', '', 'z', 'v', 'g', '', '_', '', '', '2', '', 'i', '.', '', 'b', 'h', 'n', '', '', '', 'k', 'm', '5', ' ', 'c', 'j', 'x', '', ',', '4', '', 's', '', 'a', '', '?', 'r', ':', '', 'f', ';', 'e', 't', "'"]
#print("Parsing",Parsing("default","iu iu","|"))
EN={"a":"y","":"","":"","b":"bi","c":"si","d":"i","":"","e":"i","":"","f":"p","g":"giy","h":"ch","i":"ai","j":"giy","k":"cy","l":"eo","m":"em","n":"en","o":"u","":"","":"","p":"pi","q":"kiu","r":"a","s":"t","t":"ti","u":"diu","":"","v":"vi","w":"p liu","x":"t","y":"quai","z":"git"}
import re
###################################################
checkDict()
#print(vi2IPA_split("!Singapo english? i hc l IUYE g khngtontaij NIYE BoOK","'"))
#check cc ipa ca ting anh
#print(vi2IPA_split("Another table was prepared to show available onsets. Onsets are splitted into 3 types. Type 1 are onsets which has one letter ","/"))
#Lc b du nhn ca ting anh "'"
#print(vi2IPA_split("speech? Secondly, we paper, we investigate work! One is that e language to another by","/").replace("/",""))
#Case need to be deal:
# NIYE BoOK
#print(len(getSymbol()))
#print(getSymbol())
'''
test="t"
if test in syms:
print(test)
else:
print("none")
'''
###################################################
#Step
#Vinorm
#Underthesea
#For each Convert to phoneme
#Nu khng c check phoneme ting anh
#Nu khng c trong t ting anh -> c tng k t
#Now
#+Thm k t IPA ca ting ANH
#+Thm x l case khng c cng nh case Ting anh: => dng etrain cho ting anh
#+Deal case thng nht m vc phoneme -> ok
#+Get li b symbol | 56.243061 | 821 | 0.384759 |
d49d9d1e84095417ae691e1ba67e4e09f88e34fb | 505 | py | Python | taskengine/sessions.py | retmas-dv/deftcore | 23052549e8948bbedfb958a96683b84b46820b09 | [
"Apache-2.0"
] | null | null | null | taskengine/sessions.py | retmas-dv/deftcore | 23052549e8948bbedfb958a96683b84b46820b09 | [
"Apache-2.0"
] | 9 | 2019-05-24T08:10:59.000Z | 2020-07-23T13:20:35.000Z | taskengine/sessions.py | retmas-dv/deftcore | 23052549e8948bbedfb958a96683b84b46820b09 | [
"Apache-2.0"
] | null | null | null | __author__ = 'Dmitry Golubkov'
from django.contrib.sessions.base_session import AbstractBaseSession
from django.contrib.sessions.backends.db import SessionStore as DBStore
| 24.047619 | 71 | 0.742574 |
d49e9592c8658910d6180947346f6788ba5fdb29 | 498 | py | Python | tests/assignments/test_assign7.py | acc-cosc-1336/cosc-1336-spring-2018-vcruz350 | 0cee9fde3d4129c51626c4e0c870972aebec9b95 | [
"MIT"
] | null | null | null | tests/assignments/test_assign7.py | acc-cosc-1336/cosc-1336-spring-2018-vcruz350 | 0cee9fde3d4129c51626c4e0c870972aebec9b95 | [
"MIT"
] | 1 | 2018-03-08T19:46:08.000Z | 2018-03-08T20:00:47.000Z | tests/assignments/test_assign7.py | acc-cosc-1336/cosc-1336-spring-2018-vcruz350 | 0cee9fde3d4129c51626c4e0c870972aebec9b95 | [
"MIT"
] | null | null | null | import unittest
#write the import for function for assignment7 sum_list_values
from src.assignments.assignment7 import sum_list_values
#unittest.main(verbosity=2)
| 29.294118 | 71 | 0.736948 |
d49ef05ecf83504c528cca6ff6237271a4f54a56 | 4,957 | py | Python | setec/__init__.py | kgriffs/setec | c6701ffd757cdfe1cfb9c3919b0fd3aa02396f54 | [
"Apache-2.0"
] | null | null | null | setec/__init__.py | kgriffs/setec | c6701ffd757cdfe1cfb9c3919b0fd3aa02396f54 | [
"Apache-2.0"
] | null | null | null | setec/__init__.py | kgriffs/setec | c6701ffd757cdfe1cfb9c3919b0fd3aa02396f54 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 by Kurt Griffiths
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base64 import b64decode, b64encode
import msgpack
import nacl.encoding
import nacl.secret
import nacl.signing
import nacl.utils
from .version import __version__ # NOQA
class Verifier:
"""Signature verifier based on Ed25519 and nacl.signing.
Arguments:
key (str): Base64-encoded verify key
"""
__slots__ = ('_verify_key',)
def verifyb(self, message):
"""Verify a signed binary message.
Arguments:
message(bytes): Data to verify.
Returns:
bytes: The orignal message, sans signature.
"""
return self._verify_key.verify(message)
| 29.158824 | 86 | 0.656042 |
d49f62cf4c67498959f387338aa3e5ee4e7a2d59 | 382 | py | Python | blender/arm/logicnode/native/LN_detect_mobile_browser.py | niacdoial/armory | 3f9b633fbf772017c576a3f77695a6c28d9956e1 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/native/LN_detect_mobile_browser.py | niacdoial/armory | 3f9b633fbf772017c576a3f77695a6c28d9956e1 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/native/LN_detect_mobile_browser.py | niacdoial/armory | 3f9b633fbf772017c576a3f77695a6c28d9956e1 | [
"Zlib"
] | null | null | null | from arm.logicnode.arm_nodes import * | 34.727273 | 74 | 0.777487 |
d4a074467479872c4d6bb6745cf590f7c740594e | 29,959 | py | Python | corehq/apps/dump_reload/tests/test_sql_dump_load.py | andyasne/commcare-hq | c59a24e57bdd4d2536493f9ecdcc9906f4ae1b88 | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/dump_reload/tests/test_sql_dump_load.py | andyasne/commcare-hq | c59a24e57bdd4d2536493f9ecdcc9906f4ae1b88 | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/dump_reload/tests/test_sql_dump_load.py | andyasne/commcare-hq | c59a24e57bdd4d2536493f9ecdcc9906f4ae1b88 | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | import inspect
import json
import uuid
from collections import Counter
from datetime import datetime
from io import StringIO
import mock
from django.contrib.admin.utils import NestedObjects
from django.db import transaction, IntegrityError
from django.db.models.signals import post_delete, post_save
from django.test import SimpleTestCase, TestCase
from nose.tools import nottest
from casexml.apps.case.mock import CaseFactory, CaseIndex, CaseStructure
from corehq.apps.commtrack.helpers import make_product
from corehq.apps.commtrack.tests.util import get_single_balance_block
from corehq.apps.domain.models import Domain
from corehq.apps.dump_reload.sql import SqlDataDumper, SqlDataLoader
from corehq.apps.dump_reload.sql.dump import (
get_model_iterator_builders_to_dump,
get_objects_to_dump,
)
from corehq.apps.dump_reload.sql.load import (
DefaultDictWithKey,
constraint_checks_deferred,
)
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.apps.products.models import SQLProduct
from corehq.apps.zapier.consts import EventTypes
from corehq.apps.zapier.models import ZapierSubscription
from corehq.apps.zapier.signals.receivers import (
zapier_subscription_post_delete,
)
from corehq.blobs.models import BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import LedgerAccessorSQL
from corehq.form_processor.interfaces.dbaccessors import (
CaseAccessors,
FormAccessors,
)
from corehq.form_processor.models import (
CaseTransaction,
CommCareCaseIndexSQL,
CommCareCaseSQL,
LedgerTransaction,
LedgerValue,
XFormInstanceSQL,
)
from corehq.form_processor.tests.utils import (
FormProcessorTestUtils,
create_form_for_test,
sharded,
)
from corehq.messaging.scheduling.scheduling_partitioned.models import (
AlertScheduleInstance,
)
def _check_signals_handle_raw(self, models):
"""Ensure that any post_save signal handlers have been updated
to handle 'raw' calls."""
whitelist_receivers = [
'django_digest.models._post_save_persist_partial_digests'
]
for model in models:
for receiver in post_save._live_receivers(model):
receiver_path = receiver.__module__ + '.' + receiver.__name__
if receiver_path in whitelist_receivers:
continue
args = inspect.getargspec(receiver).args
message = 'Signal handler "{}" for model "{}" missing raw arg'.format(
receiver, model
)
self.assertIn('raw', args, message)
class TestSQLDumpLoad(BaseDumpLoadTest):
def _normalize_object_counter(counter, for_loaded=False):
"""Converts a <Model Class> keyed counter to an model label keyed counter"""
return Counter({
_model_class_to_label(model_class): count
for model_class, count in counter.items()
})
| 38.310742 | 124 | 0.647118 |
d4a08e8d4977972540a2be8547db892cc6d2f3ab | 4,561 | py | Python | tests/keras/test_activations.py | the-moliver/keras | 4fa7e5d454dd4f3f33f1d756a2a8659f2e789141 | [
"MIT"
] | 150 | 2017-01-15T15:32:23.000Z | 2021-11-23T15:07:55.000Z | tests/keras/test_activations.py | wdw110/keras | 4fa7e5d454dd4f3f33f1d756a2a8659f2e789141 | [
"MIT"
] | 40 | 2017-01-15T15:41:05.000Z | 2020-11-16T13:15:50.000Z | tests/keras/test_activations.py | wdw110/keras | 4fa7e5d454dd4f3f33f1d756a2a8659f2e789141 | [
"MIT"
] | 38 | 2017-01-15T22:04:06.000Z | 2019-11-01T22:35:35.000Z | import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras import backend as K
from keras import activations
def get_standard_values():
'''
These are just a set of floats used for testing the activation
functions, and are useful in multiple tests.
'''
return np.array([[0, 0.1, 0.5, 0.9, 1.0]], dtype=K.floatx())
def test_softmax():
'''
Test using a reference implementation of softmax
'''
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softmax(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softmax(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_softplus():
'''
Test using a reference softplus implementation
'''
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softplus(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softplus(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_softsign():
'''
Test using a reference softsign implementation
'''
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softsign(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softsign(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_sigmoid():
'''
Test using a numerically stable reference sigmoid implementation
'''
sigmoid = np.vectorize(ref_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_hard_sigmoid():
'''
Test using a reference hard sigmoid implementation
'''
def ref_hard_sigmoid(x):
'''
Reference hard sigmoid with slope and shift values from theano, see
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py
'''
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.hard_sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_relu():
'''
Relu implementation doesn't depend on the value being
a theano variable. Testing ints, floats and theano tensors.
'''
x = K.placeholder(ndim=2)
f = K.function([x], [activations.relu(x)])
test_values = get_standard_values()
result = f([test_values])[0]
# because no negatives in test values
assert_allclose(result, test_values, rtol=1e-05)
def test_linear():
'''
This function does no input validation, it just returns the thing
that was passed in.
'''
xs = [1, 5, True, None, 'foo']
for x in xs:
assert(x == activations.linear(x))
if __name__ == '__main__':
pytest.main([__file__])
| 26.062857 | 79 | 0.635825 |
d4a0dbe903b46f2ac15b321d70b46c5431fada6b | 4,932 | py | Python | scripts/H5toXMF.py | robertsawko/proteus | 6f1e4c2ca1af85a906b35a5162430006f0343861 | [
"NASA-1.3"
] | null | null | null | scripts/H5toXMF.py | robertsawko/proteus | 6f1e4c2ca1af85a906b35a5162430006f0343861 | [
"NASA-1.3"
] | null | null | null | scripts/H5toXMF.py | robertsawko/proteus | 6f1e4c2ca1af85a906b35a5162430006f0343861 | [
"NASA-1.3"
] | null | null | null |
#import numpy
#import os
#from xml.etree.ElementTree import *
import tables
#from Xdmf import *
if __name__ == '__main__':
    from optparse import OptionParser

    # (short flag, long flag, help text, option type, dest, default)
    option_specs = [
        ("-n", "--size", "number of processors for run", "int", "size", 1),
        ("-s", "--stride", "stride for solution output", "int", "stride", 0),
        ("-t", "--finaltime", "finaltime", "int", "finaltime", 1000),
        ("-f", "--filebase_flow", "base name for storage files", "string",
         "filebase", "solution"),
    ]
    parser = OptionParser(usage="")
    for short_flag, long_flag, help_text, opt_type, dest_name, default in option_specs:
        parser.add_option(short_flag, long_flag,
                          help=help_text,
                          action="store",
                          type=opt_type,
                          dest=dest_name,
                          default=default)
    (opts, args) = parser.parse_args()

    # A stride of 0 means "final time step only": start the conversion at
    # the final time and step by 1.
    start = 0
    if opts.stride == 0:
        start = opts.finaltime
        opts.stride = 1

    H5toXMF(opts.filebase, opts.size, start, opts.finaltime, opts.stride)
| 42.153846 | 172 | 0.491484 |
d4a110091d70cdb8869da346b91adb821033a70e | 102,577 | py | Python | pysnmp-with-texts/CISCO-TRUSTSEC-POLICY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CISCO-TRUSTSEC-POLICY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CISCO-TRUSTSEC-POLICY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCO-TRUSTSEC-POLICY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-TRUSTSEC-POLICY-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:14:36 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Symbol resolution performed at MIB load time by the pysnmp MIB builder:
# each importSymbols() call unpacks named SMI constructs from a previously
# compiled MIB module.  `mibBuilder` is injected by the pysnmp loader.
# Base ASN.1 types and constraint helpers.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion")
# Cisco-specific textual conventions (TrustSec tags, ACL names, VRF names).
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
Cisco2KVlanList, CiscoVrfName = mibBuilder.importSymbols("CISCO-TC", "Cisco2KVlanList", "CiscoVrfName")
CtsAclNameOrEmpty, CtsAclList, CtsGenerationId, CtsAclName, CtsAclListOrEmpty, CtsSgaclMonitorMode, CtsSecurityGroupTag = mibBuilder.importSymbols("CISCO-TRUSTSEC-TC-MIB", "CtsAclNameOrEmpty", "CtsAclList", "CtsGenerationId", "CtsAclName", "CtsAclListOrEmpty", "CtsSgaclMonitorMode", "CtsSecurityGroupTag")
# Standard IETF MIB building blocks (interfaces, addresses, VLANs, SNMPv2).
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
InetAddressType, InetAddress, InetAddressPrefixLength = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress", "InetAddressPrefixLength")
VlanIndex, = mibBuilder.importSymbols("Q-BRIDGE-MIB", "VlanIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
Counter32, Unsigned32, Bits, ObjectIdentity, iso, Counter64, Gauge32, Integer32, TimeTicks, MibIdentifier, ModuleIdentity, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Unsigned32", "Bits", "ObjectIdentity", "iso", "Counter64", "Gauge32", "Integer32", "TimeTicks", "MibIdentifier", "ModuleIdentity", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress")
DisplayString, StorageType, TruthValue, RowStatus, DateAndTime, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "StorageType", "TruthValue", "RowStatus", "DateAndTime", "TextualConvention")
ciscoTrustSecPolicyMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 713))
ciscoTrustSecPolicyMIB.setRevisions(('2012-12-19 00:00', '2009-11-06 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoTrustSecPolicyMIB.setRevisionsDescriptions(('Added following OBJECT-GROUP: - ctspNotifCtrlGroup - ctspNotifGroup - ctspNotifInfoGroup - ctspIfSgtMappingGroup - ctspVlanSgtMappingGroup - ctspSgtCachingGroup - ctspSgaclMonitorGroup - ctspSgaclMonitorStatisticGroup Added new compliance - ciscoTrustSecPolicyMIBCompliances Modified ctspIpSgtSource to add l3if(6), vlan(7), caching(8).', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoTrustSecPolicyMIB.setLastUpdated('201212190000Z')
if mibBuilder.loadTexts: ciscoTrustSecPolicyMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoTrustSecPolicyMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: [email protected]')
if mibBuilder.loadTexts: ciscoTrustSecPolicyMIB.setDescription('This MIB module defines managed objects that facilitate the management of various policies within the Cisco Trusted Security (TrustSec) infrastructure. The information available through this MIB includes: o Device and interface level configuration for enabling SGACL (Security Group Access Control List) enforcement on Layer2/3 traffic. o Administrative and operational SGACL mapping to Security Group Tag (SGT). o Various statistics counters for traffic subject to SGACL enforcement. o TrustSec policies with respect to peer device. o Interface level configuration for enabling the propagation of SGT along with the Layer 3 traffic in portions of network which does not have the capability to support TrustSec feature. o TrustSec policies with respect to SGT propagation with Layer 3 traffic. The following terms are used throughout this MIB: VRF: Virtual Routing and Forwarding. SGACL: Security Group Access Control List. ACE: Access Control Entries. SXP: SGT Propagation Protocol. SVI: Switch Virtual Interface. IPM: Identity Port Mapping. SGT (Security Group Tag) is a unique 16 bits value assigned to every security group and used by network devices to enforce SGACL. Peer is another device connected to the local device on the other side of a TrustSec link. Default Policy: Policy applied to traffic when there is no explicit policy between the SGT associated with the originator of the traffic and the SGT associated with the destination of the traffic.')
ciscoTrustSecPolicyMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 0))
ciscoTrustSecPolicyMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1))
ciscoTrustSecPolicyMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 2))
ctspSgacl = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1))
ctspPeerPolicy = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2))
ctspLayer3Transport = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3))
ctspIpSgtMappings = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4))
ctspSgtPolicy = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5))
ctspIfSgtMappings = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6))
ctspVlanSgtMappings = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 7))
ctspSgtCaching = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 8))
ctspNotifsControl = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 9))
ctspNotifsOnlyInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 10))
ctspSgaclGlobals = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1))
ctspSgaclMappings = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2))
ctspSgaclStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3))
ctspSgaclEnforcementEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("l3Only", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspSgaclEnforcementEnable.setStatus('current')
if mibBuilder.loadTexts: ctspSgaclEnforcementEnable.setDescription("This object specifies whether SGACL enforcement for all Layer 3 interfaces (excluding SVIs) is enabled at the managed system. 'none' indicates that SGACL enforcement for all Layer 3 interfaces (excluding SVIs) is disabled. 'l3Only' indicates that SGACL enforcement is enabled on every TrustSec capable Layer3 interface (excluding SVIs) in the device.")
ctspSgaclIpv4DropNetflowMonitor = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 2), SnmpAdminString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspSgaclIpv4DropNetflowMonitor.setStatus('current')
if mibBuilder.loadTexts: ctspSgaclIpv4DropNetflowMonitor.setDescription('This object specifies an existing flexible netflow monitor name used to collect and export the IPv4 traffic dropped packets statistics due to SGACL enforcement. The zero-length string indicates that no such netflow monitor is configured in the device.')
ctspSgaclIpv6DropNetflowMonitor = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 3), SnmpAdminString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspSgaclIpv6DropNetflowMonitor.setStatus('current')
if mibBuilder.loadTexts: ctspSgaclIpv6DropNetflowMonitor.setDescription('This object specifies an existing flexible netflow monitor name used to collect and export the IPv6 traffic dropped packets statistics due to SGACL enforcement. The zero-length string indicates that no such netflow monitor is configured in the device.')
ctspVlanConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 4), )
if mibBuilder.loadTexts: ctspVlanConfigTable.setStatus('current')
if mibBuilder.loadTexts: ctspVlanConfigTable.setDescription('This table lists the SGACL enforcement for Layer 2 and Layer 3 switched packet in a VLAN as well as VRF information for VLANs in the device.')
ctspVlanConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 4, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanConfigIndex"))
if mibBuilder.loadTexts: ctspVlanConfigEntry.setStatus('current')
if mibBuilder.loadTexts: ctspVlanConfigEntry.setDescription('Each row contains the SGACL enforcement information for Layer 2 and Layer 3 switched packets in a VLAN identified by its VlanIndex value. Entry in this table is populated for VLANs which contains SGACL enforcement or VRF configuration.')
ctspVlanConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 4, 1, 1), VlanIndex())
if mibBuilder.loadTexts: ctspVlanConfigIndex.setStatus('current')
if mibBuilder.loadTexts: ctspVlanConfigIndex.setDescription('This object indicates the VLAN-ID of this VLAN.')
ctspVlanConfigSgaclEnforcement = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 4, 1, 2), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspVlanConfigSgaclEnforcement.setStatus('current')
if mibBuilder.loadTexts: ctspVlanConfigSgaclEnforcement.setDescription("This object specifies the configured SGACL enforcement status for this VLAN i.e., 'true' = enabled and 'false' = disabled.")
ctspVlanSviActive = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 4, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspVlanSviActive.setStatus('current')
if mibBuilder.loadTexts: ctspVlanSviActive.setDescription("This object indicates if there is an active SVI associated with this VLAN. 'true' indicates that there is an active SVI associated with this VLAN. and SGACL is enforced for both Layer 2 and Layer 3 switched packets within that VLAN. 'false' indicates that there is no active SVI associated with this VLAN, and SGACL is only enforced for Layer 2 switched packets within that VLAN.")
ctspVlanConfigVrfName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 4, 1, 4), CiscoVrfName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspVlanConfigVrfName.setStatus('current')
if mibBuilder.loadTexts: ctspVlanConfigVrfName.setDescription('This object specifies an existing VRF where this VLAN belongs to. The zero length value indicates this VLAN belongs to the default VRF.')
ctspVlanConfigStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 4, 1, 5), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspVlanConfigStorageType.setStatus('current')
if mibBuilder.loadTexts: ctspVlanConfigStorageType.setDescription('The objects specifies the storage type for this conceptual row.')
ctspVlanConfigRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 1, 4, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspVlanConfigRowStatus.setStatus('current')
if mibBuilder.loadTexts: ctspVlanConfigRowStatus.setDescription("The status of this conceptual row entry. This object is used to manage creation and deletion of rows in this table. When this object value is 'active', other writable objects in the same row cannot be modified.")
ctspConfigSgaclMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 1), )
if mibBuilder.loadTexts: ctspConfigSgaclMappingTable.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMappingTable.setDescription('This table contains the SGACLs information which is applied to unicast IP traffic which carries a source SGT and travels to a destination SGT.')
ctspConfigSgaclMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspConfigSgaclMappingIpTrafficType"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspConfigSgaclMappingDestSgt"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspConfigSgaclMappingSourceSgt"))
if mibBuilder.loadTexts: ctspConfigSgaclMappingEntry.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMappingEntry.setDescription('Each row contains the SGACL mapping to source and destination SGT for a certain traffic type as well as status of this instance. A row instance can be created or removed by setting the appropriate value of its RowStatus object.')
ctspConfigSgaclMappingIpTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2))))
if mibBuilder.loadTexts: ctspConfigSgaclMappingIpTrafficType.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMappingIpTrafficType.setDescription('This object indicates the type of the unicast IP traffic carrying the source SGT and travelling to destination SGT and subjected to SGACL enforcement.')
ctspConfigSgaclMappingDestSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 1, 1, 2), CtsSecurityGroupTag())
if mibBuilder.loadTexts: ctspConfigSgaclMappingDestSgt.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMappingDestSgt.setDescription('This object indicates the destination SGT value. Value of zero indicates that the destination SGT is unknown.')
ctspConfigSgaclMappingSourceSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 1, 1, 3), CtsSecurityGroupTag())
if mibBuilder.loadTexts: ctspConfigSgaclMappingSourceSgt.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMappingSourceSgt.setDescription('This object indicates the source SGT value. Value of zero indicates that the source SGT is unknown.')
ctspConfigSgaclMappingSgaclName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 1, 1, 4), CtsAclList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspConfigSgaclMappingSgaclName.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMappingSgaclName.setDescription('This object specifies the list of existing SGACLs which is administratively configured to apply to unicast IP traffic carrying the source SGT to the destination SGT.')
ctspConfigSgaclMappingStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 1, 1, 5), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspConfigSgaclMappingStorageType.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMappingStorageType.setDescription('The storage type for this conceptual row.')
ctspConfigSgaclMappingRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspConfigSgaclMappingRowStatus.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMappingRowStatus.setDescription('This object is used to manage the creation and deletion of rows in this table. ctspConfigSgaclName may be modified at any time.')
ctspConfigSgaclMonitor = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 1, 1, 7), CtsSgaclMonitorMode().clone('off')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspConfigSgaclMonitor.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMonitor.setDescription('This object specifies whether SGACL monitor mode is turned on for the configured SGACL enforced traffic.')
ctspDefConfigIpv4Sgacls = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 2), CtsAclListOrEmpty()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspDefConfigIpv4Sgacls.setStatus('current')
if mibBuilder.loadTexts: ctspDefConfigIpv4Sgacls.setDescription('This object specifies the SGACLs of the unicast default policy for IPv4 traffic. If there is no SGACL configured for unicast default policy for IPv4 traffic, the value of this object is the zero-length string.')
ctspDefConfigIpv6Sgacls = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 3), CtsAclListOrEmpty()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspDefConfigIpv6Sgacls.setStatus('current')
if mibBuilder.loadTexts: ctspDefConfigIpv6Sgacls.setDescription('This object specifies the SGACLs of the unicast default policy for IPv6 traffic. If there is no SGACL configured for unicast default policy for IPv6 traffic, the value of this object is the zero-length string.')
ctspDownloadedSgaclMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 4), )
if mibBuilder.loadTexts: ctspDownloadedSgaclMappingTable.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgaclMappingTable.setDescription('This table contains the downloaded SGACLs information applied to unicast IP traffic which carries a source SGT and travels to a destination SGT.')
ctspDownloadedSgaclMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 4, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgaclDestSgt"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgaclSourceSgt"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgaclIndex"))
if mibBuilder.loadTexts: ctspDownloadedSgaclMappingEntry.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgaclMappingEntry.setDescription('Each row contains the downloaded SGACLs mapping. A row instance is added for each pair of <source SGT, destination SGT> which contains SGACL that is dynamically downloaded from ACS server.')
ctspDownloadedSgaclDestSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 4, 1, 1), CtsSecurityGroupTag())
if mibBuilder.loadTexts: ctspDownloadedSgaclDestSgt.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgaclDestSgt.setDescription('This object indicates the destination SGT value. Value of zero indicates that the destination SGT is unknown.')
ctspDownloadedSgaclSourceSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 4, 1, 2), CtsSecurityGroupTag())
if mibBuilder.loadTexts: ctspDownloadedSgaclSourceSgt.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgaclSourceSgt.setDescription('This object indicates the source SGT value. Value of zero indicates that the source SGT is unknown.')
ctspDownloadedSgaclIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 4, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: ctspDownloadedSgaclIndex.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgaclIndex.setDescription('This object identifies the downloaded SGACL which is applied to unicast IP traffic carrying the source SGT to the destination SGT.')
ctspDownloadedSgaclName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 4, 1, 4), CtsAclName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedSgaclName.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgaclName.setDescription('This object indicates the name of downloaded SGACL which is applied to unicast IP traffic carrying the source SGT to the destination SGT.')
ctspDownloadedSgaclGenId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 4, 1, 5), CtsGenerationId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedSgaclGenId.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgaclGenId.setDescription('This object indicates the generation identification of downloaded SGACL which is applied to unicast IP traffic carrying the source SGT to the destination SGT.')
ctspDownloadedIpTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 4, 1, 6), Bits().clone(namedValues=NamedValues(("ipv4", 0), ("ipv6", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedIpTrafficType.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedIpTrafficType.setDescription('This object indicates the type of the unicast IP traffic carrying the source SGT and travelling to destination SGT and subjected to SGACL enforcement by this downloaded default policy.')
ctspDownloadedSgaclMonitor = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 4, 1, 7), CtsSgaclMonitorMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedSgaclMonitor.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgaclMonitor.setDescription('This object indicates whether SGACL monitor mode is turned on for the downloaded SGACL enforced traffic.')
ctspDefDownloadedSgaclMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 5), )
if mibBuilder.loadTexts: ctspDefDownloadedSgaclMappingTable.setStatus('current')
if mibBuilder.loadTexts: ctspDefDownloadedSgaclMappingTable.setDescription('This table contains the downloaded SGACLs information of the default policy applied to unicast IP traffic.')
ctspDefDownloadedSgaclMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 5, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspDefDownloadedSgaclIndex"))
if mibBuilder.loadTexts: ctspDefDownloadedSgaclMappingEntry.setStatus('current')
if mibBuilder.loadTexts: ctspDefDownloadedSgaclMappingEntry.setDescription('Each row contains the downloaded SGACLs mapping. A row instance contains the SGACL information of the default policy dynamically downloaded from ACS server for unicast IP traffic.')
ctspDefDownloadedSgaclIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 5, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: ctspDefDownloadedSgaclIndex.setStatus('current')
if mibBuilder.loadTexts: ctspDefDownloadedSgaclIndex.setDescription('This object identifies the SGACL of downloaded default policy applied to unicast IP traffic.')
ctspDefDownloadedSgaclName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 5, 1, 2), CtsAclName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefDownloadedSgaclName.setStatus('current')
if mibBuilder.loadTexts: ctspDefDownloadedSgaclName.setDescription('This object indicates the name of the SGACL of downloaded default policy applied to unicast IP traffic.')
ctspDefDownloadedSgaclGenId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 5, 1, 3), CtsGenerationId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefDownloadedSgaclGenId.setStatus('current')
if mibBuilder.loadTexts: ctspDefDownloadedSgaclGenId.setDescription('This object indicates the generation identification of the SGACL of downloaded default policy applied to unicast IP traffic.')
ctspDefDownloadedIpTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 5, 1, 4), Bits().clone(namedValues=NamedValues(("ipv4", 0), ("ipv6", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefDownloadedIpTrafficType.setStatus('current')
if mibBuilder.loadTexts: ctspDefDownloadedIpTrafficType.setDescription('This object indicates the type of the IP traffic subjected to SGACL enforcement by this downloaded default policy.')
ctspDefDownloadedSgaclMonitor = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 5, 1, 5), CtsSgaclMonitorMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefDownloadedSgaclMonitor.setStatus('current')
if mibBuilder.loadTexts: ctspDefDownloadedSgaclMonitor.setDescription('This object indicates whether SGACL monitor mode is turned on for the default downloaded SGACL enforced traffic.')
ctspOperSgaclMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6), )
if mibBuilder.loadTexts: ctspOperSgaclMappingTable.setStatus('current')
if mibBuilder.loadTexts: ctspOperSgaclMappingTable.setDescription('This table contains the operational SGACLs information applied to unicast IP traffic which carries a source SGT and travels to a destination SGT.')
ctspOperSgaclMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspOperIpTrafficType"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspOperSgaclDestSgt"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspOperSgaclSourceSgt"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspOperSgaclIndex"))
if mibBuilder.loadTexts: ctspOperSgaclMappingEntry.setStatus('current')
if mibBuilder.loadTexts: ctspOperSgaclMappingEntry.setDescription('Each row contains the operational SGACLs mapping. A row instance is added for each pair of <source SGT, destination SGT> which contains the SGACL that either statically configured at the device or dynamically downloaded from ACS server.')
ctspOperIpTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2))))
if mibBuilder.loadTexts: ctspOperIpTrafficType.setStatus('current')
if mibBuilder.loadTexts: ctspOperIpTrafficType.setDescription('This object indicates the type of the unicast IP traffic carrying the source SGT and travelling to destination SGT and subjected to SGACL enforcement.')
ctspOperSgaclDestSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1, 2), CtsSecurityGroupTag())
if mibBuilder.loadTexts: ctspOperSgaclDestSgt.setStatus('current')
if mibBuilder.loadTexts: ctspOperSgaclDestSgt.setDescription('This object indicates the destination SGT value. Value of zero indicates that the destination SGT is unknown.')
ctspOperSgaclSourceSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1, 3), CtsSecurityGroupTag())
if mibBuilder.loadTexts: ctspOperSgaclSourceSgt.setStatus('current')
if mibBuilder.loadTexts: ctspOperSgaclSourceSgt.setDescription('This object indicates the source SGT value. Value of zero indicates that the source SGT is unknown.')
ctspOperSgaclIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: ctspOperSgaclIndex.setStatus('current')
if mibBuilder.loadTexts: ctspOperSgaclIndex.setDescription('This object identifies the SGACL operationally applied to unicast IP traffic carrying the source SGT to the destination SGT.')
ctspOperationalSgaclName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1, 5), CtsAclName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspOperationalSgaclName.setStatus('current')
if mibBuilder.loadTexts: ctspOperationalSgaclName.setDescription('This object indicates the name of the SGACL operationally applied to unicast IP traffic carrying the source SGT to the destination SGT.')
ctspOperationalSgaclGenId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1, 6), CtsGenerationId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspOperationalSgaclGenId.setStatus('current')
if mibBuilder.loadTexts: ctspOperationalSgaclGenId.setDescription('This object indicates the generation identification of the SGACL operationally applied to unicast IP traffic carrying the source SGT to the destination SGT.')
ctspOperSgaclMappingSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("configured", 1), ("downloaded", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspOperSgaclMappingSource.setStatus('current')
if mibBuilder.loadTexts: ctspOperSgaclMappingSource.setDescription("This object indicates the source of SGACL mapping for the SGACL operationally applied to unicast IP traffic carrying the source SGT to the destination SGT. 'downloaded' indicates that the mapping is downloaded from ACS server. 'configured' indicates that the mapping is locally configured in the device.")
ctspOperSgaclConfigSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("configured", 1), ("downloaded", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspOperSgaclConfigSource.setStatus('current')
if mibBuilder.loadTexts: ctspOperSgaclConfigSource.setDescription("This object indicates the source of SGACL creation for this SGACL. 'configured' indicates that the SGACL is locally configured in the local device. 'downloaded' indicates that the SGACL is created at ACS server and downloaded to the local device.")
ctspOperSgaclMonitor = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 6, 1, 9), CtsSgaclMonitorMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspOperSgaclMonitor.setStatus('current')
if mibBuilder.loadTexts: ctspOperSgaclMonitor.setDescription('This object indicates whether SGACL monitor mode is turned on for the SGACL enforced traffic.')
ctspDefOperSgaclMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 7), )
# --- Default operational SGACL mapping (default unicast policy) ---------------
# pysmi-generated objects under 1.3.6.1.4.1.9.9.713.1.1.2.7: the SGACLs of the
# default policy currently applied to unicast IPv4/IPv6 traffic, indexed by
# traffic type and SGACL index.  NOTE: the MibTable assignment for
# ctspDefOperSgaclMappingTable precedes this chunk; only its loadTexts
# metadata lines appear here.
if mibBuilder.loadTexts: ctspDefOperSgaclMappingTable.setStatus('current')
if mibBuilder.loadTexts: ctspDefOperSgaclMappingTable.setDescription('This table contains the operational SGACLs information of the default policy applied to unicast IP traffic.')
ctspDefOperSgaclMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 7, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspDefOperIpTrafficType"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspDefOperSgaclIndex"))
if mibBuilder.loadTexts: ctspDefOperSgaclMappingEntry.setStatus('current')
if mibBuilder.loadTexts: ctspDefOperSgaclMappingEntry.setDescription('A row instance contains the SGACL information of the default policy which is either statically configured at the device or dynamically downloaded from ACS server for unicast IP traffic.')
# Index column: ipv4(1) / ipv6(2) traffic type.
ctspDefOperIpTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 7, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2))))
if mibBuilder.loadTexts: ctspDefOperIpTrafficType.setStatus('current')
if mibBuilder.loadTexts: ctspDefOperIpTrafficType.setDescription('This object indicates the type of the unicast IP traffic subjected to default policy enforcement.')
# Index column: SGACL index within the default policy (1..65535).
ctspDefOperSgaclIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 7, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: ctspDefOperSgaclIndex.setStatus('current')
if mibBuilder.loadTexts: ctspDefOperSgaclIndex.setDescription('This object identifies the SGACL of default policy operationally applied to unicast IP traffic.')
ctspDefOperationalSgaclName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 7, 1, 3), CtsAclName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefOperationalSgaclName.setStatus('current')
if mibBuilder.loadTexts: ctspDefOperationalSgaclName.setDescription('This object indicates the name of the SGACL of default policy operationally applied to unicast IP traffic.')
ctspDefOperationalSgaclGenId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 7, 1, 4), CtsGenerationId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefOperationalSgaclGenId.setStatus('current')
if mibBuilder.loadTexts: ctspDefOperationalSgaclGenId.setDescription('This object indicates the generation identification of the SGACL of default policy operationally applied to unicast IP traffic.')
# Whether the mapping/SGACL came from local config or was downloaded from ACS.
ctspDefOperSgaclMappingSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 7, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("configured", 1), ("downloaded", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefOperSgaclMappingSource.setStatus('current')
if mibBuilder.loadTexts: ctspDefOperSgaclMappingSource.setDescription("This object indicates the source of SGACL mapping for the SGACL of default policy operationally applied to unicast IP traffic. 'downloaded' indicates that the mapping is downloaded from ACS server. 'configured' indicates that the mapping is locally configured in the device.")
ctspDefOperSgaclConfigSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 7, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("configured", 1), ("downloaded", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefOperSgaclConfigSource.setStatus('current')
if mibBuilder.loadTexts: ctspDefOperSgaclConfigSource.setDescription("This object indicates the source of SGACL creation for the SGACL of default policy operationally applied to unicast IP traffic. 'downloaded' indicates that the SGACL is created at ACS server and downloaded to the local device. 'configured' indicates that the SGACL is locally configured in the local device.")
ctspDefOperSgaclMonitor = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 7, 1, 7), CtsSgaclMonitorMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefOperSgaclMonitor.setStatus('current')
if mibBuilder.loadTexts: ctspDefOperSgaclMonitor.setDescription('This object indicates whether SGACL monitor mode is turned on for the SGACL of default policy enforced traffic.')
# --- SGACL monitor-mode scalars (read-write knobs) ----------------------------
ctspDefConfigIpv4SgaclsMonitor = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 8), CtsSgaclMonitorMode()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspDefConfigIpv4SgaclsMonitor.setStatus('current')
if mibBuilder.loadTexts: ctspDefConfigIpv4SgaclsMonitor.setDescription('This object specifies whether SGACL monitor mode is turned on for the default configured SGACL enforced Ipv4 traffic.')
ctspDefConfigIpv6SgaclsMonitor = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 9), CtsSgaclMonitorMode()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspDefConfigIpv6SgaclsMonitor.setStatus('current')
if mibBuilder.loadTexts: ctspDefConfigIpv6SgaclsMonitor.setDescription('This object specifies whether SGACL monitor mode is turned on for the default configured SGACL enforced Ipv6 traffic.')
# System-wide monitor switch; takes precedence over per-SGACL monitor settings.
ctspSgaclMonitorEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 2, 10), CtsSgaclMonitorMode()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspSgaclMonitorEnable.setStatus('current')
if mibBuilder.loadTexts: ctspSgaclMonitorEnable.setDescription('This object specifies whether SGACL monitor mode is turned on for the entire system. It has precedence than the per SGACL ctspConfigSgaclMonitor control. It could act as safety mechanism to turn off monitor in case the monitor feature impact system performance.')
# --- SGACL statistics ----------------------------------------------------------
# ctspSgtStatsTable (1.3.6.1.4.1.9.9.713.1.1.3.1): per <source SGT, dest SGT>
# drop/permit/monitor packet counters, split by software vs hardware forwarding.
ctspSgtStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1), )
if mibBuilder.loadTexts: ctspSgtStatsTable.setStatus('current')
if mibBuilder.loadTexts: ctspSgtStatsTable.setDescription('This table describes SGACL statistics counters per a pair of <source SGT, destination SGT> that is capable of providing this information.')
ctspSgtStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspStatsIpTrafficType"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspStatsDestSgt"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspStatsSourceSgt"))
if mibBuilder.loadTexts: ctspSgtStatsEntry.setStatus('current')
if mibBuilder.loadTexts: ctspSgtStatsEntry.setDescription('Each row contains the SGACL statistics related to IPv4 or IPv6 packets carrying the source SGT travelling to the destination SGT and subjected to SGACL enforcement.')
# Index columns: traffic type, destination SGT, source SGT (0 == unknown SGT).
ctspStatsIpTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2))))
if mibBuilder.loadTexts: ctspStatsIpTrafficType.setStatus('current')
if mibBuilder.loadTexts: ctspStatsIpTrafficType.setDescription('This object indicates the type of the unicast IP traffic carrying the source SGT and travelling to destination SGT and subjected to SGACL enforcement.')
ctspStatsDestSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1, 2), CtsSecurityGroupTag())
if mibBuilder.loadTexts: ctspStatsDestSgt.setStatus('current')
if mibBuilder.loadTexts: ctspStatsDestSgt.setDescription('This object indicates the destination SGT value. Value of zero indicates that the destination SGT is unknown.')
ctspStatsSourceSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1, 3), CtsSecurityGroupTag())
if mibBuilder.loadTexts: ctspStatsSourceSgt.setStatus('current')
if mibBuilder.loadTexts: ctspStatsSourceSgt.setDescription('This object indicates the source SGT value. Value of zero indicates that the source SGT is unknown.')
# 64-bit counters: sw/hw x drop/permit/monitor.
ctspStatsIpSwDropPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspStatsIpSwDropPkts.setStatus('current')
if mibBuilder.loadTexts: ctspStatsIpSwDropPkts.setDescription('This object indicates the number of software-forwarded IP packets which are dropped by SGACL.')
ctspStatsIpHwDropPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspStatsIpHwDropPkts.setStatus('current')
if mibBuilder.loadTexts: ctspStatsIpHwDropPkts.setDescription('This object indicates the number of hardware-forwarded IP packets which are dropped by SGACL.')
ctspStatsIpSwPermitPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspStatsIpSwPermitPkts.setStatus('current')
if mibBuilder.loadTexts: ctspStatsIpSwPermitPkts.setDescription('This object indicates the number of software-forwarded IP packets which are permitted by SGACL.')
ctspStatsIpHwPermitPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspStatsIpHwPermitPkts.setStatus('current')
if mibBuilder.loadTexts: ctspStatsIpHwPermitPkts.setDescription('This object indicates the number of hardware-forwarded IP packets which are permitted by SGACL.')
ctspStatsIpSwMonitorPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspStatsIpSwMonitorPkts.setStatus('current')
if mibBuilder.loadTexts: ctspStatsIpSwMonitorPkts.setDescription('This object indicates the number of software-forwarded IP packets which are SGACL enforced & monitored.')
ctspStatsIpHwMonitorPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 1, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspStatsIpHwMonitorPkts.setStatus('current')
if mibBuilder.loadTexts: ctspStatsIpHwMonitorPkts.setDescription('This object indicates the number of hardware-forwarded IP packets which are SGACL enforced & monitored.')
# ctspDefStatsTable (1.3.6.1.4.1.9.9.713.1.1.3.2): same counter set for traffic
# that hit the default unicast policy, indexed only by IP traffic type.
ctspDefStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 2), )
if mibBuilder.loadTexts: ctspDefStatsTable.setStatus('current')
if mibBuilder.loadTexts: ctspDefStatsTable.setDescription('This table describes statistics counters for unicast IP traffic subjected to default unicast policy.')
ctspDefStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 2, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspDefIpTrafficType"))
if mibBuilder.loadTexts: ctspDefStatsEntry.setStatus('current')
if mibBuilder.loadTexts: ctspDefStatsEntry.setDescription('Each row contains the statistics counter for each IP traffic type.')
ctspDefIpTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2))))
if mibBuilder.loadTexts: ctspDefIpTrafficType.setStatus('current')
if mibBuilder.loadTexts: ctspDefIpTrafficType.setDescription('This object indicates the type of the IP traffic subjected to default unicast policy enforcement.')
ctspDefIpSwDropPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefIpSwDropPkts.setStatus('current')
if mibBuilder.loadTexts: ctspDefIpSwDropPkts.setDescription('This object indicates the number of software-forwarded IP packets which are dropped by default unicast policy.')
ctspDefIpHwDropPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 2, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefIpHwDropPkts.setStatus('current')
if mibBuilder.loadTexts: ctspDefIpHwDropPkts.setDescription('This object indicates the number of hardware-forwarded IP packets which are dropped by default unicast policy.')
ctspDefIpSwPermitPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 2, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefIpSwPermitPkts.setStatus('current')
if mibBuilder.loadTexts: ctspDefIpSwPermitPkts.setDescription('This object indicates the number of software-forwarded IP packets which are permitted by default unicast policy.')
ctspDefIpHwPermitPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 2, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefIpHwPermitPkts.setStatus('current')
if mibBuilder.loadTexts: ctspDefIpHwPermitPkts.setDescription('This object indicates the number of hardware-forwarded IP packets which are permitted by default unicast policy.')
ctspDefIpSwMonitorPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 2, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefIpSwMonitorPkts.setStatus('current')
if mibBuilder.loadTexts: ctspDefIpSwMonitorPkts.setDescription('This object indicates the number of software-forwarded IP packets which are monitored by default unicast policy.')
ctspDefIpHwMonitorPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 1, 3, 2, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDefIpHwMonitorPkts.setStatus('current')
if mibBuilder.loadTexts: ctspDefIpHwMonitorPkts.setDescription('This object indicates the number of hardware-forwarded IP packets which are monitored by default unicast policy.')
# --- Peer policy objects (1.3.6.1.4.1.9.9.713.1.2) ----------------------------
# ctspAllPeerPolicyAction is a write-only style action scalar (reads as 'none');
# ctspPeerPolicyTable holds per-peer SGT/trust/lifetime data keyed by peer name.
ctspAllPeerPolicyAction = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("refresh", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspAllPeerPolicyAction.setStatus('current')
if mibBuilder.loadTexts: ctspAllPeerPolicyAction.setDescription("This object allows user to specify the action to be taken with respect to all peer policies in the device. When read, this object always returns the value 'none'. 'none' - No operation. 'refresh' - Refresh all peer policies in the device.")
ctspPeerPolicyTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 2), )
if mibBuilder.loadTexts: ctspPeerPolicyTable.setStatus('current')
if mibBuilder.loadTexts: ctspPeerPolicyTable.setDescription('This table lists the peer policy information for each peer device.')
# Index flag 1 (vs 0 elsewhere) marks ctspPeerName as an IMPLIED index.
ctspPeerPolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 2, 1), ).setIndexNames((1, "CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerName"))
if mibBuilder.loadTexts: ctspPeerPolicyEntry.setStatus('current')
if mibBuilder.loadTexts: ctspPeerPolicyEntry.setDescription('Each row contains the managed objects for peer policies for each peer device based on its name.')
ctspPeerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 2, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 128)))
if mibBuilder.loadTexts: ctspPeerName.setStatus('current')
if mibBuilder.loadTexts: ctspPeerName.setDescription('This object uniquely identifies a peer device.')
ctspPeerSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 2, 1, 2), CtsSecurityGroupTag()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspPeerSgt.setStatus('current')
if mibBuilder.loadTexts: ctspPeerSgt.setDescription('This object indicates the SGT value of this peer device.')
ctspPeerSgtGenId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 2, 1, 3), CtsGenerationId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspPeerSgtGenId.setStatus('current')
if mibBuilder.loadTexts: ctspPeerSgtGenId.setDescription('This object indicates the generation identification of the SGT value assigned to this peer device.')
ctspPeerTrustState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("trusted", 1), ("noTrust", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspPeerTrustState.setStatus('current')
if mibBuilder.loadTexts: ctspPeerTrustState.setDescription("This object indicates the TrustSec trust state of this peer device. 'trusted' indicates that this is a trusted peer device. 'noTrust' indicates that this peer device is not trusted.")
ctspPeerPolicyLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 2, 1, 5), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspPeerPolicyLifeTime.setStatus('current')
if mibBuilder.loadTexts: ctspPeerPolicyLifeTime.setDescription('This object indicates the policy life time which provides the time interval during which the peer policy is valid.')
ctspPeerPolicyLastUpdate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 2, 1, 6), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspPeerPolicyLastUpdate.setStatus('current')
if mibBuilder.loadTexts: ctspPeerPolicyLastUpdate.setDescription('This object indicates the time when this peer policy is last updated.')
ctspPeerPolicyAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("refresh", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspPeerPolicyAction.setStatus('current')
if mibBuilder.loadTexts: ctspPeerPolicyAction.setDescription("This object allows user to specify the action to be taken with this peer policy. When read, this object always returns the value 'none'. 'none' - No operation. 'refresh' - Refresh this peer policy.")
# --- Layer-3 transport policy (1.3.6.1.4.1.9.9.713.1.3) -----------------------
# ACL-based classification of which L3 traffic is (permit) / is not (exception)
# subject to SGT propagation; local config vs downloaded vs operational names.
ctspLayer3PolicyTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 1), )
if mibBuilder.loadTexts: ctspLayer3PolicyTable.setStatus('current')
if mibBuilder.loadTexts: ctspLayer3PolicyTable.setDescription('This table describes Layer 3 transport policy for IP traffic regarding SGT propagation.')
ctspLayer3PolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspLayer3PolicyIpTrafficType"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspLayer3PolicyType"))
if mibBuilder.loadTexts: ctspLayer3PolicyEntry.setStatus('current')
if mibBuilder.loadTexts: ctspLayer3PolicyEntry.setDescription('Each row contains the Layer 3 transport policies per IP traffic type per policy type.')
ctspLayer3PolicyIpTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2))))
if mibBuilder.loadTexts: ctspLayer3PolicyIpTrafficType.setStatus('current')
if mibBuilder.loadTexts: ctspLayer3PolicyIpTrafficType.setDescription("This object indicates the type of the IP traffic affected by Layer-3 transport policy. 'ipv4' indicates that the affected traffic is IPv4 traffic. 'ipv6' indicates that the affected traffic is IPv6 traffic.")
ctspLayer3PolicyType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("permit", 1), ("exception", 2))))
if mibBuilder.loadTexts: ctspLayer3PolicyType.setStatus('current')
if mibBuilder.loadTexts: ctspLayer3PolicyType.setDescription("This object indicates the type of the Layer-3 transport policy affecting IP traffic regarding SGT propagation. 'permit' indicates that the transport policy is used to classify Layer-3 traffic which is subject to SGT propagation. 'exception' indicates that the transport policy is used to classify Layer-3 traffic which is NOT subject to SGT propagation.")
ctspLayer3PolicyLocalConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 1, 1, 3), CtsAclNameOrEmpty()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspLayer3PolicyLocalConfig.setStatus('current')
if mibBuilder.loadTexts: ctspLayer3PolicyLocalConfig.setDescription('This object specifies the name of an ACL that is administratively configured to classify Layer3 traffic. Zero-length string indicates there is no such configured policy.')
ctspLayer3PolicyDownloaded = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 1, 1, 4), CtsAclNameOrEmpty()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspLayer3PolicyDownloaded.setStatus('current')
if mibBuilder.loadTexts: ctspLayer3PolicyDownloaded.setDescription('This object specifies the name of an ACL that is downloaded from policy server to classify Layer3 traffic. Zero-length string indicates there is no such downloaded policy.')
ctspLayer3PolicyOperational = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 1, 1, 5), CtsAclNameOrEmpty()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspLayer3PolicyOperational.setStatus('current')
if mibBuilder.loadTexts: ctspLayer3PolicyOperational.setDescription('This object specifies the name of an operational ACL currently used to classify Layer3 traffic. Zero-length string indicates there is no such policy in effect.')
# ctspIfL3PolicyConfigTable: per-interface (ifIndex from IF-MIB) enable flags
# for applying the L3 transport policy to egress IPv4/IPv6 traffic.
ctspIfL3PolicyConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 2), )
if mibBuilder.loadTexts: ctspIfL3PolicyConfigTable.setStatus('current')
if mibBuilder.loadTexts: ctspIfL3PolicyConfigTable.setDescription('This table lists the interfaces which support Layer3 Transport policy.')
ctspIfL3PolicyConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ctspIfL3PolicyConfigEntry.setStatus('current')
if mibBuilder.loadTexts: ctspIfL3PolicyConfigEntry.setDescription('Each row contains managed objects for Layer3 Transport on interface capable of providing this information.')
ctspIfL3Ipv4PolicyEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 2, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspIfL3Ipv4PolicyEnabled.setStatus('current')
if mibBuilder.loadTexts: ctspIfL3Ipv4PolicyEnabled.setDescription("This object specifies whether the Layer3 Transport policies will be applied on this interface for egress IPv4 traffic. 'true' indicates that Layer3 permit and exception policy will be applied at this interface for egress IPv4 traffic. 'false' indicates that Layer3 permit and exception policy will not be applied at this interface for egress IPv4 traffic.")
ctspIfL3Ipv6PolicyEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 3, 2, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspIfL3Ipv6PolicyEnabled.setStatus('current')
if mibBuilder.loadTexts: ctspIfL3Ipv6PolicyEnabled.setDescription("This object specifies whether the Layer3 Transport policies will be applied on this interface for egress IPv6 traffic. 'true' indicates that Layer3 permit and exception policy will be applied at this interface for egress IPv6 traffic. 'false' indicates that Layer3 permit and exception policy will not be applied at this interface for egress IPv6 traffic.")
ctspIpSgtMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1), )
if mibBuilder.loadTexts: ctspIpSgtMappingTable.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtMappingTable.setDescription('This table contains the IP-to-SGT mapping information in the device.')
ctspIpSgtMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtVrfName"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtAddressType"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtIpAddress"), (0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtAddressLength"))
if mibBuilder.loadTexts: ctspIpSgtMappingEntry.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtMappingEntry.setDescription('Each row contains the IP-to-SGT mapping and status of this instance. Entry in this table is either populated automatically by the device or manually configured by a user. A manually configured row instance can be created or removed by setting the appropriate value of its RowStatus object.')
ctspIpSgtVrfName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1, 1, 1), CiscoVrfName())
if mibBuilder.loadTexts: ctspIpSgtVrfName.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtVrfName.setDescription('This object indicates the VRF where IP-SGT mapping belongs to. The zero length value indicates the default VRF.')
ctspIpSgtAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1, 1, 2), InetAddressType())
if mibBuilder.loadTexts: ctspIpSgtAddressType.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtAddressType.setDescription('This object indicates the type of Internet address.')
ctspIpSgtIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1, 1, 3), InetAddress())
if mibBuilder.loadTexts: ctspIpSgtIpAddress.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtIpAddress.setDescription('This object indicates an Internet address. The type of this address is determined by the value of ctspIpSgtAddressType object.')
ctspIpSgtAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1, 1, 4), InetAddressPrefixLength())
if mibBuilder.loadTexts: ctspIpSgtAddressLength.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtAddressLength.setDescription('This object indicates the length of an Internet address prefix.')
ctspIpSgtValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1, 1, 5), CtsSecurityGroupTag()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspIpSgtValue.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtValue.setDescription('This object specifies the SGT value assigned to an Internet address.')
ctspIpSgtSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("configured", 1), ("arp", 2), ("localAuthenticated", 3), ("sxp", 4), ("internal", 5), ("l3if", 6), ("vlan", 7), ("caching", 8)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspIpSgtSource.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtSource.setDescription("This object indicates the source of the mapping. 'configured' indicates that the mapping is manually configured by user. 'arp' indicates that the mapping is dynamically learnt from tagged ARP replies. 'localAuthenticated' indicates that the mapping is dynamically learnt from the device authentication of a host. 'sxp' indicates that the mapping is dynamically learnt from SXP (SGT Propagation Protocol). 'internal' indicates that the mapping is automatically created by the device between the device IP addresses and the device own SGT. 'l3if' indicates that Interface-SGT mapping is configured by user. 'vlan' indicates that Vlan-SGT mapping is configured by user. 'cached' indicates that sgt mapping is cached. Only 'configured' value is accepted when setting this object.")
ctspIpSgtStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1, 1, 7), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspIpSgtStorageType.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtStorageType.setDescription('The storage type for this conceptual row.')
ctspIpSgtRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 4, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspIpSgtRowStatus.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtRowStatus.setDescription("This object is used to manage the creation and deletion of rows in this table. If this object value is 'active', user cannot modify any writable object in this row. If value of ctspIpSgtSource object in an entry is not 'configured', user cannot change the value of this object.")
ctspAllSgtPolicyAction = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("refresh", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspAllSgtPolicyAction.setStatus('current')
if mibBuilder.loadTexts: ctspAllSgtPolicyAction.setDescription("This object allows user to specify the action to be taken with respect to all SGT policies in the device. When read, this object always returns the value 'none'. 'none' - No operation. 'refresh' - Refresh all SGT policies in the device.")
ctspDownloadedSgtPolicyTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 2), )
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyTable.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyTable.setDescription('This table lists the SGT policy information downloaded by the device.')
ctspDownloadedSgtPolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 2, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgtPolicySgt"))
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyEntry.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyEntry.setDescription('Each row contains the managed objects for SGT policies downloaded by the device.')
ctspDownloadedSgtPolicySgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 2, 1, 1), CtsSecurityGroupTag())
if mibBuilder.loadTexts: ctspDownloadedSgtPolicySgt.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgtPolicySgt.setDescription('This object indicates the SGT value for which the downloaded policy is applied to. Value of zero indicates that the SGT is unknown.')
ctspDownloadedSgtPolicySgtGenId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 2, 1, 2), CtsGenerationId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedSgtPolicySgtGenId.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgtPolicySgtGenId.setDescription('This object indicates the generation identification of the SGT value denoted by ctspDownloadedSgtPolicySgt object.')
ctspDownloadedSgtPolicyLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 2, 1, 3), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyLifeTime.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyLifeTime.setDescription('This object indicates the policy life time which provides the time interval during which this downloaded policy is valid.')
ctspDownloadedSgtPolicyLastUpdate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 2, 1, 4), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyLastUpdate.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyLastUpdate.setDescription('This object indicates the time when this downloaded SGT policy is last updated.')
ctspDownloadedSgtPolicyAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("refresh", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyAction.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgtPolicyAction.setDescription("This object allows user to specify the action to be taken with this downloaded SGT policy. When read, this object always returns the value 'none'. 'none' - No operation. 'refresh' - Refresh this SGT policy.")
ctspDownloadedDefSgtPolicyTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 3), )
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyTable.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyTable.setDescription('This table lists the default SGT policy information downloaded by the device.')
ctspDownloadedDefSgtPolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 3, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedDefSgtPolicyType"))
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyEntry.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyEntry.setDescription('Each row contains the managed objects for default SGT policies downloaded by the device.')
ctspDownloadedDefSgtPolicyType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("unicastDefault", 1))))
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyType.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyType.setDescription("This object indicates the downloaded default SGT policy type. 'unicastDefault' indicates the SGT policy applied to traffic which carries the default unicast SGT.")
ctspDownloadedDefSgtPolicySgtGenId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 3, 1, 2), CtsGenerationId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicySgtGenId.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicySgtGenId.setDescription('This object indicates the generation identification of the downloaded default SGT policy.')
ctspDownloadedDefSgtPolicyLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 3, 1, 3), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyLifeTime.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyLifeTime.setDescription('This object indicates the policy life time which provides the time interval during which this download default policy is valid.')
ctspDownloadedDefSgtPolicyLastUpdate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 3, 1, 4), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyLastUpdate.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyLastUpdate.setDescription('This object indicates the time when this downloaded SGT policy is last updated.')
ctspDownloadedDefSgtPolicyAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 5, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("refresh", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyAction.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedDefSgtPolicyAction.setDescription("This object allows user to specify the action to be taken with this default downloaded SGT policy. When read, this object always returns the value 'none'. 'none' - No operation. 'refresh' - Refresh this default SGT policy.")
# --- ctspIfSgtMappingTable: Interface-to-SGT mapping configuration ------------
# Indexed by IF-MIB::ifIndex; rows are created/destroyed via the RowStatus
# column (ctspIfSgtRowStatus), per standard SMIv2 table-management convention.
ctspIfSgtMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6, 1), )
if mibBuilder.loadTexts: ctspIfSgtMappingTable.setStatus('current')
if mibBuilder.loadTexts: ctspIfSgtMappingTable.setDescription('This table contains the Interface-to-SGT mapping configuration information in the device.')
ctspIfSgtMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ctspIfSgtMappingEntry.setStatus('current')
if mibBuilder.loadTexts: ctspIfSgtMappingEntry.setDescription('Each row contains the SGT mapping configuration of a particular interface. A row instance can be created or removed by setting ctspIfSgtRowStatus.')
# SGT value assigned to the interface (read-create).
ctspIfSgtValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6, 1, 1, 1), CtsSecurityGroupTag()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspIfSgtValue.setStatus('current')
if mibBuilder.loadTexts: ctspIfSgtValue.setDescription('This object specifies the SGT value assigned to the interface.')
# Security Group Name assigned to the interface (read-create).
ctspIfSgName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6, 1, 1, 2), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspIfSgName.setStatus('current')
if mibBuilder.loadTexts: ctspIfSgName.setDescription('This object specifies the Security Group Name assigned to the interface.')
# StorageType defaults to 'volatile' (row not preserved across restarts unless changed).
ctspIfSgtStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6, 1, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspIfSgtStorageType.setStatus('current')
if mibBuilder.loadTexts: ctspIfSgtStorageType.setDescription('The storage type for this conceptual row.')
# RowStatus column controlling creation/deletion of conceptual rows.
ctspIfSgtRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6, 1, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspIfSgtRowStatus.setStatus('current')
if mibBuilder.loadTexts: ctspIfSgtRowStatus.setDescription('This object is used to manage the creation and deletion of rows in this table.')
# --- ctspIfSgtMappingInfoTable: read-only Interface-to-SGT mapping status -----
# Companion status table to ctspIfSgtMappingTable, also indexed by ifIndex.
ctspIfSgtMappingInfoTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6, 2), )
if mibBuilder.loadTexts: ctspIfSgtMappingInfoTable.setStatus('current')
if mibBuilder.loadTexts: ctspIfSgtMappingInfoTable.setDescription('This table contains the Interface-to-SGT mapping status information in the device.')
ctspIfSgtMappingInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ctspIfSgtMappingInfoEntry.setStatus('current')
if mibBuilder.loadTexts: ctspIfSgtMappingInfoEntry.setDescription('Containing the Interface-to-SGT mapping status of the specified interface.')
# Layer 3 Identity Port Mapping (IPM) operational mode: disabled(1)/active(2)/inactive(3).
ctspL3IPMStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("active", 2), ("inactive", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctspL3IPMStatus.setStatus('current')
if mibBuilder.loadTexts: ctspL3IPMStatus.setDescription('This object indicates the Layer 3 Identity Port Mapping(IPM) operational mode. disabled - The L3 IPM is not configured. active - The L3 IPM is configured for this interface, and SGT is available. inactive - The L3 IPM is configured for this interface, and SGT is unavailable.')
# --- ctspVlanSgtMappingTable: VLAN-to-SGT mapping configuration ---------------
# Indexed by its own VlanIndex column; rows managed via ctspVlanSgtRowStatus.
ctspVlanSgtMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 7, 1), )
if mibBuilder.loadTexts: ctspVlanSgtMappingTable.setStatus('current')
if mibBuilder.loadTexts: ctspVlanSgtMappingTable.setDescription('This table contains the Vlan-SGT mapping information in the device.')
ctspVlanSgtMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 7, 1, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanSgtMappingIndex"))
if mibBuilder.loadTexts: ctspVlanSgtMappingEntry.setStatus('current')
if mibBuilder.loadTexts: ctspVlanSgtMappingEntry.setDescription('Each row contains the SGT mapping configuration of a particular VLAN. A row instance can be created or removed by setting ctspVlanSgtRowStatus.')
# Index column (not-accessible in SMI terms; pysnmp leaves max-access unset here).
ctspVlanSgtMappingIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 7, 1, 1, 1), VlanIndex())
if mibBuilder.loadTexts: ctspVlanSgtMappingIndex.setStatus('current')
if mibBuilder.loadTexts: ctspVlanSgtMappingIndex.setDescription('This object specifies the VLAN-ID which is used as index.')
# SGT value assigned to the VLAN (read-create).
ctspVlanSgtMapValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 7, 1, 1, 2), CtsSecurityGroupTag()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspVlanSgtMapValue.setStatus('current')
if mibBuilder.loadTexts: ctspVlanSgtMapValue.setDescription('This object specifies the SGT value assigned to the vlan.')
# StorageType defaults to 'volatile', matching the interface mapping table above.
ctspVlanSgtStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 7, 1, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspVlanSgtStorageType.setStatus('current')
if mibBuilder.loadTexts: ctspVlanSgtStorageType.setDescription('The storage type for this conceptual row.')
ctspVlanSgtRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 7, 1, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctspVlanSgtRowStatus.setStatus('current')
if mibBuilder.loadTexts: ctspVlanSgtRowStatus.setDescription('This object is used to manage the creation and deletion of rows in this table.')
# --- SGT caching scalars ------------------------------------------------------
# Mode selector plus two Cisco2KVlanList bitmaps (one bit per VLAN) covering
# VLANs 0-2047 and 2048-4095 respectively; the bitmaps are consulted when the
# mode is 'vlan'(4).
ctspSgtCachingMode = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 8, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("standAlone", 2), ("withEnforcement", 3), ("vlan", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspSgtCachingMode.setStatus('current')
if mibBuilder.loadTexts: ctspSgtCachingMode.setDescription("This object specifies which SGT-caching mode is configured for SGT caching capable interfaces at the managed system. 'none' indicates that sgt-caching for all Layer 3 interfaces (excluding SVIs) is disabled. 'standAlone' indicates that SGT-caching is enabled on every TrustSec capable Layer3 interface (excluding SVIs) in the device. 'withEnforcement' indicates that SGT-caching is enabled on interfaces that have RBAC enforcement enabled. 'vlan' indicates that SGT-caching is enabled on the VLANs specified by ctspSgtCachingVlansfFirst2K & ctspSgtCachingVlansSecond2K")
ctspSgtCachingVlansFirst2K = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 8, 2), Cisco2KVlanList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspSgtCachingVlansFirst2K.setStatus('current')
if mibBuilder.loadTexts: ctspSgtCachingVlansFirst2K.setDescription('A string of octets containing one bit per VLAN for VLANs 0 to 2047. If the bit corresponding to a VLAN is set to 1, it indicates SGT-caching is enabled on the VLAN. If the bit corresponding to a VLAN is set to 0, it indicates SGT-caching is disabled on the VLAN.')
ctspSgtCachingVlansSecond2K = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 8, 3), Cisco2KVlanList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspSgtCachingVlansSecond2K.setStatus('current')
if mibBuilder.loadTexts: ctspSgtCachingVlansSecond2K.setDescription('A string of octets containing one bit per VLAN for VLANs 2048 to 4095. If the bit corresponding to a VLAN is set to 1, it indicates SGT-caching is enabled on the VLAN. If the bit corresponding to a VLAN is set to 0, it indicates SGT-caching is disabled on the VLAN.')
# --- Notification control scalars ---------------------------------------------
# TruthValue switches gating emission of the two notifications defined below
# (ctspPeerPolicyUpdatedNotif / ctspAuthorizationSgaclFailNotif).
ctspPeerPolicyUpdatedNotifEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 9, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspPeerPolicyUpdatedNotifEnable.setStatus('current')
if mibBuilder.loadTexts: ctspPeerPolicyUpdatedNotifEnable.setDescription("This object specifies whether the system generates ctspPeerPolicyUpdatedNotif. A value of 'false' will prevent ctspPeerPolicyUpdatedNotif notifications from being generated by this system.")
ctspAuthorizationSgaclFailNotifEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 9, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctspAuthorizationSgaclFailNotifEnable.setStatus('current')
if mibBuilder.loadTexts: ctspAuthorizationSgaclFailNotifEnable.setDescription("This object specifies whether this system generates the ctspAuthorizationSgaclFailNotif. A value of 'false' will prevent ctspAuthorizationSgaclFailNotif notifications from being generated by this system.")
# --- Notification-only variable bindings --------------------------------------
# max-access "accessiblefornotify": these objects exist only as varbinds
# carried in the notifications below and cannot be polled directly.
ctspOldPeerSgt = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 10, 1), CtsSecurityGroupTag()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ctspOldPeerSgt.setStatus('current')
if mibBuilder.loadTexts: ctspOldPeerSgt.setDescription('This object provides the old sgt value for ctspPeerPolicyUpdatedNotif, i.e., the sgt value before the policy is updated.')
# Failure-reason enumeration (7 values) for SGACL acquisition/installation errors.
ctspAuthorizationSgaclFailReason = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 10, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("downloadACE", 1), ("downloadSrc", 2), ("downloadDst", 3), ("installPolicy", 4), ("installPolicyStandby", 5), ("installForIP", 6), ("uninstall", 7)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ctspAuthorizationSgaclFailReason.setStatus('current')
if mibBuilder.loadTexts: ctspAuthorizationSgaclFailReason.setDescription("This object indicates the reason of failure during SGACL acquisitions, installations and uninstallations, which is associated with ctspAuthorizationSgaclFailNotif; 'downloadACE' - Failure during downloading ACE in SGACL acquisition. 'downloadSrc' - Failure during downloading source list in SGACL acquisition. 'downloadDst' - Failure during downloading destination list in SGACL acquisition. 'installPolicy' - Failure during SGACL policy installation 'installPolicyStandby' - Failure during SGACL policy installation on standby 'installForIP' - Failure during SGACL installation for specific IP type. 'uninstall' - Failure during SGACL uninstallation.")
# Free-form additional failure detail string accompanying the reason above.
ctspAuthorizationSgaclFailInfo = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 713, 1, 10, 3), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ctspAuthorizationSgaclFailInfo.setStatus('current')
if mibBuilder.loadTexts: ctspAuthorizationSgaclFailInfo.setDescription('This object provides additional information about authorization SGACL failure, which is associated with ctspAuthorizationSgaclFailNotif.')
# --- Notification (trap) definitions ------------------------------------------
# Peer-policy update trap: carries old and new peer SGT values.
ctspPeerPolicyUpdatedNotif = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 713, 0, 1)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspOldPeerSgt"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerSgt"))
if mibBuilder.loadTexts: ctspPeerPolicyUpdatedNotif.setStatus('current')
if mibBuilder.loadTexts: ctspPeerPolicyUpdatedNotif.setDescription('A ctspPeerPolicyUpdatedNotif is generated when the SGT value of a peer device has been updated.')
# SGACL authorization failure trap: carries the reason enum and detail string.
ctspAuthorizationSgaclFailNotif = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 713, 0, 2)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspAuthorizationSgaclFailReason"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspAuthorizationSgaclFailInfo"))
if mibBuilder.loadTexts: ctspAuthorizationSgaclFailNotif.setStatus('current')
if mibBuilder.loadTexts: ctspAuthorizationSgaclFailNotif.setDescription('A ctspAuthorizationSgaclFailNotif is generated when the authorization of SGACL fails.')
# --- Conformance: compliance statements ---------------------------------------
# Rev 1 is deprecated in favor of Rev 2, which adds the If/Vlan SGT mapping,
# SGT caching, SGACL monitor, and notification groups. The setStatus calls are
# guarded on the pysnmp MIB-builder version because setStatus on compliance
# objects returns a new object only on newer pysnmp releases.
ciscoTrustSecPolicyMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 1))
ciscoTrustSecPolicyMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2))
ciscoTrustSecPolicyMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 1, 1)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspGlobalSgaclEnforcementGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspOperSgaclMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgaclMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSwStatisticsGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefSwStatisticsGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanConfigGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspConfigSgaclMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpHwStatisticsGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefHwStatisticsGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclIpv4DropNetflowMonitorGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclIpv6DropNetflowMonitorGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerPolicyGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerPolicyActionGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspLayer3TransportGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIfL3PolicyConfigGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgtPolicyGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecPolicyMIBCompliance = ciscoTrustSecPolicyMIBCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoTrustSecPolicyMIBCompliance.setDescription('The compliance statement for the CISCO-TRUSTSEC-POLICY-MIB')
ciscoTrustSecPolicyMIBComplianceRev2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 1, 2)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspGlobalSgaclEnforcementGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspOperSgaclMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgaclMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSwStatisticsGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefSwStatisticsGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanConfigGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspConfigSgaclMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpHwStatisticsGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefHwStatisticsGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclIpv4DropNetflowMonitorGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclIpv6DropNetflowMonitorGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerPolicyGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerPolicyActionGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspLayer3TransportGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIfL3PolicyConfigGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgtPolicyGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIfSgtMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanSgtMappingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgtCachingGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclMonitorGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclMonitorStatisticGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspNotifCtrlGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspNotifGroup"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspNotifInfoGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecPolicyMIBComplianceRev2 = ciscoTrustSecPolicyMIBComplianceRev2.setStatus('current')
if mibBuilder.loadTexts: ciscoTrustSecPolicyMIBComplianceRev2.setDescription('The compliance statement for the CISCO-TRUSTSEC-POLICY-MIB')
# --- Conformance: object and notification groups ------------------------------
# Each group enumerates the objects an agent must implement to claim that
# group. The same pysnmp-version guard as above wraps every setStatus call.
ctspGlobalSgaclEnforcementGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 1)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclEnforcementEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspGlobalSgaclEnforcementGroup = ctspGlobalSgaclEnforcementGroup.setStatus('current')
if mibBuilder.loadTexts: ctspGlobalSgaclEnforcementGroup.setDescription('A collection of object which provides the SGACL enforcement information for all TrustSec capable Layer 3 interfaces (excluding SVIs) at the device level.')
ctspSgaclIpv4DropNetflowMonitorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 2)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclIpv4DropNetflowMonitor"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspSgaclIpv4DropNetflowMonitorGroup = ctspSgaclIpv4DropNetflowMonitorGroup.setStatus('current')
if mibBuilder.loadTexts: ctspSgaclIpv4DropNetflowMonitorGroup.setDescription('A collection of object which provides netflow monitor information for IPv4 traffic drop packet due to SGACL enforcement in the device.')
ctspSgaclIpv6DropNetflowMonitorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 3)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclIpv6DropNetflowMonitor"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspSgaclIpv6DropNetflowMonitorGroup = ctspSgaclIpv6DropNetflowMonitorGroup.setStatus('current')
if mibBuilder.loadTexts: ctspSgaclIpv6DropNetflowMonitorGroup.setDescription('A collection of object which provides netflow monitor information for IPv6 traffic drop packet due to SGACL enforcement in the device.')
ctspVlanConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 4)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanConfigSgaclEnforcement"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanSviActive"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanConfigVrfName"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanConfigStorageType"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanConfigRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspVlanConfigGroup = ctspVlanConfigGroup.setStatus('current')
if mibBuilder.loadTexts: ctspVlanConfigGroup.setDescription('A collection of object which provides the SGACL enforcement and VRF information for each VLAN.')
ctspConfigSgaclMappingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 5)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspConfigSgaclMappingSgaclName"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspConfigSgaclMappingStorageType"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspConfigSgaclMappingRowStatus"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefConfigIpv4Sgacls"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefConfigIpv6Sgacls"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspConfigSgaclMappingGroup = ctspConfigSgaclMappingGroup.setStatus('current')
if mibBuilder.loadTexts: ctspConfigSgaclMappingGroup.setDescription('A collection of objects which provides the administratively configured SGACL mapping information in the device.')
ctspDownloadedSgaclMappingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 6)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgaclName"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgaclGenId"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedIpTrafficType"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefDownloadedSgaclName"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefDownloadedSgaclGenId"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefDownloadedIpTrafficType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspDownloadedSgaclMappingGroup = ctspDownloadedSgaclMappingGroup.setStatus('current')
if mibBuilder.loadTexts: ctspDownloadedSgaclMappingGroup.setDescription('A collection of objects which provides the downloaded SGACL mapping information in the device.')
ctspOperSgaclMappingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 7)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspOperationalSgaclName"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspOperationalSgaclGenId"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspOperSgaclMappingSource"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspOperSgaclConfigSource"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefOperationalSgaclName"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefOperationalSgaclGenId"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefOperSgaclMappingSource"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefOperSgaclConfigSource"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspOperSgaclMappingGroup = ctspOperSgaclMappingGroup.setStatus('current')
if mibBuilder.loadTexts: ctspOperSgaclMappingGroup.setDescription('A collection of objects which provides the operational SGACL mapping information in the device.')
# Statistics groups: SW/HW counters for unicast IP traffic under SGACL and
# default-policy enforcement.
ctspIpSwStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 8)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspStatsIpSwDropPkts"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspStatsIpSwPermitPkts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspIpSwStatisticsGroup = ctspIpSwStatisticsGroup.setStatus('current')
if mibBuilder.loadTexts: ctspIpSwStatisticsGroup.setDescription('A collection of objects which provides software statistics counters for unicast IP traffic subjected to SGACL enforcement.')
ctspIpHwStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 9)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspStatsIpHwDropPkts"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspStatsIpHwPermitPkts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspIpHwStatisticsGroup = ctspIpHwStatisticsGroup.setStatus('current')
if mibBuilder.loadTexts: ctspIpHwStatisticsGroup.setDescription('A collection of objects which provides hardware statistics counters for unicast IP traffic subjected to SGACL enforcement.')
ctspDefSwStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 10)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefIpSwDropPkts"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefIpSwPermitPkts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspDefSwStatisticsGroup = ctspDefSwStatisticsGroup.setStatus('current')
if mibBuilder.loadTexts: ctspDefSwStatisticsGroup.setDescription('A collection of objects which provides software statistics counters for unicast IP traffic subjected to unicast default policy enforcement.')
ctspDefHwStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 11)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefIpHwDropPkts"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefIpHwPermitPkts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspDefHwStatisticsGroup = ctspDefHwStatisticsGroup.setStatus('current')
if mibBuilder.loadTexts: ctspDefHwStatisticsGroup.setDescription('A collection of objects which provides hardware statistics counters for unicast IP traffic subjected to unicast default policy enforcement.')
# Peer-policy and SGT-policy groups.
ctspPeerPolicyActionGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 12)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspAllPeerPolicyAction"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspPeerPolicyActionGroup = ctspPeerPolicyActionGroup.setStatus('current')
if mibBuilder.loadTexts: ctspPeerPolicyActionGroup.setDescription('A collection of object which provides refreshing of all peer policies in the device.')
ctspPeerPolicyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 13)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerSgt"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerSgtGenId"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerTrustState"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerPolicyLifeTime"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerPolicyLastUpdate"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerPolicyAction"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspPeerPolicyGroup = ctspPeerPolicyGroup.setStatus('current')
if mibBuilder.loadTexts: ctspPeerPolicyGroup.setDescription('A collection of object which provides peer policy information in the device.')
ctspLayer3TransportGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 14)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspLayer3PolicyLocalConfig"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspLayer3PolicyDownloaded"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspLayer3PolicyOperational"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspLayer3TransportGroup = ctspLayer3TransportGroup.setStatus('current')
if mibBuilder.loadTexts: ctspLayer3TransportGroup.setDescription('A collection of objects which provides managed information regarding the SGT propagation along with Layer 3 traffic in the device.')
ctspIfL3PolicyConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 15)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspIfL3Ipv4PolicyEnabled"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIfL3Ipv6PolicyEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspIfL3PolicyConfigGroup = ctspIfL3PolicyConfigGroup.setStatus('current')
if mibBuilder.loadTexts: ctspIfL3PolicyConfigGroup.setDescription('A collection of objects which provides managed information for Layer3 Tranport policy enforcement on capable interface in the device.')
ctspIpSgtMappingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 16)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtValue"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtSource"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtStorageType"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIpSgtRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspIpSgtMappingGroup = ctspIpSgtMappingGroup.setStatus('current')
if mibBuilder.loadTexts: ctspIpSgtMappingGroup.setDescription('A collection of objects which provides managed information regarding IP-to-Sgt mapping in the device.')
ctspSgtPolicyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 17)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspAllSgtPolicyAction"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgtPolicySgtGenId"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgtPolicyLifeTime"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgtPolicyLastUpdate"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgtPolicyAction"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedDefSgtPolicySgtGenId"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedDefSgtPolicyLifeTime"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedDefSgtPolicyLastUpdate"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedDefSgtPolicyAction"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspSgtPolicyGroup = ctspSgtPolicyGroup.setStatus('current')
if mibBuilder.loadTexts: ctspSgtPolicyGroup.setDescription('A collection of object which provides SGT policy information in the device.')
# Groups added by compliance Rev 2: If/Vlan SGT mapping, caching, monitor,
# and the notification control/definition/varbind groups.
ctspIfSgtMappingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 18)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspIfSgtValue"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIfSgName"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspL3IPMStatus"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIfSgtStorageType"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspIfSgtRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspIfSgtMappingGroup = ctspIfSgtMappingGroup.setStatus('current')
if mibBuilder.loadTexts: ctspIfSgtMappingGroup.setDescription('A collection of objects which provides managed information regarding Interface-to-Sgt mapping in the device.')
ctspVlanSgtMappingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 19)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanSgtMapValue"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanSgtStorageType"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspVlanSgtRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspVlanSgtMappingGroup = ctspVlanSgtMappingGroup.setStatus('current')
if mibBuilder.loadTexts: ctspVlanSgtMappingGroup.setDescription('A collection of objects which provides sgt mapping information for the IP traffic in the specified Vlan.')
ctspSgtCachingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 20)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgtCachingMode"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgtCachingVlansFirst2K"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgtCachingVlansSecond2K"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspSgtCachingGroup = ctspSgtCachingGroup.setStatus('current')
if mibBuilder.loadTexts: ctspSgtCachingGroup.setDescription('A collection of objects which provides sgt Caching information.')
ctspSgaclMonitorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 21)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspSgaclMonitorEnable"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspConfigSgaclMonitor"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefConfigIpv4SgaclsMonitor"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefConfigIpv6SgaclsMonitor"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDownloadedSgaclMonitor"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefDownloadedSgaclMonitor"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspOperSgaclMonitor"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefOperSgaclMonitor"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspSgaclMonitorGroup = ctspSgaclMonitorGroup.setStatus('current')
if mibBuilder.loadTexts: ctspSgaclMonitorGroup.setDescription('A collection of objects which provides SGACL monitor information.')
ctspSgaclMonitorStatisticGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 22)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspStatsIpSwMonitorPkts"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspStatsIpHwMonitorPkts"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefIpSwMonitorPkts"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspDefIpHwMonitorPkts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspSgaclMonitorStatisticGroup = ctspSgaclMonitorStatisticGroup.setStatus('current')
if mibBuilder.loadTexts: ctspSgaclMonitorStatisticGroup.setDescription('A collection of objects which provides monitor statistics counters for unicast IP traffic subjected to SGACL enforcement.')
ctspNotifCtrlGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 23)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerPolicyUpdatedNotifEnable"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspAuthorizationSgaclFailNotifEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspNotifCtrlGroup = ctspNotifCtrlGroup.setStatus('current')
if mibBuilder.loadTexts: ctspNotifCtrlGroup.setDescription('A collection of objects providing notification control for TrustSec policy notifications.')
# NotificationGroup (not ObjectGroup): bundles the two traps defined earlier.
ctspNotifGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 24)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspPeerPolicyUpdatedNotif"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspAuthorizationSgaclFailNotif"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspNotifGroup = ctspNotifGroup.setStatus('current')
if mibBuilder.loadTexts: ctspNotifGroup.setDescription('A collection of notifications for TrustSec policy.')
ctspNotifInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 713, 2, 2, 25)).setObjects(("CISCO-TRUSTSEC-POLICY-MIB", "ctspOldPeerSgt"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspAuthorizationSgaclFailReason"), ("CISCO-TRUSTSEC-POLICY-MIB", "ctspAuthorizationSgaclFailInfo"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ctspNotifInfoGroup = ctspNotifInfoGroup.setStatus('current')
if mibBuilder.loadTexts: ctspNotifInfoGroup.setDescription('A collection of objects providing the variable binding for TrustSec policy notifications.')
mibBuilder.exportSymbols("CISCO-TRUSTSEC-POLICY-MIB", ctspDefDownloadedIpTrafficType=ctspDefDownloadedIpTrafficType, ctspLayer3PolicyType=ctspLayer3PolicyType, ctspPeerTrustState=ctspPeerTrustState, ctspIfSgtValue=ctspIfSgtValue, ctspDownloadedSgaclName=ctspDownloadedSgaclName, ctspSgtCachingVlansSecond2K=ctspSgtCachingVlansSecond2K, ctspDownloadedSgtPolicyLifeTime=ctspDownloadedSgtPolicyLifeTime, ctspSgacl=ctspSgacl, ctspDownloadedDefSgtPolicyLastUpdate=ctspDownloadedDefSgtPolicyLastUpdate, ctspLayer3PolicyLocalConfig=ctspLayer3PolicyLocalConfig, ctspSgaclMappings=ctspSgaclMappings, ctspAllPeerPolicyAction=ctspAllPeerPolicyAction, ctspDefOperationalSgaclGenId=ctspDefOperationalSgaclGenId, ctspSgaclStatistics=ctspSgaclStatistics, ctspDefStatsEntry=ctspDefStatsEntry, ctspOperSgaclMappingSource=ctspOperSgaclMappingSource, ctspDefIpSwPermitPkts=ctspDefIpSwPermitPkts, ciscoTrustSecPolicyMIBObjects=ciscoTrustSecPolicyMIBObjects, ctspIfSgtMappingGroup=ctspIfSgtMappingGroup, ctspVlanConfigStorageType=ctspVlanConfigStorageType, ctspOperSgaclSourceSgt=ctspOperSgaclSourceSgt, ctspDownloadedSgtPolicyLastUpdate=ctspDownloadedSgtPolicyLastUpdate, ctspPeerPolicyUpdatedNotifEnable=ctspPeerPolicyUpdatedNotifEnable, ctspIpSgtVrfName=ctspIpSgtVrfName, ctspConfigSgaclMappingEntry=ctspConfigSgaclMappingEntry, ctspDefIpHwDropPkts=ctspDefIpHwDropPkts, ctspDefOperSgaclMappingEntry=ctspDefOperSgaclMappingEntry, ctspOperIpTrafficType=ctspOperIpTrafficType, ctspStatsIpHwMonitorPkts=ctspStatsIpHwMonitorPkts, ctspDefDownloadedSgaclMappingTable=ctspDefDownloadedSgaclMappingTable, ctspOperSgaclDestSgt=ctspOperSgaclDestSgt, ctspIpSgtMappingGroup=ctspIpSgtMappingGroup, ctspIfSgtRowStatus=ctspIfSgtRowStatus, ctspDownloadedDefSgtPolicyType=ctspDownloadedDefSgtPolicyType, ctspLayer3PolicyDownloaded=ctspLayer3PolicyDownloaded, ctspStatsDestSgt=ctspStatsDestSgt, ctspPeerSgt=ctspPeerSgt, ctspVlanConfigIndex=ctspVlanConfigIndex, ctspDefDownloadedSgaclIndex=ctspDefDownloadedSgaclIndex, 
ctspConfigSgaclMappingStorageType=ctspConfigSgaclMappingStorageType, ctspPeerName=ctspPeerName, ctspDefIpTrafficType=ctspDefIpTrafficType, ctspOperSgaclMappingGroup=ctspOperSgaclMappingGroup, ctspPeerPolicyUpdatedNotif=ctspPeerPolicyUpdatedNotif, ctspSgtCaching=ctspSgtCaching, ciscoTrustSecPolicyMIBComplianceRev2=ciscoTrustSecPolicyMIBComplianceRev2, ciscoTrustSecPolicyMIBConformance=ciscoTrustSecPolicyMIBConformance, ctspDefOperSgaclIndex=ctspDefOperSgaclIndex, ctspOperSgaclMappingTable=ctspOperSgaclMappingTable, ctspDownloadedSgaclGenId=ctspDownloadedSgaclGenId, ctspIfSgtMappings=ctspIfSgtMappings, ctspSgaclIpv6DropNetflowMonitor=ctspSgaclIpv6DropNetflowMonitor, ciscoTrustSecPolicyMIBGroups=ciscoTrustSecPolicyMIBGroups, ctspNotifsOnlyInfo=ctspNotifsOnlyInfo, ctspVlanConfigEntry=ctspVlanConfigEntry, ctspPeerPolicy=ctspPeerPolicy, ctspDownloadedSgaclDestSgt=ctspDownloadedSgaclDestSgt, ctspDefIpHwMonitorPkts=ctspDefIpHwMonitorPkts, ctspLayer3TransportGroup=ctspLayer3TransportGroup, ctspGlobalSgaclEnforcementGroup=ctspGlobalSgaclEnforcementGroup, ctspDownloadedSgaclMappingEntry=ctspDownloadedSgaclMappingEntry, ctspPeerPolicyActionGroup=ctspPeerPolicyActionGroup, ctspSgaclGlobals=ctspSgaclGlobals, ctspNotifInfoGroup=ctspNotifInfoGroup, ctspSgaclMonitorEnable=ctspSgaclMonitorEnable, ctspStatsIpTrafficType=ctspStatsIpTrafficType, ctspConfigSgaclMonitor=ctspConfigSgaclMonitor, ctspDefConfigIpv4Sgacls=ctspDefConfigIpv4Sgacls, ctspVlanSgtMappingGroup=ctspVlanSgtMappingGroup, ctspSgtCachingGroup=ctspSgtCachingGroup, ctspIfL3PolicyConfigEntry=ctspIfL3PolicyConfigEntry, ctspConfigSgaclMappingRowStatus=ctspConfigSgaclMappingRowStatus, ctspIpSwStatisticsGroup=ctspIpSwStatisticsGroup, ctspDownloadedSgtPolicySgt=ctspDownloadedSgtPolicySgt, ctspDefConfigIpv6SgaclsMonitor=ctspDefConfigIpv6SgaclsMonitor, ctspOperSgaclIndex=ctspOperSgaclIndex, ctspVlanSgtMappingTable=ctspVlanSgtMappingTable, ctspIfSgtMappingEntry=ctspIfSgtMappingEntry, 
ctspAuthorizationSgaclFailNotif=ctspAuthorizationSgaclFailNotif, ctspConfigSgaclMappingGroup=ctspConfigSgaclMappingGroup, ctspIfSgtMappingTable=ctspIfSgtMappingTable, ctspStatsIpSwDropPkts=ctspStatsIpSwDropPkts, ctspIpSgtSource=ctspIpSgtSource, ctspConfigSgaclMappingSgaclName=ctspConfigSgaclMappingSgaclName, ctspLayer3PolicyEntry=ctspLayer3PolicyEntry, ctspDownloadedSgaclSourceSgt=ctspDownloadedSgaclSourceSgt, ctspVlanConfigSgaclEnforcement=ctspVlanConfigSgaclEnforcement, ctspDefDownloadedSgaclMappingEntry=ctspDefDownloadedSgaclMappingEntry, ctspIpSgtIpAddress=ctspIpSgtIpAddress, ctspDownloadedSgaclMappingTable=ctspDownloadedSgaclMappingTable, ctspDefOperSgaclMappingTable=ctspDefOperSgaclMappingTable, ctspL3IPMStatus=ctspL3IPMStatus, ctspIfL3Ipv6PolicyEnabled=ctspIfL3Ipv6PolicyEnabled, ctspOperSgaclMonitor=ctspOperSgaclMonitor, ctspIpSgtMappings=ctspIpSgtMappings, ctspPeerPolicyAction=ctspPeerPolicyAction, ctspDownloadedDefSgtPolicyTable=ctspDownloadedDefSgtPolicyTable, ctspPeerPolicyTable=ctspPeerPolicyTable, ctspIfSgtStorageType=ctspIfSgtStorageType, ctspConfigSgaclMappingTable=ctspConfigSgaclMappingTable, PYSNMP_MODULE_ID=ciscoTrustSecPolicyMIB, ctspVlanSgtMappings=ctspVlanSgtMappings, ctspSgtCachingVlansFirst2K=ctspSgtCachingVlansFirst2K, ctspDefOperIpTrafficType=ctspDefOperIpTrafficType, ctspVlanSgtMapValue=ctspVlanSgtMapValue, ctspAuthorizationSgaclFailInfo=ctspAuthorizationSgaclFailInfo, ctspVlanSviActive=ctspVlanSviActive, ctspDownloadedSgtPolicyTable=ctspDownloadedSgtPolicyTable, ctspLayer3PolicyTable=ctspLayer3PolicyTable, ctspDownloadedIpTrafficType=ctspDownloadedIpTrafficType, ctspDownloadedSgtPolicyEntry=ctspDownloadedSgtPolicyEntry, ctspDefOperSgaclMappingSource=ctspDefOperSgaclMappingSource, ctspPeerPolicyEntry=ctspPeerPolicyEntry, ctspSgtStatsTable=ctspSgtStatsTable, ctspIfL3Ipv4PolicyEnabled=ctspIfL3Ipv4PolicyEnabled, ctspSgaclMonitorStatisticGroup=ctspSgaclMonitorStatisticGroup, ctspOperationalSgaclName=ctspOperationalSgaclName, 
ctspIpSgtStorageType=ctspIpSgtStorageType, ctspStatsIpSwPermitPkts=ctspStatsIpSwPermitPkts, ctspVlanSgtMappingIndex=ctspVlanSgtMappingIndex, ctspNotifsControl=ctspNotifsControl, ctspVlanSgtRowStatus=ctspVlanSgtRowStatus, ctspStatsIpSwMonitorPkts=ctspStatsIpSwMonitorPkts, ctspDefHwStatisticsGroup=ctspDefHwStatisticsGroup, ctspDownloadedDefSgtPolicyEntry=ctspDownloadedDefSgtPolicyEntry, ctspIpSgtValue=ctspIpSgtValue, ctspLayer3PolicyOperational=ctspLayer3PolicyOperational, ctspDefIpSwMonitorPkts=ctspDefIpSwMonitorPkts, ctspSgaclIpv4DropNetflowMonitor=ctspSgaclIpv4DropNetflowMonitor, ciscoTrustSecPolicyMIBNotifs=ciscoTrustSecPolicyMIBNotifs, ctspAuthorizationSgaclFailReason=ctspAuthorizationSgaclFailReason, ciscoTrustSecPolicyMIBCompliance=ciscoTrustSecPolicyMIBCompliance, ctspIpSgtMappingEntry=ctspIpSgtMappingEntry, ctspSgtStatsEntry=ctspSgtStatsEntry, ctspIfL3PolicyConfigGroup=ctspIfL3PolicyConfigGroup, ctspSgtPolicyGroup=ctspSgtPolicyGroup, ctspSgtPolicy=ctspSgtPolicy, ctspVlanConfigTable=ctspVlanConfigTable, ctspStatsSourceSgt=ctspStatsSourceSgt, ctspLayer3PolicyIpTrafficType=ctspLayer3PolicyIpTrafficType, ctspPeerPolicyLifeTime=ctspPeerPolicyLifeTime, ctspDefDownloadedSgaclGenId=ctspDefDownloadedSgaclGenId, ctspStatsIpHwPermitPkts=ctspStatsIpHwPermitPkts, ctspIpHwStatisticsGroup=ctspIpHwStatisticsGroup, ctspIpSgtAddressLength=ctspIpSgtAddressLength, ctspDownloadedSgtPolicyAction=ctspDownloadedSgtPolicyAction, ctspAllSgtPolicyAction=ctspAllSgtPolicyAction, ctspDownloadedDefSgtPolicyLifeTime=ctspDownloadedDefSgtPolicyLifeTime, ctspVlanConfigVrfName=ctspVlanConfigVrfName, ctspDownloadedDefSgtPolicySgtGenId=ctspDownloadedDefSgtPolicySgtGenId, ctspPeerSgtGenId=ctspPeerSgtGenId, ctspIfSgName=ctspIfSgName, ctspSgaclMonitorGroup=ctspSgaclMonitorGroup, ctspVlanSgtStorageType=ctspVlanSgtStorageType, ctspSgaclEnforcementEnable=ctspSgaclEnforcementEnable, ctspDefOperSgaclMonitor=ctspDefOperSgaclMonitor, ctspDownloadedSgaclMappingGroup=ctspDownloadedSgaclMappingGroup, 
ctspPeerPolicyGroup=ctspPeerPolicyGroup, ctspDefDownloadedSgaclMonitor=ctspDefDownloadedSgaclMonitor, ctspIfL3PolicyConfigTable=ctspIfL3PolicyConfigTable, ctspDefDownloadedSgaclName=ctspDefDownloadedSgaclName, ctspDownloadedSgtPolicySgtGenId=ctspDownloadedSgtPolicySgtGenId, ciscoTrustSecPolicyMIB=ciscoTrustSecPolicyMIB, ctspVlanConfigRowStatus=ctspVlanConfigRowStatus, ctspIpSgtRowStatus=ctspIpSgtRowStatus, ctspAuthorizationSgaclFailNotifEnable=ctspAuthorizationSgaclFailNotifEnable, ctspConfigSgaclMappingSourceSgt=ctspConfigSgaclMappingSourceSgt, ctspVlanConfigGroup=ctspVlanConfigGroup, ctspDefConfigIpv4SgaclsMonitor=ctspDefConfigIpv4SgaclsMonitor, ctspDefIpSwDropPkts=ctspDefIpSwDropPkts, ctspDefConfigIpv6Sgacls=ctspDefConfigIpv6Sgacls, ctspConfigSgaclMappingIpTrafficType=ctspConfigSgaclMappingIpTrafficType, ciscoTrustSecPolicyMIBCompliances=ciscoTrustSecPolicyMIBCompliances, ctspStatsIpHwDropPkts=ctspStatsIpHwDropPkts, ctspVlanSgtMappingEntry=ctspVlanSgtMappingEntry, ctspDefIpHwPermitPkts=ctspDefIpHwPermitPkts, ctspOperationalSgaclGenId=ctspOperationalSgaclGenId, ctspDefOperationalSgaclName=ctspDefOperationalSgaclName, ctspOperSgaclMappingEntry=ctspOperSgaclMappingEntry, ctspIpSgtMappingTable=ctspIpSgtMappingTable, ctspIfSgtMappingInfoEntry=ctspIfSgtMappingInfoEntry, ctspLayer3Transport=ctspLayer3Transport, ctspSgaclIpv4DropNetflowMonitorGroup=ctspSgaclIpv4DropNetflowMonitorGroup, ctspSgtCachingMode=ctspSgtCachingMode, ctspOperSgaclConfigSource=ctspOperSgaclConfigSource, ctspDownloadedSgaclMonitor=ctspDownloadedSgaclMonitor, ctspDefSwStatisticsGroup=ctspDefSwStatisticsGroup, ctspIpSgtAddressType=ctspIpSgtAddressType, ctspPeerPolicyLastUpdate=ctspPeerPolicyLastUpdate, ctspDownloadedDefSgtPolicyAction=ctspDownloadedDefSgtPolicyAction, ctspOldPeerSgt=ctspOldPeerSgt, ctspNotifGroup=ctspNotifGroup, ctspDefOperSgaclConfigSource=ctspDefOperSgaclConfigSource, ctspDefStatsTable=ctspDefStatsTable, ctspSgaclIpv6DropNetflowMonitorGroup=ctspSgaclIpv6DropNetflowMonitorGroup, 
ctspConfigSgaclMappingDestSgt=ctspConfigSgaclMappingDestSgt, ctspIfSgtMappingInfoTable=ctspIfSgtMappingInfoTable, ctspNotifCtrlGroup=ctspNotifCtrlGroup, ctspDownloadedSgaclIndex=ctspDownloadedSgaclIndex)
| 166.79187 | 10,104 | 0.796192 |
d4a19a6793c7b81c31ff51744f9dee445aa534f8 | 1,685 | py | Python | tests/test_cli/test_generate/test_generate.py | lrahmani/agents-aea | 9bd1d51530fc21bf41b5adea031cda19a94b048b | [
"Apache-2.0"
] | null | null | null | tests/test_cli/test_generate/test_generate.py | lrahmani/agents-aea | 9bd1d51530fc21bf41b5adea031cda19a94b048b | [
"Apache-2.0"
] | null | null | null | tests/test_cli/test_generate/test_generate.py | lrahmani/agents-aea | 9bd1d51530fc21bf41b5adea031cda19a94b048b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for the aea.cli.generate sub-module."""
from unittest import TestCase, mock
from aea.cli.generate import _generate_item
from tests.test_cli.tools_for_testing import ContextMock
| 37.444444 | 80 | 0.665282 |
d4a21ef2eb21f79e91184165f8bb407caaf1dcb1 | 17,126 | py | Python | sphinx/ext/napoleon/__init__.py | PeerHerholz/smobsc | db34d2bb96b80579bd4a3f4c198a6b524c5a134a | [
"BSD-2-Clause"
] | 3 | 2019-06-11T09:42:08.000Z | 2020-03-10T15:57:09.000Z | sphinx/ext/napoleon/__init__.py | PeerHerholz/smobsc | db34d2bb96b80579bd4a3f4c198a6b524c5a134a | [
"BSD-2-Clause"
] | 12 | 2019-01-09T15:43:57.000Z | 2020-01-21T10:46:30.000Z | sphinx/ext/napoleon/__init__.py | PeerHerholz/smobsc | db34d2bb96b80579bd4a3f4c198a6b524c5a134a | [
"BSD-2-Clause"
] | 10 | 2019-02-04T11:49:35.000Z | 2020-03-21T13:32:20.000Z | """
sphinx.ext.napoleon
~~~~~~~~~~~~~~~~~~~
Support for NumPy and Google style docstrings.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from sphinx import __display_version__ as __version__
from sphinx.application import Sphinx
from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
if False:
# For type annotation
from typing import Any, Dict, List # NOQA
def setup(app):
# type: (Sphinx) -> Dict[str, Any]
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
the ``setup()`` function, which in turn notifies Sphinx of everything
the extension offers.
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
See Also
--------
`The Sphinx documentation on Extensions
<http://sphinx-doc.org/extensions.html>`_
`The Extension Tutorial <http://sphinx-doc.org/extdev/tutorial.html>`_
`The Extension API <http://sphinx-doc.org/extdev/appapi.html>`_
"""
if not isinstance(app, Sphinx):
# probably called by tests
return {'version': __version__, 'parallel_read_safe': True}
_patch_python_domain()
app.setup_extension('sphinx.ext.autodoc')
app.connect('autodoc-process-docstring', _process_docstring)
app.connect('autodoc-skip-member', _skip_member)
for name, (default, rebuild) in Config._config_values.items():
app.add_config_value(name, default, rebuild)
return {'version': __version__, 'parallel_read_safe': True}
def _process_docstring(app, what, name, obj, options, lines):
# type: (Sphinx, str, str, Any, Any, List[str]) -> None
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
of docstring lines that `_process_docstring` modifies in place to change
what Sphinx outputs.
The following settings in conf.py control what styles of docstrings will
be parsed:
* ``napoleon_google_docstring`` -- parse Google style docstrings
* ``napoleon_numpy_docstring`` -- parse NumPy style docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process.
what : str
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
lines : list of str
The lines of the docstring, see above.
.. note:: `lines` is modified *in place*
"""
result_lines = lines
docstring = None # type: GoogleDocstring
if app.config.napoleon_numpy_docstring:
docstring = NumpyDocstring(result_lines, app.config, app, what, name,
obj, options)
result_lines = docstring.lines()
if app.config.napoleon_google_docstring:
docstring = GoogleDocstring(result_lines, app.config, app, what, name,
obj, options)
result_lines = docstring.lines()
lines[:] = result_lines[:]
def _skip_member(app, what, name, obj, skip, options):
# type: (Sphinx, str, str, Any, bool, Any) -> bool
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class
members or init methods are included in the generated documentation:
* ``napoleon_include_init_with_doc`` --
include init methods if they have docstrings
* ``napoleon_include_private_with_doc`` --
include private members if they have docstrings
* ``napoleon_include_special_with_doc`` --
include special members if they have docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
what : str
A string specifying the type of the object to which the member
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The name of the member.
obj : module, class, exception, function, method, or attribute.
For example, if the member is the __init__ method of class A, then
`obj` will be `A.__init__`.
skip : bool
A boolean indicating if autodoc will skip this member if `_skip_member`
does not override the decision
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Returns
-------
bool
True if the member should be skipped during creation of the docs,
False if it should be included in the docs.
"""
has_doc = getattr(obj, '__doc__', False)
is_member = (what == 'class' or what == 'exception' or what == 'module')
if name != '__weakref__' and has_doc and is_member:
cls_is_owner = False
if what == 'class' or what == 'exception':
qualname = getattr(obj, '__qualname__', '')
cls_path, _, _ = qualname.rpartition('.')
if cls_path:
try:
if '.' in cls_path:
import importlib
import functools
mod = importlib.import_module(obj.__module__)
mod_path = cls_path.split('.')
cls = functools.reduce(getattr, mod_path, mod)
else:
cls = obj.__globals__[cls_path]
except Exception:
cls_is_owner = False
else:
cls_is_owner = (cls and hasattr(cls, name) and # type: ignore
name in cls.__dict__)
else:
cls_is_owner = False
if what == 'module' or cls_is_owner:
is_init = (name == '__init__')
is_special = (not is_init and name.startswith('__') and
name.endswith('__'))
is_private = (not is_init and not is_special and
name.startswith('_'))
inc_init = app.config.napoleon_include_init_with_doc
inc_special = app.config.napoleon_include_special_with_doc
inc_private = app.config.napoleon_include_private_with_doc
if ((is_special and inc_special) or
(is_private and inc_private) or
(is_init and inc_init)):
return False
return None
| 36.515991 | 88 | 0.608782 |
d4a24c39597d568e3ab31f3730cb741839a01aff | 2,390 | py | Python | plugins/similarity/rdkit/tanimoto/lbvs-entry.py | skodapetr/viset | 87863ed6cde63392b2d503ceda53bb2cea367d69 | [
"MIT"
] | 1 | 2018-12-28T19:36:04.000Z | 2018-12-28T19:36:04.000Z | plugins/similarity/rdkit/tanimoto/lbvs-entry.py | skodapetr/viset | 87863ed6cde63392b2d503ceda53bb2cea367d69 | [
"MIT"
] | 14 | 2017-11-15T17:45:58.000Z | 2018-12-10T17:52:23.000Z | plugins/similarity/rdkit/tanimoto/lbvs-entry.py | skodapetr/viset | 87863ed6cde63392b2d503ceda53bb2cea367d69 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from rdkit import DataStructs
import plugin_api
__license__ = "X11"
| 29.506173 | 77 | 0.599582 |
d4a3f90c44e54f8024d6bee8196a0b29bb2aed61 | 2,849 | py | Python | mall_spider/spiders/actions/proxy_service.py | 524243642/taobao_spider | 9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e | [
"Unlicense"
] | 12 | 2019-06-06T12:23:08.000Z | 2021-06-15T17:50:07.000Z | mall_spider/spiders/actions/proxy_service.py | 524243642/mall_spider | 9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e | [
"Unlicense"
] | 3 | 2021-03-31T19:02:47.000Z | 2022-02-11T03:43:15.000Z | mall_spider/spiders/actions/proxy_service.py | 524243642/taobao_spider | 9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e | [
"Unlicense"
] | 5 | 2019-09-17T03:55:56.000Z | 2020-12-18T03:34:03.000Z | # coding: utf-8
import time
from config.config_loader import global_config
from mall_spider.spiders.actions.context import Context
from mall_spider.spiders.actions.direct_proxy_action import DirectProxyAction
__proxy_service = None
| 33.127907 | 208 | 0.601264 |
d4a46b8215ad96def234df7df255d9ac5c89bb08 | 965 | py | Python | app/weather_tests.py | joedanz/flask-weather | fe35aa359da6f5d7f942d97837403e153b5c5ede | [
"Apache-2.0"
] | 1 | 2017-08-25T18:55:11.000Z | 2017-08-25T18:55:11.000Z | app/weather_tests.py | joedanz/flask-weather | fe35aa359da6f5d7f942d97837403e153b5c5ede | [
"Apache-2.0"
] | null | null | null | app/weather_tests.py | joedanz/flask-weather | fe35aa359da6f5d7f942d97837403e153b5c5ede | [
"Apache-2.0"
] | null | null | null | import os
import weather
import datetime
import unittest
import tempfile
if __name__ == '__main__':
unittest.main()
| 26.805556 | 71 | 0.631088 |
d4a58432909af220904a476edcdbf9bcba8bc8c1 | 984 | py | Python | modules/sensors/Activator.py | memristor/mep2 | bc5cddacba3d740f791f3454b8cb51bda83ce202 | [
"MIT"
] | 5 | 2018-11-27T15:15:00.000Z | 2022-02-10T21:44:13.000Z | modules/sensors/Activator.py | memristor/mep2 | bc5cddacba3d740f791f3454b8cb51bda83ce202 | [
"MIT"
] | 2 | 2018-10-20T15:48:40.000Z | 2018-11-20T05:11:33.000Z | modules/sensors/Activator.py | memristor/mep2 | bc5cddacba3d740f791f3454b8cb51bda83ce202 | [
"MIT"
] | 1 | 2020-02-07T12:44:47.000Z | 2020-02-07T12:44:47.000Z | import asyncio
| 24 | 66 | 0.705285 |
d4a5dfe986967f5b7fa8e3f7e5dcaa1ed0f98f18 | 7,779 | py | Python | examples/retrieval/evaluation/sparse/evaluate_deepct.py | ArthurCamara/beir | 2739990b719f2d4814d88473cf9965d92d4f4c18 | [
"Apache-2.0"
] | 24 | 2022-03-20T18:48:52.000Z | 2022-03-31T08:28:42.000Z | examples/retrieval/evaluation/sparse/evaluate_deepct.py | ArthurCamara/beir | 2739990b719f2d4814d88473cf9965d92d4f4c18 | [
"Apache-2.0"
] | 9 | 2022-03-19T14:50:30.000Z | 2022-03-30T17:31:18.000Z | examples/retrieval/evaluation/sparse/evaluate_deepct.py | ArthurCamara/beir | 2739990b719f2d4814d88473cf9965d92d4f4c18 | [
"Apache-2.0"
] | 3 | 2022-03-25T15:45:14.000Z | 2022-03-25T17:51:23.000Z | """
This example shows how to evaluate DeepCT (using Anserini) in BEIR.
For more details on DeepCT, refer here: https://arxiv.org/abs/1910.10687
The original DeepCT repository is not modularised and only works with Tensorflow 1.x (1.15).
We modified the DeepCT repository to work with Tensorflow latest (2.x).
We do not change the core-prediction code, only few input/output file format and structure to adapt to BEIR formats.
For more details on changes, check: https://github.com/NThakur20/DeepCT and compare it with original repo!
Please follow the steps below to install DeepCT:
1. git clone https://github.com/NThakur20/DeepCT.git
Since Anserini uses Java-11, we would advise you to use docker for running Pyserini.
To be able to run the code below you must have docker locally installed in your machine.
To install docker on your local machine, please refer here: https://docs.docker.com/get-docker/
After docker installation, please follow the steps below to get docker container up and running:
1. docker pull docker pull beir/pyserini-fastapi
2. docker build -t pyserini-fastapi .
3. docker run -p 8000:8000 -it --rm pyserini-fastapi
Usage: python evaluate_deepct.py
"""
from DeepCT.deepct import run_deepct # git clone https://github.com/NThakur20/DeepCT.git
from beir import util, LoggingHandler
from beir.datasets.data_loader import GenericDataLoader
from beir.retrieval.evaluation import EvaluateRetrieval
from beir.generation.models import QGenModel
from tqdm.autonotebook import trange
import pathlib, os, json
import logging
import requests
import random
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
#### Download scifact.zip dataset and unzip the dataset
dataset = "scifact"
url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
out_dir = os.path.join(pathlib.Path(__file__).parent.absolute(), "datasets")
data_path = util.download_and_unzip(url, out_dir)
corpus, queries, qrels = GenericDataLoader(data_path).load(split="test")
#### 1. Download Google BERT-BASE, Uncased model ####
# Ref: https://github.com/google-research/bert
base_model_url = "https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip"
out_dir = os.path.join(pathlib.Path(__file__).parent.absolute(), "models")
bert_base_dir = util.download_and_unzip(base_model_url, out_dir)
#### 2. Download DeepCT MSMARCO Trained BERT checkpoint ####
# Credits to DeepCT authors: Zhuyun Dai, Jamie Callan, (https://github.com/AdeDZY/DeepCT)
model_url = "http://boston.lti.cs.cmu.edu/appendices/arXiv2019-DeepCT-Zhuyun-Dai/outputs/marco.zip"
out_dir = os.path.join(pathlib.Path(__file__).parent.absolute(), "models")
checkpoint_dir = util.download_and_unzip(model_url, out_dir)
##################################################
#### 3. Configure Params for DeepCT inference ####
##################################################
# We cannot use the original Repo (https://github.com/AdeDZY/DeepCT) as it only runs with TF 1.15.
# We reformatted the code (https://github.com/NThakur20/DeepCT) and made it working with latest TF 2.X!
if not os.path.isfile(os.path.join(data_path, "deepct.jsonl")):
################################
#### Command-Line Arugments ####
################################
run_deepct.FLAGS.task_name = "beir" # Defined a seperate BEIR task in DeepCT. Check out run_deepct.
run_deepct.FLAGS.do_train = False # We only want to use the code for inference.
run_deepct.FLAGS.do_eval = False # No evaluation.
run_deepct.FLAGS.do_predict = True # True, as we would use DeepCT model for only prediction.
run_deepct.FLAGS.data_dir = os.path.join(data_path, "corpus.jsonl") # Provide original path to corpus data, follow beir format.
run_deepct.FLAGS.vocab_file = os.path.join(bert_base_dir, "vocab.txt") # Provide bert-base-uncased model vocabulary.
run_deepct.FLAGS.bert_config_file = os.path.join(bert_base_dir, "bert_config.json") # Provide bert-base-uncased config.json file.
run_deepct.FLAGS.init_checkpoint = os.path.join(checkpoint_dir, "model.ckpt-65816") # Provide DeepCT MSMARCO model (bert-base-uncased) checkpoint file.
run_deepct.FLAGS.max_seq_length = 350 # Provide Max Sequence Length used for consideration. (Max: 512)
run_deepct.FLAGS.train_batch_size = 128 # Inference batch size, Larger more Memory but faster!
run_deepct.FLAGS.output_dir = data_path # Output directory, this will contain two files: deepct.jsonl (output-file) and predict.tf_record
run_deepct.FLAGS.output_file = "deepct.jsonl" # Output file for storing final DeepCT produced corpus.
run_deepct.FLAGS.m = 100 # Scaling parameter for DeepCT weights: scaling parameter > 0, recommend 100
run_deepct.FLAGS.smoothing = "sqrt" # Use sqrt to smooth weights. DeepCT Paper uses None.
run_deepct.FLAGS.keep_all_terms = True # Do not allow DeepCT to delete terms.
# Runs DeepCT model on the corpus.jsonl
run_deepct.main()
#### Download Docker Image beir/pyserini-fastapi ####
#### Locally run the docker Image + FastAPI ####
docker_beir_pyserini = "http://127.0.0.1:8000"
#### Upload Multipart-encoded files ####
with open(os.path.join(data_path, "deepct.jsonl"), "rb") as fIn:
r = requests.post(docker_beir_pyserini + "/upload/", files={"file": fIn}, verify=False)
#### Index documents to Pyserini #####
index_name = "beir/you-index-name" # beir/scifact
r = requests.get(docker_beir_pyserini + "/index/", params={"index_name": index_name})
######################################
#### 2. Pyserini-Retrieval (BM25) ####
######################################
#### Retrieve documents from Pyserini #####
retriever = EvaluateRetrieval()
qids = list(queries)
query_texts = [queries[qid] for qid in qids]
payload = {"queries": query_texts, "qids": qids, "k": max(retriever.k_values),
"fields": {"contents": 1.0}, "bm25": {"k1": 18, "b": 0.7}}
#### Retrieve pyserini results (format of results is identical to qrels)
results = json.loads(requests.post(docker_beir_pyserini + "/lexical/batch_search/", json=payload).text)["results"]
#### Retrieve RM3 expanded pyserini results (format of results is identical to qrels)
# results = json.loads(requests.post(docker_beir_pyserini + "/lexical/rm3/batch_search/", json=payload).text)["results"]
#### Evaluate your retrieval using NDCG@k, MAP@K ...
logging.info("Retriever evaluation for k in: {}".format(retriever.k_values))
ndcg, _map, recall, precision = retriever.evaluate(qrels, results, retriever.k_values)
#### Retrieval Example ####
query_id, scores_dict = random.choice(list(results.items()))
logging.info("Query : %s\n" % queries[query_id])
scores = sorted(scores_dict.items(), key=lambda item: item[1], reverse=True)
for rank in range(10):
doc_id = scores[rank][0]
logging.info("Doc %d: %s [%s] - %s\n" % (rank+1, doc_id, corpus[doc_id].get("title"), corpus[doc_id].get("text")))
| 56.781022 | 189 | 0.655354 |
d4a684609779826c5d7b8e2a668f0007ffd391fe | 3,018 | py | Python | Examples/Space Truss - Nodal Load.py | AmirHosseinNamadchi/PyNite | 8cc1fe3262e1efe029c6860394d2436601272e33 | [
"MIT"
] | 2 | 2022-02-26T23:11:19.000Z | 2022-02-26T23:11:21.000Z | Examples/Space Truss - Nodal Load.py | AmirHosseinNamadchi/PyNite | 8cc1fe3262e1efe029c6860394d2436601272e33 | [
"MIT"
] | null | null | null | Examples/Space Truss - Nodal Load.py | AmirHosseinNamadchi/PyNite | 8cc1fe3262e1efe029c6860394d2436601272e33 | [
"MIT"
] | 2 | 2020-08-27T15:36:42.000Z | 2020-10-02T00:29:22.000Z | # Engineering Mechanics: Statics, 4th Edition
# Bedford and Fowler
# Problem 6.64
# Units for this model are meters and kilonewtons
# Import 'FEModel3D' and 'Visualization' from 'PyNite'
from PyNite import FEModel3D
from PyNite import Visualization
# Create a new model
truss = FEModel3D()
# Define the nodes
truss.AddNode('A', 1.1, -0.4, 0)
truss.AddNode('B', 1, 0, 0)
truss.AddNode('C', 0, 0, 0.6)
truss.AddNode('D', 0, 0, -0.4)
truss.AddNode('E', 0, 0.8, 0)
# Define the supports
truss.DefineSupport('C', True, True, True, True, True, True)
truss.DefineSupport('D', True, True, True, True, True, True)
truss.DefineSupport('E', True, True, True, True, True, True)
# Create members
# Member properties were not given for this problem, so assumed values will be used
# To make all the members act rigid, the modulus of elasticity will be set to a very large value
E = 99999999
truss.AddMember('AB', 'A', 'B', E, 100, 100, 100, 100, 100)
truss.AddMember('AC', 'A', 'C', E, 100, 100, 100, 100, 100)
truss.AddMember('AD', 'A', 'D', E, 100, 100, 100, 100, 100)
truss.AddMember('BC', 'B', 'C', E, 100, 100, 100, 100, 100)
truss.AddMember('BD', 'B', 'D', E, 100, 100, 100, 100, 100)
truss.AddMember('BE', 'B', 'E', E, 100, 100, 100, 100, 100)
# Release the moments at the ends of the members to make truss members
truss.DefineReleases('AC', False, False, False, False, True, True, \
False, False, False, False, True, True)
truss.DefineReleases('AD', False, False, False, False, True, True, \
False, False, False, False, True, True)
truss.DefineReleases('BC', False, False, False, False, True, True, \
False, False, False, False, True, True)
truss.DefineReleases('BD', False, False, False, False, True, True, \
False, False, False, False, True, True)
truss.DefineReleases('BE', False, False, False, False, True, True, \
False, False, False, False, True, True)
# Add nodal loads
truss.AddNodeLoad('A', 'FX', 10)
truss.AddNodeLoad('A', 'FY', 60)
truss.AddNodeLoad('A', 'FZ', 20)
# Analyze the model
truss.Analyze()
# Print results
print('Member BC calculated axial force: ' + str(truss.GetMember('BC').MaxAxial()))
print('Member BC expected axial force: 32.7 Tension')
print('Member BD calculated axial force: ' + str(truss.GetMember('BD').MaxAxial()))
print('Member BD expected axial force: 45.2 Tension')
print('Member BE calculated axial force: ' + str(truss.GetMember('BE').MaxAxial()))
print('Member BE expected axial force: 112.1 Compression')
# Render the model for viewing. The text height will be set to 50 mm.
# Because the members in this example are nearly rigid, there will be virtually no deformation. The deformed shape won't be rendered.
# The program has created a default load case 'Case 1' and a default load combo 'Combo 1' since we didn't specify any. We'll display 'Case 1'.
Visualization.RenderModel(truss, text_height=0.05, render_loads=True, case='Case 1')
| 44.382353 | 142 | 0.674619 |
d4a6c416bd8a2d26fc2585b919cf37090ef128d8 | 322 | py | Python | Using Yagmail to make sending emails easier.py | CodeMaster7000/Sending-Emails-in-Python | 2ec44f6520a6b98508c8adf372a191f2577fbf98 | [
"MIT"
] | 1 | 2021-12-23T15:42:01.000Z | 2021-12-23T15:42:01.000Z | Using Yagmail to make sending emails easier.py | CodeMaster7000/Sending-Emails-in-Python | 2ec44f6520a6b98508c8adf372a191f2577fbf98 | [
"MIT"
] | null | null | null | Using Yagmail to make sending emails easier.py | CodeMaster7000/Sending-Emails-in-Python | 2ec44f6520a6b98508c8adf372a191f2577fbf98 | [
"MIT"
] | null | null | null | import yagmail
receiver = "[email protected]" #Receiver's gmail address
body = "Hello there from Yagmail"
filename = "document.pdf"
yag = yagmail.SMTP("[email protected]")#Your gmail address
yag.send(
to=receiver,
subject="Yagmail test (attachment included",
contents=body,
attachments=filename,
)
| 23 | 54 | 0.689441 |
d4a6cec9904df1ff0e2230e88f7f8978eeccd5f8 | 5,064 | py | Python | pycad/py_src/transformations.py | markendr/esys-escript.github.io | 0023eab09cd71f830ab098cb3a468e6139191e8d | [
"Apache-2.0"
] | null | null | null | pycad/py_src/transformations.py | markendr/esys-escript.github.io | 0023eab09cd71f830ab098cb3a468e6139191e8d | [
"Apache-2.0"
] | null | null | null | pycad/py_src/transformations.py | markendr/esys-escript.github.io | 0023eab09cd71f830ab098cb3a468e6139191e8d | [
"Apache-2.0"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2003-2020 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
# Development from 2019 by School of Earth and Environmental Sciences
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2020 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
transformations
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
:var DEG: unit of degree
:var RAD: unit of radiant
"""
__author__="Lutz Gross, [email protected]"
import numpy
import math
_TYPE=numpy.float64
DEG=math.pi/180.
RAD=1.
def _cross(x, y):
    """
    Returns the cross product of ``x`` and ``y`` as a numpy array of
    dtype ``_TYPE``.

    ``x`` and ``y`` are any 3-component sequences.
    """
    cx = x[1] * y[2] - x[2] * y[1]
    cy = x[2] * y[0] - x[0] * y[2]
    cz = x[0] * y[1] - x[1] * y[0]
    return numpy.array([cx, cy, cz], _TYPE)
| 29.788235 | 124 | 0.610585 |
d4a6efea0d126676c34a41838cc4fe1e41395646 | 1,116 | py | Python | example/complex_scalar_star_solver.py | ThomasHelfer/BosonStar | 5442a6e6171122a3ba1d6b079e6483ab72aa7338 | [
"MIT"
] | 2 | 2021-04-07T13:20:11.000Z | 2021-04-07T17:11:25.000Z | example/complex_scalar_star_solver.py | ThomasHelfer/BosonStar | 5442a6e6171122a3ba1d6b079e6483ab72aa7338 | [
"MIT"
] | 1 | 2021-06-14T15:40:25.000Z | 2021-06-14T15:40:25.000Z | example/complex_scalar_star_solver.py | ThomasHelfer/BosonStar | 5442a6e6171122a3ba1d6b079e6483ab72aa7338 | [
"MIT"
] | null | null | null | from bosonstar.ComplexBosonStar import Complex_Boson_Star
# =====================
# All imporntnat definitions
# =====================
# Physics defintions
phi0 = 0.40 # centeral phi
D = 5.0 # Dimension (total not only spacial)
Lambda = -0.2 # Cosmological constant
# Solver definitions
Rstart = 3
Rend = 50.00
deltaR = 1
N = 100000
e_pow_minus_delta_guess = 0.4999
verbose = 2
eps = 1e-10 # Small epsilon to avoid r \neq 0
# ====================================
# Main routine
# ====================================
pewpew = Complex_Boson_Star(e_pow_minus_delta_guess, phi0, D, Lambda, verbose)
pewpew.print_parameters()
alpha0 = pewpew.radial_walker(Rstart, Rend, deltaR, N, eps)
# =====================================
# Output and plotting
# =====================================
soldict = pewpew.get_solution()
# Makes sure that lapse goes asymptotically to 1
# (Not an essential step, but recommended)
pewpew.normalise_edelta()
pewpew.check_Einstein_equation()
# ===============================
path = pewpew.get_path()
pewpew.plot_solution()
pewpew.print_solution()
| 24.26087 | 78 | 0.580645 |
d4a71c335f605cc7723cb3705f2699bfe1e1693b | 796 | py | Python | setup.py | ouyhlan/fastNLP | cac13311e28c1e8e3c866d50656173650eb5c7a1 | [
"Apache-2.0"
] | 2,693 | 2018-03-08T03:09:20.000Z | 2022-03-30T07:38:42.000Z | setup.py | ouyhlan/fastNLP | cac13311e28c1e8e3c866d50656173650eb5c7a1 | [
"Apache-2.0"
] | 291 | 2018-07-21T07:43:17.000Z | 2022-03-07T13:06:58.000Z | setup.py | ouyhlan/fastNLP | cac13311e28c1e8e3c866d50656173650eb5c7a1 | [
"Apache-2.0"
] | 514 | 2018-03-09T06:54:25.000Z | 2022-03-26T20:11:44.000Z | #!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('LICENSE', encoding='utf-8') as f:
license = f.read()
with open('requirements.txt', encoding='utf-8') as f:
reqs = f.read()
pkgs = [p for p in find_packages() if p.startswith('fastNLP')]
print(pkgs)
setup(
name='FastNLP',
version='0.7.0',
url='https://gitee.com/fastnlp/fastNLP',
description='fastNLP: Deep Learning Toolkit for NLP, developed by Fudan FastNLP Team',
long_description=readme,
long_description_content_type='text/markdown',
license='Apache License',
author='Fudan FastNLP Team',
python_requires='>=3.6',
packages=pkgs,
install_requires=reqs.strip().split('\n'),
)
| 26.533333 | 90 | 0.675879 |
d4a7c3329ad1568f426144783b7f79e1a58585b3 | 855 | py | Python | clients/client/python/ory_client/__init__.py | ory/sdk-generator | 958314d130922ad6f20f439b5230141a832231a5 | [
"Apache-2.0"
] | null | null | null | clients/client/python/ory_client/__init__.py | ory/sdk-generator | 958314d130922ad6f20f439b5230141a832231a5 | [
"Apache-2.0"
] | null | null | null | clients/client/python/ory_client/__init__.py | ory/sdk-generator | 958314d130922ad6f20f439b5230141a832231a5 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
"""
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.187
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
__version__ = "v0.0.1-alpha.187"
# import ApiClient
from ory_client.api_client import ApiClient
# import Configuration
from ory_client.configuration import Configuration
# import exceptions
from ory_client.exceptions import OpenApiException
from ory_client.exceptions import ApiAttributeError
from ory_client.exceptions import ApiTypeError
from ory_client.exceptions import ApiValueError
from ory_client.exceptions import ApiKeyError
from ory_client.exceptions import ApiException
| 29.482759 | 194 | 0.803509 |
d4a7d95a9f223064052da15a9a7a9eecfe46cfa7 | 3,810 | py | Python | atmosphere/custom_activity/base_class.py | ambiata/atmosphere-python-sdk | 48880a8553000cdea59d63b0fba49e1f0f482784 | [
"MIT"
] | null | null | null | atmosphere/custom_activity/base_class.py | ambiata/atmosphere-python-sdk | 48880a8553000cdea59d63b0fba49e1f0f482784 | [
"MIT"
] | 9 | 2021-02-21T21:53:03.000Z | 2021-11-05T06:06:55.000Z | atmosphere/custom_activity/base_class.py | ambiata/atmosphere-python-sdk | 48880a8553000cdea59d63b0fba49e1f0f482784 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Tuple
from requests import Response
from .pydantic_models import (AppliedExclusionConditionsResponse,
BiasAttributeConfigListResponse,
ComputeRewardResponse, DefaultPredictionResponse,
ExclusionRuleConditionListResponse,
PredictionResponsePayloadFormatListResponse)
    def format_prediction_payload_response(
        self,
        default_prediction_response: DefaultPredictionResponse,
        payload_format: str,  # noqa pylint: disable=unused-argument
    ) -> dict:
        """
        Hook for formatting a prediction response.

        Override to reshape the prediction based on ``payload_format``.
        The default implementation ignores ``payload_format`` and returns
        ``default_prediction_response`` unchanged.
        NOTE(review): the default return value is a DefaultPredictionResponse,
        not a dict as the annotation claims — confirm intended contract.
        """
        return default_prediction_response
    def get_exclusion_rule_conditions(self) -> ExclusionRuleConditionListResponse:
        """
        Define the exclusion rules for the activity.

        The default is an empty list, i.e. no exclusion rules.
        """
        return ExclusionRuleConditionListResponse(exclusion_rule_conditions=[])
    def get_applied_exclusion_conditions(
        self, prediction_request: dict  # noqa pylint: disable=unused-argument
    ) -> AppliedExclusionConditionsResponse:
        """
        Report which exclusion conditions apply to the given request.

        The default implementation ignores ``prediction_request`` and
        reports no applied conditions.
        """
        return AppliedExclusionConditionsResponse(applied_exclusion_conditions=[])
    def get_bias_attribute_configs(self) -> BiasAttributeConfigListResponse:
        """
        Define the bias attribute configs; these decide which attributes may
        be used by atmospherex as bias attributes.

        The default is an empty list, i.e. no bias attributes.
        """
        return BiasAttributeConfigListResponse(bias_attribute_configs=[])
| 36.634615 | 87 | 0.684777 |
d4a7f2382cdb35d8940e5dd478b2dac3b5b10bd0 | 752 | py | Python | Module1/file3.py | modulo16/PfNE | 9706afc42c44dcfd1490e5ac074156f41e5515a8 | [
"Unlicense"
] | null | null | null | Module1/file3.py | modulo16/PfNE | 9706afc42c44dcfd1490e5ac074156f41e5515a8 | [
"Unlicense"
] | null | null | null | Module1/file3.py | modulo16/PfNE | 9706afc42c44dcfd1490e5ac074156f41e5515a8 | [
"Unlicense"
] | null | null | null | from __future__ import print_function, unicode_literals
#Ensures Unicode is used for all strings.
my_str = 'whatever'
#Shows the String type, which should be unicode
type(my_str)
#declare string:
ip_addr = '192.168.1.1'
#check it with boolean:(True)
ip_addr == '192.168.1.1'
#(false)
ip_addr == '10.1.1.1'
#is this substring in this variable?
'192.168' in ip_addr
'1.1' in ip_addr
'15.1' not in ip_addr
#Strings also have indices starting at '0'
#in the case below we get '1' which is the first character
ip_addr[0]
#we can also get the last using negative notation. The follow gets the last:
ip_addr[-1]
#second to last:
ip_addr[-2]
#show length of string:
len(ip_addr)
#Example string concatenation
my_str = 'Hello'
my_str + ' something'
| 18.8 | 76 | 0.731383 |
d4acef5631789f4b877955db52847e8e212a8725 | 10,411 | py | Python | pp_io_plugins/pp_kbddriver_plus.py | arcticmatter/pipresents-beep | e5945f929b47249f19b0cb3433a138e874b592db | [
"CNRI-Python",
"CECILL-B"
] | null | null | null | pp_io_plugins/pp_kbddriver_plus.py | arcticmatter/pipresents-beep | e5945f929b47249f19b0cb3433a138e874b592db | [
"CNRI-Python",
"CECILL-B"
] | null | null | null | pp_io_plugins/pp_kbddriver_plus.py | arcticmatter/pipresents-beep | e5945f929b47249f19b0cb3433a138e874b592db | [
"CNRI-Python",
"CECILL-B"
] | null | null | null | #enhanced keyboard driver
import copy
import os
import configparser
from pp_displaymanager import DisplayManager
if __name__ == '__main__':
from tkinter import *
root = Tk()
w = Label(root, text="pp_kbddriver_plus.py test harness")
w.pack()
idd=pp_kbddriver_plus()
reason,message=idd.init('pp_kbddriver_plus.cfg','/home/pi/pipresents/pp_io_config/keys_plus.cfg',root,key_callback)
print(reason,message)
if reason != 'error':
idd.start()
root.mainloop()
| 41.979839 | 134 | 0.589761 |
d4ade5ab9af89265fbd2d849b58156e138f3d82c | 452 | py | Python | grocery/migrations/0003_alter_item_comments.py | akshay-kapase/shopping | 7bf3bac4a78d07bca9a9f9d44d85e11bb826a366 | [
"MIT"
] | null | null | null | grocery/migrations/0003_alter_item_comments.py | akshay-kapase/shopping | 7bf3bac4a78d07bca9a9f9d44d85e11bb826a366 | [
"MIT"
] | null | null | null | grocery/migrations/0003_alter_item_comments.py | akshay-kapase/shopping | 7bf3bac4a78d07bca9a9f9d44d85e11bb826a366 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-09-03 15:48
from django.db import migrations, models
| 22.6 | 79 | 0.606195 |
d4ae07ad4070643d0ba3b0f74c8b5ba6215fad3c | 2,770 | py | Python | projects/objects/buildings/protos/textures/colored_textures/textures_generator.py | yjf18340/webots | 60d441c362031ab8fde120cc0cd97bdb1a31a3d5 | [
"Apache-2.0"
] | 1 | 2019-11-13T08:12:02.000Z | 2019-11-13T08:12:02.000Z | projects/objects/buildings/protos/textures/colored_textures/textures_generator.py | chinakwy/webots | 7c35a359848bafe81fe0229ac2ed587528f4c73e | [
"Apache-2.0"
] | null | null | null | projects/objects/buildings/protos/textures/colored_textures/textures_generator.py | chinakwy/webots | 7c35a359848bafe81fe0229ac2ed587528f4c73e | [
"Apache-2.0"
] | 1 | 2020-09-25T02:01:45.000Z | 2020-09-25T02:01:45.000Z | #!/usr/bin/env python
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate textures prepared for OSM, based on image templates."""
import glob
import os
from PIL import Image
# change directory to this script directory in order to allow this script
# to be called from another directory.
os.chdir(os.path.dirname(os.path.realpath(__file__)))

# get all the template files and put them in a list of
# (diffuse, color-mask) tuples
templates = []
for f in glob.glob("*_diffuse_template.jpg"):
    templates.append((f, f.replace('_diffuse_', '_color_mask_')))

# target colors
# ref: http://wiki.openstreetmap.org/wiki/Key:colour
# TODO: is it sufficient?
colors = {
    '000000': (0.0, 0.0, 0.0),
    'FFFFFF': (0.84, 0.84, 0.84),
    '808080': (0.4, 0.4, 0.4),
    'C0C0C0': (0.65, 0.65, 0.65),
    '800000': (0.4, 0.15, 0.15),
    'FF0000': (0.45, 0.0, 0.0),
    '808000': (0.4, 0.4, 0.2),
    'FFFF00': (0.7, 0.6, 0.15),
    '008000': (0.15, 0.3, 0.15),
    '00FF00': (0.55, 0.69, 0.52),
    '008080': (0.15, 0.3, 0.3),
    '00FFFF': (0.6, 0.7, 0.7),
    '000080': (0.2, 0.2, 0.3),
    '0000FF': (0.4, 0.4, 0.75),
    '800080': (0.5, 0.4, 0.5),
    'FF00FF': (0.9, 0.75, 0.85),
    'F5DEB3': (0.83, 0.78, 0.65),
    '8B4513': (0.3, 0.1, 0.05)
}

effectFactor = 0.5  # power of the effect, found empirically

# foreach template
for template in templates:
    # load the templates
    diffuse = Image.open(template[0])
    mask = Image.open(template[1])
    assert diffuse.size == mask.size
    width, height = diffuse.size
    # create an image per color
    # (dict.items(): the original used dict.iteritems(), Python 2 only)
    for colorString, color in colors.items():
        image = Image.new('RGB', diffuse.size)
        pixels = image.load()
        # PIL indexes pixels as (x, y) with x < width and y < height;
        # the original looped x over `height` and y over `width`, which
        # raises IndexError for any non-square template.
        for x in range(width):
            for y in range(height):
                dR, dG, dB = diffuse.getpixel((x, y))
                mR, mG, mB = mask.getpixel((x, y))
                # shift each channel by the mask intensity, signed by how far
                # the target color component is from mid-gray
                r = dR + int(255.0 * (mR / 255.0) * (color[0] * 2.0 - 1.0) * effectFactor)
                g = dG + int(255.0 * (mG / 255.0) * (color[1] * 2.0 - 1.0) * effectFactor)
                b = dB + int(255.0 * (mB / 255.0) * (color[2] * 2.0 - 1.0) * effectFactor)
                pixels[x, y] = (r, g, b)
        image.save(template[0].replace('_diffuse_template', '_' + colorString))
| 35.063291 | 110 | 0.605415 |
d4af2a44bf54fabe00a0ec0f2c572fb0bf043633 | 92 | py | Python | tutorial/test_env.py | viz4biz/PyDataNYC2015 | 066154ea9f1837c355e6108a28b85889f3020da3 | [
"Apache-2.0"
] | 11 | 2015-11-11T13:57:21.000Z | 2019-08-14T15:53:43.000Z | tutorial/test_env.py | viz4biz/PyDataNYC2015 | 066154ea9f1837c355e6108a28b85889f3020da3 | [
"Apache-2.0"
] | null | null | null | tutorial/test_env.py | viz4biz/PyDataNYC2015 | 066154ea9f1837c355e6108a28b85889f3020da3 | [
"Apache-2.0"
] | 6 | 2015-11-11T13:57:25.000Z | 2018-09-12T07:53:03.000Z | """
test local env
"""
import os
for k, v in os.environ.iteritems():
print k, '=', v
| 9.2 | 35 | 0.565217 |
d4b0acbd3ae55e6638c516e22ca4f69932aebab2 | 27,844 | py | Python | project2/marriage.py | filipefborba/MarriageNSFG | d550301fbb9d80ddabf391a6168d2c8636113ed9 | [
"MIT"
] | null | null | null | project2/marriage.py | filipefborba/MarriageNSFG | d550301fbb9d80ddabf391a6168d2c8636113ed9 | [
"MIT"
] | null | null | null | project2/marriage.py | filipefborba/MarriageNSFG | d550301fbb9d80ddabf391a6168d2c8636113ed9 | [
"MIT"
] | null | null | null | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import bisect
import numpy as np
import pandas as pd
import scipy.stats
import gzip
import matplotlib.pyplot as plt
from collections import defaultdict
from collections import OrderedDict
from collections import Counter
import thinkstats2
import thinkplot
import survival
def ResampleResps(resps, remove_missing=False, jitter=0):
    """Resamples each dataframe and then concats them.

    resps: list of DataFrame
    remove_missing: whether to drop married respondents whose marriage
        date is unknown
    jitter: half-width (in years) of uniform noise added to ages;
        0 disables jittering

    returns: DataFrame
    """
    # we have to resample the data from each cycle separately
    samples = [ResampleRowsWeighted(resp) for resp in resps]

    # then join the cycles into one big sample
    sample = pd.concat(samples, ignore_index=True, sort=False)

    # remove married people with unknown marriage dates
    if remove_missing:
        sample = sample[~sample.missing]

    # jittering the ages reflects the idea that the resampled people
    # are not identical to the actual respondents
    if jitter:
        Jitter(sample, 'age', jitter=jitter)
        Jitter(sample, 'agemarry', jitter=jitter)
        # re-bin the jittered ages so the index columns stay consistent.
        # (The original called DigitizeResp(resp), a NameError under
        # Python 3 because the comprehension variable does not leak.)
        DigitizeResp(sample)

    return sample
def ResampleRowsWeighted(df, column='finalwgt'):
    """Resamples the rows in df in accordance with a weight column.

    df: DataFrame
    column: string name of the column holding the sampling weights
        (the original ignored this parameter and hard-coded 'finalwgt')

    returns: DataFrame with the same number of rows, drawn with replacement
    """
    weights = df[column].copy()
    weights /= sum(weights)  # normalize so the weights form a distribution
    indices = np.random.choice(df.index, len(df), replace=True, p=weights)
    return df.loc[indices]
def Jitter(df, column, jitter=1):
    """Adds random noise to a column, in place.

    df: DataFrame
    column: string column name
    jitter: half-width of the uniform noise (also its standard scale)
    """
    noise = np.random.uniform(-jitter, jitter, size=len(df))
    df[column] = df[column] + noise
def EstimateSurvival(resp, cutoff=None):
    """Estimates the survival curve.

    resp: DataFrame of respondents
    cutoff: where to truncate the estimated functions

    returns: pair of HazardFunction, SurvivalFunction
    """
    # completed events use complete_var; censored cases use ongoing_var
    finished = resp.loc[resp.complete, 'complete_var'].dropna()
    unfinished = resp.loc[~resp.complete, 'ongoing_var'].dropna()

    hazard = survival.EstimateHazardFunction(finished, unfinished)
    if cutoff:
        hazard.Truncate(cutoff)

    return hazard, hazard.MakeSurvival()
def PropensityMatch(target, group, colname='agemarry'):
    """Choose a random subset of `group` that matches `target` propensities.

    For each row of `target`, one row of `group` is drawn at random with
    probability proportional to a unit Gaussian kernel centered on the
    target's propensity score, so the returned subset approximately matches
    the distribution of scores in `target`.

    (The original referenced an undefined `ChooseIndex` helper and left
    `rv` unused; this reconstructs the intended kernel-weighted sampling.)

    target: DataFrame
    group: DataFrame
    colname: string name of column with propensity scores

    returns: DataFrame with len(target) rows sampled from `group`
    """
    rv = scipy.stats.norm(scale=1)
    # missing scores are pushed far away so they are (almost) never matched
    values = group[colname].fillna(100)

    def ChooseIndex(value):
        # weight each candidate by the kernel density at the target value
        weights = rv.pdf(values - value)
        total = weights.sum()
        if total == 0:
            # no candidate anywhere near this value; fall back to uniform
            return np.random.choice(values.index)
        return np.random.choice(values.index, p=weights / total)

    indices = [ChooseIndex(value) for value in target[colname].fillna(100)]
    return group.loc[indices]
def EstimateSurvivalByCohort(resps, iters=101,
                             cutoffs=None, predict_flag=False,
                             prop_match=None, error_rate=0):
    """Makes survival curves for resampled data.

    resps: list of DataFrames
    iters: number of resamples to plot
    cutoffs: map from cohort to the first unreliable age_index
    predict_flag: whether to also plot predictions
    prop_match: cohort key whose propensity distribution the other
        cohorts are matched against (None disables matching)
    error_rate: probability of randomly flagging each response as missing

    returns: map from group name to list of survival functions
    """
    if cutoffs == None:
        cutoffs = {}

    sf_map = defaultdict(list)

    # iters is the number of resampling runs to make
    for i in range(iters):
        sample = ResampleResps(resps)

        # group by decade
        grouped = sample.groupby('birth_index')
        if prop_match:
            last = grouped.get_group(prop_match)

        # and estimate (hf, sf) for each group
        hf_map = OrderedDict()
        for name, group in iter(grouped):
            if prop_match:
                group = PropensityMatch(last, group)

            if error_rate:
                # inject synthetic missing-data flags for sensitivity tests
                AddErrors(group, 'complete_missing', error_rate)
                AddErrors(group, 'ongoing_missing', error_rate)

            # the amount of missing data is small; I think it is better
            # to drop it than to fill with random data
            #FillMissingColumn(group, 'complete_var', 'complete_missing')
            #FillMissingColumn(group, 'ongoing_var', 'ongoing_missing')

            # cutoff defaults to 100, i.e. effectively no truncation
            cutoff = cutoffs.get(name, 100)
            hf_map[name] = EstimateSurvival(group, cutoff)

        # make predictions if desired
        if predict_flag:
            MakePredictions(hf_map)

        # extract the sf from each pair and accumulate the results
        for name, (hf, sf) in hf_map.items():
            sf_map[name].append(sf)

    return sf_map
def AddErrors(group, colname, error_rate):
    """Overwrites `colname` with random boolean flags at the given rate.

    NOTE: This will not work if there are actual missing values!

    group: DataFrame, modified in place
    colname: string name of the flag column to (over)write
    error_rate: float probability that each row is flagged True
    """
    flags = np.random.random(len(group)) < error_rate
    group[colname] = flags
def FillMissingColumn(group, colname, missing_colname):
    """Fills missing values of the given column, in place.

    NaNs in `colname` for rows flagged in `missing_colname` are replaced
    with values drawn (with replacement) from the observed values.

    group: DataFrame
    colname: string
    missing_colname: string name of the boolean flag column
    """
    null = group[group[missing_colname]]
    if len(null) == 0:
        return

    # print(len(null), len(group))
    valid = group[colname].dropna()
    fill = valid.sample(len(null), replace=True)
    fill.index = null.index

    # assign the filled column back instead of the original chained
    # `group[colname].fillna(fill, inplace=True)`, which may silently
    # operate on a copy in newer versions of pandas
    group[colname] = group[colname].fillna(fill)
def PlotSurvivalFunctions(sf_map, predict_flag=False, colormap=None):
    """Plot estimated survival functions.

    sf_map: map from group name to sequence of survival functions
    predict_flag: whether the lines are predicted or actual
    colormap: map from group name to color
    """
    for name, sf_seq in sorted(sf_map.items(), reverse=True):
        if not sf_seq:
            continue

        if len(sf_seq[0]) == 0:
            continue

        ts, rows = MakeSurvivalCI(sf_seq, [10, 50, 90])
        # shaded 10th-90th percentile band
        thinkplot.FillBetween(ts, rows[0], rows[2], color='gray', alpha=0.2)

        if predict_flag:
            continue

        # median line, labeled by decade of birth
        label = '%ds' % name
        if colormap:
            thinkplot.Plot(ts, rows[1], label=label, color=colormap[name])
        else:
            thinkplot.Plot(ts, rows[1], label=label)
def MakePredictions(hf_map):
    """Extends a set of hazard functions and recomputes survival functions.

    For each group in hf_map (in sorted key order), the hazard function is
    extended with data from the previous, already-extended cohort, and the
    survival function is recomputed.

    hf_map: map from group name to (HazardFunction, SurvivalFunction)
    """
    prev_hf = None
    for name in sorted(hf_map):
        hf, _ = hf_map[name]
        # borrow data from the previous cohort, which has itself already
        # been extended, so information cascades forward
        if prev_hf is not None:
            hf.Extend(prev_hf)
        hf_map[name] = hf, hf.MakeSurvival()
        prev_hf = hf
def MakeSurvivalCI(sf_seq, percents):
    """Makes confidence intervals from a list of survival functions.

    sf_seq: list of SurvivalFunction
    percents: list of percentiles to select, like [5, 95]

    returns: (ts, rows) where ts is a sequence of times and
             rows contains one row of values for each percent
    """
    # union of all times where any sf is evaluated
    all_ts = set()
    for sf in sf_seq:
        all_ts.update(sf.ts)
    ts = sorted(all_ts)

    # evaluate each non-empty sf at every time
    ss_seq = [sf.Probs(ts) for sf in sf_seq if len(sf) > 0]

    # select the requested percentile rows across the resampled curves
    rows = thinkstats2.PercentileRows(ss_seq, percents)
    return ts, rows
def ReadFemResp1982():
    """Reads respondent data from NSFG Cycle 3 (1982).

    returns: DataFrame with derived marital-status columns, cleaned
    by CleanResp.
    """
    dat_file = '1982NSFGData.dat.gz'
    names = ['finalwgt', 'ageint', 'mar2p', 'cmmarrhx', 'fmarital',
             'cmintvw', 'cmbirth', 'f18m1', 'cmdivorcx', 'cmstphsbx', 'fmarno']
    # column positions are written (start-1, end) to convert the codebook's
    # 1-based column numbers to 0-based half-open spans for read_fwf
    colspecs = [(976-1, 982),
                (1001-1, 1002),
                (1268-1, 1271),
                (1037-1, 1040),
                (1041-1, 1041),
                (841-1, 844),
                (12-1, 15),
                (606-1, 606),
                (619-1, 622),
                (625-1, 628),
                (1142-1, 1143),
                ]

    df = pd.read_fwf(dat_file,
                     colspecs=colspecs,
                     names=names,
                     header=None,
                     nrows=7969,
                     compression='gzip')

    # 9797/9898/9999 are sentinel codes for missing/unknown dates
    df.cmintvw.replace([9797, 9898, 9999], np.nan, inplace=True)
    df.cmbirth.replace([9797, 9898, 9999], np.nan, inplace=True)
    df.cmmarrhx.replace([9797, 9898, 9999], np.nan, inplace=True)
    df.cmdivorcx.replace([9797, 9898, 9999], np.nan, inplace=True)
    df.cmstphsbx.replace([9797, 9898, 9999], np.nan, inplace=True)
    df.f18m1.replace([7, 8, 9], np.nan, inplace=True)

    # CM values above 9000 indicate month unknown
    df.loc[df.cmintvw>9000, 'cmintvw'] -= 9000
    df.loc[df.cmbirth>9000, 'cmbirth'] -= 9000
    df.loc[df.cmmarrhx>9000, 'cmmarrhx'] -= 9000
    df.loc[df.cmdivorcx>9000, 'cmdivorcx'] -= 9000
    df.loc[df.cmstphsbx>9000, 'cmstphsbx'] -= 9000

    # derived flags used by the survival analysis
    # (f18m1 codes: 3=widowed, 4=divorced, 5=separated — inferred from
    # these assignments; verify against the Cycle 3 codebook)
    df['evrmarry'] = (df.fmarno > 0)
    df['divorced'] = (df.f18m1 == 4)
    df['separated'] = (df.f18m1 == 5)
    df['widowed'] = (df.f18m1 == 3)
    df['stillma'] = (df.fmarno==1) & (df.fmarital==1)

    df['cycle'] = 3

    CleanResp(df)
    return df
def ReadFemResp1988():
    """Reads respondent data from NSFG Cycle 4 (1988).

    Reads the file as a standard fixed-width ascii file.

    returns: DataFrame
    """
    filename = '1988FemRespDataLines.dat.gz'
    names = ['finalwgt', 'ageint', 'currentcm',
             'firstcm', 'cmintvw', 'cmbirth',
             'f23m1', 'cmdivorcx', 'cmstphsbx', 'fmarno']
    # (start-1, end): codebook columns converted to 0-based spans
    colspecs = [(2568-1, 2574),
                (36-1, 37),
                (1521-1, 1525),
                (1538-1, 1542),
                (12-1, 16),
                (26-1, 30),
                (1554-1, 1554),
                (1565-1, 1569),
                (1570-1, 1574),
                (2441-1, 2442),
                ]

    df = pd.read_fwf(filename,
                     colspecs=colspecs,
                     names=names,
                     header=None,
                     compression='gzip')

    # 0 and 99999 are sentinel codes for missing dates in this cycle
    df.cmintvw.replace([0, 99999], np.nan, inplace=True)
    df.cmbirth.replace([0, 99999], np.nan, inplace=True)
    df.firstcm.replace([0, 99999], np.nan, inplace=True)
    df.currentcm.replace([0, 99999], np.nan, inplace=True)
    df.cmdivorcx.replace([0, 99999], np.nan, inplace=True)
    df.cmstphsbx.replace([0, 99999], np.nan, inplace=True)

    # CM values above 9000 indicate month unknown
    df.loc[df.cmintvw>90000, 'cmintvw'] -= 90000
    df.loc[df.cmbirth>90000, 'cmbirth'] -= 90000
    df.loc[df.firstcm>90000, 'firstcm'] -= 90000
    df.loc[df.currentcm>90000, 'currentcm'] -= 90000
    df.loc[df.cmdivorcx>90000, 'cmdivorcx'] -= 90000
    df.loc[df.cmstphsbx>90000, 'cmstphsbx'] -= 90000

    # combine current and first marriage
    df['cmmarrhx'] = df.firstcm
    df.cmmarrhx.fillna(df.currentcm, inplace=True)

    # define evrmarry if either currentcm or firstcm is non-zero
    df['evrmarry'] = (df.fmarno > 0)

    # f23m1 codes: 1=widowed, 2=divorced, 3=separated (per these flags)
    df['divorced'] = (df.f23m1==2)
    df['separated'] = (df.f23m1==3)
    df['widowed'] = (df.f23m1==1)
    df['stillma'] = (df.fmarno==1) & (df.f23m1.isnull())

    df['cycle'] = 4

    CleanResp(df)
    return df
def ReadFemResp1995():
    """Reads respondent data from NSFG Cycle 5 (1995).

    returns: DataFrame
    """
    dat_file = '1995FemRespData.dat.gz'
    names = ['cmintvw', 'timesmar', 'cmmarrhx', 'cmbirth', 'finalwgt',
             'marend01', 'cmdivorcx', 'cmstphsbx', 'marstat']

    # (start-1, end): codebook columns converted to 0-based spans
    colspecs = [(12360-1, 12363),
                (4637-1, 4638),
                (11759-1, 11762),
                (14-1, 16),
                (12350-1, 12359),
                (4713-1, 4713),
                (4718-1, 4721),
                (4722-1, 4725),
                (17-1, 17)]

    df = pd.read_fwf(dat_file,
                     compression='gzip',
                     colspecs=colspecs,
                     names=names)

    # sentinel codes for "not ascertained / refused / don't know"
    invalid = [9997, 9998, 9999]
    df.cmintvw.replace(invalid, np.nan, inplace=True)
    df.cmbirth.replace(invalid, np.nan, inplace=True)
    df.cmmarrhx.replace(invalid, np.nan, inplace=True)
    df.cmdivorcx.replace(invalid, np.nan, inplace=True)
    df.cmstphsbx.replace(invalid, np.nan, inplace=True)
    df.timesmar.replace([98, 99], np.nan, inplace=True)

    # derived marital-status flags; marend01 codes: 1=divorced,
    # 2=separated, 3=widowed (per these assignments)
    df['evrmarry'] = (df.timesmar > 0)
    df['divorced'] = (df.marend01==1)
    df['separated'] = (df.marend01==2)
    df['widowed'] = (df.marend01==3)
    df['stillma'] = (df.timesmar==1) & (df.marend01.isnull())

    df['cycle'] = 5

    CleanResp(df)
    return df
def ReadFemResp2002():
    """Reads respondent data from NSFG Cycle 6 (2002).

    returns: DataFrame
    """
    usecols = ['caseid', 'cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'parity', 'finalwgt',
               'mardat01', 'marend01', 'mardis01', 'rmarital',
               'fmarno', 'mar1diss']

    df = ReadResp('2002FemResp.dct', '2002FemResp.dat.gz', usecols=usecols)

    # sentinel codes for "not ascertained / refused / don't know"
    invalid = [9997, 9998, 9999]
    df.cmintvw.replace(invalid, np.nan, inplace=True)
    df.cmbirth.replace(invalid, np.nan, inplace=True)
    df.cmmarrhx.replace(invalid, np.nan, inplace=True)

    # derived marital-status flags used by the survival analysis
    df['evrmarry'] = (df.evrmarry==1)
    df['divorced'] = (df.marend01==1)
    df['separated'] = (df.marend01==2)
    df['widowed'] = (df.marend01==3)
    df['stillma'] = (df.fmarno == 1) & (df.rmarital==1)

    df['cycle'] = 6

    CleanResp(df)
    return df
def ReadFemResp2010():
    """Reads respondent data from NSFG Cycle 7 (2006-2010).

    returns: DataFrame
    """
    usecols = ['caseid', 'cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'parity', 'wgtq1q16',
               'mardat01', 'marend01', 'mardis01', 'rmarital',
               'fmarno', 'mar1diss']

    df = ReadResp('2006_2010_FemRespSetup.dct',
                  '2006_2010_FemResp.dat.gz',
                  usecols=usecols)

    # sentinel codes for "not ascertained / refused / don't know"
    invalid = [9997, 9998, 9999]
    df.cmintvw.replace(invalid, np.nan, inplace=True)
    df.cmbirth.replace(invalid, np.nan, inplace=True)
    df.cmmarrhx.replace(invalid, np.nan, inplace=True)

    df['evrmarry'] = (df.evrmarry==1)
    df['divorced'] = (df.marend01==1)
    df['separated'] = (df.marend01==2)
    df['widowed'] = (df.marend01==3)
    df['stillma'] = (df.fmarno == 1) & (df.rmarital==1)

    # normalize the cycle-specific weight column name
    df['finalwgt'] = df.wgtq1q16

    df['cycle'] = 7

    CleanResp(df)
    return df
def ReadFemResp2013():
    """Reads respondent data from NSFG Cycle 8 (2011-2013).

    returns: DataFrame
    """
    usecols = ['caseid', 'cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'parity', 'wgt2011_2013',
               'mardat01', 'marend01', 'mardis01', 'rmarital',
               'fmarno', 'mar1diss']

    df = ReadResp('2011_2013_FemRespSetup.dct',
                  '2011_2013_FemRespData.dat.gz',
                  usecols=usecols)

    # sentinel codes for "not ascertained / refused / don't know"
    invalid = [9997, 9998, 9999]
    df.cmintvw.replace(invalid, np.nan, inplace=True)
    df.cmbirth.replace(invalid, np.nan, inplace=True)
    df.cmmarrhx.replace(invalid, np.nan, inplace=True)

    df['evrmarry'] = (df.evrmarry==1)
    df['divorced'] = (df.marend01==1)
    df['separated'] = (df.marend01==2)
    df['widowed'] = (df.marend01==3)
    df['stillma'] = (df.fmarno == 1) & (df.rmarital==1)

    # normalize the cycle-specific weight column name
    df['finalwgt'] = df.wgt2011_2013

    df['cycle'] = 8

    CleanResp(df)
    return df
def ReadFemResp2015():
    """Reads respondent data from NSFG Cycle 9 (2013-2015).

    returns: DataFrame
    """
    usecols = ['caseid', 'cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
               'evrmarry', 'parity', 'wgt2013_2015',
               'mardat01', 'marend01', 'mardis01', 'rmarital',
               'fmarno', 'mar1diss']

    df = ReadResp('2013_2015_FemRespSetup.dct',
                  '2013_2015_FemRespData.dat.gz',
                  usecols=usecols)

    # sentinel codes for "not ascertained / refused / don't know"
    invalid = [9997, 9998, 9999]
    df.cmintvw.replace(invalid, np.nan, inplace=True)
    df.cmbirth.replace(invalid, np.nan, inplace=True)
    df.cmmarrhx.replace(invalid, np.nan, inplace=True)

    df['evrmarry'] = (df.evrmarry==1)
    df['divorced'] = (df.marend01==1)
    df['separated'] = (df.marend01==2)
    df['widowed'] = (df.marend01==3)
    df['stillma'] = (df.fmarno == 1) & (df.rmarital==1)

    # normalize the cycle-specific weight column name
    df['finalwgt'] = df.wgt2013_2015

    df['cycle'] = 9

    CleanResp(df)
    return df
def ReadFemResp2017():
    """Reads respondent data from NSFG Cycle 10 (2015-2017).

    Cycle 10 no longer publishes cmbirth/cmmarrhx, so they are
    approximated here; the result can be off by up to 12 months.

    returns: DataFrame
    """
    # removed 'cmmarrhx', 'cmdivorcx', 'cmbirth',
    usecols = ['caseid', 'cmintvw', 'ager',
               'evrmarry', 'parity', 'wgt2015_2017',
               'mardat01', 'marend01', 'mardis01', 'rmarital',
               'fmarno', 'mar1diss']

    df = ReadResp('2015_2017_FemRespSetup.dct',
                  '2015_2017_FemRespData.dat.gz',
                  usecols=usecols)

    # sentinel codes for "not ascertained / refused / don't know"
    invalid = [9997, 9998, 9999]
    df.cmintvw.replace(invalid, np.nan, inplace=True)
    #df.cmbirth.replace(invalid, np.nan, inplace=True)
    #df.cmmarrhx.replace(invalid, np.nan, inplace=True)

    # since cmbirth and cmmarrhx are no longer included,
    # we have to compute them based on other variables;
    # the result can be off by up to 12 months
    df['cmbirth'] = df.cmintvw - df.ager*12
    df['cmmarrhx'] = (df.mardat01-1900) * 12

    df['evrmarry'] = (df.evrmarry==1)
    df['divorced'] = (df.marend01==1)
    df['separated'] = (df.marend01==2)
    df['widowed'] = (df.marend01==3)
    df['stillma'] = (df.fmarno == 1) & (df.rmarital==1)

    # normalize the cycle-specific weight column name
    df['finalwgt'] = df.wgt2015_2017

    df['cycle'] = 10

    # Instead of calling CleanResp, we have to customize
    #CleanResp(df)
    df['agemarry'] = (df.cmmarrhx - df.cmbirth) / 12.0
    df['age'] = (df.cmintvw - df.cmbirth) / 12.0

    # if married, we need agemarry; if not married, we need age
    df['missing'] = np.where(df.evrmarry,
                             df.agemarry.isnull(),
                             df.age.isnull())

    # convert century-months (months since December 1899) to birth year
    month0 = pd.to_datetime('1899-12-15')
    dates = [month0 + pd.DateOffset(months=cm)
             for cm in df.cmbirth]
    df['year'] = (pd.DatetimeIndex(dates).year - 1900)
    DigitizeResp(df)
    return df
def ReadResp(dct_file, dat_file, **options):
    """Reads the NSFG respondent data.

    dct_file: string file name of the Stata dictionary
    dat_file: string file name of the gzipped fixed-width data file
    options: keyword args passed through to ReadFixedWidth (e.g. usecols)

    returns: DataFrame
    """
    dictionary = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
    return dictionary.ReadFixedWidth(dat_file, compression='gzip', **options)
def CleanResp(resp):
    """Cleans a respondent DataFrame, in place.

    resp: DataFrame of respondents

    Adds columns: agemarry, age, missing, year, and the binned index
    columns added by DigitizeResp. (decade/fives are currently disabled.)
    """
    # dates are century-months; dividing by 12 yields ages in years
    resp['agemarry'] = (resp.cmmarrhx - resp.cmbirth) / 12.0
    resp['age'] = (resp.cmintvw - resp.cmbirth) / 12.0

    # if married, we need agemarry; if not married, we need age
    resp['missing'] = np.where(resp.evrmarry,
                               resp.agemarry.isnull(),
                               resp.age.isnull())

    # convert century-months (months since December 1899) to birth year,
    # expressed as years since 1900
    month0 = pd.to_datetime('1899-12-15')
    dates = [month0 + pd.DateOffset(months=cm)
             for cm in resp.cmbirth]
    resp['year'] = (pd.DatetimeIndex(dates).year - 1900)
    #resp['decade'] = resp.year // 10
    #resp['fives'] = resp.year // 5
    DigitizeResp(resp)
def DigitizeResp(df):
    """Computes indices for age, agemarry, and birth year, in place.

    Groups each of these variables into bins and then assigns
    an index to each bin.

    For example, anyone between 30 and 30.99 years old is
    assigned age_index 30. Anyone born in the 80s is given
    the year_index 80.

    This function allows the analysis to be run with different
    levels of granularity.

    df: DataFrame
    """
    def bin_index(series, lo, hi, step):
        # np.digitize counts the bin edges <= each value; scaling by step
        # and shifting by (lo - step) labels each bin by its lower edge
        edges = np.arange(lo, hi, step)
        return np.digitize(series, edges) * step + (lo - step)

    df['age_index'] = bin_index(df.age, 10, 55, 1)
    df.loc[df.age.isnull(), 'age_index'] = np.nan

    df['agemarry_index'] = bin_index(df.agemarry, 10, 55, 1)
    df.loc[df.agemarry.isnull(), 'agemarry_index'] = np.nan

    df['birth_index'] = bin_index(df.year, 0, 120, 10)
def ReadCanadaCycle5():
    """Placeholder for reading Canadian survey Cycle 5 data (not implemented).

    The comments below map the needed concepts to survey column names.
    """
    #age at first marriage: CC232
    #age of respondent at interview: C3
    #final weight: C1
    #marital status: C5
    #Respondent every married: CC227
    pass
def ReadCanadaCycle6():
    """Placeholder for reading Canadian survey Cycle 6 data (not implemented).

    The comments below map the needed concepts to survey column names.
    """
    #age at first marriage: CC232
    #age of respondent at interview: C3
    #final weight: C1
    #marital status: C5
    #Respondent every married: CC227
    pass
def ReadMaleResp2002():
    """Reads male respondent data from NSFG Cycle 6 (2002).

    returns: DataFrame
    """
    usecols = ['caseid', 'mardat01', 'cmdivw', 'cmbirth', 'cmintvw',
               'evrmarry', 'finalwgt', 'fmarit', 'timesmar', 'marrend4',
               #'marrend', 'marrend2', 'marrend3', marrend5', 'marrend6',
              ]

    df = ReadResp('2002Male.dct', '2002Male.dat.gz', usecols=usecols)

    # 8/9 are "refused / don't know" codes for marriage-end variables
    #df.marrend.replace([8,9], np.nan, inplace=True)
    #df.marrend2.replace([8,9], np.nan, inplace=True)
    #df.marrend3.replace([8,9], np.nan, inplace=True)
    df.marrend4.replace([8,9], np.nan, inplace=True)
    #df.marrend5.replace([8,9], np.nan, inplace=True)
    #df.marrend6.replace([8,9], np.nan, inplace=True)
    df.timesmar.replace([98,99], np.nan, inplace=True)

    # the way marriage ends are recorded is really confusing,
    # but it looks like marrend4 is the end of the first marriage.
    df['marend01'] = df.marrend4
    df['cmmarrhx'] = df.mardat01

    # derived marital-status flags; note the male-file codes differ from
    # the female files (divorced is coded 2 or 3 here)
    df['evrmarry'] = (df.timesmar > 0)
    df['divorced'] = (df.marend01==2) | (df.marend01==3)
    df['separated'] = (df.marend01==4)
    df['widowed'] = (df.marend01==1)
    df['stillma'] = (df.timesmar== 1) & (df.fmarit==1)

    df['cycle'] = 6

    CleanResp(df)
    return df
def ReadMaleResp2010():
    """Read male respondent data from NSFG Cycle 7 (2006-2010).

    returns: DataFrame with the standard marriage-history columns.
    """
    columns = [
        'caseid', 'mardat01', 'cmdivw', 'cmbirth', 'cmintvw',
        'evrmarry', 'wgtq1q16',
        'marend01', 'rmarital', 'fmarno', 'mar1diss',
    ]
    df = ReadResp('2006_2010_MaleSetup.dct',
                  '2006_2010_Male.dat.gz',
                  usecols=columns)

    # Normalize names/encodings so all cycles share one schema.
    df['cmmarrhx'] = df.mardat01
    df['finalwgt'] = df.wgtq1q16
    df['evrmarry'] = df.evrmarry.eq(1)

    # Divorce/separation/widowhood are decoded from marend01.
    df['divorced'] = df.marend01.eq(1)
    df['separated'] = df.marend01.eq(2)
    df['widowed'] = df.marend01.eq(3)

    # Exactly one marriage so far and currently married.
    df['stillma'] = df.fmarno.eq(1) & df.rmarital.eq(1)

    df['cycle'] = 7
    CleanResp(df)
    return df
def ReadMaleResp2013():
    """Read male respondent data from NSFG Cycle 8 (2011-2013).

    returns: DataFrame with the standard marriage-history columns.
    """
    columns = [
        'caseid', 'mardat01', 'cmdivw', 'cmbirth', 'cmintvw',
        'evrmarry', 'wgt2011_2013',
        'marend01', 'rmarital', 'fmarno', 'mar1diss',
    ]
    df = ReadResp('2011_2013_MaleSetup.dct',
                  '2011_2013_MaleData.dat.gz',
                  usecols=columns)

    # Normalize names/encodings so all cycles share one schema.
    df['cmmarrhx'] = df.mardat01
    df['finalwgt'] = df.wgt2011_2013
    df['evrmarry'] = df.evrmarry.eq(1)

    # Divorce/separation/widowhood are decoded from marend01.
    df['divorced'] = df.marend01.eq(1)
    df['separated'] = df.marend01.eq(2)
    df['widowed'] = df.marend01.eq(3)

    # Exactly one marriage so far and currently married.
    df['stillma'] = df.fmarno.eq(1) & df.rmarital.eq(1)

    df['cycle'] = 8
    CleanResp(df)
    return df
def ReadMaleResp2015():
    """Read male respondent data from NSFG Cycle 9 (2013-2015).

    returns: DataFrame with the standard marriage-history columns.
    """
    columns = [
        'caseid', 'mardat01', 'cmdivw', 'cmbirth', 'cmintvw',
        'evrmarry', 'wgt2013_2015',
        'marend01', 'rmarital', 'fmarno', 'mar1diss',
    ]
    df = ReadResp('2013_2015_MaleSetup.dct',
                  '2013_2015_MaleData.dat.gz',
                  usecols=columns)

    # Normalize names/encodings so all cycles share one schema.
    df['cmmarrhx'] = df.mardat01
    df['finalwgt'] = df.wgt2013_2015
    df['evrmarry'] = df.evrmarry.eq(1)

    # Divorce/separation/widowhood are decoded from marend01.
    df['divorced'] = df.marend01.eq(1)
    df['separated'] = df.marend01.eq(2)
    df['widowed'] = df.marend01.eq(3)

    # Exactly one marriage so far and currently married.
    df['stillma'] = df.fmarno.eq(1) & df.rmarital.eq(1)

    df['cycle'] = 9
    CleanResp(df)
    return df
def ReadMaleResp2017():
    """Reads respondent data from NSFG Cycle 10 (2015-2017).

    Unlike earlier cycles, cmbirth and cmmarrhx are not shipped with the
    data, so they are reconstructed here, and the usual CleanResp step is
    replaced by an inline, customized version.

    returns: DataFrame
    """
    usecols = ['caseid', 'mardat01', 'cmintvw', 'ager',
               'evrmarry', 'wgt2015_2017',
               'marend01', 'rmarital', 'fmarno', 'mar1diss']
    df = ReadResp('2015_2017_MaleSetup.dct',
                  '2015_2017_MaleData.dat.gz',
                  usecols=usecols)

    # since cmbirth and cmmarrhx are no longer included,
    # we have to compute them based on other variables;
    # the result can be off by up to 12 months
    df['cmbirth'] = df.cmintvw - df.ager*12
    df['cmmarrhx'] = (df.mardat01-1900) * 12

    df['evrmarry'] = (df.evrmarry==1)
    df['divorced'] = (df.marend01==1)
    df['separated'] = (df.marend01==2)
    df['widowed'] = (df.marend01==3)
    df['stillma'] = (df.fmarno == 1) & (df.rmarital==1)
    df['finalwgt'] = df.wgt2015_2017
    df['cycle'] = 10

    # Instead of calling CleanResp, we have to customize
    #CleanResp(df)

    # ages in years, derived from the month-count codes computed above
    df['agemarry'] = (df.cmmarrhx - df.cmbirth) / 12.0
    df['age'] = (df.cmintvw - df.cmbirth) / 12.0

    # if married, we need agemarry; if not married, we need age
    df['missing'] = np.where(df.evrmarry,
                             df.agemarry.isnull(),
                             df.age.isnull())

    # map month counts to calendar years (offset from 1900); month0 is
    # chosen so that a count of 1 lands in January 1900
    month0 = pd.to_datetime('1899-12-15')
    dates = [month0 + pd.DateOffset(months=cm)
             for cm in df.cmbirth]
    df['year'] = (pd.DatetimeIndex(dates).year - 1900)

    DigitizeResp(df)
    return df
if __name__ == '__main__':
    # Run the module driver when executed as a script.
    # main() is expected to be defined earlier in this module
    # (not visible in this excerpt).
    main()
| 29.064718 | 79 | 0.599196 |
d4b13f250b052bca7bffe7a5880d063d7c169a7e | 3,955 | py | Python | xfel/merging/application/reflection_table_utils.py | ErwinP/cctbx_project | 58f9fb5ed38c7391510e892f0ca9520467b692c1 | [
"BSD-3-Clause-LBNL"
] | null | null | null | xfel/merging/application/reflection_table_utils.py | ErwinP/cctbx_project | 58f9fb5ed38c7391510e892f0ca9520467b692c1 | [
"BSD-3-Clause-LBNL"
] | null | null | null | xfel/merging/application/reflection_table_utils.py | ErwinP/cctbx_project | 58f9fb5ed38c7391510e892f0ca9520467b692c1 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
from six.moves import range
from dials.array_family import flex
import math
| 40.357143 | 147 | 0.683439 |
d4b1cf0c1cabef461b1902ca1dbcbf5165c73bc9 | 45,496 | py | Python | rpython/memory/test/test_transformed_gc.py | jptomo/pypy-lang-scheme | 55edb2cec69d78f86793282a4566fcbc1ef9fcac | [
"MIT"
] | 1 | 2019-11-25T10:52:01.000Z | 2019-11-25T10:52:01.000Z | rpython/memory/test/test_transformed_gc.py | jptomo/pypy-lang-scheme | 55edb2cec69d78f86793282a4566fcbc1ef9fcac | [
"MIT"
] | null | null | null | rpython/memory/test/test_transformed_gc.py | jptomo/pypy-lang-scheme | 55edb2cec69d78f86793282a4566fcbc1ef9fcac | [
"MIT"
] | null | null | null | import py
import inspect
from rpython.rlib.objectmodel import compute_hash, compute_identity_hash
from rpython.translator.c import gc
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import SomePtr
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup
from rpython.memory.gctransform import framework, shadowstack
from rpython.rtyper.lltypesystem.lloperation import llop, void
from rpython.rlib.objectmodel import compute_unique_id, we_are_translated
from rpython.rlib.debug import ll_assert
from rpython.rlib import rgc
from rpython.conftest import option
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rarithmetic import LONG_BIT
WORD = LONG_BIT // 8
ARGS = lltype.FixedSizeArray(lltype.Signed, 3)
# ________________________________________________________________
# ________________________________________________________________
# tagged pointers
from rpython.rlib.objectmodel import UnboxedValue
| 31.904628 | 93 | 0.512638 |
d4b2424c1e77c6c44ed58c02c4ec0dcbec8b6934 | 132 | py | Python | build/lib/rigidregistration/__init__.py | kem-group/rigidRegistration | cd6bef208d4b475954e2b3970d6ec11c15f61d70 | [
"MIT"
] | 3 | 2021-10-07T18:01:32.000Z | 2022-03-10T17:01:32.000Z | build/lib/rigidregistration/__init__.py | kem-group/rigidRegistration | cd6bef208d4b475954e2b3970d6ec11c15f61d70 | [
"MIT"
] | null | null | null | build/lib/rigidregistration/__init__.py | kem-group/rigidRegistration | cd6bef208d4b475954e2b3970d6ec11c15f61d70 | [
"MIT"
] | 1 | 2022-03-10T17:01:36.000Z | 2022-03-10T17:01:36.000Z | from . import utils
from . import display
from . import save
from . import FFTW
from . import stackregistration
__version__="0.2.1" | 18.857143 | 31 | 0.765152 |
d4b2a9a044269ea09a095573c7237e7f034915c1 | 5,359 | py | Python | torchmetrics/retrieval/retrieval_fallout.py | rudaoshi/metrics | c018348619bd7e375cb86abf7dfcaddb7208a36d | [
"Apache-2.0"
] | null | null | null | torchmetrics/retrieval/retrieval_fallout.py | rudaoshi/metrics | c018348619bd7e375cb86abf7dfcaddb7208a36d | [
"Apache-2.0"
] | null | null | null | torchmetrics/retrieval/retrieval_fallout.py | rudaoshi/metrics | c018348619bd7e375cb86abf7dfcaddb7208a36d | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional
import pangu.core.backend as B
from pangu.core.backend import Tensor, tensor
from torchmetrics.functional.retrieval.fall_out import retrieval_fall_out
from torchmetrics.retrieval.retrieval_metric import RetrievalMetric
from torchmetrics.utilities.data import get_group_indexes
| 40.598485 | 120 | 0.630715 |
d4b30ff4f1cdba84270695cf02e3415880246ea6 | 5,458 | py | Python | pydlm/tests/base/testKalmanFilter.py | onnheimm/pydlm | 4693af6e621e3b75feda7ca15327b69a4ca622a7 | [
"BSD-3-Clause"
] | 423 | 2016-09-15T06:45:26.000Z | 2022-03-29T08:41:11.000Z | pydlm/tests/base/testKalmanFilter.py | onnheimm/pydlm | 4693af6e621e3b75feda7ca15327b69a4ca622a7 | [
"BSD-3-Clause"
] | 50 | 2016-09-14T19:45:49.000Z | 2021-07-26T17:04:10.000Z | pydlm/tests/base/testKalmanFilter.py | onnheimm/pydlm | 4693af6e621e3b75feda7ca15327b69a4ca622a7 | [
"BSD-3-Clause"
] | 99 | 2016-09-19T08:08:41.000Z | 2022-03-07T13:47:36.000Z | import numpy as np
import unittest
from pydlm.modeler.trends import trend
from pydlm.modeler.seasonality import seasonality
from pydlm.modeler.builder import builder
from pydlm.base.kalmanFilter import kalmanFilter
if __name__ == '__main__':
unittest.main()
| 35.212903 | 80 | 0.622756 |
d4b39516d2e47e56ba5e7898643ba4593ea3b27e | 349 | py | Python | change_threshold_migration.py | arcapix/gpfsapi-examples | 15bff7fda7b0a576209253dee48eb44e4c0d565f | [
"MIT"
] | 10 | 2016-05-17T12:58:35.000Z | 2022-01-10T05:23:45.000Z | change_threshold_migration.py | arcapix/gpfsapi-examples | 15bff7fda7b0a576209253dee48eb44e4c0d565f | [
"MIT"
] | null | null | null | change_threshold_migration.py | arcapix/gpfsapi-examples | 15bff7fda7b0a576209253dee48eb44e4c0d565f | [
"MIT"
] | 1 | 2016-09-12T09:07:00.000Z | 2016-09-12T09:07:00.000Z | from arcapix.fs.gpfs.policy import PlacementPolicy
from arcapix.fs.gpfs.rule import MigrateRule

# Load the live placement policy for the 'mmfs1' filesystem.
policy = PlacementPolicy('mmfs1')

# Create a new migrate rule for the 'sata1' pool.
# NOTE(review): threshold=(90, 50) presumably means "start migrating at
# 90% occupancy, stop at 50%" -- confirm against the arcapix MigrateRule docs.
r = MigrateRule(source='sata1', threshold=(90, 50))

# Add the rule at position 0, i.e. ahead of any existing rules.
policy.rules.insert(r, 0)

# Persist the modified policy back to the filesystem.
policy.save()
| 23.266667 | 51 | 0.759312 |
d4b440c6e516a3bf9860aad41ef519824e8ea929 | 158 | py | Python | 1/puzzle1.py | tjol/advent-of-code-2021 | 16def395df091d5a8ae9ceb66ba3370554bdf40b | [
"0BSD"
] | 1 | 2021-12-20T19:56:56.000Z | 2021-12-20T19:56:56.000Z | 1/puzzle1.py | tjol/advent-of-code-2021 | 16def395df091d5a8ae9ceb66ba3370554bdf40b | [
"0BSD"
] | null | null | null | 1/puzzle1.py | tjol/advent-of-code-2021 | 16def395df091d5a8ae9ceb66ba3370554bdf40b | [
"0BSD"
] | null | null | null | #!/usr/bin/env python3
import sys

# Read one integer depth reading per line and count how many readings
# are strictly larger than the reading immediately before them.
readings = [int(line) for line in sys.stdin]
print(sum(1 for prev, curr in zip(readings, readings[1:]) if curr > prev))
| 15.8 | 62 | 0.639241 |
d4b523573d56f337047743520fa550fd29576318 | 13,961 | py | Python | project/app/paste/controllers.py | An0nYm0u5101/Pastebin | aef35abee69ce7ce240d3a3f64bb19446468d30d | [
"MIT"
] | 1 | 2020-08-08T06:07:47.000Z | 2020-08-08T06:07:47.000Z | project/app/paste/controllers.py | An0nYm0u5101/Pastebin | aef35abee69ce7ce240d3a3f64bb19446468d30d | [
"MIT"
] | null | null | null | project/app/paste/controllers.py | An0nYm0u5101/Pastebin | aef35abee69ce7ce240d3a3f64bb19446468d30d | [
"MIT"
] | 1 | 2020-08-08T06:07:50.000Z | 2020-08-08T06:07:50.000Z | from flask import Blueprint, request, render_template, \
flash, g, session, redirect, url_for, jsonify
from app import db, requires_auth
from flask_cors import CORS
from .models import Paste
import uuid
from datetime import datetime
from app.user.models import User
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import HtmlFormatter
from functools import wraps
from datetime import datetime
from dateutil import parser
mod_paste = Blueprint('paste', __name__)
CORS(mod_paste)
# @mod_paste.route('/<url>/embed', methods=['POST'])
# def embed_code(url):
# paste = Paste.query.filter(Paste.url == url).first()
# return jsonify(paste_text = paste.text,paste_link = url)
# @mod_paste.route('/paste', methods=['GET'])
# @requires_auth
# def get_all_pastes():
# # user_id = session['user_id']
# # pastes = paste.query.filter(paste.user_id == user_id).all()
# curr_id = session['user_id']
# user = User.query.filter(User.id == curr_id).first()
# paste_list = Paste.query.filter(curr_id == Paste.user_id).all()
# url_pre = "/"
# for paste in paste_list:
# paste.url = url_pre + paste.url
# if user.user_type == 1:
# return render_template('mypaste.html', paste_list=paste_list)
# return render_template('admin_mypaste.html',paste_list = paste_list)
# # return jsonify(success=True, pastes=[paste.to_dict() for paste in
# # pastes])
#
#
# @mod_paste.route('/api/paste', methods=['POST'])
# @requires_auth
# def get_all_pastes_object():
# user_id = session['user_id']
# user = User.query.filter(user_id == User.id).first()
# pastes = Paste.query.filter(Paste.user_id == user_id).all()
# active = []
# for paste in pastes:
# temp_paste = {}
# if paste.is_active():
# temp_paste['title'] = paste.title
# temp_paste['add_time']=paste.add_time
# temp_paste['expire_time']=paste.expire_time
# temp_paste['lang']=paste.lang
# temp_paste['url']=paste.url
# active.append(temp_paste)
#
# return jsonify({'paste_list':active,'username':user.username}),200
# @mod_paste.route('/paste/<id>', methods=['GET'])
# @requires_auth
# def get_paste(id):
# user_id = session['user_id']
# paste = paste.query.filter(
# Paste.id == id, Paste.user_id == user_id).first()
# if paste is None:
# return render_template("index.html"),4044
# else:
# return jsonify(success=True, paste=paste.to_dict())
# @mod_paste.route('/paste/<id>', methods=['POST'])
# @requires_auth
# def edit_paste(id):
# user_id = session['user_id']
# paste = Paste.query.filter(
# Paste.id == id, Paste.user_id == user_id).first()
# if paste is None:
# return render_template("index.html"),4044
# else:
# paste.title = request.form['title']
# paste.text = request.form['text']
# paste.color = request.form['color']
# paste.lang = request.form['lang']
# db.session.commit()
# return jsonify(success=True)
# @mod_paste.route('/<url>', methods=['GET'])
# def display_paste(url):
# paste = Paste.query.filter(Paste.url == url).first()
# style = HtmlFormatter().get_style_defs('.highlight')
# lexer = get_lexer_by_name(paste.lang)
# formatter = HtmlFormatter(linenos=True, cssclass="highlight")
# result = highlight(paste.text, lexer, formatter)
# return render_template("view_paste.html", paste_title=paste.title,
# paste_lang=paste.lang, highlight_style=style,
# @mod_paste.route('/<url>/add_report', methods=['POST'])
# @requires_auth
# def to_delete(url):
# paste_to_delete = Paste.query.filter(Paste.url == url).first()
# if paste_to_delete.report_count > 5:
# db.session.delete(paste_to_delete)
# else:
# paste_to_delete.report_count = paste_to_delete.report_count + 1
# db.session.commit()
# curr_id = session['user_id']
# paste_list = Paste.query.filter(Paste.user_id == curr_id).all()
# url_pre = "/"
# for paste in paste_list:
# paste.url = url_pre + paste.url
# return render_template('mypaste.html', paste_list=paste_list)
| 34.302211 | 197 | 0.698016 |
d4b56ca40567b39870ee94f1ef850a0b0b2f1d60 | 8,333 | py | Python | control_drone/run_model_on_cam.py | Apiquet/DeepLearningFrameworkFromScratch | 798ac42aa1a05286eb148576072e015fd94dbf94 | [
"MIT"
] | 1 | 2020-12-18T14:40:49.000Z | 2020-12-18T14:40:49.000Z | control_drone/run_model_on_cam.py | Apiquet/DeepLearningFrameworkFromScratch | 798ac42aa1a05286eb148576072e015fd94dbf94 | [
"MIT"
] | null | null | null | control_drone/run_model_on_cam.py | Apiquet/DeepLearningFrameworkFromScratch | 798ac42aa1a05286eb148576072e015fd94dbf94 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script run neural network model on a camera live stream
"""
import argparse
import cv2
import numpy as np
import os
import time
import sys
COMMANDS = {0: "move_forward", 1: "go_down", 2: "rot_10_deg",
            3: "go_up", 4: "take_off", 5: "land", 6: "idle"}


def send_command(anafi, command_id):
    """Send a single movement command to an Anafi drone.

    anafi: drone controller exposing move_relative/safe_takeoff/safe_land
    command_id: integer key into COMMANDS

    Raises ValueError for an unknown command_id.  The "idle" command
    intentionally does nothing.
    """
    if command_id not in COMMANDS:
        # Fix: the original code raised an f-string, which is itself a
        # TypeError in Python 3 (exceptions must derive from BaseException).
        raise ValueError(f"Command id not in COMMANDS choices: {command_id}")
    print("The following command will be sent: ", COMMANDS[command_id])

    command = COMMANDS[command_id]
    if command == "move_forward":
        anafi.move_relative(dx=1, dy=0, dz=0, dradians=0)
    if command == "go_down":
        anafi.move_relative(dx=0, dy=0, dz=-0.5, dradians=0)
    if command == "rot_10_deg":
        # NOTE(review): 0.785 rad is ~45 degrees, not 10 -- the command
        # name and the angle disagree; confirm which is intended.
        anafi.move_relative(dx=0, dy=0, dz=0, dradians=0.785)
    if command == "go_up":
        anafi.move_relative(dx=0, dy=0, dz=0.5, dradians=0)
    if command == "take_off":
        anafi.safe_takeoff(5)
    if command == "land":
        anafi.safe_land(5)
    return
if __name__ == '__main__':
main()
| 29.867384 | 78 | 0.531621 |
d4b5c94f17a9cee798f64b657926900668bb67f6 | 5,431 | py | Python | classify_images.py | rmsare/cs231a-project | 91776ada3512d3805de0e66940c9f1c5b3c4c641 | [
"MIT"
] | 2 | 2017-11-06T10:23:16.000Z | 2019-11-09T15:11:19.000Z | classify_images.py | rmsare/cs231a-project | 91776ada3512d3805de0e66940c9f1c5b3c4c641 | [
"MIT"
] | null | null | null | classify_images.py | rmsare/cs231a-project | 91776ada3512d3805de0e66940c9f1c5b3c4c641 | [
"MIT"
] | null | null | null | """
Classification of pixels in images using color and other features.
General pipeline usage:
1. Load and segment images (img_utils.py)
2. Prepare training data (label_image.py)
3. Train classifier or cluster data (sklearn KMeans, MeanShift, SVC, etc.)
4. Predict labels on new image or directory (classify_directory())
5. Apply classification to 3D points and estimate ground plane orientation (process_pointcloud.py)
Project uses the following directory structure:
images/ - contains binary files of numpy arrays corresponding to survey images and segmentations
labelled/ - contains labelled ground truth images or training data
results/ - contains results of classification
I store randomly split training and testing images in test/ and train/ directories.
Author: Robert Sare
E-mail: [email protected]
Date: 8 June 2017
"""
import numpy as np
import matplotlib.pyplot as plt
import skimage.color, skimage.io
from skimage.segmentation import mark_boundaries
from sklearn.svm import SVC
from sklearn.cluster import KMeans, MeanShift
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
import os, fnmatch
def classify_directory(classifier, test_dir, train_dir='train/'):
    """
    Classify every image in test_dir with a fitted sklearn classifier and
    save per-pixel predictions (overlay figure + .npy) to results/.

    classifier: fitted sklearn estimator with a predict() method
    test_dir: directory containing images to classify
    train_dir: unused here; kept for interface compatibility
    """
    # XXX: This is here if the classifier needs to be trained from scratch
    #print("Preparing training data...")
    #n_samples = 1000
    #train_data, train_labels = load_training_images(train_dir, n_samples)
    #
    #print("Training classifier...")
    #classifier = ImageSVC()
    #classifier.fit(train_data, train_labels)

    for f in os.listdir(test_dir):
        # Fix: os.listdir returns bare names, so the path must be joined
        # with test_dir (reading `f` directly only worked from inside it).
        image = skimage.io.imread(os.path.join(test_dir, f))
        height, width, depth = image.shape

        # Fix: str.strip('.JPG') removes any of the characters . J P G
        # from both ends of the name, not the extension; use splitext.
        stem = os.path.splitext(f)[0]

        print("Predicting labels for " + stem + ".jpg")
        features = compute_colorxy_features(image)
        features /= features.max(axis=0)
        pred_labels = classifier.predict(features)

        print("Saving predictions for " + stem + ".jpg")
        plt.figure()
        plt.imshow(image)
        plt.imshow(pred_labels.reshape((height, width)), alpha=0.5, vmin=0, vmax=2)
        plt.show(block=False)
        plt.savefig('results/' + stem + '_svm_pred.png')
        plt.close()

        np.save('results/' + stem + 'svm.npy', pred_labels.reshape((height, width)))
def compute_colorxy_features(image):
    """
    Extract per-pixel (x, y, L*, a*, b*) features from an RGB image.

    image: (height, width, 3) RGB array
    returns: (height*width, 5) array, each column divided by its maximum
    """
    height, width, depth = image.shape

    # Fix: the original line was missing its closing parenthesis.
    colors = skimage.color.rgb2lab(image.reshape((height * width, depth)))

    X, Y = np.meshgrid(np.arange(height), np.arange(width))
    xy = np.hstack([X.reshape((height * width, 1)), Y.reshape((height * width, 1))])
    # NOTE(review): meshgrid's default 'xy' indexing yields (width, height)
    # grids; verify the flattened pixel order matches the reshaped colors.

    colorxy = np.hstack([xy, colors])
    colorxy /= colorxy.max(axis=0)
    return colorxy
def load_ground_truth(filename):
    """
    Load a ground-truth/training label array (.npy) and redefine the
    labelling so the default colorscale renders nicely.

    filename: path to a .npy file of integer class labels

    Net effect of the masked assignments below (order matters):
    original 0 and 1 both map to 2, original 2 maps to 1, original 3
    maps to 0.  NOTE(review): originals 0 and 1 end up merged --
    presumably intentional (background/unlabelled treated alike); confirm.
    """
    truth = np.load(filename)

    # Change labels for nice default colorscale when plotted
    truth = truth - 1            # shift all labels down by one
    truth[truth == -1] = 0       # original 0 now coincides with original 1
    truth[truth == 0] = 5        # park originals {0, 1} at temporary value 5
    truth[truth == 2] = 0        # original 3 -> 0
    truth[truth == 5] = 2        # temporary value -> 2

    return truth
def load_image_labels(name):
    """
    Load the image array and label array saved by a previous labelling
    session.

    name: base name of the survey image (no directory, no suffix)
    returns: (image, labels) tuple of numpy arrays
    """
    image_path = 'images/' + name + '_image.npy'
    labels_path = 'labelled/' + name + '_labels.npy'
    return np.load(image_path), np.load(labels_path)
def plot_class_image(image, segments, labels):
    """
    Display the segmented image next to the predicted class-label overlay.

    image: RGB image array
    segments: segment index array passed to mark_boundaries
    labels: per-pixel class label array (same height/width as image)
    """
    plt.figure()

    # left panel: image with segment boundaries drawn in red
    plt.subplot(1,2,1)
    plt.imshow(mark_boundaries(image, segments, color=(1,0,0), mode='thick'))
    plt.title('segmented image')

    # right panel: image with a semi-transparent label overlay
    plt.subplot(1,2,2)
    plt.imshow(image)
    plt.imshow(labels, alpha=0.75)
    cb = plt.colorbar(orientation='horizontal', shrink=0.5)
    plt.title('predicted class labels')
    plt.show(block=False)
def load_training_images(train_dir, n_samples=1000, n_features=3):
    """
    Load labelled training images from train_dir and subsample n_samples
    (feature, label) pairs for training or validation.

    train_dir: directory of previously labelled images
    n_samples: number of pixel samples kept after shuffling
    n_features: feature dimension used to initialise the empty buffers
    returns: (train_data, train_labels)
    """
    train_data = np.empty((0, n_features))
    train_labels = np.empty(0)

    files = os.listdir(train_dir)
    for f in files:
        # NOTE(review): parse_filename() is not defined in this module as
        # shown -- verify it is provided elsewhere.
        name = parse_filename(f)
        image, labels = load_image_labels(name)
        ht, wid, depth = image.shape

        # NOTE(review): compute_color_features() is also undefined here;
        # the module defines compute_colorxy_features(), but that returns
        # 5 columns and would not match n_features=3 -- confirm intent.
        train_data = np.append(train_data,
                               compute_color_features(image), axis=0)
        train_labels = np.append(train_labels,
                                 labels.reshape(wid*ht, 1).ravel())

    # deterministic subsample (fixed random_state)
    train_data, train_labels = shuffle(train_data, train_labels,
                                       random_state=0, n_samples=n_samples)
    return train_data, train_labels
def save_prediction(name, pred_labels):
    """
    Save predicted class labels for one image to results/<name>_pred.npy.

    name: base name of the image (no directory or suffix)
    pred_labels: array of per-pixel class labels
    """
    # np.save appends the .npy suffix automatically
    np.save('results/' + name + '_pred', pred_labels)
if __name__ == "__main__":
    # Load training data
    train_dir = 'train/'
    test_dir = 'test/'
    # Fix: this module defines load_training_images();
    # load_training_data() does not exist and raised NameError.
    train_data, train_labels = load_training_images(train_dir)

    # Train classifier
    clf = SVC()
    clf.fit(train_data, train_labels)

    # Predict labels for test images
    classify_directory(clf, test_dir)
| 30.857955 | 104 | 0.662861 |
d4b78df5fd076f594376f0529e58415b66407a89 | 579 | py | Python | quick_start/my_text_classifier/predictors/sentence_classifier_predictor.py | ramild/allennlp-guide | 4cff916e7bc4629184bc70594e213ef56e14ec70 | [
"MIT"
] | 71 | 2020-06-06T03:12:44.000Z | 2022-03-12T20:21:48.000Z | quick_start/my_text_classifier/predictors/sentence_classifier_predictor.py | ramild/allennlp-guide | 4cff916e7bc4629184bc70594e213ef56e14ec70 | [
"MIT"
] | 50 | 2020-06-18T14:19:15.000Z | 2022-03-28T07:04:16.000Z | quick_start/my_text_classifier/predictors/sentence_classifier_predictor.py | ramild/allennlp-guide | 4cff916e7bc4629184bc70594e213ef56e14ec70 | [
"MIT"
] | 37 | 2020-06-05T19:08:44.000Z | 2022-03-17T08:23:41.000Z | from allennlp.common import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors import Predictor
from overrides import overrides
| 34.058824 | 65 | 0.775475 |
d4b832afc1a419832477a3ad699f701ea5d77522 | 3,357 | py | Python | ciphers/SKINNY-TK2/SKINNY-TK2/skinnytk2.py | j-danner/autoguess | 712a8dcfb259a277b2b2a499bd7c5fc4aab97b67 | [
"MIT"
] | 7 | 2021-11-29T07:25:43.000Z | 2022-03-02T10:15:30.000Z | ciphers/SKINNY-TK2/SKINNY-TK2/skinnytk2.py | j-danner/autoguess | 712a8dcfb259a277b2b2a499bd7c5fc4aab97b67 | [
"MIT"
] | 1 | 2022-03-30T16:29:50.000Z | 2022-03-30T16:29:50.000Z | ciphers/SKINNY-TK2/SKINNY-TK2/skinnytk2.py | j-danner/autoguess | 712a8dcfb259a277b2b2a499bd7c5fc4aab97b67 | [
"MIT"
] | 1 | 2022-03-30T13:40:12.000Z | 2022-03-30T13:40:12.000Z | # Created on Sep 7, 2020
# author: Hosein Hadipour
# contact: [email protected]
import os
output_dir = os.path.curdir
def skinnytk2(R=1):
    """
    Generate the guess-and-determine relation file for SKINNY-TK2
    covering R rounds, and write it next to the script.

    R: number of rounds to model (default 1)

    Round structure sketched below (SB: SubCells, AC: AddConstants,
    P: ShiftRows permutation, MC: MixColumns):

    tk ================================================> TWEAKEY_P(tk) ===> ---
    SB AC | P MC SB AC |
    x_0 ===> x_0 ===> x_0 ===> + ===> y_0 ===> P(y_0) ===> x_1 ===> x_1 ===> x_1 ===> + ===> y_1 ===> ---
    """
    cipher_name = 'skinnytk2'
    # P: cell permutation applied between y and the MixColumns input;
    # TKP: permutation applied to the tweakey cells after every round.
    P = [0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12]
    TKP = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7]
    # Symbolic names for the 16 cells of each tweakey word.
    tk1 = ['tk1_%d' % i for i in range(16)]
    tk2 = ['tk2_%d' % i for i in range(16)]
    # Recommended (mg, ms) parameter pairs per round count; only the
    # pair for the round count being modelled is left uncommented.
    # 1 round
    # recommended_mg = 8
    # recommended_ms = 4
    # 2 rounds
    # recommended_mg = 16
    # recommended_ms = 8
    # 3 rounds
    # recommended_mg = 19
    # recommended_ms = 24
    # 4 rounds
    # recommended_mg = 21
    # recommended_ms = 27
    # 5 rounds
    # recommended_mg = 22
    # recommended_ms = 35
    # 6 rounds
    # recommended_mg = 25
    # recommended_ms = 40
    # 7 rounds
    # recommended_mg = 26
    # recommended_ms = 70
    # 8 rounds
    # recommended_mg = 28
    # recommended_ms = 80
    # 9 rounds
    # recommended_mg = 28
    # recommended_ms = 100
    # 10 rounds
    recommended_mg = 30
    recommended_ms = 100
    # 11 rounds
    # recommended_mg = 31
    # recommended_ms = 100
    eqs = '#%s %d Rounds\n' % (cipher_name, R)
    eqs += 'connection relations\n'
    for r in range(R):
        # Symbolic state/tweakey cell names for round r.
        xin = ['x_%d_%d' % (r, i) for i in range(16)]
        xout = ['x_%d_%d' % (r + 1, i) for i in range(16)]
        y = ['y_%d_%d' % (r, i) for i in range(16)]
        tk = ['tk_%d_%d' % (r, i) for i in range(8)]
        # Generate AddTweakey relations (only the top two rows, i < 2,
        # absorb tweakey material; the bottom rows pass through).
        for i in range(4):
            for j in range(4):
                if i < 2:
                    eqs += '%s, %s, %s\n' % (tk1[j + 4*i], tk2[j + 4*i], tk[j + 4*i])
                    eqs += '%s, %s, %s\n' % (xin[j + 4*i], tk[j + 4*i], y[j + 4*i])
                else:
                    eqs += '%s, %s\n' % (xin[j + 4*i], y[j + 4*i])
        # Apply ShiftRows
        py = [y[P[i]] for i in range(16)]
        # Generate MixColumn relations (one relation per output row,
        # following the SKINNY MixColumns matrix)
        for j in range(4):
            eqs += '%s, %s, %s, %s\n' % (py[j + 0*4], py[j + 2*4], py[j + 3*4], xout[j + 0*4])
            eqs += '%s, %s\n' % (py[j], xout[j + 1*4])
            eqs += '%s, %s, %s\n' % (py[j + 1*4], py[j + 2*4], xout[j + 2*4])
            eqs += '%s, %s, %s\n' % (py[j + 0*4], py[j + 2*4], xout[j + 3*4])
        # Update Tweakey (permute both tweakey words with TKP)
        temp1 = tk1.copy()
        temp2 = tk2.copy()
        tk1 = [temp1[TKP[i]] for i in range(16)]
        tk2 = [temp2[TKP[i]] for i in range(16)]
    # Plaintext and ciphertext cells are the known variables.
    plaintext = ['x_0_%d' % i for i in range(16)]
    ciphertext = ['x_%d_%d' % (R, i) for i in range(16)]
    eqs += 'known\n' + '\n'.join(plaintext + ciphertext)
    eqs += '\nend'
    relation_file_path = os.path.join(output_dir, 'relationfile_%s_%dr_mg%d_ms%d.txt' % (cipher_name, R, recommended_mg, recommended_ms))
    with open(relation_file_path, 'w') as relation_file:
        relation_file.write(eqs)
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this file as shown;
    # presumably it should call skinnytk2() -- verify before running.
    main()
| 33.909091 | 137 | 0.472148 |
d4bad788e453eaffecc4387f4afebe5f25e9867c | 2,447 | py | Python | tests/test_bmipy.py | visr/bmi-python | 0fcca448d097bc001f7492094ce1fd95d041b81d | [
"MIT"
] | 14 | 2015-01-13T16:26:12.000Z | 2021-07-22T04:56:59.000Z | tests/test_bmipy.py | visr/bmi-python | 0fcca448d097bc001f7492094ce1fd95d041b81d | [
"MIT"
] | 11 | 2015-03-17T21:15:57.000Z | 2021-03-24T21:31:00.000Z | tests/test_bmipy.py | visr/bmi-python | 0fcca448d097bc001f7492094ce1fd95d041b81d | [
"MIT"
] | 9 | 2015-03-13T15:59:52.000Z | 2021-06-28T11:40:51.000Z | import pytest
from bmipy import Bmi
| 16.993056 | 60 | 0.608909 |
d4bc5b3a862989ca34a4883d8781d87ac17bd277 | 592 | py | Python | scrapy_compose/fields/parser/string_field.py | Sphynx-HenryAY/scrapy-compose | bac45ee51bf4a49b3d4a9902767a17072137f869 | [
"MIT"
] | null | null | null | scrapy_compose/fields/parser/string_field.py | Sphynx-HenryAY/scrapy-compose | bac45ee51bf4a49b3d4a9902767a17072137f869 | [
"MIT"
] | 18 | 2019-10-17T10:51:30.000Z | 2020-05-12T10:00:49.000Z | scrapy_compose/fields/parser/string_field.py | Sphynx-HenryAY/scrapy-compose | bac45ee51bf4a49b3d4a9902767a17072137f869 | [
"MIT"
] | null | null | null |
from scrapy_compose.utils.context import realize
from .field import FuncField as BaseField
| 34.823529 | 96 | 0.6875 |
d4bc84fe21a49ee4da04551b3e65cc3308167280 | 2,449 | py | Python | app/request.py | vincentmuya/News-highlight | 67f61bb0bea69ec004c11a2148c62cd892a19615 | [
"CNRI-Python"
] | null | null | null | app/request.py | vincentmuya/News-highlight | 67f61bb0bea69ec004c11a2148c62cd892a19615 | [
"CNRI-Python"
] | null | null | null | app/request.py | vincentmuya/News-highlight | 67f61bb0bea69ec004c11a2148c62cd892a19615 | [
"CNRI-Python"
] | null | null | null | import urllib.request
import json
from .models import News
# News API key; None here -- presumably injected at application start-up
# from configuration -- TODO confirm where it is assigned.
api_key = None
# Base URL template for the news request (original comment said "movie",
# but this module fetches news); get_news_source() formats it with
# country, category and api_key.
base_url = None
def get_news_source(country,category):
    '''
    Fetch news articles for a country/category from the news API.

    country, category: substituted (with api_key) into base_url.
    Returns a list built by process_result(), or None when the response
    contains no articles.
    '''
    get_news_source_url = base_url.format(country,category,api_key)
    with urllib.request.urlopen(get_news_source_url)as url:
        get_news_source_data = url.read()
        # json.loads accepts the raw bytes returned by read()
        get_news_source_response = json.loads(get_news_source_data)
        print(get_news_source_response)  # NOTE(review): debug print; consider removing or logging

        source_result = None
        if get_news_source_response['articles']:
            source_result_list = get_news_source_response['articles']
            source_result = process_result(source_result_list)

    return source_result
def process_result(source_list):
    '''
    Convert raw article dictionaries into a list of News objects.

    source_list: list of dicts from the API's 'articles' field.
    Articles without a preview image (urlToImage) are skipped.
    '''
    articles = []
    for item in source_list:
        image_url = item.get('urlToImage')
        if not image_url:
            continue
        articles.append(News(item.get('source'),
                             item.get('author'),
                             item.get('title'),
                             item.get('description'),
                             item.get('url'),
                             image_url,
                             item.get('publishedAt')))
    return articles
| 33.094595 | 91 | 0.703144 |
d4bca411ec322bf0d2f4684e172c03b2076797b4 | 3,590 | py | Python | hypernet/src/thermophysicalModels/reactionThermo/mixture/multiComponent.py | christian-jacobsen/hypernet | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | [
"Apache-2.0"
] | null | null | null | hypernet/src/thermophysicalModels/reactionThermo/mixture/multiComponent.py | christian-jacobsen/hypernet | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | [
"Apache-2.0"
] | null | null | null | hypernet/src/thermophysicalModels/reactionThermo/mixture/multiComponent.py | christian-jacobsen/hypernet | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from hypernet.src.general import const
from hypernet.src.general import utils
from hypernet.src.thermophysicalModels.reactionThermo.mixture import Basic
| 30.423729 | 79 | 0.479666 |
d4bd39d2862e151f45c3d33b0cd79ef62c908dbf | 1,760 | py | Python | Exercises/W08D04_Exercise_01_Django_Cat_Collector/main_app/models.py | Roger-Takeshita/Software_Engineer | ec647bb969aa02453dae1884b5787d2045f7b4e2 | [
"MIT"
] | 2 | 2019-12-27T06:15:26.000Z | 2020-05-21T17:37:12.000Z | Exercises/W08D04_Exercise_01_Django_Cat_Collector/main_app/models.py | Roger-Takeshita/Bootcamp-Software-Engineer | ec647bb969aa02453dae1884b5787d2045f7b4e2 | [
"MIT"
] | null | null | null | Exercises/W08D04_Exercise_01_Django_Cat_Collector/main_app/models.py | Roger-Takeshita/Bootcamp-Software-Engineer | ec647bb969aa02453dae1884b5787d2045f7b4e2 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
from datetime import date
from django.contrib.auth.models import User #! 1 - Import user models
MEALS = (
('B', 'Breakfast'),
('L', 'Lunch'),
('D', 'Dinner')
) | 28.852459 | 160 | 0.691477 |
d4bdefb01d0a762af075c93831d87a0e10dd81de | 79 | py | Python | kedro-airflow/kedro_airflow/__init__.py | kedro-org/kedro-plugins | ad0755f503b275b73aeb8feb592a0ec0ea1bca8e | [
"Apache-2.0"
] | 6 | 2022-01-21T07:37:05.000Z | 2022-03-31T09:41:29.000Z | kedro-airflow/kedro_airflow/__init__.py | kedro-org/kedro-plugins | ad0755f503b275b73aeb8feb592a0ec0ea1bca8e | [
"Apache-2.0"
] | 7 | 2022-01-20T10:59:29.000Z | 2022-03-30T17:59:12.000Z | kedro-airflow/kedro_airflow/__init__.py | kedro-org/kedro-plugins | ad0755f503b275b73aeb8feb592a0ec0ea1bca8e | [
"Apache-2.0"
] | 1 | 2022-03-29T09:12:00.000Z | 2022-03-29T09:12:00.000Z | """ Kedro plugin for running a project with Airflow """
__version__ = "0.5.0"
| 19.75 | 55 | 0.683544 |
d4be731c2fefcf29455273684888ea746824bba4 | 713 | py | Python | soccer/gameplay/plays/testing/debug_window_evaluator.py | AniruddhaG123/robocup-software | 0eb3b3957428894f2f39341594800be803665f44 | [
"Apache-2.0"
] | 1 | 2019-09-24T22:59:25.000Z | 2019-09-24T22:59:25.000Z | soccer/gameplay/plays/testing/debug_window_evaluator.py | ananth-kumar01/robocup-software | 4043a7f9590d02f617d8e9a762697e4aaa27f1a6 | [
"Apache-2.0"
] | null | null | null | soccer/gameplay/plays/testing/debug_window_evaluator.py | ananth-kumar01/robocup-software | 4043a7f9590d02f617d8e9a762697e4aaa27f1a6 | [
"Apache-2.0"
] | null | null | null | import play
import behavior
import main
import robocup
import constants
import time
import math
## This isn't a real play, but it's pretty useful
# Turn it on and we'll draw the window evaluator stuff on-screen from the ball to our goal
| 29.708333 | 90 | 0.678822 |
d4bf375fc83f7c0bd614c1589d9466c6217e84ec | 331 | py | Python | rainbowconnection/sources/phoenix/utils.py | zkbt/rainbow-connection | 53828fd0b63a552a22a6aa38393cefda27c61b9a | [
"MIT"
] | 6 | 2019-09-04T20:22:02.000Z | 2020-12-30T05:00:10.000Z | rainbowconnection/sources/phoenix/utils.py | zkbt/rainbow-connection | 53828fd0b63a552a22a6aa38393cefda27c61b9a | [
"MIT"
] | 8 | 2019-05-23T18:06:51.000Z | 2020-02-13T22:15:07.000Z | rainbowconnection/sources/phoenix/utils.py | zkbt/rainbow-connection | 53828fd0b63a552a22a6aa38393cefda27c61b9a | [
"MIT"
] | null | null | null | from ...imports import *
def stringify_metallicity(Z):
"""
Convert a metallicity into a PHOENIX-style string.
Parameters
----------
Z : float
[Fe/H]-style metallicity (= 0.0 for solar)
"""
if Z <= 0:
return "-{:03.1f}".format(np.abs(Z))
else:
return "+{:03.1f}".format(Z)
| 19.470588 | 54 | 0.531722 |
d4bffb102dcb1752fbd5cc9d9f62656784042e5e | 1,506 | py | Python | shipane_sdk/transaction.py | awfssv/ShiPanE-Python-SDK | 678790e5eb220cf685e5f8d03ba3310f3fbb8d22 | [
"MIT"
] | 1 | 2016-12-19T16:05:23.000Z | 2016-12-19T16:05:23.000Z | shipane_sdk/transaction.py | awfssv/ShiPanE-Python-SDK | 678790e5eb220cf685e5f8d03ba3310f3fbb8d22 | [
"MIT"
] | null | null | null | shipane_sdk/transaction.py | awfssv/ShiPanE-Python-SDK | 678790e5eb220cf685e5f8d03ba3310f3fbb8d22 | [
"MIT"
] | 1 | 2021-05-21T02:12:04.000Z | 2021-05-21T02:12:04.000Z | # -*- coding: utf-8 -*-
| 22.477612 | 55 | 0.581009 |
d4c06417dd5e89491398d91b568c1842895c3961 | 14,779 | py | Python | tensorflow_probability/python/distributions/laplace_test.py | wataruhashimoto52/probability | 12e3f256544eadea6e863868da825614f4423eb0 | [
"Apache-2.0"
] | 1 | 2020-04-13T12:31:12.000Z | 2020-04-13T12:31:12.000Z | tensorflow_probability/python/distributions/laplace_test.py | wataruhashimoto52/probability | 12e3f256544eadea6e863868da825614f4423eb0 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/laplace_test.py | wataruhashimoto52/probability | 12e3f256544eadea6e863868da825614f4423eb0 | [
"Apache-2.0"
] | 1 | 2020-12-19T13:05:15.000Z | 2020-12-19T13:05:15.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats as sp_stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
if __name__ == '__main__':
tf.test.main()
| 38.790026 | 109 | 0.66175 |
d4c0845bc0b80a14fbe5e783d9ed64b00db19bce | 3,383 | py | Python | app/__init__.py | credwood/bitplayers | 4ca6b6c6a21bb21d7cd963c64028415559c3dcc4 | [
"MIT"
] | 1 | 2020-06-26T21:49:14.000Z | 2020-06-26T21:49:14.000Z | app/__init__.py | credwood/bitplayers | 4ca6b6c6a21bb21d7cd963c64028415559c3dcc4 | [
"MIT"
] | 2 | 2020-03-31T11:11:04.000Z | 2021-12-13T20:38:48.000Z | app/__init__.py | credwood/bitplayers | 4ca6b6c6a21bb21d7cd963c64028415559c3dcc4 | [
"MIT"
] | null | null | null | import dash
from flask import Flask
from flask.helpers import get_root_path
from flask_login import login_required
from flask_wtf.csrf import CSRFProtect
from flask_admin import Admin, BaseView, expose
from flask_admin.contrib.sqla import ModelView
from datetime import datetime
from dateutil import parser
import pytz
from pytz import timezone
from config import BaseConfig
csrf = CSRFProtect()
| 35.610526 | 111 | 0.738693 |
d4c0decddfc9adf11a583ac3c85b167de4ffaed9 | 33,707 | py | Python | selectinf/randomized/approx_reference_grouplasso.py | kevinbfry/selective-inference | 4e846877b5c23969fc420b452f20cc3b16b6cb78 | [
"BSD-3-Clause"
] | 14 | 2015-09-01T19:31:25.000Z | 2021-11-26T08:47:10.000Z | selectinf/randomized/approx_reference_grouplasso.py | kevinbfry/selective-inference | 4e846877b5c23969fc420b452f20cc3b16b6cb78 | [
"BSD-3-Clause"
] | 7 | 2016-09-12T20:41:41.000Z | 2018-06-26T02:10:30.000Z | selectinf/randomized/approx_reference_grouplasso.py | kevinbfry/selective-inference | 4e846877b5c23969fc420b452f20cc3b16b6cb78 | [
"BSD-3-Clause"
] | 10 | 2015-09-01T19:31:28.000Z | 2021-02-23T01:16:20.000Z | from __future__ import print_function
from scipy.linalg import block_diag
from scipy.stats import norm as ndist
from scipy.interpolate import interp1d
import collections
import numpy as np
from numpy import log
from numpy.linalg import norm, qr, inv, eig
import pandas as pd
import regreg.api as rr
from .randomization import randomization
from ..base import restricted_estimator
from ..algorithms.barrier_affine import solve_barrier_affine_py as solver
from ..distributions.discrete_family import discrete_family
def solve_barrier_affine_jacobian_py(conjugate_arg,
precision,
feasible_point,
con_linear,
con_offset,
C,
active_dirs,
useJacobian=True,
step=1,
nstep=2000,
min_its=500,
tol=1.e-12):
"""
This needs to be updated to actually use the Jacobian information (in self.C)
arguments
conjugate_arg: \\bar{\\Sigma}^{-1} \bar{\\mu}
precision: \\bar{\\Sigma}^{-1}
feasible_point: gamma's from fitting
con_linear: linear part of affine constraint used for barrier function
con_offset: offset part of affine constraint used for barrier function
C: V^T Q^{-1} \\Lambda V
active_dirs:
"""
scaling = np.sqrt(np.diag(con_linear.dot(precision).dot(con_linear.T)))
if feasible_point is None:
feasible_point = 1. / scaling
current = feasible_point
current_value = np.inf
for itercount in range(nstep):
cur_grad = grad(current)
# make sure proposal is feasible
count = 0
while True:
count += 1
proposal = current - step * cur_grad
if np.all(con_offset - con_linear.dot(proposal) > 0):
break
step *= 0.5
if count >= 40:
raise ValueError('not finding a feasible point')
# make sure proposal is a descent
count = 0
while True:
count += 1
proposal = current - step * cur_grad
proposed_value = objective(proposal)
if proposed_value <= current_value:
break
step *= 0.5
if count >= 20:
if not (np.isnan(proposed_value) or np.isnan(current_value)):
break
else:
raise ValueError('value is NaN: %f, %f' % (proposed_value, current_value))
# stop if relative decrease is small
if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value) and itercount >= min_its:
current = proposal
current_value = proposed_value
break
current = proposal
current_value = proposed_value
if itercount % 4 == 0:
step *= 2
hess = inv(precision + barrier_hessian(current))
return current_value, current, hess
# Jacobian calculations
def calc_GammaMinus(gamma, active_dirs):
"""Calculate Gamma^minus (as a function of gamma vector, active directions)
"""
to_diag = [[g] * (ug.size - 1) for (g, ug) in zip(gamma, active_dirs.values())]
return block_diag(*[i for gp in to_diag for i in gp])
def jacobian_grad_hess(gamma, C, active_dirs):
""" Calculate the log-Jacobian (scalar), gradient (gamma.size vector) and hessian (gamma.size square matrix)
"""
if C.shape == (0, 0): # when all groups are size one, C will be an empty array
return 0, 0, 0
else:
GammaMinus = calc_GammaMinus(gamma, active_dirs)
# eigendecomposition
#evalues, evectors = eig(GammaMinus + C)
# log Jacobian
#J = log(evalues).sum()
J = np.log(np.linalg.det(GammaMinus + C))
# inverse
#GpC_inv = evectors.dot(np.diag(1 / evalues).dot(evectors.T))
GpC_inv = np.linalg.inv(GammaMinus + C)
# summing matrix (gamma.size by C.shape[0])
S = block_diag(*[np.ones((1, ug.size - 1)) for ug in active_dirs.values()])
# gradient
grad_J = S.dot(GpC_inv.diagonal())
# hessian
hess_J = -S.dot(np.multiply(GpC_inv, GpC_inv.T).dot(S.T))
return J, grad_J, hess_J
def _check_groups(groups):
"""Make sure that the user-specific groups are ok
There are a number of assumptions that group_lasso makes about
how groups are specified. Specifically, we assume that
`groups` is a 1-d array_like of integers that are sorted in
increasing order, start at 0, and have no gaps (e.g., if there
is a group 2 and a group 4, there must also be at least one
feature in group 3).
This function checks the user-specified group scheme and
raises an exception if it finds any problems.
Sorting feature groups is potentially tedious for the user and
in future we might do this for them.
"""
# check array_like
agroups = np.array(groups)
# check dimension
if len(agroups.shape) != 1:
raise ValueError("Groups are not a 1D array_like")
# check sorted
if np.any(agroups[:-1] > agroups[1:]) < 0:
raise ValueError("Groups are not sorted")
# check integers
if not np.issubdtype(agroups.dtype, np.integer):
raise TypeError("Groups are not integers")
# check starts with 0
if not np.amin(agroups) == 0:
raise ValueError("First group is not 0")
# check for no skipped groups
if not np.all(np.diff(np.unique(agroups)) == 1):
raise ValueError("Some group is skipped")
| 37.830527 | 116 | 0.556709 |
d4c1d2fbba6d7c550c2607f8f36af9eb36384e04 | 18,606 | py | Python | internals/states.py | mattjj/pyhsmm-collapsedinfinite | 81a60c025beec6fb065bc9f4e23cea43b6f6725c | [
"MIT"
] | null | null | null | internals/states.py | mattjj/pyhsmm-collapsedinfinite | 81a60c025beec6fb065bc9f4e23cea43b6f6725c | [
"MIT"
] | null | null | null | internals/states.py | mattjj/pyhsmm-collapsedinfinite | 81a60c025beec6fb065bc9f4e23cea43b6f6725c | [
"MIT"
] | 1 | 2021-10-06T15:12:44.000Z | 2021-10-06T15:12:44.000Z | from __future__ import division
import numpy as np
na = np.newaxis
import collections, itertools
import abc
from pyhsmm.util.stats import sample_discrete, sample_discrete_from_log, combinedata
from pyhsmm.util.general import rle as rle
# NOTE: assumes censoring. can make no censoring by adding to score of last
# segment
SAMPLING = -1 # special constant for indicating a state or state range that is being resampled
NEW = -2 # special constant indicating a potentially new label
ABIGNUMBER = 10000 # state labels are sampled uniformly from 0 to abignumber exclusive
####################
# States Classes #
####################
# TODO an array class that maintains its own rle
# must override set methods
# type(x).__setitem__(x,i) classmethod
# also has members norep and lens (or something)
# that are either read-only or also override setters
# for now, i'll just make sure outside that anything that sets self.stateseq
# also sets self.stateseq_norep and self.durations
# it should also call beta updates...
class collapsed_stickyhdphmm_states(collapsed_states):
class collapsed_hdphsmm_states(collapsed_states):
### label sampler stuff
def _local_group(self,t,k):
'''
returns a sequence of length between 1 and 3, where each sequence element is
((data,otherdata), (dur,otherdurs))
'''
# temporarily modifies members, like self.stateseq and maybe self.data
assert self.stateseq[t] == SAMPLING
orig_stateseq = self.stateseq.copy()
# temporarily set stateseq to hypothetical stateseq
# so that we can get the indicator sequence
# TODO if i write the special stateseq class, this will need fixing
self.stateseq[t] = k
wholegroup, pieces = self._local_slices(self.stateseq,t)
self.stateseq[t] = SAMPLING
# build local group of statistics
localgroup = []
self.stateseq[wholegroup] = SAMPLING
for piece, val in pieces:
# get all the other data
otherdata, otherdurs = self.model._data_withlabel(val), self.model._durs_withlabel(val)
# add a piece to our localgroup
localgroup.append(((self.data[piece],otherdata),(piece.stop-piece.start,otherdurs)))
# remove the used piece from the exclusion
self.stateseq[piece] = orig_stateseq[piece]
# restore original views
self.stateseq = orig_stateseq
# return
return localgroup
#######################
# Utility Functions #
#######################
| 35.849711 | 110 | 0.577448 |
d4c20caa8c6caaf656d4639f0a7424aba4ba6e44 | 1,406 | py | Python | exporters/contrib/writers/odo_writer.py | scrapinghub/exporters | b14f70530826bbbd6163d9e56e74345e762a9189 | [
"BSD-3-Clause"
] | 41 | 2016-06-16T15:29:39.000Z | 2021-08-06T03:29:13.000Z | exporters/contrib/writers/odo_writer.py | bbotella/fluxo | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | [
"BSD-3-Clause"
] | 52 | 2016-06-20T12:46:57.000Z | 2018-02-08T12:22:03.000Z | exporters/contrib/writers/odo_writer.py | bbotella/fluxo | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | [
"BSD-3-Clause"
] | 10 | 2016-06-23T08:49:36.000Z | 2018-01-13T10:12:10.000Z | import six
import json
import gzip
from exporters.default_retries import retry_long
from exporters.writers.base_writer import BaseWriter
| 31.244444 | 93 | 0.642959 |
d4c271023ce05496e3aeca43f2ffb25c230ab172 | 184 | py | Python | x7/geom/needs_test.py | gribbg/x7-geom | a01ef29dc47f1587e3390b552decf92db0bbaa20 | [
"BSD-2-Clause"
] | null | null | null | x7/geom/needs_test.py | gribbg/x7-geom | a01ef29dc47f1587e3390b552decf92db0bbaa20 | [
"BSD-2-Clause"
] | null | null | null | x7/geom/needs_test.py | gribbg/x7-geom | a01ef29dc47f1587e3390b552decf92db0bbaa20 | [
"BSD-2-Clause"
] | null | null | null | """
Simple file to validate that maketests is working. Call maketests via:
>>> from x7.shell import *; maketests('x7.sample.needs_tests')
"""
| 20.444444 | 71 | 0.695652 |
d4c391278bd0cf509c7b23a6660f7d6beb4dfdb7 | 3,960 | py | Python | python/SHA3_hashlib_based_concept.py | feketebv/SCA_proof_SHA3-512 | 5a7689ea307463d5b797e49142c349b02cdcda03 | [
"MIT"
] | 1 | 2021-05-19T00:08:15.000Z | 2021-05-19T00:08:15.000Z | python/SHA3_hashlib_based_concept.py | feketebv/SCA_proof_SHA3-512 | 5a7689ea307463d5b797e49142c349b02cdcda03 | [
"MIT"
] | null | null | null | python/SHA3_hashlib_based_concept.py | feketebv/SCA_proof_SHA3-512 | 5a7689ea307463d5b797e49142c349b02cdcda03 | [
"MIT"
] | null | null | null | '''
Written by: Balazs Valer Fekete [email protected] [email protected]
Last updated: 29.01.2021
'''
# the concept is to generate a side channel resistant initialisation of the hashing function based on
# one secret key and several openly known initialisation vectors (IV) in a manner that the same input
# is not hashed too more than two times, which is hopefully not sufficient for side channel
# measurements based computations: the number of consecutive measurements for a successful attack on
# the CHI function in a practically noiseless computer simulation (see "chi_cpa.py") takes around a
# 100 measurements
# this concept is achieved by taking a counter of a certain bitlength, and twice as many IVs as bits in
# the counter: "IV0s" and "IV1s" and compute a series of hashes starting with the secret key then with a
# correspong IV of the sets 0 and 1 based on whether the counter's corresponding bit - starting at MSB -
# is 0 or 1; this way every hash output is exactly used 2 times if the intermediate values are STORTED
# and the entire series of initial hashes are NOT fully recomputed only such whose corresponding
# counter bits has changed and all the next levels too down to the LSB of the counter
# the working solution is going to based on the algorithms presented here, although
# in this file the algorithm here does the full padding so the results won't equal to
# a scheme where the rate is fully filled with IVs and the data comes only afterwards...
import hashlib
# KEY DATA STRUCTURES' INTERPRETATION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
IV0s = [658678, 6785697, 254376, 67856, 1432543, 786, 124345, 5443654]
IV1s = [2565, 256658, 985, 218996, 255, 685652, 28552, 3256565]
# LSB ... MSB
hash_copies = [None for i in range(len(IV0s))]
# LSB ... MSB
# counter
# MSB ... LSB
# COMPUTING HASHES FOR EVERY COUNTER VALUE INDIVIDUALLY
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for counter in range(11):
hash = hashlib.sha3_512()
# looping from MSB to LSB in counter too
for i in range(len(IV0s)-1, -1, -1):
if (counter>>i) & 1 == 1:
IV = bytes(IV1s[i])
else:
IV = bytes(IV0s[i])
hash.update(IV)
print(hash.hexdigest())
print()
# COMPUTING HASHES BASED ON THE NATURE OF BINARY INCREMENTATION:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# only fewer values need to be recomputed, those whose corresponding
# bits have changed, down until LSB
# initialize
hash = hashlib.sha3_512()
# looping from MSB to LSB
for i in range(len(IV0s)-1, -1, -1):
# addressing "MSB" of IVs at first, "LSB" at last!
IV = bytes(IV0s[i])
hash.update(IV)
# index 0 of hash_copies changes the most frequently ie. according to counter's LSB
hash_copies[i] = hash.copy()
# compute
last_counter = 0
for counter in range(11):
IV_mask = last_counter ^ counter
last_counter = counter
# determine the highest non-zero bit of IV_mask, LSB is 1, 0 means there was no change
nz = 0
while IV_mask > 0:
IV_mask >>= 1
nz += 1
# initialize hash to the last value whose corresponding counter bit didn't switch
# have to copy object otherwise the originally pointed version gets updated!
hash = hash_copies[nz].copy() # LSB is index 0
# compute only the remaining hashes
while nz != 0: # nz=0 is the initial condition, nothing needs to be done
nz -= 1
if (counter>>nz) & 1 == 1:
IV = bytes(IV1s[nz])
else:
IV = bytes(IV0s[nz])
hash.update(IV)
# needs to be copied again because of object orientation
hash_copies[nz] = hash.copy()
# showing the hash copies' entire table after each computation
#for hashes in hash_copies:
# print(hashes.hexdigest())
print(hash_copies[0].hexdigest())
| 40 | 105 | 0.65303 |
d4c3adf62c8a44bad01c91e8ccec7e900d2597c3 | 1,573 | py | Python | graphstar/utils.py | pengboomouch/graphstar | f7f3537aa92118765b358dd3a47b4fa5cea8587c | [
"MIT"
] | null | null | null | graphstar/utils.py | pengboomouch/graphstar | f7f3537aa92118765b358dd3a47b4fa5cea8587c | [
"MIT"
] | null | null | null | graphstar/utils.py | pengboomouch/graphstar | f7f3537aa92118765b358dd3a47b4fa5cea8587c | [
"MIT"
] | null | null | null | """
graphstar.utils
~~~~~~~~~~~~~~~
Cristian Cornea
A simple bedirectional graph with A* and breadth-first pathfinding.
Utils are either used by the search algorithm, or when needed :)
Pretty self explainatory (I hope)
For more information see the examples and tests folder
"""
def clean_route_list(route_stack: list, goal_node_id: int):
"""
Creates an ordered route list from start to finish
with all node ids needed to traverse to the goal.
:param route_stack: All routes found until goal
:param goal_node: int ID of the goal node
:return: list A ordered list from start to goal
"""
r = []
next_node = goal_node_id
reversed_stack = reversed(route_stack)
for c in reversed_stack:
if c.to_node.id == next_node:
r.append(c.to_node.id)
r.append(c.from_node.id)
next_node = c.from_node.id
return list(set(r))
| 24.968254 | 68 | 0.688493 |
d4c411c2e8e16ded3277d3bfc3c35dd1f462b513 | 527 | py | Python | jinchi/demo/foobar.py | jiz148/py-test | d976265d065c760f2e8b55302dedbfebd01bec28 | [
"Apache-2.0"
] | null | null | null | jinchi/demo/foobar.py | jiz148/py-test | d976265d065c760f2e8b55302dedbfebd01bec28 | [
"Apache-2.0"
] | null | null | null | jinchi/demo/foobar.py | jiz148/py-test | d976265d065c760f2e8b55302dedbfebd01bec28 | [
"Apache-2.0"
] | 1 | 2019-01-07T18:42:53.000Z | 2019-01-07T18:42:53.000Z | import os
def check_env(env_var_name):
"""
Check and return the type of an environment variable.
supported types:
None
Integer
String
@param env_var_name: environment variable name
@return: string of the type name.
"""
try:
val = os.getenv(env_var_name)
if val is None:
return 'None'
except Exception as ex:
return "None"
try:
int_val = int(val)
return 'Integer'
except ValueError:
return 'String'
| 18.821429 | 57 | 0.578748 |
d4c4c2df87ed6c462e4aab6092109b050d3d20d5 | 759 | py | Python | sound/serializers.py | Anirudhchoudhary/ApnaGanna__backend | 52e6c3100fdb289e8bf64a1a4007eeb2eb66a022 | [
"MIT"
] | null | null | null | sound/serializers.py | Anirudhchoudhary/ApnaGanna__backend | 52e6c3100fdb289e8bf64a1a4007eeb2eb66a022 | [
"MIT"
] | null | null | null | sound/serializers.py | Anirudhchoudhary/ApnaGanna__backend | 52e6c3100fdb289e8bf64a1a4007eeb2eb66a022 | [
"MIT"
] | null | null | null | from .models import Sound , Album
from rest_framework import serializers
| 27.107143 | 102 | 0.637681 |
d4c56f7b05d7fe221ca2f682d2bea0e270121b36 | 2,000 | py | Python | tracking/utils.py | WGBH/django-tracking | 80e8bc44521820eab956d2264d6df0b6987429e0 | [
"MIT"
] | null | null | null | tracking/utils.py | WGBH/django-tracking | 80e8bc44521820eab956d2264d6df0b6987429e0 | [
"MIT"
] | null | null | null | tracking/utils.py | WGBH/django-tracking | 80e8bc44521820eab956d2264d6df0b6987429e0 | [
"MIT"
] | null | null | null | from datetime import datetime
from django.conf import settings
import pytz
DEFAULT_TRACKER_POSITIONS = [
('tracker-head-top', 'Head - near top'),
('tracker-head-bottom', 'Head - near bottom'),
('tracker-body-top', 'Body - near top'),
('tracker-body-bottom', 'Body - near bottom')
]
def get_tracker_position_options():
"""
This creates the dropdown in the Admin for where to put each tracker.
It defaults to the obvious 4 location (top/bottom of the head/body);
however the user can create more by adding a list of 3-ples in the settings
file under ADDITIONAL_TRACKER_POSITIONS.
(2-letter-code, description, block name), e.g.
('HN', 'Header Navigation', 'header-navigation-trackers')
would allow for the user to have tracking code in a navbar (no, I don't know
why they'd want this) if they put
{% block header-navigation-trackers %}{% generate_trackers 'HN' %}{% endblock %}
in their template.
"""
tracker_position_list = DEFAULT_TRACKER_POSITIONS
additional_tracker_positions = getattr(settings, "ADDITIONAL_TRACKER_POSITIONS", [])
full_list = list()
for x in (tracker_position_list + additional_tracker_positions):
full_list.append((x[0], x[1]))
return full_list | 35.087719 | 88 | 0.665 |
d4c5d71a8319e8e4743e5c7446b67b54ee62af61 | 256 | py | Python | devtools/api/health.py | ankeshkhemani/devtools | beb9a46c27b6b4c02a2e8729af0c971cc175f134 | [
"Apache-2.0"
] | null | null | null | devtools/api/health.py | ankeshkhemani/devtools | beb9a46c27b6b4c02a2e8729af0c971cc175f134 | [
"Apache-2.0"
] | null | null | null | devtools/api/health.py | ankeshkhemani/devtools | beb9a46c27b6b4c02a2e8729af0c971cc175f134 | [
"Apache-2.0"
] | null | null | null | import datetime
from fastapi import APIRouter
router = APIRouter()
| 17.066667 | 56 | 0.605469 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.