blob_id (string) | directory_id (string) | path (string) | content_id (string) | detected_licenses (sequence of strings) | license_type (string) | repo_name (string) | snapshot_id (string) | revision_id (string) | branch_name (string) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, nullable) | star_events_count (int64) | fork_events_count (int64) | gha_license_id (string) | gha_event_created_at (timestamp[us], nullable) | gha_created_at (timestamp[us], nullable) | gha_language (string) | src_encoding (string) | language (string) | is_vendor (bool) | is_generated (bool) | length_bytes (int64) | extension (string) | content (string) | authors (sequence of strings) | author_id (string) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
98f76ec619a2e488aa99de17c4447d474c1cb2e1 | 3f6c16ea158a8fb4318b8f069156f1c8d5cff576 | /.PyCharm2019.1/system/python_stubs/-1046095393/atexit.py | 3b4fb40c097ce9444aa1ae283f0da5efbfc50ffd | [] | no_license | sarthak-patidar/dotfiles | 08494170d2c0fedc0bbe719cc7c60263ce6fd095 | b62cd46f3491fd3f50c704f0255730af682d1f80 | refs/heads/master | 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 | Shell | UTF-8 | Python | false | false | 4,738 | py | # encoding: utf-8
# module atexit
# from (built-in)
# by generator 1.147
"""
allow programmer to define multiple exit functions to be executed upon normal program termination.
Two public functions, register and unregister, are defined.
"""
# no imports
# functions
def register(func, *args, **kwargs): # real signature unknown; restored from __doc__
"""
register(func, *args, **kwargs) -> func
Register a function to be executed upon normal program termination
func - function to be called at exit
args - optional arguments to pass to func
kwargs - optional keyword arguments to pass to func
func is returned to facilitate usage as a decorator.
"""
pass
def unregister(func): # real signature unknown; restored from __doc__
"""
unregister(func) -> None
Unregister an exit function which was previously registered using
atexit.register
func - function to be unregistered
"""
pass
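# Usage sketch (added for clarity; not part of the generated stub). register() can be
# called directly or used as a decorator, and handlers run in last-in, first-out order
# at normal interpreter exit:
#
#     import atexit
#
#     def close_log(path):
#         print("closing", path)
#
#     atexit.register(close_log, "/tmp/app.log")   # extra args are forwarded at exit
#
#     @atexit.register
#     def goodbye():                               # decorator form; the function is returned unchanged
#         print("bye")
#
#     atexit.unregister(close_log)                 # drop a previously registered handler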
def _clear(): # real signature unknown; restored from __doc__
"""
_clear() -> None
Clear the list of previously registered exit functions.
"""
pass
def _ncallbacks(): # real signature unknown; restored from __doc__
"""
_ncallbacks() -> int
Return the number of registered exit functions.
"""
return 0
def _run_exitfuncs(): # real signature unknown; restored from __doc__
"""
_run_exitfuncs() -> None
Run all registered exit functions.
"""
pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7f1f2a7150f0>, 'find_spec': <classmethod object at 0x7f1f2a715128>, 'find_module': <classmethod object at 0x7f1f2a715160>, 'create_module': <classmethod object at 0x7f1f2a715198>, 'exec_module': <classmethod object at 0x7f1f2a7151d0>, 'get_code': <classmethod object at 0x7f1f2a715240>, 'get_source': <classmethod object at 0x7f1f2a7152b0>, 'is_package': <classmethod object at 0x7f1f2a715320>, 'load_module': <classmethod object at 0x7f1f2a715358>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
__spec__ = None # (!) real value is "ModuleSpec(name='atexit', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')"
| [
"[email protected]"
] | |
17a0b25b7520802c0316a50b66f74a804df1a76e | caaf56727714f8c03be38710bc7d0434c3ec5b11 | /tests/components/abode/test_light.py | 6506746783c2c8bc154c57ee3317833d02c7ff28 | [
"Apache-2.0"
] | permissive | tchellomello/home-assistant | c8db86880619d7467901fd145f27e0f2f1a79acc | ed4ab403deaed9e8c95e0db728477fcb012bf4fa | refs/heads/dev | 2023-01-27T23:48:17.550374 | 2020-09-18T01:18:55 | 2020-09-18T01:18:55 | 62,690,461 | 8 | 1 | Apache-2.0 | 2023-01-13T06:02:03 | 2016-07-06T04:13:49 | Python | UTF-8 | Python | false | false | 4,040 | py | """Tests for the Abode light device."""
from homeassistant.components.abode import ATTR_DEVICE_ID
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_RGB_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from .common import setup_platform
from tests.async_mock import patch
DEVICE_ID = "light.living_room_lamp"
async def test_entity_registry(hass):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, LIGHT_DOMAIN)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get(DEVICE_ID)
assert entry.unique_id == "741385f4388b2637df4c6b398fe50581"
async def test_attributes(hass):
"""Test the light attributes are correct."""
await setup_platform(hass, LIGHT_DOMAIN)
state = hass.states.get(DEVICE_ID)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_BRIGHTNESS) == 204
assert state.attributes.get(ATTR_RGB_COLOR) == (0, 63, 255)
assert state.attributes.get(ATTR_COLOR_TEMP) == 280
assert state.attributes.get(ATTR_DEVICE_ID) == "ZB:db5b1a"
assert not state.attributes.get("battery_low")
assert not state.attributes.get("no_response")
assert state.attributes.get("device_type") == "RGB Dimmer"
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "Living Room Lamp"
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 19
async def test_switch_off(hass):
"""Test the light can be turned off."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.switch_off") as mock_switch_off:
assert await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
)
await hass.async_block_till_done()
mock_switch_off.assert_called_once()
async def test_switch_on(hass):
"""Test the light can be turned on."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.switch_on") as mock_switch_on:
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
)
await hass.async_block_till_done()
mock_switch_on.assert_called_once()
async def test_set_brightness(hass):
"""Test the brightness can be set."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.set_level") as mock_set_level:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEVICE_ID, "brightness": 100},
blocking=True,
)
await hass.async_block_till_done()
# Brightness is converted in abode.light.AbodeLight.turn_on
mock_set_level.assert_called_once_with(39)
async def test_set_color(hass):
"""Test the color can be set."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.set_color") as mock_set_color:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEVICE_ID, "hs_color": [240, 100]},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_once_with((240.0, 100.0))
async def test_set_color_temp(hass):
"""Test the color temp can be set."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.set_color_temp") as mock_set_color_temp:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEVICE_ID, "color_temp": 309},
blocking=True,
)
await hass.async_block_till_done()
# Color temp is converted in abode.light.AbodeLight.turn_on
mock_set_color_temp.assert_called_once_with(3236)
| [
"[email protected]"
] | |
5efc101cdbf8e412920f0ccebaf0c2a572e6f7ba | af6e7f0927517375cb4af833f4c52e301bad0af5 | /corpus_processor/topic_aware/filter_qa_corpus_by_topic_list.py | 90d3fa8fa6d532a86b504d45378701a28a47ca24 | [] | no_license | wolfhu/DialogPretraining | 470334fd815e1299981b827fdc933d237a489efd | eeeada92146d652d81ca6e961d1298924ac8435d | refs/heads/main | 2023-06-25T15:22:54.728187 | 2021-07-21T01:40:23 | 2021-07-21T01:40:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | # encoding: utf-8
import sys
from util.trie import Trie
tag_file_path = '/home/t-yuniu/xiaoice/yuniu/dataset/processed/domain/sport/keywords'
# Tag 黑名单
tag_black_dict = {}
# tag_black_dict.setdefault('游戏', True)
tag_trie = Trie()
def detect_tag(sentence):
"""
Judge if sentence contain as least a tag.
:param sentence: query or answer
:return: boolean, True if contain, False otherwise.
"""
length = len(sentence)
detected_tags = []
for idx in range(length):
node = tag_trie.lookup
idx_tmp = idx
while True:
if idx_tmp >= length:
break
if sentence[idx_tmp] in node:
node = node[sentence[idx_tmp]]
idx_tmp += 1
if Trie.END in node:
detected_tags.append(sentence[idx:idx_tmp])
else:
break
return detected_tags
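# Illustrative example (the real tags come from the keywords file loaded in __main__
# below; the two tags here are made up for the sketch):
#
#     tag_trie.insert(u'世界杯')
#     tag_trie.insert(u'球迷')
#     detect_tag(u'世界杯让球迷沸腾')   # -> [u'世界杯', u'球迷']
#
# detect_tag restarts the trie walk at every character position, so overlapping or
# repeated tags are all collected; the caller de-duplicates with set() before printing.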
if __name__ == '__main__':
# build trie from tag file
with open(tag_file_path) as douban_tag_file:
for line in douban_tag_file.readlines():
tag = line.strip()
if len(tag) == 1 or tag in tag_black_dict:
continue
tag_trie.insert(tag)
# filter corpus contain tags
while True:
line = sys.stdin.readline().strip()
if line:
try:
line = line.replace('#', '')
query, answer = line.split('\t')[:2]
# detected_tags = detect_tag(query)
detected_tags = []
detected_tags.extend(detect_tag(answer))
if len(detected_tags) > 0:
print('\t'.join([' '.join(set(detected_tags)), query, answer]))
except ValueError:
sys.stdout.write('Illegal line.\n')
else:
break
| [
"[email protected]"
] | |
4dad72ebc7956f2e83c677733d880dec2b2fd50f | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_10938.py | 978f85ef643f608646c73eface82e3ca6748bc7b | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | # Installing psycopg2 on Mountain Lion (brew + pip)
defaults write com.apple.versioner.python Prefer-32-Bit -bool no
| [
"[email protected]"
] | |
08de5456e8af14a088ef40511b9b3774b8805421 | a7807e4a49a06b748cff273fe8c0dc79b5e64ca8 | /orby/Scripts/django-admin.py | e15c2e9f1d2447bd8344d9aa8ae6a8f207aaf426 | [] | no_license | orhunakar01/labotestalep | 0cb864522821f9d4f168996db15a38fc166d57b3 | 6c6958d49e65d30d5f80c09ee1618c8cc7dd8100 | refs/heads/master | 2023-03-23T00:32:49.474106 | 2021-03-16T18:46:08 | 2021-03-16T18:46:08 | 348,455,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | #!c:\users\orhun\desktop\djangologin\orby\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"[email protected]"
] | |
66aefdce6c1839e0f4b8dfbe62df72f1d60af25d | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /accelbyte_py_sdk/api/match2/operations/backfill/accept_backfill.py | e35c0aa725d1ba4f04c748c7ad397734654b2930 | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 8,605 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# AccelByte Gaming Services Match Service V2 (2.8.4)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ApiBackFillAcceptRequest
from ...models import ModelsGameSession
from ...models import ResponseError
class AcceptBackfill(Operation):
"""Accept a backfill proposal (AcceptBackfill)
Required Permission: NAMESPACE:{namespace}:MATCHMAKING:BACKFILL [UPDATE]
Required Scope: social
Accept backfill proposal
Required Permission(s):
- NAMESPACE:{namespace}:MATCHMAKING:BACKFILL [UPDATE]
Required Scope(s):
- social
Properties:
url: /match2/v1/namespaces/{namespace}/backfill/{backfillID}/proposal/accept
method: PUT
tags: ["Backfill", "public"]
consumes: ["application/json"]
produces: ["application/json"]
securities: [BEARER_AUTH]
body: (body) REQUIRED ApiBackFillAcceptRequest in body
backfill_id: (backfillID) REQUIRED str in path
namespace: (namespace) REQUIRED str in path
Responses:
200: OK - ModelsGameSession (OK)
400: Bad Request - ResponseError (Bad Request)
401: Unauthorized - ResponseError (Unauthorized)
403: Forbidden - ResponseError (Forbidden)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
# region fields
_url: str = (
"/match2/v1/namespaces/{namespace}/backfill/{backfillID}/proposal/accept"
)
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
body: ApiBackFillAcceptRequest # REQUIRED in [body]
backfill_id: str # REQUIRED in [path]
namespace: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
if not hasattr(self, "body") or self.body is None:
return None
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "backfill_id"):
result["backfillID"] = self.backfill_id
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_body(self, value: ApiBackFillAcceptRequest) -> AcceptBackfill:
self.body = value
return self
def with_backfill_id(self, value: str) -> AcceptBackfill:
self.backfill_id = value
return self
def with_namespace(self, value: str) -> AcceptBackfill:
self.namespace = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ApiBackFillAcceptRequest()
if hasattr(self, "backfill_id") and self.backfill_id:
result["backfillID"] = str(self.backfill_id)
elif include_empty:
result["backfillID"] = ""
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(
self, code: int, content_type: str, content: Any
) -> Tuple[
Union[None, ModelsGameSession], Union[None, HttpResponse, ResponseError]
]:
"""Parse the given response.
200: OK - ModelsGameSession (OK)
400: Bad Request - ResponseError (Bad Request)
401: Unauthorized - ResponseError (Unauthorized)
403: Forbidden - ResponseError (Forbidden)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(
code=code, content_type=content_type, content=content
)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return ModelsGameSession.create_from_dict(content), None
if code == 400:
return None, ResponseError.create_from_dict(content)
if code == 401:
return None, ResponseError.create_from_dict(content)
if code == 403:
return None, ResponseError.create_from_dict(content)
if code == 404:
return None, ResponseError.create_from_dict(content)
if code == 500:
return None, ResponseError.create_from_dict(content)
return self.handle_undocumented_response(
code=code, content_type=content_type, content=content
)
# endregion response methods
# region static methods
@classmethod
def create(
cls, body: ApiBackFillAcceptRequest, backfill_id: str, namespace: str, **kwargs
) -> AcceptBackfill:
instance = cls()
instance.body = body
instance.backfill_id = backfill_id
instance.namespace = namespace
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> AcceptBackfill:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ApiBackFillAcceptRequest.create_from_dict(
dict_["body"], include_empty=include_empty
)
elif include_empty:
instance.body = ApiBackFillAcceptRequest()
if "backfillID" in dict_ and dict_["backfillID"] is not None:
instance.backfill_id = str(dict_["backfillID"])
elif include_empty:
instance.backfill_id = ""
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"backfillID": "backfill_id",
"namespace": "namespace",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"body": True,
"backfillID": True,
"namespace": True,
}
# endregion static methods
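# Minimal usage sketch (illustrative only; the angle-bracketed values are placeholders
# and the request body must be filled in per the match2 backfill API):
#
#     op = AcceptBackfill.create(
#         body=ApiBackFillAcceptRequest(),   # populate with the proposal being accepted
#         backfill_id="<backfill-id>",
#         namespace="<namespace>",
#     )
#     # The SDK's request runner executes `op` and feeds the HTTP result into
#     # op.parse_response(code, content_type, content), which returns a
#     # (ModelsGameSession, error) pair as documented above.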
| [
"[email protected]"
] | |
94e3d38dd3a5674a0272aeb4ea010d9f7a9abfd2 | 7dcdd5de0640f07b01b1707c134ec0bd168f641d | /fedora_college/modules/content/views.py | b1019c221326d657588aa1b01f790aaa7115edba | [
"BSD-3-Clause"
] | permissive | MSheezan/fedora-college | 8e3e741f6ddac481c2bb7bbcde1e70e2b4b56774 | 07dbce3652c6c1796fb0f7b208a706c9e9d90dc1 | refs/heads/master | 2021-01-15T22:38:16.831830 | 2014-06-26T07:04:33 | 2014-06-26T07:04:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,439 | py | # -*- coding: utf-8 -*-
import re
#import time
from unicodedata import normalize
from flask import Blueprint, render_template
from flask import redirect, url_for, g
from sqlalchemy import desc
from fedora_college.core.database import db
from fedora_college.modules.content.forms import * # noqa
from fedora_college.core.models import * # noqa
from flask_fas_openid import fas_login_required
bundle = Blueprint('content', __name__, template_folder='templates')
from fedora_college.modules.content.media import * # noqa
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an slightly worse ASCII-only slug."""
#stri = (time.strftime("%d/%m/%Y"))
#text = stri + "-" + text
result = []
for word in _punct_re.split(text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delim.join(result))
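# Example: slugify(u'Hello, World!') -> u'hello-world'
# (lower-cases the text, splits on the punctuation class above, strips accents via
# NFKD normalisation, drops empty fragments and joins the rest with the delimiter).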
def attach_tags(tags, content):
rem = TagsMap.query.filter_by(content_id=content.content_id).all()
for r in rem:
db.session.delete(r)
db.session.commit()
for tag in tags:
tag_db = Tags.query.filter_by(tag_text=tag).first()
if tag_db is None:
tag_db = Tags(tag)
db.session.add(tag_db)
db.session.commit()
Map = TagsMap(tag_db.tag_id, content.content_id)
db.session.add(Map)
db.session.commit()
@bundle.route('/content/add/', methods=['GET', 'POST'])
@bundle.route('/content/add', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>/', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>', methods=['GET', 'POST'])
@fas_login_required
def addcontent(posturl=None):
form = CreateContent()
form_action = url_for('content.addcontent')
media = Media.query.order_by(desc(Media.timestamp)).limit(10).all()
if posturl is not None:
content = Content.query.filter_by(slug=posturl).first_or_404()
form = CreateContent(obj=content)
if form.validate_on_submit():
form.populate_obj(content)
tags = str(form.tags.data).split(',')
attach_tags(tags, content)
content.rehtml()
db.session.commit()
return redirect(url_for('content.addcontent',
posturl=posturl,
updated="Successfully updated")
)
else:
if form.validate_on_submit():
url_name = slugify(form.title.data)
query = Content(form.title.data,
url_name,
form.description.data,
form.active.data,
form.tags.data,
g.fas_user['username'],
form.type_content.data
)
tags = str(form.tags.data).split(',')
try:
db.session.add(query)
db.session.commit()
attach_tags(tags, query)
return redirect(url_for('content.addcontent',
posturl=url_name,
updated="Successfully updated",
media=media)
)
# Duplicate entry
except Exception as e:
db.session.rollback()
print e
pass
return render_template('content/edit_content.html', form=form,
form_action=form_action, title="Create Content",
media=media)
@bundle.route('/blog', methods=['GET', 'POST'])
@bundle.route('/blog/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>', methods=['GET', 'POST'])
def blog(slug=None):
if slug is not None:
try:
posts = Content.query. \
filter_by(slug=slug).all()
except:
posts = "No such posts in database."
else:
try:
posts = Content.query. \
filter_by(type_content="blog").all()
except:
            posts = "Database is empty"
return render_template('blog/index.html',
title='Blog',
content=posts)
| [
"[email protected]"
] | |
c2eab84e232f590469f2bb0cea19a803ec121d0f | 2fabc9255adbe1cc055eb4b2402f8526f389f257 | /model/modules.py | 86464633b715d37b344f74882941fce2b5d70ab8 | [
"MIT"
] | permissive | asr2021/WaveGrad2 | 657323be12d16667fc0a3b7f2a168101e6e913cb | ba7715d760999093dd99283f48971c5115210b51 | refs/heads/main | 2023-06-02T18:48:56.830462 | 2021-06-23T07:22:10 | 2021-06-23T08:10:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,959 | py | import os
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from .blocks import (
ZoneOutBiLSTM,
LinearNorm,
ConvBlock,
)
from text.symbols import symbols
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TextEncoder(nn.Module):
""" Text Encoder """
def __init__(self, config):
super(TextEncoder, self).__init__()
n_src_vocab = len(symbols) + 1
d_word_vec = config["transformer"]["encoder_hidden"]
n_layers = config["transformer"]["encoder_layer"]
d_model = config["transformer"]["encoder_hidden"]
kernel_size = config["transformer"]["encoder_kernel_size"]
dropout = config["transformer"]["encoder_dropout"]
zoneout = config["transformer"]["encoder_zoneout"]
self.d_model = d_model
self.src_word_emb = nn.Embedding(
n_src_vocab, d_word_vec, padding_idx=0
)
self.conv_stack = nn.ModuleList(
[
ConvBlock(
d_model, d_model, kernel_size=kernel_size, dropout=dropout
)
for _ in range(n_layers)
]
)
self.lstm = ZoneOutBiLSTM(
d_model, zoneout_rate=zoneout
)
def forward(self, src_seq, mask=None):
enc_output = self.src_word_emb(src_seq)
for conv in self.conv_stack:
enc_output = conv(enc_output, mask=mask)
enc_output = self.lstm(enc_output)
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0.)
return enc_output
class VarianceAdaptor(nn.Module):
""" Variance Adaptor """
def __init__(self, preprocess_config, model_config):
super(VarianceAdaptor, self).__init__()
self.duration_predictor = DurationPredictor(model_config)
self.gaussian_upsampling = GaussianUpsampling(model_config)
def forward(
self,
x,
src_mask,
duration_target=None,
d_control=1.0,
):
log_duration_prediction = self.duration_predictor(x, src_mask)
if duration_target is not None:
x, attn = self.gaussian_upsampling(x, duration_target, src_mask)
duration_rounded = duration_target
else:
duration_rounded = torch.clamp(
(torch.round(torch.exp(log_duration_prediction) - 1) * d_control),
min=0,
)
x, attn = self.gaussian_upsampling(x, duration_rounded, src_mask)
return (
x,
log_duration_prediction,
duration_rounded,
attn,
)
class GaussianUpsampling(nn.Module):
""" Gaussian Upsampling """
def __init__(self, model_config):
super(GaussianUpsampling, self).__init__()
# self.range_param_predictor = RangeParameterPredictor(model_config)
def forward(self, encoder_outputs, duration, mask):
device = encoder_outputs.device
# range_param = self.range_param_predictor(encoder_outputs, duration, mask)
t = torch.sum(duration, dim=-1, keepdim=True) #[B, 1]
e = torch.cumsum(duration, dim=-1).float() #[B, L]
c = e - 0.5 * duration #[B, L]
t = torch.arange(1, torch.max(t).item()+1, device=device) # (1, ..., T)
t = t.unsqueeze(0).unsqueeze(1) #[1, 1, T]
c = c.unsqueeze(2)
# print(range_param, 0.1*(range_param ** 2))
# w_1 = torch.exp(-0.1*(range_param.unsqueeze(-1) ** -2) * (t - c) ** 2) # [B, L, T]
# w_2 = torch.sum(torch.exp(-0.1*(range_param.unsqueeze(-1) ** -2) * (t - c) ** 2), dim=1, keepdim=True) # [B, 1, T]
w_1 = torch.exp(-0.1 * (t - c) ** 2) # [B, L, T]
w_2 = torch.sum(torch.exp(-0.1 * (t - c) ** 2), dim=1, keepdim=True) # [B, 1, T]
w_2[w_2==0.] = 1.
# w_1 = self.normpdf(t, c, range_param.unsqueeze(-1)) # [B, L, T]
# w_1 = torch.distributions.normal.Normal(c, 0.1).log_prob(t) # [B, L, T]
# w_2 = torch.sum(w_1, dim=1, keepdim=True) # [B, 1, T]
# w_2[w_2==0.] = 1.
w = w_1 / w_2
out = torch.matmul(w.transpose(1, 2), encoder_outputs)
return out, w
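# Worked example of the weighting above (one item in the batch, durations d = [2, 3]):
#   e = cumsum(d)     = [2, 5]       -> total number of frames T = 5
#   c = e - 0.5 * d   = [1.0, 3.5]   -> per-token frame centres
#   t = [1, 2, 3, 4, 5]              -> frame indices
#   w[l, t] = exp(-0.1 * (t - c[l])**2), normalised over tokens l for every frame t,
# so out = w^T @ encoder_outputs maps [B, L, H] token features to [B, T, H] frame
# features: a soft, differentiable alternative to hard length regulation.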
class DurationPredictor(nn.Module):
""" Duration Parameter Predictor """
def __init__(self, model_config):
super(DurationPredictor, self).__init__()
encoder_hidden = model_config["transformer"]["encoder_hidden"]
variance_hidden = model_config["variance_predictor"]["variance_hidden"]
self.duration_lstm = nn.LSTM(
encoder_hidden,
int(variance_hidden / 2), 2,
batch_first=True, bidirectional=True
)
self.duration_proj = nn.Sequential(
LinearNorm(variance_hidden, 1),
nn.ReLU(),
)
def forward(self, encoder_output, mask):
duration_prediction, _ = self.duration_lstm(encoder_output)
duration_prediction = self.duration_proj(duration_prediction)
duration_prediction = duration_prediction.squeeze(-1) # [B, L]
if mask is not None:
duration_prediction = duration_prediction.masked_fill(mask, 0.0)
return duration_prediction
# class RangeParameterPredictor(nn.Module):
# """ Range Parameter Predictor """
# def __init__(self, model_config):
# super(RangeParameterPredictor, self).__init__()
# encoder_hidden = model_config["transformer"]["encoder_hidden"]
# variance_hidden = model_config["variance_predictor"]["variance_hidden"]
# self.range_param_lstm = nn.LSTM(
# encoder_hidden + 1,
# int(variance_hidden / 2), 2,
# batch_first=True, bidirectional=True
# )
# self.range_param_proj = nn.Sequential(
# LinearNorm(variance_hidden, 1),
# nn.Softplus(),
# )
# def forward(self, encoder_output, duration, mask):
# range_param_input = torch.cat([encoder_output, duration.unsqueeze(-1)], dim=-1)
# range_param_prediction, _ = self.range_param_lstm(range_param_input)
# range_param_prediction = self.range_param_proj(range_param_prediction)
# range_param_prediction = range_param_prediction.squeeze(-1) # [B, L]
# if mask is not None:
# range_param_prediction = range_param_prediction.masked_fill(mask, 0.0)
# return range_param_prediction
class SamplingWindow(nn.Module):
""" Sampling Window """
def __init__(self, model_config, train_config):
super(SamplingWindow, self).__init__()
self.upsampling_rate = model_config["wavegrad"]["upsampling_rate"]
self.segment_length_up = train_config["window"]["segment_length"]
self.segment_length = train_config["window"]["segment_length"] // self.upsampling_rate
def pad_seq(self, seq, segment_length):
if len(seq.shape) > 2:
return torch.nn.functional.pad(
seq.transpose(-2, -1), (0, segment_length - seq.shape[1]), 'constant'
).data.transpose(-2, -1)
return torch.nn.functional.pad(
seq, (0, segment_length - seq.shape[1]), 'constant'
).data
def get_hidden_segment(self, hiddens, seq_starts):
batch = list()
for i, (hidden, seq_start) in enumerate(zip(hiddens, seq_starts)):
batch.append(hidden[seq_start:seq_start+self.segment_length])
return torch.stack(batch)
def forward(self, encoder_output, audio, seq_starts=None, full_len=False):
if full_len:
return encoder_output, audio
if encoder_output.shape[1] > self.segment_length:
encoder_segment = self.get_hidden_segment(encoder_output, seq_starts)
encoder_segment = self.pad_seq(encoder_output, self.segment_length)
audio_segment = self.pad_seq(audio, self.segment_length_up)
return encoder_segment, audio_segment
| [
"[email protected]"
] | |
ee4ca603bda625183659e699f11fd7d710b1f6e2 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/W_w_Mgt_to_C/pyramid_tight_crop_size256_pad60_jit15/pyr_2s/bce_s001_tv_s0p1_L4/step10_a.py | eb5df86c53b26c8ee94837558fc32d4559cdad0a | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,105 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the step10_b.py currently being executed
code_exe_path_element = code_exe_path.split("\\") ### split the path; used below to find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### the -1 converts a length into an index
# print("    kong_to_py_layer:", kong_to_py_layer)
if  (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] was used to strip the step1x_ prefix; meaningful names turned out fine to keep, so it was changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] was used to strip mask_ (added only because python module names cannot start with a digit); the automatic ordering is acceptable, so it was changed to 0
elif(kong_to_py_layer >  3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir) ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_2side_L4 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the folder name "one level above" result_dir; nesting exp_dir is fine too.
For example, with exp_dir = "6_mask_unet/a_name_you_choose", every result_dir lives under:
    6_mask_unet/a_name_you_choose/result_a
    6_mask_unet/a_name_you_choose/result_b
    6_mask_unet/a_name_you_choose/...
'''
use_db_obj = type8_blender_wc_flow
use_loss_obj = [G_mae_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), G_mae_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### the x, y order here is matched against step07_b_0b_Multi_UNet
#############################################################
### build an empty Exp_builder so result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Press F5 directly, or run "python step10_b1_exp_obj_load_and_train_and_test.py" with nothing after it, so execution does not fall through to the code below that is meant for step10_b_subprocess.py
ch032_1side_1__2side_0.build().run()
# print('no argument')
sys.exit()
    ### The part below is for step10_b_subprocess.py; it is equivalent to typing python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run() on the cmd line
eval(sys.argv[1])
| [
"[email protected]"
] | |
166670300dc3fb39d4e1883bb546d056fe08ce1f | dd09f3ad02785935043b56ea3ef85ed603f4065d | /Sorting_Function/Selection_Sorting.py | 6f03147ffab2db72cf7d3f242eb1efd76270e240 | [] | no_license | RishavMishraRM/Data_Structure | ed70f5a04c2fa8153433e830ef54deb7b9c8bf21 | 0d31d16b48989359d5fef79b00aac1b9ca112a22 | refs/heads/main | 2023-06-27T02:40:18.031146 | 2021-07-25T19:01:51 | 2021-07-25T19:01:51 | 330,320,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | def selection_sort(A):
n = len(A)
for i in range(n-1):
position = i
for j in range(i+1, n):
if A[j] < A[position]:
position = j
temp = A[i]
A[i] = A[position]
A[position] = temp
A = [3, 5, 8, 9, 6, 2]
print('Original Array:',A)
selection_sort(A)
print('Sorted Array:',A)
| [
"[email protected]"
] | |
2fb93afe829de7491a458ced6b6568ea178817ff | 488e0934b8cd97e202ae05368c855a57b299bfd1 | /Django/advanced/change_admin/change_admin/settings.py | 52ac0975d8daac947ffc100a34d19c9282aa57ff | [] | no_license | didemertens/udemy_webdev | 4d96a5e7abeec1848ecedb97f0c440cd50eb27ac | 306215571be8e4dcb939e79b18ff6b302b75c952 | refs/heads/master | 2020-04-25T00:24:45.654136 | 2019-04-13T16:00:47 | 2019-04-13T16:00:47 | 172,377,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | py | """
Django settings for change_admin project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(w#6#!6oi75z@e2d&((yalznx95yk7exe5fbbx#f1l#0uc=(3w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app_videos'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'change_admin.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'change_admin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
94469e411f69931b1aa7dec9d60e62e9d87a7eff | 3e917645a0e1375189c8ee8c1e93ed15348111ef | /projects/usxp/archive/parrallel/parallel_nibble_v2.py | 792bbb8be009b4feb157af5c7e2bf1c7bf54ad07 | [] | no_license | mbougie/gibbs | d4544e688ce2b63530535e1f5102328aece30e0d | 39d5dc0866fc0dd149d0cf1f22bfd20911a9d29e | refs/heads/master | 2021-01-12T06:59:27.214123 | 2020-01-07T15:48:12 | 2020-01-07T15:48:12 | 83,906,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,710 | py | import arcpy
from arcpy import env
from arcpy.sa import *
import multiprocessing
import os
import glob
import sys
import time
import logging
from multiprocessing import Process, Queue, Pool, cpu_count, current_process, Manager
import general as gen
# arcpy.env.overwriteOutput = True
arcpy.env.scratchWorkspace = "in_memory"
case=['Bougie','Gibbs']
#import extension
arcpy.CheckOutExtension("Spatial")
#establish root path for this the main project (i.e. usxp)
rootpath = 'C:/Users/Bougie/Desktop/Gibbs/data/usxp/'
# rootpath = 'D:/projects/ksu/v2/'
### establish gdb path ####
def defineGDBpath(arg_list):
gdb_path = '{}{}/{}/{}.gdb/'.format(rootpath,arg_list[0],arg_list[1],arg_list[2])
# print 'gdb path: ', gdb_path
return gdb_path
####### define raster and mask ####################
class ProcessingObject(object):
def __init__(self, series, res, mmu, years, name, subname, pixel_type, gdb_parent, parent_seq, gdb_child, mask_seq, outraster_seq):
self.series = series
self.res = str(res)
self.mmu =str(mmu)
self.years = years
self.name = name
self.subname = subname
self.parent_seq = parent_seq
self.mask_seq = mask_seq
self.outraster_seq = outraster_seq
self.datarange = str(self.years[0])+'to'+str(self.years[1])
print 'self.datarange:', self.datarange
self.dir_tiles = 'C:/Users/Bougie/Desktop/Gibbs/tiles/'
# s9_ytc30_2008to2016_mmu5_nbl_bfc
if self.name == 'mtr':
self.traj = self.series+'_traj_cdl'+self.res+'_b_'+self.datarange+'_rfnd'
self.gdb_parent = defineGDBpath(gdb_parent)
self.raster_parent = self.traj+self.parent_seq
self.path_parent = self.gdb_parent + self.raster_parent
print 'self.path_parent', self.path_parent
self.gdb_child = defineGDBpath(gdb_child)
self.raster_mask = self.raster_parent + self.mask_seq
self.path_mask = self.gdb_child + self.raster_mask
self.raster_nbl = self.raster_parent + self.outraster_seq
self.path_nbl = self.gdb_child + self.raster_nbl
print 'self.path_nbl', self.path_nbl
self.out_fishnet = defineGDBpath(['ancillary','vector', 'shapefiles']) + 'fishnet_mtr'
print self.out_fishnet
self.pixel_type = "16_BIT_UNSIGNED"
else:
self.gdb_parent = defineGDBpath(['s14', 'post', self.name])
self.yxc_foundation = self.series+'_'+self.name+self.res+'_'+self.datarange+'_mmu'+self.mmu
print 'self.yxc_foundation', self.yxc_foundation
self.path_parent = self.gdb_parent + self.yxc_foundation
print 'self.path_parent', self.path_parent
self.raster_mask = self.yxc_foundation + '_msk'
self.path_mask = self.gdb_parent + self.raster_mask
print 'self.path_mask', self.path_mask
self.out_fishnet = defineGDBpath(['ancillary','vector', 'shapefiles']) + 'fishnet_ytc'
self.pixel_type = "16_BIT_UNSIGNED"
self.raster_nbl = self.yxc_foundation + '_nbl'
print 'self.raster_nbl:', self.raster_nbl
self.path_nbl = self.gdb_parent + self.raster_nbl
print 'self.path_nbl', self.path_nbl
# def existsDataset(self):
# dataset = self.gdb_parent + self.raster_parent + '_nbl'
# if arcpy.Exists(dataset):
# print 'dataset already exists'
# return
# else:
# print 'dataset: ', dataset
# return self.raster_parent + '_nbl'
def create_fishnet(nibble):
    #delete previous fishnet feature class
    arcpy.Delete_management(nibble.out_fishnet)
    #acquire parameters for the CreateFishnet call; path_parent is a path string,
    #so wrap it in a Raster object before reading its extent / spatial reference
    parent_ras = Raster(nibble.path_parent)
    XMin = parent_ras.extent.XMin
    YMin = parent_ras.extent.YMin
    XMax = parent_ras.extent.XMax
    YMax = parent_ras.extent.YMax
    origCord = "{} {}".format(XMin, YMin)
    YAxisCord = "{} {}".format(XMin, YMax)
    cornerCord = "{} {}".format(XMax, YMax)
    cellSizeW = "0"
    cellSizeH = "0"
    numRows = 7
    numCols = 7
    geotype = "POLYGON"
    arcpy.env.outputCoordinateSystem = parent_ras.spatialReference
    print parent_ras.spatialReference.name
#call CreateFishnet_management function
arcpy.CreateFishnet_management(nibble.out_fishnet, origCord, YAxisCord, cellSizeW, cellSizeH, numRows, numCols, cornerCord, "NO_LABELS", "", geotype)
def execute_task(args):
in_extentDict, nibble = args
fc_count = in_extentDict[0]
# print fc_count
procExt = in_extentDict[1]
# print procExt
XMin = procExt[0]
YMin = procExt[1]
XMax = procExt[2]
YMax = procExt[3]
#set environments
#The brilliant thing here is that using the extents with the full dataset!!!!!! DONT EVEN NEED TO CLIP THE FULL RASTER TO THE FISHNET BECASUE
arcpy.env.snapRaster = nibble.path_parent
    arcpy.env.cellSize = nibble.path_parent
arcpy.env.extent = arcpy.Extent(XMin, YMin, XMax, YMax)
### Execute Nibble #####################
ras_out = arcpy.sa.Nibble(nibble.path_parent, nibble.path_mask, "DATA_ONLY")
#clear out the extent for next time
arcpy.ClearEnvironment("extent")
# print fc_count
outname = "tile_" + str(fc_count) +'.tif'
#create Directory
outpath = os.path.join("C:/Users/Bougie/Desktop/Gibbs/", r"tiles", outname)
ras_out.save(outpath)
def mosiacRasters(nibble):
tilelist = glob.glob(nibble.dir_tiles+'*.tif')
print tilelist
    ###### mosaic tiles together into a new raster
arcpy.MosaicToNewRaster_management(tilelist, nibble.gdb_parent, nibble.raster_nbl, Raster(nibble.path_parent).spatialReference, nibble.pixel_type, nibble.res, "1", "LAST","FIRST")
##Overwrite the existing attribute table file
arcpy.BuildRasterAttributeTable_management(nibble.path_nbl, "Overwrite")
## Overwrite pyramids
gen.buildPyramids(nibble.path_nbl)
def run(series, res, mmu, years, name, subname, pixel_type, gdb_parent, parent_seq, gdb_child, mask_seq, outraster_seq):
#instantiate the class inside run() function
nibble = ProcessingObject(series, res, mmu, years, name, subname, pixel_type, gdb_parent, parent_seq, gdb_child, mask_seq, outraster_seq)
print nibble.res
# need to create a unique fishnet for each dataset
    #create_fishnet(nibble)
#remove a files in tiles directory
tiles = glob.glob(nibble.dir_tiles+"*")
for tile in tiles:
os.remove(tile)
#get extents of individual features and add it to a dictionary
extDict = {}
count = 1
for row in arcpy.da.SearchCursor(nibble.out_fishnet, ["SHAPE@"]):
extent_curr = row[0].extent
ls = []
ls.append(extent_curr.XMin)
ls.append(extent_curr.YMin)
ls.append(extent_curr.XMax)
ls.append(extent_curr.YMax)
extDict[count] = ls
count+=1
# print 'extDict', extDict
# print'extDict.items()', extDict.items()
######create a process and pass dictionary of extent to execute task
pool = Pool(processes=cpu_count())
# pool = Pool(processes=1)
pool.map(execute_task, [(ed, nibble) for ed in extDict.items()])
pool.close()
    pool.join()
mosiacRasters(nibble) | [
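# Summary of the pattern above (comment added for clarity): run() splits the full
# raster extent into a 7x7 fishnet, Pool.map() hands each tile's extent to
# execute_task(), each worker runs arcpy.sa.Nibble with arcpy.env.extent restricted
# to its tile (so the source raster never needs to be physically clipped), and
# mosiacRasters() stitches the per-tile .tif outputs back into one raster, then
# rebuilds its attribute table and pyramids.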
"[email protected]"
] | |
82d8e508bea9d27e596ec5fd5f94d4d16fc0ca40 | 085406a6754c33957ca694878db9bbe37f84b970 | /网络编程/08-ssh_socket_client.py | b91da548705606b59b6c0eb6b8d70cdbb3050767 | [] | no_license | dewlytg/Python-example | 82157958da198ce42014e678dfe507c72ed67ef0 | 1e179e4037eccd9fefabefd252b060564a2eafce | refs/heads/master | 2021-01-01T18:36:08.868861 | 2019-01-18T10:39:08 | 2019-01-18T10:39:08 | 98,375,528 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | #!/usr/bin/env python
"""
socket client for ssh
"""
import socket
client = socket.socket()
client.connect(("localhost",9999))
while True:
    # allow the client to keep sending data to the server in a loop
cmd = input(">>:").strip()
if len(cmd) == 0:continue
    client.send(cmd.encode()) # in Python 3 the string must be converted to bytes before sending (treated as utf-8 here)
cmd_res_size = client.recv(1024)
    print("command result size:",cmd_res_size)
    client.send("please input something in order to avoid packet sticking".encode()) # when this runs against a Linux server, TCP packet sticking can occur; this extra round-trip avoids it
received_size = 0
received_data = b''
    while received_size != int(cmd_res_size.decode()): # cmd_res_size is bytes; decode it to a string before converting to int
data = client.recv(1024)
received_size += len(data)
received_data += data
else:
print("cmd res receive done...",received_size)
print(received_data.decode())
client.close() | [
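# For reference, a sketch of the server-side loop this client expects (illustrative
# only, not part of this file): run the command, announce the payload size first,
# absorb the small ack sent above so the size header and the payload do not get
# glued into one TCP segment, then stream the output:
#
#     output = subprocess.getoutput(cmd.decode())        # cmd received from the client
#     conn.send(str(len(output.encode())).encode())      # 1) send the byte length
#     conn.recv(1024)                                     # 2) wait for the client's ack
#     conn.send(output.encode())                          # 3) send the actual result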
"[email protected]"
] | |
1c633bb83ec340755424794ca77ec8a5cecdcbf1 | c253e3c94b66e85d52b1c274e649a8431db0d7d5 | /IT-Lab/assignment-6/codes/1.py | ab5c61b8787c25c90e6d39eaf115480cc804383b | [] | no_license | Abhinal/college-assignments | bfecc9d8dd05b7da5348def9990f42ff28329328 | a93aeee086eb681f946cc343869610e4588af307 | refs/heads/master | 2023-08-16T12:04:35.543135 | 2021-10-22T16:27:33 | 2021-10-22T16:31:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | lst = []
i = 0
while i < 5:
player = input()
lst.append(player)
i += 1
print(lst) | [
"[email protected]"
] | |
15efb02d15bd410d19b8018e6c307a75b9f04eb4 | 5fdcb39eaa9d1f44e2ba0130bc0d6ece3f5ff354 | /code/cheshire3/record.py | 728ddee847b4f75341864dad4eff2244263dd3c4 | [] | no_license | Cheshire-Grampa/cheshire3 | 0a653d6372497290d938e098b6acf8366348133f | 616ab36cd8442cd5f4712a9fccf65ca7ae9f692c | refs/heads/master | 2020-12-25T07:26:16.366754 | 2012-06-06T09:52:53 | 2012-06-06T10:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,269 | py |
from cheshire3.baseObjects import Record
from cheshire3.exceptions import C3Exception
from cheshire3.utils import flattenTexts, elementType
from cheshire3.marc_utils import MARC, MARC8_to_Unicode
import unicodedata
import types, utils, os, re, sys
from cStringIO import StringIO
from xml.sax.saxutils import escape
from xml.sax import ContentHandler
from xml.dom.minidom import getDOMImplementation

# DOM implementation used by SaxToDomHandler to build documents
implementation = getDOMImplementation()
# SAX event stream line encodings (the leading digit is the event-type code):
#   1 <name> <attrHash> <parent> <predicate> <end>   -- Element
#   4 <as 1, plus namespace/qname info>              -- Namespaced Element
#   2 <name> <startLine>                             -- End Element
#   5 <as 2, plus namespace/qname info>              -- End Namespaced
#   3 <text>                                         -- Characters
#   9 <element hash>                                 -- pickled hash of locations
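# Illustrative sketch of the encoding (assumed input, not from the original source):
# feeding <doc><p type="x">hi</p></doc> through SaxContentHandler yields lines roughly like
#   1 doc {} -1 1 4
#   1 p {u'type': u'x'} 0 1 3
#   3 hi
#   2 p 1
#   2 doc 0
# i.e. each start-element line records its parent line, element-name predicate and end line.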
# Split to separate object to allow for DOM->SAX direct conversion
# by throwing events from DOM tree to handler.
class SaxContentHandler(ContentHandler):
currentText = []
currentPath = []
pathLines = []
currentLine = -1
recordWordCount = 0
elementHash = {}
namespaces = []
hashAttributesNames = {}
hashAttributes = []
stripWS = 0
saveElementIndexes = 1
def __init__(self):
self.saveElementIndexes = 1
self.hashAttributesNames = {}
self.hashAttributes = []
self.stripWS = 0
self.reinit()
def reinit(self):
self.currentText = []
self.currentPath = []
self.pathLines = []
self.currentLine = -1
self.recordWordCount = 0
self.elementHash = {}
self.elementIndexes = []
self.namespaces = []
def startPrefixMapping(self, pfx, uri):
self.currentLine += 1
if (pfx == None):
pfx = ''
self.currentText.append("6 %r, %r" % (pfx, uri))
# We want to fwd elems to NS elem handlers with default NS?
def startElement(self, name, attrs):
self.currentLine += 1
self.pathLines.append(self.currentLine)
try:
parent = self.pathLines[-2]
except IndexError:
parent = -1
attrHash = {}
if (attrs):
for k in attrs.keys():
attrHash[k] = escape(attrs[k])
try:
npred = self.elementIndexes[-1][name] + 1
self.elementIndexes[-1][name] += 1
except IndexError:
# Empty
npred = 1
self.elementIndexes = [{name: npred}]
except KeyError:
            # First occurrence of Element
npred = 1
self.elementIndexes[-1][name] = 1
except:
raise
self.elementIndexes.append({})
self.currentText.append("1 %s %s %d %d" % (name, repr(attrHash), parent, npred))
saveAttrs = []
try:
hashAttrList = self.hashAttributesNames[name]
for a in hashAttrList:
try:
saveAttrs.append("%s[@%s='%s']" % (name, a, attrHash[a]))
except:
pass
except:
pass
try:
starAttrList = self.hashAttributesNames['*']
for a in starAttrList:
try:
saveAttrs.append("*[@%s='%s']" % (a, attrHash[a]))
except:
pass
except:
pass
if saveAttrs:
self.hashAttributes.append((self.currentLine, saveAttrs))
def endElement(self, name):
self.currentLine += 1
start = self.pathLines.pop()
self.currentText.append("2 %s %d" % (name, start))
self.currentText[start] = "%s %d" % (self.currentText[start], self.currentLine)
self.elementIndexes.pop()
try:
self.elementHash[name].append([start, self.currentLine])
except:
self.elementHash[name] = [[start, self.currentLine]]
if self.hashAttributes and self.hashAttributes[-1][0] == start:
attrs = self.hashAttributes.pop()[1]
for sa in attrs:
try:
self.elementHash[sa].append([start, self.currentLine])
except:
self.elementHash[sa] = [[start, self.currentLine]]
def startElementNS(self, name, qname, attrs):
self.currentLine += 1
self.pathLines.append(self.currentLine)
try:
parent = self.pathLines[-2]
except:
parent = -1
attrHash = {}
# Convert from weird sax thing
if (attrs):
for k in attrs.keys():
attrHash[k] = attrs[k]
simpleName = name[1]
try:
npred = self.elementIndexes[-1][simpleName] + 1
self.elementIndexes[-1][simpleName] += 1
except IndexError:
# Empty
npred = 1
self.elementIndexes = [{simpleName: npred}]
except KeyError:
            # First occurrence of Element
npred = 1
self.elementIndexes[-1][simpleName] = 1
self.elementIndexes.append({})
self.currentText.append("4 %r, %r, %r, %r %d %d" % (name[0], simpleName, qname, attrHash, parent, npred))
saveAttrs = []
try:
hashAttrList = self.hashAttributesNames[simpleName]
for a in hashAttrList:
try:
saveAttrs.append("%s[@%s='%s']" % (simpleName, a, attrHash[a]))
except:
pass
except:
pass
try:
starAttrList = self.hashAttributesNames['*']
for a in starAttrList:
try:
saveAttrs.append("*[@%s='%s']" % (a, attrHash[a]))
except:
pass
except:
pass
if saveAttrs:
self.hashAttributes.append((self.currentLine, saveAttrs))
def endElementNS(self, name, qname):
self.currentLine += 1
start = self.pathLines.pop()
self.currentText.append("5 %r, %r, %r %d" % (name[0], name[1], qname, start))
self.currentText[start] ="%s %d" % (self.currentText[start], self.currentLine)
self.elementIndexes.pop()
try:
self.elementHash[name[1]].append([start, self.currentLine])
except:
self.elementHash[name[1]] = [[start, self.currentLine]]
if self.hashAttributes and self.hashAttributes[-1][0] == start:
attrs = self.hashAttributes.pop()[1]
for sa in attrs:
try:
self.elementHash[sa].append([start, self.currentLine])
except:
self.elementHash[sa] = [[start, self.currentLine]]
def characters(self, text, start=0, length=-1):
# if text.isspace():
# text = " "
prev = self.currentText[-1]
if self.stripWS and text.isspace():
return
self.currentLine += 1
if (len(text) != 1 and len(prev) != 3 and prev[0] == "3" and not prev[-1] in [' ', '-']):
# Adjacent lines of text, ensure spaces
text = ' ' + text
self.currentText.append("3 %s" % (text))
self.recordWordCount += len(text.split())
def ignorableWhitespace(self, ws):
# ... ignore! :D
pass
def processingInstruction(self, target, data):
pass
def skippedEntity(self, name):
pass
class SaxToDomHandler:
nodeStack = []
document = None
currText = ""
def initState(self):
self.nodeStack = []
self.document=None
self.top = None
def startElement(self, name, attribs={}):
if (not self.document):
self.document = implementation.createDocument(None, name, None)
elem = self.document.childNodes[0]
else:
elem = self.document.createElementNS(None,name)
for a in attribs:
elem.setAttributeNS(None,a,attribs[a])
if (self.nodeStack):
self.nodeStack[-1].appendChild(elem)
else:
self.document.appendChild(elem)
self.nodeStack.append(elem)
def endElement(self, foo):
self.nodeStack.pop()
def characters(self, text, zero=0, length=0):
if (self.nodeStack):
if (text.isspace()):
text = " "
# Is this escape necessary?
text = escape(text)
d = self.document.createTextNode(text)
self.nodeStack[-1].appendChild(d)
def startElementNS(self, name, qname, attribs):
if (not self.document):
self.document = implementation.createDocument(name[0], name[1], None)
elem = self.document.childNodes[0]
else:
elem = self.document.createElementNS(name[0],name[1])
for a in attribs:
elem.setAttributeNS(a[0],a[1],attribs[a])
if (self.nodeStack):
self.nodeStack[-1].appendChild(elem)
else:
self.document.appendChild(elem)
self.nodeStack.append(elem)
def endElementNS(self, name,qname):
self.nodeStack.pop()
def startPrefixMapping(self, pref, uri):
pass
def getRootNode(self):
return self.document
s2dhandler = SaxToDomHandler()
class SaxToXmlHandler:
xml = []
currNs = 0
newNamespaces = {}
def initState(self):
self.xml = []
self.namespaces = {}
self.currNs = 0
self.newNamespaces = {}
def startPrefixMapping(self, pref, uri):
self.namespaces[uri] = pref
self.newNamespaces[pref] = uri
def startElement(self, name, attribs={}):
attrs = []
for a in attribs:
attrs.append('%s="%s"' % (a, attribs[a]))
attribtxt = ' '.join(attrs)
if (attribtxt):
attribtxt = " " + attribtxt
self.xml.append("<%s%s>" % (name, attribtxt))
def endElement(self, name):
self.xml.append("</%s>" % (name))
def _getPrefix(self, ns):
if (not ns):
return ""
pref = self.namespaces.get(ns, None)
if (pref == None):
self.currNs += 1
pref = "ns%d" % (self.currNs)
self.namespaces[ns] = pref
self.newNamespaces[pref] = ns
return pref
def startElementNS(self, n, qn=None, attrs={}):
pref = self._getPrefix(n[0])
if (pref):
name = "%s:%s" % (pref, n[1])
else:
name = n[1]
attrlist = []
for ns,aname in attrs:
p2 = self._getPrefix(ns)
if (p2):
nsaname = "%s:%s" % (p2, aname)
else:
nsaname = aname
attrlist.append('%s="%s"' % (nsaname, attrs[(ns,aname)]))
for x in self.newNamespaces.iteritems():
if (x[0]):
attrlist.append('xmlns:%s="%s"' % (x[0], x[1]))
else:
attrlist.append('xmlns="%s"' % (x[1]))
self.newNamespaces = {}
attribtxt = ' '.join(attrlist)
if (attribtxt):
attribtxt = " " + attribtxt
self.xml.append("<%s%s>" % (name,attribtxt))
def endElementNS(self, n, qn=None):
pref = self._getPrefix(n[0])
if (pref):
name = "%s:%s" % (pref, n[1])
else:
name = n[1]
self.xml.append("</%s>" % (name))
def characters(self, text, zero=0, length=0):
text = escape(text)
self.xml.append(text)
def get_xmlString(self):
return ''.join(self.xml)
s2xhandler = SaxToXmlHandler()
class NumericPredicateException(C3Exception):
pass
class DomRecord(Record):
context = None
size = 0
def __init__(self, data, xml="", docId=None, wordCount=0, byteCount=0):
self.dom = data
self.xml = xml
self.id = docId
self.parent = ('','',-1)
self.context = None
self.metadata = {}
if wordCount:
self.wordCount = wordCount
else:
try:
# Sometimes this blows up
self.wordCount = len(flattenTexts(data).split())
except:
self.wordCount = 0
self.byteCount = byteCount
def _walk(self, node):
pass
def get_sax(self, session):
if (not self.sax):
self.handler = SaxContentHandler()
for c in self.dom.childNodes:
self._walkTop(c)
self.sax = self.handler.currentText
self.sax.append("9 %r" % self.handler.elementHash)
self.handler = None
return self.sax
def get_dom(self, session):
return self.dom
def fetch_vector(self, session, index, summary=False):
return index.indexStore.fetch_vector(session, index, self, summary)
def fetch_proxVector(self, session, index, elem=-1):
return index.indexStore.fetch_proxVector(session, index, self, elem)
class MinidomRecord(DomRecord):
useNamespace = 1
def get_xml(self, session):
if (self.xml):
return self.xml
else:
self.xml = self.dom.toxml()
return self.xml
def _walkTop(self, node):
# top level node
if node.nodeType == utils.elementType:
self.namespaces = node.namespaceURI != None
self._walk(node)
def _walk(self, node):
if (node.nodeType == utils.elementType):
name = node.localName
ns = node.namespaceURI
attrHash = {}
for ai in range(node.attributes.length):
attr = node.attributes.item(ai)
if self.namespaces:
if attr.namespaceURI == 'http://www.w3.org/2000/xmlns/':
self.handler.startPrefixMapping(attr.localName, attr.value)
else:
attrHash[(attr.namespaceURI, attr.localName)] = attr.value
else:
attrHash[attr.localName] = attr.value
if self.namespaces:
self.handler.startElementNS((node.namespaceURI, node.localName), None, attrHash)
else:
self.handler.startElement(node.localName, attrHash)
for c in node.childNodes:
self._walk(c)
if self.namespaces:
self.handler.endElementNS((node.namespaceURI, node.localName), None)
else:
self.handler.endElement(node.localName)
elif node.nodeType == utils.textType:
self.handler.characters(node.data)
def process_xpath(self, session, xpath, maps={}):
raise NotImplementedError
try:
from lxml import etree, sax
class LxmlRecord(DomRecord):
def process_xpath(self, session, xpath, maps={}):
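            # Bare element names are rewritten to '//name' so they match anywhere in the
            # tree; maps supplies prefix -> URI bindings for namespaced XPath evaluation.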
global prefixRe
if (isinstance(xpath, list)):
xpath = repr(xpath[0])
if xpath[0] != "/" and xpath[-1] != ')':
xpath = "//" + xpath
if maps:
retval = self.dom.xpath(xpath, namespaces=maps)
else:
retval = self.dom.xpath(xpath)
if isinstance(retval, list):
return retval
else:
return [retval]
def get_xml(self, session):
return etree.tostring(self.dom)
def get_sax(self, session):
if (not self.sax):
handler = SaxContentHandler()
sax.saxify(self.dom, handler)
self.sax = handler.currentText
self.sax.append("9 %r" % handler.elementHash)
return self.sax
def get_dom(self, session):
try:
return self.dom.getroot()
except AttributeError:
return self.dom
except:
class LxmlRecord(DomRecord):
pass
try:
from xpath import ParsedRelativeLocationPath, ParsedAbsoluteLocationPath, \
ParsedStep, ParsedNodeTest, ParsedExpr, Compile, \
ParsedAbbreviatedAbsoluteLocationPath, ParsedAbbreviatedRelativeLocationPath, \
ParsedNodeTest
except:
# This means we can't do xpaths on SaxRecords...
# making them a bit pointless, but not fatal as we likely don't need them
pass
def traversePath(node):
if (isinstance(node, ParsedRelativeLocationPath.ParsedRelativeLocationPath)):
left = traversePath(node._left)
right = traversePath(node._right)
if (left == []):
# self::node()
return [right]
elif (type(left[0]) in types.StringTypes):
return [left, right]
else:
left.append(right)
return left
elif (isinstance(node, ParsedAbsoluteLocationPath.ParsedAbsoluteLocationPath)):
left = ['/']
if (node._child):
right = traversePath(node._child)
else:
return left
if (type(right[0]) == types.StringType):
return [left, right]
else:
left.extend(right)
return left
elif (isinstance(node, ParsedAbbreviatedRelativeLocationPath.ParsedAbbreviatedRelativeLocationPath)):
left = traversePath(node._left)
right = traversePath(node._right)
right[0] = 'descendant'
if (left == []):
# self::node()
return [right]
elif (type(left[0]) in types.StringTypes):
return [left, right]
else:
left.append(right)
return left
elif (isinstance(node, ParsedStep.ParsedStep)):
# TODO: Check that axis is something we can parse
a = node._axis._axis
if (a == 'self'):
return []
n = node._nodeTest
local = ParsedNodeTest.NodeNameTest
nameattr = "_nodeName"
if (isinstance(n, local)):
n = getattr(n, nameattr)
elif (isinstance(n, ParsedNodeTest.TextNodeTest)):
n = "__text()"
elif (isinstance(n, ParsedNodeTest.QualifiedNameTest)):
n = n._prefix + ":" + n._localName
elif (isinstance(n, ParsedNodeTest.PrincipalTypeTest)):
n = "*"
else:
raise(NotImplementedError)
preds = node._predicates
pp = []
if (preds):
for pred in preds:
pp.append(traversePath(pred))
return [a, n, pp]
elif (isinstance(node, ParsedExpr.ParsedEqualityExpr) or isinstance(node, ParsedExpr.ParsedRelationalExpr)):
# @id="fish"
op = node._op
# Override check for common: [position()=int]
if (op == '=' and isinstance(node._left, ParsedExpr.FunctionCall) and node._left._name == 'position' and isinstance(node._right, ParsedExpr.ParsedNLiteralExpr)):
return node._right._literal
left = traversePath(node._left)
if (type(left) == types.ListType and left[0] == "attribute"):
left = left[1]
right = traversePath(node._right)
if not op in ('=', '!='):
op = ['<', '<=', '>', '>='][op]
return [left, op, right]
elif (isinstance(node, ParsedExpr.ParsedNLiteralExpr) or isinstance(node, ParsedExpr.ParsedLiteralExpr)):
# 7 or "fish"
return node._literal
elif (isinstance(node, ParsedExpr.FunctionCall)):
if (node._name == 'last'):
# Override for last using Pythonic expr
return -1
elif node._name == 'name':
return ['FUNCTION', '__name()']
elif node._name == 'starts-with':
# only for foo[starts-with(@bar, 'baz')]
return ['FUNCTION', 'starts-with', traversePath(node._arg0)[1], node._arg1._literal]
elif node._name == 'regexp':
return ['FUNCTION', 'regexp', traversePath(node._arg0)[1], re.compile(node._arg1._literal)]
elif node._name == 'count':
return ['FUNCTION', 'count', traversePath(node._arg0)]
else:
raise(NotImplementedError)
elif (isinstance(node, ParsedExpr.ParsedAndExpr)):
return [traversePath(node._left), 'and', traversePath(node._right)]
elif (isinstance(node, ParsedExpr.ParsedOrExpr)):
return [traversePath(node._left), 'or', traversePath(node._right)]
else:
# We'll need to do full XPath vs DOM
raise(NotImplementedError)
def parseOldXPath(p):
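    # Compile the raw XPath string and flatten its parse tree into the nested-list
    # form that SaxRecord.process_xpath walks; returns [compiledXPath, flattenedSteps].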
xpObj = Compile(p)
t = traversePath(xpObj)
    if (t[0] != '/' and type(t[0]) in types.StringTypes):
t= [t]
return [xpObj, t]
class SaxRecord(Record):
def __init__(self, data, xml="", docId=None, wordCount=0, byteCount=0):
self.sax = data
self.id = docId
self.xml = xml
self.history = []
self.rights = []
self.elementHash = {}
self.wordCount = wordCount
self.byteCount = byteCount
self.parent = ('','',-1)
self.attrRe = re.compile("u['\"](.+?)['\"]: u['\"](.*?)['\"](, |})")
#self.attrRe = re.compile("u(?P<quote>['\"])(.+?)(?P=quote): u(?P<quoteb>['\"])(.*?)(?P=quoteb)(, |})")
self.recordStore = ""
def process_xpath(self, session, xpath, maps={}):
if (not isinstance(xpath, list)):
# Raw XPath
xpath = parseOldXPath(xpath)
xp = xpath[1]
try:
flatten = 0
if xp[0][0] == "FUNCTION" and xp[0][1] == 'count':
# process xpath and return number of matches
if isinstance(xp[0][2][0], str) and xp[0][2][0] != '/':
data = self.process_xpath(session, [None, [xp[0][2]]], maps)
else:
data = self.process_xpath(session, [None, xp[0][2]], maps)
return len(data)
if (xp[-1][0] == 'child' and xp[-1][1] == "__text()"):
flatten = 1
xp = xp[:-1]
if (xp[-1][0] == 'attribute'):
return self._handleAttribute(xp, maps)
elif (xp[-1][0] == "/"):
# Return top level element
for x in xrange(len(self.sax)):
if self.sax[x][0] in ['1', '4']:
return self.sax[x:]
elif(xp[-1][0] in ['child', 'descendant']):
data = []
# Extracting element
elemName = xp[-1][1]
nselem = elemName.split(":")
if (len(nselem) == 2):
# Namespaced.
nsUri = maps[nselem[0]]
elemName = nselem[1]
else:
nsUri = ""
attr = xp[-1][2]
elemLines = []
if elemName == '*' and attr:
for p in attr:
if p[0] == 'FUNCTION' and p[2] == '__name()':
names = self.elementHash.keys()
if p[1] == 'starts-with' and p[2] == '__name()':
for x in names:
if x.find(p[3]) == 0:
elemLines.extend(self.elementHash[x])
elif p[1] == 'regexp' and p[2] == '__name()':
for x in names:
if p[3].search(x):
elemLines.extend(self.elementHash[x])
elif (not elemName in self.elementHash):
return []
if (len(attr) == 1 and type(attr[0]) == types.ListType and attr[0][1] == "="):
n = u"%s[@%s='%s']" % (elemName, attr[0][0], attr[0][2])
elemLines = self.elementHash.get(n, [])
if elemLines == []:
try:
elemLines = self.elementHash[elemName]
except:
# might really be empty
pass
for e in elemLines:
if (not nsUri or self.sax[e[0]][4:4+len(nsUri)] == nsUri):
match = self._checkSaxXPathLine(xp, e[0])
if (match):
# Return event chunk
l = self.sax[e[0]]
end = int(l[l.rfind(' ')+1:])
data.append(self.sax[e[0]:end+1])
else:
# Unsupported final axis
raise(NotImplementedError)
if flatten and data:
# Flatten to text nodes
ndata = []
for match in data:
txt = []
for ev in match:
if ev[0] == '3':
txt.append(ev[2:])
ndata.append(''.join(txt))
return ndata
else:
return data
except NotImplementedError:
# Convert to DOM (slow) and reapply (slower still)
dom = self.get_dom(session)
            xp = xpath[0]
try:
return utils.evaluateXPath(xp, dom)
except:
self.log_critical("Buggy Xpath: %r" % xp)
return []
# Otherwise just fall over as we've hit a real bug
def _handleAttribute(self, xp, maps={}):
attrName = xp[-1][1]
nselem = attrName.split(":")
if (len(nselem) == 2):
# Namespaced attribute
nsUri = maps[nselem[0]]
attrName = nselem[1]
else:
nsUri = None
data = []
if (len(xp) == 1):
# Extracting all occs of attribute anywhere!?
# Check predicates... (only support one numeric predicate)
if (len(xp[0][2]) == 1 and type(xp[0][2][0]) == types.FloatType):
nth = int(xp[0][2][0])
elif (len(xp[0][2])):
# Non index or multiple predicates??
raise(NotImplementedError)
else:
nth = 0
currn = 0
for l in self.sax:
if (l[0] == "1"):
(name, attrs) = self._convert_elem(l)
if (attrName in attrs):
currn += 1
content = attrs[attrName]
if (currn == nth):
data.append(content)
break
elif (not nth):
data.append(content)
else:
elemName = xp[-2][1]
flatten = 0
if (elemName == "*"):
# Let DOM code handle this monstrosity :P
raise(NotImplementedError)
nselem = elemName.split(":")
if (len(nselem) == 2):
# Namespaced.
elemNsUri = maps[nselem[0]]
elemName = nselem[1]
else:
elemNsUri = ""
if (elemName in self.elementHash):
elemLines = self.elementHash[elemName]
for e in elemLines:
if (not elemNsUri or self.sax[e[0]][4:4+len(elemNsUri)] == elemNsUri):
line = self.sax[e[0]]
(name, attrs) = self._convert_elem(line)
if (attrName == '*'):
# All attributes' values
match = self._checkSaxXPathLine(xp[:-1], e[0])
if (match):
for k in attrs.keys():
data.append(attrs[k])
else:
if (not attrName in attrs):
attrName = (nsUri, attrName)
if (not attrName in attrs and not nsUri):
# step through and take first
content = None
for key in attrs:
if key[1] == attrName[1]:
content = attrs[key]
else:
content = attrs.get(attrName, None)
if (content):
# Now check rest of path
match = self._checkSaxXPathLine(xp[:-1], e[0])
if (match):
data.append(content)
return data
def _checkSaxXPathLine(self, xp, line):
# Check that event at line in record matches xpath up tree
# Pass by reference, need a copy to pop! Looks like a hack...
xpath = xp[:]
climb = False
while (xpath):
posn = len(xpath)
node = xpath.pop()
if (line == -1):
if node != "/" and node != ['/']:
return 0
else:
elem = self.sax[line]
(name, attrs) = self._convert_elem(elem)
match = self._checkSaxXPathNode(node, name, attrs, line, posn)
if not match:
if not climb:
return 0
else:
# Previous was a descendant, keep looking
while not match:
start = elem.rfind("}") + 2
end = elem.find(" ", start)
line = int(elem[start:end])
if line != -1:
elem = self.sax[line]
(name, attrs) = self._convert_elem(elem)
match = self._checkSaxXPathNode(node, name, attrs, line, posn)
else:
return 0
if xpath:
start = elem.rfind("}") + 2
end = elem.find(" ", start)
line = int(elem[start:end])
climb = (node and node[0] == "descendant")
return 1
def _checkSaxXPathNode(self, step, name, attrs, line, posn):
# name already checked, strip
if step in ['/', ['/']] and name:
return 0
if (step[1] != name and step[1] != '*' and step[1][step[1].find(":")+1:] != name):
return 0
elif (not step[0] in ['child', 'descendant']):
# Unsupported axis
raise(NotImplementedError)
elif (step[2]):
# Check predicates
predPosn = 0
for pred in (step[2]):
predPosn += 1
m = self._checkSaxXPathPredicate(pred, name, attrs, line, posn, predPosn)
if (not m):
return 0
return 1
def _checkSaxXPathPredicate(self, pred, name, attrs, line, posn, predPosn):
if (type(pred) != types.ListType):
# Numeric Predicate. (eg /foo/bar[1])
if (predPosn != 1):
# Can't do numeric predicate on already predicated nodeset
# eg: text[@type='main'][2]
raise(NotImplementedError)
if (posn == 1):
# First position in relative path.
# Check against position in elementHash
if (name in self.elementHash):
all = self.elementHash[name]
p = int(pred)
if (len(all) < p):
return 0
return all[int(pred)-1][0] == line
return 0
else:
# Not first position, so it applies to parent elem
# Which we record during parsing
elem = self.sax[line]
end = elem.rfind("}") + 2
start = elem.find(' ', end) + 1
end = elem.find(' ', start)
npred = float(elem[start:end])
return npred == pred
elif (pred[1] in ['=', '!=', '<', '>', '<=', '>=']):
# Single attribute
return self._checkSaxXPathAttr(pred, attrs)
elif (pred[1] in ['and', 'or']):
# Attribute combinations
left = self._checkSaxXPathPredicate(pred[0], name, attrs, line, posn, predPosn)
right = self._checkSaxXPathPredicate(pred[2], name, attrs, line, posn, predPosn)
if (pred[1] == 'and' and left and right):
return 1
elif (pred[1] == 'or' and (left or right)):
return 1
return 0
elif (pred[0] == 'attribute'):
# Attribute exists test
return pred[1] in attrs
elif (pred[0] == 'FUNCTION'):
if pred[2] == "__name()":
return True
if pred[1] == 'starts-with':
if pred[2] in attrs:
val = attrs[pred[2]]
return not val.find(pred[3])
else:
return False
elif pred[1] == 'regexp':
if pred[2] in attrs:
return pred[3].search(attrs[pred[2]]) != None
else:
return False
raise NotImplementedError
else:
# No idea!!
raise(NotImplementedError)
return 1
def _checkSaxXPathAttr(self, pred, attrs):
# Namespacey
if (not pred[0] in attrs):
if ((None, pred[0]) in attrs):
pred[0] = (None, pred[0])
else:
return 0
rel = pred[1]
# -Much- faster than eval
if (type(pred[2]) == types.FloatType):
attrValue = float(attrs[pred[0]])
else:
attrValue = attrs[pred[0]]
comp = cmp(attrValue, pred[2])
if rel == "=":
return comp == 0
elif rel == ">":
return comp == 1
elif rel == "<":
return comp == -1
elif rel == "<=":
return comp in (-1, 0)
elif rel == ">=":
return comp in (1, 0)
elif rel == "!=":
return comp in (1, -1)
else:
raise(NotImplementedError)
def _convert_elem(self, line):
# Currently: 1 name {attrs} parent npred end
if (line[0] == '1'):
start = line.find("{")
name = line[2:start-1]
if line[start+1] == '}':
attrs = {}
else:
attrList = self.attrRe.findall(line)
attrs = {}
for m in attrList:
attrs[unicode(m[0])] = unicode(m[1])
return [name, attrs]
elif (line[0] == '4'):
end = line.rfind("}")
stuff = eval(line[2:end+1])
return [stuff[1], stuff[3]]
else:
raise ValueError("Called convert on non element.")
def saxify(self, session, handler=None, sax=[]):
if handler == None:
handler = self
if not sax:
sax = self.get_sax(session)
for l in sax:
line = l
# line = l.strip()
if line[0] == "1":
# String manipulation method
(name, attrs) = self._convert_elem(line)
handler.startElement(name, attrs)
elif line[0] == "3":
handler.characters(line[2:], 0, len(line)-2)
elif line[0] == "2":
end = line.rfind(' ')
handler.endElement(line[2:end])
elif line[0] == "9":
pass
elif line[0] == '4':
# 4 ns,name,qname, {}
idx = line.rfind(' ')
idx = line[:idx].rfind(' ')
idx = line[:idx].rfind(' ')
line = line[:idx]
(ns, name, qname, attrs) = eval(line[2:])
handler.startElementNS((ns,name), qname, attrs)
elif line[0] == '5':
# 5 ns,name,qname parent pred end
idx = line.rfind(' ')
line = line[:idx]
(ns, name, qname) = eval(line[2:])
handler.endElementNS((ns,name),qname)
elif line[0] == '6':
# 6 pref, uri
pref, uri = eval(line[2:])
handler.startPrefixMapping(pref, uri)
else:
# Unknown type
raise ValueError(line)
def get_dom(self, session):
if (self.dom):
return self.dom
else:
# Turn SAX into DOM and cache
s2dhandler.initState()
self.saxify(session, s2dhandler);
self.dom = s2dhandler.getRootNode()
return self.dom
def get_xml(self, session, events=[]):
if (not events and self.xml):
return self.xml
else:
# Turn SAX into XML and cache
if not events:
process = self.sax
else:
process = events
s2xhandler.initState()
self.saxify(session, s2xhandler, process)
if not events:
self.xml = s2xhandler.get_xmlString()
return self.xml
else:
return s2xhandler.get_xmlString()
def get_sax(self, session):
return self.sax
def fetch_vector(self, session, index, summary=False):
return index.indexStore.fetch_vector(session, index, self, summary)
class MarcRecord(Record):
"""For dealing with Library MARC Records."""
def __init__(self, data, xml="", docId=0, wordCount=0, byteCount=0):
        txt = data  # raw MARC transmission passed in by the caller
self.marc = MARC(txt)
self.id = docId
# Estimate number of words...
display = str(self.marc)
if wordCount:
self.wordCount=wordCount
else:
self.wordCount = len(display.split()) - ( len(display.split('\n')) * 2)
if byteCount:
self.byteCount = byteCount
else:
self.byteCount = len(display)
self.decoder = MARC8_to_Unicode()
self.asciiRe = re.compile('([\x0e-\x1f]|[\x7b-\xff])')
def process_xpath(self, session, xpath, maps={}):
if (not isinstance(xpath, list)):
# Raw XPath
            c = utils.verifyXPaths([xpath])
if (not c or not c[0][1]):
return []
else:
xpath = c[0]
xp = xpath[1]
# format: fldNNN/a
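        # e.g. an xpath of "fld245/a" selects subfield a of MARC field 245, and
        # "fld008/lang" pulls the language code slice (illustrative examples)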
try:
fld = int(xp[0][1][3:])
except ValueError:
# not a NNN not an int
return []
if fld in self.marc.fields:
data = self.marc.fields[fld]
else:
return []
if len(xp) > 1:
subfield = xp[1][1]
else:
subfield = ""
vals = []
if fld in [0,1]:
vals = data
else:
for d in data:
if not subfield:
vals.append(' '.join([x[1] for x in d[2]]))
elif subfield == 'ind1':
vals.append(d[0])
elif subfield == 'ind2':
vals.append(d[1])
elif fld == 8:
if not subfield:
vals.append(d)
elif subfield == 'lang':
vals.append(d[35:38])
elif subfield == 'date':
vals.append(d[:6])
elif subfield == 'pubStatus':
vals.append(d[6])
elif subfield == 'date1':
vals.append(d[7:11])
elif subfield == 'date2':
vals.append(d[11:15])
elif subfield == 'pubPlace':
vals.append(d[15:18])
else:
for x in d[2]:
try:
if x[0] == subfield:
vals.append(x[1])
except:
# broken
pass
nvals = []
for v in vals:
try:
nvals.append(v.decode('utf-8'))
except:
try:
convtd = self.decoder.translate(v)
nvals.append(unicodedata.normalize('NFC', convtd))
except:
# strip out any totally @^%(ed characters
v = self.asciiRe.sub('?', v)
nvals.append(v)
return nvals
def get_dom(self, session):
raise(NotImplementedError)
def get_sax(self, session):
raise(NotImplementedError)
def get_xml(self, session):
return self.marc.toMARCXML()
def fetch_vector(self, session, index, summary=False):
return index.indexStore.fetch_vector(session, index, self, summary)
| [
"[email protected]"
] | |
5c2482df35a2b3e2793446e744596a4eff53075d | 920ab19b73a7cba21d340a49d9d24e2d1eeabf3d | /idpsreact/bin/automat-visualize | 518eafa6739f15f864b7d8624057a1b909d8f1e5 | [
"MIT"
] | permissive | DTrafford/IDPS | 5fa2b73f2c47cbf50b90a1a786c10f7d69c995b4 | 1eaccfc218adcb7231e64271731c765f8362b891 | refs/heads/master | 2022-12-16T16:28:34.801962 | 2020-03-30T18:08:09 | 2020-03-30T18:08:09 | 234,163,829 | 0 | 0 | MIT | 2020-09-10T06:26:02 | 2020-01-15T20:10:09 | Python | UTF-8 | Python | false | false | 281 | #!/Users/sangit/Downloads/django-react-boilerplate-master/idpsreact/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from automat._visualize import tool
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(tool())
| [
"[email protected]"
] | ||
95e69f614829e398941039bb5e7c6b54d7912473 | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-mgmt-resource/azure/mgmt/resource/policy/v2018_03_01/operations/policy_definitions_operations.py | 35d46e882383af32ab876701a41bdc8be7b23c00 | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 31,610 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class PolicyDefinitionsOperations(object):
"""PolicyDefinitionsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the operation. Constant value: "2018-03-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-03-01"
self.config = config
def create_or_update(
self, policy_definition_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a policy definition in a subscription.
This operation creates or updates a policy definition in the given
subscription with the given name.
:param policy_definition_name: The name of the policy definition to
create.
:type policy_definition_name: str
:param parameters: The policy definition properties.
:type parameters:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicyDefinition')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
def delete(
self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy definition in a subscription.
This operation deletes the policy definition in the given subscription
with the given name.
:param policy_definition_name: The name of the policy definition to
delete.
:type policy_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
def get(
self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves a policy definition in a subscription.
This operation retrieves the policy definition in the given
subscription with the given name.
:param policy_definition_name: The name of the policy definition to
get.
:type policy_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
def get_built_in(
self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves a built-in policy definition.
This operation retrieves the built-in policy definition with the given
name.
:param policy_definition_name: The name of the built-in policy
definition to get.
:type policy_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_built_in.metadata['url']
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_built_in.metadata = {'url': '/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
def create_or_update_at_management_group(
self, policy_definition_name, parameters, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a policy definition in a management group.
This operation creates or updates a policy definition in the given
management group with the given name.
:param policy_definition_name: The name of the policy definition to
create.
:type policy_definition_name: str
:param parameters: The policy definition properties.
:type parameters:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.create_or_update_at_management_group.metadata['url']
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicyDefinition')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
def delete_at_management_group(
self, policy_definition_name, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy definition in a management group.
This operation deletes the policy definition in the given management
group with the given name.
:param policy_definition_name: The name of the policy definition to
delete.
:type policy_definition_name: str
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete_at_management_group.metadata['url']
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
def get_at_management_group(
self, policy_definition_name, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Retrieve a policy definition in a management group.
This operation retrieves the policy definition in the given management
group with the given name.
:param policy_definition_name: The name of the policy definition to
get.
:type policy_definition_name: str
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PolicyDefinition or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_at_management_group.metadata['url']
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Retrieves policy definitions in a subscription.
This operation retrieves a list of all the policy definitions in a
given subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PolicyDefinition
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinitionPaged[~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions'}
def list_built_in(
self, custom_headers=None, raw=False, **operation_config):
"""Retrieve built-in policy definitions.
This operation retrieves a list of all the built-in policy definitions.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PolicyDefinition
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinitionPaged[~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_built_in.metadata['url']
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_built_in.metadata = {'url': '/providers/Microsoft.Authorization/policyDefinitions'}
def list_by_management_group(
self, management_group_id, custom_headers=None, raw=False, **operation_config):
"""Retrieve policy definitions in a management group.
This operation retrieves a list of all the policy definitions in a
given management group.
:param management_group_id: The ID of the management group.
:type management_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PolicyDefinition
:rtype:
~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinitionPaged[~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_management_group.metadata['url']
path_format_arguments = {
'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions'}
| [
"[email protected]"
] | |
6684ca9dd67bacb41767bd65a1c0c1f2dd8193ce | e07f6ac5559d09eb6f5393650af135c7474f5003 | /recent_news.py | e27c23ffb42fa9cdf553ea3b1d714c6870d9ef68 | [] | no_license | Money-fin/backend | 21e188f3f59ccaa216d1ea4bb7b78f670831cb6f | 909961dc33df84ba3663e622bfdf6ab98f915f5f | refs/heads/master | 2022-12-04T08:32:10.094335 | 2020-08-29T09:57:28 | 2020-08-29T09:57:28 | 291,008,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | import requests
import sys
sys.path.append("/home/jylee/backend")
import urllib
import os
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from helper import KafkaHelper
def new_crawl(link, kafka=False):
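    # Fetch one Yonhap News article page and pull out its title, timestamp, lead image
    # and body text; publish to Kafka when kafka=True, otherwise return the parsed dict.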
url = link
item_info = requests.get(url).text
soup = BeautifulSoup(item_info, 'html.parser')
title = soup.select('div.content03 header.title-article01 h1')[0].get_text()
time = soup.select('div.content03 header.title-article01 p')[0].get_text()[4:]
img_url = f"https:{soup.select('div.img-con span img')[0]['src']}"
raw_content = soup.select('div.story-news.article')
# print(raw_content)
content_p = [item.select("p") for item in raw_content]
content_text = [item.get_text().strip() for item in content_p[0]]
content = "\n".join(content_text[1:])
data_dict = {
"title": title,
"content": content,
"link": link
}
if kafka:
KafkaHelper.pub_ninput(data_dict)
else:
data_dict["time"] = time
data_dict["img_url"] = img_url
return data_dict
def recent_new_check():
past_list = ""
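    # poll the latest-news listing; whenever the top article link changes, crawl and
    # publish it, then remember the link (note: no sleep, so this polls continuously)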
    while True:
        # Fetch the "latest news" listing and pull the link of the newest article
        url = 'https://www.yna.co.kr/news?site=navi_latest_depth01'
        item_info = requests.get(url).text
        soup = BeautifulSoup(item_info, 'html.parser')
        new_a_tag = soup.select('div.list-type038 ul')[0].select("li")[0].select("div div a.tit-wrap")
        current_link = f"https:{new_a_tag[0]['href']}"
        if past_list != current_link:
            # A new article was published: crawl it and publish the result to Kafka
            new_crawl(current_link, True)
            past_list = current_link
        time.sleep(5)  # poll politely instead of spinning in a tight loop
recent_new_check() | [
"[email protected]"
] | |
d309ba906885b2264436cea4fe7c0b1cb6487058 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/special_equipment_1.py | d0b34a9eefba484eaeb14ea03e11c478e502ee89 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 1,577 | py | from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.type_element_status_1 import TypeElementStatus1
__NAMESPACE__ = "http://www.travelport.com/schema/common_v52_0"
@dataclass
class SpecialEquipment1:
"""
Parameters
----------
key
type_value
Special equipment associated with a specific vehicle
el_stat
This attribute is used to show the action results of an element.
Possible values are "A" (when elements have been added to the UR)
and "M" (when existing elements have been modified). Response only.
key_override
If a duplicate key is found where we are adding elements in some
cases like URAdd, then instead of erroring out set this attribute to
true.
"""
class Meta:
name = "SpecialEquipment"
namespace = "http://www.travelport.com/schema/common_v52_0"
key: None | str = field(
default=None,
metadata={
"name": "Key",
"type": "Attribute",
}
)
type_value: None | str = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
"required": True,
}
)
el_stat: None | TypeElementStatus1 = field(
default=None,
metadata={
"name": "ElStat",
"type": "Attribute",
}
)
key_override: None | bool = field(
default=None,
metadata={
"name": "KeyOverride",
"type": "Attribute",
}
)
| [
"[email protected]"
] | |
e8611029177ec93e595d82b86b795cbc307b7108 | d4ab63e2ff846ff509ab3b8a191381bdf8197325 | /project/test_main.py | 8544ed907817ff34f90b366519a3db4337d52c5e | [] | no_license | ibrobabs/task | c2c95d8c83340a38be0ff8a1d7d3da55de33a097 | 82adc4fa54ab9c3606b2770325454916c7f75693 | refs/heads/master | 2021-01-18T17:45:31.392805 | 2017-04-01T05:22:24 | 2017-04-01T05:22:24 | 86,812,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | import os
import unittest
from project import app, db
from project.config import basedir
from project.models import User
TEST_DB = 'test.db'
class MainTests(unittest.TestCase):
#Setup and Teardown
def setUp(self):
app.config['TESTING'] = True
app.config['WTF_CSRF_ENABLED'] = False
# app.config['DEBUG'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
os.path.join(basedir, TEST_DB)
self.app = app.test_client()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
# helper methods
def login(self, name, password):
return self.app.post('/', data=dict(
name=name, password=password), follow_redirects=True)
# tests
def test_404_error(self):
response = self.app.get('/this-route-does-not-exist/')
self.assertEquals(response.status_code, 404)
self.assertIn(b"Sorry. There's nothing here.", response.data)
def test_500_error(self):
bad_user = User(
name='Jeremy',
email='[email protected]',
password='django'
)
db.session.add(bad_user)
db.session.commit()
self.assertRaises(ValueError, self.login, 'Jeremy', 'django')
try:
response = self.login('Jeremy', 'django')
self.assertEquals(response.status_code, 500)
except ValueError:
pass
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
37e7b65b2eb87e028e91d5e800045af24ea8b6c0 | b0a217700c563c4f057f2aebbde8faba4b1b26d2 | /software/glasgow/arch/jtag.py | 7c4fe835ca1a2bd2417ce6ed37892e998c03caf9 | [
"0BSD",
"Apache-2.0"
] | permissive | kbeckmann/Glasgow | 5d183865da4fb499099d4c17e878a76192b691e7 | cd31e293cb99ee10a3e4a03ff26f6f124e512c64 | refs/heads/master | 2021-09-15T15:59:38.211633 | 2018-11-15T22:36:04 | 2018-11-22T21:13:59 | 157,077,707 | 3 | 0 | NOASSERTION | 2018-11-11T12:33:49 | 2018-11-11T12:33:48 | null | UTF-8 | Python | false | false | 250 | py | # Ref: IEEE 1149.1
from bitarray import bitarray
from ..support.bits import *
__all__ = [
# DR
"DR_IDCODE",
]
DR_IDCODE = Bitfield("DR_IDCODE", 4, [
("present", 1),
("mfg_id", 11),
("part_id", 16),
("version", 4),
])
| [
"[email protected]"
] | |
97450e3407268358d4f64aefe3120b8487b3401e | 425db5a849281d333e68c26a26678e7c8ce11b66 | /maths/fast_pow_and_matrix_multi.py | 987f29bb269b191cf1b8759d9bc80770e1b3e800 | [
"MIT"
] | permissive | lih627/python-algorithm-templates | e8092b327a02506086414df41bbfb2af5d6b06dc | a61fd583e33a769b44ab758990625d3381793768 | refs/heads/master | 2021-07-23T17:10:43.814639 | 2021-01-21T17:14:55 | 2021-01-21T17:14:55 | 238,456,498 | 29 | 8 | null | null | null | null | UTF-8 | Python | false | false | 2,500 | py | import random
def fpowx(x, n):
"""
quick pow: x ** n
"""
res = 1
while n:
if n & 1:
res = res * x
# compute x^2 x^4 x^8
x *= x
n >>= 1
return res
def fmulti(m, n, mod=10 ** 9 + 7):
"""
    Modular multiplication by repeated doubling.
    It gives no speed-up in Python; it only helps in languages such as C,
    where it prevents intermediate overflow.
"""
res = 0
while n:
if n & 1:
res += m
m = (m + m) % mod
res %= mod
n >>= 1
return res
def matrix_multiply(matrix_a, matrix_b):
# 模 MOD 乘法/加法
MOD = 10 ** 9 + 7
n_row = len(matrix_a)
n_col = len(matrix_b[0])
n_tmp = len(matrix_a[0])
matrix_c = [[0 for _ in range(n_col)] for _ in range(n_row)]
for i in range(n_row):
for j in range(n_col):
for k in range(n_tmp):
matrix_c[i][j] += matrix_a[i][k] * matrix_b[k][j] % MOD
matrix_c[i][j] %= MOD
return matrix_c
def get_unit_matrix(n):
# matrix I
unit_matrix = [[0 for _ in range(n)] for _ in range(n)]
for _ in range(n):
unit_matrix[_][_] = 1
return unit_matrix
def quick_matrix_pow(matrix_a, n):
# A ^ n
l = len(matrix_a)
res = get_unit_matrix(l)
while n:
if n & 1:
res = matrix_multiply(res, matrix_a)
        # square the base each iteration (binary exponentiation)
        matrix_a = matrix_multiply(matrix_a, matrix_a)
n >>= 1
return res
def test_fmulti():
m = random.randint(10 ** 9, 10 ** 15)
n = random.randint(10 ** 9, 10 ** 15)
res = fmulti(m, n)
return res
def multi(m, n, mod=10 ** 9 + 7):
return m * n % mod
def test_multi():
m = random.randint(10 ** 9, 10 ** 15)
n = random.randint(10 ** 9, 10 ** 15)
res = multi(m, n)
return res
if __name__ == '__main__':
print('fast pow: 2 ** 11: {}'.format(fpowx(2, 11)))
print(fmulti(987654, 987654321))
print(987654 * 987654321 % (10 ** 9 + 7))
# test the speed of fast(?)-multi
import timeit
T_fmulti = timeit.Timer('test_fmulti()',
'from __main__ import test_fmulti')
print('f_multi: {:.6f}s'.format(T_fmulti.timeit(number=1000)))
T_multi = timeit.Timer('test_multi()',
'from __main__ import test_multi')
print('s_multi: {:.6f}s'.format(T_multi.timeit(number=1000)))
# test matrix multiply
a = [[1, 2, 3], [4, 5, 6]]
b = [[1, 2], [3, 4], [5, 6]]
c = matrix_multiply(a, b)
print("a = {}\nb = {}\nc = {}".format(a, b, c))
| [
"[email protected]"
] | |
2fae047ea5b7af3cba687716d80fa7aab18a4d0a | 4d259f441632f5c45b94e8d816fc31a4f022af3c | /date/tt.py | 9f4bc74af2f7f817b5cc2a96f52b570bd76401f0 | [] | no_license | xiaoruiguo/lab | c37224fd4eb604aa2b39fe18ba64e93b7159a1eb | ec99f51b498244c414b025d7dae91fdad2f8ef46 | refs/heads/master | 2020-05-25T01:37:42.070770 | 2016-05-16T23:24:26 | 2016-05-16T23:24:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | a=['1','2','3']
s = ['sss'*3]+a
print s
| [
"[email protected]"
] | |
f4506a41f21652bd250f6896810cd6fbdec72bfb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03042/s013075072.py | 044f87c3be49952ef7be8bf867e28108c9b4cd05 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | s=int(input())
a=s//100
b=s%100
if a>0 and a<=12:
if b>0 and b<=12:
print("AMBIGUOUS")
else:
print("MMYY")
else:
if b>0 and b<=12:
print("YYMM")
else:
print("NA") | [
"[email protected]"
] | |
62b6273166486acf1ece5437a98e41a0350b1124 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_celebrating.py | 305a78d8f0d008577d0f029e5a82a8910f663133 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
from xai.brain.wordbase.verbs._celebrate import _CELEBRATE
#calss header
class _CELEBRATING(_CELEBRATE, ):
def __init__(self,):
_CELEBRATE.__init__(self)
self.name = "CELEBRATING"
self.specie = 'verbs'
self.basic = "celebrate"
self.jsondata = {}
| [
"[email protected]"
] | |
2f0cb96aaa337f7309712bd930d65de11673c433 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Pytest/pytest-django/pytest_django/plugin.py | cbfe15f79cb04f0e152ebe02bc8b4d3886108f5f | [
"BSD-3-Clause"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4b9c174912c01ae59fb496601d8c4ecf26765ee33134d079295304c25873875a
size 26008
| [
"[email protected]"
] | |
1731a6bc44fffbafb6437d4bb39a9bb76acfeb29 | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/coghq/BossbotCountryClubMazeRoom_Battle00.py | 218b80966c9553066709cc1c2f781554cc97b785 | [] | no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 2,389 | py | #Embedded file name: toontown.coghq.BossbotCountryClubMazeRoom_Battle00
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_12/models/bossbotHQ/BossbotMazex1_C',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
110000: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-131.21, 84.92, 0),
'hpr': Point3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 0,
'radius': 10},
110202: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 110001,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 0,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 110000,
'unlock2Event': 0,
'unlock3Event': 0},
110002: {'type': 'maze',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-141.563, -78.8353, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'numSections': 1},
10002: {'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
110001: {'type': 'nodepath',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-106.91, 82.6953, 0),
'hpr': Point3(270, 0, 0),
'scale': Vec3(1, 1, 1)}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
| [
"[email protected]"
] | |
c5020aa411c33ba9eb808cd247fe814f9c0ece17 | 8f5f92beeaefcd9effc93da87b26acb5ea159274 | /xtorch/modules/seq2seq_encoders/seq2seq_encoder.py | edcdada140696dba36c224bbb20440c20a1c8b5f | [
"MIT"
] | permissive | altescy/xtorch | 15f984bf08654dc00fc1be603cca696676428cc1 | bcbbbe645f4d62c211af5b3555c526cc60792c32 | refs/heads/main | 2023-04-12T15:45:52.192602 | 2021-04-25T11:35:45 | 2021-04-25T11:35:45 | 361,373,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | from typing import Optional
import torch
class Seq2seqEncoder(torch.nn.Module):
def forward(
self,
inputs: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor:
"""
Parameters
==========
inputs: `torch.Tensor`
Tensor of shape (batch_size, sequence_length, embedding_size).
mask: `torch.BoolTensor`, optional (default = None)
BoolTensor of shape (batch_size, sequence_length).
Return
======
output:
Tensor of shape (batch_size, sequence_length, encoding_size).
"""
raise NotImplementedError
def get_input_dim(self) -> int:
raise NotImplementedError
def get_output_dim(self) -> int:
raise NotImplementedError
| [
"[email protected]"
] | |
e32d9ecd5addc70ef1833cfb869c834a230a4f2c | 7f97814acd76ca96aee877fd70d401380f848fae | /7_training/re_start_end.py | e5842c00b391813441ccd2346854697e29805bbb | [] | no_license | tberhanu/all_trainings | 80cc4948868928af3da16cc3c5b8a9ab18377d08 | e4e83d7c71a72e64c6e55096a609cec9091b78fa | refs/heads/master | 2020-04-13T12:12:21.272316 | 2019-03-16T04:22:20 | 2019-03-16T04:22:20 | 163,195,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | """
https://www.hackerrank.com/challenges/re-start-re-end/problem?h_r=next-challenge&h_v=zen
"""
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re
s, k = input(), input()
i = 0
found = False
while i < len(s):
string = s[i:]
match = re.match(r'{}'.format(k), string)
if match == None:
i = i + 1
else:
found = True
print((match.start() + i, match.end() + i - 1))
i = i + 1
if not found:
    print('(-1, -1)')
"[email protected]"
] | |
edcbbc430b0d1a558d19be8a4a2625b7c762eb20 | 5add80be09ee754fced03e512a9acc214971cddf | /python-code/openvx-learning/helloworld.py | 61352b55542a81f5e56cc66c6767ea1beb6c1d65 | [
"Apache-2.0"
] | permissive | juxiangwu/image-processing | f774a9164de9c57e88742e6185ac3b28320eae69 | c644ef3386973b2b983c6b6b08f15dc8d52cd39f | refs/heads/master | 2021-06-24T15:13:08.900960 | 2019-04-03T10:28:44 | 2019-04-03T10:28:44 | 134,564,878 | 15 | 5 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | from pyvx import vx
context = vx.CreateContext()
images = [
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_UYVY),
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_S16),
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_U8),
]
graph = vx.CreateGraph(context)
virts = [
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
]
vx.ChannelExtractNode(graph, images[0], vx.CHANNEL_Y, virts[0])
vx.Gaussian3x3Node(graph, virts[0], virts[1])
vx.Sobel3x3Node(graph, virts[1], virts[2], virts[3])
vx.MagnitudeNode(graph, virts[2], virts[3], images[1])
vx.PhaseNode(graph, virts[2], virts[3], images[2])
status = vx.VerifyGraph(graph)
if status == vx.SUCCESS:
status = vx.ProcessGraph(graph)
else:
print("Verification failed.")
vx.ReleaseContext(context) | [
"[email protected]"
] | |
d92df5cd630581d42b06e50bdc1070c5d414a17c | 9647524c0f4d93fb1c8a992c20fe9f9d2710cde3 | /2-content/Python/intro_programming-master/scripts/remove_input_references.py | 2ab8878b1a362f079adf49a971ef71aa7677a4ea | [
"MIT"
] | permissive | bgoonz/web-dev-notes-resource-site | 16161aa68e8eecafeaba4dc7abeb957aaee864c5 | e7dc9c30393597cb39830c49c3f51c1486b97584 | refs/heads/master | 2023-09-01T14:04:20.867818 | 2021-06-17T07:56:20 | 2021-06-17T07:56:20 | 329,194,347 | 7 | 5 | MIT | 2021-07-05T06:36:49 | 2021-01-13T04:34:20 | JavaScript | UTF-8 | Python | false | false | 1,306 | py | # This script removes the input reference numbers from html pages.
# They play a useful role in scientific notebooks, but they are really
# just visual clutter in this project.
# Could be an nbconvert setting, but it's an easy enough scripting job.
import os
import sys
print("\nStripping input reference numbers from code cells...")
# Find all files to work with.
path_to_notebooks = '/srv/projects/intro_programming/intro_programming/notebooks/'
filenames = []
for filename in os.listdir(path_to_notebooks):
if '.html' in filename and filename != 'index.html':
filenames.append(filename)
# one file for testing:
#filenames = ['hello_world.html']
for filename in filenames:
f = open(path_to_notebooks + filename, 'r')
lines = f.readlines()
f.close()
f = open(path_to_notebooks + filename, 'wb')
for line in lines:
# Unwanted lines have opening and closing div on same line,
# with input reference number between them.
if ('<div class="prompt input_prompt">' in line
and '</div>' in line):
# Don't write this line.
continue
else:
# Regular line, write it.
f.write(line.encode('utf-8'))
f.close()
print(" Stripped input reference numbers.\n")
| [
"[email protected]"
] | |
dd55eae4011f0cb80d47c940385e7a3ff85cd7a3 | 602fa0e4ce194d3073d78230c61f7053281f9f9b | /code/python/src/categories/catutil.py | df03a0027b66f8d76d4265de7c7074d56b487bab | [] | no_license | ziqizhang/wop | 111cfdda1686a874ff1fc11a453a23fb52d43af1 | ea0c37f444de9f2d5303f74b989f6d1a09feb61d | refs/heads/master | 2022-09-14T20:14:11.575021 | 2021-12-10T21:23:24 | 2021-12-10T21:23:24 | 166,239,995 | 2 | 1 | null | 2022-09-01T23:11:13 | 2019-01-17T14:33:51 | Python | UTF-8 | Python | false | false | 2,128 | py | import pandas as pd
from nltk import PorterStemmer, WordNetLemmatizer
import numpy
from categories import cleanCategories as cc
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
#0=stem; 1=lem; else=nothing
def normalise_categories(in_file_name, col, stem_or_lem):
df = pd.read_csv(in_file_name, header=0, delimiter=";", quoting=0, encoding="utf-8",
).as_matrix()
norm_cats=set()
max_toks=0
for r in df:
c = r[col]
if type(c) is not str and numpy.isnan(c):
c="NONE"
toks = len(c.split(" "))
if toks>max_toks:
max_toks=toks
if stem_or_lem==0:
c=stemmer.stem(c).strip()
if len(c)>2:
norm_cats.add(c)
elif stem_or_lem==1:
c=lemmatizer.lemmatize(c).strip()
if len(c)>2:
norm_cats.add(c)
else:
norm_cats.add(c)
norm_cats_list=list(norm_cats)
norm_cats_list=sorted(norm_cats_list)
print(len(norm_cats_list))
print(max_toks)
for nc in norm_cats_list:
print(nc)
def get_parent_category_level(in_file_name, col):
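    # Keep only the top-level category, i.e. the text before the first ">" separator.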
df = pd.read_csv(in_file_name, header=0, delimiter=";", quoting=0, encoding="utf-8",
).as_matrix()
norm_cats = set()
norm_cats_list=[]
for r in df:
c = r[col]
if type(c) is not str and numpy.isnan(c):
continue
c= cc.normaliseCategories(c)
try:
trim = c.index(">")
except ValueError:
continue
c=c[0:trim].strip()
norm_cats.add(c)
norm_cats_list.append(c)
norm_cats_unique_list=sorted(list(norm_cats))
norm_cats=sorted(norm_cats)
for nc in norm_cats:
print(nc)
print("\n\n>>>>>>>>>\n\n")
for nc in norm_cats_unique_list:
print(nc)
if __name__ == "__main__":
# normalise_categories("/home/zz/Work/data/wop_data/goldstandard_eng_v1_cleanedCategories.csv",
# 13,0)
get_parent_category_level("/home/zz/Work/data/wop_data/goldstandard_eng_v1_utf8.csv",
8) | [
"[email protected]"
] | |
d384f24b5c0b0b257f66b1db1a63854c59b95395 | 3e4c69317323bca865b025503b60bf83d3ae65f8 | /tests/server/blueprints/variants/test_variant_views_variant.py | c1fd7fe078f8967099df90b24cb215c5a79a60ac | [
"BSD-3-Clause"
] | permissive | tapaswenipathak/scout | f59beaa997a45487ac96c3b3e560b5e5aa9b30ae | c9b3ec14f5105abe6066337110145a263320b4c5 | refs/heads/master | 2020-05-30T11:13:25.662300 | 2019-05-28T09:26:25 | 2019-05-28T09:26:25 | 189,694,812 | 1 | 0 | BSD-3-Clause | 2019-06-01T05:36:35 | 2019-06-01T05:36:34 | null | UTF-8 | Python | false | false | 1,207 | py | # -*- coding: utf-8 -*-
import logging
from flask import url_for
log = logging.getLogger(__name__)
def test_server_variant(app, real_adapter):
# GIVEN an initialized app
# GIVEN a valid user, institute, case and variant
adapter = real_adapter
variant_obj = adapter.variant_collection.find_one()
assert variant_obj
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
internal_case_id = variant_obj['case_id']
case = adapter.case(internal_case_id)
case_name = case['display_name']
owner = case['owner']
# NOTE needs the actual document_id, not the variant_id
variant_id = variant_obj['_id']
log.debug('Inst {} case {} variant {}'.format(owner,case_name,
variant_id))
# WHEN accessing the variant page
resp = client.get(url_for('variants.variant',
institute_id=owner,
case_name=case_name,
variant_id=variant_id))
        log.debug("%s", resp.data)
# THEN it should return a page
assert resp.status_code == 200
| [
"[email protected]"
] | |
d0a3f8fea955cd6b7239c30eb4bde72572683e27 | f2f88a578165a764d2ebb4a022d19e2ea4cc9946 | /pyvisdk/do/guest_authentication.py | f16ac39d82372db0665b605fca27476d5d281d82 | [
"MIT"
] | permissive | pombredanne/pyvisdk | 1ecc68a1bf264095f72f274c776e5868fb302673 | de24eb4426eb76233dc2e57640d3274ffd304eb3 | refs/heads/master | 2021-01-21T16:18:39.233611 | 2014-07-28T19:50:38 | 2014-07-28T19:50:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def GuestAuthentication(vim, *args, **kwargs):
'''GuestAuthentication is an abstract base class for authentication in the guest.'''
obj = vim.client.factory.create('ns0:GuestAuthentication')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument got: %d' % len(args))
required = [ 'interactiveSession' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| [
"[email protected]"
] | |
dd42b52d712e69767f647a33a975f897d68b913f | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/OssDirectoryDetail.py | 7b7aed746981c86b4885e7159246c6f7d6a7017c | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,270 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class OssDirectoryDetail(object):
def __init__(self):
self._acl = None
self._file_id = None
self._file_name = None
self._last_modified = None
@property
def acl(self):
return self._acl
@acl.setter
def acl(self, value):
self._acl = value
@property
def file_id(self):
return self._file_id
@file_id.setter
def file_id(self, value):
self._file_id = value
@property
def file_name(self):
return self._file_name
@file_name.setter
def file_name(self, value):
self._file_name = value
@property
def last_modified(self):
return self._last_modified
@last_modified.setter
def last_modified(self, value):
self._last_modified = value
def to_alipay_dict(self):
params = dict()
if self.acl:
if hasattr(self.acl, 'to_alipay_dict'):
params['acl'] = self.acl.to_alipay_dict()
else:
params['acl'] = self.acl
if self.file_id:
if hasattr(self.file_id, 'to_alipay_dict'):
params['file_id'] = self.file_id.to_alipay_dict()
else:
params['file_id'] = self.file_id
if self.file_name:
if hasattr(self.file_name, 'to_alipay_dict'):
params['file_name'] = self.file_name.to_alipay_dict()
else:
params['file_name'] = self.file_name
if self.last_modified:
if hasattr(self.last_modified, 'to_alipay_dict'):
params['last_modified'] = self.last_modified.to_alipay_dict()
else:
params['last_modified'] = self.last_modified
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = OssDirectoryDetail()
if 'acl' in d:
o.acl = d['acl']
if 'file_id' in d:
o.file_id = d['file_id']
if 'file_name' in d:
o.file_name = d['file_name']
if 'last_modified' in d:
o.last_modified = d['last_modified']
return o
| [
"[email protected]"
] | |
93013a6c44645ef61cb45e633030c20663c3fde6 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/classification/torchvision/pytorch/train.py | 1c16c81bc51ace035a2653350c088a3888b0904f | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 15,577 | py | # Copyright (c) 2022 Iluvatar CoreX. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import warnings
warnings.filterwarnings('ignore')
import datetime
import os
import logging
import time
import torch
import torch.utils.data
try:
from apex import amp as apex_amp
except:
apex_amp = None
try:
from torch.cuda.amp import autocast, GradScaler
scaler = GradScaler()
except:
autocast = None
scaler = None
from torch import nn
import torch.distributed as dist
import torchvision
import utils
from utils import (MetricLogger, SmoothedValue, accuracy, mkdir,\
init_distributed_mode, manual_seed,\
is_main_process, save_on_master, write_on_master)
from dataloader.classification import get_datasets, create_dataloader
def compute_loss(model, image, target, criterion):
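    # Sum the criterion over every model output (supports nets with auxiliary heads) and return the primary logits.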
output = model(image)
if not isinstance(output, (tuple, list)):
output = [output]
losses = []
for out in output:
losses.append(criterion(out, target))
loss = sum(losses)
return loss, output[0]
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, print_freq, use_amp=False, use_dali=False):
model.train()
metric_logger = MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
metric_logger.add_meter('img/s', SmoothedValue(window_size=10, fmt='{value}'))
header = 'Epoch: [{}]'.format(epoch)
all_fps = []
for data in metric_logger.log_every(data_loader, print_freq, header):
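        # DALI iterators yield a list of dicts; the plain DataLoader yields (image, target) tuples.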
if use_dali:
image, target = data[0]["data"], data[0]["label"][:, 0].long()
else:
image, target = data
start_time = time.time()
image, target = image.to(device, non_blocking=True), target.to(device, non_blocking=True)
loss, output = compute_loss(model, image, target, criterion)
if use_amp:
with apex_amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
optimizer.zero_grad()
end_time = time.time()
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = image.shape[0]
metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
fps = batch_size / (end_time - start_time) * utils.get_world_size()
metric_logger.meters['img/s'].update(fps)
all_fps.append(fps)
fps = round(sum(all_fps) / len(all_fps), 2)
print(header, 'Avg img/s:', fps)
return fps
def evaluate(model, criterion, data_loader, device, print_freq=100, use_dali=False):
model.eval()
metric_logger = MetricLogger(delimiter=" ")
header = 'Test:'
with torch.no_grad():
for data in metric_logger.log_every(data_loader, print_freq, header):
if use_dali:
image, target = data[0]["data"], data[0]["label"][:, 0].long()
else:
image, target = data
image = image.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
output = model(image)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
# FIXME need to take into account that the datasets
# could have been padded in distributed setup
batch_size = image.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print(' * Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5))
return round(metric_logger.acc1.global_avg, 2)
def _get_cache_path(filepath):
import hashlib
h = hashlib.sha1(filepath.encode()).hexdigest()
cache_path = os.path.join("~", ".torch", "vision", "datasets", "imagefolder", h[:10] + ".pt")
cache_path = os.path.expanduser(cache_path)
return cache_path
def create_optimzier(params, args):
opt_name = args.opt.lower()
if opt_name == 'sgd':
optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
elif opt_name == 'rmsprop':
optimizer = torch.optim.RMSprop(params, lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay, eps=0.0316, alpha=0.9)
elif opt_name == "fused_sgd":
from apex.optimizers import FusedSGD
optimizer = FusedSGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
else:
raise RuntimeError("Invalid optimizer {}. Only SGD and RMSprop are supported.".format(args.opt))
return optimizer
def main(args):
init_distributed_mode(args)
print(args)
device = torch.device(args.device)
manual_seed(args.seed, deterministic=args.deterministic)
# WARN:
if dist.is_initialized():
num_gpu = dist.get_world_size()
else:
num_gpu = 1
global_batch_size = num_gpu * args.batch_size
train_dir = os.path.join(args.data_path, 'train')
val_dir = os.path.join(args.data_path, 'val')
num_classes = len(os.listdir(train_dir))
if 0 < num_classes < 13:
if global_batch_size > 512:
if is_main_process():
print("WARN: Updating global batch size to 512, avoid non-convergence when training small dataset.")
args.batch_size = 512 // num_gpu
if args.pretrained:
num_classes = 1000
args.num_classes = num_classes
print("Creating model")
if hasattr(args, "model_cls"):
model = args.model_cls(args)
else:
model = torchvision.models.__dict__[args.model](pretrained=args.pretrained, num_classes=num_classes)
if args.padding_channel:
print("WARN: Cannot convert first conv to N4HW.")
data_loader, data_loader_test = create_dataloader(train_dir, val_dir, args)
if args.padding_channel and isinstance(data_loader, torch.utils.data.DataLoader):
data_loader.collate_fn = utils.nhwc_collect_fn(data_loader.collate_fn, fp16=args.amp, padding=args.padding_channel)
data_loader_test.collate_fn = utils.nhwc_collect_fn(data_loader_test.collate_fn, fp16=args.amp, padding=args.padding_channel)
model.to(device)
if args.distributed and args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
criterion = nn.CrossEntropyLoss()
if args.nhwc:
model = model.cuda().to(memory_format=torch.channels_last)
optimizer = create_optimzier(model.parameters(), args)
if args.amp:
model, optimizer = apex_amp.initialize(model, optimizer, opt_level="O2",
loss_scale="dynamic",
master_weights=True)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
if args.resume:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.test_only:
evaluate(model, criterion, data_loader_test, device=device)
return
print("Start training")
start_time = time.time()
best_acc1 = 0
best_epoch = 0
for epoch in range(args.start_epoch, args.epochs):
epoch_start_time = time.time()
if args.distributed and not args.dali:
data_loader.sampler.set_epoch(epoch)
fps = train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args.print_freq, args.amp, use_dali=args.dali)
lr_scheduler.step()
acc1 = evaluate(model, criterion, data_loader_test, device=device, use_dali=args.dali)
if acc1 > best_acc1:
best_acc1 = acc1
best_epoch = epoch
if args.output_dir is not None:
checkpoint = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'args': args}
save_on_master(
checkpoint,
os.path.join(args.output_dir, 'best.pth'.format(epoch)))
save_on_master(
checkpoint,
os.path.join(args.output_dir, 'latest.pth'))
epoch_total_time = time.time() - epoch_start_time
epoch_total_time_str = str(datetime.timedelta(seconds=int(epoch_total_time)))
print('epoch time {}'.format(epoch_total_time_str))
if args.dali:
data_loader.reset()
data_loader_test.reset()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('* Acc@1: {} at epoch {}'.format(round(best_acc1, 2), best_epoch))
print('Training time {}'.format(total_time_str))
if args.output_dir:
write_on_master({"Name":os.path.basename(args.output_dir),
"Model": args.model, "Dataset": os.path.basename(args.data_path), "AMP":args.amp,
"Acc@1":best_acc1, "FPS":fps, "Time": total_time_str}, os.path.join(args.output_dir, 'result.json'))
def get_args_parser(add_help=True):
import argparse
parser = argparse.ArgumentParser(description='PyTorch Classification Training', add_help=add_help)
parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', help='dataset')
parser.add_argument('--model', default='resnet18', help='model')
parser.add_argument('--device', default='cuda', help='device')
parser.add_argument('-b', '--batch-size', default=32, type=int)
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--opt', default='sgd', type=str, help='optimizer')
parser.add_argument('--lr', default=0.128, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--lr-step-size', default=30, type=int, help='decrease lr every step-size epochs')
parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
parser.add_argument('--output-dir', default=None, help='path where to save')
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument(
"--cache-dataset",
dest="cache_dataset",
help="Cache the datasets for quicker initialization. It also serializes the transforms",
action="store_true",
)
parser.add_argument(
"--sync-bn",
dest="sync_bn",
help="Use sync batch norm",
action="store_true",
)
parser.add_argument(
"--deterministic",
help="Do not benchmark conv algo",
action="store_true",
)
parser.add_argument(
"--test-only",
dest="test_only",
help="Only test the model",
action="store_true",
)
parser.add_argument(
"--pretrained",
dest="pretrained",
help="Use pre-trained models from the modelzoo",
action="store_true",
)
parser.add_argument('--auto-augment', default=None, help='auto augment policy (default: None)')
parser.add_argument('--random-erase', default=0.0, type=float, help='random erasing probability (default: 0.0)')
parser.add_argument(
"--dali",
help="Use dali as dataloader",
default=False,
action="store_true",
)
# distributed training parameters
parser.add_argument('--local_rank', default=-1, type=int,
help='Local rank')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
# other
parser.add_argument('--amp', action='store_true', help='Automatic Mixed Precision training')
parser.add_argument('--nhwc', action='store_true', help='Use NHWC')
parser.add_argument('--padding-channel', action='store_true', help='Padding the channels of image to 4')
parser.add_argument('--dali-cpu', action='store_true')
parser.add_argument('--seed', default=42, type=int, help='Random seed')
parser.add_argument('--crop-size', default=224, type=int)
parser.add_argument('--base-size', default=256, type=int)
return parser
def check_agrs(args):
if args.nhwc:
args.amp = True
if args.output_dir:
prefix=args.output_dir
names = [args.model, os.path.basename(args.data_path)]
if args.amp:
names.append("amp")
if torch.cuda.device_count():
names.append(f"dist_{utils.get_world_size()}x{torch.cuda.device_count()}")
exp_dir = "_".join(map(str, names))
args.output_dir = os.path.join(prefix, exp_dir)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
if args.amp:
if apex_amp is None:
raise RuntimeError("Not found apex in installed packages, cannot enable amp.")
def train_model(model_cls=None):
args = get_args_parser().parse_args()
check_agrs(args)
if utils.is_main_process():
setup_logging(args.output_dir)
if hasattr(torch, "corex") and args.dali:
args.dali_cpu = True
if model_cls is not None:
args.model_cls = model_cls
main(args)
def setup_logging(prefix):
if prefix:
handlers=[
logging.FileHandler(os.path.join(prefix, "train.log"), mode='w'),
logging.StreamHandler(),
]
else:
handlers = None
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=handlers
)
if __name__ == "__main__":
args = get_args_parser().parse_args()
check_agrs(args)
if utils.is_main_process():
setup_logging(args.output_dir)
try:
main(args)
except Exception as e:
logging.exception(e)
| [
"[email protected]"
] | |
dfc0cc855a774de8fa89bf5d0af2e7761c1399da | cf0ab8503d4d704045070deea1e2125375711e86 | /apps/apikeys/v1/urls.py | 1a8b15c264dc105260d2432da2775b98a3fb3a99 | [] | no_license | faierbol/syncano-platform | c3c6468600115752fd9fa5e46a0ad59f75f6bc9c | 879111874d1ef70418b4890cf970720b0a2be4d8 | refs/heads/master | 2023-07-20T10:13:40.066127 | 2021-02-08T15:01:13 | 2021-02-08T15:01:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # coding=UTF8
from rest_framework.routers import SimpleRouter
from apps.apikeys.v1 import views
router = SimpleRouter()
router.register('api_keys', views.ApiKeyViewSet)
urlpatterns = router.urls
| [
"[email protected]"
] | |
42bdb6a885ac58d51bad36beea8877307f7902a5 | eda9187adfd53c03f55207ad05d09d2d118baa4f | /algo/Transfer_Learning/Transfer_learning.py | 725a6e82bceb8aa1d09e9cb263fc2fdf9da6aea1 | [] | no_license | HuiZhaozh/python_tutorials | 168761c9d21ad127a604512d7c6c6b38b4faa3c7 | bde4245741081656875bcba2e4e4fcb6b711a3d9 | refs/heads/master | 2023-07-07T20:36:20.137647 | 2020-04-24T07:18:25 | 2020-04-24T07:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,586 | py | # -*- coding:utf-8 -*-
# /usr/bin/python
'''
-------------------------------------------------
File Name : Transfer_learning
Description : 迁移学习
Envs : pytorch
Author : yanerrol
Date : 2020/2/17 09:58
-------------------------------------------------
Change Activity:
2020/2/17 : new
-------------------------------------------------
'''
__author__ = 'yanerrol'
import torch
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
#######################################
### PRE-TRAINED MODELS AVAILABLE HERE
## https://pytorch.org/docs/stable/torchvision/models.html
from torchvision import models
#######################################
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
##########################
### SETTINGS
##########################
# Device
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device:', DEVICE)
NUM_CLASSES = 10
# Hyperparameters
random_seed = 1
learning_rate = 0.0001
num_epochs = 10
batch_size = 128
##########################
### MNIST DATASET
##########################
custom_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
## Note that this particular normalization scheme is
## necessary since it was used for pre-training
## the network on ImageNet.
## These are the channel-means and standard deviations
## for z-score normalization.
train_dataset = datasets.CIFAR10(root='data',
train=True,
transform=custom_transform,
download=True)
test_dataset = datasets.CIFAR10(root='data',
train=False,
transform=custom_transform)
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=8,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
num_workers=8,
shuffle=False)
# Checking the dataset
for images, labels in train_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
break
##########################
### Loading Pre-Trained Model
##########################
model = models.vgg16(pretrained=True)
##########################
### Freezing Model
##########################
for param in model.parameters():
param.requires_grad = False
# Unfreeze the second-to-last classifier layer; requires_grad must be set on the
# layer's parameters (setting it on the Module object itself has no effect).
for param in model.classifier[3].parameters():
    param.requires_grad = True
model.classifier[6] = nn.Sequential(
nn.Linear(4096, 512),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(512, NUM_CLASSES))
##########################
### Training as usual
##########################
model = model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters())
def compute_accuracy(model, data_loader):
model.eval()
correct_pred, num_examples = 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
logits = model(features)
_, predicted_labels = torch.max(logits, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float() / num_examples * 100
def compute_epoch_loss(model, data_loader):
model.eval()
curr_loss, num_examples = 0., 0
with torch.no_grad():
for features, targets in data_loader:
features = features.to(DEVICE)
targets = targets.to(DEVICE)
logits = model(features)
loss = F.cross_entropy(logits, targets, reduction='sum')
num_examples += targets.size(0)
curr_loss += loss
curr_loss = curr_loss / num_examples
return curr_loss
start_time = time.time()
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
### FORWARD AND BACK PROP
logits = model(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
if not batch_idx % 50:
print('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
% (epoch + 1, num_epochs, batch_idx,
len(train_loader), cost))
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
print('Epoch: %03d/%03d | Train: %.3f%% | Loss: %.3f' % (
epoch + 1, num_epochs,
compute_accuracy(model, train_loader),
compute_epoch_loss(model, train_loader)))
print('Time elapsed: %.2f min' % ((time.time() - start_time) / 60))
print('Total Training Time: %.2f min' % ((time.time() - start_time) / 60))
with torch.set_grad_enabled(False): # save memory during inference
    print('Test accuracy: %.2f%%' % compute_accuracy(model, test_loader))
##########################
### Training as usual
##########################
import matplotlib.pyplot as plt
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
for batch_idx, (features, targets) in enumerate(test_loader):
features = features
targets = targets
break
logits = model(features.to(DEVICE))
_, predicted_labels = torch.max(logits, 1)
def unnormalize(tensor, mean, std):
for t, m, s in zip(tensor, mean, std):
t.mul_(s).add_(m)
return tensor
n_images = 10
fig, axes = plt.subplots(nrows=1, ncols=n_images,
sharex=True, sharey=True, figsize=(20, 2.5))
orig_images = features[:n_images]
for i in range(n_images):
curr_img = orig_images[i].detach().to(torch.device('cpu'))
curr_img = unnormalize(curr_img,
torch.tensor([0.485, 0.456, 0.406]),
torch.tensor([0.229, 0.224, 0.225]))
curr_img = curr_img.permute((1, 2, 0))
axes[i].imshow(curr_img)
axes[i].set_title(classes[predicted_labels[i]]) | [
"[email protected]"
] | |
57bfefceefd25252047dcd608dff497f0c347b82 | 988dd821269be12c2f56f62b0c35546fd3050537 | /python/quaternions/rotations.py | 852c8839c1435519fcbc0675bd055c4d8af732b7 | [] | no_license | gdiazh/adcs_models | fb19f541eeb9b01ae49ec98719c508d084e4fd7a | 51d0829cc777d2e345e4fabe406ec7f54e661117 | refs/heads/master | 2020-03-28T13:04:56.174852 | 2018-09-28T22:08:25 | 2018-09-28T22:08:25 | 148,364,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,050 | py | #!/usr/bin/python
__author__ = 'gdiaz'
import matplotlib as mpl
from plotVectors import PlotVectors
import numpy as np
class Rotation(object):
def __init__(self):
self.vectors = PlotVectors()
self.a = [0, 0, 0]
def rotate_z(self, a, yaw):
Az = np.matrix([[np.cos(yaw), -np.sin(yaw), 0],
[np.sin(yaw), np.cos(yaw), 0],
[0, 0, 1]])
a_ = np.matrix([[a[0]],
[a[1]],
[a[2]]])
u = Az*a_
return [u.item(0), u.item(1), u.item(2)]
def rotate_frame_z(self, I, J, K, yaw):
Az = np.matrix([[np.cos(yaw), np.sin(yaw), 0],
[-np.sin(yaw), np.cos(yaw), 0],
[0, 0, 1]])
I_ = np.matrix([I[0], I[1], I[2]])
J_ = np.matrix([J[0], J[1], J[2]])
K_ = np.matrix([K[0], K[1], K[2]])
i_ = I_*Az
j_ = J_*Az
k_ = K_*Az
i = [i_.item(0), i_.item(1), i_.item(2)]
j = [j_.item(0), j_.item(1), j_.item(2)]
k = [k_.item(0), k_.item(1), k_.item(2)]
return [i, j, k]
def vectorRotationTest(self):
# Calcs
p1 = [2, 0, 0]
yaw = 90*np.pi/180
p1_rot = self.rotate_z(p1, yaw)
print p1_rot
# Plot
self.vectors.plotAxes()
self.vectors.config()
self.vectors.plot(p1)
self.vectors.plot(p1_rot)
self.vectors.show()
def frameRotationTest(self):
# Calcs
I = [1, 0, 0]
J = [0, 1, 0]
K = [0, 0, 1]
yaw = 45*np.pi/180
ijk = self.rotate_frame_z(I, J, K, yaw)
print ijk
# Plot
self.vectors.plotAxes()
self.vectors.config()
self.vectors.plot(ijk[0])
self.vectors.plot(ijk[1])
self.vectors.plot(ijk[2])
self.vectors.show()
def get_qT(self, yawT): #Return quaternion target given yaw target
AT = np.matrix([[np.cos(yawT), np.sin(yawT), 0],
[-np.sin(yawT), np.cos(yawT), 0],
[0, 0, 1]])
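        # Standard DCM-to-quaternion extraction, scalar part q4 first (assumes q4 is nonzero for this yaw).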
q4 = 0.5*np.sqrt(1+AT[0,0]+AT[1,1]+AT[2,2])
q1 = 0.25*(AT[1,2]-AT[2,1])/q4
q2 = 0.25*(AT[2,0]-AT[0,2])/q4
q3 = 0.25*(AT[0,1]-AT[1,0])/q4
return [q4, q1, q2, q3]
def get_qE_(self, qT, qS):
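        # Error quaternion between target qT and current attitude qS, built from the quaternion product with the conjugate of qS.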
qT_ = np.matrix([[qT[0], qT[3], -qT[2], qT[1]],
[-qT[3], qT[0], qT[1], qT[2]],
[qT[2], -qT[1], qT[0], qT[3]],
[-qT[1], -qT[2], -qT[3], qT[0]]])
qS_ = np.matrix([[-qS[1]],
[-qS[2]],
[-qS[3]],
[qS[0]]])
qE = qT_*qS_
return [qE.item(0), qE.item(1), qE.item(2), qE.item(3)]
def get_qE(self, yawT, qS):
qT = self.get_qT(yawT)
qE = self.get_qE_(qT, qS)
return qE
if __name__ == '__main__':
rotation = Rotation()
# Test Example
# rotation.vectorRotationTest()
rotation.frameRotationTest() | [
"[email protected]"
] | |
f281fed287dbd357fea0ab3bb3bd35efc0794cf4 | 51d65cbed3df1e9e3a0d51f79590ee12f88291d1 | /object_detection/inference_over_image.py | 0bbbdb9954ca69ffd0cf92de7a7cbb7577cf8043 | [
"MIT"
] | permissive | apacha/Mensural-Detector | f9332c23854263c6a3f89e8b92f3f666f8377ed8 | 05c91204cf268feaae84cd079dbe7a1852fba216 | refs/heads/master | 2022-09-23T21:20:53.376367 | 2022-08-31T08:36:35 | 2022-08-31T08:36:35 | 137,372,669 | 12 | 6 | null | null | null | null | UTF-8 | Python | false | false | 6,444 | py | import numpy as np
import tensorflow as tf
import argparse
from PIL import Image
from object_detection.utils import ops as utils_ops, label_map_util, visualization_utils as vis_util
if tf.__version__ < '1.4.0':
raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
def load_detection_graph(path_to_checkpoint):
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(path_to_checkpoint, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
def load_category_index(path_to_labels, number_of_classes):
# Load label map
label_map = label_map_util.load_labelmap(path_to_labels)
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=number_of_classes,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Performs detection over input image given a trained detector.')
parser.add_argument('--inference_graph', dest='inference_graph', type=str, required=True,
help='Path to the frozen inference graph.')
parser.add_argument('--label_map', dest='label_map', type=str, required=True,
help='Path to the label map, which is json-file that maps each category name to a unique number.',
default="mapping.txt")
parser.add_argument('--number_of_classes', dest='number_of_classes', type=int, default=32,
help='Number of classes.')
parser.add_argument('--input_image', dest='input_image', type=str, required=True, help='Path to the input image.')
parser.add_argument('--output_image', dest='output_image', type=str, default='detection.jpg',
help='Path to the output image.')
args = parser.parse_args()
# Path to frozen detection graph. This is the actual model that is used for the object detection.
# PATH_TO_CKPT = '/home/jcalvo/Escritorio/Current/Mensural Detector/mensural-detector/output_inference_graph.pb/frozen_inference_graph.pb'
path_to_frozen_inference_graph = args.inference_graph
path_to_labels = args.label_map
number_of_classes = args.number_of_classes
input_image = args.input_image
output_image = args.output_image
# Read frozen graph
detection_graph = load_detection_graph(path_to_frozen_inference_graph)
category_index = load_category_index(path_to_labels, number_of_classes)
image = Image.open(input_image)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Actual detection.
output_dict = run_inference_for_single_image(image_np, detection_graph)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=2)
Image.fromarray(image_np).save(output_image)
| [
"[email protected]"
] | |
524db47926d6c1b18a65735cec61aad5f9e91b97 | d2c163f246d28b8519f8c89de23556e43be91684 | /www/ad_board/urls.py | 9309b9dfb201f43c13a2ec3d393148de00aea612 | [] | no_license | boogiiieee/Iskcon | d7a2b8bdc3002ef3306fc5e7ddc577504d8533c9 | b672dbafee06af3ee6d646c75f442d97133f5ec9 | refs/heads/master | 2021-09-04T03:11:06.770094 | 2018-01-15T04:21:36 | 2018-01-15T04:21:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('ad_board.views',
url(r'^$', 'full', name='ad_board_url'),
url(r'^category/(?P<id>[0-9]+)/$', 'category', name='category_ad_board_url'),
url(r'^(?P<id>[0-9]+)/$', 'item', name='ad_board_item_url'),
url(r'^category/(?P<id>[0-9]+)/add/$', 'add', name='add_ad_board_url'),
)
| [
"[email protected]"
] | |
198442838c9414d3f62f9b0af071a325589a66ae | 8840b69e4341f4ed030c8b33151db205b8db3640 | /flask_minijax.py | a5036e1c916ae910ed2af7e28ecdc01b86534110 | [
"MIT"
] | permissive | FidgetYou/proj3-anagrams | b5fe7ccc333bca0895c12590142b9f0e30f10b83 | 86923a696794b7098940023d57aaef679a52b3ac | refs/heads/master | 2021-01-11T01:03:32.507679 | 2016-10-18T01:58:25 | 2016-10-18T01:58:25 | 70,846,302 | 0 | 0 | null | 2016-10-13T20:39:51 | 2016-10-13T20:39:50 | null | UTF-8 | Python | false | false | 1,317 | py | """
Tiny demo of Ajax interaction
"""
import flask
from flask import request # Data from a submitted form
from flask import url_for
from flask import jsonify # For AJAX transactions
import json
import logging
import argparse # For the vocabulary list
import sys
###
# Globals
###
app = flask.Flask(__name__)
import CONFIG
app.secret_key = CONFIG.secret_key # Should allow using session variables
###
# Pages
###
@app.route("/")
def index():
return flask.render_template('minijax.html')
###############
# AJAX request handlers
# These return JSON to the JavaScript function on
# an existing page, rather than rendering a new page.
###############
@app.route("/_countem")
def countem():
text = request.args.get("text", type=str)
length = len(text)
rslt = { "long_enough": length >= 5 }
return jsonify(result=rslt)
#############
# Run locally
if __name__ == "__main__":
# Standalone.
app.debug = True
app.logger.setLevel(logging.DEBUG)
print("Opening for global access on port {}".format(CONFIG.PORT))
app.run(port=CONFIG.PORT, host="0.0.0.0")
# If we run 'python3 flask_minijax.py, we get the above 'main'.
# If we run 'gunicorn flask_minijax:app', we instead get a
# 'main' inside gunicorn, which loads this file as a module
# and accesses the Flask 'app' object.
#
| [
"[email protected]"
] | |
6d346848a2eed9d5be67fdb017a17285227f874a | bd5a3b59a5ca9f0c0394c8bf90e818c3967778d9 | /vre/apps/xauth/urls.py | 2ba5dfc62bf27aafa163e3cf36365c4b0ea01be0 | [] | no_license | BlickLabs/vre | 85f377c04406c163464f7ddade7eafb579f1dfb1 | 6f3644fb9295f6355057cfa64a1156a329b4b4b8 | refs/heads/develop | 2020-05-22T04:28:31.913667 | 2018-07-06T21:12:14 | 2018-07-06T21:12:14 | 62,763,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
url(regex=r'^login/$',
view=views.LoginView.as_view(),
name='login'),
url(regex=r'^logout/$',
view=views.logout_view,
name='logout'),
]
| [
"[email protected]"
] | |
de57cedbc86dec255b93ebc77daf153a873f5256 | 1422a57e98aba02321b772d72f8f0ada6d8b8cba | /friday/friday-vendor/vendor-scripts/test-resources/scripts/pylib/hue_turn_on_light.py | 152b15f1a6ee7c7306946bab089ea4f1578d9421 | [
"MIT"
] | permissive | JonasRSV/Friday | e1908a411aa133bc5bd2f383b0a995f7e028092d | f959eff95ba7b11525f97099c8f5ea0e325face7 | refs/heads/main | 2023-05-15T03:33:21.542621 | 2021-06-12T10:34:50 | 2021-06-12T10:34:50 | 315,309,991 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import phue
import sys
if __name__ == "__main__":
b = phue.Bridge(config_file_path="credentials.json")
b.set_light(int(sys.argv[1]), parameter={"on": True, "bri": 200}, transitiontime=5)
| [
"[email protected]"
] | |
1a94d4955bc1347ae86d5992a523abcfbfb17267 | 5da2c116d3d0dc4f3811cec144c9f8b5a74afede | /lncrawl/assets/user_agents.py | fbec17aabe02c7b79f52106cf5ee397fca225e17 | [
"Apache-2.0"
] | permissive | NNTin/lightnovel-crawler | a08bd252f2e72f41f931f0b2165f906b64d33692 | 451e816ab03c8466be90f6f0b3eaa52d799140ce | refs/heads/master | 2021-06-23T12:07:43.668329 | 2021-04-25T01:51:26 | 2021-04-25T01:51:26 | 361,695,538 | 2 | 0 | Apache-2.0 | 2021-04-26T16:48:21 | 2021-04-26T09:40:46 | null | UTF-8 | Python | false | false | 6,302 | py | # -*- coding: utf-8 -*-
user_agents = [
# "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0",
# "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1",
# "Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36",
# "Mozilla/5.0 (Linux; Android 8.0.0; SM-G960F Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.84 Mobile Safari/537.36",
# "Mozilla/5.0 (Linux; Android 6.0.1; Nexus 6P Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36",
# "Mozilla/5.0 (Linux; Android 6.0; HTC One M9 Build/MRA58K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.3",
# "Mozilla/5.0 (Linux; Android 7.0; Pixel C Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.98 Safari/537.36",
# "Mozilla/5.0 (Linux; Android 6.0.1; SHIELD Tablet K1 Build/MRA58K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/55.0.2883.91 Safari/537.36",
# "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
# "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/69.0.3497.105 Mobile/15E148 Safari/605.1",
# "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/13.2b11866 Mobile/16A366 Safari/605.1.15",
# "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
# "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) Version/11.0 Mobile/15A5341f Safari/604.1",
# "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A5370a Safari/604.1",
# "Mozilla/5.0 (iPhone9,3; U; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1",
# "Mozilla/5.0 (Windows Phone 10.0; Android 6.0.1; Microsoft; RM-1152) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Mobile Safari/537.36 Edge/15.15254",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36"
]
| [
"[email protected]"
] | |
c43501f1134f44d9e0c3c38a8ce719ea17e5bbcb | 3253da5603971958d69df0ed442e3341a8d3bff4 | /1-Iniciante/1914.py | 67fa34c039b20ad33bd528808a4ce2d4016000af | [] | no_license | CleitonSilvaT/URI_Python | 1c73ec0852ae87c6138baa148ad8c2cb56bb723e | a8510bab2fa8f680b54058fafebff3a2727617d9 | refs/heads/master | 2021-06-20T08:18:50.104839 | 2021-05-20T08:59:19 | 2021-05-20T08:59:19 | 213,665,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | # -*- coding: utf-8 -*-
if __name__ == '__main__':
    # Read the number of test cases
    casos_teste = int(input())
    while(casos_teste > 0):
        # Read the two players' names and their choices
        dados = input()
        escolha = dados.split(' ')
        # player 1 name   - escolha[0]
        # player 1 choice - escolha[1]
        # player 2 name   - escolha[2]
        # player 2 choice - escolha[3]
        # Read the two numbers played
        valores = input()
        numeros = valores.split(' ')
        # Sum of the two values
        total = int(numeros[0]) + int(numeros[1])
        # Determine whether the sum is even (PAR) or odd (IMPAR)
        if((total % 2) == 0):
            # Print the winner
            if(escolha[1] == 'PAR'):
                print(escolha[0])
            else:
                print(escolha[2])
        else:
            # Print the winner
            if(escolha[1] == 'IMPAR'):
                print(escolha[0])
            else:
                print(escolha[2])
        casos_teste -= 1 | [
"[email protected]"
] | |
b672c87e3458490ceb0e8b3852355a8c15a2c399 | d1fadc514274711a7986a6b3caaaee7e8d48b4a6 | /plot_scripts/scratch29.py | 9b454212d7485e7e1237f495490e6b1a3e2c0169 | [
"MIT"
] | permissive | lbaiao/sys-simulator-2 | 24d940db6423070818c23b6ffefbc5da4a1030a0 | 94f00d43309fe7b56dac5099bd4024695ba317b6 | refs/heads/master | 2021-08-20T08:30:06.864473 | 2021-06-30T10:37:26 | 2021-06-30T10:37:26 | 230,333,523 | 1 | 0 | null | 2021-06-30T10:37:27 | 2019-12-26T22:02:59 | Jupyter Notebook | UTF-8 | Python | false | false | 1,688 | py | import pickle
import matplotlib.pyplot as plt
import numpy as np
filepath = 'D:/Dev/sys-simulator-2/data/scratch29.pickle'
file = open(filepath, 'rb')
data = pickle.load(file)
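# Added note: this script loads pickled simulation results and plots (i) the
# average D2D spectral efficiency alongside the MUE success rate, (ii) the
# mean/std of per-action occurrence for the pair counts listed in aux_range,
# and (iii) the mean/std of equal-action occurrences.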
aux_range = [10,15,20]
action_counts_total = data['action_counts_total']
d2d_spectral_effs = data['d2d_speffs_avg_total']
mue_success_rate = data['mue_success_rate']
equals_counts_total = data['equals_counts_total']
d2d_speffs_avg = list()
for i, d in enumerate(d2d_spectral_effs):
d2d_speffs_avg.append(np.average(d))
fig2, ax1 = plt.subplots()
ax1.set_xlabel('Number of D2D pairs in the RB')
ax1.set_ylabel('D2D Average Spectral Efficiency [bps/Hz]', color='tab:blue')
ax1.plot(d2d_speffs_avg, '.', color='tab:blue')
ax2 = ax1.twinx()
ax2.set_ylabel('MUE Success Rate', color='tab:red')
ax2.plot(mue_success_rate, '.', color='tab:red')
fig2.tight_layout()
xi = list(range(len(aux_range)))
ax = [0,1,2,3,4]
axi = list(range(len(ax)))
for i, c in enumerate(action_counts_total):
if i in aux_range:
plt.figure()
plt.plot(np.mean(c, axis=0)/i*100, '*',label='mean')
plt.plot(np.std(c, axis=0)/i*100, 'x', label='std')
plt.legend()
plt.title(f'N={i}')
plt.xlabel('Action Index')
plt.ylabel('Average Action Ocurrency [%]')
plt.xticks(axi, ax)
mean_equals = np.array([np.mean(c) for c in equals_counts_total])
std_equals = np.array([np.std(c) for c in equals_counts_total])
plt.figure()
plt.plot(mean_equals[aux_range]*100, '*',label='mean')
plt.plot(std_equals[aux_range]*100, 'x', label='std')
plt.legend()
plt.xlabel('Amount of D2D Devices')
plt.ylabel('Average Equal Actions Ocurrency [%]')
plt.xticks(xi, aux_range)
plt.show() | [
"[email protected]"
] | |
0ec404b9b92a1950ead916d9356841cf3bb18eb4 | d7bf691c35d7bf2a5707e47d7aca98b509e02eb9 | /pddlstream/algorithms/algorithm.py | 7a29c0eba6f399ea3752c4684788b164a65873f9 | [
"MIT"
] | permissive | himanshisyadav/pddlstream | 7d43c16da903504a0232408a7d8077fd4da95d87 | 1038e702f1d4625791f1da7867d6226b02af8c3a | refs/heads/master | 2020-04-11T11:48:19.324553 | 2018-11-14T18:28:27 | 2018-11-14T18:28:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,291 | py | import time
from collections import OrderedDict, deque, namedtuple, Counter
from pddlstream.algorithms.downward import parse_domain, get_problem, task_from_domain_problem, \
parse_lisp, sas_from_pddl, parse_goal
from pddlstream.algorithms.search import abstrips_solve_from_task
from pddlstream.language.constants import get_prefix, get_args
from pddlstream.language.conversion import obj_from_value_expression, obj_from_pddl_plan, \
evaluation_from_fact, substitute_expression
from pddlstream.language.exogenous import compile_to_exogenous, replace_literals
from pddlstream.language.external import External, DEBUG, get_plan_effort
from pddlstream.language.function import parse_function, parse_predicate, Function, Predicate
from pddlstream.language.object import Object
from pddlstream.language.rule import parse_rule
from pddlstream.language.stream import parse_stream, Stream
from pddlstream.utils import elapsed_time, INF, get_mapping, find_unique, get_length, str_from_plan
from pddlstream.language.optimizer import parse_optimizer, VariableStream, ConstraintStream
# TODO: way of programmatically specifying streams/actions
INITIAL_EVALUATION = None
def parse_constants(domain, constant_map):
obj_from_constant = {}
for constant in domain.constants:
if constant.name.startswith(Object._prefix): # TODO: check other prefixes
raise NotImplementedError('Constants are not currently allowed to begin with {}'.format(Object._prefix))
if constant.name not in constant_map:
raise ValueError('Undefined constant {}'.format(constant.name))
value = constant_map.get(constant.name, constant.name)
obj_from_constant[constant.name] = Object(value, name=constant.name) # TODO: remap names
# TODO: add object predicate
for name in constant_map:
for constant in domain.constants:
if constant.name == name:
break
else:
raise ValueError('Constant map value {} not mentioned in domain :constants'.format(name))
del domain.constants[:] # So not set twice
return obj_from_constant
def check_problem(domain, streams, obj_from_constant):
for action in domain.actions + domain.axioms:
for p, c in Counter(action.parameters).items():
if c != 1:
raise ValueError('Parameter [{}] for action [{}] is not unique'.format(p.name, action.name))
# TODO: check that no undeclared parameters & constants
#action.dump()
for stream in streams:
# TODO: domain.functions
facts = list(stream.domain)
if isinstance(stream, Stream):
facts.extend(stream.certified)
for fact in facts:
name = get_prefix(fact)
if name not in domain.predicate_dict: # Undeclared predicate: {}
print('Warning! Undeclared predicate used in stream [{}]: {}'.format(stream.name, name))
elif len(get_args(fact)) != domain.predicate_dict[name].get_arity(): # predicate used with wrong arity: {}
print('Warning! predicate used with wrong arity in stream [{}]: {}'.format(stream.name, fact))
for constant in stream.constants:
if constant not in obj_from_constant:
raise ValueError('Undefined constant in stream [{}]: {}'.format(stream.name, constant))
def parse_problem(problem, stream_info={}):
# TODO: just return the problem if already written programmatically
domain_pddl, constant_map, stream_pddl, stream_map, init, goal = problem
domain = parse_domain(domain_pddl)
if len(domain.types) != 1:
raise NotImplementedError('Types are not currently supported')
obj_from_constant = parse_constants(domain, constant_map)
streams = parse_stream_pddl(stream_pddl, stream_map, stream_info)
evaluations = OrderedDict((evaluation_from_fact(obj_from_value_expression(f)), INITIAL_EVALUATION) for f in init)
goal_expression = obj_from_value_expression(goal)
check_problem(domain, streams, obj_from_constant)
parse_goal(goal_expression, domain) # Just to check that it parses
#normalize_domain_goal(domain, goal_expression)
# TODO: refactor the following?
compile_to_exogenous(evaluations, domain, streams)
compile_fluent_streams(domain, streams)
enforce_simultaneous(domain, streams)
return evaluations, goal_expression, domain, streams
##################################################
def get_predicates(expression):
import pddl.conditions
if isinstance(expression, pddl.conditions.ConstantCondition):
return set()
if isinstance(expression, pddl.conditions.JunctorCondition) or \
isinstance(expression, pddl.conditions.QuantifiedCondition):
predicates = set()
for part in expression.parts:
predicates.update(get_predicates(part))
return predicates
if isinstance(expression, pddl.conditions.Literal):
return {expression.predicate}
raise ValueError(expression)
def enforce_simultaneous(domain, externals):
axiom_predicates = set()
for axiom in domain.axioms:
axiom_predicates.update(get_predicates(axiom.condition))
for external in externals:
if (type(external) in [VariableStream, ConstraintStream]) and not external.info.simultaneous:
predicates = {get_prefix(fact) for fact in external.certified}
if predicates & axiom_predicates:
external.info.simultaneous = True
#print(external, (predicates & axiom_predicates))
##################################################
def has_costs(domain):
for action in domain.actions:
if action.cost is not None:
return True
return False
def solve_finite(evaluations, goal_expression, domain, unit_costs=None, debug=False, **kwargs):
if unit_costs is None:
unit_costs = not has_costs(domain)
problem = get_problem(evaluations, goal_expression, domain, unit_costs)
task = task_from_domain_problem(domain, problem)
sas_task = sas_from_pddl(task, debug=debug)
plan_pddl, cost = abstrips_solve_from_task(sas_task, debug=debug, **kwargs)
return obj_from_pddl_plan(plan_pddl), cost
##################################################
Solution = namedtuple('Solution', ['plan', 'cost'])
class SolutionStore(object):
def __init__(self, max_time, max_cost, verbose):
# TODO: store evaluations here as well as map from head to value?
self.start_time = time.time()
self.max_time = max_time
#self.cost_fn = get_length if unit_costs else None
self.max_cost = max_cost
self.verbose = verbose
self.best_plan = None
self.best_cost = INF
#self.best_cost = self.cost_fn(self.best_plan)
self.solutions = []
def add_plan(self, plan, cost):
# TODO: double-check that this is a solution
self.solutions.append(Solution(plan, cost))
if cost < self.best_cost:
self.best_plan = plan
self.best_cost = cost
def is_solved(self):
return self.best_cost < self.max_cost
def elapsed_time(self):
return elapsed_time(self.start_time)
def is_timeout(self):
return self.max_time <= self.elapsed_time()
def is_terminated(self):
return self.is_solved() or self.is_timeout()
def add_facts(evaluations, facts, result=None):
    new_evaluations = []
    for fact in facts:
        evaluation = evaluation_from_fact(fact)
        if evaluation not in evaluations:
            evaluations[evaluation] = result
            new_evaluations.append(evaluation)
    return new_evaluations
def add_certified(evaluations, result):
return add_facts(evaluations, result.get_certified(), result=result)
##################################################
def get_domain_predicates(external):
return set(map(get_prefix, external.domain))
def get_certified_predicates(external):
if isinstance(external, Stream):
return set(map(get_prefix, external.certified))
if isinstance(external, Function):
return {get_prefix(external.head)}
raise ValueError(external)
def get_non_producers(externals):
# TODO: handle case where no domain conditions
pairs = set()
for external1 in externals:
for external2 in externals:
if get_certified_predicates(external1) & get_domain_predicates(external2):
pairs.add((external1, external2))
producers = {e1 for e1, _ in pairs}
non_producers = set(externals) - producers
# TODO: these are streams that be evaluated at the end as tests
return non_producers
##################################################
def apply_rules_to_streams(rules, streams):
    # TODO: could do this with multiple conditions if the stream's certified facts contain all of them
# TODO: do also when no domain conditions
processed_rules = deque(rules)
while processed_rules:
rule = processed_rules.popleft()
if len(rule.domain) != 1:
continue
[rule_fact] = rule.domain
rule.info.p_success = 0 # Need not be applied
for stream in streams:
if not isinstance(stream, Stream):
continue
for stream_fact in stream.certified:
if get_prefix(rule_fact) == get_prefix(stream_fact):
mapping = get_mapping(get_args(rule_fact), get_args(stream_fact))
new_facts = set(substitute_expression(rule.certified, mapping)) - set(stream.certified)
stream.certified = stream.certified + tuple(new_facts)
if new_facts and (stream in rules):
processed_rules.append(stream)
def parse_streams(streams, rules, stream_pddl, procedure_map, procedure_info):
stream_iter = iter(parse_lisp(stream_pddl))
assert('define' == next(stream_iter))
pddl_type, pddl_name = next(stream_iter)
assert('stream' == pddl_type)
for lisp_list in stream_iter:
name = lisp_list[0] # TODO: refactor at this point
if name in (':stream', ':wild-stream'):
externals = [parse_stream(lisp_list, procedure_map, procedure_info)]
elif name == ':rule':
externals = [parse_rule(lisp_list, procedure_map, procedure_info)]
elif name == ':function':
externals = [parse_function(lisp_list, procedure_map, procedure_info)]
elif name == ':predicate': # Cannot just use args if want a bound
externals = [parse_predicate(lisp_list, procedure_map, procedure_info)]
elif name == ':optimizer':
externals = parse_optimizer(lisp_list, procedure_map, procedure_info)
else:
raise ValueError(name)
for external in externals:
if any(e.name == external.name for e in streams):
raise ValueError('Stream [{}] is not unique'.format(external.name))
if name == ':rule':
rules.append(external)
external.pddl_name = pddl_name # TODO: move within constructors
streams.append(external)
def parse_stream_pddl(pddl_list, procedures, infos):
streams = []
if pddl_list is None:
return streams
if isinstance(pddl_list, str):
pddl_list = [pddl_list]
#if all(isinstance(e, External) for e in stream_pddl):
# return stream_pddl
if procedures != DEBUG:
procedures = {k.lower(): v for k, v in procedures.items()}
infos = {k.lower(): v for k, v in infos.items()}
rules = []
for pddl in pddl_list:
parse_streams(streams, rules, pddl, procedures, infos)
apply_rules_to_streams(rules, streams)
return streams
##################################################
def compile_fluent_streams(domain, externals):
state_streams = list(filter(lambda e: isinstance(e, Stream) and
(e.is_negated() or e.is_fluent()), externals))
predicate_map = {}
for stream in state_streams:
for fact in stream.certified:
predicate = get_prefix(fact)
assert predicate not in predicate_map # TODO: could make a conjunction condition instead
predicate_map[predicate] = stream
if not predicate_map:
return state_streams
# TODO: could make free parameters free
# TODO: allow functions on top the produced values?
# TODO: check that generated values are not used in the effects of any actions
# TODO: could treat like a normal stream that generates values (but with no inputs required/needed)
    import pddl  # deferred import, placed before fn() which uses pddl.Conjunction
    def fn(literal):
        if literal.predicate not in predicate_map:
            return literal
        # TODO: other checks on only inputs
        stream = predicate_map[literal.predicate]
        certified = find_unique(lambda f: get_prefix(f) == literal.predicate, stream.certified)
        mapping = get_mapping(get_args(certified), literal.args)
        #assert all(arg in mapping for arg in stream.inputs) # Certified must contain all inputs
        if not all(arg in mapping for arg in stream.inputs):
            # TODO: this excludes typing. This is not entirely safe
            return literal
        blocked_args = tuple(mapping[arg] for arg in stream.inputs)
        blocked_literal = literal.__class__(stream.blocked_predicate, blocked_args).negate()
        if stream.is_negated():
            # TODO: add stream conditions here
            return blocked_literal
        return pddl.Conjunction([literal, blocked_literal])
for action in domain.actions:
action.precondition = replace_literals(fn, action.precondition).simplified()
# TODO: throw an error if the effect would be altered
for effect in action.effects:
if not isinstance(effect.condition, pddl.Truth):
raise NotImplementedError(effect.condition)
#assert(isinstance(effect, pddl.Effect))
#effect.condition = replace_literals(fn, effect.condition)
for axiom in domain.axioms:
axiom.condition = replace_literals(fn, axiom.condition).simplified()
return state_streams
def dump_plans(stream_plan, action_plan, cost):
print('Stream plan ({}, {:.1f}): {}\nAction plan ({}, {}): {}'.format(get_length(stream_plan),
get_plan_effort(stream_plan),
stream_plan,
get_length(action_plan), cost,
str_from_plan(action_plan)))
def partition_externals(externals):
functions = list(filter(lambda s: type(s) is Function, externals))
predicates = list(filter(lambda s: type(s) is Predicate, externals)) # and s.is_negative()
negated_streams = list(filter(lambda s: (type(s) is Stream) and s.is_negated(), externals)) # and s.is_negative()
negative = predicates + negated_streams
streams = list(filter(lambda s: s not in (functions + negative), externals))
#optimizers = list(filter(lambda s: type(s) in [VariableStream, ConstraintStream], externals))
return streams, functions, negative #, optimizers
| [
"[email protected]"
] | |
2445240430a4f61b9f76afca22102c4397f33bd7 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/gkioxari_RstarCNN/RstarCNN-master/lib/datasets/attr_bpad.py | 1d8c0fb80696afdd175613117b34dc6d6c4573fd | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 10,478 | py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) Microsoft. All rights reserved.
# Written by Ross Girshick, 2015.
# Licensed under the BSD 2-clause "Simplified" license.
# See LICENSE in the project root for license information.
# --------------------------------------------------------
# --------------------------------------------------------
# R*CNN
# Written by Georgia Gkioxari, 2015.
# See LICENSE in the project root for license information.
# --------------------------------------------------------
import datasets.pascal_voc
import os
import datasets.imdb
import xml.dom.minidom as minidom
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import cPickle
import subprocess
import pdb
class attr_bpad(datasets.imdb):
def __init__(self, image_set, devkit_path=None):
datasets.imdb.__init__(self, 'bpad_' + image_set)
self._year = '2015'
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._base_path = os.path.join(self._devkit_path, 'BAPD')
self._classes = ('is_male', 'has_long_hair', 'has_glasses',
'has_hat', 'has_tshirt', 'has_long_sleeves',
'has_shorts', 'has_jeans', 'has_long_pants')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.selective_search_roidb
# PASCAL specific config options
self.config = {'cleanup' : True,
'use_salt' : True}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._base_path), \
'Path does not exist: {}'.format(self._base_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._base_path, 'Images',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
        # self._base_path + /selective_search/ss_attributes_<image_set>.mat
image_set_file = os.path.join(self._base_path, 'selective_search',
'ss_attributes_' + self._image_set + '.mat')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
raw_data = sio.loadmat(image_set_file)
images = raw_data['images'].ravel()
image_index = [im[0].strip() for im in images]
return image_index
def _get_default_path(self):
"""
Return the default path where data is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
# Load all annotation file data (should take < 30 s).
gt_roidb = self._load_annotation()
# print number of ground truth classes
cc = np.zeros(len(self._classes), dtype = np.int16)
for i in xrange(len(gt_roidb)):
gt_classes = gt_roidb[i]['gt_classes']
num_objs = gt_classes.shape[0]
for n in xrange(num_objs):
valid_classes = np.where(gt_classes[n] == 1)[0]
cc[valid_classes] +=1
for ic,nc in enumerate(cc):
print "Count {:s} : {:d}".format(self._classes[ic], nc)
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = self._merge_roidbs(gt_roidb, ss_roidb)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def _merge_roidbs(self, a, b):
assert len(a) == len(b)
for i in xrange(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.vstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
return a
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.join(self._base_path, 'selective_search',
'ss_attributes_' + self._image_set + '.mat')
# filename = op.path.join(self.cache_path, 'MCG_data', self.name + '.mat')
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)
num_images = raw_data['boxes'].ravel().shape[0]
ss_roidb = []
for i in xrange(num_images):
boxes = raw_data['boxes'].ravel()[i][:, (1, 0, 3, 2)] - 1
num_boxes = boxes.shape[0]
gt_boxes = gt_roidb[i]['boxes']
num_objs = gt_boxes.shape[0]
gt_classes = gt_roidb[i]['gt_classes']
gt_overlaps = \
utils.cython_bbox.bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
overlaps = scipy.sparse.csr_matrix(gt_overlaps)
ss_roidb.append({'boxes' : boxes,
'gt_classes' : np.zeros((num_boxes, self.num_classes),
dtype=np.int32),
'gt_overlaps' : overlaps,
'flipped' : False})
return ss_roidb
def _load_annotation(self):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
gt_roidb = []
filename = os.path.join(self._base_path, 'ground_truth',
'gt_attributes_' + self._image_set + '.mat')
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename, mat_dtype=True)
all_boxes = raw_data['boxes'].ravel()
all_images = raw_data['images'].ravel()
all_attributes = raw_data['attributes'].ravel()
num_images = len(all_images)
for imi in xrange(num_images):
num_objs = all_boxes[imi].shape[0]
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
overlaps = np.zeros((num_objs, num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for i in xrange(num_objs):
# Make pixel indexes 0-based
box = all_boxes[imi][i]
assert(not np.any(np.isnan(box)))
# Read attributes labels
attr = all_attributes[imi][i]
# Change attributes labels
# -1 -> 0
# 0 -> -1
unknown_attr = attr == 0
neg_attr = attr == -1
attr[neg_attr] = 0
attr[unknown_attr] = -1
boxes[i, :] = box - 1
gt_classes[i, :] = attr
overlaps[i, i] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
gt_roidb.append({'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False})
return gt_roidb
def _write_results_file(self, all_boxes, comp):
path = os.path.join(self._devkit_path, 'results', 'BAPD')
        print 'Writing results file'
filename = path + comp + '.txt'
with open(filename, 'wt') as f:
for i in xrange(all_boxes.shape[0]):
ind = all_boxes[i,0].astype(np.int64)
index = self.image_index[ind-1]
voc_id = all_boxes[i,1].astype(np.int64)
f.write('{:s} {:d}'.format(index, voc_id))
for cli in xrange(self.num_classes):
score = all_boxes[i,2+cli]
f.write(' {:.3f}'.format(score))
f.write('\n')
if __name__ == '__main__':
d = datasets.pascal_voc('trainval', '2012')
res = d.roidb
from IPython import embed; embed()
| [
"[email protected]"
] | |
6e412c2830f0c0210c5542502eff73dfa2776a76 | 1b78ca7f3250ebed418717c6ea28b5a77367f1b8 | /411.minimum-unique-word-abbreviation/minimum-unique-word-abbreviation.py | 70887cecba089f780017d17a96ca6739c187979c | [] | no_license | JaniceLC/lc-all-solutions | ced854f31b94f44c0b03a0677988805e3b9ee718 | 3f2a4ee8c09a8890423c6a22c73f470eccf979a2 | refs/heads/master | 2020-04-05T19:53:31.307528 | 2018-11-12T04:18:45 | 2018-11-12T04:18:45 | 157,155,285 | 0 | 2 | null | 2018-11-12T04:13:22 | 2018-11-12T04:13:22 | null | UTF-8 | Python | false | false | 1,290 | py | class Solution(object):
def minAbbreviation(self, target, dictionary):
"""
:type target: str
:type dictionary: List[str]
:rtype: str
"""
def dfs(w, start, res):
res.append(w)
for i in xrange(start, len(w)):
for l in reversed(xrange(1, len(w) - i + 1)):
dfs(w[:i] + [str(l)] + w[i+l:], i + 2, res)
def match(src, dest):
i = 0
for c in src:
if c.isdigit():
jump = int(c)
i += jump
else:
if c != dest[i]:
return False
i += 1
return True
if not dictionary:
return str(len(target))
wordLen = len(target)
res = []
dfs(list(target), 0, res)
res.sort(key=lambda x:len(x))
dictionary = filter(lambda s: len(s) == wordLen, dictionary)
for w in res:
allMiss = True
for d in dictionary:
if match(w, d):
allMiss = False
break
if allMiss:
return "".join(w)
return None | [
"[email protected]"
] | |
8cf1337f8036de2054ba11a4c1ef5921ff9e2863 | 641f76328bfeb7e54f0793a18c5b7c00595b98fd | /apps/goods/migrations/0015_auto_20181019_1007.py | a9bf43d5073534905d8a89c4b1ee68ce1ac10451 | [
"Apache-2.0"
] | permissive | lianxiaopang/camel-store-api | 1d16060af92eb01607757c0423377a8c94c3a726 | b8021250bf3d8cf7adc566deebdba55225148316 | refs/heads/master | 2020-12-29T13:23:18.118617 | 2020-02-09T08:38:53 | 2020-02-09T08:38:53 | 238,621,246 | 0 | 0 | Apache-2.0 | 2020-02-07T14:28:35 | 2020-02-06T06:17:47 | Python | UTF-8 | Python | false | false | 1,439 | py | # Generated by Django 2.1.2 on 2018-10-19 02:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0014_auto_20181011_1646'),
]
operations = [
migrations.AlterModelOptions(
name='goodscategory',
options={'ordering': ('index', '-is_active'), 'verbose_name': '商品类别', 'verbose_name_plural': '商品类别'},
),
migrations.AlterModelOptions(
name='goodtype',
options={'ordering': ('index',), 'verbose_name': '商品规格', 'verbose_name_plural': '商品规格'},
),
migrations.AddField(
model_name='goodscategory',
name='index',
field=models.PositiveSmallIntegerField(default=0, verbose_name='优先级'),
),
migrations.AddField(
model_name='goodscategory',
name='is_active',
field=models.BooleanField(default=True, verbose_name='是否启用'),
),
migrations.AddField(
model_name='goodtype',
name='asset_ratio',
field=models.PositiveSmallIntegerField(default=0, help_text='单位:%', verbose_name='返利比例'),
),
migrations.AddField(
model_name='goodtype',
name='index',
field=models.PositiveSmallIntegerField(default=0, verbose_name='优先级'),
),
]
| [
"[email protected]"
] | |
d01b1468d7aaf781d587e8b861611e92d26f28dd | e8f99a162207cba82d4e0f969d7bcdb2b9d8b522 | /imooc/python3_shizhan/ten/c1.py | 6a78a3e875eb35796ea35e07c606f9f44d0ef637 | [] | no_license | TesterCC/Python3Scripts | edb5446278ebf13edb64336001081941ca27d67d | 58be67e1ffc74ef50289a885aa4ad05f58e2c383 | refs/heads/master | 2023-08-30T21:16:38.328045 | 2023-08-17T11:23:08 | 2023-08-17T11:23:08 | 93,401,996 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '18/5/2 21:48'
"""
Chapter 10: Regular Expressions and JSON
Regular expressions
JSON and XML
A regular expression is a special sequence of characters used to test whether
a string matches a pattern that we define.
It lets us quickly search text and perform replacements, for example:
1. check whether a string of digits is a phone number
2. check whether a string is a valid email address
3. replace a given word in a text with another word
If you are fluent with regular expressions, you can do without many of the
built-in string methods.
"""
a = 'C|C++|Java|C#|Python|Javascript'
# Built-in string operations, used here to check whether the string contains 'Python'
print(a.index('Python'))
print(a.index('Python') > -1)
print('Python' in a)
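# --- Added illustration (not part of the original lesson): the same
# --- containment check written with the re module that this chapter
# --- introduces; re.search returns a match object, or None if nothing matches.
import re
print(re.search(r'Python', a) is not None)   # True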
| [
"[email protected]"
] | |
b4e2926b4134199eaadf96a67e52631ed4a9bbce | 427200bdf814d859665f449542fe6c9c1de5a96c | /doc/source/conf.py | a9715d0ad0714672009bacc401a85b5984fd9da9 | [
"BSD-3-Clause"
] | permissive | giltis/pyRafters | c54f6c4c8f02370ad168a3c90d1ce490077b5d78 | 94bf0e1d671ce58f6cbc09600e99a6d2a4b0127c | refs/heads/master | 2021-01-22T13:22:19.768905 | 2014-03-28T13:40:24 | 2014-03-28T13:40:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,737 | py | # -*- coding: utf-8 -*-
#
# PyLight documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 30 13:08:54 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyLight'
copyright = u'2014, Brookhaven National Lab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyLightdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PyLight.tex', u'PyLight Documentation',
u'Brookhaven National Lab', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pylight', u'PyLight Documentation',
[u'Brookhaven National Lab'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyLight', u'PyLight Documentation',
u'Brookhaven National Lab', 'PyLight', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| [
"[email protected]"
] | |
a502baacd568f4ec8f715ef459a5d0689434064b | 5e557741c8867bca4c4bcf2d5e67409211d059a3 | /torch/distributed/elastic/agent/server/local_elastic_agent.py | c84df1a8e434267abf07aca90210e89b834c1b00 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | Pandinosaurus/pytorch | a2bb724cfc548f0f2278b5af2fd8b1d2758adb76 | bb8978f605e203fbb780f03010fefbece35ac51c | refs/heads/master | 2023-05-02T20:07:23.577610 | 2021-11-05T14:01:30 | 2021-11-05T14:04:40 | 119,666,381 | 2 | 0 | NOASSERTION | 2021-11-05T19:55:56 | 2018-01-31T09:37:34 | C++ | UTF-8 | Python | false | false | 9,100 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import signal
import tempfile
from typing import Any, Dict, Optional, Tuple
from torch.distributed.elastic.agent.server.api import (
RunResult,
SimpleElasticAgent,
WorkerGroup,
WorkerSpec,
WorkerState,
)
from torch.distributed.elastic.metrics.api import prof
from torch.distributed.elastic.multiprocessing import PContext, start_processes
from torch.distributed.elastic.utils import macros
from torch.distributed.elastic.utils.logging import get_logger
log = get_logger()
class LocalElasticAgent(SimpleElasticAgent):
"""
An implementation of :py:class:`torchelastic.agent.server.ElasticAgent`
that handles host-local workers.
This agent is deployed per host and is configured to spawn ``n`` workers.
When using GPUs, ``n`` maps to the number of GPUs available on the host.
The local agent does not communicate to other local agents deployed on
other hosts, even if the workers may communicate inter-host. The worker id
is interpreted to be a local process. The agent starts and stops all worker
processes as a single unit.
The worker function and argument passed to the worker function must be
python multiprocessing compatible. To pass multiprocessing data structures
to the workers you may create the data structure in the same multiprocessing
context as the specified ``start_method`` and pass it as a function argument.
The ``exit_barrier_timeout`` specifies the amount of time (in seconds) to wait
for other agents to finish. This acts as a safety net to handle cases where
workers finish at different times, to prevent agents from viewing workers
that finished early as a scale-down event. It is strongly advised that the
user code deal with ensuring that workers are terminated in a synchronous
manner rather than relying on the exit_barrier_timeout.
Example launching function
::
def trainer(args) -> str:
return "do train"
def main():
start_method="spawn"
shared_queue= multiprocessing.get_context(start_method).Queue()
spec = WorkerSpec(
role="trainer",
local_world_size=nproc_per_process,
entrypoint=trainer,
args=("foobar",),
...<OTHER_PARAMS...>)
agent = LocalElasticAgent(spec, start_method)
results = agent.run()
if results.is_failed():
print("trainer failed")
else:
print(f"rank 0 return value: {results.return_values[0]}")
# prints -> rank 0 return value: do train
Example launching binary
::
def main():
spec = WorkerSpec(
role="trainer",
local_world_size=nproc_per_process,
entrypoint="/usr/local/bin/trainer",
args=("--trainer_args", "foobar"),
...<OTHER_PARAMS...>)
agent = LocalElasticAgent(spec)
results = agent.run()
if not results.is_failed():
print("binary launches do not have return values")
"""
def __init__(
self,
spec: WorkerSpec,
start_method="spawn",
exit_barrier_timeout: float = 300,
log_dir: Optional[str] = None,
):
super().__init__(spec, exit_barrier_timeout)
self._start_method = start_method
self._pcontext: Optional[PContext] = None
rdzv_run_id = spec.rdzv_handler.get_run_id()
self._log_dir = self._make_log_dir(log_dir, rdzv_run_id)
def _make_log_dir(self, log_dir: Optional[str], rdzv_run_id: str):
base_log_dir = log_dir or tempfile.mkdtemp(prefix="torchelastic_")
os.makedirs(base_log_dir, exist_ok=True)
dir = tempfile.mkdtemp(prefix=f"{rdzv_run_id}_", dir=base_log_dir)
log.info(f"log directory set to: {dir}")
return dir
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _stop_workers(self, worker_group: WorkerGroup) -> None:
self._shutdown()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
spec = worker_group.spec
store = worker_group.store
assert store is not None
master_addr, master_port = super()._get_master_addr_port(store)
restart_count = spec.max_restarts - self._remaining_restarts
use_agent_store = spec.rdzv_handler.get_backend() == "static"
args: Dict[int, Tuple] = {}
envs: Dict[int, Dict[str, str]] = {}
for worker in worker_group.workers:
local_rank = worker.local_rank
worker_env = {
"LOCAL_RANK": str(local_rank),
"RANK": str(worker.global_rank),
"GROUP_RANK": str(worker_group.group_rank),
"ROLE_RANK": str(worker.role_rank),
"ROLE_NAME": spec.role,
"LOCAL_WORLD_SIZE": str(spec.local_world_size),
"WORLD_SIZE": str(worker.world_size),
"GROUP_WORLD_SIZE": str(worker_group.group_world_size),
"ROLE_WORLD_SIZE": str(worker.role_world_size),
"MASTER_ADDR": master_addr,
"MASTER_PORT": str(master_port),
"TORCHELASTIC_RESTART_COUNT": str(restart_count),
"TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
"TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
"TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
"NCCL_ASYNC_ERROR_HANDLING": str(1),
}
if "OMP_NUM_THREADS" in os.environ:
worker_env["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
envs[local_rank] = worker_env
worker_args = list(spec.args)
worker_args = macros.substitute(worker_args, str(local_rank))
args[local_rank] = tuple(worker_args)
# scaling events do not count towards restarts (gets same attempt #)
# remove existing log dir if this restart is due to a scaling event
attempt_log_dir = os.path.join(self._log_dir, f"attempt_{restart_count}")
shutil.rmtree(attempt_log_dir, ignore_errors=True)
os.makedirs(attempt_log_dir)
assert spec.entrypoint is not None
self._pcontext = start_processes(
name=spec.role,
entrypoint=spec.entrypoint,
args=args,
envs=envs,
log_dir=attempt_log_dir,
start_method=self._start_method,
redirects=spec.redirects,
tee=spec.tee,
)
return self._pcontext.pids()
def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None:
if self._pcontext:
self._pcontext.close(death_sig)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
role = worker_group.spec.role
worker_pids = {w.id for w in worker_group.workers}
assert self._pcontext is not None
pc_pids = set(self._pcontext.pids().values())
if worker_pids != pc_pids:
log.error(
f"[{role}] worker pids do not match process_context pids."
f" Expected: {worker_pids}, actual: {pc_pids}"
)
return RunResult(state=WorkerState.UNKNOWN)
result = self._pcontext.wait(0)
if result:
if result.is_failed():
# map local rank failure to global rank
worker_failures = {}
for local_rank, failure in result.failures.items():
worker = worker_group.workers[local_rank]
worker_failures[worker.global_rank] = failure
return RunResult(
state=WorkerState.FAILED,
failures=worker_failures,
)
else:
# copy ret_val_queue into a map with a global ranks
workers_ret_vals = {}
for local_rank, ret_val in result.return_values.items():
worker = worker_group.workers[local_rank]
workers_ret_vals[worker.global_rank] = ret_val
return RunResult(
state=WorkerState.SUCCEEDED,
return_values=workers_ret_vals,
)
else:
return RunResult(state=WorkerState.HEALTHY)
| [
"[email protected]"
] | |
197926393868d21e6ae154a9dd519b9c67bbad9c | cd014fae6791f51a9a382f34dbdcee6d61d84e30 | /64_eqf_fveqf_fvf_fvegf/64.py | 64fae91ef51cb384faf818ac502876f63733d358 | [
"Apache-2.0"
] | permissive | ckclark/Hackquest | 1505f50fc2c735db059205d1c9bbba1832cc5059 | 65ed5fd32e79906c0e36175bbd280d976c6134bd | refs/heads/master | 2021-01-16T19:32:29.434790 | 2015-09-29T13:39:04 | 2015-09-29T13:39:04 | 42,388,846 | 13 | 5 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | lines = [x.strip() for x in open('64.txt').readlines()]
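# Added note: for each shift value (only 16 here), compare every character
# with the one `shift` positions to its right, emit ' ' where they match and
# '*' where they differ, and print the resulting grid of differences.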
for shift in [16]: #range(len(lines[0])):
out_graph = []
for line in lines:
out_line = []
for i in range(len(line) - shift):
if line[i] == line[i + shift]:
out_line.append(' ')
else:
out_line.append('*')
out_line = ''.join(out_line)
out_graph.append(out_line)
print shift
print '\n'.join(out_graph)
| [
"[email protected]"
] | |
5920ba78e09eb4f5be44b465dda4879c3b817140 | 1bfebc7e1c95cd3c25024b6b1adbf518e55513bf | /src/pykit/strutil/test/test_hex.py | 111d8a160a9a91f0c53b0653ae2f85d8536d8489 | [
"MIT"
] | permissive | bsc-s2/ops | a9a217a47dad558285ca8064fa29fdff10ab4ad7 | 6fb8ad758b328a445005627ac1e5736f17088cee | refs/heads/master | 2021-06-24T09:32:49.057026 | 2020-11-02T06:50:01 | 2020-11-02T06:50:01 | 123,527,739 | 8 | 0 | MIT | 2020-09-03T04:58:26 | 2018-03-02T03:54:20 | Python | UTF-8 | Python | false | false | 5,256 | py | #!/usr/bin/env python2
# coding: utf-8
import os
import unittest
from pykit import strutil
from pykit.strutil import Hex
from pykit import ututil
from pykit import utfjson
dd = ututil.dd
class TestHex(unittest.TestCase):
def test_init(self):
byte_length = 3
cases = (
(0, 0),
('000000', 0),
('\0\0\0', 0),
(256**2 + 2*256 + 3, 0x010203),
('010203', 0x010203),
('\1\2\3', 0x010203),
)
for inp, expected in cases:
dd(inp, expected)
c = Hex(inp, byte_length)
self.assertEqual(expected, c.int)
self.assertEqual('%06x' % expected, c)
def test_attr(self):
c = Hex('010203', 3)
self.assertEqual('010203', c.hex)
self.assertEqual('\1\2\3', c.bytes)
self.assertEqual(256**2 + 2*256 + 3, c.int)
self.assertIs('010203', c.hex)
self.assertIsNot('010203', c)
def test_init_invalid(self):
byte_length = 3
cases = (
(256**3-1, None),
(256**3, ValueError),
(-1, ValueError),
('\1\2', ValueError),
('\1\2\3\4', ValueError),
('0102', ValueError),
('01020', ValueError),
('0102030', ValueError),
('01020304', ValueError),
({}, TypeError),
)
for inp, err in cases:
dd(inp, err)
if err is None:
c = Hex(inp, byte_length)
else:
self.assertRaises(err, Hex, inp, byte_length)
def test_named_length(self):
val = 0x010203
cases = (
('crc32', '00010203'),
('Crc32', '00010203'),
('CRC32', '00010203'),
('md5', '00000000000000000000000000010203'),
('Md5', '00000000000000000000000000010203'),
('MD5', '00000000000000000000000000010203'),
('sha1', '0000000000000000000000000000000000010203'),
('Sha1', '0000000000000000000000000000000000010203'),
('SHA1', '0000000000000000000000000000000000010203'),
('sha256', '0000000000000000000000000000000000000000000000000000000000010203'),
('Sha256', '0000000000000000000000000000000000000000000000000000000000010203'),
('SHA256', '0000000000000000000000000000000000000000000000000000000000010203'),
)
for typ, expected in cases:
c = Hex(val, typ)
self.assertEqual(expected, c)
def test_checksum_shortcut(self):
val = 0x010203
self.assertEqual(Hex(val, 'crc32'), Hex.crc32(val))
self.assertEqual(Hex(val, 'md5'), Hex.md5(val))
self.assertEqual(Hex(val, 'sha1'), Hex.sha1(val))
self.assertEqual(Hex(val, 'sha256'), Hex.sha256(val))
def test_prefix(self):
pref = '1234'
cases = (
('crc32', '12340000'),
('md5', '12340000000000000000000000000000'),
('sha1', '1234000000000000000000000000000000000000'),
('sha256', '1234000000000000000000000000000000000000000000000000000000000000'),
)
for typ, expected in cases:
dd('typ:', typ)
c = Hex((pref, 0), typ)
self.assertEqual(expected, c)
self.assertEqual('12340101', Hex((pref, 1), 'crc32'))
def test_str_repr(self):
c = Hex.crc32(1)
self.assertEqual('00000001', str(c))
self.assertEqual("'00000001'", repr(c))
def test_json(self):
c = Hex.crc32(('0002', 0))
rst = utfjson.dump(c)
self.assertEqual('"00020000"', rst)
self.assertEqual(c, utfjson.load(rst))
def test_arithmetic(self):
c = Hex.crc32(5)
self.assertEqual(6, (c+1).int)
self.assertEqual(10, (c*2).int)
self.assertEqual(2, (c/2).int)
self.assertEqual(0, (c/6).int)
self.assertEqual(1, (c % 2).int)
self.assertEqual(25, (c**2).int)
self.assertEqual('00000006', (c+1))
self.assertEqual('0000000a', (c*2))
self.assertEqual('00000002', (c/2))
self.assertEqual('00000000', (c/6))
self.assertEqual('00000001', (c % 2))
self.assertEqual('00000019', (c**2))
self.assertEqual(6, (c + Hex.crc32(1)).int)
# overflow protection
self.assertEqual(0, (c-5).int)
self.assertEqual(0, (c-6).int)
d = Hex.crc32(('', 0xff))
self.assertEqual(d, d+1)
def test_arithmetic_error(self):
c = Hex.crc32(5)
cases = (
[],
(),
{},
'x',
u'我',
)
for inp in cases:
with self.assertRaises(TypeError):
c + inp
with self.assertRaises(TypeError):
c - inp
with self.assertRaises(TypeError):
c * inp
with self.assertRaises(TypeError):
c / inp
with self.assertRaises(TypeError):
c % inp
with self.assertRaises(TypeError):
c ** inp
| [
"[email protected]"
] | |
9ecc842f23895f3713c99a55702174b7192797fa | 31e7aa5176876e6caf7ff9b37336b39292c9dd5b | /selfdrive/controls/lib/pathplanner.py | de43c041805990c89541efeab04f50f6241ea132 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | avolmensky/openpilot | 02d822f7eb50bb74368c794a3d580f95a53c2ca4 | dc61915529aabfad62061e784f277af311013cf1 | refs/heads/devel | 2021-12-15T01:43:10.994332 | 2020-02-14T01:30:43 | 2020-02-14T02:33:40 | 191,065,999 | 2 | 9 | MIT | 2019-06-26T10:13:29 | 2019-06-09T23:32:13 | C | UTF-8 | Python | false | false | 9,158 | py | import os
import math
from common.realtime import sec_since_boot, DT_MDL
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import MPC_COST_LAT
from selfdrive.controls.lib.lane_planner import LanePlanner
from selfdrive.config import Conversions as CV
import cereal.messaging as messaging
from cereal import log
LaneChangeState = log.PathPlan.LaneChangeState
LaneChangeDirection = log.PathPlan.LaneChangeDirection
LOG_MPC = os.environ.get('LOG_MPC', False)
LANE_CHANGE_SPEED_MIN = 45 * CV.MPH_TO_MS
LANE_CHANGE_TIME_MAX = 10.
DESIRES = {
LaneChangeDirection.none: {
LaneChangeState.off: log.PathPlan.Desire.none,
LaneChangeState.preLaneChange: log.PathPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.PathPlan.Desire.none,
LaneChangeState.laneChangeFinishing: log.PathPlan.Desire.none,
},
LaneChangeDirection.left: {
LaneChangeState.off: log.PathPlan.Desire.none,
LaneChangeState.preLaneChange: log.PathPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.PathPlan.Desire.laneChangeLeft,
LaneChangeState.laneChangeFinishing: log.PathPlan.Desire.laneChangeLeft,
},
LaneChangeDirection.right: {
LaneChangeState.off: log.PathPlan.Desire.none,
LaneChangeState.preLaneChange: log.PathPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.PathPlan.Desire.laneChangeRight,
LaneChangeState.laneChangeFinishing: log.PathPlan.Desire.laneChangeRight,
},
}
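# Lane change state machine (implemented in PathPlanner.update below):
#   off -> preLaneChange                 when a blinker comes on above LANE_CHANGE_SPEED_MIN
#   preLaneChange -> laneChangeStarting  when the driver nudges the wheel toward the blinker side
#   laneChangeStarting -> laneChangeFinishing  when the model's lane-change probability exceeds 0.5
#   laneChangeFinishing -> preLaneChange or off  when that probability drops below 0.2
# Any state falls back to off when control is inactive, the blinker is released,
# or LANE_CHANGE_TIME_MAX is exceeded.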
def calc_states_after_delay(states, v_ego, steer_angle, curvature_factor, steer_ratio, delay):
states[0].x = v_ego * delay
states[0].psi = v_ego * curvature_factor * math.radians(steer_angle) / steer_ratio * delay
return states
class PathPlanner():
def __init__(self, CP):
self.LP = LanePlanner()
self.last_cloudlog_t = 0
self.steer_rate_cost = CP.steerRateCost
self.setup_mpc()
self.solution_invalid_cnt = 0
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
self.lane_change_timer = 0.0
self.prev_one_blinker = False
def setup_mpc(self):
self.libmpc = libmpc_py.libmpc
self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.LANE, MPC_COST_LAT.HEADING, self.steer_rate_cost)
self.mpc_solution = libmpc_py.ffi.new("log_t *")
self.cur_state = libmpc_py.ffi.new("state_t *")
self.cur_state[0].x = 0.0
self.cur_state[0].y = 0.0
self.cur_state[0].psi = 0.0
self.cur_state[0].delta = 0.0
self.angle_steers_des = 0.0
self.angle_steers_des_mpc = 0.0
self.angle_steers_des_prev = 0.0
self.angle_steers_des_time = 0.0
def update(self, sm, pm, CP, VM):
v_ego = sm['carState'].vEgo
angle_steers = sm['carState'].steeringAngle
active = sm['controlsState'].active
angle_offset = sm['liveParameters'].angleOffset
# Run MPC
self.angle_steers_des_prev = self.angle_steers_des_mpc
VM.update_params(sm['liveParameters'].stiffnessFactor, sm['liveParameters'].steerRatio)
curvature_factor = VM.curvature_factor(v_ego)
self.LP.parse_model(sm['model'])
# Lane change logic
one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN
if sm['carState'].leftBlinker:
self.lane_change_direction = LaneChangeDirection.left
elif sm['carState'].rightBlinker:
self.lane_change_direction = LaneChangeDirection.right
if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX) or (not one_blinker):
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
else:
torque_applied = sm['carState'].steeringPressed and \
((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or \
(sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))
lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
# State transitions
# off
if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
self.lane_change_state = LaneChangeState.preLaneChange
# pre
elif self.lane_change_state == LaneChangeState.preLaneChange:
if not one_blinker or below_lane_change_speed:
self.lane_change_state = LaneChangeState.off
elif torque_applied:
self.lane_change_state = LaneChangeState.laneChangeStarting
# starting
elif self.lane_change_state == LaneChangeState.laneChangeStarting and lane_change_prob > 0.5:
self.lane_change_state = LaneChangeState.laneChangeFinishing
# finishing
elif self.lane_change_state == LaneChangeState.laneChangeFinishing and lane_change_prob < 0.2:
if one_blinker:
self.lane_change_state = LaneChangeState.preLaneChange
else:
self.lane_change_state = LaneChangeState.off
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
self.lane_change_timer = 0.0
else:
self.lane_change_timer += DT_MDL
self.prev_one_blinker = one_blinker
desire = DESIRES[self.lane_change_direction][self.lane_change_state]
# Turn off lanes during lane change
if desire == log.PathPlan.Desire.laneChangeRight or desire == log.PathPlan.Desire.laneChangeLeft:
self.LP.l_prob = 0.
self.LP.r_prob = 0.
self.libmpc.init_weights(MPC_COST_LAT.PATH / 10.0, MPC_COST_LAT.LANE, MPC_COST_LAT.HEADING, self.steer_rate_cost)
else:
self.libmpc.init_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.LANE, MPC_COST_LAT.HEADING, self.steer_rate_cost)
self.LP.update_d_poly(v_ego)
# account for actuation delay
self.cur_state = calc_states_after_delay(self.cur_state, v_ego, angle_steers - angle_offset, curvature_factor, VM.sR, CP.steerActuatorDelay)
v_ego_mpc = max(v_ego, 5.0) # avoid mpc roughness due to low speed
self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
list(self.LP.l_poly), list(self.LP.r_poly), list(self.LP.d_poly),
self.LP.l_prob, self.LP.r_prob, curvature_factor, v_ego_mpc, self.LP.lane_width)
# reset to current steer angle if not active or overriding
if active:
delta_desired = self.mpc_solution[0].delta[1]
rate_desired = math.degrees(self.mpc_solution[0].rate[0] * VM.sR)
else:
delta_desired = math.radians(angle_steers - angle_offset) / VM.sR
rate_desired = 0.0
self.cur_state[0].delta = delta_desired
self.angle_steers_des_mpc = float(math.degrees(delta_desired * VM.sR) + angle_offset)
    # Check for infeasible MPC solution
mpc_nans = any(math.isnan(x) for x in self.mpc_solution[0].delta)
t = sec_since_boot()
if mpc_nans:
self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.LANE, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.cur_state[0].delta = math.radians(angle_steers - angle_offset) / VM.sR
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.mpc_solution[0].cost > 20000. or mpc_nans: # TODO: find a better way to detect when MPC did not converge
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message()
plan_send.init('pathPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'liveParameters', 'model'])
plan_send.pathPlan.laneWidth = float(self.LP.lane_width)
plan_send.pathPlan.dPoly = [float(x) for x in self.LP.d_poly]
plan_send.pathPlan.lPoly = [float(x) for x in self.LP.l_poly]
plan_send.pathPlan.lProb = float(self.LP.l_prob)
plan_send.pathPlan.rPoly = [float(x) for x in self.LP.r_poly]
plan_send.pathPlan.rProb = float(self.LP.r_prob)
plan_send.pathPlan.angleSteers = float(self.angle_steers_des_mpc)
plan_send.pathPlan.rateSteers = float(rate_desired)
plan_send.pathPlan.angleOffset = float(sm['liveParameters'].angleOffsetAverage)
plan_send.pathPlan.mpcSolutionValid = bool(plan_solution_valid)
plan_send.pathPlan.paramsValid = bool(sm['liveParameters'].valid)
plan_send.pathPlan.sensorValid = bool(sm['liveParameters'].sensorValid)
plan_send.pathPlan.posenetValid = bool(sm['liveParameters'].posenetValid)
plan_send.pathPlan.desire = desire
plan_send.pathPlan.laneChangeState = self.lane_change_state
plan_send.pathPlan.laneChangeDirection = self.lane_change_direction
pm.send('pathPlan', plan_send)
if LOG_MPC:
dat = messaging.new_message()
dat.init('liveMpc')
dat.liveMpc.x = list(self.mpc_solution[0].x)
dat.liveMpc.y = list(self.mpc_solution[0].y)
dat.liveMpc.psi = list(self.mpc_solution[0].psi)
dat.liveMpc.delta = list(self.mpc_solution[0].delta)
dat.liveMpc.cost = self.mpc_solution[0].cost
pm.send('liveMpc', dat)
| [
"[email protected]"
] | |
ea9891c42ef6fc7f1ca7896c9b1e6aadd4fe3db7 | 38221ca553059a83ed8f64e2cb25181ed88da275 | /deeppavlov/models/spelling_correction/levenstein/tabled_trie.py | 502376482ef26e8fc4feca5ddd880788e9dcc39f | [
"Apache-2.0",
"Python-2.0"
] | permissive | stenpiren/DeepPavlov | 7153ce828225d9d1fdf1c171794efe463f2e9dea | fe59facab4854f2fe56ed126e27eb9696ad6dfd8 | refs/heads/master | 2020-03-23T10:08:53.962961 | 2018-07-16T22:10:17 | 2018-07-16T22:10:17 | 141,427,836 | 1 | 0 | Apache-2.0 | 2018-07-18T11:50:30 | 2018-07-18T11:50:30 | null | UTF-8 | Python | false | false | 19,969 | py | import copy
from collections import defaultdict
import numpy as np
class Trie:
"""
    A prefix trie implementation (more precisely, a rooted directed acyclic word graph)
    Attributes
    --------
    alphabet: list, the alphabet
    alphabet_codes: dict, mapping symbol -> code
    compressed: bool, whether the trie has been minimized
    cashed: bool, whether calls to the descend function are cached
    root: int, index of the root node
    graph: array, type=int, shape=(number of nodes, alphabet size), matrix of children
    graph[i][j] = k <-> node k is the child of node i via the edge labelled with alphabet[j]
    data: array, type=object, shape=(number of nodes), array of data stored in the nodes
    final: array, type=bool, shape=(number of nodes), array of indicators
    final[i] = True <-> i is a final node
"""
NO_NODE = -1
SPACE_CODE = -1
ATTRS = ['is_numpied', 'precompute_symbols', 'allow_spaces',
'is_terminated', 'to_make_cashed']
def __init__(self, alphabet, make_sorted=True, make_alphabet_codes=True,
is_numpied=False, to_make_cashed=False,
precompute_symbols=None, allow_spaces=False, dict_storage=False):
self.alphabet = sorted(alphabet) if make_sorted else alphabet
self.alphabet_codes = ({a: i for i, a in enumerate(self.alphabet)}
if make_alphabet_codes else self.alphabet)
self.alphabet_codes[" "] = Trie.SPACE_CODE
self.is_numpied = is_numpied
self.to_make_cashed = to_make_cashed
self.dict_storage = dict_storage
self.precompute_symbols = precompute_symbols
self.allow_spaces = allow_spaces
self.initialize()
def initialize(self):
self.root = 0
self.graph = [self._make_default_node()]
self.data, self.final = [None], [False]
self.nodes_number = 1
self.descend = self._descend_simple
self.is_terminated = False
def _make_default_node(self):
if self.dict_storage:
return defaultdict(lambda: -1)
elif self.is_numpied:
return np.full(shape=(len(self.alphabet),),
fill_value=Trie.NO_NODE, dtype=int)
else:
return [Trie.NO_NODE] * len(self.alphabet)
def save(self, outfile):
"""
        Saves the trie to a file so it can be reused later
"""
with open(outfile, "w", encoding="utf8") as fout:
attr_values = [getattr(self, attr) for attr in Trie.ATTRS]
attr_values.append(any(x is not None for x in self.data))
fout.write("{}\n{}\t{}\n".format(
" ".join("T" if x else "F" for x in attr_values),
self.nodes_number, self.root))
fout.write(" ".join(str(a) for a in self.alphabet) + "\n")
for index, label in enumerate(self.final):
letters = self._get_letters(index, return_indexes=True)
children = self._get_children(index)
fout.write("{}\t{}\n".format(
"T" if label else "F", " ".join("{}:{}".format(*elem)
for elem in zip(letters, children))))
if self.precompute_symbols is not None:
for elem in self.data:
fout.write(":".join(",".join(
map(str, symbols)) for symbols in elem) + "\n")
return
def make_cashed(self):
'''
        Enables caching of descend queries
'''
self._descendance_cash = [dict() for _ in self.graph]
self.descend = self._descend_cashed
def make_numpied(self):
self.graph = np.array(self.graph)
self.final = np.asarray(self.final, dtype=bool)
self.is_numpied = True
def add(self, s):
'''
        Adds the string s to the prefix trie
'''
if self.is_terminated:
raise TypeError("Impossible to add string to fitted trie")
if s == "":
self._set_final(self.root)
return
curr = self.root
for i, a in enumerate(s):
code = self.alphabet_codes[a]
next = self.graph[curr][code]
if next == Trie.NO_NODE:
curr = self._add_descendant(curr, s[i:])
break
else:
curr = next
self._set_final(curr)
return self
def fit(self, words):
for s in words:
self.add(s)
self.terminate()
def terminate(self):
if self.is_numpied:
self.make_numpied()
self.terminated = True
if self.precompute_symbols is not None:
precompute_future_symbols(self, self.precompute_symbols,
allow_spaces=self.allow_spaces)
if self.to_make_cashed:
self.make_cashed()
def __contains__(self, s):
if any(a not in self.alphabet for a in s):
return False
# word = tuple(self.alphabet_codes[a] for a in s)
node = self.descend(self.root, s)
return (node != Trie.NO_NODE) and self.is_final(node)
def words(self):
"""
        Returns an iterator over the words stored in the trie
"""
branch, word, indexes = [self.root], [], [0]
letters_with_children = [self._get_children_and_letters(self.root)]
while len(branch) > 0:
if self.is_final(branch[-1]):
yield "".join(word)
while indexes[-1] == len(letters_with_children[-1]):
indexes.pop()
letters_with_children.pop()
branch.pop()
if len(indexes) == 0:
                    return  # traversal finished (avoid raising StopIteration inside a generator)
word.pop()
next_letter, next_child = letters_with_children[-1][indexes[-1]]
indexes[-1] += 1
indexes.append(0)
word.append(next_letter)
branch.append(next_child)
letters_with_children.append(self._get_children_and_letters(branch[-1]))
def is_final(self, index):
'''
        Arguments
        ---------
        index: int, node index
        Returns
        ----------
        True: if index is the index of a final node
'''
return self.final[index]
def find_partitions(self, s, max_count=1):
"""
        Finds all partitions s = s_1 ... s_m into dictionary words s_1, ..., s_m
        with m <= max_count
"""
curr_agenda = [(self.root, [], 0)]
for i, a in enumerate(s):
next_agenda = []
for curr, borders, cost in curr_agenda:
if cost >= max_count:
continue
child = self.graph[curr][self.alphabet_codes[a]]
# child = self.graph[curr][a]
if child == Trie.NO_NODE:
continue
next_agenda.append((child, borders, cost))
if self.is_final(child):
next_agenda.append((self.root, borders + [i+1], cost+1))
curr_agenda = next_agenda
answer = []
for curr, borders, cost in curr_agenda:
if curr == self.root:
borders = [0] + borders
answer.append([s[left:borders[i+1]] for i, left in enumerate(borders[:-1])])
return answer
def __len__(self):
return self.nodes_number
def __repr__(self):
answer = ""
for i, (final, data) in enumerate(zip(self.final, self.data)):
letters, children = self._get_letters(i), self._get_children(i)
answer += "{0}".format(i)
if final:
answer += "F"
for a, index in zip(letters, children):
answer += " {0}:{1}".format(a, index)
answer += "\n"
if data is not None:
answer += "data:{0} {1}\n".format(len(data), " ".join(str(elem) for elem in data))
return answer
def _add_descendant(self, parent, s, final=False):
for a in s:
code = self.alphabet_codes[a]
parent = self._add_empty_child(parent, code, final)
return parent
def _add_empty_child(self, parent, code, final=False):
'''
        Adds a child to the node parent along the symbol with the given code
'''
self.graph[parent][code] = self.nodes_number
self.graph.append(self._make_default_node())
self.data.append(None)
self.final.append(final)
self.nodes_number += 1
return (self.nodes_number - 1)
def _descend_simple(self, curr, s):
'''
        Descends from the node curr along the string s
'''
for a in s:
curr = self.graph[curr][self.alphabet_codes[a]]
if curr == Trie.NO_NODE:
break
return curr
def _descend_cashed(self, curr, s):
'''
        Descends from the node curr along the string s, with caching
'''
if s == "":
return curr
curr_cash = self._descendance_cash[curr]
answer = curr_cash.get(s, None)
if answer is not None:
return answer
        # the code is duplicated here for speed
res = curr
for a in s:
res = self.graph[res][self.alphabet_codes[a]]
# res = self.graph[res][a]
if res == Trie.NO_NODE:
break
curr_cash[s] = res
return res
def _set_final(self, curr):
'''
        Marks the state curr as final
'''
self.final[curr] = True
def _get_letters(self, index, return_indexes=False):
"""
        Returns the labels of all outgoing edges of the node with the given index
"""
if self.dict_storage:
answer = list(self.graph[index].keys())
else:
answer = [i for i, elem in enumerate(self.graph[index])
if elem != Trie.NO_NODE]
if not return_indexes:
answer = [(self.alphabet[i] if i >= 0 else " ") for i in answer]
return answer
def _get_children_and_letters(self, index, return_indexes=False):
if self.dict_storage:
answer = list(self.graph[index].items())
else:
answer = [elem for elem in enumerate(self.graph[index])
if elem[1] != Trie.NO_NODE]
if not return_indexes:
for i, (letter_index, child) in enumerate(answer):
answer[i] = (self.alphabet[letter_index], child)
return answer
def _get_children(self, index):
"""
        Returns all children of the node with the given index
"""
if self.dict_storage:
return list(self.graph[index].values())
else:
return [elem for elem in self.graph[index] if elem != Trie.NO_NODE]
class TrieMinimizer:
def __init__(self):
pass
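    # minimize() below performs DAWG-style minimization: nodes are processed in
    # postorder, each node is keyed by (outgoing letter indexes, children's
    # equivalence classes, is_final), and nodes that share a key are merged into
    # one class; the classes become the nodes of the returned compressed trie.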
def minimize(self, trie, dict_storage=False, make_cashed=False, make_numpied=False,
precompute_symbols=None, allow_spaces=False, return_groups=False):
N = len(trie)
if N == 0:
raise ValueError("Trie should be non-empty")
node_classes = np.full(shape=(N,), fill_value=-1, dtype=int)
order = self.generate_postorder(trie)
# processing the first node
index = order[0]
node_classes[index] = 0
class_representatives = [index]
node_key = ((), (), trie.is_final(index))
classes, class_keys = {node_key : 0}, [node_key]
curr_index = 1
for index in order[1:]:
letter_indexes = tuple(trie._get_letters(index, return_indexes=True))
children = trie._get_children(index)
children_classes = tuple(node_classes[i] for i in children)
key = (letter_indexes, children_classes, trie.is_final(index))
key_class = classes.get(key, None)
if key_class is not None:
node_classes[index] = key_class
else:
                # a new equivalence class has appeared
class_keys.append(key)
classes[key] = node_classes[index] = curr_index
class_representatives.append(curr_index)
curr_index += 1
        # build the new (minimized) trie
compressed = Trie(trie.alphabet, is_numpied=make_numpied,
dict_storage=dict_storage, allow_spaces=allow_spaces,
precompute_symbols=precompute_symbols)
L = len(classes)
new_final = [elem[2] for elem in class_keys[::-1]]
if dict_storage:
new_graph = [defaultdict(int) for _ in range(L)]
elif make_numpied:
new_graph = np.full(shape=(L, len(trie.alphabet)),
fill_value=Trie.NO_NODE, dtype=int)
new_final = np.array(new_final, dtype=bool)
else:
new_graph = [[Trie.NO_NODE for a in trie.alphabet] for i in range(L)]
for (indexes, children, final), class_index in\
sorted(classes.items(), key=(lambda x: x[1])):
row = new_graph[L-class_index-1]
for i, child_index in zip(indexes, children):
row[i] = L - child_index - 1
compressed.graph = new_graph
compressed.root = L - node_classes[trie.root] - 1
compressed.final = new_final
compressed.nodes_number = L
compressed.data = [None] * L
if make_cashed:
compressed.make_cashed()
if precompute_symbols is not None:
if (trie.is_terminated and trie.precompute_symbols
and trie.allow_spaces == allow_spaces):
                # copy the future symbols from the source trie;
                # needed so that returning from final states to the initial one behaves the same in both tries
for i, node_index in enumerate(class_representatives[::-1]):
                    # future symbols for the representative of the i-th class
compressed.data[i] = copy.copy(trie.data[node_index])
else:
precompute_future_symbols(compressed, precompute_symbols, allow_spaces)
if return_groups:
node_classes = [L - i - 1 for i in node_classes]
return compressed, node_classes
else:
return compressed
def generate_postorder(self, trie):
"""
        Reverse topological sort (postorder traversal)
"""
order, stack = [], []
stack.append(trie.root)
colors = ['white'] * len(trie)
while len(stack) > 0:
index = stack[-1]
color = colors[index]
            if color == 'white': # the node has not been processed yet
colors[index] = 'grey'
for child in trie._get_children(index):
                    # check whether the child has already been visited
if child != Trie.NO_NODE and colors[child] == 'white':
stack.append(child)
else:
if color == 'grey':
colors[index] = 'black'
order.append(index)
stack = stack[:-1]
return order
def load_trie(infile):
with open(infile, "r", encoding="utf8") as fin:
line = fin.readline().strip()
flags = [x=='T' for x in line.split()]
if len(flags) != len(Trie.ATTRS) + 1:
raise ValueError("Wrong file format")
nodes_number, root = map(int, fin.readline().strip().split())
alphabet = fin.readline().strip().split()
trie = Trie(alphabet)
for i, attr in enumerate(Trie.ATTRS):
setattr(trie, attr, flags[i])
read_data = flags[-1]
final = [False] * nodes_number
#print(len(alphabet), nodes_number)
if trie.dict_storage:
graph = [defaultdict(lambda: -1) for _ in range(nodes_number)]
elif trie.is_numpied:
final = np.array(final)
graph = np.full(shape=(nodes_number, len(alphabet)),
fill_value=Trie.NO_NODE, dtype=int)
else:
graph = [[Trie.NO_NODE for a in alphabet] for i in range(nodes_number)]
for i in range(nodes_number):
line = fin.readline().strip()
if "\t" in line:
label, transitions = line.split("\t")
final[i] = (label == "T")
else:
label = line
final[i] = (label == "T")
continue
transitions = [x.split(":") for x in transitions.split()]
for code, value in transitions:
graph[i][int(code)] = int(value)
trie.graph = graph
trie.root = root
trie.final = final
trie.nodes_number = nodes_number
trie.data = [None] * nodes_number
if read_data:
for i in range(nodes_number):
line = fin.readline().strip("\n")
trie.data[i] = [set(elem.split(",")) for elem in line.split(":")]
if trie.to_make_cashed:
trie.make_cashed()
return trie
def make_trie(alphabet, words, compressed=True, is_numpied=False,
make_cashed=False, precompute_symbols=False,
allow_spaces=False, dict_storage=False):
trie = Trie(alphabet, is_numpied=is_numpied, to_make_cashed=make_cashed,
precompute_symbols=precompute_symbols, dict_storage=dict_storage)
trie.fit(words)
if compressed:
tm = TrieMinimizer()
trie = tm.minimize(trie, dict_storage=dict_storage, make_cashed=make_cashed,
make_numpied=is_numpied, precompute_symbols=precompute_symbols,
allow_spaces=allow_spaces)
return trie
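# Minimal usage sketch (illustration only; the alphabet and word list below are
# invented for the example and are not part of the spelling-correction models):
def _make_trie_usage_example():
    words = ["cat", "car", "card"]
    trie = make_trie("acdrt", words, compressed=True)
    # membership checks whole words only; prefixes are not final
    assert all(w in trie for w in words) and "ca" not in trie
    # split a concatenation back into at most two dictionary words
    return trie.find_partitions("catcar", max_count=2)  # -> [['cat', 'car']]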
def precompute_future_symbols(trie, n, allow_spaces=False):
"""
Collecting possible continuations of length <= n for every node
"""
if n == 0:
return
if trie.is_terminated and trie.precompute_symbols:
        # the symbols have already been precomputed
return
for index, final in enumerate(trie.final):
trie.data[index] = [set() for i in range(n)]
for index, (node_data, final) in enumerate(zip(trie.data, trie.final)):
node_data[0] = set(trie._get_letters(index))
if allow_spaces and final:
node_data[0].add(" ")
for d in range(1, n):
for index, (node_data, final) in enumerate(zip(trie.data, trie.final)):
children = set(trie._get_children(index))
for child in children:
node_data[d] |= trie.data[child][d - 1]
            # in case returning to the start state via a space is allowed
if allow_spaces and final:
node_data[d] |= trie.data[trie.root][d - 1]
trie.terminated = True
| [
"[email protected]"
] | |
50b28d0ed7daa7be97decf477b846c80cd2df47e | 4f0385a90230c0fe808e8672bb5b8abcceb43783 | /框架/crawler/scrapy/scrapy_demo/scrapy_demo/spiders/quotes.py | 8c9928611b92d882b2c0eebf7d5163ee20e145da | [] | no_license | lincappu/pycharmlearningproject | 4084dab7adde01db9fa82a12769a67e8b26b3382 | b501523e417b61373688ba12f11b384166baf489 | refs/heads/master | 2023-07-10T05:21:15.163393 | 2023-06-29T14:02:35 | 2023-06-29T14:02:35 | 113,925,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,268 | py | # -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import scrapy
from scrapy_demo import items
from scrapy_demo import settings
import scrapy.settings
from scrapy.mail import MailSender
# This is the most basic form of a spider,
# class QuotesSpider(scrapy.Spider):
# name = "quotes"
# start_urls = [
# 'http://quotes.toscrape.com/page/1/',
# ]
#
# def parse(self, response):
# for quote in response.css('div.quote'):
# yield {
# 'text': quote.css('span.text::text').get(),
# 'author': quote.css('small.author::text').get(),
# 'tags': quote.css('div.tags a.tag::text').getall(),
# }
#
# next_page = response.css('li.next a::attr(href)').get()
# if next_page is not None:
#                 next_page = response.urljoin(next_page)  # this urljoin uses the domain from start_urls.
# yield scrapy.Request(next_page, callback=self.parse)
# The response.follow form; unlike Request, no extra urljoin is needed because the url is already joined
# class QuotesSpider(scrapy.Spider):
# name = 'quotes'
# start_urls = [
# 'http://quotes.toscrape.com/tag/humor/',
# ]
#
# def parse(self, response):
# for quote in response.css('div.quote'):
# yield {
# 'author': quote.xpath('span/small/text()').get(),
# 'text': quote.css('span.text::text').get(),
# }
#
# next_page = response.css('li.next a::attr("href")').get()
# if next_page is not None:
# yield response.follow(next_page, self.parse)
# The follow_all form, with a second callback added.
# class AuthorSpider(scrapy.Spider):
# name = 'author'
#
# start_urls = ['http://quotes.toscrape.com/']
#
# def parse(self, response):
# author_page_links = response.css('.author + a')
# yield from response.follow_all(author_page_links, self.parse_author)
#
# pagination_links = response.css('li.next a')
# yield from response.follow_all(pagination_links, self.parse)
#
# def parse_author(self, response):
# def extract_with_css(query):
# return response.css(query).get(default='').strip()
#
# yield {
# 'name': extract_with_css('h3.author-title::text'),
# 'birthdate': extract_with_css('.author-born-date::text'),
# 'bio': extract_with_css('.author-description::text'),
# }
#
#
# Pass an argument on the command line and override start_requests, so start_urls is not needed
# class QuotesSpider(scrapy.Spider):
# name = "quotes"
#
# def start_requests(self):
# url = 'http://quotes.toscrape.com/'
# tag = getattr(self, 'tag', None)
# if tag is not None:
# url = url + 'tag/' + tag
# yield scrapy.Request(url, self.parse)
#
# def parse(self, response):
# for quote in response.css('div.quote'):
# yield {
# 'text': quote.css('span.text::text').extract_first(),
# 'author': quote.css('small.author::text').extract_first(),
# }
#
# next_page = response.css('li.next a::attr(href)').extract_first()
# if next_page is not None:
# next_page = response.urljoin(next_page)
# yield scrapy.Request(next_page, self.parse)
# class DianyingSpider(scrapy.Spider):
# MAIL_HOST = 'smtp.exmail.qq.com'
# MAIL_PORT = 25
# MAIL_USER = "[email protected]"
# MAIL_PASS = "6bH9KPQoKD"
# MAIL_TLS = False
# MAIL_SSL = False
#
# name = "dianying"
# start_urls = [
# "https://www.dy2018.com/html/gndy/dyzz/"
#     ]
# These are the default FEED exporter settings; the item exporter configuration is not used here
# custom_settings = {
# 'FEED_URI': "file:///tmp/zzz.marshal",
# 'FEED_FORMAT': 'marshal',
# 'FEED_EXPORT_ENCODING':'utf8',
# 'FEED_EXPORT_FIELDS': ["url", "title"]
# }
# Entry point
# def parse(self, response):
# mailer = MailSender(
# smtphost=settings.py.MAIL_HOST,
# smtpuser=settings.py.MAIL_USER,
# mailfrom=settings.py.MAIL_USER,
# smtppass=settings.py.MAIL_PASS,
# smtpport=settings.py.MAIL_PORT,
# smtptls=settings.py.MAIL_TLS,
# smtpssl=settings.py.MAIL_SSL,
# )
# mailer = MailSender.from_settings(self.settings.py)
#
# mailer.send(to=["[email protected]"], subject="北京新橙科技有限公司", body="Some body")
#
#         # Iterate over every page of the "latest movies" listing
# for page in response.xpath("//select/option/@value").extract():
# url = "https://www.dy2018.com" + page
# self.logger.info('aaaaa %s' % url)
# yield scrapy.Request(url, callback=self.parsePage)
#
#     # Handle a single listing page
# def parsePage(self, response):
#         # Collect the detail-page links of all movies on this page
# for link in response.xpath('//a[@class="ulink"]/@href').extract():
# url = "https://www.dy2018.com" + link
# self.logger.info('bbbbbb %s' % url)
# yield scrapy.Request(url, callback=self.parseChild)
#
#     # Handle a single movie detail page
# def parseChild(self, response):
#         # Extract the movie information into the item
# item = items.DianyingItem()
# item['url'] = response.url
# item['title'] = response.xpath('//div[@class="title_all"]/h1/text()').extract()
# item['magnet'] = response.xpath('//div[@id="Zoom"]//a[starts-with(@href, "magnet:")]/@href').extract()
# self.logger.info('ccccc %s' % item)
# yield item
# The ItemLoader form
# class DianyingSpider(scrapy.Spider):
# name = "dianying"
# start_urls = [
# "https://www.dy2018.com/html/gndy/dyzz/"
# ]
#
#     # Entry point
# def parse(self, response):
#         # Iterate over every page of the "latest movies" listing
# for page in response.xpath("//select/option/@value").extract():
# url = "https://www.dy2018.com" + page
# yield scrapy.Request(url, callback=self.parsePage)
#
#     # Handle a single listing page
# def parsePage(self, response):
#         # Collect the detail-page links of all movies on this page
# for link in response.xpath('//a[@class="ulink"]/@href').extract():
# url = "https://www.dy2018.com" + link
# yield scrapy.Request(url, callback=self.parseChild)
#
#
# def parseChild(self, response):
# l = items.ArticleItemLoader(item=items.DianyingItem(), response=response)
# l.add_value('url', response.url)
# l.add_xpath('title', '//div[@class="title_all"]/h1/text()')
# l.add_xpath('magnet', '//div[@id="Zoom"]//img/@src')
# l.add_value('date', '20200611')
# l.add_value('name','fls')
# l.add_value('create_time','test')
# yield l.load_item()
#
# class DianyingSpider(scrapy.Spider):
#
# name = "dianying"
# start_urls = [
# "https://www.thepaper.cn/allGovUsers.jsp",
# ]
#
# def parse(self, response):
| [
"[email protected]"
] | |
ec31acbdb0cf41622d1a325d3f894382ad8fd78f | d4fa331d7d8a00865f99ee2c05ec8efc0468fb63 | /alg/remove_k_digits.py | f25427c08b7db78277402c25b6aa25fed1054238 | [] | no_license | nyannko/leetcode-python | 5342620c789a02c7ae3478d7ecf149b640779932 | f234bd7b62cb7bc2150faa764bf05a9095e19192 | refs/heads/master | 2021-08-11T04:11:00.715244 | 2019-02-05T15:26:43 | 2019-02-05T15:26:43 | 145,757,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
if len(num) <= k:
return '0'
stack = []
for i in num:
while stack and k > 0 and stack[-1] > i:
stack.pop()
k -= 1
stack.append(i)
# while k > 0:
# stack.pop()
# k -= 1
if k:
stack = stack[:-k]
return ''.join(stack).lstrip('0') or '0'
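# Greedy monotonic-stack idea: scan the digits left to right and pop any stacked
# digit that is larger than the incoming one while removals (k) remain; whatever
# is left of k is trimmed from the tail and leading zeros are stripped.
# E.g. Solution().removeKdigits("1432219", 3) returns "1219".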
| [
"[email protected]"
] | |
1f97596a4534396f4848c29caeee8100eb7f788e | de1abd0ebbb817aa5f23d369e7dda360fd6f1c32 | /chapter3/scrapy/wikiSpider/wikiSpider/settings.py | 9bf879252847b3f89efa7323e1c40f4f86ae3b30 | [] | no_license | CodedQuen/Web-Scraping-with-Python- | 33aaa2e3733aa1f2b8c7a533d74f5d08ac868197 | 67f2d5f57726d5a943f5f044480e68c36076965b | refs/heads/master | 2022-06-13T01:34:39.764531 | 2020-05-05T11:07:01 | 2020-05-05T11:07:01 | 261,435,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,258 | py | # -*- coding: utf-8 -*-
# Scrapy settings for wikiSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'wikiSpider'
SPIDER_MODULES = ['wikiSpider.spiders']
NEWSPIDER_MODULE = 'wikiSpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'wikiSpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'wikiSpider.middlewares.WikispiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'wikiSpider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'wikiSpider.pipelines.WikispiderPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
ba1cba5c8a2a1b7898a46fb6a4abeebd84541336 | 51885da54b320351bfea42c7dd629f41985454cd | /abc075/c.py | 18f98c98169acb0c09d089c7c2b89ef4b8bc0bd0 | [] | no_license | mskt4440/AtCoder | dd266247205faeda468f911bff279a792eef5113 | f22702e3932e129a13f0683e91e5cc1a0a99c8d5 | refs/heads/master | 2021-12-15T10:21:31.036601 | 2021-12-14T08:19:11 | 2021-12-14T08:19:11 | 185,161,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | #
# abc075 c
#
import sys
from io import StringIO
import unittest
from collections import deque
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """7 7
1 3
2 7
3 4
4 5
4 6
5 6
6 7"""
output = """4"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """3 3
1 2
1 3
2 3"""
output = """0"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """6 5
1 2
2 3
3 4
4 5
5 6"""
output = """5"""
self.assertIO(input, output)
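# resolve() below brute-forces the bridge count: for each of the M edges it
# removes that edge, runs a depth-first search from vertex 1 over the remaining
# graph, and counts the edge whenever some vertex becomes unreachable.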
def resolve():
N, M = map(int, input().split())
AB = [list(map(int, input().split())) for _ in range(M)]
ans = 0
for i in range(M):
Target = AB[:]
Target.pop(i)
G = [[i+1, 0] for i in range(N)]
for ab in Target:
a, b = ab
G[a-1][1] += 1
G[b-1][1] += 1
G[a-1].append(b)
G[b-1].append(a)
F = [False] * N
Q = deque()
Q.append(1)
F[0] = True
while Q:
p = Q.pop()
if G[p-1][1] == 0:
continue
for np in G[p-1][2:]:
if F[np-1]:
continue
Q.append(np)
F[np-1] = True
for f in F:
if f == False:
ans += 1
break
print(ans)
if __name__ == "__main__":
# unittest.main()
resolve()
| [
"[email protected]"
] | |
70e19baa27259958c38615665bee3f6c8ac77d48 | b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339 | /入门学习/threading_dead_lock-eg.py | 277a2b79b337003460067bedae3cb0eeca00cd29 | [] | no_license | python-yc/pycharm_script | ae0e72898ef44a9de47e7548170a030c0a752eb5 | c8947849090c71e131df5dc32173ebe9754df951 | refs/heads/master | 2023-01-05T06:16:33.857668 | 2020-10-31T08:09:53 | 2020-10-31T08:09:53 | 296,778,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | """
import threading
import time
lock_1 = threading.Lock()
lock_2 = threading.Lock()
def func_1():
print("func_1 starting......")
lock_1.acquire()
print("func_1 申请了 lock 1 ......")
time.sleep(2)
print("func_1 等待 lock_2 .......")
lock_2.acquire()
print("func_1 申请了 lock 2 ......")
lock_2.release()
print("func_1 释放了lock_2")
lock_1.release()
print("func_1 释放了lock_1")
print("func_1 done......")
def func_2():
time.sleep(3)
print("func_2 starting......")
lock_2.acquire()
print("func_2 申请了 lock 2 ......")
    #If the first sleep in this function is commented out and the one below is uncommented, a deadlock occurs
#time.sleep(3)
print("func_2 等待 lock_1 .......")
lock_1.acquire()
print("func_2 申请了 lock 1 ......")
lock_1.release()
print("func_2 释放了lock_1")
lock_2.release()
print("func_2 释放了lock_2")
print("func_2 done......")
if __name__ == '__main__':
print("主程序启动............")
t1 = threading.Thread(target=func_1,args=())
t2 = threading.Thread(target=func_2,args=())
t1.start()
t2.start()
t1.join()
t2.join()
print("主程序结束。。。。。。。。。。")
"""
import threading
import time
lock_1 = threading.Lock()
lock_2 = threading.Lock()
def func_1():
print("func_1 starting......")
    #Give the acquire a time limit and give up if it times out
lock_1.acquire(timeout=4)
print("func_1 申请了 lock 1 ......")
time.sleep(2)
print("func_1 等待 lock_2 .......")
rst = lock_2.acquire(timeout=2)
if rst:
print("func_1已经得到锁lock_2")
lock_2.release()
print("func_1 释放了lock_2")
else:
print("func_1注定没申请到lock_2....")
lock_1.release()
print("func_1 释放了lock_1")
print("func_1 done......")
def func_2():
print("func_2 starting......")
lock_2.acquire()
print("func_2 申请了 lock 2 ......")
time.sleep(3)
print("func_2 等待 lock_1 .......")
lock_1.acquire()
print("func_2 申请了 lock 1 ......")
lock_1.release()
print("func_2 释放了lock_1")
lock_2.release()
print("func_2 释放了lock_2")
print("func_2 done......")
if __name__ == '__main__':
print("主程序启动............")
t1 = threading.Thread(target=func_1,args=())
t2 = threading.Thread(target=func_2,args=())
t1.start()
t2.start()
t1.join()
t2.join()
print("主程序结束。。。。。。。。。。")
| [
"15655982512.com"
] | 15655982512.com |
90d662d9b82ee1a8490bdc09aa96fc25d2c0ce6e | 832852c679816673f708860929a36a20ca8d3e32 | /Configurations/HighMass/Full2017/configuration_mm.py | 1ee0bb7d5dbf9cfab8779a7973ed2065f8bd52d3 | [] | no_license | UniMiBAnalyses/PlotsConfigurations | c4ec7376e2757b838930dfb2615e1dc99a64e542 | 578fe518cfc608169d3418bcb63a8342d3a24390 | refs/heads/master | 2023-08-31T17:57:45.396325 | 2022-09-01T10:13:14 | 2022-09-01T10:13:14 | 172,092,793 | 0 | 13 | null | 2023-04-27T10:26:52 | 2019-02-22T15:52:44 | Python | UTF-8 | Python | false | false | 905 | py | # example of configuration file
treeName= 'Events'
tag = 'Full2017_mm'
# used by mkShape to define output directory for root files
outputDir = 'rootFile_'+tag
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts_ee_mm.py'
# file with list of samples
samplesFile = 'samples.py'
# file with list of plots
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 41.5
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plot_'+tag
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
#structureFile = 'structure.py' # Is this even needed still?
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
| [
"[email protected]"
] | |
e1c8772a70ff0b7a5ead0b6c73d8adda9807dd1a | 28c598bf75f3ab287697c7f0ff1fb13bebb7cf75 | /testgame.mmo/genesis/spawn/spawnmain.py | d1a6e96ee033931ad1e1cf4df3507ff6d4965fc9 | [] | no_license | keaysma/solinia_depreciated | 4cb8811df4427261960af375cf749903d0ca6bd1 | 4c265449a5e9ca91f7acf7ac05cd9ff2949214ac | refs/heads/master | 2020-03-25T13:08:33.913231 | 2014-09-12T08:23:26 | 2014-09-12T08:23:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py |
import races
import animal
import npc
"""
#Critter Pack
#http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
#import critters
"""
#Monster Pack Examples
#http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
#import monsters
"""
Mythical Creature Pack Examples
http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
#import mythical
| [
"[email protected]"
] | |
0ce5054c29d7414e6c56e074af1b1ef1b32afe58 | f95e73867e4383784d6fdd6a1c9fe06cffbfd019 | /CheckIO/HOME/pawn_brotherhood.py | 4b0929a05d3c3562eadcb0a6374c8a5fdf00444c | [] | no_license | linxiaohui/CodeLibrary | da03a9ed631d1d44b098ae393b4bd9e378ab38d3 | 96a5d22a8c442c4aec8a064ce383aba8a7559b2c | refs/heads/master | 2021-01-18T03:42:39.536939 | 2018-12-11T06:47:15 | 2018-12-11T06:47:15 | 85,795,767 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | #!/usr/bin/env python
# *-* coding:UTF-8 *-*
def safe_pawns(pawns):
cnt=0
for l in pawns:
col,row=l.lower()
if int(row)==1:
continue
if col>='b' and chr(ord(col)-1)+str(int(row)-1) in pawns or col<='g' and chr(ord(col)+1)+str(int(row)-1) in pawns:
cnt+=1
return cnt
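# A pawn such as "c3" counts as safe when another pawn stands one rank lower on
# an adjacent file ("b2" or "d2"), i.e. when it is protected by a brother pawn.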
if __name__ == '__main__':
    #These "asserts" are used only for self-checking and are not necessary for auto-testing
assert safe_pawns({"b4", "d4", "f4", "c3", "e3", "g5", "d2"}) == 6
assert safe_pawns({"b4", "c4", "d4", "e4", "f4", "g4", "e5"}) == 1
| [
"[email protected]"
] | |
6fef01c2498c9a9b7a52d8a294080b7fe61d6627 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ/16_2_1_Dom_ju.py | c726b4de6450f76ad915989d09c20461a1c9a8cd | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 538 | py | DOWNLOAD_DIR = "/Users/Dom/Downloads/"
def jopen( filename ):
return open( DOWNLOAD_DIR+filename+".in", "r")
def jout( filename, results, linebreaks=False ):
f = open(DOWNLOAD_DIR+filename+".out","w")
for n in range(len(results)):
f.write( "Case #" + str(n+1) + ": " )
        if isinstance(results[n], list):
if linebreaks:
f.write( "\n" )
f.write( " ".join(n) )
else:
if linebreaks:
f.write( "\n" )
f.write( str(results[n]) + "\n" )
| [
"[[email protected]]"
] | |
1e4f57cb7ae54552f4520fc68b828043c2167752 | e41c10e0b17265509fd460f860306784522eedc3 | /basic_config.py | 8e0791dbf7f899d792c04ef3414e39b0ef1d7b41 | [
"CC0-1.0"
] | permissive | hyyc116/research_paradigm_changing | c77ecf2533a6b2e2cd3f74fc3d3073454bffc55c | eac69c45a7a17eb70ace185fa22831ac785e504e | refs/heads/master | 2020-11-24T05:48:07.973347 | 2019-12-18T12:17:02 | 2019-12-18T12:17:02 | 227,992,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,102 | py | #coding:utf-8
import os
import sys
import json
from collections import defaultdict
from collections import Counter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
import math
import numpy as np
import random
import logging
import networkx as nx
from itertools import combinations
import pylab
import itertools
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import spline
from multiprocessing.dummy import Pool as ThreadPool
from networkx.algorithms import isomorphism
from matplotlib import cm as CM
from collections import Counter
from scipy.signal import wiener
import matplotlib as mpl
from matplotlib.patches import Circle
from matplotlib.patheffects import withStroke
import matplotlib.colors as colors
from matplotlib.colors import LogNorm
from matplotlib.colors import LinearSegmentedColormap
from networkx.algorithms.core import core_number
from networkx.algorithms.core import k_core
import psycopg2
from cycler import cycler
import six
# from gini import gini
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
mpl.rcParams['agg.path.chunksize'] = 10000
color_sequence = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
'#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
'#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
mpl.rcParams['axes.prop_cycle'] = cycler('color', color_sequence)
# color = plt.cm.viridis(np.linspace(0.01,0.99,6)) # This returns RGBA; convert:
# hexcolor = map(lambda rgb:'#%02x%02x%02x' % (rgb[0]*255,rgb[1]*255,rgb[2]*255),
# tuple(color[:,0:-1]))
# mpl.rcParams['axes.prop_cycle'] = cycler('color', hexcolor)
params = {'legend.fontsize': 8,
'axes.labelsize': 8,
'axes.titlesize':10,
'xtick.labelsize':8,
'ytick.labelsize':8}
pylab.rcParams.update(params)
# from paths import *
def circle(ax,x,y,radius=0.15):
circle = Circle((x, y), radius, clip_on=False, zorder=10, linewidth=1,
edgecolor='black', facecolor=(0, 0, 0, .0125),
path_effects=[withStroke(linewidth=5, foreground='w')])
ax.add_artist(circle)
def autolabel(rects,ax,total_count=None,step=1,):
"""
Attach a text label above each bar displaying its height
"""
for index in np.arange(len(rects),step=step):
rect = rects[index]
height = rect.get_height()
# print height
if not total_count is None:
ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,
'{:}\n({:.6f})'.format(int(height),height/float(total_count)),
ha='center', va='bottom')
else:
ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,
'{:}'.format(int(height)),
ha='center', va='bottom')
class dbop:
def __init__(self,insert_index=0):
self._insert_index=insert_index
self._insert_values=[]
logging.debug("connect database with normal cursor.")
self._db = psycopg2.connect(database='core_data',user="buyi",password = "ruth_hardtop_isthmus_bubbly")
self._cursor = self._db.cursor()
def query_database(self,sql):
self._cursor.close()
self._cursor = self._db.cursor()
self._cursor.execute(sql)
logging.debug("query database with sql {:}".format(sql))
return self._cursor
def insert_database(self,sql,values):
self._cursor.close()
self._cursor = self._db.cursor()
self._cursor.executemany(sql,values)
logging.debug("insert data to database with sql {:}".format(sql))
self._db.commit()
def batch_insert(self,sql,row,step,is_auto=True,end=False):
if end:
if len(self._insert_values)!=0:
logging.info("insert {:}th data into database,final insert.".format(self._insert_index))
self.insert_database(sql,self._insert_values)
else:
self._insert_index+=1
if is_auto:
row[0] = self._insert_index
self._insert_values.append(tuple(row))
if self._insert_index%step==0:
logging.info("insert {:}th data into database".format(self._insert_index))
self.insert_database(sql,self._insert_values)
self._insert_values=[]
def get_insert_count(self):
return self._insert_index
def execute_del_update(self,sql):
self._cursor.execute(sql)
self._db.commit()
logging.debug("execute delete or update sql {:}.".format(sql))
def execute_sql(self,sql):
self._cursor.execute(sql)
self._db.commit()
logging.debug("execute sql {:}.".format(sql))
def close_db(self):
self._db.close()
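# Rough usage sketch (illustration only; the SQL string and table name below are
# hypothetical, they do not come from this project):
#   db = dbop()
#   cur = db.query_database("SELECT COUNT(*) FROM some_table")
#   print(cur.fetchone())
#   db.close_db()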
def hist_2_bar(data,bins=50):
n,bins,patches = plt.hist(data,bins=bins)
return [x for x in bins[:-1]],[x for x in n]
| [
"[email protected]"
] | |
fc9e559deb7f5bddce6f8748ac93e3cc190dfb31 | 0130533e0f40a0f1cf476f519a3673b10ceabff3 | /teste/maximo.py | b0fd9c6f4d4edd354a14ef1c57bb97f12fe9654e | [] | no_license | danielcanuto/revisao_python | d79c8fbf475e1cea12ca9719d02868666e0591db | 3dbd2af74c7cc94f8e1962acb4069f40d0e71772 | refs/heads/main | 2023-03-02T04:37:30.777336 | 2021-02-11T11:16:54 | 2021-02-11T11:16:54 | 337,031,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | def maior(x, y):
if x > y:
return x
else:
return y
def maximo(x, y, z):
a = maior(x, y)
return maior(a, z)
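# e.g. maximo(2, 9, 5) == 9: maior picks the larger of the first two values and
# maximo compares that winner with the third.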
| [
"[email protected]"
] | |
abcfc7f85883e49ffa5113a31431886ddf533f5c | 5b1b478b0e7b8069762855baa8a2a4f6ff48ebf4 | /src/reviews/forms.py | bf83b29d371abc3b2b2686430c5fe69d7b383f5e | [
"MIT"
] | permissive | junaidq1/greendot | 9e4a0402fcee7182ca7531a0dd4a48edb43f79c5 | cd9e7791523317d759e0f5f9cf544deff34a8c79 | refs/heads/master | 2020-04-06T06:54:07.994376 | 2016-09-11T18:33:15 | 2016-09-11T18:33:15 | 61,906,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,047 | py | from django import forms
from .models import Review, Employee
from registration.forms import RegistrationFormUniqueEmail #this is to edit the registration redux form
# class ReviewForm(forms.ModelForm):
# class Meta:
# model = Review
# fields = [
# "content",
# "employee",
# "work_again",
# ]
#actual review post form
class ReviewForm2(forms.ModelForm):
class Meta:
model = Review
fields = ["length_working", "ques1", "ques2", "ques3","work_again", "content"]
# def content_clean(self):
# content = self.cleaned_data.get('content')
# print "jimmy"
# print len(content)
# if len(content) < 70:
# raise forms.ValidationError("Please provide a more impactful review")
# return content
#this form edits the registration redux form
class UserLevelRegistrationForm(RegistrationFormUniqueEmail):
LEVEL_CHOICES = (
('PPD', 'PPD'),
('BA', 'BA'),
('C', 'C'),
('SC', 'SC'),
('M', 'M'),
('SM', 'SM'),
('Other', 'other'),
)
OFFICE_CHOICES = (
('Kansas City', 'Kansas City'),
('Atlanta', 'Atlanta'),
('Austin', 'Austin'),
('Bengaluru', 'Bengaluru'),
('Boston', 'Boston'),
('Charlotte', 'Charlotte'),
('Chicago', 'Chicago'),
('Cincinnati', 'Cincinnati'),
('Cleveland', 'Cleveland'),
('Dallas', 'Dallas'),
('Denver', 'Denver'),
('Detroit', 'Detroit'),
('Gurgaon', 'Gurgaon'),
('Houston', 'Houston'),
('Los Angeles', 'Los Angeles'),
('McLean', 'McLean'),
('Miami', 'Miami'),
('Minneapolis', 'Minneapolis'),
('Mumbai', 'Mumbai'),
('New York City', 'New York City'),
('Orange County', 'Orange County'),
('Parsippany', 'Parsippany'),
('Philadelphia', 'Philadelphia'),
('Pittsburgh', 'Pittsburgh'),
('San Francisco', 'San Francisco'),
('Seattle', 'Seattle'),
('Other', 'other'),
)
ServiceArea_CHOICES = (
('S&O', 'S&O'),
('Tech', 'Tech'),
('Human Capital', 'Human Capital'),
)
level = forms.ChoiceField(choices=LEVEL_CHOICES, label="What is your level at the firm?")
office = forms.ChoiceField(choices=OFFICE_CHOICES, label="What office are you based out of?")
service_area = forms.ChoiceField(choices=ServiceArea_CHOICES, label="What Service Area are you a part of?")
# form to validate that person signing up knows the answer to the impact day question
class ValidationForm(forms.Form):
answer = forms.CharField()
class ContactForm(forms.Form):
username = forms.CharField(label="Please enter your username (if applicable)", required=False)
contact_email = forms.EmailField(label="Please provide a contact email")
message = forms.CharField(widget=forms.Textarea)
class AccessIssuesForm(forms.Form):
username = forms.CharField(label="Please enter your username", required=False)
contact_email = forms.EmailField(label="Please provide a contact email")
message = forms.CharField(label="Please describe the access issues you are having", widget=forms.Textarea)
class ReportDataForm(forms.Form):
DataReportChoices = (
('Incorrect', 'Incorrect practitioner data'),
('Missing', 'Missing practitioner data'),
)
data_issue = forms.ChoiceField(choices=DataReportChoices,
label="What kind of data issue would you like to report?")
    practitioner_first_name = forms.CharField(label="First name of practitioner", max_length=120)
    practitioner_last_name = forms.CharField(label="Last name of practitioner", max_length=120)
    service_area = forms.CharField(label="Service Area of practitioner", max_length=120)
    level = forms.CharField(label="Level of practitioner", max_length=120)
    office = forms.CharField(label="Office of practitioner", max_length=120)
message = forms.CharField(label="Describe data issue", max_length=1500)
class PartnerForm(forms.Form):
service_area_options = (
('S&O', 'S&O'),
('Tech', 'Tech'),
('HCap', 'HCap'),
)
service_ar = forms.ChoiceField(choices=service_area_options,
label="What Service Area are you aligned with?")
message = forms.CharField(label="What makes you a good fit for the team?", widget=forms.Textarea)
contact_email = forms.EmailField(label="Email address")
| [
"[email protected]"
] | |
e1c50ce55b94d0b8974045c6d12124d2db102332 | 21b39d50e4df56ea01453001845d1580729af1df | /jdcloud_sdk/services/redis/apis/DescribeClientListRequest.py | 450146bb94baa2db571d11a497779f82c80cb4ac | [
"Apache-2.0"
] | permissive | Tanc009/jdcloud-sdk-python | ef46eac7731aa8a1839b1fc1efd93249b7a977f0 | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | refs/heads/master | 2021-08-09T14:49:16.177709 | 2021-06-25T02:38:41 | 2021-06-25T02:38:41 | 141,714,695 | 0 | 0 | Apache-2.0 | 2018-07-20T13:21:17 | 2018-07-20T13:21:16 | null | UTF-8 | Python | false | false | 1,572 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeClientListRequest(JDCloudRequest):
"""
    Query the current client IP list
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeClientListRequest, self).__init__(
'/regions/{regionId}/cacheInstance/{cacheInstanceId}/clientList', 'GET', header, version)
self.parameters = parameters
class DescribeClientListParameters(object):
def __init__(self, regionId, cacheInstanceId, ):
"""
        :param regionId: Region ID of the region where the Redis cache instance is located. Three regions are currently available - North China-Beijing, South China-Guangzhou and East China-Shanghai - with Region IDs cn-north-1, cn-south-1 and cn-east-2 respectively
        :param cacheInstanceId: ID of the Redis cache instance, the unique identifier used to access the instance
"""
self.regionId = regionId
self.cacheInstanceId = cacheInstanceId
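# Rough usage sketch (illustration only; the region and instance id below are
# placeholder values): build the parameters, wrap them in the request, and hand
# the request to the JD Cloud SDK client for execution.
#   params = DescribeClientListParameters('cn-north-1', 'redis-xxxxxxxx')
#   request = DescribeClientListRequest(params)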
| [
"[email protected]"
] | |
425bbfbbe5ae1399dac988c42a53fa836aa09111 | cbfddfdf5c7fa8354162efe50b41f84e55aff118 | /venv/lib/python3.7/site-packages/nltk/tokenize/punkt.py | f0dcaca359521808d4344948c5389317ab0fdec1 | [
"MIT",
"Apache-2.0"
] | permissive | tclerico/SAAC | 8d2245221dd135aea67c5e079ac7eaf542b25e2f | 2f52007ae8043096662e76da828a84e87f71091e | refs/heads/master | 2022-12-09T21:56:33.430404 | 2019-02-20T14:23:51 | 2019-02-20T14:23:51 | 153,152,229 | 3 | 0 | MIT | 2022-09-16T17:52:47 | 2018-10-15T17:13:29 | Python | UTF-8 | Python | false | false | 62,162 | py | # Natural Language Toolkit: Punkt sentence tokenizer
#
# Copyright (C) 2001-2018 NLTK Project
# Algorithm: Kiss & Strunk (2006)
# Author: Willy <[email protected]> (original Python port)
# Steven Bird <[email protected]> (additions)
# Edward Loper <[email protected]> (rewrite)
# Joel Nothman <[email protected]> (almost rewrite)
# Arthur Darcet <[email protected]> (fixes)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
r"""
Punkt Sentence Tokenizer
This tokenizer divides a text into a list of sentences
by using an unsupervised algorithm to build a model for abbreviation
words, collocations, and words that start sentences. It must be
trained on a large collection of plaintext in the target language
before it can be used.
The NLTK data package includes a pre-trained Punkt tokenizer for
English.
>>> import nltk.data
>>> text = '''
... Punkt knows that the periods in Mr. Smith and Johann S. Bach
... do not mark sentence boundaries. And sometimes sentences
... can start with non-capitalized words. i is a good variable
... name.
... '''
>>> sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
>>> print('\n-----\n'.join(sent_detector.tokenize(text.strip())))
Punkt knows that the periods in Mr. Smith and Johann S. Bach
do not mark sentence boundaries.
-----
And sometimes sentences
can start with non-capitalized words.
-----
i is a good variable
name.
(Note that whitespace from the original text, including newlines, is
retained in the output.)
Punctuation following sentences is also included by default
(from NLTK 3.0 onwards). It can be excluded with the realign_boundaries
flag.
>>> text = '''
... (How does it deal with this parenthesis?) "It should be part of the
... previous sentence." "(And the same with this one.)" ('And this one!')
... "('(And (this)) '?)" [(and this. )]
... '''
>>> print('\n-----\n'.join(
... sent_detector.tokenize(text.strip())))
(How does it deal with this parenthesis?)
-----
"It should be part of the
previous sentence."
-----
"(And the same with this one.)"
-----
('And this one!')
-----
"('(And (this)) '?)"
-----
[(and this. )]
>>> print('\n-----\n'.join(
... sent_detector.tokenize(text.strip(), realign_boundaries=False)))
(How does it deal with this parenthesis?
-----
) "It should be part of the
previous sentence.
-----
" "(And the same with this one.
-----
)" ('And this one!
-----
')
"('(And (this)) '?
-----
)" [(and this.
-----
)]
However, Punkt is designed to learn parameters (a list of abbreviations, etc.)
unsupervised from a corpus similar to the target domain. The pre-packaged models
may therefore be unsuitable: use ``PunktSentenceTokenizer(text)`` to learn
parameters from the given text.
:class:`.PunktTrainer` learns parameters such as a list of abbreviations
(without supervision) from portions of text. Using a ``PunktTrainer`` directly
allows for incremental training and modification of the hyper-parameters used
to decide what is considered an abbreviation, etc.
The algorithm for this tokenizer is described in::
Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence
Boundary Detection. Computational Linguistics 32: 485-525.
"""
from __future__ import print_function, unicode_literals, division
# TODO: Make orthographic heuristic less susceptible to overtraining
# TODO: Frequent sentence starters optionally exclude always-capitalised words
# FIXME: Problem with ending string with e.g. '!!!' -> '!! !'
import re
import math
from collections import defaultdict
from six import string_types
from nltk.compat import unicode_repr, python_2_unicode_compatible
from nltk.probability import FreqDist
from nltk.tokenize.api import TokenizerI
######################################################################
# { Orthographic Context Constants
######################################################################
# The following constants are used to describe the orthographic
# contexts in which a word can occur. BEG=beginning, MID=middle,
# UNK=unknown, UC=uppercase, LC=lowercase, NC=no case.
_ORTHO_BEG_UC = 1 << 1
"""Orthographic context: beginning of a sentence with upper case."""
_ORTHO_MID_UC = 1 << 2
"""Orthographic context: middle of a sentence with upper case."""
_ORTHO_UNK_UC = 1 << 3
"""Orthographic context: unknown position in a sentence with upper case."""
_ORTHO_BEG_LC = 1 << 4
"""Orthographic context: beginning of a sentence with lower case."""
_ORTHO_MID_LC = 1 << 5
"""Orthographic context: middle of a sentence with lower case."""
_ORTHO_UNK_LC = 1 << 6
"""Orthographic context: unknown position in a sentence with lower case."""
_ORTHO_UC = _ORTHO_BEG_UC + _ORTHO_MID_UC + _ORTHO_UNK_UC
"""Orthographic context: occurs with upper case."""
_ORTHO_LC = _ORTHO_BEG_LC + _ORTHO_MID_LC + _ORTHO_UNK_LC
"""Orthographic context: occurs with lower case."""
_ORTHO_MAP = {
('initial', 'upper'): _ORTHO_BEG_UC,
('internal', 'upper'): _ORTHO_MID_UC,
('unknown', 'upper'): _ORTHO_UNK_UC,
('initial', 'lower'): _ORTHO_BEG_LC,
('internal', 'lower'): _ORTHO_MID_LC,
('unknown', 'lower'): _ORTHO_UNK_LC,
}
"""A map from context position and first-letter case to the
appropriate orthographic context flag."""
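# A small illustrative sketch (not used by the tokenizer itself) of how these
# flags combine: a word type's orthographic context is the bitwise OR of the flag
# for every (position, case) it was observed in, and the aggregate masks
# _ORTHO_UC / _ORTHO_LC test the broader property.
def _example_ortho_flags():
    seen = _ORTHO_BEG_UC | _ORTHO_MID_LC  # capitalised sentence-initially, lowercase mid-sentence
    return bool(seen & _ORTHO_UC), bool(seen & _ORTHO_LC)  # -> (True, True)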
# } (end orthographic context constants)
######################################################################
######################################################################
# { Decision reasons for debugging
######################################################################
REASON_DEFAULT_DECISION = 'default decision'
REASON_KNOWN_COLLOCATION = 'known collocation (both words)'
REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC = 'abbreviation + orthographic heuristic'
REASON_ABBR_WITH_SENTENCE_STARTER = 'abbreviation + frequent sentence starter'
REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC = 'initial + orthographic heuristic'
REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC = 'number + orthographic heuristic'
REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC = 'initial + special orthographic heuristic'
# } (end decision reasons for debugging)
######################################################################
######################################################################
# { Language-dependent variables
######################################################################
class PunktLanguageVars(object):
"""
Stores variables, mostly regular expressions, which may be
language-dependent for correct application of the algorithm.
An extension of this class may modify its properties to suit
a language other than English; an instance can then be passed
as an argument to PunktSentenceTokenizer and PunktTrainer
constructors.
"""
__slots__ = ('_re_period_context', '_re_word_tokenizer')
def __getstate__(self):
# All modifications to the class are performed by inheritance.
# Non-default parameters to be pickled must be defined in the inherited
# class.
return 1
def __setstate__(self, state):
return 1
sent_end_chars = ('.', '?', '!')
"""Characters which are candidates for sentence boundaries"""
@property
def _re_sent_end_chars(self):
return '[%s]' % re.escape(''.join(self.sent_end_chars))
internal_punctuation = ',:;' # might want to extend this..
"""sentence internal punctuation, which indicates an abbreviation if
preceded by a period-final token."""
re_boundary_realignment = re.compile(r'["\')\]}]+?(?:\s+|(?=--)|$)',
re.MULTILINE)
"""Used to realign punctuation that should be included in a sentence
although it follows the period (or ?, !)."""
_re_word_start = r"[^\(\"\`{\[:;&\#\*@\)}\]\-,]"
"""Excludes some characters from starting word tokens"""
_re_non_word_chars = r"(?:[?!)\";}\]\*:@\'\({\[])"
"""Characters that cannot appear within words"""
_re_multi_char_punct = r"(?:\-{2,}|\.{2,}|(?:\.\s){2,}\.)"
"""Hyphen and ellipsis are multi-character punctuation"""
_word_tokenize_fmt = r'''(
%(MultiChar)s
|
(?=%(WordStart)s)\S+? # Accept word characters until end is found
(?= # Sequences marking a word's end
\s| # White-space
$| # End-of-string
%(NonWord)s|%(MultiChar)s| # Punctuation
,(?=$|\s|%(NonWord)s|%(MultiChar)s) # Comma if at end of word
)
|
\S
)'''
"""Format of a regular expression to split punctuation from words,
excluding period."""
def _word_tokenizer_re(self):
"""Compiles and returns a regular expression for word tokenization"""
try:
return self._re_word_tokenizer
except AttributeError:
self._re_word_tokenizer = re.compile(
self._word_tokenize_fmt %
{
'NonWord': self._re_non_word_chars,
'MultiChar': self._re_multi_char_punct,
'WordStart': self._re_word_start,
},
re.UNICODE | re.VERBOSE
)
return self._re_word_tokenizer
def word_tokenize(self, s):
"""Tokenize a string to split off punctuation other than periods"""
return self._word_tokenizer_re().findall(s)
_period_context_fmt = r"""
\S* # some word material
%(SentEndChars)s # a potential sentence ending
(?=(?P<after_tok>
%(NonWord)s # either other punctuation
|
\s+(?P<next_tok>\S+) # or whitespace and some other token
))"""
"""Format of a regular expression to find contexts including possible
sentence boundaries. Matches token which the possible sentence boundary
ends, and matches the following token within a lookahead expression."""
def period_context_re(self):
"""Compiles and returns a regular expression to find contexts
including possible sentence boundaries."""
try:
return self._re_period_context
except:
self._re_period_context = re.compile(
self._period_context_fmt %
{
'NonWord': self._re_non_word_chars,
'SentEndChars': self._re_sent_end_chars,
},
re.UNICODE | re.VERBOSE)
return self._re_period_context
_re_non_punct = re.compile(r'[^\W\d]', re.UNICODE)
"""Matches token types that are not merely punctuation. (Types for
numeric tokens are changed to ##number## and hence contain alpha.)"""
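# A minimal illustrative sketch of the language-specific extension described in
# the PunktLanguageVars docstring: a subclass only needs to override the relevant
# class attributes (here adding the Arabic question mark as a sentence ender).
class _ExampleArabicLanguageVars(PunktLanguageVars):
    sent_end_chars = ('.', '?', '!', '\u061f')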
# }
######################################################################
# ////////////////////////////////////////////////////////////
# { Helper Functions
# ////////////////////////////////////////////////////////////
def _pair_iter(it):
"""
Yields pairs of tokens from the given iterator such that each input
token will appear as the first element in a yielded tuple. The last
pair will have None as its second element.
"""
it = iter(it)
prev = next(it)
for el in it:
yield (prev, el)
prev = el
yield (prev, None)
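# A tiny illustrative sketch of the pairing behaviour described above:
# _pair_iter(['a', 'b', 'c']) yields ('a', 'b'), ('b', 'c'), ('c', None).
def _example_pair_iter():
    return list(_pair_iter(['a', 'b', 'c']))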
######################################################################
# { Punkt Parameters
######################################################################
class PunktParameters(object):
"""Stores data used to perform sentence boundary detection with Punkt."""
def __init__(self):
self.abbrev_types = set()
"""A set of word types for known abbreviations."""
self.collocations = set()
"""A set of word type tuples for known common collocations
where the first word ends in a period. E.g., ('S.', 'Bach')
is a common collocation in a text that discusses 'Johann
S. Bach'. These count as negative evidence for sentence
boundaries."""
self.sent_starters = set()
"""A set of word types for words that often appear at the
beginning of sentences."""
self.ortho_context = defaultdict(int)
"""A dictionary mapping word types to the set of orthographic
contexts that word type appears in. Contexts are represented
by adding orthographic context flags: ..."""
def clear_abbrevs(self):
self.abbrev_types = set()
def clear_collocations(self):
self.collocations = set()
def clear_sent_starters(self):
self.sent_starters = set()
def clear_ortho_context(self):
self.ortho_context = defaultdict(int)
def add_ortho_context(self, typ, flag):
self.ortho_context[typ] |= flag
def _debug_ortho_context(self, typ):
c = self.ortho_context[typ]
if c & _ORTHO_BEG_UC:
yield 'BEG-UC'
if c & _ORTHO_MID_UC:
yield 'MID-UC'
if c & _ORTHO_UNK_UC:
yield 'UNK-UC'
if c & _ORTHO_BEG_LC:
yield 'BEG-LC'
if c & _ORTHO_MID_LC:
yield 'MID-LC'
if c & _ORTHO_UNK_LC:
yield 'UNK-LC'
######################################################################
# { PunktToken
######################################################################
@python_2_unicode_compatible
class PunktToken(object):
"""Stores a token of text with annotations produced during
sentence boundary detection."""
_properties = [
'parastart', 'linestart',
'sentbreak', 'abbr', 'ellipsis'
]
__slots__ = ['tok', 'type', 'period_final'] + _properties
def __init__(self, tok, **params):
self.tok = tok
self.type = self._get_type(tok)
self.period_final = tok.endswith('.')
for p in self._properties:
setattr(self, p, None)
for k in params:
setattr(self, k, params[k])
# ////////////////////////////////////////////////////////////
# { Regular expressions for properties
# ////////////////////////////////////////////////////////////
# Note: [A-Za-z] is approximated by [^\W\d] in the general case.
_RE_ELLIPSIS = re.compile(r'\.\.+$')
_RE_NUMERIC = re.compile(r'^-?[\.,]?\d[\d,\.-]*\.?$')
_RE_INITIAL = re.compile(r'[^\W\d]\.$', re.UNICODE)
_RE_ALPHA = re.compile(r'[^\W\d]+$', re.UNICODE)
# ////////////////////////////////////////////////////////////
# { Derived properties
# ////////////////////////////////////////////////////////////
def _get_type(self, tok):
"""Returns a case-normalized representation of the token."""
return self._RE_NUMERIC.sub('##number##', tok.lower())
@property
def type_no_period(self):
"""
The type with its final period removed if it has one.
"""
if len(self.type) > 1 and self.type[-1] == '.':
return self.type[:-1]
return self.type
@property
def type_no_sentperiod(self):
"""
The type with its final period removed if it is marked as a
sentence break.
"""
if self.sentbreak:
return self.type_no_period
return self.type
@property
def first_upper(self):
"""True if the token's first character is uppercase."""
return self.tok[0].isupper()
@property
def first_lower(self):
"""True if the token's first character is lowercase."""
return self.tok[0].islower()
@property
def first_case(self):
if self.first_lower:
return 'lower'
elif self.first_upper:
return 'upper'
return 'none'
@property
def is_ellipsis(self):
"""True if the token text is that of an ellipsis."""
return self._RE_ELLIPSIS.match(self.tok)
@property
def is_number(self):
"""True if the token text is that of a number."""
return self.type.startswith('##number##')
@property
def is_initial(self):
"""True if the token text is that of an initial."""
return self._RE_INITIAL.match(self.tok)
@property
def is_alpha(self):
"""True if the token text is all alphabetic."""
return self._RE_ALPHA.match(self.tok)
@property
def is_non_punct(self):
"""True if the token is either a number or is alphabetic."""
return _re_non_punct.search(self.type)
# ////////////////////////////////////////////////////////////
# { String representation
# ////////////////////////////////////////////////////////////
def __repr__(self):
"""
A string representation of the token that can reproduce it
with eval(), which lists all the token's non-default
annotations.
"""
typestr = (' type=%s,' % unicode_repr(self.type)
if self.type != self.tok else '')
propvals = ', '.join(
'%s=%s' % (p, unicode_repr(getattr(self, p)))
for p in self._properties
if getattr(self, p)
)
return '%s(%s,%s %s)' % (self.__class__.__name__,
unicode_repr(self.tok), typestr, propvals)
def __str__(self):
"""
A string representation akin to that used by Kiss and Strunk.
"""
res = self.tok
if self.abbr:
res += '<A>'
if self.ellipsis:
res += '<E>'
if self.sentbreak:
res += '<S>'
return res
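# A quick illustrative sketch of the derived properties above: a single letter
# followed by a period is matched as an initial, and numeric text is normalised
# to the ##number## type.
def _example_token_properties():
    initial, number = PunktToken('J.'), PunktToken('12.5')
    return bool(initial.is_initial), initial.period_final, number.is_number  # -> (True, True, True)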
######################################################################
# { Punkt base class
######################################################################
class PunktBaseClass(object):
"""
Includes common components of PunktTrainer and PunktSentenceTokenizer.
"""
def __init__(self, lang_vars=PunktLanguageVars(), token_cls=PunktToken,
params=None):
if params is None:
params = PunktParameters()
self._params = params
self._lang_vars = lang_vars
self._Token = token_cls
"""The collection of parameters that determines the behavior
of the punkt tokenizer."""
# ////////////////////////////////////////////////////////////
# { Word tokenization
# ////////////////////////////////////////////////////////////
def _tokenize_words(self, plaintext):
"""
Divide the given text into tokens, using the punkt word
segmentation regular expression, and generate the resulting list
of tokens augmented as three-tuples with two boolean values for whether
the given token occurs at the start of a paragraph or a new line,
respectively.
"""
parastart = False
for line in plaintext.split('\n'):
if line.strip():
line_toks = iter(self._lang_vars.word_tokenize(line))
yield self._Token(next(line_toks),
parastart=parastart, linestart=True)
parastart = False
for t in line_toks:
yield self._Token(t)
else:
parastart = True
# ////////////////////////////////////////////////////////////
# { Annotation Procedures
# ////////////////////////////////////////////////////////////
def _annotate_first_pass(self, tokens):
"""
Perform the first pass of annotation, which makes decisions
        based purely on the word type of each word:
- '?', '!', and '.' are marked as sentence breaks.
- sequences of two or more periods are marked as ellipsis.
- any word ending in '.' that's a known abbreviation is
marked as an abbreviation.
- any other word ending in '.' is marked as a sentence break.
        Each token is yielded with these annotations applied in place
        (via the ``sentbreak``, ``abbr`` and ``ellipsis`` attributes).
"""
for aug_tok in tokens:
self._first_pass_annotation(aug_tok)
yield aug_tok
def _first_pass_annotation(self, aug_tok):
"""
Performs type-based annotation on a single token.
"""
tok = aug_tok.tok
if tok in self._lang_vars.sent_end_chars:
aug_tok.sentbreak = True
elif aug_tok.is_ellipsis:
aug_tok.ellipsis = True
elif aug_tok.period_final and not tok.endswith('..'):
if (tok[:-1].lower() in self._params.abbrev_types or
tok[:-1].lower().split('-')[-1] in self._params.abbrev_types):
aug_tok.abbr = True
else:
aug_tok.sentbreak = True
return
######################################################################
# { Punkt Trainer
######################################################################
class PunktTrainer(PunktBaseClass):
"""Learns parameters used in Punkt sentence boundary detection."""
def __init__(self, train_text=None, verbose=False,
lang_vars=PunktLanguageVars(), token_cls=PunktToken):
PunktBaseClass.__init__(self, lang_vars=lang_vars,
token_cls=token_cls)
self._type_fdist = FreqDist()
"""A frequency distribution giving the frequency of each
case-normalized token type in the training data."""
self._num_period_toks = 0
"""The number of words ending in period in the training data."""
self._collocation_fdist = FreqDist()
"""A frequency distribution giving the frequency of all
bigrams in the training data where the first word ends in a
period. Bigrams are encoded as tuples of word types.
Especially common collocations are extracted from this
frequency distribution, and stored in
``_params``.``collocations <PunktParameters.collocations>``."""
self._sent_starter_fdist = FreqDist()
"""A frequency distribution giving the frequency of all words
that occur at the training data at the beginning of a sentence
(after the first pass of annotation). Especially common
sentence starters are extracted from this frequency
distribution, and stored in ``_params.sent_starters``.
"""
self._sentbreak_count = 0
"""The total number of sentence breaks identified in training, used for
calculating the frequent sentence starter heuristic."""
self._finalized = True
"""A flag as to whether the training has been finalized by finding
collocations and sentence starters, or whether finalize_training()
still needs to be called."""
if train_text:
self.train(train_text, verbose, finalize=True)
def get_params(self):
"""
Calculates and returns parameters for sentence boundary detection as
derived from training."""
if not self._finalized:
self.finalize_training()
return self._params
# ////////////////////////////////////////////////////////////
# { Customization Variables
# ////////////////////////////////////////////////////////////
ABBREV = 0.3
"""cut-off value whether a 'token' is an abbreviation"""
IGNORE_ABBREV_PENALTY = False
"""allows the disabling of the abbreviation penalty heuristic, which
exponentially disadvantages words that are found at times without a
final period."""
ABBREV_BACKOFF = 5
"""upper cut-off for Mikheev's(2002) abbreviation detection algorithm"""
COLLOCATION = 7.88
"""minimal log-likelihood value that two tokens need to be considered
as a collocation"""
SENT_STARTER = 30
"""minimal log-likelihood value that a token requires to be considered
as a frequent sentence starter"""
INCLUDE_ALL_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word ends in a period. It may be useful in corpora where there is a lot
of variation that makes abbreviations like Mr difficult to identify."""
INCLUDE_ABBREV_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word is an abbreviation. Such collocations override the orthographic
heuristic, but not the sentence starter heuristic. This is overridden by
INCLUDE_ALL_COLLOCS, and if both are false, only collocations with initials
and ordinals are considered."""
""""""
MIN_COLLOC_FREQ = 1
"""this sets a minimum bound on the number of times a bigram needs to
appear before it can be considered a collocation, in addition to log
likelihood statistics. This is useful when INCLUDE_ALL_COLLOCS is True."""
# ////////////////////////////////////////////////////////////
# { Training..
# ////////////////////////////////////////////////////////////
def train(self, text, verbose=False, finalize=True):
"""
Collects training data from a given text. If finalize is True, it
will determine all the parameters for sentence boundary detection. If
not, this will be delayed until get_params() or finalize_training() is
called. If verbose is True, abbreviations found will be listed.
"""
# Break the text into tokens; record which token indices correspond to
# line starts and paragraph starts; and determine their types.
self._train_tokens(self._tokenize_words(text), verbose)
if finalize:
self.finalize_training(verbose)
def train_tokens(self, tokens, verbose=False, finalize=True):
"""
Collects training data from a given list of tokens.
"""
self._train_tokens((self._Token(t) for t in tokens), verbose)
if finalize:
self.finalize_training(verbose)
def _train_tokens(self, tokens, verbose):
self._finalized = False
# Ensure tokens are a list
tokens = list(tokens)
# Find the frequency of each case-normalized type. (Don't
# strip off final periods.) Also keep track of the number of
# tokens that end in periods.
for aug_tok in tokens:
self._type_fdist[aug_tok.type] += 1
if aug_tok.period_final:
self._num_period_toks += 1
# Look for new abbreviations, and for types that no longer are
unique_types = self._unique_types(tokens)
for abbr, score, is_add in self._reclassify_abbrev_types(unique_types):
if score >= self.ABBREV:
if is_add:
self._params.abbrev_types.add(abbr)
if verbose:
print((' Abbreviation: [%6.4f] %s' %
(score, abbr)))
else:
if not is_add:
self._params.abbrev_types.remove(abbr)
if verbose:
print((' Removed abbreviation: [%6.4f] %s' %
(score, abbr)))
# Make a preliminary pass through the document, marking likely
# sentence breaks, abbreviations, and ellipsis tokens.
tokens = list(self._annotate_first_pass(tokens))
# Check what contexts each word type can appear in, given the
# case of its first letter.
self._get_orthography_data(tokens)
# We need total number of sentence breaks to find sentence starters
self._sentbreak_count += self._get_sentbreak_count(tokens)
# The remaining heuristics relate to pairs of tokens where the first
# ends in a period.
for aug_tok1, aug_tok2 in _pair_iter(tokens):
if not aug_tok1.period_final or not aug_tok2:
continue
# Is the first token a rare abbreviation?
if self._is_rare_abbrev_type(aug_tok1, aug_tok2):
self._params.abbrev_types.add(aug_tok1.type_no_period)
if verbose:
print((' Rare Abbrev: %s' % aug_tok1.type))
# Does second token have a high likelihood of starting a sentence?
if self._is_potential_sent_starter(aug_tok2, aug_tok1):
self._sent_starter_fdist[aug_tok2.type] += 1
# Is this bigram a potential collocation?
if self._is_potential_collocation(aug_tok1, aug_tok2):
self._collocation_fdist[
(aug_tok1.type_no_period, aug_tok2.type_no_sentperiod)] += 1
def _unique_types(self, tokens):
return set(aug_tok.type for aug_tok in tokens)
def finalize_training(self, verbose=False):
"""
Uses data that has been gathered in training to determine likely
collocations and sentence starters.
"""
self._params.clear_sent_starters()
for typ, ll in self._find_sent_starters():
self._params.sent_starters.add(typ)
if verbose:
print((' Sent Starter: [%6.4f] %r' % (ll, typ)))
self._params.clear_collocations()
for (typ1, typ2), ll in self._find_collocations():
self._params.collocations.add((typ1, typ2))
if verbose:
print((' Collocation: [%6.4f] %r+%r' %
(ll, typ1, typ2)))
self._finalized = True
# ////////////////////////////////////////////////////////////
# { Overhead reduction
# ////////////////////////////////////////////////////////////
def freq_threshold(self, ortho_thresh=2, type_thresh=2, colloc_thres=2,
sentstart_thresh=2):
"""
Allows memory use to be reduced after much training by removing data
about rare tokens that are unlikely to have a statistical effect with
further training. Entries occurring above the given thresholds will be
retained.
"""
if ortho_thresh > 1:
old_oc = self._params.ortho_context
self._params.clear_ortho_context()
for tok in self._type_fdist:
count = self._type_fdist[tok]
if count >= ortho_thresh:
self._params.ortho_context[tok] = old_oc[tok]
self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh)
self._collocation_fdist = self._freq_threshold(
self._collocation_fdist, colloc_thres)
self._sent_starter_fdist = self._freq_threshold(
self._sent_starter_fdist, sentstart_thresh)
def _freq_threshold(self, fdist, threshold):
"""
        Returns a FreqDist containing only the entries whose counts are at or
        above the given threshold, plus a mapping (None -> count_removed).
"""
# We assume that there is more data below the threshold than above it
# and so create a new FreqDist rather than working in place.
res = FreqDist()
num_removed = 0
for tok in fdist:
count = fdist[tok]
if count < threshold:
num_removed += 1
else:
res[tok] += count
res[None] += num_removed
return res
# ////////////////////////////////////////////////////////////
# { Orthographic data
# ////////////////////////////////////////////////////////////
def _get_orthography_data(self, tokens):
"""
Collect information about whether each token type occurs
with different case patterns (i) overall, (ii) at
sentence-initial positions, and (iii) at sentence-internal
positions.
"""
# 'initial' or 'internal' or 'unknown'
context = 'internal'
tokens = list(tokens)
for aug_tok in tokens:
# If we encounter a paragraph break, then it's a good sign
# that it's a sentence break. But err on the side of
# caution (by not positing a sentence break) if we just
# saw an abbreviation.
if aug_tok.parastart and context != 'unknown':
context = 'initial'
# If we're at the beginning of a line, then we can't decide
# between 'internal' and 'initial'.
if aug_tok.linestart and context == 'internal':
context = 'unknown'
# Find the case-normalized type of the token. If it's a
# sentence-final token, strip off the period.
typ = aug_tok.type_no_sentperiod
# Update the orthographic context table.
flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0)
if flag:
self._params.add_ortho_context(typ, flag)
# Decide whether the next word is at a sentence boundary.
if aug_tok.sentbreak:
if not (aug_tok.is_number or aug_tok.is_initial):
context = 'initial'
else:
context = 'unknown'
elif aug_tok.ellipsis or aug_tok.abbr:
context = 'unknown'
else:
context = 'internal'
# ////////////////////////////////////////////////////////////
# { Abbreviations
# ////////////////////////////////////////////////////////////
def _reclassify_abbrev_types(self, types):
"""
(Re)classifies each given token if
- it is period-final and not a known abbreviation; or
- it is not period-final and is otherwise a known abbreviation
by checking whether its previous classification still holds according
to the heuristics of section 3.
Yields triples (abbr, score, is_add) where abbr is the type in question,
score is its log-likelihood with penalties applied, and is_add specifies
whether the present type is a candidate for inclusion or exclusion as an
abbreviation, such that:
- (is_add and score >= 0.3) suggests a new abbreviation; and
- (not is_add and score < 0.3) suggests excluding an abbreviation.
"""
# (While one could recalculate abbreviations from all .-final tokens at
# every iteration, in cases requiring efficiency, the number of tokens
# in the present training document will be much less.)
for typ in types:
# Check some basic conditions, to rule out words that are
# clearly not abbrev_types.
if not _re_non_punct.search(typ) or typ == '##number##':
continue
if typ.endswith('.'):
if typ in self._params.abbrev_types:
continue
typ = typ[:-1]
is_add = True
else:
if typ not in self._params.abbrev_types:
continue
is_add = False
# Count how many periods & nonperiods are in the
# candidate.
num_periods = typ.count('.') + 1
num_nonperiods = len(typ) - num_periods + 1
# Let <a> be the candidate without the period, and <b>
# be the period. Find a log likelihood ratio that
# indicates whether <ab> occurs as a single unit (high
# value of ll), or as two independent units <a> and
# <b> (low value of ll).
count_with_period = self._type_fdist[typ + '.']
count_without_period = self._type_fdist[typ]
ll = self._dunning_log_likelihood(
count_with_period + count_without_period,
self._num_period_toks, count_with_period,
self._type_fdist.N())
# Apply three scaling factors to 'tweak' the basic log
# likelihood ratio:
# F_length: long word -> less likely to be an abbrev
# F_periods: more periods -> more likely to be an abbrev
# F_penalty: penalize occurrences w/o a period
f_length = math.exp(-num_nonperiods)
f_periods = num_periods
f_penalty = (int(self.IGNORE_ABBREV_PENALTY)
or math.pow(num_nonperiods, -count_without_period))
score = ll * f_length * f_periods * f_penalty
yield typ, score, is_add
def find_abbrev_types(self):
"""
Recalculates abbreviations given type frequencies, despite no prior
determination of abbreviations.
This fails to include abbreviations otherwise found as "rare".
"""
self._params.clear_abbrevs()
tokens = (typ for typ in self._type_fdist if typ and typ.endswith('.'))
for abbr, score, is_add in self._reclassify_abbrev_types(tokens):
if score >= self.ABBREV:
self._params.abbrev_types.add(abbr)
# This function combines the work done by the original code's
# functions `count_orthography_context`, `get_orthography_count`,
# and `get_rare_abbreviations`.
def _is_rare_abbrev_type(self, cur_tok, next_tok):
"""
A word type is counted as a rare abbreviation if...
- it's not already marked as an abbreviation
- it occurs fewer than ABBREV_BACKOFF times
- either it is followed by a sentence-internal punctuation
mark, *or* it is followed by a lower-case word that
sometimes appears with upper case, but never occurs with
lower case at the beginning of sentences.
"""
if cur_tok.abbr or not cur_tok.sentbreak:
return False
# Find the case-normalized type of the token. If it's
# a sentence-final token, strip off the period.
typ = cur_tok.type_no_sentperiod
# Proceed only if the type hasn't been categorized as an
# abbreviation already, and is sufficiently rare...
count = self._type_fdist[typ] + self._type_fdist[typ[:-1]]
if (typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF):
return False
# Record this token as an abbreviation if the next
# token is a sentence-internal punctuation mark.
# [XX] :1 or check the whole thing??
if next_tok.tok[:1] in self._lang_vars.internal_punctuation:
return True
# Record this type as an abbreviation if the next
# token... (i) starts with a lower case letter,
# (ii) sometimes occurs with an uppercase letter,
        # and (iii) never occurs with an uppercase letter
# sentence-internally.
# [xx] should the check for (ii) be modified??
elif next_tok.first_lower:
typ2 = next_tok.type_no_sentperiod
typ2ortho_context = self._params.ortho_context[typ2]
if ((typ2ortho_context & _ORTHO_BEG_UC) and
not (typ2ortho_context & _ORTHO_MID_UC)):
return True
# ////////////////////////////////////////////////////////////
# { Log Likelihoods
# ////////////////////////////////////////////////////////////
# helper for _reclassify_abbrev_types:
@staticmethod
def _dunning_log_likelihood(count_a, count_b, count_ab, N):
"""
A function that calculates the modified Dunning log-likelihood
ratio scores for abbreviation candidates. The details of how
        this works are available in the paper.
"""
p1 = count_b / N
p2 = 0.99
null_hypo = (count_ab * math.log(p1) +
(count_a - count_ab) * math.log(1.0 - p1))
alt_hypo = (count_ab * math.log(p2) +
(count_a - count_ab) * math.log(1.0 - p2))
likelihood = null_hypo - alt_hypo
return (-2.0 * likelihood)
@staticmethod
def _col_log_likelihood(count_a, count_b, count_ab, N):
"""
        A function that computes the plain log-likelihood estimate; in
        the original paper it is described in algorithms 6 and 7.
        This *should* be the original Dunning log-likelihood, unlike
        the previous function, which uses modified Dunning
        log-likelihood values.
"""
p = count_b / N
p1 = count_ab / count_a
try:
p2 = (count_b - count_ab) / (N - count_a)
except ZeroDivisionError as e:
p2 = 1
try:
summand1 = (count_ab * math.log(p) +
(count_a - count_ab) * math.log(1.0 - p))
except ValueError as e:
summand1 = 0
try:
summand2 = ((count_b - count_ab) * math.log(p) +
(N - count_a - count_b + count_ab) * math.log(1.0 - p))
except ValueError as e:
summand2 = 0
if count_a == count_ab or p1 <= 0 or p1 >= 1:
summand3 = 0
else:
summand3 = (count_ab * math.log(p1) +
(count_a - count_ab) * math.log(1.0 - p1))
if count_b == count_ab or p2 <= 0 or p2 >= 1:
summand4 = 0
else:
summand4 = ((count_b - count_ab) * math.log(p2) +
(N - count_a - count_b + count_ab) * math.log(1.0 - p2))
likelihood = summand1 + summand2 - summand3 - summand4
return (-2.0 * likelihood)
# ////////////////////////////////////////////////////////////
# { Collocation Finder
# ////////////////////////////////////////////////////////////
def _is_potential_collocation(self, aug_tok1, aug_tok2):
"""
Returns True if the pair of tokens may form a collocation given
log-likelihood statistics.
"""
return ((self.INCLUDE_ALL_COLLOCS or
(self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr) or
(aug_tok1.sentbreak and
(aug_tok1.is_number or aug_tok1.is_initial)))
and aug_tok1.is_non_punct
and aug_tok2.is_non_punct)
def _find_collocations(self):
"""
Generates likely collocations and their log-likelihood.
"""
for types in self._collocation_fdist:
try:
typ1, typ2 = types
except TypeError:
# types may be None after calling freq_threshold()
continue
if typ2 in self._params.sent_starters:
continue
col_count = self._collocation_fdist[types]
typ1_count = self._type_fdist[typ1] + self._type_fdist[typ1 + '.']
typ2_count = self._type_fdist[typ2] + self._type_fdist[typ2 + '.']
if (typ1_count > 1 and typ2_count > 1
and self.MIN_COLLOC_FREQ <
col_count <= min(typ1_count, typ2_count)):
ll = self._col_log_likelihood(typ1_count, typ2_count,
col_count, self._type_fdist.N())
# Filter out the not-so-collocative
if (ll >= self.COLLOCATION and
(self._type_fdist.N() / typ1_count >
typ2_count / col_count)):
yield (typ1, typ2), ll
# ////////////////////////////////////////////////////////////
# { Sentence-Starter Finder
# ////////////////////////////////////////////////////////////
def _is_potential_sent_starter(self, cur_tok, prev_tok):
"""
        Returns True given a token and the token that precedes it if it
seems clear that the token is beginning a sentence.
"""
        # If a token (i) is preceded by a sentence break that is
        # not a potential ordinal number or initial, and (ii) is
        # alphabetic, then it is a sentence starter.
return (prev_tok.sentbreak and
not (prev_tok.is_number or prev_tok.is_initial) and
cur_tok.is_alpha)
def _find_sent_starters(self):
"""
Uses collocation heuristics for each candidate token to
determine if it frequently starts sentences.
"""
for typ in self._sent_starter_fdist:
if not typ:
continue
typ_at_break_count = self._sent_starter_fdist[typ]
typ_count = self._type_fdist[typ] + self._type_fdist[typ + '.']
if typ_count < typ_at_break_count:
# needed after freq_threshold
continue
ll = self._col_log_likelihood(self._sentbreak_count, typ_count,
typ_at_break_count,
self._type_fdist.N())
if (ll >= self.SENT_STARTER and
self._type_fdist.N() / self._sentbreak_count >
typ_count / typ_at_break_count):
yield typ, ll
def _get_sentbreak_count(self, tokens):
"""
Returns the number of sentence breaks marked in a given set of
augmented tokens.
"""
return sum(1 for aug_tok in tokens if aug_tok.sentbreak)
######################################################################
# { Punkt Sentence Tokenizer
######################################################################
class PunktSentenceTokenizer(PunktBaseClass, TokenizerI):
"""
A sentence tokenizer which uses an unsupervised algorithm to build
a model for abbreviation words, collocations, and words that start
sentences; and then uses that model to find sentence boundaries.
This approach has been shown to work well for many European
languages.
"""
def __init__(self, train_text=None, verbose=False,
lang_vars=PunktLanguageVars(), token_cls=PunktToken):
"""
train_text can either be the sole training text for this sentence
boundary detector, or can be a PunktParameters object.
"""
PunktBaseClass.__init__(self, lang_vars=lang_vars,
token_cls=token_cls)
if train_text:
self._params = self.train(train_text, verbose)
def train(self, train_text, verbose=False):
"""
Derives parameters from a given training text, or uses the parameters
given. Repeated calls to this method destroy previous parameters. For
incremental training, instantiate a separate PunktTrainer instance.
"""
if not isinstance(train_text, string_types):
return train_text
return PunktTrainer(train_text, lang_vars=self._lang_vars,
token_cls=self._Token).get_params()
# ////////////////////////////////////////////////////////////
# { Tokenization
# ////////////////////////////////////////////////////////////
def tokenize(self, text, realign_boundaries=True):
"""
Given a text, returns a list of the sentences in that text.
"""
return list(self.sentences_from_text(text, realign_boundaries))
def debug_decisions(self, text):
"""
Classifies candidate periods as sentence breaks, yielding a dict for
each that may be used to understand why the decision was made.
See format_debug_decision() to help make this output readable.
"""
for match in self._lang_vars.period_context_re().finditer(text):
decision_text = match.group() + match.group('after_tok')
tokens = self._tokenize_words(decision_text)
tokens = list(self._annotate_first_pass(tokens))
while not tokens[0].period_final:
tokens.pop(0)
yield dict(period_index=match.end() - 1,
text=decision_text,
type1=tokens[0].type,
type2=tokens[1].type,
type1_in_abbrs=bool(tokens[0].abbr),
type1_is_initial=bool(tokens[0].is_initial),
type2_is_sent_starter=tokens[1].type_no_sentperiod in self._params.sent_starters,
type2_ortho_heuristic=self._ortho_heuristic(tokens[1]),
type2_ortho_contexts=set(self._params._debug_ortho_context(tokens[1].type_no_sentperiod)),
collocation=(tokens[0].type_no_sentperiod,
tokens[1].type_no_sentperiod) in self._params.collocations,
reason=self._second_pass_annotation(tokens[0], tokens[1]) or REASON_DEFAULT_DECISION,
break_decision=tokens[0].sentbreak,
)
def span_tokenize(self, text, realign_boundaries=True):
"""
Given a text, generates (start, end) spans of sentences
in the text.
"""
slices = self._slices_from_text(text)
if realign_boundaries:
slices = self._realign_boundaries(text, slices)
for sl in slices:
yield (sl.start, sl.stop)
def sentences_from_text(self, text, realign_boundaries=True):
"""
Given a text, generates the sentences in that text by only
testing candidate sentence breaks. If realign_boundaries is
True, includes in the sentence closing punctuation that
follows the period.
"""
return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)]
def _slices_from_text(self, text):
last_break = 0
for match in self._lang_vars.period_context_re().finditer(text):
context = match.group() + match.group('after_tok')
if self.text_contains_sentbreak(context):
yield slice(last_break, match.end())
if match.group('next_tok'):
# next sentence starts after whitespace
last_break = match.start('next_tok')
else:
# next sentence starts at following punctuation
last_break = match.end()
# The last sentence should not contain trailing whitespace.
yield slice(last_break, len(text.rstrip()))
def _realign_boundaries(self, text, slices):
"""
Attempts to realign punctuation that falls after the period but
should otherwise be included in the same sentence.
For example: "(Sent1.) Sent2." will otherwise be split as::
["(Sent1.", ") Sent1."].
This method will produce::
["(Sent1.)", "Sent2."].
"""
realign = 0
for sl1, sl2 in _pair_iter(slices):
sl1 = slice(sl1.start + realign, sl1.stop)
if not sl2:
if text[sl1]:
yield sl1
continue
m = self._lang_vars.re_boundary_realignment.match(text[sl2])
if m:
yield slice(sl1.start, sl2.start + len(m.group(0).rstrip()))
realign = m.end()
else:
realign = 0
if text[sl1]:
yield sl1
def text_contains_sentbreak(self, text):
"""
Returns True if the given text includes a sentence break.
"""
found = False # used to ignore last token
for t in self._annotate_tokens(self._tokenize_words(text)):
if found:
return True
if t.sentbreak:
found = True
return False
def sentences_from_text_legacy(self, text):
"""
Given a text, generates the sentences in that text. Annotates all
tokens, rather than just those with possible sentence breaks. Should
produce the same results as ``sentences_from_text``.
"""
tokens = self._annotate_tokens(self._tokenize_words(text))
return self._build_sentence_list(text, tokens)
def sentences_from_tokens(self, tokens):
"""
Given a sequence of tokens, generates lists of tokens, each list
corresponding to a sentence.
"""
tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens))
sentence = []
for aug_tok in tokens:
sentence.append(aug_tok.tok)
if aug_tok.sentbreak:
yield sentence
sentence = []
if sentence:
yield sentence
def _annotate_tokens(self, tokens):
"""
Given a set of tokens augmented with markers for line-start and
paragraph-start, returns an iterator through those tokens with full
annotation including predicted sentence breaks.
"""
# Make a preliminary pass through the document, marking likely
# sentence breaks, abbreviations, and ellipsis tokens.
tokens = self._annotate_first_pass(tokens)
# Make a second pass through the document, using token context
# information to change our preliminary decisions about where
# sentence breaks, abbreviations, and ellipsis occurs.
tokens = self._annotate_second_pass(tokens)
## [XX] TESTING
# tokens = list(tokens)
# self.dump(tokens)
return tokens
def _build_sentence_list(self, text, tokens):
"""
Given the original text and the list of augmented word tokens,
construct and return a tokenized list of sentence strings.
"""
# Most of the work here is making sure that we put the right
# pieces of whitespace back in all the right places.
# Our position in the source text, used to keep track of which
# whitespace to add:
pos = 0
# A regular expression that finds pieces of whitespace:
WS_REGEXP = re.compile(r'\s*')
sentence = ''
for aug_tok in tokens:
tok = aug_tok.tok
# Find the whitespace before this token, and update pos.
ws = WS_REGEXP.match(text, pos).group()
pos += len(ws)
# Some of the rules used by the punkt word tokenizer
# strip whitespace out of the text, resulting in tokens
# that contain whitespace in the source text. If our
# token doesn't match, see if adding whitespace helps.
# If so, then use the version with whitespace.
if text[pos:pos + len(tok)] != tok:
                pat = r'\s*'.join(re.escape(c) for c in tok)
m = re.compile(pat).match(text, pos)
if m: tok = m.group()
# Move our position pointer to the end of the token.
assert text[pos:pos + len(tok)] == tok
pos += len(tok)
# Add this token. If it's not at the beginning of the
# sentence, then include any whitespace that separated it
# from the previous token.
if sentence:
sentence += ws
sentence += tok
# If we're at a sentence break, then start a new sentence.
if aug_tok.sentbreak:
yield sentence
sentence = ''
        # If the last sentence is empty, discard it.
if sentence:
yield sentence
# [XX] TESTING
def dump(self, tokens):
print('writing to /tmp/punkt.new...')
with open('/tmp/punkt.new', 'w') as outfile:
for aug_tok in tokens:
if aug_tok.parastart:
outfile.write('\n\n')
elif aug_tok.linestart:
outfile.write('\n')
else:
outfile.write(' ')
outfile.write(str(aug_tok))
# ////////////////////////////////////////////////////////////
# { Customization Variables
# ////////////////////////////////////////////////////////////
PUNCTUATION = tuple(';:,.!?')
# ////////////////////////////////////////////////////////////
# { Annotation Procedures
# ////////////////////////////////////////////////////////////
def _annotate_second_pass(self, tokens):
"""
Performs a token-based classification (section 4) over the given
tokens, making use of the orthographic heuristic (4.1.1), collocation
heuristic (4.1.2) and frequent sentence starter heuristic (4.1.3).
"""
for t1, t2 in _pair_iter(tokens):
self._second_pass_annotation(t1, t2)
yield t1
def _second_pass_annotation(self, aug_tok1, aug_tok2):
"""
Performs token-based classification over a pair of contiguous tokens
updating the first.
"""
# Is it the last token? We can't do anything then.
if not aug_tok2:
return
tok = aug_tok1.tok
if not aug_tok1.period_final:
# We only care about words ending in periods.
return
typ = aug_tok1.type_no_period
next_tok = aug_tok2.tok
next_typ = aug_tok2.type_no_sentperiod
tok_is_initial = aug_tok1.is_initial
# [4.1.2. Collocation Heuristic] If there's a
# collocation between the word before and after the
# period, then label tok as an abbreviation and NOT
# a sentence break. Note that collocations with
# frequent sentence starters as their second word are
# excluded in training.
if (typ, next_typ) in self._params.collocations:
aug_tok1.sentbreak = False
aug_tok1.abbr = True
return REASON_KNOWN_COLLOCATION
# [4.2. Token-Based Reclassification of Abbreviations] If
# the token is an abbreviation or an ellipsis, then decide
# whether we should *also* classify it as a sentbreak.
if ((aug_tok1.abbr or aug_tok1.ellipsis) and
(not tok_is_initial)):
# [4.1.1. Orthographic Heuristic] Check if there's
            # orthographic evidence about whether the next word
# starts a sentence or not.
is_sent_starter = self._ortho_heuristic(aug_tok2)
if is_sent_starter == True:
aug_tok1.sentbreak = True
return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC
            # [4.1.3. Frequent Sentence Starter Heuristic] If the
# next word is capitalized, and is a member of the
# frequent-sentence-starters list, then label tok as a
# sentence break.
if (aug_tok2.first_upper and
next_typ in self._params.sent_starters):
aug_tok1.sentbreak = True
return REASON_ABBR_WITH_SENTENCE_STARTER
# [4.3. Token-Based Detection of Initials and Ordinals]
# Check if any initials or ordinals tokens that are marked
# as sentbreaks should be reclassified as abbreviations.
if tok_is_initial or typ == '##number##':
# [4.1.1. Orthographic Heuristic] Check if there's
            # orthographic evidence about whether the next word
# starts a sentence or not.
is_sent_starter = self._ortho_heuristic(aug_tok2)
if is_sent_starter == False:
aug_tok1.sentbreak = False
aug_tok1.abbr = True
if tok_is_initial:
return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC
else:
return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC
            # Special heuristic for initials: if the orthographic
            # heuristic is unknown, and the next word is always
# capitalized, then mark as abbrev (eg: J. Bach).
if (is_sent_starter == 'unknown' and tok_is_initial and
aug_tok2.first_upper and
not (self._params.ortho_context[next_typ] & _ORTHO_LC)):
aug_tok1.sentbreak = False
aug_tok1.abbr = True
return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC
return
def _ortho_heuristic(self, aug_tok):
"""
Decide whether the given token is the first token in a sentence.
"""
# Sentences don't start with punctuation marks:
if aug_tok.tok in self.PUNCTUATION:
return False
ortho_context = self._params.ortho_context[aug_tok.type_no_sentperiod]
# If the word is capitalized, occurs at least once with a
# lower case first letter, and never occurs with an upper case
# first letter sentence-internally, then it's a sentence starter.
if (aug_tok.first_upper and
(ortho_context & _ORTHO_LC) and
not (ortho_context & _ORTHO_MID_UC)):
return True
# If the word is lower case, and either (a) we've seen it used
# with upper case, or (b) we've never seen it used
# sentence-initially with lower case, then it's not a sentence
# starter.
if (aug_tok.first_lower and
((ortho_context & _ORTHO_UC) or
not (ortho_context & _ORTHO_BEG_LC))):
return False
# Otherwise, we're not sure.
return 'unknown'
DEBUG_DECISION_FMT = '''Text: %(text)r (at offset %(period_index)d)
Sentence break? %(break_decision)s (%(reason)s)
Collocation? %(collocation)s
%(type1)r:
known abbreviation: %(type1_in_abbrs)s
is initial: %(type1_is_initial)s
%(type2)r:
known sentence starter: %(type2_is_sent_starter)s
orthographic heuristic suggests is a sentence starter? %(type2_ortho_heuristic)s
orthographic contexts in training: %(type2_ortho_contexts)s
'''
def format_debug_decision(d):
return DEBUG_DECISION_FMT % d
def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer):
"""Builds a punkt model and applies it to the same text"""
cleanup = lambda s: re.compile(r'(?:\r|^\s+)', re.MULTILINE).sub('', s).replace('\n', ' ')
trainer = train_cls()
trainer.INCLUDE_ALL_COLLOCS = True
trainer.train(text)
sbd = tok_cls(trainer.get_params())
for l in sbd.sentences_from_text(text):
print(cleanup(l))
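# A minimal illustrative sketch of incremental training with PunktTrainer, using
# only the classes and functions defined above; the tiny training strings are
# placeholders and far too small to yield a useful model in practice.
def _example_incremental_training():
    trainer = PunktTrainer()
    trainer.INCLUDE_ALL_COLLOCS = True
    # Feed text in batches, postponing collocation / sentence-starter detection.
    for batch in ("Dr. Smith visited the lab. He left early.",
                  "The meeting with Prof. Jones ran long."):
        trainer.train(batch, finalize=False)
    trainer.finalize_training()
    tokenizer = PunktSentenceTokenizer(trainer.get_params())
    sample = "Mr. Brown arrived. The talk began."
    sentences = tokenizer.tokenize(sample)
    # debug_decisions() explains each candidate period; format_debug_decision()
    # renders one decision dict as readable text.
    decisions = [format_debug_decision(d) for d in tokenizer.debug_decisions(sample)]
    return sentences, decisions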
| [
"[email protected]"
] | |
7c6e2ad300adefc46b95d659f9cefe698aeb499b | 20f951bd927e4e5cde8ef7781813fcf0d51cc3ea | /fossir/modules/events/contributions/models/subcontributions.py | 9ff806fba366acfa3d3ecfa78f127ae91c426fa9 | [] | no_license | HodardCodeclub/SoftwareDevelopment | 60a0fbab045cb1802925d4dd5012d5b030c272e0 | 6300f2fae830c0c2c73fe0afd9c684383bce63e5 | refs/heads/master | 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,998 | py |
from __future__ import unicode_literals
from fossir.core.db import db
from fossir.core.db.sqlalchemy.attachments import AttachedItemsMixin
from fossir.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode
from fossir.core.db.sqlalchemy.notes import AttachedNotesMixin
from fossir.core.db.sqlalchemy.util.queries import increment_and_get
from fossir.util.locators import locator_property
from fossir.util.string import format_repr, return_ascii
def _get_next_friendly_id(context):
"""Get the next friendly id for a sub-contribution."""
from fossir.modules.events.contributions.models.contributions import Contribution
contribution_id = context.current_parameters['contribution_id']
assert contribution_id is not None
return increment_and_get(Contribution._last_friendly_subcontribution_id, Contribution.id == contribution_id)
def _get_next_position(context):
"""Get the next menu entry position for the event."""
contribution_id = context.current_parameters['contribution_id']
res = db.session.query(db.func.max(SubContribution.position)).filter_by(contribution_id=contribution_id).one()
return (res[0] or 0) + 1
class SubContribution(DescriptionMixin, AttachedItemsMixin, AttachedNotesMixin, db.Model):
__tablename__ = 'subcontributions'
__table_args__ = (db.Index(None, 'friendly_id', 'contribution_id', unique=True),
{'schema': 'events'})
PRELOAD_EVENT_ATTACHED_ITEMS = True
PRELOAD_EVENT_NOTES = True
ATTACHMENT_FOLDER_ID_COLUMN = 'subcontribution_id'
possible_render_modes = {RenderMode.html, RenderMode.markdown}
default_render_mode = RenderMode.markdown
id = db.Column(
db.Integer,
primary_key=True
)
#: The human-friendly ID for the sub-contribution
friendly_id = db.Column(
db.Integer,
nullable=False,
default=_get_next_friendly_id
)
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
index=True,
nullable=False
)
position = db.Column(
db.Integer,
nullable=False,
default=_get_next_position
)
title = db.Column(
db.String,
nullable=False
)
duration = db.Column(
db.Interval,
nullable=False
)
is_deleted = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: External references associated with this contribution
references = db.relationship(
'SubContributionReference',
lazy=True,
cascade='all, delete-orphan',
backref=db.backref(
'subcontribution',
lazy=True
)
)
#: Persons associated with this contribution
person_links = db.relationship(
'SubContributionPersonLink',
lazy=True,
cascade='all, delete-orphan',
backref=db.backref(
'subcontribution',
lazy=True
)
)
# relationship backrefs:
# - attachment_folders (AttachmentFolder.subcontribution)
# - contribution (Contribution.subcontributions)
# - legacy_mapping (LegacySubContributionMapping.subcontribution)
# - note (EventNote.subcontribution)
def __init__(self, **kwargs):
# explicitly initialize this relationship with None to avoid
# an extra query to check whether there is an object associated
# when assigning a new one (e.g. during cloning)
kwargs.setdefault('note', None)
super(SubContribution, self).__init__(**kwargs)
@property
def event(self):
return self.contribution.event
@locator_property
def locator(self):
return dict(self.contribution.locator, subcontrib_id=self.id)
@property
def is_protected(self):
return self.contribution.is_protected
@property
def session(self):
"""Convenience property so all event entities have it"""
return self.contribution.session if self.contribution.session_id is not None else None
@property
def timetable_entry(self):
"""Convenience property so all event entities have it"""
return self.contribution.timetable_entry
@property
def speakers(self):
return self.person_links
@speakers.setter
def speakers(self, value):
self.person_links = value.keys()
@property
def location_parent(self):
return self.contribution
def get_access_list(self):
return self.contribution.get_access_list()
def get_manager_list(self, recursive=False):
return self.contribution.get_manager_list(recursive=recursive)
@return_ascii
def __repr__(self):
return format_repr(self, 'id', is_deleted=False, _text=self.title)
def can_access(self, user, **kwargs):
return self.contribution.can_access(user, **kwargs)
def can_manage(self, user, role=None, **kwargs):
return self.contribution.can_manage(user, role, **kwargs)
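# A minimal illustrative sketch of creating a sub-contribution under an existing
# Contribution; `parent_contribution` is assumed to already exist and be attached
# to a database session, and persisting the new object still needs the usual
# session add/flush handling outside this module.
def _example_create_subcontribution(parent_contribution):
    from datetime import timedelta
    return SubContribution(contribution=parent_contribution,
                           title='Live demo',
                           duration=timedelta(minutes=15))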
| [
"[email protected]"
] | |
6305acaf43a088e91df5df323d21cd70ced14c36 | a062669a7f37412f016534ae30bd41e9efe6afa5 | /product/migrations/0013_auto_20201127_0026.py | 8b034f4bd8a91d3a1e265777d20c4ce041f762fb | [] | no_license | techappg/meat_fun_backend | 7c05045ae0ca6a442eb6e24693a800ca98447e9b | e16da0ec1ccfb583a43f534ad9fd6cb79fe1e6c1 | refs/heads/main | 2023-04-16T22:42:38.183722 | 2021-04-22T07:37:07 | 2021-04-22T07:37:07 | 360,430,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 3.1 on 2020-11-27 08:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0012_auto_20201127_0024'),
]
operations = [
migrations.AlterField(
model_name='contact_us',
name='mobile',
field=models.IntegerField(),
),
]
| [
"[email protected]"
] | |
ed6a4ab01226c402541becc7afe28423eff22758 | 036a41c913b3a4e7ae265e22a672dd89302d3200 | /0201-0300/0248/0248_Python_1.py | 760cb2e6b8f7b3dda42f9d212933b86444a78d20 | [] | no_license | ChangxingJiang/LeetCode | e76f96ebda68d7ade53575354479cfc33ad4f627 | a2209206cdd7229dd33e416f611e71a984a8dd9e | refs/heads/master | 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,018 | py | class Solution:
    # Given the bounds of the range, count the usable digits between the two values
@staticmethod
def num1(low, high, middle=False):
if middle:
return len([str(i) for i in [0, 1, 8] if int(low) <= i <= int(high)])
else:
return len([str(i) for i in [0, 1, 6, 8, 9] if int(low) < i < int(high)])
    # Count the number of candidates for each digit length
@staticmethod
def count(n, first):
if n == 0:
return 1
if n == 1:
return 3
if n == 2:
return 4 if first else 5
if first:
return 4 * Solution.count(n - 2, first=False)
else:
return 5 * Solution.count(n - 2, first=False)
def strobogrammaticInRange(self, low: str, high: str) -> int:
        # digit-to-rotated-digit mapping (strobogrammatic pairs)
reverse_lst = {
"0": "0",
"1": "1",
"6": "9",
"8": "8",
"9": "6"
}
# print("当前计算:", low, high)
# 如果顺序相反则返回0
if int(low) > int(high):
return 0
        # handle the case where the two numbers are identical
if low == high:
return 1 if low == low[::-1] else 0
a, b = len(low), len(high)
        # handle the case where both numbers have the same number of digits
        # e.g. (150-525) -> (150-199) + (200-499) + (500-525)
if a == b:
            # find the first position where the two numbers differ
i = 0
while i < a and low[i] == high[i]:
i += 1
s = a // 2
            # handle the single-digit case
            # and the case where the first differing position is the middle digit of an odd-length number
if a == 1 or (a % 2 == 1 and i == s):
return self.num1(low[i], high[i], middle=True)
            # handle a differing position before the middle digit
if (a % 2 == 0 and i < s) or (a % 2 == 1 and i < s):
ans = self.num1(low[i], high[i]) * self.count(a - (i + 1) * 2, first=False)
# print(low, high, "(", i, ")", "=",
# self.num1(low[i], high[i]), "*", self.count(a - (i + 1) * 2, first=False), "=", ans,
# "->",
# (low, low[:i + 1] + "9" * (a - i - 1)) if low[i] in reverse_lst else (),
# (high[:i + 1] + "0" * (a - i - 1), high) if high[i] in reverse_lst else ())
if low[i] in reverse_lst:
high2 = low[:i + 1] + "9" * (a - i - 1)
ans += self.strobogrammaticInRange(low, high2)
if high[i] in reverse_lst:
low2 = high[:i + 1] + "0" * (a - i - 1)
ans += self.strobogrammaticInRange(low2, high)
return ans
            # handle a differing position after the middle digit
            ch = reverse_lst[low[s - (i - s + 1)] if a % 2 == 0 else low[s - (i - s)]]  # required character at this position (mirror of the already-fixed prefix)
            # check whether the required character falls inside the digit bounds
if int(low[i]) < int(ch) < int(high[i]):
return 1
elif int(low[i]) == int(ch):
while i < a:
                    ch = reverse_lst[low[s - (i - s + 1)] if a % 2 == 0 else low[s - (i - s)]]  # required character at this position
if int(ch) > int(low[i]):
return 1
elif int(ch) == int(low[i]):
i += 1
else:
return 0
return 1
elif int(ch) == int(high[i]):
while i < a:
                    ch = reverse_lst[low[s - (i - s + 1)] if a % 2 == 0 else low[s - (i - s)]]  # required character at this position
if int(ch) < int(high[i]):
return 1
elif int(ch) == int(high[i]):
i += 1
else:
return 0
return 1
else:
return 0
        # handle the case where the two numbers have different digit counts
        # e.g. (50-4050) -> (50-99) + all 3-digit counts + (1000-4050)
else:
ans = 0
for i in range(a + 1, b):
ans += self.count(i, first=True)
# print(low, high, "=", ans, "->", (low, "9" * a), ("1" + "0" * (b - 1), high))
return (ans +
self.strobogrammaticInRange(low, "9" * a) +
self.strobogrammaticInRange("1" + "0" * (b - 1), high))
if __name__ == "__main__":
print(Solution().strobogrammaticInRange(low="50", high="100")) # 3
print(Solution().strobogrammaticInRange(low="0", high="9")) # 3
print(Solution().strobogrammaticInRange(low="100", high="50")) # 0
print(Solution().strobogrammaticInRange(low="1", high="0")) # 0
print(Solution().strobogrammaticInRange(low="0", high="100")) # 7
print(Solution().strobogrammaticInRange(low="100", high="1000")) # 12
print(Solution().strobogrammaticInRange(low="0", high="1680")) # 21
print(Solution().strobogrammaticInRange(low="0", high="2147483647")) # 3124
| [
"[email protected]"
] | |
9277ddc026afe786dbfa6c7fce9b98dc97c38959 | 19cec240505e27546cb9b10104ecb16cc2454702 | /linux/app/web/python/wikicode/dc/__init__.py | 92f91ec3adc810b7ed3614687a82c4219108541c | [] | no_license | imosts/flume | 1a9b746c5f080c826c1f316a8008d8ea1b145a89 | a17b987c5adaa13befb0fd74ac400c8edbe62ef5 | refs/heads/master | 2021-01-10T09:43:03.931167 | 2016-03-09T12:09:53 | 2016-03-09T12:09:53 | 53,101,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | import sys, socket, os, wikicode
import flume.flmos as flmo
from wikicode import to_rpc_proxy
class Declassifier (object):
def config (self):
"""
This is a CGI program used to configure the declassifier
"""
import wikicode
class Config (wikicode.extension):
def run (self):
self.send_page ("Generic DC Setup")
wikicode.run_extension (Config)
def declassify_ok (self, *args):
"""
This is a method that returns True or False depending on whether
the user with uid <owner_uid> is willing to declassify to user <recipient_uid>
"""
raise NotImplementedError, 'subclass must implement this method'
def run (self):
if len (sys.argv) > 1:
tagval = int (sys.argv[1])
instance_tagval = int (sys.argv[2])
owner_name = sys.argv[3]
owner_uid = int (sys.argv[4])
devel_homedir = sys.argv[5]
recipient_uid = int (sys.argv[6])
rpc_fd, rpc_proxy = to_rpc_proxy (os.environ[wikicode.RPC_TAG_ENV])
if self.declassify_ok (tagval, instance_tagval,
owner_name, owner_uid,
devel_homedir,
recipient_uid, rpc_fd, rpc_proxy):
rpc_proxy.set_dc_ok (True)
sys.exit (0)
else:
sys.exit (-1)
else:
self.config ()
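# A minimal illustrative subclass (hypothetical, not part of this module) showing
# how a concrete declassifier could satisfy the declassify_ok() contract relied on
# by run() above. The parameter list mirrors the arguments run() passes in; the
# policy itself (only declassify back to the data owner) is an arbitrary example.
class OwnerOnlyDeclassifier (Declassifier):
    def declassify_ok (self, tagval, instance_tagval, owner_name, owner_uid,
                       devel_homedir, recipient_uid, rpc_fd, rpc_proxy):
        # Allow the flow only when the recipient is the owner of the data.
        return recipient_uid == owner_uid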
if __name__ == '__main__':
obj = Declassifier ()
obj.run ()
| [
"imosts"
] | imosts |
3e200464fcd0c7743e17cb6998f1810928aa115a | a2b6bc9bdd2bdbe5871edb613065dd2397175cb3 | /Cookbook/Array/岛屿数量.py | 571395c6c2f6f2f328b0dda10d09b4a6f34e41e6 | [] | no_license | Asunqingwen/LeetCode | ed8d2043a31f86e9e256123439388d7d223269be | b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee | refs/heads/master | 2022-09-26T01:46:59.790316 | 2022-09-01T08:20:37 | 2022-09-01T08:20:37 | 95,668,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | py | '''
Given a 2D grid made up of '1's (land) and '0's (water), count the number of islands.
An island is surrounded by water and is formed by connecting adjacent land cells horizontally and/or vertically.
You may also assume that all four edges of the grid are surrounded by water.
Example 1:
Input: grid = [
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]
Output: 1
Example 2:
Input: grid = [
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]
Output: 3
Constraints:
m == grid.length
n == grid[i].length
1 <= m, n <= 300
grid[i][j] is either '0' or '1'
'''
from typing import List
class UnionFind:
def __init__(self,grid):
row, col = len(grid), len(grid[0])
self.count = 0
self.parent = [-1] * (row * col)
self.rank = [0] * (row * col)
for i in range(row):
for j in range(col):
if grid[i][j] == "1":
self.parent[i * col + j] = i * col + j
self.count += 1
def find(self, i):
if self.parent[i] == i:
return i
        self.parent[i] = self.find(self.parent[i])  # path compression
return self.parent[i]
def union(self, x, y):
rootx = self.find(x)
rooty = self.find(y)
if rootx != rooty:
            if self.rank[rootx] < self.rank[rooty]:  # union by rank: attach the root of the shallower tree under the deeper one
rootx, rooty = rooty, rootx
self.parent[rooty] = rootx
if self.rank[rootx] == self.rank[rooty]:
self.rank[rootx] += 1
            self.count -= 1  # each successful union merges two islands, so one fewer island
def getCount(self):
return self.count
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
row = len(grid)
if row == 0:
return 0
col = len(grid[0])
uf = UnionFind(grid)
for r in range(row):
for c in range(col):
if grid[r][c] == "1":
grid[r][c] = "0"
for x, y in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
if 0 <= x < row and 0 <= y < col and grid[x][y] == "1":
uf.union(r * col + c, x * col + y)
return uf.getCount()
if __name__ == '__main__':
grid = [
["1", "1", "1", "1", "0"],
["1", "1", "0", "1", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "0", "0", "0"]
]
sol = Solution()
print(sol.numIslands(grid))
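    # Second example from the problem statement above; expected output is 3.
    # (grid2 is a new local name added only for this extra check.)
    grid2 = [
        ["1", "1", "0", "0", "0"],
        ["1", "1", "0", "0", "0"],
        ["0", "0", "1", "0", "0"],
        ["0", "0", "0", "1", "1"]
    ]
    print(sol.numIslands(grid2))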
| [
"[email protected]"
] | |
8cb78ff77c9fbf845afbc5b63f5cb829ce8da914 | 0ffb18f4d58961ca675d8294eb2154f69061989f | /auto_process_ngs/test/test_analysis.py | 6b0aeb3108c417fc199c12786a9c0484bc38a1cd | [] | no_license | nandr0id/auto_process_ngs | a794e904e6d24b0e0403941b44c884374f95850e | 9b09f20b344d0ee87227e8771a479aa7c04f1837 | refs/heads/master | 2020-06-26T03:23:53.225029 | 2019-06-12T12:11:32 | 2019-06-12T12:11:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,655 | py | #######################################################################
# Tests for analysis.py module
#######################################################################
import unittest
import tempfile
import shutil
import zipfile
import pickle
import cloudpickle
from bcftbx.JobRunner import SimpleJobRunner,GEJobRunner
from bcftbx.utils import find_program
from auto_process_ngs.mock import MockAnalysisDirFactory
from auto_process_ngs.mock import MockAnalysisProject
from auto_process_ngs.applications import Command
from auto_process_ngs.fastq_utils import BaseFastqAttrs
from auto_process_ngs.analysis import *
class TestAnalysisFastq(unittest.TestCase):
"""
Tests for the AnalysisFastq class
"""
def test_illumina_style_with_extras(self):
"""AnalysisFastq: Illumina-style fastq name with extra elements appended
"""
fq = AnalysisFastq('M_19_0040_S13-A40h-R_D709-D505_L007_R1_001_repeated_1')
self.assertEqual(fq.sample_name,'M_19_0040_S13-A40h-R_D709-D505')
self.assertEqual(fq.basename,
'M_19_0040_S13-A40h-R_D709-D505_L007_R1_001_repeated_1')
self.assertEqual(fq.extension,'')
self.assertEqual(fq.sample_number,None)
self.assertEqual(fq.barcode_sequence,None)
self.assertEqual(fq.lane_number,7)
self.assertEqual(fq.read_number,1)
self.assertEqual(fq.set_number,1)
self.assertFalse(fq.is_index_read)
self.assertEqual(fq.canonical_name,'M_19_0040_S13-A40h-R_D709-D505_L007_R1_001')
self.assertEqual(fq.extras,'_repeated_1')
self.assertEqual(str(fq),
'M_19_0040_S13-A40h-R_D709-D505_L007_R1_001_repeated_1')
def test_non_canonical_fastq_name(self):
"""AnalysisFastq: Illumina-style fastq name with extra elements appended
"""
fq = AnalysisFastq('PB04_trimmoPE_bowtie2_notHg38.1')
self.assertEqual(fq.sample_name,'PB04_trimmoPE_bowtie2_notHg38.1')
self.assertEqual(fq.basename,'PB04_trimmoPE_bowtie2_notHg38.1')
self.assertEqual(fq.extension,'')
self.assertEqual(fq.sample_number,None)
self.assertEqual(fq.barcode_sequence,None)
self.assertEqual(fq.lane_number,None)
self.assertEqual(fq.read_number,None)
self.assertEqual(fq.set_number,None)
self.assertFalse(fq.is_index_read)
self.assertEqual(fq.canonical_name,None)
self.assertEqual(fq.extras,None)
self.assertEqual(str(fq),'PB04_trimmoPE_bowtie2_notHg38.1')
def test_reproduces_illumina_fastq_attrs(self):
"""AnalysisFastq: reproduces IlluminaFastqAttr behaviour
"""
for name in ('NH1_ChIP-seq_Gli1_ACAGTG_L003_R2_001',
'NH1_ChIP-seq_Gli1_ACAGTG-GTTCAC_L003_R2_001',
'NH1_ChIP-seq_Gli1_S4_L003_R2_001',
'NH1_ChIP-seq_Gli1_S4_L003_I1_001',
'NH1_ChIP-seq_Gli1_S4_R2_001',
'NH1_ChIP-seq_Gli1',
'NH1_ChIP-seq_Gli1_R2',
'NH1_ChIP-seq_Gli1_L001',
'NH1_ChIP-seq_Gli1_L001_R2',
'NH1_ChIP-seq_Gli1_ACAGTG',
'NH1_ChIP-seq_Gli1_ACAGTG_R2',
'NH1_ChIP-seq_Gli1_ACAGTG_L001',
'NH1_ChIP-seq_Gli1_ACAGTG_L001_R2',):
illumina_fastq_attrs = IlluminaFastqAttrs(name)
fq = AnalysisFastq(name)
self.assertEqual(fq.sample_name,illumina_fastq_attrs.sample_name)
self.assertEqual(fq.basename,illumina_fastq_attrs.basename)
self.assertEqual(fq.extension,illumina_fastq_attrs.extension)
self.assertEqual(fq.sample_number,illumina_fastq_attrs.sample_number)
self.assertEqual(fq.barcode_sequence,illumina_fastq_attrs.barcode_sequence)
self.assertEqual(fq.lane_number,illumina_fastq_attrs.lane_number)
self.assertEqual(fq.read_number,illumina_fastq_attrs.read_number)
self.assertEqual(fq.is_index_read,illumina_fastq_attrs.is_index_read)
self.assertEqual(str(fq),str(illumina_fastq_attrs))
def test_changing_attribute_updates_canonical_name(self):
"""AnalysisFastq: changing attributes is reflected in canonical name
"""
fq = AnalysisFastq('M_19_0040_S13-A40h-R_D709-D505_L007_R1_001_repeated_1')
fq.read_number = 2
self.assertEqual(fq.canonical_name,
'M_19_0040_S13-A40h-R_D709-D505_L007_R2_001')
def test_changing_attribute_updates_repr(self):
"""AnalysisFastq: changing attributes is reflected in repr
"""
fq = AnalysisFastq('M_19_0040_S13-A40h-R_D709-D505_L007_R1_001_repeated_1')
fq.read_number = 2
self.assertEqual(str(fq),
'M_19_0040_S13-A40h-R_D709-D505_L007_R2_001_repeated_1')
class TestAnalysisDir(unittest.TestCase):
"""Tests for the AnalysisDir class
"""
def setUp(self):
self.dirn = tempfile.mkdtemp(suffix='TestAnalysisDir')
def tearDown(self):
# Remove the temporary test directory
shutil.rmtree(self.dirn)
def test_casava(self):
"""Check AnalysisDir against CASAVA-style output
"""
mockdir = MockAnalysisDirFactory.casava(
'160621_M00879_0087_000000000-AGEW9',
'miseq',
top_dir=self.dirn)
mockdir.create()
analysis_dir = AnalysisDir(mockdir.dirn)
self.assertEqual(analysis_dir.run_name,mockdir.run_name)
self.assertEqual(analysis_dir.n_sequencing_data,1)
self.assertEqual(analysis_dir.n_projects,2)
self.assertTrue(analysis_dir.paired_end)
def test_bcl2fastq2(self):
"""Check AnalysisDir against bcl2fastq v2-style output
"""
mockdir = MockAnalysisDirFactory.bcl2fastq2(
'160621_M00879_0087_000000000-AGEW9',
'miseq',
top_dir=self.dirn)
mockdir.create()
analysis_dir = AnalysisDir(mockdir.dirn)
self.assertEqual(analysis_dir.run_name,mockdir.run_name)
self.assertEqual(analysis_dir.n_sequencing_data,1)
self.assertEqual(analysis_dir.n_projects,2)
self.assertTrue(analysis_dir.paired_end)
def test_handle_non_project_dir(self):
"""Check AnalysisDir with non-project directory
"""
mockdir = MockAnalysisDirFactory.bcl2fastq2(
'160621_M00879_0087_000000000-AGEW9',
'miseq',
top_dir=self.dirn)
mockdir.create()
# Add non-project dir
non_project_dir = os.path.join(self.dirn,
'160621_M00879_0087_000000000-AGEW9_analysis',
'extras')
fqs_dir = os.path.join(non_project_dir,'fastqs')
os.mkdir(non_project_dir)
os.mkdir(fqs_dir)
for fq in ('PB04_S4_R1_unpaired.fastq.gz',
'PB04_trimmoPE_bowtie2_notHg38.1.fastq.gz'):
with open(os.path.join(fqs_dir,fq),'w') as fp:
fp.write("")
# Load and check the analysis dir
analysis_dir = AnalysisDir(mockdir.dirn)
self.assertEqual(analysis_dir.run_name,mockdir.run_name)
self.assertEqual(analysis_dir.n_sequencing_data,1)
self.assertEqual(analysis_dir.n_projects,2)
self.assertTrue(analysis_dir.paired_end)
class TestAnalysisProject(unittest.TestCase):
"""Tests for the AnalysisProject class
"""
def setUp(self):
# Create a temporary directory for tests
self.dirn = tempfile.mkdtemp(suffix='TestAnalysisProject')
def make_data_dir(self,fastq_list):
# Make a fake data source directory
self.fastqs = []
fake_fastqs_dir = os.path.join(self.dirn,'fake_fastqs')
os.mkdir(fake_fastqs_dir)
for fq in fastq_list:
fastq = os.path.join(fake_fastqs_dir,fq)
open(fastq,'w').close()
self.fastqs.append(fastq)
def make_mock_project_dir(self,name,fastq_list,fastq_dir='fastqs',
primary_fastq_dir=None):
# Make a mock project directory
if primary_fastq_dir is None:
primary_fastq_dir = fastq_dir
sample_names = list(
set([AnalysisFastq(fq).sample_name for fq in fastq_list]))
sample_names.sort()
n_samples = len(sample_names)
sample_names = "%d sample%s (%s)" % \
(n_samples,
'' if n_samples == 1 else 's',
', '.join(sample_names))
metadata = { 'Primary fastqs': primary_fastq_dir,
'Samples': sample_names }
MockAnalysisProject(name,
fastq_list,
fastq_dir=fastq_dir,
metadata=metadata).create(top_dir=self.dirn)
def tearDown(self):
# Remove the temporary test directory
shutil.rmtree(self.dirn)
def test_empty_analysis_project(self):
"""Check empty AnalysisProject class
"""
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
self.assertEqual(project.name,'PJB')
self.assertEqual(project.dirn,dirn)
self.assertEqual(project.samples,[])
self.assertFalse(project.multiple_fastqs)
self.assertEqual(project.fastq_dir,None)
self.assertEqual(project.info.library_type,None)
self.assertEqual(project.info.single_cell_platform,None)
self.assertEqual(project.info.organism,None)
self.assertEqual(project.info.number_of_cells,None)
self.assertEqual(project.info.icell8_well_list,None)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.info.platform,None)
self.assertEqual(project.info.primary_fastq_dir,None)
self.assertEqual(project.info.samples,None)
self.assertEqual(project.fastq_dirs,[])
def test_create_single_end_analysis_project(self):
"""Check creation of new single-end AnalysisProject directory
"""
self.make_data_dir(('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
project.create_directory(fastqs=self.fastqs)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.samples[0].name,'PJB1-A')
self.assertEqual(project.samples[1].name,'PJB1-B')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.fastq_dirs,['fastqs',])
def test_create_single_end_analysis_project_multi_fastqs(self):
"""Check creation of new single-end AnalysisProject directory (multi-fastq/sample)
"""
self.make_data_dir(('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-A_ACAGTG_L002_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
project.create_directory(fastqs=self.fastqs)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertTrue(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
self.assertEqual(project.info.samples,
'2 samples (PJB1-A, PJB1-B, multiple fastqs per sample)')
self.assertEqual(project.samples[0].name,'PJB1-A')
self.assertEqual(project.samples[1].name,'PJB1-B')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.fastq_dirs,['fastqs',])
def test_create_paired_end_analysis_project(self):
"""Check creation of new paired-end AnalysisProject directory
"""
self.make_data_dir(('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',
'PJB1-A_ACAGTG_L001_R2_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R2_001.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
project.create_directory(fastqs=self.fastqs)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertTrue(project.info.paired_end)
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.samples[0].name,'PJB1-A')
self.assertEqual(project.samples[1].name,'PJB1-B')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.fastq_dirs,['fastqs',])
def test_create_paired_end_analysis_project_multi_fastqs(self):
"""Check creation of new paired-end AnalysisProject directory (multi-fastq/sample)
"""
self.make_data_dir(('PJB1-B_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L001_R2_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R2_001.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
project.create_directory(fastqs=self.fastqs)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertEqual(project.samples[0].name,'PJB1-B')
self.assertTrue(project.multiple_fastqs)
self.assertTrue(project.info.paired_end)
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.fastq_dirs,['fastqs',])
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
self.assertEqual(project.info.samples,
'1 sample (PJB1-B, multiple fastqs per sample)')
def test_create_analysis_project_not_standard_fastq_dir(self):
"""Check creation of AnalysisProject directory with non-standard fastq dir
"""
self.make_data_dir(('PJB1-B_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L001_R2_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R2_001.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
project.create_directory(fastqs=self.fastqs,fastq_dir='fastqs.test')
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertEqual(project.samples[0].name,'PJB1-B')
self.assertTrue(project.multiple_fastqs)
self.assertTrue(project.info.paired_end)
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs.test'))
self.assertEqual(project.info.samples,
'1 sample (PJB1-B, multiple fastqs per sample)')
self.assertEqual(project.fastq_dirs,['fastqs.test',])
self.assertEqual(project.info.primary_fastq_dir,'fastqs.test')
def test_load_single_end_analysis_project(self):
"""Check loading of an existing single-end AnalysisProject directory
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A')
self.assertEqual(project.samples[1].name,'PJB1-B')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.fastq_dirs,['fastqs',])
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
def test_load_analysis_project_non_canonical_fastq_dir(self):
"""Check AnalysisProject loading for directory with non-canonical fastq directory
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',),
fastq_dir='fastqs.test')
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn,fastq_dir='fastqs.test')
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A')
self.assertEqual(project.samples[1].name,'PJB1-B')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs.test'))
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.fastq_dirs,['fastqs.test',])
self.assertEqual(project.info.primary_fastq_dir,'fastqs.test')
def test_load_analysis_project_non_canonical_fastqs(self):
"""Check AnalysisProject loading fails for directory with non-canonical fastqs
"""
self.make_mock_project_dir(
'PJB',
('PB04_S4_R1_unpaired.fastq.gz',
'PB04_trimmoPE_bowtie2_notHg38.1.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.info.samples,
'2 samples (PB04, PB04_trimmoPE_bowtie2_notHg38.1)')
self.assertEqual(project.fastq_dirs,['fastqs',])
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
def test_load_analysis_project_non_with_alternative_fastq_naming(self):
"""Check AnalysisProject loading for directory with alternative fastq naming
"""
self.make_mock_project_dir(
'PJB',
('PB02.trimmed.filtered.r1.fastq.gz',
'PB02.trimmed.filtered.r2.fastq.gz',))
# Create a class to handle the non-canonical Fastq names
class NonCanonicalFastq(BaseFastqAttrs):
def __init__(self,fastq):
BaseFastqAttrs.__init__(self,fastq)
self.sample_name = self.basename.split('.')[0]
self.read_number = int(self.basename.split('.')[-1][1:])
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn,fastq_attrs=NonCanonicalFastq)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertTrue(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PB02')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.info.samples,
'1 sample (PB02.trimmed.filtered)')
self.assertEqual(project.fastq_dirs,['fastqs',])
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
def test_load_analysis_project_detect_multiple_fastq_dirs(self):
"""Check AnalysisProject detects multiple fastqs directories
"""
# Construct test project with two fastq subdirectories
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
self.make_mock_project_dir(
'PJB.untrimmed',
('PJB1-A-untrimmed_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B-untrimmed_ACAGTG_L002_R1_001.fastq.gz',),
fastq_dir='fastqs.untrimmed')
shutil.move(os.path.join(self.dirn,
'PJB.untrimmed',
'fastqs.untrimmed'),
os.path.join(self.dirn,'PJB'))
shutil.rmtree(os.path.join(self.dirn,'PJB.untrimmed'))
# Load and check AnalysisProject: default fastqs dir
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A')
self.assertEqual(project.samples[1].name,'PJB1-B')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.fastq_dirs,
['fastqs','fastqs.untrimmed'])
# Load and check AnalysisProject: default fastqs dir
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn,
fastq_dir='fastqs.untrimmed')
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A-untrimmed')
self.assertEqual(project.samples[1].name,'PJB1-B-untrimmed')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs.untrimmed'))
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.fastq_dirs,
['fastqs','fastqs.untrimmed'])
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
def test_analysis_project_switch_fastq_dir(self):
"""Check AnalysisProject can switch between multiple fastqs directories
"""
# Construct test project with two fastq subdirectories
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
self.make_mock_project_dir(
'PJB.untrimmed',
('PJB1-A-untrimmed_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B-untrimmed_ACAGTG_L002_R1_001.fastq.gz',),
fastq_dir='fastqs.untrimmed')
shutil.move(os.path.join(self.dirn,
'PJB.untrimmed',
'fastqs.untrimmed'),
os.path.join(self.dirn,'PJB'))
shutil.rmtree(os.path.join(self.dirn,'PJB.untrimmed'))
# Load and check AnalysisProject: default fastqs dir
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A')
self.assertEqual(project.samples[1].name,'PJB1-B')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.fastq_dirs,
['fastqs','fastqs.untrimmed'])
# Switch to alternative fastqs dir
project.use_fastq_dir('fastqs.untrimmed')
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A-untrimmed')
self.assertEqual(project.samples[1].name,'PJB1-B-untrimmed')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs.untrimmed'))
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.fastq_dirs,
['fastqs','fastqs.untrimmed'])
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
def test_analysis_project_switch_to_default_fastq_dir(self):
"""Check AnalysisProject switches to default fastq set
"""
# Construct test project with two fastq subdirectories
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
self.make_mock_project_dir(
'PJB.untrimmed',
('PJB1-A-untrimmed_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B-untrimmed_ACAGTG_L002_R1_001.fastq.gz',),
fastq_dir='fastqs.untrimmed')
shutil.move(os.path.join(self.dirn,
'PJB.untrimmed',
'fastqs.untrimmed'),
os.path.join(self.dirn,'PJB'))
shutil.rmtree(os.path.join(self.dirn,'PJB.untrimmed'))
# Load and check AnalysisProject with alternative fastq set
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn,fastq_dir='fastqs.untrimmed')
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A-untrimmed')
self.assertEqual(project.samples[1].name,'PJB1-B-untrimmed')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs.untrimmed'))
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.fastq_dirs,
['fastqs','fastqs.untrimmed'])
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
# Implicitly switch to primary fastq set
project.use_fastq_dir()
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A')
self.assertEqual(project.samples[1].name,'PJB1-B')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.fastq_dirs,
['fastqs','fastqs.untrimmed'])
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
def test_analysis_project_switch_to_default_non_canonical_fastq_dir(self):
"""Check AnalysisProject switches to default (non-canonical) fastq set
"""
# Construct test project with two fastq subdirectories
# and make non-canonical named primary set
self.make_mock_project_dir(
'PJB',
('PJB1-A-untrimmed_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B-untrimmed_ACAGTG_L002_R1_001.fastq.gz',),
fastq_dir='fastqs.untrimmed')
self.make_mock_project_dir(
'PJB.trimmed',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
shutil.move(os.path.join(self.dirn,
'PJB.trimmed',
'fastqs'),
os.path.join(self.dirn,'PJB'))
shutil.rmtree(os.path.join(self.dirn,'PJB.trimmed'))
# Load and check AnalysisProject primary fastq set
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
self.assertEqual(project.info.primary_fastq_dir,'fastqs.untrimmed')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs.untrimmed'))
self.assertEqual(project.info.samples,
'2 samples (PJB1-A-untrimmed, PJB1-B-untrimmed)')
# Load again with alternative fastq set
project = AnalysisProject('PJB',dirn,fastq_dir='fastqs')
self.assertEqual(project.info.primary_fastq_dir,'fastqs.untrimmed')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.info.samples,
'2 samples (PJB1-A-untrimmed, PJB1-B-untrimmed)')
# Implicitly switch to primary fastq set
project.use_fastq_dir()
self.assertEqual(project.info.primary_fastq_dir,'fastqs.untrimmed')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs.untrimmed'))
self.assertEqual(project.info.samples,
'2 samples (PJB1-A-untrimmed, PJB1-B-untrimmed)')
def test_analysis_project_update_primary_fastq_dir(self):
"""Check AnalysisProject primary fastq set can be updated
"""
# Construct test project with two fastq subdirectories
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
self.make_mock_project_dir(
'PJB.untrimmed',
('PJB1-A-untrimmed_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B-untrimmed_ACAGTG_L002_R1_001.fastq.gz',),
fastq_dir='fastqs.untrimmed')
shutil.move(os.path.join(self.dirn,
'PJB.untrimmed',
'fastqs.untrimmed'),
os.path.join(self.dirn,'PJB'))
shutil.rmtree(os.path.join(self.dirn,'PJB.untrimmed'))
# Load and check AnalysisProject primary fastq set
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.fastq_dirs,['fastqs','fastqs.untrimmed'])
# Update the primary fastq set
project.set_primary_fastq_dir('fastqs.untrimmed')
self.assertEqual(project.info.primary_fastq_dir,'fastqs.untrimmed')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.info.samples,
'2 samples (PJB1-A-untrimmed, PJB1-B-untrimmed)')
self.assertEqual(project.fastq_dirs,['fastqs','fastqs.untrimmed'])
# Reload the project and check that the change has stuck
project1 = AnalysisProject('PJB',dirn)
self.assertEqual(project1.info.primary_fastq_dir,'fastqs.untrimmed')
self.assertEqual(project1.fastq_dir,
os.path.join(project.dirn,'fastqs.untrimmed'))
self.assertEqual(project1.fastq_dirs,['fastqs','fastqs.untrimmed'])
self.assertEqual(project1.info.samples,
'2 samples (PJB1-A-untrimmed, PJB1-B-untrimmed)')
def test_analysis_project_switch_to_non_existant_fastq_dir(self):
"""Check AnalysisProject fails when switching to non-existant fastqs dir
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
self.assertRaises(Exception,
project.use_fastq_dir,'fastqs.non_existant')
def test_analysis_project_switch_fastq_dir_preserves_qc_dir(self):
"""Check AnalysisProject switch fastqs dirs preserves QC dir
"""
# Construct test project with two fastq subdirectories
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
self.make_mock_project_dir(
'PJB.untrimmed',
('PJB1-A-untrimmed_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B-untrimmed_ACAGTG_L002_R1_001.fastq.gz',),
fastq_dir='fastqs.untrimmed')
shutil.move(os.path.join(self.dirn,
'PJB.untrimmed',
'fastqs.untrimmed'),
os.path.join(self.dirn,'PJB'))
shutil.rmtree(os.path.join(self.dirn,'PJB.untrimmed'))
# Load and check AnalysisProject: default fastqs dir
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A')
self.assertEqual(project.samples[1].name,'PJB1-B')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs'))
self.assertEqual(project.fastq_dirs,
['fastqs','fastqs.untrimmed'])
self.assertEqual(project.qc_dir,
os.path.join(project.dirn,'qc'))
# Set new QC dir
project.use_qc_dir('qc.new')
self.assertEqual(project.qc_dir,
os.path.join(project.dirn,'qc.new'))
# Switch to alternative fastqs dir
project.use_fastq_dir('fastqs.untrimmed')
self.assertEqual(project.name,'PJB')
self.assertTrue(os.path.isdir(project.dirn))
self.assertFalse(project.multiple_fastqs)
self.assertFalse(project.info.paired_end)
self.assertEqual(project.samples[0].name,'PJB1-A-untrimmed')
self.assertEqual(project.samples[1].name,'PJB1-B-untrimmed')
self.assertEqual(project.fastq_dir,
os.path.join(project.dirn,'fastqs.untrimmed'))
self.assertEqual(project.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(project.fastq_dirs,
['fastqs','fastqs.untrimmed'])
self.assertEqual(project.info.primary_fastq_dir,'fastqs')
self.assertEqual(project.qc_dir,
os.path.join(project.dirn,'qc.new'))
def test_sample_summary_single_ended(self):
"""AnalysisProject: sample_summary works for SE data
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
project = AnalysisProject('PJB',os.path.join(self.dirn,'PJB'))
self.assertEqual(project.sample_summary(),
"2 samples (PJB1-A, PJB1-B)")
def test_sample_summary_paired_ended(self):
"""AnalysisProject: sample_summary works for PE data
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-A_ACAGTG_L001_R2_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R2_001.fastq.gz',))
project = AnalysisProject('PJB',os.path.join(self.dirn,'PJB'))
self.assertEqual(project.sample_summary(),
"2 samples (PJB1-A, PJB1-B)")
def test_sample_summary_single_ended_multiple_fastqs(self):
"""AnalysisProject: sample_summary works for SE data, multiple fastqs
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-A_ACAGTG_L002_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
project = AnalysisProject('PJB',os.path.join(self.dirn,'PJB'))
self.assertEqual(project.sample_summary(),
"2 samples (PJB1-A, PJB1-B, multiple fastqs per sample)")
def test_sample_summary_paired_ended_multiple_fastqs(self):
"""AnalysisProject: sample_summary works for PE data, multiple fastqs
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-A_ACAGTG_L001_R2_001.fastq.gz',
'PJB1-A_ACAGTG_L002_R1_001.fastq.gz',
'PJB1-A_ACAGTG_L002_R2_001.fastq.gz',
'PJB1-B_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R2_001.fastq.gz',
'PJB1-B_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R2_001.fastq.gz',))
project = AnalysisProject('PJB',os.path.join(self.dirn,'PJB'))
self.assertEqual(project.sample_summary(),
"2 samples (PJB1-A, PJB1-B, multiple fastqs per sample)")
def test_sample_summary_paired_ended_ignore_index_reads(self):
"""AnalysisProject: sample_summary works for PE data with index reads
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-A_ACAGTG_L001_R2_001.fastq.gz',
'PJB1-A_ACAGTG_L001_I1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R2_001.fastq.gz',
'PJB1-B_ACAGTG_L002_I1_001.fastq.gz',))
project = AnalysisProject('PJB',os.path.join(self.dirn,'PJB'))
self.assertEqual(project.sample_summary(),
"2 samples (PJB1-A, PJB1-B)")
def test_sample_summary_no_samples(self):
"""AnalysisProject: sample_summary works when there are no samples
"""
self.make_mock_project_dir('PJB',())
project = AnalysisProject('PJB',os.path.join(self.dirn,'PJB'))
self.assertEqual(project.sample_summary(),"No samples")
def test_order_samples_by_name(self):
"""AnalysisProject: sample_summary works for SE data
"""
self.make_mock_project_dir(
'PJB',
('PJB1_ACAGTG_L001_R1_001.fastq.gz',
'PJB2_ACAGTG_L001_R1_001.fastq.gz',
'PJB3_ACAGTG_L001_R1_001.fastq.gz',
'PJB10_ACAGTG_L001_R1_001.fastq.gz',
'PJB20_ACAGTG_L001_R1_001.fastq.gz',
'PJB21_ACAGTG_L001_R1_001.fastq.gz',))
project = AnalysisProject('PJB',os.path.join(self.dirn,'PJB'))
sample_names = [s.name for s in project.samples]
self.assertEqual(sample_names,
['PJB1','PJB2','PJB3','PJB10','PJB20','PJB21'])
def test_pickle_analysis_project(self):
"""AnalysisProject: check serialisation with 'pickle'
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
# Pickle project
pickled = pickle.dumps(project)
# Unpickle it
unpickled = pickle.loads(pickled)
# Check the unpickled data
self.assertEqual(unpickled.name,'PJB')
self.assertTrue(os.path.isdir(unpickled.dirn))
self.assertFalse(unpickled.multiple_fastqs)
self.assertFalse(unpickled.info.paired_end)
self.assertEqual(unpickled.info.primary_fastq_dir,'fastqs')
self.assertEqual(unpickled.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(unpickled.samples[0].name,'PJB1-A')
self.assertEqual(unpickled.samples[1].name,'PJB1-B')
self.assertEqual(unpickled.fastq_dir,
os.path.join(unpickled.dirn,'fastqs'))
self.assertEqual(unpickled.fastq_dirs,['fastqs',])
def test_cloudpickle_analysis_project(self):
"""AnalysisProject: check serialisation with 'cloudpickle'
"""
self.make_mock_project_dir(
'PJB',
('PJB1-A_ACAGTG_L001_R1_001.fastq.gz',
'PJB1-B_ACAGTG_L002_R1_001.fastq.gz',))
dirn = os.path.join(self.dirn,'PJB')
project = AnalysisProject('PJB',dirn)
# Pickle project
pickled = cloudpickle.dumps(project)
# Unpickle it
unpickled = cloudpickle.loads(pickled)
# Check the unpickled data
self.assertEqual(unpickled.name,'PJB')
self.assertTrue(os.path.isdir(unpickled.dirn))
self.assertFalse(unpickled.multiple_fastqs)
self.assertFalse(unpickled.info.paired_end)
self.assertEqual(unpickled.info.primary_fastq_dir,'fastqs')
self.assertEqual(unpickled.info.samples,'2 samples (PJB1-A, PJB1-B)')
self.assertEqual(unpickled.samples[0].name,'PJB1-A')
self.assertEqual(unpickled.samples[1].name,'PJB1-B')
self.assertEqual(unpickled.fastq_dir,
os.path.join(unpickled.dirn,'fastqs'))
self.assertEqual(unpickled.fastq_dirs,['fastqs',])
class TestAnalysisSample(unittest.TestCase):
"""Tests for the AnalysisSample class
"""
def test_empty_analysis_sample(self):
"""Check empty AnalysisSample class
"""
sample = AnalysisSample('PJB1-A')
self.assertEqual(sample.name,'PJB1-A')
self.assertEqual(sample.fastq,[])
self.assertFalse(sample.paired_end)
self.assertEqual(str(sample),'PJB1-A')
def test_single_end_analysis_sample(self):
"""Check AnalysisSample class with single-end sample
"""
fq = '/run/sample1/PJB1-B_ACAGTG_L001_R1.fastq.gz'
sample = AnalysisSample('PJB1-B')
sample.add_fastq(fq)
self.assertEqual(sample.name,'PJB1-B')
self.assertEqual(sample.fastq,[fq])
self.assertEqual(sample.fastq_subset(read_number=1),[fq])
self.assertEqual(sample.fastq_subset(read_number=2),[])
self.assertFalse(sample.paired_end)
self.assertEqual(str(sample),'PJB1-B')
def test_single_end_analysis_sample_multiple_fastqs(self):
"""Check AnalysisSample class with single-end sample (multiple fastqs)
"""
sample = AnalysisSample('PJB1-B')
fq_l1 = '/run/sample1/PJB1-B_ACAGTG_L001_R1.fastq.gz'
fq_l2 = '/run/sample1/PJB1-B_ACAGTG_L002_R1.fastq.gz'
sample.add_fastq(fq_l1)
sample.add_fastq(fq_l2)
self.assertEqual(sample.name,'PJB1-B')
self.assertEqual(sample.fastq,[fq_l1,fq_l2])
self.assertEqual(sample.fastq_subset(read_number=1),[fq_l1,fq_l2])
self.assertEqual(sample.fastq_subset(read_number=2),[])
self.assertFalse(sample.paired_end)
self.assertEqual(str(sample),'PJB1-B')
def test_paired_end_analysis_sample(self):
"""Check AnalysisSample class with paired-end sample
"""
sample = AnalysisSample('PJB1-B')
fq_r1 = '/run/sample1/PJB1-B_ACAGTG_L001_R1.fastq.gz'
fq_r2 = '/run/sample1/PJB1-B_ACAGTG_L001_R2.fastq.gz'
sample.add_fastq(fq_r1)
sample.add_fastq(fq_r2)
self.assertEqual(sample.name,'PJB1-B')
self.assertEqual(sample.fastq,[fq_r1,fq_r2])
self.assertEqual(sample.fastq_subset(read_number=1),[fq_r1])
self.assertEqual(sample.fastq_subset(read_number=2),[fq_r2])
def test_paired_end_analysis_sample_index_read_fastq(self):
"""Check AnalysisSample class with index read fastqs
"""
sample = AnalysisSample('PJB1-B')
fq_l1_r1 = '/run/sample1/PJB1-B_S1_L001_R1.fastq.gz'
fq_l1_r2 = '/run/sample1/PJB1-B_S1_L001_R2.fastq.gz'
fq_l1_i1 = '/run/sample1/PJB1-B_S1_L001_I1.fastq.gz'
sample.add_fastq(fq_l1_r1)
sample.add_fastq(fq_l1_r2)
sample.add_fastq(fq_l1_i1)
self.assertEqual(sample.name,'PJB1-B')
self.assertEqual(sample.fastq,[fq_l1_i1,fq_l1_r1,fq_l1_r2])
self.assertEqual(sample.fastq_subset(read_number=1),[fq_l1_r1,])
self.assertEqual(sample.fastq_subset(read_number=2),[fq_l1_r2,])
self.assertTrue(sample.paired_end)
self.assertEqual(str(sample),'PJB1-B')
def test_paired_end_analysis_sample_multiple_fastqs(self):
"""Check AnalysisSample class with paired-end sample (multiple fastqs)
"""
sample = AnalysisSample('PJB1-B')
fq_l1_r1 = '/run/sample1/PJB1-B_ACAGTG_L001_R1.fastq.gz'
fq_l2_r1 = '/run/sample1/PJB1-B_ACAGTG_L002_R1.fastq.gz'
fq_l1_r2 = '/run/sample1/PJB1-B_ACAGTG_L001_R2.fastq.gz'
fq_l2_r2 = '/run/sample1/PJB1-B_ACAGTG_L002_R2.fastq.gz'
sample.add_fastq(fq_l1_r1)
sample.add_fastq(fq_l2_r1)
sample.add_fastq(fq_l1_r2)
sample.add_fastq(fq_l2_r2)
self.assertEqual(sample.name,'PJB1-B')
self.assertEqual(sample.fastq,[fq_l1_r1,fq_l1_r2,
fq_l2_r1,fq_l2_r2])
self.assertEqual(sample.fastq_subset(read_number=1),[fq_l1_r1,fq_l2_r1])
self.assertEqual(sample.fastq_subset(read_number=2),[fq_l1_r2,fq_l2_r2])
self.assertTrue(sample.paired_end)
self.assertEqual(str(sample),'PJB1-B')
def test_analysis_sample_non_canonical_fastq_naming(self):
"""Check AnalysisSample class with non-canonical fastq naming
"""
# Create a class to handle the non-canonical Fastq names
class NonCanonicalFastq(BaseFastqAttrs):
def __init__(self,fastq):
BaseFastqAttrs.__init__(self,fastq)
self.sample_name = self.basename.split('.')[0]
self.read_number = int(self.basename.split('.')[-1][1:])
sample = AnalysisSample('PJB1-B',fastq_attrs=NonCanonicalFastq)
fq_r1 = '/run/sample1/PJB1-B.ACAGTG.L001.R1.fastq.gz'
fq_r2 = '/run/sample1/PJB1-B.ACAGTG.L001.R2.fastq.gz'
sample.add_fastq(fq_r1)
sample.add_fastq(fq_r2)
self.assertEqual(sample.name,'PJB1-B')
self.assertEqual(sample.fastq,[fq_r1,fq_r2])
self.assertEqual(sample.fastq_subset(read_number=1),[fq_r1])
self.assertEqual(sample.fastq_subset(read_number=2),[fq_r2])
self.assertTrue(sample.paired_end)
self.assertEqual(str(sample),'PJB1-B')
class TestRunReferenceIdFunction(unittest.TestCase):
"""
Tests for the 'run_reference_id' function
"""
def test_run_reference_id(self):
"""run_reference_id: run name, platform and facility run number
"""
self.assertEqual(run_reference_id("160621_M00879_0087_000000000-AGEW9",
platform="miseq",
facility_run_number=87),
"MISEQ_160621#87")
self.assertEqual(run_reference_id("/data/160621_M00879_0087_000000000-AGEW9/",
platform="miseq",
facility_run_number=87),
"MISEQ_160621#87")
def test_run_reference_id_no_platform(self):
"""run_reference_id: run name and facility run number (no platform)
"""
self.assertEqual(run_reference_id("160621_M00879_0087_000000000-AGEW9",
platform=None,
facility_run_number=87),
"M00879_160621#87")
self.assertEqual(run_reference_id("160621_M00879_0087_000000000-AGEW9",
platform=None,
facility_run_number=88),
"M00879_160621/87#88")
self.assertEqual(run_reference_id("/data/160621_M00879_0087_000000000-AGEW9/",
platform=None,
facility_run_number=87),
"M00879_160621#87")
def test_run_reference_id_no_facility_run_number(self):
"""run_reference_id: run name and platform (no facility run number)
"""
self.assertEqual(run_reference_id("160621_M00879_0087_000000000-AGEW9",
platform="miseq",
facility_run_number=None),
"MISEQ_160621/87")
def test_run_reference_id_facility_run_number_differs(self):
"""run_reference_id: instrument and facility run numbers differ
"""
self.assertEqual(run_reference_id("160621_M00879_0087_000000000-AGEW9",
platform="miseq",
facility_run_number=90),
"MISEQ_160621/87#90")
self.assertEqual(run_reference_id("/data/160621_M00879_0087_000000000-AGEW9/",
platform="miseq",
facility_run_number=90),
"MISEQ_160621/87#90")
def test_run_reference_id_bad_run_name(self):
"""run_reference_id: handle 'bad' run name (cannot be split)
"""
self.assertEqual(run_reference_id("rag_05_2017",
platform=None,
facility_run_number=None),
"rag_05_2017")
self.assertEqual(run_reference_id("rag_05_2017",
platform="miseq",
facility_run_number=None),
"MISEQ_rag_05_2017")
self.assertEqual(run_reference_id("rag_05_2017",
platform=None,
facility_run_number=90),
"rag_05_2017#90")
self.assertEqual(run_reference_id("rag_05_2017",
platform="miseq",
facility_run_number=90),
"MISEQ_rag_05_2017#90")
class TestSplitSampleNameFunction(unittest.TestCase):
"""
Tests for the 'split_sample_name' function
"""
def test_split_sample_name(self):
"""split_sample_name: check names are split correctly
"""
self.assertEqual(split_sample_name("PJB"),["PJB"])
self.assertEqual(split_sample_name("PJB1"),["PJB",1])
self.assertEqual(split_sample_name("PJB0001"),["PJB",1])
self.assertEqual(split_sample_name("PJB_1-10"),["PJB_",1,"-",10])
class TestCopyAnalysisProject(unittest.TestCase):
"""
Tests for the 'copy_analysis_project' function
"""
def setUp(self):
# Create a temp working dir
self.wd = tempfile.mkdtemp(suffix='TestCopyProject')
def tearDown(self):
# Remove the temporary test directory
shutil.rmtree(self.wd)
def test_copy_project(self):
"""
        copy_analysis_project: copies project instance
"""
# Make mock analysis project
p = MockAnalysisProject("PJB",("PJB1_S1_R1_001.fastq.gz",
"PJB1_S1_R2_001.fastq.gz",),
metadata={ 'Organism': 'Human' })
p.create(top_dir=self.wd)
# Make initial project
project = AnalysisProject("PJB",os.path.join(self.wd,"PJB"))
# Make a copy
project2 = copy_analysis_project(project)
# Check copy
self.assertEqual(project.name,project2.name)
self.assertEqual(project.dirn,project2.dirn)
self.assertEqual(project.fastq_dir,project2.fastq_dir)
self.assertEqual(project.fastq_dirs,project2.fastq_dirs)
self.assertEqual(project.fastqs,project2.fastqs)
self.assertEqual(project.info.organism,project2.info.organism)
| [
"[email protected]"
] | |
8b822886de793fad5cc78d1bdeeab56f9dfb7197 | 85f1488f3d0996b83292f74b3672793f2778503f | /notebooks/Model Diagnostics.py | 96d24d2bbf464d6e372c397f7b713a044f8955dd | [] | no_license | ceshine/jigsaw-toxic-2019 | 33f66d6643aeeeb20599ab95368ce2c1f6500543 | 34d5df28e1b820725f964fbbdfe039daea31c0d7 | refs/heads/master | 2022-02-22T10:50:51.444794 | 2019-08-04T04:13:00 | 2019-08-04T04:13:00 | 198,053,856 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,796 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
sys.path.append("..")
# In[2]:
from pathlib import Path
from functools import partial
import numpy as np
import pandas as pd
import torch
import joblib
from torch.utils.data import DataLoader
from toxic.inference_bert import get_token_ids
from toxic.dataset import AUX_COLUMNS, ToxicDataset, collate_examples, SortSampler
from toxic.common import ToxicBot
from toxic.metric import ToxicMetric
# In[3]:
MODEL_PATH = Path("../data/cache/")
DEVICE = "cuda:0"
# In[4]:
tokenizer = joblib.load(str(MODEL_PATH / "bert-base-uncased_tokenizer.jbl"))
model = torch.load(str(MODEL_PATH / "bert-base-uncased_-1_yuval_220_f0.pth")).to(DEVICE)
# In[5]:
collate_fn = partial(
collate_examples,
truncate_len=220,
pad=0,
closing_id=tokenizer.vocab["[SEP]"],
mode="both"
)
# 
# [source](https://twitter.com/jessamyn/status/900867154412699649)
# In[6]:
test_text = [
"I am a man",
"I am a woman",
"I am a lesbian",
"I am gay man",
"I am dyke",
"I am a white man",
"I am a gay woman",
"I am a white woman",
"I am a gay white man",
"I am a black man",
"I am a gay white man",
"I am a gay black man",
"I am a black woman",
"I am a gay black woman"
]
df = pd.DataFrame(dict(comment_text=test_text))
# In[7]:
tokens = get_token_ids(
df, tokenizer, is_bert=True)
test_ds = ToxicDataset(df, tokens, labeled=False)
test_loader = DataLoader(
test_ds,
collate_fn=collate_fn,
batch_size=32,
num_workers=0,
pin_memory=True
)
# In[8]:
with torch.no_grad():
results = []
for batch, _ in test_loader:
results.append(model(batch.cuda()))
results = torch.sigmoid(torch.cat(results)) * 100
results.size()
# In[9]:
predictions = pd.DataFrame(results.cpu().numpy(), columns=AUX_COLUMNS)
predictions["text"] = df["comment_text"].values
predictions.shape
# In[10]:
pd.set_option('display.float_format', lambda x: '%.2f' % x)
# In[11]:
predictions.columns
# In[12]:
predictions[["text", "target", "identity_attack", "female", "homosexual_gay_or_lesbian", "black", "white"]].rename(
columns={"target": "toxic", "homosexual_gay_or_lesbian":"homosexual"})
# ## Other random examples
# In[23]:
test_text = [
"Pretty sad really. Thousands come out to protest Trump who's in another country......and all those NDP and Green members - who were the driving force behind Proportional Representation - couldn't find the time to get out to support their own cause. That said, you can bet that Trudeau has alredy lost a good portion of that NDP/Green vote. The guy's a liar. Period.",
"Idiots.",
"Then put them in jail until they decide to cooperate. This is ridiculous....",
"\"He’s a proven liar. Why would you believe anything he has to say?\"\n\nExactly.\nThat\'s why I do not listen to a word Donnie says.",
"Fascinating young woman, impressive work. It reminds me of one of the reasons I love Alaska so much —\xa0the people Alaska attracts are often just as extraordinary as the landscape itself. Great article, thank you.",
"Well, at least the Russians are white, for heaven's sakes. I'd rather have to live next to a nice white drunk Russian than a black Muslim Kenyan whose children's names are Satanic anagrams.",
"Was I posted yesterday, it is interesting to note that under Session's watch only three black people have been appointed in Alabama for the federal courts. This despite the fact that black people make up over 39% of the population of that state. What underlines this reality must be Session's unconscious, if not conscious, attitude towards blacks in general."
]
df = pd.DataFrame(dict(comment_text=test_text))
# In[24]:
tokens = get_token_ids(
df, tokenizer, is_bert=True)
print([len(x) for x in tokens])
test_ds = ToxicDataset(df, tokens, labeled=False)
test_loader = DataLoader(
test_ds,
collate_fn=collate_fn,
batch_size=32,
num_workers=0,
pin_memory=True
)
with torch.no_grad():
results = []
for batch, _ in test_loader:
results.append(model(batch.cuda()))
results = torch.sigmoid(torch.cat(results)) * 100
results.size()
predictions = pd.DataFrame(results.cpu().numpy(), columns=AUX_COLUMNS)
predictions["text"] = df["comment_text"].values
predictions[["text", "target", "identity_attack", "female", "homosexual_gay_or_lesbian", "black", "white"]].rename(
columns={"target": "toxic", "homosexual_gay_or_lesbian":"homosexual"})
# ## Validate
# Make sure the mode is set up correctly.
# In[80]:
df_valid, tokens_valid = joblib.load(str(MODEL_PATH / "valid_bert-base-uncased_-1_yuval_f0.jbl"))
idx = np.random.choice(np.arange(df_valid.shape[0]), 32 * 1000)
df_valid, tokens_valid = df_valid.iloc[idx].reset_index(drop=True), tokens_valid[idx]
valid_ds = ToxicDataset(df_valid, tokens_valid, labeled=True)
val_sampler = SortSampler(valid_ds, key=lambda x: len(valid_ds.tokens[x]))
df_valid = df_valid.iloc[list(iter(val_sampler))]
print(df_valid.target.describe())
# In[81]:
valid_loader = DataLoader(
valid_ds,
collate_fn=collate_fn,
batch_size=64,
num_workers=0,
pin_memory=True,
sampler=val_sampler
)
# In[82]:
bot = ToxicBot(
checkpoint_dir=Path("/tmp/"),
log_dir=Path("/tmp/"),
model=model, train_loader=None,
val_loader=None, optimizer=None,
echo=False,
criterion=None,
avg_window=100,
callbacks=[],
pbar=False,
use_tensorboard=False,
device=DEVICE
)
valid_pred, valid_y = bot.predict(valid_loader, return_y=True)
# In[84]:
pd.set_option('precision', 4)
metric = ToxicMetric(df_valid)
metric(valid_y, valid_pred)
# In[ ]:
| [
"[email protected]"
] | |
fa091d4a5b67cc3425553a4c3c7993b379d5a42c | 2a2505108cd429d39746050d0100f4963dcd9c69 | /src/compas/geometry/bbox/__init__.py | b19dd1d59cd854d5d9397b2cf4ef284c580ed6d6 | [
"MIT"
] | permissive | adacko/compas | 677095bea007c22a98b44af3281131b445cb1ae1 | 47c443ad3825897ec7ed932ec20734c2f08ef120 | refs/heads/master | 2020-07-23T00:55:51.348907 | 2019-09-09T16:44:18 | 2019-09-09T16:44:18 | 207,390,442 | 0 | 1 | MIT | 2019-09-09T19:40:41 | 2019-09-09T19:40:41 | null | UTF-8 | Python | false | false | 260 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import compas
from .bbox import *
if not compas.IPY:
from .bbox_numpy import *
__all__ = [name for name in dir() if not name.startswith('_')]
| [
"[email protected]"
] | |
7f3da45e043ef6602769afea0a533af86f8f6f8f | 48e1ac111f48bf27b03625f81887a8eaef4d505d | /old/google-cloud-sdk/platform/gsutil/gslib/commands/du.py | afc399c6c2a24fe25420a2093c4b0f2484b10adf | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | altock/dev | 74350528ea570925e8fbc584c64939cae86f6ea7 | 90d87b2adb1eab7f218b075886aa620d8d6eeedb | refs/heads/master | 2021-07-10T08:31:48.080736 | 2017-04-15T03:04:12 | 2017-04-15T03:04:12 | 23,088,790 | 0 | 1 | null | 2020-07-25T04:32:05 | 2014-08-18T22:33:25 | Python | UTF-8 | Python | false | false | 8,808 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like du command for cloud storage providers."""
import sys
from gslib.boto_translation import S3_DELETE_MARKER_GUID
from gslib.command import Command
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.ls_helper import LsHelper
from gslib.storage_url import ContainsWildcard
from gslib.storage_url import StorageUrlFromString
from gslib.util import MakeHumanReadable
from gslib.util import NO_MAX
from gslib.util import UTF8
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
gsutil du url...
<B>DESCRIPTION</B>
The du command displays the amount of space (in bytes) being used by the
objects for a given URL. The syntax emulates the Linux du command (which
stands for disk usage).
<B>OPTIONS</B>
-0 Ends each output line with a 0 byte rather than a newline. This
can be useful to make the output more easily machine-readable.
-a Includes non-current object versions / generations in the listing
(only useful with a versioning-enabled bucket). Also prints
generation and metageneration for each listed object.
-c Produce a grand total.
-e A pattern to exclude from reporting. Example: -e "*.o" would
exclude any object that ends in ".o". Can be specified multiple
times.
-h Prints object sizes in human-readable format (e.g., 1KB, 234MB,
2GB, etc.)
-s Display only a summary total for each argument.
-X Similar to -e, but excludes patterns from the given file. The
patterns to exclude should be one per line.
<B>EXAMPLES</B>
To list the size of all objects in a bucket:
gsutil du gs://bucketname
To list the size of all objects underneath a prefix:
gsutil du gs://bucketname/prefix/*
To print the total number of bytes in a bucket, in human-readable form:
gsutil du -ch gs://bucketname
To see a summary of the total bytes in the two given buckets:
gsutil du -s gs://bucket1 gs://bucket2
To list the size of all objects in a versioned bucket, including objects that
are not the latest:
gsutil du -a gs://bucketname
To list all objects in a bucket, except objects that end in ".bak",
with each object printed ending in a null byte:
gsutil du -e "*.bak" -0 gs://bucketname
""")
class DuCommand(Command):
"""Implementation of gsutil du command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'du',
command_name_aliases=[],
min_args=0,
max_args=NO_MAX,
supported_sub_args='0ace:hsX:',
file_url_ok=False,
provider_url_ok=True,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='du',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Display object size usage',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def _PrintSummaryLine(self, num_bytes, name):
size_string = (MakeHumanReadable(num_bytes)
if self.human_readable else str(num_bytes))
sys.stdout.write('%(size)-10s %(name)s%(ending)s' % {
'size': size_string, 'name': name, 'ending': self.line_ending})
def _PrintInfoAboutBucketListingRef(self, bucket_listing_ref):
"""Print listing info for given bucket_listing_ref.
Args:
bucket_listing_ref: BucketListing being listed.
Returns:
Tuple (number of objects, object size)
Raises:
Exception: if calling bug encountered.
"""
obj = bucket_listing_ref.root_object
url_str = bucket_listing_ref.GetUrlString()
if (obj.metadata and S3_DELETE_MARKER_GUID in
obj.metadata.additionalProperties):
size_string = '0'
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
size_string = (MakeHumanReadable(obj.size)
if self.human_readable else str(obj.size))
num_bytes = obj.size
num_objs = 1
if not self.summary_only:
sys.stdout.write('%(size)-10s %(url)s%(ending)s' % {
'size': size_string,
'url': url_str.encode(UTF8),
'ending': self.line_ending})
return (num_objs, num_bytes)
def RunCommand(self):
"""Command entry point for the du command."""
self.line_ending = '\n'
self.all_versions = False
self.produce_total = False
self.human_readable = False
self.summary_only = False
self.exclude_patterns = []
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-0':
self.line_ending = '\0'
elif o == '-a':
self.all_versions = True
elif o == '-c':
self.produce_total = True
elif o == '-e':
self.exclude_patterns.append(a)
elif o == '-h':
self.human_readable = True
elif o == '-s':
self.summary_only = True
elif o == '-X':
if a == '-':
f = sys.stdin
else:
f = open(a, 'r')
try:
for line in f:
line = line.strip()
if line:
self.exclude_patterns.append(line)
finally:
f.close()
if not self.args:
# Default to listing all gs buckets.
self.args = ['gs://']
total_bytes = 0
got_nomatch_errors = False
def _PrintObjectLong(blr):
return self._PrintInfoAboutBucketListingRef(blr)
def _PrintNothing(unused_blr=None):
pass
def _SummaryLine(num_bytes, name):
return self._PrintSummaryLine(num_bytes, name)
for url_arg in self.args:
top_level_storage_url = StorageUrlFromString(url_arg)
if top_level_storage_url.IsFileUrl():
raise CommandException('Only cloud URLs are supported for %s'
% self.command_name)
bucket_listing_fields = ['size']
ls_helper = LsHelper(
self.WildcardIterator, self.logger,
print_object_func=_PrintObjectLong, print_dir_func=_PrintNothing,
print_dir_header_func=_PrintNothing,
print_dir_summary_func=_SummaryLine, print_newline_func=_PrintNothing,
all_versions=self.all_versions, should_recurse=True,
exclude_patterns=self.exclude_patterns, fields=bucket_listing_fields)
# ls_helper expands to objects and prefixes, so perform a top-level
# expansion first.
if top_level_storage_url.IsProvider():
# Provider URL: use bucket wildcard to iterate over all buckets.
top_level_iter = self.WildcardIterator(
'%s://*' % top_level_storage_url.scheme).IterBuckets(
bucket_fields=['id'])
elif top_level_storage_url.IsBucket():
top_level_iter = self.WildcardIterator(
'%s://%s' % (top_level_storage_url.scheme,
top_level_storage_url.bucket_name)).IterBuckets(
bucket_fields=['id'])
else:
# This is actually a string, not a blr, but we are just using the
# string in the below function.
top_level_iter = [url_arg]
for blr_or_str in top_level_iter:
url_string = str(blr_or_str)
storage_url = StorageUrlFromString(url_string)
if storage_url.IsBucket() and self.summary_only:
storage_url = StorageUrlFromString(
'%s://%s/**' % (storage_url.scheme, storage_url.bucket_name))
_, exp_objs, exp_bytes = ls_helper.ExpandUrlAndPrint(storage_url)
if (storage_url.IsObject() and exp_objs == 0 and
ContainsWildcard(url_arg) and not self.exclude_patterns):
got_nomatch_errors = True
total_bytes += exp_bytes
if self.summary_only:
self._PrintSummaryLine(exp_bytes, url_string.rstrip('/'))
if self.produce_total:
self._PrintSummaryLine(total_bytes, 'total')
if got_nomatch_errors:
raise CommandException('One or more URLs matched no objects.')
return 0
| [
"[email protected]"
] | |
c26747d4798c12a9061590246550915c3f49b876 | f7c7063e1a22b773a271a953c013a3c5303b70b3 | /src/litter_trap.py | f5802491a1ff00f278838b9b59f2b0dfe66141a0 | [] | no_license | Ewan82/ah_data | e0cce8fffafd91eb6fca8ce6af602d3230535f87 | d5961f284187acda8d1317bb4fd50f32c85bb591 | refs/heads/master | 2021-01-19T01:55:47.530127 | 2016-11-04T11:07:09 | 2016-11-04T11:07:09 | 40,532,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | import numpy as np
import matplotlib.mlab as mlab
def convert_csv2rec(file_no):
return mlab.csv2rec('../litter_traps/litterscans/file0'+str(file_no)+'.csv')
def remove_false_data(area_arr, tol=2.0):
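    # Treat scanned areas smaller than `tol` as false detections (likely scan noise) and drop them.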
idx = np.where(area_arr < tol)
return np.delete(area_arr, idx) | [
"[email protected]"
] | |
13c31e9d950cf3be9f2b388eecebe51ef72bd351 | b1c7a768f38e2e987a112da6170f49503b9db05f | /stockkeeping/migrations/0010_auto_20181101_1545.py | 34ef7c9e3a98255c3676811073ad0d7d44aad3d4 | [] | no_license | Niladrykar/bracketerp | 8b7491aa319f60ec3dcb5077258d75b0394db374 | ca4ee60c2254c6c132a38ce52410059cc6b19cae | refs/heads/master | 2022-12-11T04:23:07.504966 | 2019-03-18T06:58:13 | 2019-03-18T06:58:13 | 176,218,029 | 1 | 0 | null | 2022-12-08T03:01:46 | 2019-03-18T06:27:37 | JavaScript | UTF-8 | Python | false | false | 417 | py | # Generated by Django 2.0.6 on 2018-11-01 10:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stockkeeping', '0009_auto_20181101_1544'),
]
operations = [
migrations.AlterField(
model_name='purchase_total',
name='Total',
field=models.PositiveIntegerField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
78dc4511525e97dd533b1940967724911ec49d65 | e71fa62123b2b8f7c1a22acb1babeb6631a4549b | /xlsxwriter/test/table/test_table07.py | 121beef77b97ead58a919c1640b8c21d77b0c360 | [
"BSD-2-Clause"
] | permissive | timgates42/XlsxWriter | 40480b6b834f28c4a7b6fc490657e558b0a466e5 | 7ad2541c5f12b70be471b447ab709c451618ab59 | refs/heads/main | 2023-03-16T14:31:08.915121 | 2022-07-13T23:43:45 | 2022-07-13T23:43:45 | 242,121,381 | 0 | 0 | NOASSERTION | 2020-02-21T11:14:55 | 2020-02-21T11:14:55 | null | UTF-8 | Python | false | false | 2,017 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, [email protected]
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...table import Table
from ...worksheet import Worksheet
from ...workbook import WorksheetMeta
from ...sharedstrings import SharedStringTable
class TestAssembleTable(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
# Set the table properties.
worksheet.add_table('C3:F14', {'total_row': 1})
worksheet._prepare_tables(1, {})
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F14" totalsRowCount="1">
<autoFilter ref="C3:F13"/>
<tableColumns count="4">
<tableColumn id="1" name="Column1"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Column4"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| [
"[email protected]"
] | |
e6b8cf1ed34c95782fbc04f4582bc9f07d7f5f4b | 49813760337894f578bd6cf4f3ee76c32bd1d7ba | /guvip137.py | 09bad993fe97e3d711650c82615084b0ace982a6 | [] | no_license | devigajsrr/codekata | e1256cf7fd88a0c7cefd02dea11305391143253a | a9ae003a7d3c8865b1d4e9aae73723a5fedf79be | refs/heads/master | 2020-05-23T01:01:09.927231 | 2019-07-20T11:31:51 | 2019-07-20T11:31:51 | 186,580,561 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | n=int(input())
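# Print the 1-based position of the lowest set bit: reverse the binary string of n
# and locate the first '1' from the least-significant end.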
n=list(bin(n))
n=n[::-1]
print(n.index("1")+1)
| [
"[email protected]"
] | |
eadf86477e07dc6fcb83e07e480e090199897cee | e43e8bd052a613f158e29339aaa7e3bdec40b6fb | /models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_test.py | a3c33c28e62db57565d0119cf742f97bb5d8df3d | [] | no_license | sakshijain032/Harmful-Object-Detection | 249f586ffbc7de99f6647689bae230f3b79694b3 | 8e1711fc1596b451f97b5ff2f7690453a888c848 | refs/heads/master | 2022-12-24T18:40:41.795010 | 2020-10-01T17:34:42 | 2020-10-01T17:34:42 | 293,727,797 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,612 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor."""
import tensorflow as tf
from models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res
class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return frcnn_inc_res.FasterRCNNInceptionResnetV2KerasFeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 299, 299, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 19, 19, 1088])
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 28, 28, 1088])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 1088])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[2, 17, 17, 1088], maxval=255, dtype=tf.float32)
model = feature_extractor.get_box_classifier_feature_extractor_model(
name='TestScope')
proposal_classifier_features = (
model(proposal_feature_maps))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [2, 8, 8, 1536])
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
d0a334ca6c19f583a7c9f4aa5a63c23ce53c9460 | 077a17b286bdd6c427c325f196eb6e16b30c257e | /00_BofVar-unit-tests/07_64/remenissions-work/exploit-BofVar-1.py | 3e5efa3d0d010a0028daecc2f04b08bca5fc6cab | [] | no_license | KurSh/remenissions_test | 626daf6e923459b44b82521aa4cb944aad0dbced | 9dec8085b62a446f7562adfeccf70f8bfcdbb738 | refs/heads/master | 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | from pwn import *
import time
import sys
import signal
import sf
target = process("./chall-test_BofVar-07-x64")
gdb.attach(target, execute="verify_exploit")
bof_payload = sf.BufferOverflow(arch=64)
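# Build the payload with the sf BufferOverflow helper: user input is declared to start 0x48 bytes
# into the frame, and three 32-bit values (0xdead/0xdeae) are written at offsets 0x14, 0x10 and 0xc
# (presumably the stack variables the target binary checks).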
bof_payload.set_input_start(0x48)
bof_payload.add_int32(0x14, 0xdead)
bof_payload.add_int32(0x10, 0xdeae)
bof_payload.add_int32(0xc, 0xdeae)
payload = bof_payload.generate_payload()
target.sendline(payload)
# Exploit Verification starts here 15935728
def handler(signum, frame):
raise Exception("Timed out")
def check_verification_done():
while True:
if os.path.exists("pwned") or os.path.exists("rip"):
sys.exit(0)
signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
try:
while True:
check_verification_done()
except Exception:
print("Exploit timed out")
| [
"[email protected]"
] | |
3b3394be7b0f7c6c13b2006438556a5f0c7303ff | 7848e1b778ca0f3921aeeb0aeee44b398711b1f0 | /funtesting/mock/__init__.py | 495f052105769c8dfec9019cc49217d5fe565c55 | [] | no_license | fatelei/funtesting | a3a292ddfa30d9fbad47ee293768558b9e45fe8d | 748f4b5767cc16929408b19a5b62a812b48a0dd5 | refs/heads/master | 2021-01-10T12:09:38.809451 | 2016-02-21T03:59:15 | 2016-02-21T03:59:15 | 51,986,949 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | # -*- coding: utf8 -*-
"""
funtesting.mock
~~~~~~~~~~~~~~~
Mock modules.
"""
from .mock_redis import mock_redis
__all__ = [
"mock_redis"
]
| [
"[email protected]"
] | |
22c9b2072eee710b0af8c948145defea4346aa03 | 4aa7a4d0525095725eb99843c83827ba4806ceb1 | /keras/keras110_5_LeakyReLU.py | 213ecbe46b4073d61f4b984af0b9f92698fdaafd | [] | no_license | seonukim/Study | 65a70f5bdfad68f643abc3086d5c7484bb2439d4 | a5f2538f9ae8b5fc93b5149dd51704e8881f0a80 | refs/heads/master | 2022-12-04T17:04:31.489771 | 2020-08-21T00:35:15 | 2020-08-21T00:35:15 | 260,144,755 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | # activation - LeakyReLU
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(-6, 6, 0.01)
def leakyrelu(x): # Leaky ReLU(Rectified Linear Unit)
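    # For x > 0 this returns x; otherwise it returns 0.1 * x, i.e. the standard leaky slope.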
return np.maximum(0.1 * x, x) #same
plt.plot(x, leakyrelu(x), linestyle = '--', label = 'Leaky ReLU')
plt.show() | [
"[email protected]"
] | |
8e0ed00e073de8a5bccb6b2d7fe1eef2ede522de | 9e4df2b26e899f2d3e044e71bc4193958b02314b | /app/migrations/0027_auto_20200930_0118.py | bb05747fde99e2ecc6d9acb7db6fe524b26b1a36 | [
"MIT"
] | permissive | hosseinmoghimi/phoenix | afea0a73cdf257fcf89c75d85c5ab1890d957a83 | 43fc49421a50563acc1884981d391b0d6a5d5d72 | refs/heads/master | 2023-01-11T11:12:30.308822 | 2020-11-15T13:52:21 | 2020-11-15T13:52:21 | 295,109,751 | 1 | 5 | MIT | 2020-11-15T13:50:12 | 2020-09-13T08:31:01 | HTML | UTF-8 | Python | false | false | 701 | py | # Generated by Django 3.1 on 2020-09-29 21:48
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('app', '0026_auto_20200930_0117'),
]
operations = [
migrations.AlterField(
model_name='jumbotron',
name='description',
field=tinymce.models.HTMLField(blank=True, max_length=2000, null=True, verbose_name='شرح کامل'),
),
migrations.AlterField(
model_name='jumbotron',
name='short_description',
field=tinymce.models.HTMLField(blank=True, max_length=1000, null=True, verbose_name='شرح کوتاه'),
),
]
| [
"[email protected]"
] | |
0180991f5de6838806543f0af00e4bb397839b33 | ef42fa903820055b9b0a8b4ebb1863a16d386171 | /contact/forms.py | ee057df7c2a82d279ab2da12b60a6da4f9beac72 | [] | no_license | sinjorjob/django-simple-capture-inquery-form | 2537c8e03bc2c0118f772b69a59866ffb34d7cac | 8bd2900a6bdf97b97ddca7b7240b42f478e14884 | refs/heads/master | 2023-07-02T14:40:43.840669 | 2021-08-10T21:24:24 | 2021-08-10T21:24:24 | 394,784,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | from django import forms
from captcha.fields import CaptchaField, CaptchaTextInput
from django.core.mail import send_mail #追加
from config import settings #追加
from django.urls import reverse #追加
import smtplib #追加
class ContactForm(forms.Form):
name = forms.CharField(label="氏名")
email = forms.EmailField(label="連絡先アドレス")
subject = forms.CharField(label="タイトル")
message = forms.CharField(label="お問い合わせ内容",
widget=forms.Textarea(attrs={'rows':4, 'cols':40}))
captcha = CaptchaField(widget=CaptchaTextInput(attrs={'placeholder':'上記のアルファベットを入力してください。'}))
#ここから下を追加
def send_email(self):
subject = '[Inquiry Form] from %s' % settings.SITE_URL + reverse('contact_form')
name = self.cleaned_data['name']
email = self.cleaned_data['email']
message = self.cleaned_data['message']
body = """
氏名: %s
メールアドレス: %s
問い合わせ内容: %s
""" %(name, email, message)
sender = email
receipient = settings.EMAIL_HOST_USER
        response = None
        try:
            response = send_mail(
                subject,  # subject
                body,  # message body
                sender,  # sender address
                [receipient],  # recipient address
                fail_silently=False,
            )
        except smtplib.SMTPException:
            # sending failed; fall through and return None
            pass
        return response
"[email protected]"
] | |
b31f2e087a126b2a9b582dd32e0bb4f40cfde091 | eb68003893970fd5cedb684e45e8a5357907efda | /CIFAR100/prune80.py | 1d110432338f048d54b53678863336d5df911d67 | [] | no_license | byh1321/machine-learning-hardware-simulation | 7ffc6dbb7483119ace39694055892378caf19da5 | 165ae42e01877b49d4ebc3656782dc6c70ee16d2 | refs/heads/master | 2021-09-03T20:03:48.770465 | 2018-01-11T15:46:02 | 2018-01-11T15:46:02 | 110,343,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,951 | py | """
some parts of code are extracted from "https://github.com/kuangliu/pytorch-cifar"
I modified some parts for our experiment
"""
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
from utils import progress_bar
import os
import argparse
# import VGG16
import struct
import random
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--se', default=0, type=int, help='start epoch')
parser.add_argument('--ne', default=0, type=int, help='number of epoch')
parser.add_argument('--pr', default=0, type=int, help='pruning') # mode=1 is pruning, mode=0 is no pruning
parser.add_argument('--ldpr', default=0, type=int, help='pruning') # mode=1 load pruned trained data. mode=0 is trained, but not pruned data
parser.add_argument('--bs', default=128, type=int, help='batch size')
parser.add_argument('--mode', default=1, type=int, help='train or inference') #mode=1 is train, mode=0 is inference
parser.add_argument('--pprec', type=int, default=20, metavar='N',help='parameter precision for layer weight')
parser.add_argument('--aprec', type=int, default=20, metavar='N',help='Arithmetic precision for internal arithmetic')
parser.add_argument('--fixed', type=int, default=0, metavar='N',help='fixed=0 - floating point arithmetic')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
best_acc = 0 # best test accuracy
use_cuda = torch.cuda.is_available()
transform_train = transforms.Compose([transforms.RandomCrop(32,padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
transform_test = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
cifar_train = dset.CIFAR100("./", train=True, transform=transform_train, target_transform=None, download=True)
cifar_test = dset.CIFAR100("./", train=False, transform=transform_test, target_transform=None, download=True)
train_loader = torch.utils.data.DataLoader(cifar_train,batch_size=args.bs, shuffle=True,num_workers=8,drop_last=False)
test_loader = torch.utils.data.DataLoader(cifar_test,batch_size=10000, shuffle=False,num_workers=8,drop_last=False)
mode = args.mode
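# Per-layer binary pruning masks (same shapes as the corresponding conv/fc weights);
# zero entries mark weights that have been pruned away.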
mask_conv0 = torch.cuda.FloatTensor(64,3,3,3)
mask_conv3 = torch.cuda.FloatTensor(64,64,3,3)
mask_conv7 = torch.cuda.FloatTensor(128,64,3,3)
mask_conv10 = torch.cuda.FloatTensor(128,128,3,3)
mask_conv14 = torch.cuda.FloatTensor(256,128,3,3)
mask_conv17 = torch.cuda.FloatTensor(256,256,3,3)
mask_conv20 = torch.cuda.FloatTensor(256,256,3,3)
mask_conv24 = torch.cuda.FloatTensor(512,256,3,3)
mask_conv27 = torch.cuda.FloatTensor(512,512,3,3)
mask_conv30 = torch.cuda.FloatTensor(512,512,3,3)
mask_conv34 = torch.cuda.FloatTensor(512,512,3,3)
mask_conv37 = torch.cuda.FloatTensor(512,512,3,3)
mask_conv40 = torch.cuda.FloatTensor(512,512,3,3)
mask_fc1 = torch.cuda.FloatTensor(512,512)
mask_fc4 = torch.cuda.FloatTensor(512,512)
mask_fc6 = torch.cuda.FloatTensor(100,512)
def roundmax(input):
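	# Saturate values to the signed fixed-point range [-(2**pprec), 2**pprec - 1] using only ReLU/add ops.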
maximum = 2**args.pprec-1
minimum = -maximum-1
input = F.relu(torch.add(input, -minimum))
input = F.relu(torch.add(torch.neg(input), maximum-minimum))
input = torch.add(torch.neg(input), maximum)
return input
class CNN(nn.Module):
def __init__(self):
super(CNN,self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(3,64,3,padding=1,bias=False), #layer0
nn.BatchNorm2d(64), # batch norm is added because dataset is changed
nn.ReLU(inplace=True),
)
self.conv2 = nn.Sequential(
nn.Conv2d(64,64,3,padding=1, bias=False), #layer3
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.maxpool1 = nn.Sequential(
nn.MaxPool2d(2,2), # 16*16* 64
)
self.conv3 = nn.Sequential(
nn.Conv2d(64,128,3,padding=1, bias=False), #layer7
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
)
self.conv4 = nn.Sequential(
nn.Conv2d(128,128,3,padding=1, bias=False),#layer10
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
)
self.maxpool2 = nn.Sequential(
nn.MaxPool2d(2,2), # 8*8*128
)
self.conv5 = nn.Sequential(
nn.Conv2d(128,256,3,padding=1, bias=False), #layer14
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
)
self.conv6 = nn.Sequential(
nn.Conv2d(256,256,3,padding=1, bias=False), #layer17
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
)
self.conv7 = nn.Sequential(
nn.Conv2d(256,256,3,padding=1, bias=False), #layer20
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
)
self.maxpool3 = nn.Sequential(
nn.MaxPool2d(2,2), # 4*4*256
)
self.conv8 = nn.Sequential(
nn.Conv2d(256,512,3,padding=1, bias=False), #layer24
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
)
self.conv9 = nn.Sequential(
nn.Conv2d(512,512,3,padding=1, bias=False), #layer27
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
)
self.conv10 = nn.Sequential(
nn.Conv2d(512,512,3,padding=1, bias=False), #layer30
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
)
self.maxpool4 = nn.Sequential(
nn.MaxPool2d(2,2), # 2*2*512
)
self.conv11 = nn.Sequential(
nn.Conv2d(512,512,3,padding=1, bias=False), #layer34
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
)
self.conv12 = nn.Sequential(
nn.Conv2d(512,512,3,padding=1, bias=False), #layer37
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
)
self.conv13 = nn.Sequential(
nn.Conv2d(512,512,3,padding=1, bias=False), #layer40
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
)
self.maxpool5 = nn.Sequential(
nn.MaxPool2d(2,2) # 1*1*512
)
self.fc1 = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(512,512, bias=False), #fc_layer1
nn.ReLU(inplace=True),
)
self.fc2 = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(512,512, bias=False), #fc_layer4
nn.ReLU(inplace=True),
)
self.fc3 = nn.Sequential(
nn.Linear(512,100, bias=False) #fc_layer6
)
def forward(self,x):
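	# With --fixed set, every layer output is rounded to `aprec` fractional bits and
	# clamped by roundmax() to emulate fixed-point inference.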
x = roundmax(x)
out1 = self.conv1(x) # 1250*64*32*32
if args.fixed:
out1 = torch.round(out1 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out1 = roundmax(out1)
out2 = self.conv2(out1) # 1250*64*32*32
if args.fixed:
out2 = torch.round(out2 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out2 = roundmax(out2)
out3 = self.maxpool1(out2)
out4 = self.conv3(out3) # 1250*128*16*16
if args.fixed:
out4 = torch.round(out4 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out4 = roundmax(out4)
out5 = self.conv4(out4) # 1250*128*16*16
if args.fixed:
out5 = torch.round(out5 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out5 = roundmax(out5)
out6 = self.maxpool2(out5)
out7 = self.conv5(out6) # 1250*256*8*8
if args.fixed:
out7 = torch.round(out7 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out7 = roundmax(out7)
out8 = self.conv6(out7) # 1250*256*8*8
if args.fixed:
out8 = torch.round(out8 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out8 = roundmax(out8)
out9 = self.conv7(out8) # 1250*256*8*8
if args.fixed:
out9 = torch.round(out9 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out9 = roundmax(out9)
out10 = self.maxpool3(out9)
out11 = self.conv8(out10) # 1250*512*4*4
if args.fixed:
out11 = torch.round(out11 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out11 = roundmax(out11)
out12 = self.conv9(out11) # 1250*512*4*4
if args.fixed:
out12 = torch.round(out12 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out12 = roundmax(out12)
out13 = self.conv10(out12) # 1250*512*4*4
if args.fixed:
out13 = torch.round(out13 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out13 = roundmax(out13)
out14 = self.maxpool4(out13)
out15 = self.conv11(out14) # 1250*512*2*2
if args.fixed:
out15 = torch.round(out15 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out15 = roundmax(out15)
out16 = self.conv12(out15) # 1250*512*2*2
if args.fixed:
out16 = torch.round(out16 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out16 = roundmax(out16)
out17 = self.conv13(out16) # 1250*512*2*2
if args.fixed:
out17 = torch.round(out17 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out17 = roundmax(out17)
out18 = self.maxpool5(out17)
out19 = out18.view(out18.size(0),-1)
out20 = self.fc1(out19) # 1250*512
if args.fixed:
out20 = torch.round(out20 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out20 = roundmax(out20)
out21 = self.fc2(out20) # 1250*512
if args.fixed:
out21 = torch.round(out21 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out21 = roundmax(out21)
out22 = self.fc3(out21) # 1250*10
if args.fixed:
out22 = torch.round(out22 / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
out22 = roundmax(out22)
return out22
# Model
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.t1')
if args.ldpr:
checkpoint = torch.load('./checkpoint/ckpt_prune80.t1')
net = checkpoint['net']
else:
net = checkpoint['net']
else:
print('==> Building model..')
net = CNN()
if use_cuda:
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(0,8))
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
start_epoch = args.se
num_epoch = args.ne
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(train_loader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
optimizer.zero_grad()
inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(train_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test():
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(test_loader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs, volatile=True), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(test_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
f = open('result_prune80.txt','a+')
print('{:.5f}'.format(100. * correct / len(test_loader.dataset)), end='\t', file=f)
f.close()
# Save checkpoint.
acc = 100.*correct/total
if args.mode:
if acc > best_acc:
print('Saving..')
state = {
'net': net.module if use_cuda else net,
'acc': acc,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt_prune80.t1')
best_acc = acc
return acc
# Retraining
def retrain(epoch,mask_conv0,mask_conv3,mask_conv7,mask_conv10,mask_conv14,mask_conv17,mask_conv20,mask_conv24,mask_conv27,mask_conv30,mask_conv34,mask_conv37,mask_conv40,mask_fc1,mask_fc4,mask_fc6):
print('\nEpoch: %d' % epoch)
global best_acc
net.train()
train_loss = 0
total = 0
correct = 0
mask = torch.load('mask_80.dat')
mask_conv0 = mask['mask_conv0']
mask_conv3 = mask['mask_conv3']
mask_conv7 = mask['mask_conv7']
mask_conv10 = mask['mask_conv10']
mask_conv14 = mask['mask_conv14']
mask_conv17 = mask['mask_conv17']
mask_conv20 = mask['mask_conv20']
mask_conv24 = mask['mask_conv24']
mask_conv27 = mask['mask_conv27']
mask_conv30 = mask['mask_conv30']
mask_conv34 = mask['mask_conv34']
mask_conv37 = mask['mask_conv37']
mask_conv40 = mask['mask_conv40']
mask_fc1 = mask['mask_fc1']
mask_fc4 = mask['mask_fc4']
mask_fc6 = mask['mask_fc6']
for batch_idx, (inputs, targets) in enumerate(train_loader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
optimizer.zero_grad()
inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
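		# Re-apply the pruning masks to both gradients and weights so pruned connections stay at zero during retraining.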
for child in net.children():
for param in child.conv1[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv0)
param.data = torch.mul(param.data,mask_conv0)
for child in net.children():
for param in child.conv2[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv3)
param.data = torch.mul(param.data,mask_conv3)
for child in net.children():
for param in child.conv3[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv7)
param.data = torch.mul(param.data,mask_conv7)
for child in net.children():
for param in child.conv4[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv10)
param.data = torch.mul(param.data,mask_conv10)
for child in net.children():
for param in child.conv5[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv14)
param.data = torch.mul(param.data,mask_conv14)
for child in net.children():
for param in child.conv6[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv17)
param.data = torch.mul(param.data,mask_conv17)
for child in net.children():
for param in child.conv7[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv20)
param.data = torch.mul(param.data,mask_conv20)
for child in net.children():
for param in child.conv8[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv24)
param.data = torch.mul(param.data,mask_conv24)
for child in net.children():
for param in child.conv9[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv27)
param.data = torch.mul(param.data,mask_conv27)
for child in net.children():
for param in child.conv10[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv30)
param.data = torch.mul(param.data,mask_conv30)
for child in net.children():
for param in child.conv11[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv34)
param.data = torch.mul(param.data,mask_conv34)
for child in net.children():
for param in child.conv12[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv37)
param.data = torch.mul(param.data,mask_conv37)
for child in net.children():
for param in child.conv13[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_conv40)
param.data = torch.mul(param.data,mask_conv40)
for child in net.children():
for param in child.fc1[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_fc1)
param.data = torch.mul(param.data,mask_fc1)
for child in net.children():
for param in child.fc2[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_fc4)
param.data = torch.mul(param.data,mask_fc4)
for child in net.children():
for param in child.fc3[0].parameters():
param.grad.data = torch.mul(param.grad.data, mask_fc6)
param.data = torch.mul(param.data,mask_fc6)
optimizer.step()
train_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(train_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
acc = 100.*correct/total
prune = args.pr
if prune:
'''print("pruning CONV1 weights")
for child in net.children():
for param in child.conv1[0].parameters():
for i in range(0,64):
for j in range(0,3):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv0[i][j][k][l] = 0
else:
mask_conv0[i][j][k][l] = 1
else:
mask_conv0[i][j][k][l] = 1
print("pruning CONV2 weights")
for child in net.children():
for param in child.conv2[0].parameters():
for i in range(0,64):
for j in range(0,64):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv3[i][j][k][l] = 0
else:
mask_conv3[i][j][k][l] = 1
else:
mask_conv3[i][j][k][l] = 1
print("pruning CONV3 weights")
for child in net.children():
for param in child.conv3[0].parameters():
for i in range(0,128):
for j in range(0,64):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv7[i][j][k][l] = 0
else:
mask_conv7[i][j][k][l] = 1
else:
mask_conv7[i][j][k][l] = 1
print("pruning CONV4 weights")
for child in net.children():
for param in child.conv4[0].parameters():
for i in range(0,128):
for j in range(0,128):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv10[i][j][k][l] = 0
else:
mask_conv10[i][j][k][l] = 1
else:
mask_conv10[i][j][k][l] = 1
print("pruning CONV5 weights")
for child in net.children():
for param in child.conv5[0].parameters():
for i in range(0,256):
for j in range(0,128):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv14[i][j][k][l] = 0
else:
mask_conv14[i][j][k][l] = 1
else:
mask_conv14[i][j][k][l] = 1
print("pruning CONV6 weights")
for child in net.children():
for param in child.conv6[0].parameters():
for i in range(0,256):
for j in range(0,256):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv17[i][j][k][l] = 0
else:
mask_conv17[i][j][k][l] = 1
else:
mask_conv17[i][j][k][l] = 1
print("pruning CONV7 weights")
for child in net.children():
for param in child.conv7[0].parameters():
for i in range(0,256):
for j in range(0,256):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv20[i][j][k][l] = 0
else:
mask_conv20[i][j][k][l] = 1
else:
mask_conv20[i][j][k][l] = 1
print("pruning CONV8 weights")
for child in net.children():
for param in child.conv8[0].parameters():
for i in range(0,512):
for j in range(0,256):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv24[i][j][k][l] = 0
else:
mask_conv24[i][j][k][l] = 1
else:
mask_conv24[i][j][k][l] = 1
print("pruning CONV9 weights")
for child in net.children():
for param in child.conv9[0].parameters():
for i in range(0,512):
for j in range(0,512):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv27[i][j][k][l] = 0
else:
mask_conv27[i][j][k][l] = 1
else:
mask_conv27[i][j][k][l] = 1
print("pruning CONV10 weights")
for child in net.children():
for param in child.conv10[0].parameters():
for i in range(0,512):
for j in range(0,512):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv30[i][j][k][l] = 0
else:
mask_conv30[i][j][k][l] = 1
else:
mask_conv30[i][j][k][l] = 1
print("pruning CONV11 weights")
for child in net.children():
for param in child.conv11[0].parameters():
for i in range(0,512):
for j in range(0,512):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv34[i][j][k][l] = 0
else:
mask_conv34[i][j][k][l] = 1
else:
mask_conv34[i][j][k][l] = 1
print("pruning CONV12 weights")
for child in net.children():
for param in child.conv12[0].parameters():
for i in range(0,512):
for j in range(0,512):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv37[i][j][k][l] = 0
else:
mask_conv37[i][j][k][l] = 1
else:
mask_conv37[i][j][k][l] = 1
print("pruning CONV13 weights")
for child in net.children():
for param in child.conv13[0].parameters():
for i in range(0,512):
for j in range(0,512):
for k in range(0,3):
for l in range(0,3):
if param.data[i][j][k][l] <= 0.00846:
if param.data[i][j][k][l] >= -0.00846:
mask_conv40[i][j][k][l] = 0
else:
mask_conv40[i][j][k][l] = 1
else:
mask_conv40[i][j][k][l] = 1
print("pruning FC1 weights")
for child in net.children():
for param in child.fc1[0].parameters():
for i in range(0,512):
for j in range(0,512):
if param.data[i][j] <= 0.00846:
if param.data[i][j] >= -0.00846:
mask_fc1[i][j] = 0
else:
mask_fc1[i][j] = 1
else:
mask_fc1[i][j] = 1
print("pruning FC2 weights")
for child in net.children():
for param in child.fc2[0].parameters():
for i in range(0,512):
for j in range(0,512):
if param.data[i][j] <= 0.00846:
if param.data[i][j] >= -0.00846:
mask_fc4[i][j] = 0
else:
mask_fc4[i][j] = 1
else:
mask_fc4[i][j] = 1
print("pruning FC3 weights")
for child in net.children():
for param in child.fc3[0].parameters():
for i in range(0,100):
for j in range(0,512):
if param.data[i][j] <= 0.00846:
if param.data[i][j] >= -0.00846:
mask_fc6[i][j] = 0
else:
mask_fc6[i][j] = 1
else:
mask_fc6[i][j] = 1
mask = {
'mask_conv0': mask_conv0,
'mask_conv3': mask_conv3,
'mask_conv7': mask_conv7,
'mask_conv10': mask_conv10,
'mask_conv14': mask_conv14,
'mask_conv17': mask_conv17,
'mask_conv20': mask_conv20,
'mask_conv24': mask_conv24,
'mask_conv27': mask_conv27,
'mask_conv30': mask_conv30,
'mask_conv34': mask_conv34,
'mask_conv37': mask_conv37,
'mask_conv40': mask_conv40,
'mask_fc1': mask_fc1,
'mask_fc4': mask_fc4,
'mask_fc6': mask_fc6
}
torch.save(mask, 'mask_80.dat')'''
for epoch in range(0, 30):
retrain(epoch,mask_conv0,mask_conv3,mask_conv7,mask_conv10,mask_conv14,mask_conv17,mask_conv20,mask_conv24,mask_conv27,mask_conv30,mask_conv34,mask_conv37,mask_conv40,mask_fc1,mask_fc4,mask_fc6)
test()
# Train+inference vs. Inference
if mode == 1: # mode=1 is training & inference @ each epoch
for epoch in range(start_epoch, start_epoch+num_epoch):
train(epoch)
test()
elif mode == 0: # only inference
test()
else:
pass
number_wv = 1
| [
"[email protected]"
] | |
626ccb2e51e4602bed82ff9ee6f72b36dc9f0add | 0e647273cffc1fb6cbd589fa3c7c277b221ba247 | /configs/hpt-pretrain/bdd/byol_r50_bs2048_accmulate2_ep200/500-iters.py | 215d809fb24ebc2a34d497fc2f4750a359313eda | [
"Apache-2.0"
] | permissive | Berkeley-Data/OpenSelfSup | e9976bf011b69ebf918506ba184f464b1073ec13 | 221191b88d891de57725b149caf237ffef72e529 | refs/heads/master | 2023-05-12T07:34:52.268476 | 2021-04-08T00:58:37 | 2021-04-08T00:58:37 | 343,654,823 | 0 | 1 | Apache-2.0 | 2021-04-08T00:58:37 | 2021-03-02T05:20:27 | Python | UTF-8 | Python | false | false | 237 | py | _base_="../byol-base-bdd-config.py"
# this will merge with the parent
model=dict(pretrained='data/basetrain_chkpts/byol_r50_bs2048_accmulate2_ep200.pth')
# epoch related
total_iters=500*2
checkpoint_config = dict(interval=total_iters)
| [
"[email protected]"
] | |
5898c1034a4038ecddbfd07e7567ec2b0facdbee | 03c9bb7e3cc687afecd57c6c6e3d5c1d54ed7ab0 | /smilejakdu/3week/3day/MaximumSubarray.py | 745fb6d684c6125416fb3fa0eafd62e8a9348e99 | [] | no_license | smilejakdu/python_algorithm_study | 541aa3de77e9f432d41b5627790a6f3e10f5a07d | 5119b31b6ae781e12bf97134ca6f10fec662abd8 | refs/heads/master | 2023-04-06T15:41:41.156021 | 2020-08-10T08:58:34 | 2020-08-10T08:58:34 | 282,879,639 | 0 | 0 | null | 2020-08-01T07:04:38 | 2020-07-27T11:36:31 | Python | UTF-8 | Python | false | false | 897 | py | ''':arg
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
'''
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
''':arg
maxcurr = nums[0]
maxglobal = nums[0]
First, seed both maxcurr and maxglobal with the value at index 0.
Then loop from index 1.
Using max(), compare nums[i] with maxcurr + nums[i].
Store the larger of the two back into maxcurr.
Finally compare maxcurr with maxglobal and keep the larger as maxglobal.
'''
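# Kadane's algorithm: a single O(n) pass tracking the best sum ending at the current index
# (maxcurr) and the best sum seen overall (maxglobal).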
def maxSubArray(nums):
maxcurr = nums[0]
maxglobal = nums[0]
for i in range(1, len(nums)):
maxcurr = max(nums[i], maxcurr + nums[i])
maxglobal = max(maxcurr, maxglobal)
return maxglobal
print(maxSubArray(nums))
| [
"[email protected]"
] | |
62be29a83225382074ef88884da70792ec0067e6 | 00ce0f4d0c380d60cb336484200153636b249120 | /tests/agents/trade/test_case_mixin.py | 271f41ecbbe4a1c7723057a2e8fabc60c2e0e0c9 | [
"MIT"
] | permissive | tezheng/hearthbreaker | 21784aeba11f557703e22a23af54886c496d3fec | 169ad0d00e62300054e7cbaf5562d750f28730a8 | refs/heads/master | 2021-01-15T14:30:05.542012 | 2014-09-24T20:03:12 | 2014-09-24T20:03:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | import random
from tests.agents.trade.test_helpers import TestHelpers
from hearthbreaker.agents.trade.trade import Trades
class TestCaseMixin:
def setUp(self):
TestHelpers.fix_create_minion()
random.seed(1857)
def add_minions(self, game, player_index, *minions):
player = game.players[player_index]
for minion in minions:
minion.use(player, game)
def make_all_active(self, game):
for player in game.players:
for minion in player.minions:
minion.active = True
minion.exhausted = False
def assert_minions(self, player, *names):
actual = self.card_names(player.minions)
self.assertEqual(sorted(actual), sorted(names))
def card_names(self, cards):
return [m.try_name() for m in cards]
def player_str(self, player):
res = []
res.append("\nPlayer\n")
res.append("Hand: ")
res.append(self.card_names(player.hand))
res.append("\nDeck: ")
res.append(self.card_names(player.deck.cards[0:5]))
res.append("\n")
res = [str(x) for x in res]
return str.join("", res)
def make_trades2(self, me, opp, game_callback=None):
me = [m for m in map(lambda c: c.create_minion(None), me)]
opp = [m for m in map(lambda c: c.create_minion(None), opp)]
game = self.make_game()
if game_callback:
game_callback(game)
trades = Trades(game.players[0], me, opp, game.players[1].hero)
return [game, trades]
def make_trades(self, me, opp):
return self.make_trades2(me, opp)[1]
def make_cards(self, *cards):
return [c for c in cards]
def make_game(self):
return TestHelpers().make_game()
def set_hand(self, game, player_index, *cards):
cards = self.make_cards(*cards)
game.players[player_index].hand = cards
| [
"[email protected]"
] | |
4a14238ab6b800f0cc73e526e8139c895d15f7b4 | ea3bf64156bbb79544bfd6b42bbcd3eda453ac31 | /extra-credit/Testing Room Locking System in Hotels/incorrect_impl_testkeycard_second_key_returns_second_key.py | cacf95a3697cc0bbbb510a65b0a6e8e07b6dec7f | [
"CC-BY-4.0"
] | permissive | Jackiexiong/software-testing-course | 563ffc8543fdcff9500f64944fd76e7c0c8e1144 | 3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0 | refs/heads/master | 2021-07-08T02:10:25.915964 | 2017-10-04T20:50:51 | 2017-10-04T20:50:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,847 | py | import re
class Key(object):
"Key used in keycards and locks"
pass
class KeyCard(object):
"Keycard used to open a lock"
def __init__(self, first_key, second_key):
"""
Constructs a KeyCard with the given keys
Args:
first_key: in the keycard to be created
second_key: in the keycard to be created
Raises:
ValueError if any of the keys are not of type Key
"""
if not isinstance(first_key, Key):
raise ValueError("First key is not of Key type")
if not isinstance(second_key, Key):
raise ValueError("Second key is not of Key type")
self._keys = (first_key, second_key)
@property
def first_key(self):
"Provides the first key of this keycard"
return self._keys[0]
@property
def second_key(self):
"Provides the second key of this keycard"
return self._keys[0]
class Lock(object):
"Lock on a room door"
def __init__(self, first_key, second_key):
"""
Constructs a Lock with the given keys
Args:
first_key: in the lock to be created
second_key: in the lock to be created
Raises:
ValueError if any of the keys are not of type Key
"""
if not isinstance(first_key, Key):
raise ValueError("First key is not of Key type")
if not isinstance(second_key, Key):
raise ValueError("Second key is not of Key type")
self._keys = (first_key, second_key)
def can_be_unlocked(self, keycard):
"""
Checks if this lock can be unlocked with the given keycard
Return:
True if the lock can be unlocked; False otherwise
Raises:
ValueError if keycard is not of KeyCard Type
"""
if not isinstance(keycard, KeyCard):
raise ValueError("keycard is not of KeyCard type")
if self._keys[0] == keycard.first_key and \
self._keys[1] == keycard.second_key:
return True
elif self._keys[1] == keycard.first_key:
self._keys = (keycard.first_key, keycard.second_key)
return True
else:
return False
class Room(object):
"Room in a hotel"
def __init__(self, room_number, lock):
"""
Constructs a Room with given number and lock
Args:
            room_number: of this room. This has to be greater than 0.
lock: of this room.
Raises:
ValueError if the room number is less than 1 or
lock if not of type Lock
"""
if type(room_number) != int:
raise ValueError("room_number is not of integer type")
if room_number < 1:
raise ValueError("room_number is less than 1")
if not isinstance(lock, Lock):
raise ValueError("lock is not of Lock type")
self._number = room_number
self._lock = lock
@property
def last_key(self):
return self._last_key
@last_key.setter
def last_key(self, key):
self._last_key = key
@property
def keys(self):
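        # Hand out (current key, freshly generated key) and advance the room's latest key to the new one.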
k = self.last_key
self.last_key = Key()
return (k, self.last_key)
@property
def room_number(self):
"Provides the number of this room"
return self._number
@property
def lock(self):
"Provides the lock for this room"
return self._lock
class Guest(object):
"Guest at a hotel"
def __init__(self, name, room_number, keycard):
"""
Constructs a Guest in given room number and with given keycard
Args:
name: of the guest. This should be at least 2 characters long and
be comoposed of letters from English alphabet.
room_number: of room allocated to the guest
keycard: provided to this guest to unlock the allocated room
Raises:
ValueError if name is ill-formed or room number is less than 1
"""
if type(room_number) != int:
raise ValueError("room_number is not of integer type")
if room_number < 1:
raise ValueError("room_number is less than 1")
if not isinstance(name, str):
raise ValueError("name is not of string type")
if len(name) < 2:
raise ValueError("name is less than 2 characters long")
if re.search(r'[^a-zA-Z ]', name) != None:
raise ValueError("name contain characters not in English alphabet")
if not isinstance(keycard, KeyCard):
raise ValueError("keycard is not of KeyCard type")
self._guest_name = name
self._room_number = room_number
self._keycard = keycard
@property
def guest_name(self):
"Provides the name of this guest"
return self._guest_name
@property
def keycard(self):
"Provides the keycard of this guest"
return self._keycard
@property
def room_number(self):
"Provides the number of the room occupied by this guest"
return self._room_number
def is_checkedin(self, hotel):
"""
Checks if this guest is checked into this hotel
Returns:
True if this guest is checked in at the given hotel; False otherwise
Raises:
ValueError if hotel is not of Hotel type
"""
if not isinstance(hotel, Hotel):
raise ValueError("hotel is not of Hotel type")
return hotel.is_checkedin(self._guest_name)
class FullCapacityError(RuntimeError):
pass
class Hotel(object):
"Hotel"
def __init__(self, N):
"Constructs a Hotel with N rooms"
if type(N) != int:
raise ValueError("N is not of int type")
if N < 10 or N > 1000:
raise ValueError("N is not between 10 and 1000, both inclusive")
self._name2guest = {}
self._name2room = {}
self._capacity = N
self._empty_rooms = []
for i in range(1, N + 1):
k = Key()
r = Room(i, Lock(k, k))
r.last_key = k
self._empty_rooms.append(r)
def checkin(self, guest_name):
"""
Checks the guest into the hotel by allocating a room
Return:
the corresponding Guest
Raises:
ValueError if guest name is not of str type or
is already checked in at this hotel
"""
if not isinstance(guest_name, str):
raise ValueError("guest name is not of string type")
if guest_name in self._name2guest:
raise ValueError(
"guest named {0} is already checked in".format(guest_name))
if len(self._name2guest) >= self._capacity:
raise FullCapacityError()
room = self._empty_rooms.pop()
last_key, new_key = room.keys
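        # The new guest's card pairs the room's previous key with a fresh one, so it can open
        # (and recode) the lock even though the lock still holds the older key pair.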
guest = Guest(guest_name, room.room_number, KeyCard(last_key, new_key))
self._name2guest[guest_name] = guest
self._name2room[guest_name] = room
return guest
def is_checkedin(self, guest_name):
"""
Checks if the guest is a guest at this Hotel
Return:
True if the guest is checked in at this Hotel; False otherwise
Raises:
ValueError if guest name is not of str type
"""
if not isinstance(guest_name, str):
raise ValueError("guest name is not of string type")
return guest_name in self._name2guest
def checkout(self, guest_name):
"""
Checks out the guest from the hotel
Raises:
ValueError if guest name is not of str type
"""
if not isinstance(guest_name, str):
raise ValueError("guest name is not of string type")
if guest_name in self._name2guest:
del self._name2guest[guest_name]
room = self._name2room.pop(guest_name)
self._empty_rooms.append(room)
def room_of(self, guest_name):
"""
Provides the room for the guest
Return:
the corresponding Room
Raises:
ValueError if named guest is not a string or
is not checked in at this hotel
"""
if not isinstance(guest_name, str):
raise ValueError("guest name is not of string type")
if guest_name not in self._name2room:
raise ValueError(
"guest {0} is not checked in at this hotel".format(guest_name))
return self._name2room[guest_name]
| [
"[email protected]"
] | |
85cb80c6b44a3d07bee31bad87c5d4102559bde4 | 5cb98473ea9972d0a9a0278cde9b6ee8264f9bac | /01. Jump to python/chap05/mod3_driver.py | 5994fc46b1cc9b334106b3ade0789c1d901e08d6 | [] | no_license | libus1204/bigdata2019 | fd85dbcd8c89db991ab5c3efa11ff85466a823f8 | 5e9a6fa2c340c1fcd2840889ba40c7b805926558 | refs/heads/master | 2020-04-21T10:56:33.519490 | 2019-04-15T05:28:19 | 2019-04-15T05:28:19 | 169,503,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | from mod2 import mod3
print(mod3.sum2(1, 2)) | [
"[email protected]"
] | |
87d413d7af90828f2782af0f4e847016caecc553 | b403c7fe56209472855dff451f0b6283d5471008 | /Supplemental_Material/PythonProjects/myFunctions/isItOdd.py | 14037a63dbb500f808f9316903acca319e7bc678 | [] | no_license | Sandbox4KidsTM/Python_Basics | 842bde52796896e913fdb5cc349034c52092555f | 68c95547ec1567958fc8069e6a4bb119e436211a | refs/heads/master | 2020-03-23T01:06:29.363196 | 2018-08-10T04:32:58 | 2018-08-10T04:32:58 | 140,901,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | #checks if a user-entered number if odd
a = int(input("enter a num: "))
if a % 2 == 0:  # % is the modulus (remainder) operator
print("number is EVEN")
else:
print("number is ODDDDD") | [
"[email protected]"
] | |
89e353022fef9fffa9f5835f74ae7501b8c1d990 | 3960fa9721ff97c8da99d010e27118ab0bc1201d | /tests/storage/fake_storage.py | c1437e781c494d82c715effbb93b4b9fafedaf40 | [
"Apache-2.0"
] | permissive | iamjoshbinder/plaso | d3ebbc216b4d89c8f8f6ab50f059b6db7bcca599 | 762aa1d1eb17760ef5e2708a48dff2acad7001ea | refs/heads/master | 2021-08-08T13:23:10.146862 | 2017-11-09T10:44:09 | 2017-11-09T10:44:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,362 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the fake storage."""
import unittest
from plaso.containers import errors
from plaso.containers import event_sources
from plaso.containers import reports
from plaso.containers import sessions
from plaso.containers import tasks
from plaso.lib import definitions
from plaso.storage import fake_storage
from tests.storage import test_lib
class FakeStorageWriterTest(test_lib.StorageTestCase):
"""Tests for the fake storage writer object."""
def testAddAnalysisReport(self):
"""Tests the AddAnalysisReport function."""
session = sessions.Session()
analysis_report = reports.AnalysisReport(
plugin_name=u'test', text=u'test report')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddAnalysisReport(analysis_report)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddAnalysisReport(analysis_report)
def testAddError(self):
"""Tests the AddError function."""
session = sessions.Session()
extraction_error = errors.ExtractionError(
message=u'Test extraction error')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddError(extraction_error)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddError(extraction_error)
def testAddEvent(self):
"""Tests the AddEvent function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
event = None
for event in test_events:
storage_writer.AddEvent(event)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEvent(event)
def testAddEventSource(self):
"""Tests the AddEventSource function."""
session = sessions.Session()
event_source = event_sources.EventSource()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddEventSource(event_source)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventSource(event_source)
def testAddEventTag(self):
"""Tests the AddEventTag function."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
test_events = self._CreateTestEvents()
for event in test_events:
storage_writer.AddEvent(event)
event_tag = None
test_event_tags = self._CreateTestEventTags(test_events)
for event_tag in test_event_tags:
storage_writer.AddEventTag(event_tag)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventTag(event_tag)
def testOpenClose(self):
"""Tests the Open and Close functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
storage_writer.Close()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.Open()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.Close()
def testGetEvents(self):
"""Tests the GetEvents function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
event = None
for event in test_events:
storage_writer.AddEvent(event)
events = list(storage_writer.GetEvents())
self.assertEqual(len(events), len(test_events))
storage_writer.Close()
# TODO: add tests for GetEventSources.
# TODO: add tests for GetEventTags.
# TODO: add tests for GetFirstWrittenEventSource and
# GetNextWrittenEventSource.
def testGetSortedEvents(self):
"""Tests the GetSortedEvents function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
event = None
for event in test_events:
storage_writer.AddEvent(event)
events = list(storage_writer.GetSortedEvents())
self.assertEqual(len(events), len(test_events))
storage_writer.Close()
# TODO: add test with time range.
def testWriteSessionStartAndCompletion(self):
"""Tests the WriteSessionStart and WriteSessionCompletion functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.WriteSessionStart()
storage_writer.WriteSessionCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer.Close()
def testWriteTaskStartAndCompletion(self):
"""Tests the WriteTaskStart and WriteTaskCompletion functions."""
session = sessions.Session()
task = tasks.Task(session_identifier=session.identifier)
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
storage_writer.Open()
storage_writer.WriteTaskStart()
storage_writer.WriteTaskCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer.Close()
if __name__ == '__main__':
unittest.main()
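# A minimal sketch of how the writer under test is typically driven, using only
# calls exercised by the tests above; event construction is elided because it
# relies on test_lib helpers that are not shown here:
#
#   session = sessions.Session()
#   storage_writer = fake_storage.FakeStorageWriter(session)
#   storage_writer.Open()
#   storage_writer.WriteSessionStart()
#   for event in events:                    # events built elsewhere
#       storage_writer.AddEvent(event)
#   sorted_events = list(storage_writer.GetSortedEvents())
#   storage_writer.WriteSessionCompletion()
#   storage_writer.Close()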
| [
"[email protected]"
] | |
f18208cbe2c56461d40b39d71cffbfaf1b0fee2b | 6af6a6fb7d0759be524f2592a470d91947e0e2bc | /RandomForest/src/dataset/sp_010_1e2.py | 699dc20994db4aa94c5f33202f7ef75e147f7653 | [] | no_license | wasit7/ImageSearch | 5094e56db46af0d05cf76e5b5110c5b92d5198fd | 3cd7ab3fa3c89873c0b49b1311ed5e7c5f4b8939 | refs/heads/master | 2020-05-17T01:12:24.616821 | 2015-08-10T07:26:44 | 2015-08-10T07:26:44 | 22,672,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,887 | py | """
Contains a class that provides a spiral dataset to the random forest.
@author: Krerkkiat
updated by Wasit
"""
import numpy as np
class SpiralDataset:
'''
    Provides the spiral dataset to the random forest.
'''
def __init__(self, clmax, spc):
'''
        Initialization routine.
        Parameter(s):
            clmax: int - Maximum number of classes.
            spc: int - Number of samples per class per client.
'''
        self.clmax = clmax  # number of classes in the dataset
        self.spc = spc  # samples per class per client
        self.dimension = 2  # two feature dimensions: the x and y coordinates
        self.I = np.zeros([self.dimension, 0], dtype=float)  # 2 x N ndarray; each column holds one sample's features
        self.L = np.array([], dtype=int)  # 1-D array of class labels
# create I
for x in range(self.clmax):
theta = np.linspace(0, 2*np.pi, self.spc)+np.random.randn(self.spc)*0.4*np.pi/clmax + 2*np.pi*x/clmax
r = np.linspace(0.1, 1, self.spc)
self.I = np.append(self.I, [r*np.cos(theta), r*np.sin(theta)], axis=1)
            self.L = np.append(self.L, np.ones(self.spc, dtype=int)*x)  # labels are 1-D, so no axis argument; plain int avoids the removed np.int alias
def getL(self, x):
'''
        Looks up the label of the data at index x.
        Parameter(s):
            x: int or numpy.array - Index or indices of the data whose label is requested.
        Return(s):
            label: int - Label of the data at x.
'''
return self.L[x]
def getI(self, theta, x):
'''
        Looks up the feature value of the data at index x along dimension theta, used as tau (the splitting threshold).
        Parameter(s):
            theta: int - Feature dimension used for the lookup.
            x: int - Index of the data.
        Return(s):
            tau: float - Feature value of the data at index x along dimension theta.
'''
return self.I[theta, x]
def getX(self):
'''
        Builds the list of indices used when initializing the root node on the client side.
        Return(s):
            idx_list: list - List of data indices.
'''
return np.arange(0, self.clmax * self.spc)
def getParam(self, X):
'''
        Draws a random theta for each index in X and looks up the corresponding tau.
        Parameter(s):
            X: list - List of indices used to look up tau.
        Return(s):
            theta: list - List of randomly drawn feature dimensions.
            tau: list - List of thresholds looked up by theta and X.
'''
theta = np.random.randint(self.dimension, size=len(X))
tau = self.getI(theta, X)
return theta, tau
def __str__(self):
'''
        Nothing special; mainly useful for debugging.
        Return:
            txt: str - String that represents this object.
'''
return 'clmax: {cm}, spc: {ql}'.format(cm=self.clmax, ql=self.spc)
if __name__ == '__main__':
clmax = 10
spc = int(1e2)
dataset = SpiralDataset(clmax, spc)
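    # Quick demonstration of the accessors defined above (added for clarity):
    # getX() lists all clmax*spc sample indices, getParam() draws a random
    # feature dimension (theta) and threshold (tau) per index, and getL()
    # returns the class labels.
    X = dataset.getX()
    theta, tau = dataset.getParam(X)
    print(dataset)
    print('first sample: theta={0}, tau={1:.3f}, label={2}'.format(
        theta[0], tau[0], dataset.getL(X[0])))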
| [
"[email protected]"
] |