ext: stringclasses (9 values)
sha: stringlengths (40 to 40)
content: stringlengths (3 to 1.04M)
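
Each row below pairs a file extension (ext), a 40-character content hash (sha), and the full source text of one file (content). As a minimal sketch only (the Parquet file name is hypothetical, and the dump may equally ship as CSV or JSONL), such a table could be loaded and filtered with pandas:

import pandas as pd

# Hypothetical path; adjust to however this dump is actually stored.
df = pd.read_parquet("code_dump.parquet")  # columns: ext, sha, content

# Keep only the Python rows, mirroring the 'py' rows shown in this section.
py_rows = df[df["ext"] == "py"]

for sha, content in zip(py_rows["sha"], py_rows["content"]):
    # Each row carries one whole source file; print a short preview.
    print(sha, content[:80].replace("\n", " "))
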
py
1a33206b003f30529e304d54f1bd41b3a0262716
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
.. module:: camera_rot_corr_test
   :platform: Unix
   :synopsis: Test for the camera_rot_correction plugin

.. moduleauthor:: Mark Basham <[email protected]>

"""

import unittest

from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
    run_protected_plugin_runner


class CameraRotCorrTest(unittest.TestCase):

    global data_file, experiment
    data_file = '24737.nxs'
    experiment = None

    def test_correction(self):
        process_list = 'corrections/camera_rot_corr_test.nxs'
        options = tu.initialise_options(data_file, experiment, process_list)
        run_protected_plugin_runner(options)
        tu.cleanup(options)


if __name__ == "__main__":
    unittest.main()
py
1a3320e83b434571b8fbb902209cbac03c51afe9
import requests import re from bs4 import BeautifulSoup import traceback import json def get_html_text(url): try: headers = { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36', } r = requests.get(url, headers=headers) r.raise_for_status() r.encoding = 'utf-8' return r.text except: return 200 # traceback.print_exc() def get_nav_addr(lst, web_url): url = get_html_text(web_url + '/html/wenmimap.html') soup = BeautifulSoup(url, "html.parser") link_list = soup.find_all('div', attrs={'class': 'list'}) # print(link_list) # print(link_list[0]) # print(type(link_list[0])) # 获取map中导航的所有链接 for list in link_list: ul_list = list.find('ul') # print(ul_list) a = ul_list.find_all('a') for i in a: try: href = i.attrs['href'] lst.append(href) except: traceback.print_exc() def get_article_url(lst, web_url, path): fw = open(path, 'a', encoding='utf-8') nav_count = 0 # 获取每个文章的链接 for nav_link in lst: article_count = 0 print(nav_link) max_list = 0 min_list = 0 url = get_html_text(web_url + nav_link) soup = BeautifulSoup(url, "html.parser") page = soup.find('h3', attrs={'class': 'list_page'}) # print(page) # 判断是否该页面有内容 if page is None: continue min_page = page.find_all('a', string="首页") max_page = page.find_all('a', string="尾页") min_num = re.search(r'\d{1,4}', str(min_page[0])) max_num = re.search(r'\d{1,4}', str(max_page[0])) # 当只有一页的时候, 有可能链接为 /index.html, 因此没有想要的页码 if min_num: min_num1 = int(min_num.group(0)) else: min_num1 = 1 if max_num: max_num1 = int(max_num.group(0)) else: max_num1 = 1 # 获取最大的页数, 开始遍历每一页中的文章内容 num = int(max(min_num1, max_num1)) try: for i in range(num): r = get_html_text(web_url + nav_link + 'List_' + str(i + 1) + '.html') # url = web_url + nav_link + 'List_' + str(i + 1) + '.html' # 将"http://www.cnwmz.com/html/jiguandanwei_tag/gongqingtuan/List_27.html"等等写入文本 # fw.write(json.dumps(url, ensure_ascii=False) + '\n') soup = BeautifulSoup(r, "html.parser") # 直接获取<dt>标签中的内容 # article = soup.find("article", attrs={'id': 'list_l'}) article = soup.find_all('dt', soup) for article_list in article: a = article_list.find('a') href = a.attrs['href'] article_url = web_url + href # print(article_url) article_count += 1 fw.write(json.dumps(article_url, ensure_ascii=False) + '\n') # print(href) # lst.append(href) nav_count += 1 print(article_count) except: traceback.print_exc() print(nav_count) def get_news_content(lst, web_url, fpath, fcontent): info_dict = {'url': '', 'title': '', 'content': '', 'class': '', 'tag': []} fw = open(fcontent, 'a', encoding='utf-8') with open(fpath, 'r') as fr: for i, line in enumerate(fr): # print(i) url = re.search(r'http.+\.html', line).group(0) print(url) try: r = get_html_text(url) # 将url存入dict info_dict['url'] = url soup = BeautifulSoup(r, 'html.parser') nav = soup.find('nav') nav_a = nav.find_all('a') nav_con = '您现位置:' for nav_content in nav_a: # print(nav_content.string) nav_con += nav_content.string + '>' # print(nav_con + '>正文') # 导航存入dict info_dict['class'] = nav_con + '>正文' # print(nav) article = soup.find('article', attrs={'id': 'con_l'}) # print(article) title = article.find('h1') # print(title.string) # 标题存入dict info_dict['title'] = str(title.string) section_tip = article.find('section', attrs={'id': 'tip'}) tip = section_tip.find('h2') tag = tip.find_all('a') tag_con = [] for t in tag: tag_con.append(t.string) # 标签存到dict info_dict['tag'] = tag_con # 开始获取文章的内容 # 文章有可能有多页, 因此需要爬取多个页面的内容 web_url = re.search(r'http://www.cnwmz.com/html/\d+/\d+', line) content = '' con = [''] for j 
in range(30): # 尝试文章是否有多页, 若有多页的话, 则改变j的值进行内容添加 if j != 0: j += 1 wu = str(web_url.group(0)) + '_' + str(j) + '.html' wu_text = get_html_text(wu) if wu_text == 200: break soup = BeautifulSoup(wu_text, 'html.parser') article_content = soup.find('section', attrs={'id': 'article'}) art_p = article_content.find_all('p') print(len(art_p)) if len(art_p) < 2: for ac in article_content: name = str(ac.name) if name == 'br': content += '\n' if str(ac.string) == 'None' or str(ac.string) == 'wm("arc");': continue # print(ac.string) content += str(ac.string) # print(content) # con[0] = content else: article1 = soup.find('article', attrs={'id': 'con_l'}) art = article1.find_all('p') # print(art) for j, a in enumerate(art): # if a.attrs['class'] == 'page_css': # continue # print(art[j]) # print(type(art[j])) # if art[j].find('class', attrs={'class': 'page_css'}): # print(1) # continue m = art[j].find('a') if art[j].find('a'): if str(art[j].find('a').string) == '上一页': continue # print(a.contents) for m in a.contents: content += str(m.string) # print(m) # print(content) content += '\n' # con[0] = content # print(content) # 文章内容存入dict info_dict['content'] = content fw.write(json.dumps(info_dict, ensure_ascii=False) + '\n') except: traceback.print_exc() if i == 0: break # print(info_dict) fr.close() fw.close() def main(): web_url = 'http://www.cnwmz.com' lst = [] fpath = 'F://wmz/wmz.json' flist_url = 'F://wmz/article_url2.txt' fcontent = "F://wmz/article_info_test.txt" # 获取文秘站所有导航的页码链接 # get_nav_addr(lst, web_url) # 获取文秘站的所有文章链接 # get_article_url(lst, web_url, flist_url) # 获取想要的内容 get_news_content(lst, web_url, flist_url, fcontent) if __name__ == '__main__': main()
py
1a3320f436df3e630ebbbaac5a4046e50a4ac260
import aiohttp
import aiosqlite
import asyncio
import discord
from discord.ext import commands

from backend.config import DATABASE_LOCATION

if __name__ == "__main__":
    with open("token.txt") as file:
        TOKEN = file.read().strip()

bot = commands.Bot(".", case_insensitive=True)


@bot.event
async def on_ready():
    bot._session = aiohttp.ClientSession()
    print('Logged in as')
    print(bot.user.name)
    print(bot.user.id)
    print('------')
    print(discord.utils.oauth_url(bot.user.id))


async def connect_db():
    bot._db = await aiosqlite.connect(DATABASE_LOCATION)


if __name__ == "__main__":
    bot.loop.run_until_complete(connect_db())

    extensions = [
        "cogs.rolehelper",
        "cogs.moderation",
        "cogs.fun",
        "cogs.rtfm",
        "cogs.dasmooi",
        "cogs.laf",
        "cogs.admin",
    ]
    for ext in extensions:
        bot.load_extension(ext)

    bot.run(TOKEN)
py
1a33211a1fe13da3f9bfcbbae81c74358bf009e3
# Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import TYPE_CHECKING, Any, Optional, Tuple import attr import msgpack from unpaddedbase64 import decode_base64, encode_base64 from synapse.api.constants import ( EventContentFields, EventTypes, GuestAccess, HistoryVisibility, JoinRules, ) from synapse.api.errors import ( Codes, HttpResponseException, RequestSendFailed, SynapseError, ) from synapse.types import JsonDict, ThirdPartyInstanceID from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.caches.response_cache import ResponseCache if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000 # This is used to indicate we should only return rooms published to the main list. EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None) class RoomListHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() self.hs = hs self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search self.response_cache: ResponseCache[ Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]] ] = ResponseCache(hs.get_clock(), "room_list") self.remote_response_cache: ResponseCache[ Tuple[str, Optional[int], Optional[str], bool, Optional[str]] ] = ResponseCache(hs.get_clock(), "remote_room_list", timeout_ms=30 * 1000) async def get_local_public_room_list( self, limit: Optional[int] = None, since_token: Optional[str] = None, search_filter: Optional[dict] = None, network_tuple: Optional[ThirdPartyInstanceID] = EMPTY_THIRD_PARTY_ID, from_federation: bool = False, ) -> JsonDict: """Generate a local public room list. There are multiple different lists: the main one plus one per third party network. A client can ask for a specific list or to return all. Args: limit since_token search_filter network_tuple: Which public list to use. This can be (None, None) to indicate the main list, or a particular appservice and network id to use an appservice specific one. Setting to None returns all public rooms across all lists. from_federation: true iff the request comes from the federation API """ if not self.enable_room_list_search: return {"chunk": [], "total_room_count_estimate": 0} logger.info( "Getting public room list: limit=%r, since=%r, search=%r, network=%r", limit, since_token, bool(search_filter), network_tuple, ) if search_filter: # We explicitly don't bother caching searches or requests for # appservice specific lists. 
logger.info("Bypassing cache as search request.") return await self._get_public_room_list( limit, since_token, search_filter, network_tuple=network_tuple, from_federation=from_federation, ) key = (limit, since_token, network_tuple) return await self.response_cache.wrap( key, self._get_public_room_list, limit, since_token, network_tuple=network_tuple, from_federation=from_federation, ) async def _get_public_room_list( self, limit: Optional[int] = None, since_token: Optional[str] = None, search_filter: Optional[dict] = None, network_tuple: Optional[ThirdPartyInstanceID] = EMPTY_THIRD_PARTY_ID, from_federation: bool = False, ) -> JsonDict: """Generate a public room list. Args: limit: Maximum amount of rooms to return. since_token: search_filter: Dictionary to filter rooms by. network_tuple: Which public list to use. This can be (None, None) to indicate the main list, or a particular appservice and network id to use an appservice specific one. Setting to None returns all public rooms across all lists. from_federation: Whether this request originated from a federating server or a client. Used for room filtering. """ # Pagination tokens work by storing the room ID sent in the last batch, # plus the direction (forwards or backwards). Next batch tokens always # go forwards, prev batch tokens always go backwards. if since_token: batch_token = RoomListNextBatch.from_token(since_token) bounds: Optional[Tuple[int, str]] = ( batch_token.last_joined_members, batch_token.last_room_id, ) forwards = batch_token.direction_is_forward has_batch_token = True else: bounds = None forwards = True has_batch_token = False # we request one more than wanted to see if there are more pages to come probing_limit = limit + 1 if limit is not None else None results = await self.store.get_largest_public_rooms( network_tuple, search_filter, probing_limit, bounds=bounds, forwards=forwards, ignore_non_federatable=from_federation, ) def build_room_entry(room: JsonDict) -> JsonDict: entry = { "room_id": room["room_id"], "name": room["name"], "topic": room["topic"], "canonical_alias": room["canonical_alias"], "num_joined_members": room["joined_members"], "avatar_url": room["avatar"], "world_readable": room["history_visibility"] == HistoryVisibility.WORLD_READABLE, "guest_can_join": room["guest_access"] == "can_join", "join_rule": room["join_rules"], } # Filter out Nones – rather omit the field altogether return {k: v for k, v in entry.items() if v is not None} results = [build_room_entry(r) for r in results] response: JsonDict = {} num_results = len(results) if limit is not None: more_to_come = num_results == probing_limit # Depending on direction we trim either the front or back. if forwards: results = results[:limit] else: results = results[-limit:] else: more_to_come = False if num_results > 0: final_entry = results[-1] initial_entry = results[0] if forwards: if has_batch_token: # If there was a token given then we assume that there # must be previous results. 
response["prev_batch"] = RoomListNextBatch( last_joined_members=initial_entry["num_joined_members"], last_room_id=initial_entry["room_id"], direction_is_forward=False, ).to_token() if more_to_come: response["next_batch"] = RoomListNextBatch( last_joined_members=final_entry["num_joined_members"], last_room_id=final_entry["room_id"], direction_is_forward=True, ).to_token() else: if has_batch_token: response["next_batch"] = RoomListNextBatch( last_joined_members=final_entry["num_joined_members"], last_room_id=final_entry["room_id"], direction_is_forward=True, ).to_token() if more_to_come: response["prev_batch"] = RoomListNextBatch( last_joined_members=initial_entry["num_joined_members"], last_room_id=initial_entry["room_id"], direction_is_forward=False, ).to_token() response["chunk"] = results response["total_room_count_estimate"] = await self.store.count_public_rooms( network_tuple, ignore_non_federatable=from_federation ) return response @cached(num_args=1, cache_context=True) async def generate_room_entry( self, room_id: str, num_joined_users: int, cache_context: _CacheContext, with_alias: bool = True, allow_private: bool = False, ) -> Optional[JsonDict]: """Returns the entry for a room Args: room_id: The room's ID. num_joined_users: Number of users in the room. cache_context: Information for cached responses. with_alias: Whether to return the room's aliases in the result. allow_private: Whether invite-only rooms should be shown. Returns: Returns a room entry as a dictionary, or None if this room was determined not to be shown publicly. """ result = {"room_id": room_id, "num_joined_members": num_joined_users} if with_alias: aliases = await self.store.get_aliases_for_room( room_id, on_invalidate=cache_context.invalidate ) if aliases: result["aliases"] = aliases current_state_ids = await self.store.get_current_state_ids( room_id, on_invalidate=cache_context.invalidate ) if not current_state_ids: # We're not in the room, so may as well bail out here. return result event_map = await self.store.get_events( [ event_id for key, event_id in current_state_ids.items() if key[0] in ( EventTypes.Create, EventTypes.JoinRules, EventTypes.Name, EventTypes.Topic, EventTypes.CanonicalAlias, EventTypes.RoomHistoryVisibility, EventTypes.GuestAccess, "m.room.avatar", ) ] ) current_state = {(ev.type, ev.state_key): ev for ev in event_map.values()} # Double check that this is actually a public room. 
join_rules_event = current_state.get((EventTypes.JoinRules, "")) if join_rules_event: join_rule = join_rules_event.content.get("join_rule", None) if not allow_private and join_rule and join_rule != JoinRules.PUBLIC: return None # Return whether this room is open to federation users or not create_event = current_state[EventTypes.Create, ""] result["m.federate"] = create_event.content.get( EventContentFields.FEDERATE, True ) name_event = current_state.get((EventTypes.Name, "")) if name_event: name = name_event.content.get("name", None) if name: result["name"] = name topic_event = current_state.get((EventTypes.Topic, "")) if topic_event: topic = topic_event.content.get("topic", None) if topic: result["topic"] = topic canonical_event = current_state.get((EventTypes.CanonicalAlias, "")) if canonical_event: canonical_alias = canonical_event.content.get("alias", None) if canonical_alias: result["canonical_alias"] = canonical_alias visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, "")) visibility = None if visibility_event: visibility = visibility_event.content.get("history_visibility", None) result["world_readable"] = visibility == HistoryVisibility.WORLD_READABLE guest_event = current_state.get((EventTypes.GuestAccess, "")) guest = None if guest_event: guest = guest_event.content.get(EventContentFields.GUEST_ACCESS) result["guest_can_join"] = guest == GuestAccess.CAN_JOIN avatar_event = current_state.get(("m.room.avatar", "")) if avatar_event: avatar_url = avatar_event.content.get("url", None) if avatar_url: result["avatar_url"] = avatar_url return result async def get_remote_public_room_list( self, server_name: str, limit: Optional[int] = None, since_token: Optional[str] = None, search_filter: Optional[dict] = None, include_all_networks: bool = False, third_party_instance_id: Optional[str] = None, ) -> JsonDict: """Get the public room list from remote server Raises: SynapseError """ if not self.enable_room_list_search: return {"chunk": [], "total_room_count_estimate": 0} if search_filter: # Searching across federation is defined in MSC2197. # However, the remote homeserver may or may not actually support it. # So we first try an MSC2197 remote-filtered search, then fall back # to a locally-filtered search if we must. try: res = await self._get_remote_list_cached( server_name, limit=limit, since_token=since_token, include_all_networks=include_all_networks, third_party_instance_id=third_party_instance_id, search_filter=search_filter, ) return res except HttpResponseException as hre: syn_err = hre.to_synapse_error() if hre.code in (404, 405) or syn_err.errcode in ( Codes.UNRECOGNIZED, Codes.NOT_FOUND, ): logger.debug("Falling back to locally-filtered /publicRooms") else: # Not an error that should trigger a fallback. raise SynapseError(502, "Failed to fetch room list") except RequestSendFailed: # Not an error that should trigger a fallback. 
raise SynapseError(502, "Failed to fetch room list") # if we reach this point, then we fall back to the situation where # we currently don't support searching across federation, so we have # to do it manually without pagination limit = None since_token = None try: res = await self._get_remote_list_cached( server_name, limit=limit, since_token=since_token, include_all_networks=include_all_networks, third_party_instance_id=third_party_instance_id, ) except (RequestSendFailed, HttpResponseException): raise SynapseError(502, "Failed to fetch room list") if search_filter: res = { "chunk": [ entry for entry in list(res.get("chunk", [])) if _matches_room_entry(entry, search_filter) ] } return res async def _get_remote_list_cached( self, server_name: str, limit: Optional[int] = None, since_token: Optional[str] = None, search_filter: Optional[dict] = None, include_all_networks: bool = False, third_party_instance_id: Optional[str] = None, ) -> JsonDict: """Wrapper around FederationClient.get_public_rooms that caches the result. """ repl_layer = self.hs.get_federation_client() if search_filter: # We can't cache when asking for search return await repl_layer.get_public_rooms( server_name, limit=limit, since_token=since_token, search_filter=search_filter, include_all_networks=include_all_networks, third_party_instance_id=third_party_instance_id, ) key = ( server_name, limit, since_token, include_all_networks, third_party_instance_id, ) return await self.remote_response_cache.wrap( key, repl_layer.get_public_rooms, server_name, limit=limit, since_token=since_token, search_filter=search_filter, include_all_networks=include_all_networks, third_party_instance_id=third_party_instance_id, ) @attr.s(slots=True, frozen=True, auto_attribs=True) class RoomListNextBatch: last_joined_members: int # The count to get rooms after/before last_room_id: str # The room_id to get rooms after/before direction_is_forward: bool # True if this is a next_batch, false if prev_batch KEY_DICT = { "last_joined_members": "m", "last_room_id": "r", "direction_is_forward": "d", } REVERSE_KEY_DICT = {v: k for k, v in KEY_DICT.items()} @classmethod def from_token(cls, token: str) -> "RoomListNextBatch": decoded = msgpack.loads(decode_base64(token), raw=False) return RoomListNextBatch( **{cls.REVERSE_KEY_DICT[key]: val for key, val in decoded.items()} ) def to_token(self) -> str: return encode_base64( msgpack.dumps( {self.KEY_DICT[key]: val for key, val in attr.asdict(self).items()} ) ) def copy_and_replace(self, **kwds: Any) -> "RoomListNextBatch": return attr.evolve(self, **kwds) def _matches_room_entry(room_entry: JsonDict, search_filter: dict) -> bool: if search_filter and search_filter.get("generic_search_term", None): generic_search_term = search_filter["generic_search_term"].upper() if generic_search_term in room_entry.get("name", "").upper(): return True elif generic_search_term in room_entry.get("topic", "").upper(): return True elif generic_search_term in room_entry.get("canonical_alias", "").upper(): return True else: return True return False
py
1a332133d67e7918e0c4e49ace0d8d1b5226d69d
import unittest
import os
import json

import numpy as np

import rastervision as rv
from rastervision.core import Box
from rastervision.data.raster_source import RasterSourceConfig
from rastervision.data.crs_transformer import IdentityCRSTransformer
from rastervision.utils.files import str_to_file
from rastervision.rv_config import RVConfig


class TestGeoJSONSource(unittest.TestCase):
    def setUp(self):
        self.crs_transformer = IdentityCRSTransformer()
        self.extent = Box.make_square(0, 0, 10)
        self.tmp_dir = RVConfig.get_tmp_dir()
        self.class_id = 2
        self.background_class_id = 3
        self.line_buffer = 1
        self.uri = os.path.join(self.tmp_dir.name, 'temp.json')

    def build_source(self, geojson):
        str_to_file(json.dumps(geojson), self.uri)

        config = RasterSourceConfig.builder(rv.GEOJSON_SOURCE) \
            .with_uri(self.uri) \
            .with_rasterizer_options(self.background_class_id,
                                     self.line_buffer) \
            .build()

        # Convert to proto and back as a test.
        config = RasterSourceConfig.builder(rv.GEOJSON_SOURCE) \
            .from_proto(config.to_proto()) \
            .build()

        source = config.create_source(self.uri, self.extent,
                                      self.crs_transformer)

        return source

    def tearDown(self):
        self.tmp_dir.cleanup()

    def test_get_chip(self):
        geojson = {
            'type': 'FeatureCollection',
            'features': [{
                'type': 'Feature',
                'geometry': {
                    'type': 'Polygon',
                    'coordinates': [[[0., 0.], [0., 5.], [5., 5.], [5., 0.],
                                     [0., 0.]]]
                },
                'properties': {
                    'class_id': self.class_id,
                }
            }, {
                'type': 'Feature',
                'geometry': {
                    'type': 'LineString',
                    'coordinates': [[7., 0.], [7., 9.]]
                },
                'properties': {
                    'class_id': self.class_id
                }
            }]
        }

        source = self.build_source(geojson)
        self.assertEqual(source.get_extent(), self.extent)
        chip = source.get_image_array()
        self.assertEqual(chip.shape, (10, 10, 1))

        expected_chip = self.background_class_id * np.ones((10, 10, 1))
        expected_chip[0:5, 0:5, 0] = self.class_id
        expected_chip[0:10, 6:8] = self.class_id
        np.testing.assert_array_equal(chip, expected_chip)

    def test_get_chip_no_polygons(self):
        geojson = {'type': 'FeatureCollection', 'features': []}

        source = self.build_source(geojson)
        self.assertEqual(source.get_extent(), self.extent)
        chip = source.get_image_array()
        self.assertEqual(chip.shape, (10, 10, 1))

        expected_chip = self.background_class_id * np.ones((10, 10, 1))
        np.testing.assert_array_equal(chip, expected_chip)


if __name__ == '__main__':
    unittest.main()
py
1a332233c6fa06de180185fde12bcaf8040c7bd9
""" Copyright 2021 Merck & Co., Inc. Kenilworth, NJ, USA. Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ #from .fixtures import AUTH_HEADERS, BASE_API_PATH from requests import post as reqpost from os import getenv BASE_API_PATH = getenv('USER_FACING_API_HTTP_PATH', 'http://dp-api:9000') AUTH_HEADERS = {'X-Username': 'test-developer', 'X-Api-Key': getenv('INTTESTPASS', 'local-developer')} # Can't test on standalone def test_experimental_join_stats(): # routes: 225 # AccumuloControllerExperimental # POST /experimental/joinStats controllers.accumulo.AccumuloControllerExperimental.joinStats(request: Request) # JoinScanSpec: 406 body = { 'dataset_a': 'int_basic_test_data', 'table_a': 'int_basic_test_data', 'col_a': 'State Name', 'dataset_b': 'int_cell_level_visibilities_test_data', 'table_b': 'int_cell_level_visibilities_test_data', 'col_b': 'Location', 'limit': 1 } req_path = f'{BASE_API_PATH}/experimental/joinStats' res = reqpost(req_path, headers=AUTH_HEADERS, json=body) assert res.status_code == 200 assert res.json() != {} if __name__ == '__main__': test_experimental_join_stats() print('Complete')
py
1a332388faeebc0ad5645c3ec2d7d7eee28f9dcc
from ..data_class.data_manager import DataManager
from ..directories import data_loaded
import numpy as np
import scipy.sparse as sp


class TpfaScheme(DataManager):

    def __init__(self, data_impress, elements_lv0,
                 data_name: str = 'TpfaScheme.npz', load=False):
        super().__init__(data_name, load=load)
        self.data_impress = data_impress
        self.elements_lv0 = elements_lv0
        self.n_volumes = self.data_impress.len_entities['volumes']
        self.gravity = data_loaded['gravity']

    def get_transmissibility_matrix_without_boundary_conditions(self) -> None:
        vols_viz_internal_faces = self.elements_lv0['neig_internal_faces']
        v0 = vols_viz_internal_faces
        internal_faces = self.elements_lv0['internal_faces']
        transmissibility_faces = self.data_impress['transmissibility']
        transmissibility_internal_faces = transmissibility_faces[internal_faces]
        t0 = transmissibility_internal_faces

        lines = np.array([v0[:, 0], v0[:, 1], v0[:, 0], v0[:, 1]]).flatten()
        cols = np.array([v0[:, 1], v0[:, 0], v0[:, 0], v0[:, 1]]).flatten()
        data = np.array([t0, t0, -t0, -t0]).flatten()
        T = sp.csc_matrix((data, (lines, cols)),
                          shape=(self.n_volumes, self.n_volumes))

        self['Tini'] = T
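
The TpfaScheme row above scatters face transmissibilities into a sparse matrix using the coordinate (data, (rows, cols)) constructor, with positive couplings off the diagonal and their negated copies on the diagonal. A self-contained sketch of that assembly pattern, using made-up connectivity and transmissibility values rather than anything from the project above, is:

import numpy as np
import scipy.sparse as sp

# Hypothetical mesh: 4 volumes connected by 3 internal faces.
# Each row of v0 lists the pair of volumes sharing one face.
v0 = np.array([[0, 1], [1, 2], [2, 3]])
t0 = np.array([1.0, 2.0, 0.5])  # made-up face transmissibilities
n_volumes = 4

# Positive entries couple each pair of neighbours; the negated copies
# land on the diagonal (duplicates are summed), so each row sums to zero.
lines = np.concatenate([v0[:, 0], v0[:, 1], v0[:, 0], v0[:, 1]])
cols = np.concatenate([v0[:, 1], v0[:, 0], v0[:, 0], v0[:, 1]])
data = np.concatenate([t0, t0, -t0, -t0])

T = sp.csc_matrix((data, (lines, cols)), shape=(n_volumes, n_volumes))
print(T.toarray())
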
py
1a33238f5515750a1f363da19161a11b45e3f0a4
""" Filter data from DBPedia for inclusion in ConceptNet. The data we use here is far from the entirety of DBPedia, which lists millions of things that are not general knowledge, such as villages with a population of 20, individual roads, sports team rosters from a particular season, and so on. We try to filter this information to get data that's suitable for ConceptNet. We only extract information from the 'instance_types', 'interlanguage_links', and 'mappingbased_objects' files, using 'page_links' as a filter. We filter for relevant concepts in the following way: - Use only pages whose English version is the target of at least 100 Wikipedia links, as seen in the 'page_links_en' file. - Skip pages that are lists or Wikipedia internals. - Filter out the instances of specific types, such as 'Settlement' and 'Road'. - Use only pages that have been translated to at least 5 languages in the 'interlanguage_links' file. - Use only pages that have a Wikidata ID of less than 1000000. This is a crude heuristic, based on the fact that higher-numbered pages are likely to be less well known, but it lets us cut off reading the translation file early. We extract types and certain relations from the pages that remain using the 'instance_types' and 'mappingbased_objects' files. """ from conceptnet5.language.token_utils import un_camel_case from conceptnet5.uri import Licenses, uri_prefix, split_uri from conceptnet5.nodes import standardized_concept_uri, topic_to_concept from conceptnet5.edges import make_edge from conceptnet5.languages import ALL_LANGUAGES, LCODE_ALIASES from conceptnet5.formats.msgpack_stream import MsgpackStreamWriter from conceptnet5.formats.semantic_web import resource_name, parse_nquads import urllib import bz2 import pathlib from operator import itemgetter import itertools parse_url = urllib.parse.urlparse RELATIONS = { 'isPartOf': '/r/PartOf', 'series': '/r/PartOf', 'languageFamily': '/r/PartOf', 'location': '/r/AtLocation', 'place': '/r/AtLocation', 'locatedInArea': '/r/AtLocation', 'spokenIn': '/r/AtLocation', # leave out differentFrom, as it is mostly about confusable names 'sameAs': '/r/Synonym', 'similar': '/r/SimilarTo', 'related': '/r/RelatedTo', 'seeAlso': '/r/RelatedTo', 'type': '/r/InstanceOf', 'field': '/r/dbpedia/field', 'academicDiscipline': '/r/dbpedia/field', 'genre': '/r/dbpedia/genre', 'literaryGenre': '/r/dbpedia/genre', 'influencedBy': '/r/dbpedia/influencedBy', 'knownFor': '/r/dbpedia/knownFor', 'notableIdea': '/r/dbpedia/knownFor', 'notableWork': '/r/dbpedia/knownFor', 'language': '/r/dbpedia/language', 'occupation': '/r/dbpedia/occupation', 'profession': '/r/dbpedia/occupation', #'author': '/r/dbpedia/writer', #'writer': '/r/dbpedia/writer', #'director': '/r/dbpedia/director', #'starring': '/r/dbpedia/starring', #'producer': '/r/dbpedia/producer', #'associatedBand': '/r/dbpedia/associatedBand', #'associatedMusicalArtist': '/r/dbpedia/associatedMusicalArtist', #'bandMember': '/r/dbpedia/bandMember', #'artist': '/r/dbpedia/artist', #'musicalArtist': '/r/dbpedia/artist', #'musicalBand': '/r/dbpedia/artist', 'genus': '/r/dbpedia/genus', 'leader': '/r/dbpedia/leader', 'capital': '/r/dbpedia/capital', 'service': '/r/dbpedia/product', 'product': '/r/dbpedia/product', } # Ban some concepts that are way too generic and often differ from the common # way that people use these words CONCEPT_BLACKLIST = { '/c/en/work/n', '/c/en/agent/n', '/c/en/artist/n', '/c/en/thing/n' } TYPE_BLACKLIST = { 'Settlement', 'Railway Line', 'Road', 'Sports Event', 'Event', 'Olympic Event', 
'Soccer Tournament', 'Election', 'Diocese', 'Year', 'Football League Season', 'Grand Prix' } def translate_dbpedia_url(url): """ Convert an object that's defined by a DBPedia URL to a ConceptNet URI. We do this by finding the part of the URL that names the object, and using that as surface text for ConceptNet. This is, in some ways, abusing a naming convention in the Semantic Web. The URL of an object doesn't have to mean anything at all. The human-readable name is supposed to be a string, specified by the "name" relation. The problem here is that the "name" relation is not unique in either direction. A URL can have many names, and the same name can refer to many URLs, and some of these names are rarely used or are the result of parsing glitches. The URL itself is a stable thing that we can build a ConceptNet URI from, on the other hand. """ if '__' in url or 'dbpedia.org' not in url: return None parsed = parse_url(url) domain = parsed.netloc if '.' not in domain: return None if domain == 'dbpedia.org': # Handle old DBPedia URLs that had no language code domain = 'en.dbpedia.org' domain_parts = domain.split('.', 1) if domain_parts[1] == 'dbpedia.org': lang = domain_parts[0] if lang in LCODE_ALIASES: lang = LCODE_ALIASES[lang] if lang not in ALL_LANGUAGES: return None text = resource_name(url).replace('_', ' ') uri = topic_to_concept(lang, text) if uri in CONCEPT_BLACKLIST: return None else: return uri else: return None def map_dbpedia_relation(url): """ Recognize some relations that we can extract from DBPedia, and convert them to ConceptNet relations. If the relation is specific to DBPedia, it'll be in the '/r/dbpedia' namespace. >>> map_dbpedia_relation('http://www.w3.org/1999/02/22-rdf-syntax-ns#type') '/r/InstanceOf' >>> map_dbpedia_relation('http://dbpedia.org/ontology/location') '/r/AtLocation' >>> map_dbpedia_relation('http://dbpedia.org/ontology/genre') '/r/dbpedia/genre' """ name = resource_name(url) if name in RELATIONS: return RELATIONS[name] else: return None def get_urls_from_degree_file(in_degree_file): urls = set() for line in open(in_degree_file, encoding='utf-8'): line = line.strip() if line: count_str, url_str = line.split(' ', 1) assert url_str[0] == '<' assert url_str[-1] == '>' url = url_str[1:-1] urls.add(url) return urls def read_concept_file(concept_file): concepts = set() for line in open(concept_file, encoding='utf-8'): concept = uri_prefix(line.strip()) concepts.add(concept) return concepts def process_dbpedia(input_dir, output_file, concept_file): """ Read through multiple DBPedia files and output filtered assertions to `output_file`. 
""" ok_concepts = read_concept_file(concept_file) input_path = pathlib.Path(input_dir) interlang_path = input_path / 'interlanguage_links_en.tql.bz2' mapped_urls = interlanguage_mapping(interlang_path, ok_concepts) out = MsgpackStreamWriter(output_file) types_path = input_path / 'instance_types_en.tql.bz2' quads = parse_nquads(bz2.open(str(types_path), 'rt')) for subj, pred, obj, _graph in quads: subj_url = subj['url'] if ( 'Category:' in subj_url or 'File:' in subj_url or 'List_of' in subj_url or '__' in subj_url or 'Template:' in subj_url ): continue if subj_url in mapped_urls: subj_concept = translate_dbpedia_url(subj_url) obj_type = un_camel_case(resource_name(obj['url'])) if obj_type not in TYPE_BLACKLIST: obj_concept = standardized_concept_uri('en', obj_type, 'n') if obj_concept not in CONCEPT_BLACKLIST: edge = make_edge( '/r/IsA', subj_concept, obj_concept, dataset='/d/dbpedia/en', license=Licenses.cc_sharealike, sources=[{'contributor': '/s/resource/dbpedia/2015/en'}], weight=0.5, surfaceStart=url_to_label(subj['url']), surfaceEnd=url_to_label(obj['url']) ) out.write(edge) for other_url in mapped_urls[subj_url]: if other_url.startswith('http://wikidata.dbpedia.org/'): urledge = make_edge( '/r/ExternalURL', subj_concept, other_url, dataset='/d/dbpedia/en', license=Licenses.cc_sharealike, sources=[{'contributor': '/s/resource/dbpedia/2015/en'}], weight=1.0 ) out.write(urledge) else: other_concept = translate_dbpedia_url(other_url) if other_concept: urledge = make_edge( '/r/ExternalURL', other_concept, other_url, dataset='/d/dbpedia/en', license=Licenses.cc_sharealike, sources=[{'contributor': '/s/resource/dbpedia/2015/en'}], weight=1.0 ) out.write(urledge) edge = make_edge( '/r/Synonym', other_concept, subj_concept, dataset='/d/dbpedia/en', license=Licenses.cc_sharealike, sources=[{'contributor': '/s/resource/dbpedia/2015/en'}], weight=0.5, surfaceStart=url_to_label(other_url), surfaceEnd=url_to_label(subj_url) ) out.write(edge) relations_path = input_path / 'mappingbased_objects_en.tql.bz2' quads = parse_nquads(bz2.open(str(relations_path), 'rt')) for subj, pred, obj, _graph in quads: subj_concept = translate_dbpedia_url(subj['url']) obj_concept = translate_dbpedia_url(obj['url']) rel_name = resource_name(pred['url']) if ( subj_concept and obj_concept and subj['url'] in mapped_urls and obj['url'] in mapped_urls ): if rel_name in RELATIONS: rel = RELATIONS[rel_name] edge = make_edge( rel, subj_concept, obj_concept, dataset='/d/dbpedia/en', license=Licenses.cc_sharealike, sources=[{'contributor': '/s/resource/dbpedia/2015/en'}], weight=0.5, surfaceStart=url_to_label(subj['url']), surfaceEnd=url_to_label(obj['url']) ) out.write(edge) out.close() def url_to_label(url): return resource_name(url).replace('_', ' ') def interlanguage_mapping(interlang_path, ok_concepts): quads = parse_nquads(bz2.open(str(interlang_path), 'rt')) mapping = {} for subj, values in itertools.groupby(quads, itemgetter(0)): subj_url = subj['url'] subj_concept = translate_dbpedia_url(subj_url) pieces = split_uri(subj_concept) if len(pieces) >= 6: sense = pieces[5] if 'album' in sense or 'film' in sense or 'series' in sense or 'disambiguation' in sense or 'song' in sense or 'album' in sense or 'band' in sense: continue if uri_prefix(subj_concept) in ok_concepts: targets = [subj_url] for _subj, _pred, obj, _graph in values: url = obj['url'] if 'www.wikidata.org' in url: continue if url.startswith('http://wikidata.dbpedia.org/'): wikidata_id = resource_name(url) # Return early when we see a high-numbered Wikidata ID 
if int(wikidata_id[1:]) >= 1000000: return mapping targets.append(url) mapping[subj_url] = targets return mapping def main(): import argparse parser = argparse.ArgumentParser() parser.add_argument('input_dir', help="Directory containing DBPedia files") parser.add_argument('output_file', help='msgpack file to output to') parser.add_argument('concept_file', help="Text file of concepts used elsewhere in ConceptNet") args = parser.parse_args() process_dbpedia(args.input_dir, args.output_file, args.concept_file) if __name__ == '__main__': main()
py
1a332399306aff982d3f5396db2cdeb80fd8e8a2
import unittest import uuid from aioresponses import aioresponses from canvasaio import Canvas from canvasaio.assignment import ( Assignment, AssignmentGroup, AssignmentOverride, AssignmentExtension, ) from canvasaio.exceptions import CanvasException, RequiredFieldMissing from canvasaio.peer_review import PeerReview from canvasaio.progress import Progress from canvasaio.submission import Submission from canvasaio.user import UserDisplay from tests import settings from tests.util import register_uris, cleanup_file, aioresponse_mock @aioresponse_mock class TestAssignment(unittest.IsolatedAsyncioTestCase): async def asyncSetUp(self): self.canvas = Canvas(settings.BASE_URL, settings.API_KEY) with aioresponses() as m: register_uris({"course": ["get_by_id", "get_assignment_by_id"]}, m) self.course = await self.canvas.get_course(1) self.assignment = await self.course.get_assignment(1) async def asyncTearDown(self): await self.canvas.close() async def test__init__overrides(self, m): register_uris({"assignment": ["get_assignment_with_overrides"]}, m) assignment = await self.course.get_assignment(1) self.assertTrue(hasattr(assignment, "overrides")) self.assertIsInstance(assignment.overrides, list) self.assertEqual(len(assignment.overrides), 1) self.assertIsInstance(assignment.overrides[0], AssignmentOverride) # create_override() async def test_create_override(self, m): register_uris({"assignment": ["create_override"]}, m) override = await self.assignment.create_override( assignment_override={ "student_ids": [1, 2, 3], "title": "New Assignment Override", } ) self.assertIsInstance(override, AssignmentOverride) self.assertEqual(override.title, "New Assignment Override") # delete() async def test_delete_assignments(self, m): register_uris({"assignment": ["delete_assignment"]}, m) deleted_assignment = await self.assignment.delete() self.assertIsInstance(deleted_assignment, Assignment) # edit() async def test_edit_assignment(self, m): register_uris({"assignment": ["edit_assignment"]}, m) name = "New Name" edited_assignment = await self.assignment.edit(assignment={"name": name}) self.assertIsInstance(edited_assignment, Assignment) self.assertTrue(hasattr(edited_assignment, "name")) self.assertEqual(edited_assignment.name, name) # get_gradeable_students() async def test_get_gradeable_students(self, m): register_uris({"course": ["list_gradeable_students"]}, m) students = self.assignment.get_gradeable_students() student_list = [student async for student in students] self.assertEqual(len(student_list), 2) self.assertIsInstance(student_list[0], UserDisplay) # get_override() async def test_get_override(self, m): register_uris({"assignment": ["get_assignment_override"]}, m) override = await self.assignment.get_override(1) self.assertIsInstance(override, AssignmentOverride) # get_overrides() async def test_get_overrides(self, m): register_uris( { "assignment": [ "list_assignment_overrides", "list_assignment_overrides_p2", ] }, m, ) overrides = self.assignment.get_overrides() override_list = [override async for override in overrides] self.assertEqual(len(override_list), 4) self.assertIsInstance(override_list[0], AssignmentOverride) self.assertIsInstance(override_list[3], AssignmentOverride) # get_peer_reviews() async def test_get_peer_reviews(self, m): register_uris({"assignment": ["list_peer_reviews"]}, m) peer_reviews = self.assignment.get_peer_reviews() peer_review_list = [peer_review async for peer_review in peer_reviews] self.assertEqual(len(peer_review_list), 2) self.assertIsInstance(peer_review_list[0], 
PeerReview) # get_submission() async def test_get_submission(self, m): register_uris({"submission": ["get_by_id_course"], "user": ["get_by_id"]}, m) user_id = 1 submission_by_id = await self.assignment.get_submission(user_id) self.assertIsInstance(submission_by_id, Submission) self.assertTrue(hasattr(submission_by_id, "submission_type")) user = await self.canvas.get_user(user_id) submission_by_obj = await self.assignment.get_submission(user) self.assertIsInstance(submission_by_obj, Submission) self.assertTrue(hasattr(submission_by_obj, "submission_type")) # get_submissions() async def test_get_submissions(self, m): register_uris({"submission": ["list_submissions"]}, m) submissions = self.assignment.get_submissions() submission_list_by_id = [submission async for submission in submissions] self.assertEqual(len(submission_list_by_id), 2) self.assertIsInstance(submission_list_by_id[0], Submission) # set_extensions() async def test_set_extensions(self, m): register_uris({"assignment": ["set_extensions"]}, m) extension = await self.assignment.set_extensions( [{"user_id": 3, "extra_attempts": 2}, {"user_id": 2, "extra_attempts": 2}] ) self.assertIsInstance(extension, list) self.assertEqual(len(extension), 2) self.assertIsInstance(extension[0], AssignmentExtension) self.assertEqual(extension[0].user_id, 3) self.assertTrue(hasattr(extension[0], "extra_attempts")) self.assertEqual(extension[0].extra_attempts, 2) self.assertIsInstance(extension[1], AssignmentExtension) self.assertEqual(extension[1].user_id, 2) self.assertTrue(hasattr(extension[1], "extra_attempts")) self.assertEqual(extension[1].extra_attempts, 2) async def test_set_extensions_not_list(self, m): with self.assertRaises(ValueError): await self.assignment.set_extensions({"user_id": 3, "exrra_attempts": 2}) async def test_set_extensions_empty_list(self, m): with self.assertRaises(ValueError): await self.assignment.set_extensions([]) async def test_set_extensions_non_dicts(self, m): with self.assertRaises(ValueError): await self.assignment.set_extensions([("user_id", 1), ("extra_attempts", 2)]) async def test_set_extensions_missing_key(self, m): with self.assertRaises(RequiredFieldMissing): await self.assignment.set_extensions([{"extra_attempts": 3}]) # submit() async def test_submit(self, m): register_uris({"assignment": ["submit"]}, m) sub_type = "online_upload" sub_dict = {"submission_type": sub_type} submission = await self.assignment.submit(sub_dict) self.assertIsInstance(submission, Submission) self.assertTrue(hasattr(submission, "submission_type")) self.assertEqual(submission.submission_type, sub_type) async def test_submit_fail(self, m): with self.assertRaises(RequiredFieldMissing): await self.assignment.submit({}) async def test_submit_file(self, m): register_uris({"assignment": ["submit", "upload", "upload_final"]}, m) filename = "testfile_assignment_{}".format(uuid.uuid4().hex) try: with open(filename, "w+") as file: sub_type = "online_upload" sub_dict = {"submission_type": sub_type} submission = await self.assignment.submit(sub_dict, file) self.assertIsInstance(submission, Submission) self.assertTrue(hasattr(submission, "submission_type")) self.assertEqual(submission.submission_type, sub_type) finally: cleanup_file(filename) async def test_submit_file_wrong_type(self, m): filename = "testfile_assignment_{}".format(uuid.uuid4().hex) sub_type = "online_text_entry" sub_dict = {"submission_type": sub_type} with self.assertRaises(ValueError): await self.assignment.submit(sub_dict, filename) async def 
test_submit_file_upload_failure(self, m): register_uris({"assignment": ["submit", "upload", "upload_fail"]}, m) filename = "testfile_assignment_{}".format(uuid.uuid4().hex) try: with open(filename, "w+") as file: sub_type = "online_upload" sub_dict = {"submission_type": sub_type} with self.assertRaises(CanvasException): await self.assignment.submit(sub_dict, file) finally: cleanup_file(filename) # __str__() def test__str__(self, m): string = str(self.assignment) self.assertIsInstance(string, str) # submissions_bulk_update() async def test_submissions_bulk_update(self, m): register_uris({"assignment": ["update_submissions"]}, m) register_uris({"progress": ["course_progress"]}, m) progress = await self.assignment.submissions_bulk_update( grade_data={"1": {"posted_grade": 97}, "2": {"posted_grade": 98}} ) self.assertIsInstance(progress, Progress) self.assertTrue(progress.context_type == "Course") progress = await progress.query() self.assertTrue(progress.context_type == "Course") # upload_to_submission() async def test_upload_to_submission_self(self, m): register_uris({"assignment": ["upload", "upload_final"]}, m) filename = "testfile_assignment_{}".format(uuid.uuid4().hex) try: with open(filename, "w+") as file: response = await self.assignment.upload_to_submission(file) self.assertTrue(response[0]) self.assertIsInstance(response[1], dict) self.assertIn("url", response[1]) finally: cleanup_file(filename) async def test_upload_to_submission_user(self, m): register_uris({"assignment": ["upload_by_id", "upload_final"]}, m) filename = "testfile_assignment_{}".format(uuid.uuid4().hex) user_id = 1 try: with open(filename, "w+") as file: response = await self.assignment.upload_to_submission(file, user_id) self.assertTrue(response[0]) self.assertIsInstance(response[1], dict) self.assertIn("url", response[1]) finally: cleanup_file(filename) @aioresponse_mock class TestAssignmentExtension(unittest.IsolatedAsyncioTestCase): def setUp(self): self.canvas = Canvas(settings.BASE_URL, settings.API_KEY) self.extension = AssignmentExtension( self.canvas._Canvas__requester, {"assignment_id": 2, "user_id": 3, "extra_attempts": 2}, ) # __str__() def test__str__(self, m): string = str(self.extension) self.assertIsInstance(string, str) @aioresponse_mock class TestAssignmentGroup(unittest.IsolatedAsyncioTestCase): async def asyncSetUp(self): self.canvas = Canvas(settings.BASE_URL, settings.API_KEY) with aioresponses() as m: register_uris( {"course": ["get_by_id"], "assignment": ["get_assignment_group"]}, m ) self.course = await self.canvas.get_course(1) self.assignment_group = await self.course.get_assignment_group(5) async def asyncTearDown(self): await self.canvas.close() # edit() async def test_edit_assignment_group(self, m): register_uris({"assignment": ["edit_assignment_group"]}, m) name = "New Name" edited_assignment_group = await self.assignment_group.edit( assignment_group={"name": name} ) self.assertIsInstance(edited_assignment_group, AssignmentGroup) self.assertTrue(hasattr(edited_assignment_group, "name")) self.assertEqual(edited_assignment_group.name, name) # delete() async def test_delete_assignment_group(self, m): register_uris({"assignment": ["delete_assignment_group"]}, m) deleted_assignment_group = await self.assignment_group.delete() self.assertIsInstance(deleted_assignment_group, AssignmentGroup) self.assertTrue(hasattr(deleted_assignment_group, "name")) self.assertEqual(deleted_assignment_group.name, "Assignment Group 5") # __str__() def test__str__(self, m): string = str(self.assignment_group) 
self.assertIsInstance(string, str) @aioresponse_mock() class TestAssignmentOverride(unittest.IsolatedAsyncioTestCase): async def asyncSetUp(self): self.canvas = Canvas(settings.BASE_URL, settings.API_KEY) with aioresponses() as m: register_uris( { "course": ["get_by_id", "get_assignment_by_id"], "assignment": ["get_assignment_override"], }, m, ) self.course = await self.canvas.get_course(1) self.assignment = await self.course.get_assignment(1) self.assignment_override = await self.assignment.get_override(1) async def asyncTearDown(self): await self.canvas.close() # __str__() def test__str__(self, m): string = str(self.assignment_override) self.assertIsInstance(string, str) self.assertEqual(string, "Assignment Override 1 (1)") # delete() async def test_delete(self, m): register_uris({"assignment": ["delete_override"]}, m) deleted = await self.assignment_override.delete() self.assertIsInstance(deleted, AssignmentOverride) self.assertEqual(deleted.id, self.assignment_override.id) # edit() async def test_edit(self, m): register_uris({"assignment": ["edit_override"]}, m) edited = await self.assignment_override.edit( assignment_override={ "title": "New Title", "student_ids": self.assignment_override.student_ids, } ) self.assertEqual(edited, self.assignment_override) self.assertIsInstance(self.assignment_override, AssignmentOverride) self.assertEqual(edited.title, "New Title")
py
1a3323d3c34ee6eeddd6d2842012ceaa864a0ed9
# Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import hashlib import importlib import logging import os import os.path import socket import struct import tempfile import eventlet import mock import netaddr from oslo_concurrency import processutils from oslo_config import cfg from oslo_context import context as common_context from oslo_context import fixture as context_fixture from oslo_utils import encodeutils from oslo_utils import fixture as utils_fixture from oslo_utils import units import six import nova from nova import context from nova import exception from nova import test from nova import utils CONF = cfg.CONF class GenericUtilsTestCase(test.NoDBTestCase): def test_parse_server_string(self): result = utils.parse_server_string('::1') self.assertEqual(('::1', ''), result) result = utils.parse_server_string('[::1]:8773') self.assertEqual(('::1', '8773'), result) result = utils.parse_server_string('2001:db8::192.168.1.1') self.assertEqual(('2001:db8::192.168.1.1', ''), result) result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773') self.assertEqual(('2001:db8::192.168.1.1', '8773'), result) result = utils.parse_server_string('192.168.1.1') self.assertEqual(('192.168.1.1', ''), result) result = utils.parse_server_string('192.168.1.2:8773') self.assertEqual(('192.168.1.2', '8773'), result) result = utils.parse_server_string('192.168.1.3') self.assertEqual(('192.168.1.3', ''), result) result = utils.parse_server_string('www.example.com:8443') self.assertEqual(('www.example.com', '8443'), result) result = utils.parse_server_string('www.example.com') self.assertEqual(('www.example.com', ''), result) # error case result = utils.parse_server_string('www.exa:mple.com:8443') self.assertEqual(('', ''), result) result = utils.parse_server_string('') self.assertEqual(('', ''), result) def test_hostname_unicode_sanitization(self): hostname = u"\u7684.test.example.com" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_periods(self): hostname = "....test.example.com..." 
self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_dashes(self): hostname = "----test.example.com---" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_characters(self): hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" self.assertEqual("91----test-host.example.com-0", utils.sanitize_hostname(hostname)) def test_hostname_translate(self): hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" self.assertEqual("hello", utils.sanitize_hostname(hostname)) def test_hostname_has_default(self): hostname = u"\u7684hello" defaultname = "Server-1" self.assertEqual("hello", utils.sanitize_hostname(hostname, defaultname)) def test_hostname_empty_has_default(self): hostname = u"\u7684" defaultname = "Server-1" self.assertEqual(defaultname, utils.sanitize_hostname(hostname, defaultname)) def test_hostname_empty_has_default_too_long(self): hostname = u"\u7684" defaultname = "a" * 64 self.assertEqual("a" * 63, utils.sanitize_hostname(hostname, defaultname)) def test_hostname_empty_no_default(self): hostname = u"\u7684" self.assertEqual("", utils.sanitize_hostname(hostname)) def test_hostname_empty_minus_period(self): hostname = "---..." self.assertEqual("", utils.sanitize_hostname(hostname)) def test_hostname_with_space(self): hostname = " a b c " self.assertEqual("a-b-c", utils.sanitize_hostname(hostname)) def test_hostname_too_long(self): hostname = "a" * 64 self.assertEqual(63, len(utils.sanitize_hostname(hostname))) def test_hostname_truncated_no_hyphen(self): hostname = "a" * 62 hostname = hostname + '-' + 'a' res = utils.sanitize_hostname(hostname) # we trim to 63 and then trim the trailing dash self.assertEqual(62, len(res)) self.assertFalse(res.endswith('-'), 'The hostname ends with a -') def test_generate_password(self): password = utils.generate_password() self.assertTrue([c for c in password if c in '0123456789']) self.assertTrue([c for c in password if c in 'abcdefghijklmnopqrstuvwxyz']) self.assertTrue([c for c in password if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']) def test_read_file_as_root(self): def fake_execute(*args, **kwargs): if args[1] == 'bad': raise processutils.ProcessExecutionError() return 'fakecontents', None self.stub_out('nova.utils.execute', fake_execute) contents = utils.read_file_as_root('good') self.assertEqual(contents, 'fakecontents') self.assertRaises(exception.FileNotFound, utils.read_file_as_root, 'bad') def test_temporary_chown(self): def fake_execute(*args, **kwargs): if args[0] == 'chown': fake_execute.uid = args[1] self.stub_out('nova.utils.execute', fake_execute) with tempfile.NamedTemporaryFile() as f: with utils.temporary_chown(f.name, owner_uid=2): self.assertEqual(fake_execute.uid, 2) self.assertEqual(fake_execute.uid, os.getuid()) def test_xhtml_escape(self): self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"')) self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'")) self.assertEqual('&amp;', utils.xhtml_escape('&')) self.assertEqual('&gt;', utils.xhtml_escape('>')) self.assertEqual('&lt;', utils.xhtml_escape('<')) self.assertEqual('&lt;foo&gt;', utils.xhtml_escape('<foo>')) def test_is_valid_ipv6_cidr(self): self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64")) self.assertTrue(utils.is_valid_ipv6_cidr( "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48")) self.assertTrue(utils.is_valid_ipv6_cidr( "0000:0000:0000:0000:0000:0000:0000:0001/32")) self.assertTrue(utils.is_valid_ipv6_cidr( "0000:0000:0000:0000:0000:0000:0000:0001")) 
self.assertFalse(utils.is_valid_ipv6_cidr("foo")) self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1")) def test_get_shortened_ipv6(self): self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe", utils.get_shortened_ipv6( "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254")) self.assertEqual("::1", utils.get_shortened_ipv6( "0000:0000:0000:0000:0000:0000:0000:0001")) self.assertEqual("caca::caca:0:babe:201:102", utils.get_shortened_ipv6( "caca:0000:0000:caca:0000:babe:0201:0102")) self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6, "127.0.0.1") self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6, "failure") def test_get_shortened_ipv6_cidr(self): self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr( "2600:0000:0000:0000:0000:0000:0000:0000/64")) self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr( "2600::1/64")) self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6_cidr, "127.0.0.1") self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6_cidr, "failure") def test_safe_ip_format(self): self.assertEqual("[::1]", utils.safe_ip_format("::1")) self.assertEqual("127.0.0.1", utils.safe_ip_format("127.0.0.1")) self.assertEqual("[::ffff:127.0.0.1]", utils.safe_ip_format( "::ffff:127.0.0.1")) self.assertEqual("localhost", utils.safe_ip_format("localhost")) def test_format_remote_path(self): self.assertEqual("[::1]:/foo/bar", utils.format_remote_path("::1", "/foo/bar")) self.assertEqual("127.0.0.1:/foo/bar", utils.format_remote_path("127.0.0.1", "/foo/bar")) self.assertEqual("[::ffff:127.0.0.1]:/foo/bar", utils.format_remote_path("::ffff:127.0.0.1", "/foo/bar")) self.assertEqual("localhost:/foo/bar", utils.format_remote_path("localhost", "/foo/bar")) self.assertEqual("/foo/bar", utils.format_remote_path(None, "/foo/bar")) def test_get_hash_str(self): base_str = b"foo" base_unicode = u"foo" value = hashlib.md5(base_str).hexdigest() self.assertEqual( value, utils.get_hash_str(base_str)) self.assertEqual( value, utils.get_hash_str(base_unicode)) def test_use_rootwrap(self): self.flags(disable_rootwrap=False, group='workarounds') self.flags(rootwrap_config='foo') cmd = utils.get_root_helper() self.assertEqual('sudo nova-rootwrap foo', cmd) @mock.patch('nova.utils.RootwrapProcessHelper') def test_get_root_helper_proc(self, mock_proc_helper): self.flags(use_rootwrap_daemon=False) self.flags(rootwrap_config="/path/to/conf") utils._get_rootwrap_helper() mock_proc_helper.assert_called_once_with() @mock.patch('nova.utils.RootwrapDaemonHelper') def test_get_root_helper_daemon(self, mock_daemon_helper): conf_path = '/path/to/conf' self.flags(use_rootwrap_daemon=True) self.flags(rootwrap_config=conf_path) utils._get_rootwrap_helper() mock_daemon_helper.assert_called_once_with(conf_path) def test_use_sudo(self): self.flags(disable_rootwrap=True, group='workarounds') cmd = utils.get_root_helper() self.assertEqual('sudo', cmd) def test_ssh_execute(self): expected_args = ('ssh', '-o', 'BatchMode=yes', 'remotehost', 'ls', '-l') with mock.patch('nova.utils.execute') as mock_method: utils.ssh_execute('remotehost', 'ls', '-l') mock_method.assert_called_once_with(*expected_args) class TestCachedFile(test.NoDBTestCase): @mock.patch('os.path.getmtime', return_value=1) def test_read_cached_file(self, getmtime): utils._FILE_CACHE = { '/this/is/a/fake': {"data": 1123, "mtime": 1} } fresh, data = utils.read_cached_file("/this/is/a/fake") fdata = utils._FILE_CACHE['/this/is/a/fake']["data"] self.assertEqual(fdata, data) @mock.patch('os.path.getmtime', 
return_value=2) def test_read_modified_cached_file(self, getmtime): utils._FILE_CACHE = { '/this/is/a/fake': {"data": 1123, "mtime": 1} } fake_contents = "lorem ipsum" with mock.patch('six.moves.builtins.open', mock.mock_open(read_data=fake_contents)): fresh, data = utils.read_cached_file("/this/is/a/fake") self.assertEqual(data, fake_contents) self.assertTrue(fresh) def test_delete_cached_file(self): filename = '/this/is/a/fake/deletion/of/cached/file' utils._FILE_CACHE = { filename: {"data": 1123, "mtime": 1} } self.assertIn(filename, utils._FILE_CACHE) utils.delete_cached_file(filename) self.assertNotIn(filename, utils._FILE_CACHE) def test_delete_cached_file_not_exist(self): # We expect that if cached file does not exist no Exception raised. filename = '/this/is/a/fake/deletion/attempt/of/not/cached/file' self.assertNotIn(filename, utils._FILE_CACHE) utils.delete_cached_file(filename) self.assertNotIn(filename, utils._FILE_CACHE) class RootwrapDaemonTesetCase(test.NoDBTestCase): @mock.patch('oslo_rootwrap.client.Client') def test_get_client(self, mock_client): mock_conf = mock.MagicMock() utils.RootwrapDaemonHelper(mock_conf) mock_client.assert_called_once_with( ["sudo", "nova-rootwrap-daemon", mock_conf]) @mock.patch('nova.utils.LOG.info') def test_execute(self, mock_info): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(0, None, None)) daemon.execute('a', 1, foo='bar', run_as_root=True) daemon.client.execute.assert_called_once_with(['a', '1'], None) mock_info.assert_has_calls([mock.call( u'Executing RootwrapDaemonHelper.execute cmd=[%(cmd)r] ' u'kwargs=[%(kwargs)r]', {'cmd': u'a 1', 'kwargs': {'run_as_root': True, 'foo': 'bar'}})]) def test_execute_with_kwargs(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(0, None, None)) daemon.execute('a', 1, foo='bar', run_as_root=True, process_input=True) daemon.client.execute.assert_called_once_with(['a', '1'], True) def test_execute_fail(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) self.assertRaises(processutils.ProcessExecutionError, daemon.execute, 'b', 2) def test_execute_pass_with_check_exit_code(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) daemon.execute('b', 2, check_exit_code=[-2]) def test_execute_fail_with_retry(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) self.assertRaises(processutils.ProcessExecutionError, daemon.execute, 'b', 2, attempts=2) daemon.client.execute.assert_has_calls( [mock.call(['b', '2'], None), mock.call(['b', '2'], None)]) @mock.patch('nova.utils.LOG.log') def test_execute_fail_and_logging(self, mock_log): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) self.assertRaises(processutils.ProcessExecutionError, daemon.execute, 'b', 2, attempts=2, loglevel=logging.CRITICAL, log_errors=processutils.LOG_ALL_ERRORS) mock_log.assert_has_calls( [ mock.call(logging.CRITICAL, 
u'Running cmd (subprocess): %s', u'b 2'), mock.call(logging.CRITICAL, 'CMD "%(sanitized_cmd)s" returned: %(return_code)s ' 'in %(end_time)0.3fs', {'sanitized_cmd': u'b 2', 'return_code': -2, 'end_time': mock.ANY}), mock.call(logging.CRITICAL, u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r' u'\nstdout: %(stdout)r\nstderr: %(stderr)r', {'code': -2, 'cmd': u'b 2', 'stdout': u'None', 'stderr': u'None', 'desc': None}), mock.call(logging.CRITICAL, u'%r failed. Retrying.', u'b 2'), mock.call(logging.CRITICAL, u'Running cmd (subprocess): %s', u'b 2'), mock.call(logging.CRITICAL, 'CMD "%(sanitized_cmd)s" returned: %(return_code)s ' 'in %(end_time)0.3fs', {'sanitized_cmd': u'b 2', 'return_code': -2, 'end_time': mock.ANY}), mock.call(logging.CRITICAL, u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r' u'\nstdout: %(stdout)r\nstderr: %(stderr)r', {'code': -2, 'cmd': u'b 2', 'stdout': u'None', 'stderr': u'None', 'desc': None}), mock.call(logging.CRITICAL, u'%r failed. Not Retrying.', u'b 2')] ) def test_trycmd(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(0, None, None)) daemon.trycmd('a', 1, foo='bar', run_as_root=True) daemon.client.execute.assert_called_once_with(['a', '1'], None) def test_trycmd_with_kwargs(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.execute = mock.Mock(return_value=('out', 'err')) daemon.trycmd('a', 1, foo='bar', run_as_root=True, loglevel=logging.WARN, log_errors=True, process_input=True, delay_on_retry=False, attempts=5, check_exit_code=[200]) daemon.execute.assert_called_once_with('a', 1, attempts=5, check_exit_code=[200], delay_on_retry=False, foo='bar', log_errors=True, loglevel=30, process_input=True, run_as_root=True) def test_trycmd_fail(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) expected_err = six.text_type('''\ Unexpected error while running command. Command: a 1 Exit code: -2''') out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True) daemon.client.execute.assert_called_once_with(['a', '1'], None) self.assertIn(expected_err, err) def test_trycmd_fail_with_rety(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) expected_err = six.text_type('''\ Unexpected error while running command. 
Command: a 1 Exit code: -2''') out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True, attempts=3) self.assertIn(expected_err, err) daemon.client.execute.assert_has_calls( [mock.call(['a', '1'], None), mock.call(['a', '1'], None), mock.call(['a', '1'], None)]) class VPNPingTestCase(test.NoDBTestCase): """Unit tests for utils.vpn_ping().""" def setUp(self): super(VPNPingTestCase, self).setUp() self.port = 'fake' self.address = 'fake' self.session_id = 0x1234 self.fmt = '!BQxxxxxQxxxx' def fake_reply_packet(self, pkt_id=0x40): return struct.pack(self.fmt, pkt_id, 0x0, self.session_id) def setup_socket(self, mock_socket, return_value, side_effect=None): socket_obj = mock.MagicMock() if side_effect is not None: socket_obj.recv.side_effect = side_effect else: socket_obj.recv.return_value = return_value mock_socket.return_value = socket_obj @mock.patch.object(socket, 'socket') def test_vpn_ping_timeout(self, mock_socket): """Server doesn't reply within timeout.""" self.setup_socket(mock_socket, None, socket.timeout) rc = utils.vpn_ping(self.address, self.port, session_id=self.session_id) self.assertFalse(rc) @mock.patch.object(socket, 'socket') def test_vpn_ping_bad_len(self, mock_socket): """Test a short/invalid server reply.""" self.setup_socket(mock_socket, 'fake_reply') rc = utils.vpn_ping(self.address, self.port, session_id=self.session_id) self.assertFalse(rc) @mock.patch.object(socket, 'socket') def test_vpn_ping_bad_id(self, mock_socket): """Server sends an unknown packet ID.""" self.setup_socket(mock_socket, self.fake_reply_packet(pkt_id=0x41)) rc = utils.vpn_ping(self.address, self.port, session_id=self.session_id) self.assertFalse(rc) @mock.patch.object(socket, 'socket') def test_vpn_ping_ok(self, mock_socket): self.setup_socket(mock_socket, self.fake_reply_packet()) rc = utils.vpn_ping(self.address, self.port, session_id=self.session_id) self.assertTrue(rc) class MonkeyPatchTestCase(test.NoDBTestCase): """Unit test for utils.monkey_patch().""" def setUp(self): super(MonkeyPatchTestCase, self).setUp() self.example_package = 'nova.tests.unit.monkey_patch_example.' self.flags( monkey_patch=True, monkey_patch_modules=[self.example_package + 'example_a' + ':' + self.example_package + 'example_decorator']) def test_monkey_patch(self): utils.monkey_patch() nova.tests.unit.monkey_patch_example.CALLED_FUNCTION = [] from nova.tests.unit.monkey_patch_example import example_a from nova.tests.unit.monkey_patch_example import example_b self.assertEqual('Example function', example_a.example_function_a()) exampleA = example_a.ExampleClassA() exampleA.example_method() ret_a = exampleA.example_method_add(3, 5) self.assertEqual(ret_a, 8) self.assertEqual('Example function', example_b.example_function_b()) exampleB = example_b.ExampleClassB() exampleB.example_method() ret_b = exampleB.example_method_add(3, 5) self.assertEqual(ret_b, 8) package_a = self.example_package + 'example_a.' self.assertIn(package_a + 'example_function_a', nova.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertIn(package_a + 'ExampleClassA.example_method', nova.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertIn(package_a + 'ExampleClassA.example_method_add', nova.tests.unit.monkey_patch_example.CALLED_FUNCTION) package_b = self.example_package + 'example_b.' 
self.assertNotIn(package_b + 'example_function_b', nova.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertNotIn(package_b + 'ExampleClassB.example_method', nova.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertNotIn(package_b + 'ExampleClassB.example_method_add', nova.tests.unit.monkey_patch_example.CALLED_FUNCTION) class MonkeyPatchDefaultTestCase(test.NoDBTestCase): """Unit test for default monkey_patch_modules value.""" def setUp(self): super(MonkeyPatchDefaultTestCase, self).setUp() self.flags( monkey_patch=True) def test_monkey_patch_default_mod(self): # monkey_patch_modules is defined to be # <module_to_patch>:<decorator_to_patch_with> # Here we check that both parts of the default values are # valid for module in CONF.monkey_patch_modules: m = module.split(':', 1) # Check we can import the module to be patched importlib.import_module(m[0]) # check the decorator is valid decorator_name = m[1].rsplit('.', 1) decorator_module = importlib.import_module(decorator_name[0]) getattr(decorator_module, decorator_name[1]) class AuditPeriodTest(test.NoDBTestCase): def setUp(self): super(AuditPeriodTest, self).setUp() # a fairly random time to test with self.useFixture(utils_fixture.TimeFixture( datetime.datetime(second=23, minute=12, hour=8, day=5, month=3, year=2012))) def test_hour(self): begin, end = utils.last_completed_audit_period(unit='hour') self.assertEqual(begin, datetime.datetime( hour=7, day=5, month=3, year=2012)) self.assertEqual(end, datetime.datetime( hour=8, day=5, month=3, year=2012)) def test_hour_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='hour@10') self.assertEqual(begin, datetime.datetime( minute=10, hour=7, day=5, month=3, year=2012)) self.assertEqual(end, datetime.datetime( minute=10, hour=8, day=5, month=3, year=2012)) def test_hour_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='hour@30') self.assertEqual(begin, datetime.datetime( minute=30, hour=6, day=5, month=3, year=2012)) self.assertEqual(end, datetime.datetime( minute=30, hour=7, day=5, month=3, year=2012)) def test_day(self): begin, end = utils.last_completed_audit_period(unit='day') self.assertEqual(begin, datetime.datetime( day=4, month=3, year=2012)) self.assertEqual(end, datetime.datetime( day=5, month=3, year=2012)) def test_day_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='day@6') self.assertEqual(begin, datetime.datetime( hour=6, day=4, month=3, year=2012)) self.assertEqual(end, datetime.datetime( hour=6, day=5, month=3, year=2012)) def test_day_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='day@10') self.assertEqual(begin, datetime.datetime( hour=10, day=3, month=3, year=2012)) self.assertEqual(end, datetime.datetime( hour=10, day=4, month=3, year=2012)) def test_month(self): begin, end = utils.last_completed_audit_period(unit='month') self.assertEqual(begin, datetime.datetime( day=1, month=2, year=2012)) self.assertEqual(end, datetime.datetime( day=1, month=3, year=2012)) def test_month_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='month@2') self.assertEqual(begin, datetime.datetime( day=2, month=2, year=2012)) self.assertEqual(end, datetime.datetime( day=2, month=3, year=2012)) def test_month_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='month@15') self.assertEqual(begin, datetime.datetime( day=15, month=1, year=2012)) self.assertEqual(end, 
datetime.datetime( day=15, month=2, year=2012)) def test_year(self): begin, end = utils.last_completed_audit_period(unit='year') self.assertEqual(begin, datetime.datetime( day=1, month=1, year=2011)) self.assertEqual(end, datetime.datetime( day=1, month=1, year=2012)) def test_year_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='year@2') self.assertEqual(begin, datetime.datetime( day=1, month=2, year=2011)) self.assertEqual(end, datetime.datetime( day=1, month=2, year=2012)) def test_year_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='year@6') self.assertEqual(begin, datetime.datetime( day=1, month=6, year=2010)) self.assertEqual(end, datetime.datetime( day=1, month=6, year=2011)) class MkfsTestCase(test.NoDBTestCase): @mock.patch('nova.utils.execute') def test_mkfs_ext4(self, mock_execute): utils.mkfs('ext4', '/my/block/dev') mock_execute.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '/my/block/dev', run_as_root=False) @mock.patch('nova.utils.execute') def test_mkfs_msdos(self, mock_execute): utils.mkfs('msdos', '/my/msdos/block/dev') mock_execute.assert_called_once_with('mkfs', '-t', 'msdos', '/my/msdos/block/dev', run_as_root=False) @mock.patch('nova.utils.execute') def test_mkfs_swap(self, mock_execute): utils.mkfs('swap', '/my/swap/block/dev') mock_execute.assert_called_once_with('mkswap', '/my/swap/block/dev', run_as_root=False) @mock.patch('nova.utils.execute') def test_mkfs_ext4_withlabel(self, mock_execute): utils.mkfs('ext4', '/my/block/dev', 'ext4-vol') mock_execute.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L', 'ext4-vol', '/my/block/dev', run_as_root=False) @mock.patch('nova.utils.execute') def test_mkfs_msdos_withlabel(self, mock_execute): utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol') mock_execute.assert_called_once_with('mkfs', '-t', 'msdos', '-n', 'msdos-vol', '/my/msdos/block/dev', run_as_root=False) @mock.patch('nova.utils.execute') def test_mkfs_swap_withlabel(self, mock_execute): utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol') mock_execute.assert_called_once_with('mkswap', '-L', 'swap-vol', '/my/swap/block/dev', run_as_root=False) class LastBytesTestCase(test.NoDBTestCase): """Test the last_bytes() utility method.""" def setUp(self): super(LastBytesTestCase, self).setUp() self.f = six.BytesIO(b'1234567890') def test_truncated(self): self.f.seek(0, os.SEEK_SET) out, remaining = utils.last_bytes(self.f, 5) self.assertEqual(out, b'67890') self.assertTrue(remaining > 0) def test_read_all(self): self.f.seek(0, os.SEEK_SET) out, remaining = utils.last_bytes(self.f, 1000) self.assertEqual(out, b'1234567890') self.assertFalse(remaining > 0) def test_seek_too_far_real_file(self): # StringIO doesn't raise IOError if you see past the start of the file. 
with tempfile.TemporaryFile() as flo: content = b'1234567890' flo.write(content) self.assertEqual((content, 0), utils.last_bytes(flo, 1000)) class MetadataToDictTestCase(test.NoDBTestCase): def test_metadata_to_dict(self): self.assertEqual(utils.metadata_to_dict( [{'key': 'foo1', 'value': 'bar'}, {'key': 'foo2', 'value': 'baz'}]), {'foo1': 'bar', 'foo2': 'baz'}) def test_metadata_to_dict_with_include_deleted(self): metadata = [{'key': 'foo1', 'value': 'bar', 'deleted': 1442875429, 'other': 'stuff'}, {'key': 'foo2', 'value': 'baz', 'deleted': 0, 'other': 'stuff2'}] self.assertEqual({'foo1': 'bar', 'foo2': 'baz'}, utils.metadata_to_dict(metadata, include_deleted=True)) self.assertEqual({'foo2': 'baz'}, utils.metadata_to_dict(metadata, include_deleted=False)) # verify correct default behavior self.assertEqual(utils.metadata_to_dict(metadata), utils.metadata_to_dict(metadata, include_deleted=False)) def test_metadata_to_dict_empty(self): self.assertEqual({}, utils.metadata_to_dict([])) self.assertEqual({}, utils.metadata_to_dict([], include_deleted=True)) self.assertEqual({}, utils.metadata_to_dict([], include_deleted=False)) def test_dict_to_metadata(self): def sort_key(adict): return sorted(adict.items()) metadata = utils.dict_to_metadata(dict(foo1='bar1', foo2='bar2')) expected = [{'key': 'foo1', 'value': 'bar1'}, {'key': 'foo2', 'value': 'bar2'}] self.assertEqual(sorted(metadata, key=sort_key), sorted(expected, key=sort_key)) def test_dict_to_metadata_empty(self): self.assertEqual(utils.dict_to_metadata({}), []) class ExpectedArgsTestCase(test.NoDBTestCase): def test_passes(self): @utils.expects_func_args('foo', 'baz') def dec(f): return f @dec def func(foo, bar, baz="lol"): pass # Call to ensure nothing errors func(None, None) def test_raises(self): @utils.expects_func_args('foo', 'baz') def dec(f): return f def func(bar, baz): pass self.assertRaises(TypeError, dec, func) def test_var_no_of_args(self): @utils.expects_func_args('foo') def dec(f): return f @dec def func(bar, *args, **kwargs): pass # Call to ensure nothing errors func(None) def test_more_layers(self): @utils.expects_func_args('foo', 'baz') def dec(f): return f def dec_2(f): def inner_f(*a, **k): return f() return inner_f @dec_2 def func(bar, baz): pass self.assertRaises(TypeError, dec, func) class StringLengthTestCase(test.NoDBTestCase): def test_check_string_length(self): self.assertIsNone(utils.check_string_length( 'test', 'name', max_length=255)) self.assertRaises(exception.InvalidInput, utils.check_string_length, 11, 'name', max_length=255) self.assertRaises(exception.InvalidInput, utils.check_string_length, '', 'name', min_length=1) self.assertRaises(exception.InvalidInput, utils.check_string_length, 'a' * 256, 'name', max_length=255) def test_check_string_length_noname(self): self.assertIsNone(utils.check_string_length( 'test', max_length=255)) self.assertRaises(exception.InvalidInput, utils.check_string_length, 11, max_length=255) self.assertRaises(exception.InvalidInput, utils.check_string_length, '', min_length=1) self.assertRaises(exception.InvalidInput, utils.check_string_length, 'a' * 256, max_length=255) class ValidateIntegerTestCase(test.NoDBTestCase): def test_valid_inputs(self): self.assertEqual( utils.validate_integer(42, "answer"), 42) self.assertEqual( utils.validate_integer("42", "answer"), 42) self.assertEqual( utils.validate_integer( "7", "lucky", min_value=7, max_value=8), 7) self.assertEqual( utils.validate_integer( 7, "lucky", min_value=6, max_value=7), 7) self.assertEqual( utils.validate_integer( 
300, "Spartaaa!!!", min_value=300), 300) self.assertEqual( utils.validate_integer( "300", "Spartaaa!!!", max_value=300), 300) def test_invalid_inputs(self): self.assertRaises(exception.InvalidInput, utils.validate_integer, "im-not-an-int", "not-an-int") self.assertRaises(exception.InvalidInput, utils.validate_integer, 3.14, "Pie") self.assertRaises(exception.InvalidInput, utils.validate_integer, "299", "Sparta no-show", min_value=300, max_value=300) self.assertRaises(exception.InvalidInput, utils.validate_integer, 55, "doing 55 in a 54", max_value=54) self.assertRaises(exception.InvalidInput, utils.validate_integer, six.unichr(129), "UnicodeError", max_value=1000) class ValidateNeutronConfiguration(test.NoDBTestCase): def test_nova_network(self): self.assertFalse(utils.is_neutron()) def test_neutron(self): self.flags(use_neutron=True) self.assertTrue(utils.is_neutron()) class AutoDiskConfigUtilTestCase(test.NoDBTestCase): def test_is_auto_disk_config_disabled(self): self.assertTrue(utils.is_auto_disk_config_disabled("Disabled ")) def test_is_auto_disk_config_disabled_none(self): self.assertFalse(utils.is_auto_disk_config_disabled(None)) def test_is_auto_disk_config_disabled_false(self): self.assertFalse(utils.is_auto_disk_config_disabled("false")) class GetSystemMetadataFromImageTestCase(test.NoDBTestCase): def get_image(self): image_meta = { "id": "fake-image", "name": "fake-name", "min_ram": 1, "min_disk": 1, "disk_format": "raw", "container_format": "bare", } return image_meta def get_flavor(self): flavor = { "id": "fake.flavor", "root_gb": 10, } return flavor def test_base_image_properties(self): image = self.get_image() # Verify that we inherit all the needed keys sys_meta = utils.get_system_metadata_from_image(image) for key in utils.SM_INHERITABLE_KEYS: sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertEqual(image[key], sys_meta.get(sys_key)) # Verify that everything else is ignored self.assertEqual(len(sys_meta), len(utils.SM_INHERITABLE_KEYS)) def test_inherit_image_properties(self): image = self.get_image() image["properties"] = {"foo1": "bar", "foo2": "baz"} sys_meta = utils.get_system_metadata_from_image(image) # Verify that we inherit all the image properties for key, expected in six.iteritems(image["properties"]): sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertEqual(sys_meta[sys_key], expected) def test_skip_image_properties(self): image = self.get_image() image["properties"] = { "foo1": "bar", "foo2": "baz", "mappings": "wizz", "img_block_device_mapping": "eek", } sys_meta = utils.get_system_metadata_from_image(image) # Verify that we inherit all the image properties for key, expected in six.iteritems(image["properties"]): sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) if key in utils.SM_SKIP_KEYS: self.assertNotIn(sys_key, sys_meta) else: self.assertEqual(sys_meta[sys_key], expected) def test_vhd_min_disk_image(self): image = self.get_image() flavor = self.get_flavor() image["disk_format"] = "vhd" sys_meta = utils.get_system_metadata_from_image(image, flavor) # Verify that the min_disk property is taken from # flavor's root_gb when using vhd disk format sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, "min_disk") self.assertEqual(sys_meta[sys_key], flavor["root_gb"]) def test_dont_inherit_empty_values(self): image = self.get_image() for key in utils.SM_INHERITABLE_KEYS: image[key] = None sys_meta = utils.get_system_metadata_from_image(image) # Verify that the empty properties have not been inherited for key in utils.SM_INHERITABLE_KEYS: 
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertNotIn(sys_key, sys_meta) class GetImageFromSystemMetadataTestCase(test.NoDBTestCase): def get_system_metadata(self): sys_meta = { "image_min_ram": 1, "image_min_disk": 1, "image_disk_format": "raw", "image_container_format": "bare", } return sys_meta def test_image_from_system_metadata(self): sys_meta = self.get_system_metadata() sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar" sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz" sys_meta["%simg_block_device_mapping" % utils.SM_IMAGE_PROP_PREFIX] = "eek" image = utils.get_image_from_system_metadata(sys_meta) # Verify that we inherit all the needed keys for key in utils.SM_INHERITABLE_KEYS: sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertEqual(image[key], sys_meta.get(sys_key)) # Verify that we inherit the rest of metadata as properties self.assertIn("properties", image) for key, value in six.iteritems(image["properties"]): sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertEqual(image["properties"][key], sys_meta[sys_key]) self.assertNotIn("img_block_device_mapping", image["properties"]) def test_dont_inherit_empty_values(self): sys_meta = self.get_system_metadata() for key in utils.SM_INHERITABLE_KEYS: sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) sys_meta[sys_key] = None image = utils.get_image_from_system_metadata(sys_meta) # Verify that the empty properties have not been inherited for key in utils.SM_INHERITABLE_KEYS: self.assertNotIn(key, image) class GetImageMetadataFromVolumeTestCase(test.NoDBTestCase): def test_inherit_image_properties(self): properties = {"fake_prop": "fake_value"} volume = {"volume_image_metadata": properties} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual(properties, image_meta["properties"]) def test_image_size(self): volume = {"size": 10} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual(10 * units.Gi, image_meta["size"]) def test_image_status(self): volume = {} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual("active", image_meta["status"]) def test_values_conversion(self): properties = {"min_ram": "5", "min_disk": "7"} volume = {"volume_image_metadata": properties} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual(5, image_meta["min_ram"]) self.assertEqual(7, image_meta["min_disk"]) def test_suppress_not_image_properties(self): properties = {"min_ram": "256", "min_disk": "128", "image_id": "fake_id", "image_name": "fake_name", "container_format": "ami", "disk_format": "ami", "size": "1234", "checksum": "fake_checksum"} volume = {"volume_image_metadata": properties} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual({}, image_meta["properties"]) self.assertEqual(0, image_meta["size"]) # volume's properties should not be touched self.assertNotEqual({}, properties) class ResourceFilterTestCase(test.NoDBTestCase): def _assert_filtering(self, res_list, filts, expected_tags): actual_tags = utils.filter_and_format_resource_metadata('instance', res_list, filts, 'metadata') self.assertJsonEqual(expected_tags, actual_tags) def test_filter_and_format_resource_metadata(self): # Create some tags # One overlapping pair, and one different key value pair # i1 : foo=bar, bax=wibble # i2 : foo=bar, baz=quux # resources i1 = { 'uuid': '1', 'metadata': {'foo': 'bar', 'bax': 'wibble'}, } i2 = { 'uuid': '2', 'metadata': {'foo': 'bar', 'baz': 'quux'}, } # Resources list rl = [i1, i2] # tags i11 = 
{'instance_id': '1', 'key': 'foo', 'value': 'bar'} i12 = {'instance_id': '1', 'key': 'bax', 'value': 'wibble'} i21 = {'instance_id': '2', 'key': 'foo', 'value': 'bar'} i22 = {'instance_id': '2', 'key': 'baz', 'value': 'quux'} # No filter self._assert_filtering(rl, [], [i11, i12, i21, i22]) self._assert_filtering(rl, {}, [i11, i12, i21, i22]) # Key search # Both should have tags with key 'foo' and value 'bar' self._assert_filtering(rl, {'key': 'foo', 'value': 'bar'}, [i11, i21]) # Both should have tags with key 'foo' self._assert_filtering(rl, {'key': 'foo'}, [i11, i21]) # Only i2 should have tags with key 'baz' and value 'quux' self._assert_filtering(rl, {'key': 'baz', 'value': 'quux'}, [i22]) # Only i2 should have tags with value 'quux' self._assert_filtering(rl, {'value': 'quux'}, [i22]) # Empty list should be returned when no tags match self._assert_filtering(rl, {'key': 'split', 'value': 'banana'}, []) # Multiple values # Only i2 should have tags with key 'baz' and values in the set # ['quux', 'wibble'] self._assert_filtering(rl, {'key': 'baz', 'value': ['quux', 'wibble']}, [i22]) # But when specified as two different filters, no tags should be # returned. This is because, the filter will mean "return tags which # have (key=baz AND value=quux) AND (key=baz AND value=wibble) self._assert_filtering(rl, [{'key': 'baz', 'value': 'quux'}, {'key': 'baz', 'value': 'wibble'}], []) # Test for regex self._assert_filtering(rl, {'value': '\\Aqu..*\\Z(?s)'}, [i22]) # Make sure bug #1365887 is fixed i1['metadata']['key3'] = 'a' self._assert_filtering(rl, {'value': 'banana'}, []) class SafeTruncateTestCase(test.NoDBTestCase): def test_exception_to_dict_with_long_message_3_bytes(self): # Generate Chinese byte string whose length is 300. This Chinese UTF-8 # character occupies 3 bytes. After truncating, the byte string length # should be 255. msg = u'\u8d75' * 100 truncated_msg = utils.safe_truncate(msg, 255) byte_message = encodeutils.safe_encode(truncated_msg) self.assertEqual(255, len(byte_message)) def test_exception_to_dict_with_long_message_2_bytes(self): # Generate Russian byte string whose length is 300. This Russian UTF-8 # character occupies 2 bytes. After truncating, the byte string length # should be 254. 
msg = encodeutils.safe_decode('\xd0\x92' * 150) truncated_msg = utils.safe_truncate(msg, 255) byte_message = encodeutils.safe_encode(truncated_msg) self.assertEqual(254, len(byte_message)) class SpawnNTestCase(test.NoDBTestCase): def setUp(self): super(SpawnNTestCase, self).setUp() self.useFixture(context_fixture.ClearRequestContext()) self.spawn_name = 'spawn_n' def test_spawn_n_no_context(self): self.assertIsNone(common_context.get_current()) def _fake_spawn(func, *args, **kwargs): # call the method to ensure no error is raised func(*args, **kwargs) self.assertEqual('test', args[0]) def fake(arg): pass with mock.patch.object(eventlet, self.spawn_name, _fake_spawn): getattr(utils, self.spawn_name)(fake, 'test') self.assertIsNone(common_context.get_current()) def test_spawn_n_context(self): self.assertIsNone(common_context.get_current()) ctxt = context.RequestContext('user', 'project') def _fake_spawn(func, *args, **kwargs): # call the method to ensure no error is raised func(*args, **kwargs) self.assertEqual(ctxt, args[0]) self.assertEqual('test', kwargs['kwarg1']) def fake(context, kwarg1=None): pass with mock.patch.object(eventlet, self.spawn_name, _fake_spawn): getattr(utils, self.spawn_name)(fake, ctxt, kwarg1='test') self.assertEqual(ctxt, common_context.get_current()) def test_spawn_n_context_different_from_passed(self): self.assertIsNone(common_context.get_current()) ctxt = context.RequestContext('user', 'project') ctxt_passed = context.RequestContext('user', 'project', overwrite=False) self.assertEqual(ctxt, common_context.get_current()) def _fake_spawn(func, *args, **kwargs): # call the method to ensure no error is raised func(*args, **kwargs) self.assertEqual(ctxt_passed, args[0]) self.assertEqual('test', kwargs['kwarg1']) def fake(context, kwarg1=None): pass with mock.patch.object(eventlet, self.spawn_name, _fake_spawn): getattr(utils, self.spawn_name)(fake, ctxt_passed, kwarg1='test') self.assertEqual(ctxt, common_context.get_current()) class SpawnTestCase(SpawnNTestCase): def setUp(self): super(SpawnTestCase, self).setUp() self.spawn_name = 'spawn' class UT8TestCase(test.NoDBTestCase): def test_none_value(self): self.assertIsInstance(utils.utf8(None), type(None)) def test_bytes_value(self): some_value = b"fake data" return_value = utils.utf8(some_value) # check that type of returned value doesn't changed self.assertIsInstance(return_value, type(some_value)) self.assertEqual(some_value, return_value) def test_not_text_type(self): return_value = utils.utf8(1) self.assertEqual(b"1", return_value) self.assertIsInstance(return_value, six.binary_type) def test_text_type_with_encoding(self): some_value = 'test\u2026config' self.assertEqual(some_value, utils.utf8(some_value).decode("utf-8"))
py
1a33261d2aa40cc74d40a36a366fe708d724295b
# Generated by Django 3.0.3 on 2020-07-06 07:35

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('egs_locations_app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='playfield',
            name='specialNotes',
            field=models.CharField(blank=True, default=None, help_text='Anything particularly groovy about this playfield', max_length=1024, null=True),
        ),
        migrations.AlterField(
            model_name='poi',
            name='countBlackContainers',
            field=models.IntegerField(blank=True, default=None, help_text='The number of black containers in this POI is >= this number', null=True),
        ),
        migrations.AlterField(
            model_name='poi',
            name='countBrownContainers',
            field=models.IntegerField(blank=True, default=None, help_text='The number of brown containers in this POI is >= this number', null=True),
        ),
        migrations.AlterField(
            model_name='poi',
            name='countRedContainers',
            field=models.IntegerField(blank=True, default=None, help_text='The number of red containers in this POI is >= this number', null=True),
        ),
        migrations.AlterField(
            model_name='poi',
            name='countWhiteContainers',
            field=models.IntegerField(blank=True, default=None, help_text='The number of white containers in this POI is >= this number', null=True),
        ),
        migrations.AlterField(
            model_name='poi',
            name='countYellowContainers',
            field=models.IntegerField(blank=True, default=None, help_text='The number of yellow containers in this POI is >= this number', null=True),
        ),
        migrations.AlterField(
            model_name='system',
            name='territory',
            field=models.ForeignKey(blank=True, default=None, help_text='Faction territory the system belongs to', null=True, on_delete=django.db.models.deletion.CASCADE, to='egs_locations_app.Faction'),
        ),
    ]
py
1a332750b3ffb5b61e315696806a1e44e6027014
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _


class TagsConfig(AppConfig):
    name = "svaudio.tags"
    verbose_name = _("Tags")
    label = "Tags"

    def ready(self):
        from actstream import registry

        registry.register(self.get_model("Tag"))
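# Editorial note (not part of the original module): registering the Tag model
# with django-activity-stream's registry inside ready() is what lets Tag objects
# take part in activity streams; actstream refuses to record actions involving
# unregistered models. A minimal usage sketch, assuming the standard actstream
# action API and hypothetical `user` and `tag` objects:
#
#     from actstream import action
#     action.send(user, verb='tagged an item with', target=tag)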
py
1a3327b53750eba9dd8a7ce2e6cea85653219716
# Script to sort the large NZ Charities files into smaller files by year # Alasdair Rutherford, Diarmuid McDonnell # Created: 29 March 2018 # Last edited: Github history - https://github.com/DiarmuidM/mission_accomp/tree/master/syntax/data_collection/nz # Edited by Tom Wallace # Because of download limmitations of the API the larger files had to be downloaded in chunks. This file re-combines the chunked files for 'vOfficerOrganisations' and 'GrpOrgAllReturns'. # Data guide: https://www.charities.govt.nz/charities-in-new-zealand/the-charities-register/open-data/ #######Import packages####### import csv import re import requests import os import os.path import errno from time import sleep import sys sys.path.insert(0, './Functions_scripts') # Allows to set a different path for the scripts being called below (but only if it branches off of the root dir) from downloaddate_function import downloaddate, longtime from nz_rowfixer import row_fixer from loggenerator import gen_log #######Toggles####### stata = True #######Initialization####### # Run the downloaddate function to get the date ddate = downloaddate() log_starttime = longtime() # When the script starts for the logfile # Path to save the downloaded data datapath = './data_raw/' # Dropbox folder for project # Variables to store OData endpoint and database tables # # Add $returnall=true to every url baseurl = 'http://www.odata.charities.govt.nz/' register = 'Organisations' # This is returned as xml due to the number of records - $returnall=true grpannreturns = 'GrpOrgAllReturns' #'GrpOrgAllReturns?$returnall=true' # This is returned as xml due to the number of records - $returnall=true activities = 'Activities' area = 'AreaOfOperations' beneficiaries = 'Beneficiaries' group = 'Groups' officers = 'Officers' sectors = 'Sectors' funds = 'SourceOfFunds' vorgs = 'vOrganisations' voff = 'vOfficerOrganisations' #######Functions####### # Split the downloaded annual return files into calendar years processedfiles=[] def splitfilesbyyear(filename, data, column, length, width, splityear=0, splitmonth=0, splitday=0, splitemp=0, spliteymonth=0): inputfilepath = datapath + '/' + data + '/' + 'nz_' + data + '_y' + str(splityear) + '_m' + str(spliteymonth) + '_p' + str(splitemp) + '.csv' processedfiles.append(inputfilepath) with open(inputfilepath, 'rb') as file: filedata = file.read() # Replace the target string pattern = re.compile(b'[^\x00-\x7F]') filedata = re.sub(pattern, b'_', filedata) #filedata.replace('[^\x00-\x7F]', '_') # Write the file out again with open(datapath + '/' + 'nz_temp.csv', 'wb') as file: file.write(filedata) outputfiles = {} for year in range(2007,2020): outputfiles[str(year)] = open(filename + str(year) + '.csv', 'a', newline='') outputfiles[str(year) + 'a'] = csv.writer(outputfiles[str(year)]) outputfiles['error'] = open(filename + 'error' + '.csv', 'a', newline='') outputfiles['errora'] = csv.writer(outputfiles['error']) with open(datapath + '/' + 'nz_temp.csv', 'r', newline='') as inCSVfile: reader = csv.reader(inCSVfile) print('-') print(inputfilepath) startrow = 1 rowcounter=0 while rowcounter<startrow: next(reader) rowcounter+=1 for row in reader: #if len(row)==width: # this was the simple check before the function was written, can swtich back to it by commenting out the 2 lines below if the fixer breaks things out_row, fixed = row_fixer(row, width) if fixed==True: try: yearend = out_row[column][len(out_row[column])-length:] # Take the year out of the YearEnded column year = int(yearend) #yearend = yearend[2 - 
len(yearend):] if year>=0 and year <=20: yearend = '20' + yearend elif year >20 and year<=99: yearend = 2000 except: yearend=0 #print(inputfilepath, rowcounter) #print(' ', row[column], ' | -', yearend, '-') else: yearend=0 # Rceode the missing values for Stata if stata == True: out_row = [x if x != 'Null' else '.' for x in out_row] if int(yearend) in range(2007, 2020): # ['2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017']: outputfiles[str(yearend) + 'a'].writerow(out_row) #print('.', end='') else: outputfiles['errora'].writerow(out_row) #print('*', end='') rowcounter+=1 for year in range(2008,2018): outputfiles[str(year)].close() outputfiles['error'].close() # Creates the header rows for the files by year def createannreturnfiles(filename, source): with open(source, 'r', newline='') as inCSVfile: reader = csv.reader(inCSVfile) row = reader.__next__() for year in range(2007,2020): outputfile = open(filename + str(year) + '.csv', 'w', newline='') outputfilew= csv.writer(outputfile) outputfilew.writerow(row) outputfile.close() outputfile = open(filename + 'error' + '.csv', 'w', newline='') outputfilew= csv.writer(outputfile) outputfilew.writerow(row) outputfile.close() return len(row) #######Main program####### search = [] search_big = [voff, grpannreturns] # [] writtenfiles=[] for data in search_big: filename = datapath + '/' + data +'/' + data + '_yr' # nz_vOfficerOrganisations_y2017_m0_p0_20180330.csv for year in range(2007,2020): # This loop creates the output names manually for the log file so it will need updated when 2020 is added - it was taking too long to make automatic but it could be done writtenfiles.append(filename+str(year)+'.csv') writtenfiles.append(filename+'error'+'.csv') filewidth = createannreturnfiles(filename, datapath + '/' + data + '/' + 'nz_' + data + '_y2017' + '_m0' + '_p0' + '.csv') print('Organise', data, 'by year') for year in [2008]: if data == grpannreturns: print('') print('grpannreturns', year) for month in range(1,13,1): splitfilesbyyear(filename, data, 103, 4, filewidth, splityear=year, spliteymonth=month, splitemp=1) # Using column 103 (index from 0) 'CZ' to regroup the files 'YearEnded' splitfilesbyyear(filename, data, 103, 4, filewidth, splityear=year, spliteymonth=month, splitemp=2) elif data == voff: print('') print('voff') for month in range(1,13,1): splitfilesbyyear(filename, data, 14, 2, filewidth, splityear=year, spliteymonth=month) # Using column 14 (index from 0) 'O' to regroup the files 'PositionAppointmentDate' #logcsv.writerow([datetime.today().strftime('%Y%m%d %H:%M'), filename, searchurl, success, fails]) # record in logfile for year in [2007, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017]: if data == grpannreturns: print('') print('grpannreturns', year) splitfilesbyyear(filename, data, 103, 4, filewidth, splityear=year) elif data == voff: print('') print('voff', year) splitfilesbyyear(filename, data, 14, 2, filewidth, splityear=year) # Get csv print('') print('Done sorting ' + data) print('------------------------------------------------------------------------------') os.remove(datapath + '/' + 'nz_temp.csv') print('Removed temp file:',datapath + '/' + 'nz_temp.csv') #Log generator finishtime = longtime() # Get ending time scriptname = os.path.basename(__file__) # Get the current scriptname as a variable scriptpath = (os.path.dirname(os.path.realpath(__file__))) # Get the absolute dir the script is in scriptdesc = 'Because of download limmitations of the API the larger files had to be 
downloaded in chunks. This file re-combines the chunked files for "vOfficerOrganisations" and "GrpOrgAllReturns".' processedfiles = processedfiles # Get the input file details writtenfiles = writtenfiles # Get list of created files WARNING: this list has been created manually and will not update in future years settings_toggles = {'stata': stata} gen_log(log_starttime, finishtime, scriptname, scriptpath, scriptdesc, processedfiles, writtenfiles, str(settings_toggles)) # Pass info to log file generator print('\nAll done!')
py
1a33283f79d9a25f75e6e405bdc779a76cdf45b0
import unittest
import os

from conans.paths import EXPORT_SOURCES_DIR_OLD
from conans.util.files import tar_extract
from conans.test.utils.tools import TestServer, TestClient
from conans.model.ref import ConanFileReference
from conans.test.utils.test_files import temp_folder


class DoNotKeepOldExportSourcesLayoutTest(unittest.TestCase):

    def test_basic(self):
        """Check that we no longer generate tgz files with .c_src, and that it is
        not present in the cache layout either, even when the sources come from a
        .c_src tgz server file.
        """
        test_server = TestServer()
        servers = {"default": test_server}
        client = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
        client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
    name = "Pkg"
    version = "0.1"
    exports_sources = "*.txt"
""",
                     "myfile.txt": "Hello world"})
        client.run("export . lasote/testing")
        client.run("upload Pkg/0.1@lasote/testing")
        client.run("remove * -f")
        client.run("search")
        self.assertIn("There are no packages", client.user_io.out)

        conan_reference = ConanFileReference.loads("Pkg/0.1@lasote/testing")
        path = test_server.paths.export(conan_reference)
        sources_tgz = os.path.join(path, "conan_sources.tgz")
        self.assertTrue(os.path.exists(sources_tgz))
        folder = temp_folder()
        with open(sources_tgz, 'rb') as file_handler:
            tar_extract(file_handler, folder)
        self.assertEqual(os.listdir(folder), ["myfile.txt"])

        # Now install again
        client.run("install Pkg/0.1@lasote/testing --build=missing")
        export = client.client_cache.export(conan_reference)
        self.assertNotIn(EXPORT_SOURCES_DIR_OLD, os.listdir(export))
        export_sources = client.client_cache.export_sources(conan_reference)
        self.assertEqual(os.listdir(export_sources), ["myfile.txt"])
py
1a3328bf9dc07c84442d25e219eaa501f96a5b2e
from copy import copy, deepcopy from typing import ( Any, Collection, Dict, List, NamedTuple, Optional, Set, Union, cast, ) from ..error import GraphQLError from ..language import ast from ..pyutils import inspect, is_collection, is_description, FrozenList from .definition import ( GraphQLAbstractType, GraphQLInterfaceType, GraphQLInputObjectType, GraphQLNamedType, GraphQLObjectType, GraphQLUnionType, GraphQLType, GraphQLWrappingType, get_named_type, is_input_object_type, is_interface_type, is_object_type, is_union_type, is_wrapping_type, ) from .directives import GraphQLDirective, specified_directives, is_directive from .introspection import introspection_types __all__ = ["GraphQLSchema", "is_schema", "assert_schema"] TypeMap = Dict[str, GraphQLNamedType] class InterfaceImplementations(NamedTuple): objects: List[GraphQLObjectType] interfaces: List[GraphQLInterfaceType] class GraphQLSchema: """Schema Definition A Schema is created by supplying the root types of each type of operation, query and mutation (optional). A schema definition is then supplied to the validator and executor. Example:: MyAppSchema = GraphQLSchema( query=MyAppQueryRootType, mutation=MyAppMutationRootType) Note: When the schema is constructed, by default only the types that are reachable by traversing the root types are included, other types must be explicitly referenced. Example:: character_interface = GraphQLInterfaceType('Character', ...) human_type = GraphQLObjectType( 'Human', interfaces=[character_interface], ...) droid_type = GraphQLObjectType( 'Droid', interfaces: [character_interface], ...) schema = GraphQLSchema( query=GraphQLObjectType('Query', fields={'hero': GraphQLField(character_interface, ....)}), ... # Since this schema references only the `Character` interface it's # necessary to explicitly list the types that implement it if # you want them to be included in the final schema. types=[human_type, droid_type]) Note: If a list of ``directives`` is provided to GraphQLSchema, that will be the exact list of directives represented and allowed. If ``directives`` is not provided, then a default set of the specified directives (e.g. @include and @skip) will be used. If you wish to provide *additional* directives to these specified directives, you must explicitly declare them. Example:: MyAppSchema = GraphQLSchema( ... directives=specified_directives + [my_custom_directive]) """ query_type: Optional[GraphQLObjectType] mutation_type: Optional[GraphQLObjectType] subscription_type: Optional[GraphQLObjectType] type_map: TypeMap directives: FrozenList[GraphQLDirective] description: Optional[str] extensions: Optional[Dict[str, Any]] ast_node: Optional[ast.SchemaDefinitionNode] extension_ast_nodes: Optional[FrozenList[ast.SchemaExtensionNode]] _implementations_map: Dict[str, InterfaceImplementations] _sub_type_map: Dict[str, Set[str]] _validation_errors: Optional[List[GraphQLError]] def __init__( self, query: Optional[GraphQLObjectType] = None, mutation: Optional[GraphQLObjectType] = None, subscription: Optional[GraphQLObjectType] = None, types: Optional[Collection[GraphQLNamedType]] = None, directives: Optional[Collection[GraphQLDirective]] = None, description: Optional[str] = None, extensions: Optional[Dict[str, Any]] = None, ast_node: Optional[ast.SchemaDefinitionNode] = None, extension_ast_nodes: Optional[Collection[ast.SchemaExtensionNode]] = None, assume_valid: bool = False, ) -> None: """Initialize GraphQL schema. 
If this schema was built from a source known to be valid, then it may be marked with ``assume_valid`` to avoid an additional type system validation. """ self._validation_errors = [] if assume_valid else None # Check for common mistakes during construction to produce clear and early # error messages, but we leave the specific tests for the validation. if query and not isinstance(query, GraphQLType): raise TypeError("Expected query to be a GraphQL type.") if mutation and not isinstance(mutation, GraphQLType): raise TypeError("Expected mutation to be a GraphQL type.") if subscription and not isinstance(subscription, GraphQLType): raise TypeError("Expected subscription to be a GraphQL type.") if types is None: types = [] else: if not is_collection(types) or not all( isinstance(type_, GraphQLType) for type_ in types ): raise TypeError( "Schema types must be specified as a collection of GraphQL types." ) if directives is not None: # noinspection PyUnresolvedReferences if not is_collection(directives): raise TypeError("Schema directives must be a collection.") if not isinstance(directives, FrozenList): directives = FrozenList(directives) if description is not None and not is_description(description): raise TypeError("Schema description must be a string.") if extensions is not None and ( not isinstance(extensions, dict) or not all(isinstance(key, str) for key in extensions) ): raise TypeError("Schema extensions must be a dictionary with string keys.") if ast_node and not isinstance(ast_node, ast.SchemaDefinitionNode): raise TypeError("Schema AST node must be a SchemaDefinitionNode.") if extension_ast_nodes: if not is_collection(extension_ast_nodes) or not all( isinstance(node, ast.SchemaExtensionNode) for node in extension_ast_nodes ): raise TypeError( "Schema extension AST nodes must be specified" " as a collection of SchemaExtensionNode instances." ) if not isinstance(extension_ast_nodes, FrozenList): extension_ast_nodes = FrozenList(extension_ast_nodes) self.description = description self.extensions = extensions self.ast_node = ast_node self.extension_ast_nodes = ( cast(FrozenList[ast.SchemaExtensionNode], extension_ast_nodes) if extension_ast_nodes else None ) self.query_type = query self.mutation_type = mutation self.subscription_type = subscription # Provide specified directives (e.g. @include and @skip) by default self.directives = ( specified_directives if directives is None else cast(FrozenList[GraphQLDirective], directives) ) # To preserve order of user-provided types, we add first to add them to # the set of "collected" types, so `collect_referenced_types` ignore them. if types: all_referenced_types = TypeSet.with_initial_types(types) collect_referenced_types = all_referenced_types.collect_referenced_types for type_ in types: # When we are ready to process this type, we remove it from "collected" # types and then add it together with all dependent types in the correct # position. del all_referenced_types[type_] collect_referenced_types(type_) else: all_referenced_types = TypeSet() collect_referenced_types = all_referenced_types.collect_referenced_types if query: collect_referenced_types(query) if mutation: collect_referenced_types(mutation) if subscription: collect_referenced_types(subscription) for directive in self.directives: # Directives are not validated until validate_schema() is called. 
if is_directive(directive): for arg in directive.args.values(): collect_referenced_types(arg.type) collect_referenced_types(introspection_types["__Schema"]) # Storing the resulting map for reference by the schema. type_map: TypeMap = {} self.type_map = type_map self._sub_type_map = {} # Keep track of all implementations by interface name. implementations_map: Dict[str, InterfaceImplementations] = {} self._implementations_map = implementations_map for named_type in all_referenced_types: if not named_type: continue type_name = getattr(named_type, "name", None) if not type_name: raise TypeError( "One of the provided types for building the Schema" " is missing a name.", ) if type_name in type_map: raise TypeError( "Schema must contain uniquely named types" f" but contains multiple types named '{type_name}'." ) type_map[type_name] = named_type if is_interface_type(named_type): named_type = cast(GraphQLInterfaceType, named_type) # Store implementations by interface. for iface in named_type.interfaces: if is_interface_type(iface): iface = cast(GraphQLInterfaceType, iface) if iface.name in implementations_map: implementations = implementations_map[iface.name] else: implementations = implementations_map[ iface.name ] = InterfaceImplementations(objects=[], interfaces=[]) implementations.interfaces.append(named_type) elif is_object_type(named_type): named_type = cast(GraphQLObjectType, named_type) # Store implementations by objects. for iface in named_type.interfaces: if is_interface_type(iface): iface = cast(GraphQLInterfaceType, iface) if iface.name in implementations_map: implementations = implementations_map[iface.name] else: implementations = implementations_map[ iface.name ] = InterfaceImplementations(objects=[], interfaces=[]) implementations.objects.append(named_type) def to_kwargs(self) -> Dict[str, Any]: return dict( query=self.query_type, mutation=self.mutation_type, subscription=self.subscription_type, types=FrozenList(self.type_map.values()) or None, directives=self.directives[:], description=self.description, extensions=self.extensions, ast_node=self.ast_node, extension_ast_nodes=self.extension_ast_nodes or FrozenList(), assume_valid=self._validation_errors is not None, ) def __copy__(self) -> "GraphQLSchema": # pragma: no cover return self.__class__(**self.to_kwargs()) def __deepcopy__(self, memo_: Dict) -> "GraphQLSchema": from ..type import ( is_introspection_type, is_specified_scalar_type, is_specified_directive, ) type_map: TypeMap = { name: copy(type_) for name, type_ in self.type_map.items() if not is_introspection_type(type_) and not is_specified_scalar_type(type_) } types = type_map.values() for type_ in types: remap_named_type(type_, type_map) directives = [ directive if is_specified_directive(directive) else copy(directive) for directive in self.directives ] return self.__class__( self.query_type and cast(GraphQLObjectType, type_map[self.query_type.name]), self.mutation_type and cast(GraphQLObjectType, type_map[self.mutation_type.name]), self.subscription_type and cast(GraphQLObjectType, type_map[self.subscription_type.name]), types, directives, self.description, extensions=deepcopy(self.extensions), ast_node=deepcopy(self.ast_node), extension_ast_nodes=deepcopy(self.extension_ast_nodes), assume_valid=True, ) def get_type(self, name: str) -> Optional[GraphQLNamedType]: return self.type_map.get(name) def get_possible_types( self, abstract_type: GraphQLAbstractType ) -> List[GraphQLObjectType]: """Get list of all possible concrete types for given abstract type.""" return ( 
cast(GraphQLUnionType, abstract_type).types if is_union_type(abstract_type) else self.get_implementations( cast(GraphQLInterfaceType, abstract_type) ).objects ) def get_implementations( self, interface_type: GraphQLInterfaceType ) -> InterfaceImplementations: return self._implementations_map.get( interface_type.name, InterfaceImplementations(objects=[], interfaces=[]) ) def is_possible_type( self, abstract_type: GraphQLAbstractType, possible_type: GraphQLObjectType ) -> bool: """Check whether a concrete type is possible for an abstract type. Deprecated: Use is_sub_type() instead. """ return self.is_sub_type(abstract_type, possible_type) def is_sub_type( self, abstract_type: GraphQLAbstractType, maybe_sub_type: GraphQLNamedType, ) -> bool: """Check whether a type is a subtype of a given abstract type.""" types = self._sub_type_map.get(abstract_type.name) if types is None: types = set() add = types.add if is_union_type(abstract_type): for type_ in cast(GraphQLUnionType, abstract_type).types: add(type_.name) else: implementations = self.get_implementations( cast(GraphQLInterfaceType, abstract_type) ) for type_ in implementations.objects: add(type_.name) for type_ in implementations.interfaces: add(type_.name) self._sub_type_map[abstract_type.name] = types return maybe_sub_type.name in types def get_directive(self, name: str) -> Optional[GraphQLDirective]: for directive in self.directives: if directive.name == name: return directive return None @property def validation_errors(self) -> Optional[List[GraphQLError]]: return self._validation_errors class TypeSet(Dict[GraphQLNamedType, None]): """An ordered set of types that can be collected starting from initial types.""" @classmethod def with_initial_types(cls, types: Collection[GraphQLType]) -> "TypeSet": return cast(TypeSet, super().fromkeys(types)) def collect_referenced_types(self, type_: GraphQLType) -> None: """Recursive function supplementing the type starting from an initial type.""" named_type = get_named_type(type_) if named_type in self: return self[named_type] = None collect_referenced_types = self.collect_referenced_types if is_union_type(named_type): named_type = cast(GraphQLUnionType, named_type) for member_type in named_type.types: collect_referenced_types(member_type) elif is_object_type(named_type) or is_interface_type(named_type): named_type = cast( Union[GraphQLObjectType, GraphQLInterfaceType], named_type ) for interface_type in named_type.interfaces: collect_referenced_types(interface_type) for field in named_type.fields.values(): collect_referenced_types(field.type) for arg in field.args.values(): collect_referenced_types(arg.type) elif is_input_object_type(named_type): named_type = cast(GraphQLInputObjectType, named_type) for field in named_type.fields.values(): collect_referenced_types(field.type) def is_schema(schema: Any) -> bool: """Test if the given value is a GraphQL schema.""" return isinstance(schema, GraphQLSchema) def assert_schema(schema: Any) -> GraphQLSchema: if not is_schema(schema): raise TypeError(f"Expected {inspect(schema)} to be a GraphQL schema.") return cast(GraphQLSchema, schema) def remapped_type(type_: GraphQLType, type_map: TypeMap) -> GraphQLType: """Get a copy of the given type that uses this type map.""" if is_wrapping_type(type_): type_ = cast(GraphQLWrappingType, type_) return type_.__class__(remapped_type(type_.of_type, type_map)) type_ = cast(GraphQLNamedType, type_) return type_map.get(type_.name, type_) def remap_named_type(type_: GraphQLNamedType, type_map: TypeMap) -> None: """Change all 
references in the given named type to use this type map.""" if is_union_type(type_): type_ = cast(GraphQLUnionType, type_) type_.types = [ type_map.get(member_type.name, member_type) for member_type in type_.types ] elif is_object_type(type_) or is_interface_type(type_): type_ = cast(Union[GraphQLObjectType, GraphQLInterfaceType], type_) type_.interfaces = [ type_map.get(interface_type.name, interface_type) for interface_type in type_.interfaces ] fields = type_.fields for field_name, field in fields.items(): field = copy(field) field.type = remapped_type(field.type, type_map) args = field.args for arg_name, arg in args.items(): arg = copy(arg) arg.type = remapped_type(arg.type, type_map) args[arg_name] = arg fields[field_name] = field elif is_input_object_type(type_): type_ = cast(GraphQLInputObjectType, type_) fields = type_.fields for field_name, field in fields.items(): field = copy(field) field.type = remapped_type(field.type, type_map) fields[field_name] = field
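A minimal usage sketch of the sub-type bookkeeping above, assuming graphql-core's public constructors (GraphQLSchema, GraphQLObjectType, GraphQLInterfaceType, GraphQLField, GraphQLString) and hypothetical type names Node/User/Query; it shows how get_implementations() and is_sub_type() answer interface-membership questions once a schema is built:

from graphql import (
    GraphQLField, GraphQLInterfaceType, GraphQLObjectType,
    GraphQLSchema, GraphQLString,
)

# A hypothetical interface with one implementing object type.
node = GraphQLInterfaceType("Node", {"id": GraphQLField(GraphQLString)})
user = GraphQLObjectType("User", {"id": GraphQLField(GraphQLString)},
                         interfaces=[node])
query = GraphQLObjectType("Query", {"user": GraphQLField(user)})
schema = GraphQLSchema(query=query, types=[user])

# get_implementations() lists the object/interface types implementing Node;
# is_sub_type() answers the same membership question via the cached _sub_type_map.
assert user in schema.get_implementations(node).objects
assert schema.is_sub_type(node, user)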
py
1a3329cc3852b8f27e50bc212b0f5ebf14f17ad5
import re
import sys
import argparse


def parse_fasta(fh):
    fa = {}
    current_short_name = None
    # Part 1: compile list of lines per sequence
    for ln in fh:
        if ln[0] == '>':
            # new name line; remember current sequence's short name
            long_name = ln[1:].rstrip()
            current_short_name = long_name.split()[0]
            fa[current_short_name] = []
        else:
            # append nucleotides to current sequence
            fa[current_short_name].append(ln.rstrip())
    # Part 2: join lists into strings
    for short_name, nuc_list in fa.iteritems():
        # join this sequence's lines into one long string
        fa[short_name] = ''.join(nuc_list)
    return fa


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-a","--assembly",help="assembled contigs")
    parser.add_argument("-e","--enzyme",help="restriction enzyme")
    #parser.add_argument("-m","--mapping", help="mapping of read to contigs in bam format")
    #parser.add_argument("-d","--dir",help="output directory for results",default='out')
    args = parser.parse_args()

    f = parse_fasta(open(args.assembly,'r'))

    if args.enzyme == "DNASE":
        for key in f:
            id, seq = key, f[key]
            print id, len(seq)/2, len(seq)/2
        sys.exit(0)

    enzymes_input = args.enzyme.replace(' ','').split(',')
    final_enzymes = []
    for each in enzymes_input:
        if 'N' in each:
            final_enzymes.append(each.replace('N','G'))
            final_enzymes.append(each.replace('N','A'))
            final_enzymes.append(each.replace('N','T'))
            final_enzymes.append(each.replace('N','C'))
        else:
            final_enzymes.append(each)

    for key in f:
        id,seq = key, f[key]
        left_count = 0
        rigt_count = 0
        for enzyme in final_enzymes:
            pos = [m.start(0) for m in re.finditer(enzyme,seq)]
            length = len(seq)
            for each in pos:
                if each < length/2:
                    left_count += 1
                else:
                    rigt_count += 1
        # pos = [m.start(0) for m in re.finditer(r"GA[ACTG]TC",seq)]
        # length = len(seq)
        # left_count = 0
        # rigt_count = 0
        # for each in pos:
        #     if each < length/2:
        #         left_count += 1
        #     else:
        #         rigt_count += 1
        print id, left_count, rigt_count


if __name__ == '__main__':
    main()
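A quick, hypothetical check of parse_fasta() above (the script is Python 2: it relies on dict.iteritems() and print statements); the contig names and bases are made up:

from StringIO import StringIO

fasta = StringIO(">contig1 some description\nGAATTC\nGGATCC\n>contig2\nACGT\n")
seqs = parse_fasta(fasta)
print seqs["contig1"]   # GAATTCGGATCC -- header text after the first word is dropped
print seqs["contig2"]   # ACGT

main() then counts, for every contig, how many restriction-enzyme matches fall in the left and right halves of the sequence, printing one "name left_count right_count" line per contig.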
py
1a3329d2eca3074ddc7f20adaf24aa1b4eca7d9e
import os

PRODUCTION = os.environ.get('PRODUCTION', False)
DEBUG = not PRODUCTION
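One caveat worth noting with this pattern: os.environ values are strings, so any non-empty setting of PRODUCTION, including '0' or 'false', is truthy and disables DEBUG. A small illustration (the environment value is hypothetical):

import os

os.environ['PRODUCTION'] = '0'                    # operator intended "not production"
PRODUCTION = os.environ.get('PRODUCTION', False)  # '0' -- a non-empty, truthy string
DEBUG = not PRODUCTION                            # False: debugging is turned off anyway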
py
1a332ae656dc937c3748dcf31d80df03fe4b62f9
from django.db import migrations, models


def forwards_func(apps, schema_editor):
    Beer = apps.get_model("beerfest", "Beer")
    for beer in Beer.objects.all():
        old_abv = beer.abv
        if old_abv is None:
            continue  # skip null values
        new_abv = old_abv / 10
        if new_abv >= 100:
            raise ValueError(
                f"ABV {new_abv:.1f}% greater than or equal to 100%. "
                "Aborting migration"
            )
        beer.abv = new_abv
        beer.save()


def reverse_func(apps, schema_editor):
    Beer = apps.get_model("beerfest", "Beer")
    for beer in Beer.objects.all():
        old_abv = beer.abv
        if old_abv is None:
            continue  # skip null values
        beer.abv = int(old_abv * 10)
        beer.save()


class Migration(migrations.Migration):

    dependencies = [
        ('beerfest', '0004_auto_20181026_0212'),
    ]

    operations = [
        migrations.AlterField(
            model_name='beer',
            name='abv',
            field=models.DecimalField(
                blank=True, decimal_places=1, max_digits=5, null=True
            ),
        ),
        migrations.RunPython(forwards_func, reverse_func),
        migrations.AlterField(
            model_name='beer',
            name='abv',
            field=models.DecimalField(
                blank=True, decimal_places=1, max_digits=3, null=True),
        ),
    ]
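A minimal sketch of what the forward data migration does to each stored value, assuming ABV was previously stored as tenths of a percent (e.g. 45 for 4.5%); the widen-to-max_digits=5 / convert / narrow-to-max_digits=3 ordering exists so every intermediate value fits the column:

old_abv = 45            # hypothetical stored value, i.e. 4.5% * 10
new_abv = old_abv / 10  # -> 4.5, the value kept after the migration
assert new_abv < 100    # forwards_func raises ValueError for implausible ABVs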
py
1a332bf32dfff439c2224b35073e74b60f28607f
# Generated by Django 3.2.4 on 2021-08-02 16:15

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0011_myrating'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='ratings',
            field=models.ManyToManyField(blank=True, related_name='rating', to='blog.MyRating'),
        ),
    ]
py
1a332c8b1aacc13495fa6b2bedb3959df7b186a9
import collections import sys from .compat import recursive_repr, abc from _pmem import ffi # XXX refactor to make this import unneeded? # XXX: refactor to allocate this instead of hardcoding it. LIST_POBJPTR_ARRAY_TYPE_NUM = 30 class PersistentList(abc.MutableSequence): """Persistent version of the 'list' type.""" # XXX locking! # XXX All bookkeeping attrs should be _v_xxxx so that all other attrs # (other than _p_mm) can be made persistent. def __init__(self, *args, **kw): if not args: return if len(args) != 1: raise TypeError("PersistentList takes at most 1" " argument, {} given".format(len(args))) self.extend(args[0]) def _p_new(self, manager): mm = self._p_mm = manager with mm.transaction(): # XXX Will want to implement a freelist here, like CPython self._p_oid = mm.zalloc(ffi.sizeof('PListObject')) ob = ffi.cast('PObject *', mm.direct(self._p_oid)) ob.ob_type = mm._get_type_code(PersistentList) self._body = ffi.cast('PListObject *', mm.direct(self._p_oid)) def _p_resurrect(self, manager, oid): mm = self._p_mm = manager self._p_oid = oid self._body = ffi.cast('PListObject *', mm.direct(oid)) # Methods and properties needed to implement the ABC required methods. @property def _size(self): return ffi.cast('PVarObject *', self._body).ob_size @property def _allocated(self): return self._body.allocated @property def _items(self): mm = self._p_mm ob_items = mm.otuple(self._body.ob_items) if ob_items == mm.OID_NULL: return None return ffi.cast('PObjPtr *', mm.direct(ob_items)) def _resize(self, newsize): # Note that resize does *not* set self._size. That needs to be done by # the caller such that that the we never expose invalid item cells. # The size field is covered by a snapshot done here, though. mm = self._p_mm allocated = self._allocated # Only realloc if we don't have enough space already. if (allocated >= newsize and newsize >= allocated >> 1): assert self._items != None or newsize == 0 with mm.transaction(): ob = ffi.cast('PVarObject *', self._body) mm.snapshot_range(ffi.addressof(ob, 'ob_size'), ffi.sizeof('size_t')) ob.ob_size = newsize return # We use CPython's overallocation algorithm. 
new_allocated = (newsize >> 3) + (3 if newsize < 9 else 6) + newsize if newsize == 0: new_allocated = 0 items = self._items with mm.transaction(): if items is None: items = mm.zalloc(new_allocated * ffi.sizeof('PObjPtr'), type_num=LIST_POBJPTR_ARRAY_TYPE_NUM) else: items = mm.zrealloc(self._body.ob_items, new_allocated * ffi.sizeof('PObjPtr'), LIST_POBJPTR_ARRAY_TYPE_NUM) mm.snapshot_range(self._body, ffi.sizeof('PListObject')) self._body.ob_items = items self._body.allocated = new_allocated def insert(self, index, value): mm = self._p_mm size = self._size newsize = size + 1 with mm.transaction(): self._resize(newsize) if index < 0: index += size if index < 0: index = 0 if index > size: index = size items = self._items mm.snapshot_range(items + index, ffi.offsetof('PObjPtr *', newsize)) for i in range(size, index, -1): items[i] = items[i-1] v_oid = mm.persist(value) mm.incref(v_oid) items[index] = v_oid ffi.cast('PVarObject *', self._body).ob_size = newsize def _normalize_index(self, index): try: index = int(index) except TypeError: # Assume it is a slice # XXX fixme raise NotImplementedError("Slicing not yet implemented") if index < 0: index += self._size if index < 0 or index >= self._size: raise IndexError(index) return index def __setitem__(self, index, value): mm = self._p_mm index = self._normalize_index(index) items = self._items with mm.transaction(): v_oid = mm.persist(value) mm.snapshot_range(ffi.addressof(items, index), ffi.sizeof('PObjPtr *')) mm.xdecref(items[index]) items[index] = v_oid mm.incref(v_oid) def __delitem__(self, index): mm = self._p_mm index = self._normalize_index(index) size = self._size newsize = size - 1 items = self._items with mm.transaction(): ffi.cast('PVarObject *', self._body).ob_size = newsize # We can't completely hide the process of transformation...this # really needs a lock (or translation to GIL-locked C). mm.snapshot_range(ffi.addressof(items, index), ffi.offsetof('PObjPtr *', size)) oid = mm.otuple(items[index]) for i in range(index, newsize): items[i] = items[i+1] mm.decref(oid) self._resize(newsize) def __getitem__(self, index): index = self._normalize_index(index) items = self._items return self._p_mm.resurrect(items[index]) def __len__(self): return self._size # Additional list methods not provided by the ABC. @recursive_repr() def __repr__(self): return "{}([{}])".format(self.__class__.__name__, ', '.join("{!r}".format(x) for x in self)) def __eq__(self, other): if not (isinstance(other, PersistentList) or isinstance(other, list)): return NotImplemented if len(self) != len(other): return False for i in range(len(self)): if self[i] != other[i]: return False return True if sys.version_info[0] < 3: def __ne__(self, other): return not self == other def clear(self): mm = self._p_mm if self._size == 0: return items = self._items with mm.transaction(): size = self._size # Set size to zero now so we never have an invalid state. ffi.cast('PVarObject *', self._body).ob_size = 0 for i in range(size): # Grab oid in tuple form so the assignment can't change it oid = mm.otuple(items[i]) items[i] = mm.OID_NULL mm.decref(oid) self._resize(0) # Additional methods required by the pmemobj API. def _p_traverse(self): items = self._items for i in range(len(self)): yield items[i] def _p_substructures(self): return ((self._body.ob_items, LIST_POBJPTR_ARRAY_TYPE_NUM),) def _p_deallocate(self): self.clear()
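The growth policy used by _resize() above is CPython's list over-allocation formula; a small stand-alone illustration of the allocation sizes it produces (the sample sizes are chosen arbitrarily):

def overallocate(newsize):
    # mirrors: new_allocated = (newsize >> 3) + (3 if newsize < 9 else 6) + newsize
    return (newsize >> 3) + (3 if newsize < 9 else 6) + newsize

print([overallocate(n) for n in (1, 4, 8, 16, 25, 52)])
# [4, 7, 12, 24, 34, 64]

Because _resize() only reallocates when the requested size falls outside the [allocated >> 1, allocated] window, repeated inserts stay amortised O(1), just as for the built-in list.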
py
1a332d0ef7f112225f8f94702ebe7577d485c06c
# Copyright 2022 Kaiyu Zheng # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from sciex import Experiment, Trial, Event, Result from moos3d.tests.experiments.runner import * from moos3d.tests.experiments.experiment import make_domain, make_trial from moos3d import * import matplotlib.pyplot as plt import os import random ABS_PATH = os.path.dirname(os.path.abspath(__file__)) output_dir = os.path.join(ABS_PATH, "results") prior_type = "uniform" discount_factor = 0.99 detect_after_look = True """ This experiment investigates sensor quality. Fix alpha, alter beta. Or fix beta, alter alpha. The alpha will be fixed at 1e6. The beta will be fixed at 0. To separate the possible interaction between the two numbers. We run the experiments on 16x16x16 worlds with 4 objects. """ def main(): # Check experiment_scalability.py for comments on `num_trials` num_trials = 14 # running on 3 computers. so 14*3 = 42 > 40. domain = (16, 2, 10, 10, 3.0, 500, 360) n, k, d, max_depth, planning_time, max_steps, max_time = domain if n == 16: setting_hier = [(1,1,max_depth), (2,2,max_depth), (4,4,max_depth)] setting_op = [(1,1,max_depth), (1,2,max_depth), (1,4,max_depth)] ## parameters big = 1000 small = 1 exploration_const = 1000 params = {"prior_type": prior_type, "discount_factor": discount_factor, "max_depth": max_depth, "planning_time": planning_time, "max_steps": max_steps, "max_time": max_time, "detect_after_look": detect_after_look, "big": big, "small": small, "exploration_const": exploration_const} alpha_fixed = 1e5 beta_fixed = 0 # SIMPLER to understand! scenarios = [(1e1, 0.3), # severe noise (1e1, 0.8), (1e2, 0.3), (1e2, 0.8), (5e2, 0.3), (5e2, 0.8), (1e3, 0.3), (1e3, 0.8), (1e4, 0.3), (1e4, 0.8), (1e5, 0.3), (1e5, 0.8)] # no noise all_trials = [] # Generate a world. For the same world, run different sensors & baselines. 
# Do this for #num_trials number of worlds for t in range(num_trials): seed = random.randint(1, 1000000) # build world worldstr = make_domain(n, k, d) # Run different sensors and baselines for i in range(len(scenarios)): alpha, beta = scenarios[i] params['alpha'] = alpha params['beta'] = beta trial_name = "quality%s_%s" % (str(scenarios[i]).replace(", ", "-"), str(seed)) pouct_trial = make_trial(trial_name, worldstr, "pouct", "octree", **params) multires_trial = make_trial(trial_name, worldstr, "hierarchical", "octree", setting=setting_hier, **params) options_trial = make_trial(trial_name, worldstr, "options", "octree", setting=setting_op, **params) pomcp_trial = make_trial(trial_name, worldstr, "pomcp", "particles", num_particles=1000, **params) random_trial = make_trial(trial_name, worldstr, "purelyrandom", "octree", **params) porollout_trial = make_trial(trial_name, worldstr, "porollout", "octree", porollout_policy=PolicyModel(detect_after_look=detect_after_look), **params) all_trials.extend([pouct_trial, multires_trial, options_trial, pomcp_trial, porollout_trial, random_trial]) # Generate scripts to run experiments and gather results exp = Experiment("QualitySensorCC", all_trials, output_dir, verbose=True) exp.generate_trial_scripts(split=5) print("Find multiple computers to run these experiments.") if __name__ == "__main__": main()
py
1a332d74a3096573c1e4ac9b9290b03f76e1eba0
from flask import render_template, request

from rhytmic_exam_app import db
from rhytmic_exam_app.errors import bp


@bp.app_errorhandler(404)
def not_found_error(error):
    return render_template("errors/404.html"), 404


@bp.app_errorhandler(500)
def internal_error(error):
    db.session.rollback()
    return render_template("errors/500.html"), 500
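These handlers only take effect once the errors blueprint is registered on the application. A hypothetical wiring sketch (only rhytmic_exam_app.errors.bp comes from the code above; in the real project the Flask app presumably lives in the package's own factory/init):

from flask import Flask

from rhytmic_exam_app.errors import bp as errors_bp

app = Flask(__name__)
app.register_blueprint(errors_bp)
# app_errorhandler (unlike errorhandler) registers the 404/500 pages
# application-wide, not just for routes defined on this blueprint.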
py
1a332d7e91c6bc0d3451f5231b4ffaac4fddbcc4
# -*- coding: utf-8 -* PROVINCE_CHOICES = ( ('AG', 'Agrigento'), ('AL', 'Alessandria'), ('AN', 'Ancona'), ('AO', 'Aosta'), ('AR', 'Arezzo'), ('AP', 'Ascoli Piceno'), ('AT', 'Asti'), ('AV', 'Avellino'), ('BA', 'Bari'), ('BT', 'Barletta-Andria-Trani'), # active starting from 2009 ('BL', 'Belluno'), ('BN', 'Benevento'), ('BG', 'Bergamo'), ('BI', 'Biella'), ('BO', 'Bologna'), ('BZ', 'Bolzano/Bozen'), ('BS', 'Brescia'), ('BR', 'Brindisi'), ('CA', 'Cagliari'), ('CL', 'Caltanissetta'), ('CB', 'Campobasso'), ('CI', 'Carbonia-Iglesias'), ('CE', 'Caserta'), ('CT', 'Catania'), ('CZ', 'Catanzaro'), ('CH', 'Chieti'), ('CO', 'Como'), ('CS', 'Cosenza'), ('CR', 'Cremona'), ('KR', 'Crotone'), ('CN', 'Cuneo'), ('EN', 'Enna'), ('FM', 'Fermo'), # active starting from 2009 ('FE', 'Ferrara'), ('FI', 'Firenze'), ('FG', 'Foggia'), ('FC', 'Forlì-Cesena'), ('FR', 'Frosinone'), ('GE', 'Genova'), ('GO', 'Gorizia'), ('GR', 'Grosseto'), ('IM', 'Imperia'), ('IS', 'Isernia'), ('SP', 'La Spezia'), ('AQ', u'L’Aquila'), ('LT', 'Latina'), ('LE', 'Lecce'), ('LC', 'Lecco'), ('LI', 'Livorno'), ('LO', 'Lodi'), ('LU', 'Lucca'), ('MC', 'Macerata'), ('MN', 'Mantova'), ('MS', 'Massa-Carrara'), ('MT', 'Matera'), ('VS', 'Medio Campidano'), ('ME', 'Messina'), ('MI', 'Milano'), ('MO', 'Modena'), ('MB', 'Monza e Brianza'), # active starting from 2009 ('NA', 'Napoli'), ('NO', 'Novara'), ('NU', 'Nuoro'), ('OG', 'Ogliastra'), ('OT', 'Olbia-Tempio'), ('OR', 'Oristano'), ('PD', 'Padova'), ('PA', 'Palermo'), ('PR', 'Parma'), ('PV', 'Pavia'), ('PG', 'Perugia'), ('PU', 'Pesaro e Urbino'), ('PE', 'Pescara'), ('PC', 'Piacenza'), ('PI', 'Pisa'), ('PT', 'Pistoia'), ('PN', 'Pordenone'), ('PZ', 'Potenza'), ('PO', 'Prato'), ('RG', 'Ragusa'), ('RA', 'Ravenna'), ('RC', 'Reggio Calabria'), ('RE', 'Reggio Emilia'), ('RI', 'Rieti'), ('RN', 'Rimini'), ('RM', 'Roma'), ('RO', 'Rovigo'), ('SA', 'Salerno'), ('SS', 'Sassari'), ('SV', 'Savona'), ('SI', 'Siena'), ('SR', 'Siracusa'), ('SO', 'Sondrio'), ('TA', 'Taranto'), ('TE', 'Teramo'), ('TR', 'Terni'), ('TO', 'Torino'), ('TP', 'Trapani'), ('TN', 'Trento'), ('TV', 'Treviso'), ('TS', 'Trieste'), ('UD', 'Udine'), ('VA', 'Varese'), ('VE', 'Venezia'), ('VB', 'Verbano Cusio Ossola'), ('VC', 'Vercelli'), ('VR', 'Verona'), ('VV', 'Vibo Valentia'), ('VI', 'Vicenza'), ('VT', 'Viterbo'), )
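A hypothetical Django model field showing how a choices tuple like this is typically consumed (the Address model name is made up; the stored value is the two-letter code, the display value the province name):

from django.db import models

class Address(models.Model):
    province = models.CharField(max_length=2, choices=PROVINCE_CHOICES)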
py
1a332d934aae169f525e503f9958448d0eeda034
""" Integral Transforms """ from __future__ import print_function, division from sympy.core import S from sympy.core.compatibility import reduce from sympy.core.function import Function from sympy.core.numbers import oo from sympy.core.symbol import Dummy from sympy.integrals import integrate, Integral from sympy.integrals.meijerint import _dummy from sympy.logic.boolalg import to_cnf, conjuncts, disjuncts, Or, And from sympy.simplify import simplify from sympy.utilities import default_sort_key ########################################################################## # Helpers / Utilities ########################################################################## class IntegralTransformError(NotImplementedError): """ Exception raised in relation to problems computing transforms. This class is mostly used internally; if integrals cannot be computed objects representing unevaluated transforms are usually returned. The hint ``needeval=True`` can be used to disable returning transform objects, and instead raise this exception if an integral cannot be computed. """ def __init__(self, transform, function, msg): super(IntegralTransformError, self).__init__( "%s Transform could not be computed: %s." % (transform, msg)) self.function = function class IntegralTransform(Function): """ Base class for integral transforms. This class represents unevaluated transforms. To implement a concrete transform, derive from this class and implement the _compute_transform(f, x, s, **hints) and _as_integral(f, x, s) functions. If the transform cannot be computed, raise IntegralTransformError. Also set cls._name. Implement self._collapse_extra if your function returns more than just a number and possibly a convergence condition. """ nargs = 3 @property def function(self): """ The function to be transformed. """ return self.args[0] @property def function_variable(self): """ The dependent variable of the function to be transformed. """ return self.args[1] @property def transform_variable(self): """ The independent transform variable. """ return self.args[2] @property def free_symbols(self): """ This method returns the symbols that will exist when the transform is evaluated. """ return self.function.free_symbols.union(set([self.transform_variable])) \ - set([self.function_variable]) def _compute_transform(self, f, x, s, **hints): raise NotImplementedError def _as_integral(self, f, x, s): raise NotImplementedError def _collapse_extra(self, extra): from sympy import And cond = And(*extra) if cond == False: raise IntegralTransformError(self.__class__.name, None, '') def doit(self, **hints): """ Try to evaluate the transform in closed form. This general function handles linearity, but apart from that leaves pretty much everything to _compute_transform. Standard hints are the following: - ``simplify``: whether or not to simplify the result - ``noconds``: if True, don't return convergence conditions - ``needeval``: if True, raise IntegralTransformError instead of returning IntegralTransform objects The default values of these hints depend on the concrete transform, usually the default is ``(simplify, noconds, needeval) = (True, False, False)``. 
""" from sympy import Add, expand_mul, Mul from sympy.core.function import AppliedUndef needeval = hints.pop('needeval', False) try_directly = not any(func.has(self.function_variable) for func in self.function.atoms(AppliedUndef)) if try_directly: try: return self._compute_transform(self.function, self.function_variable, self.transform_variable, **hints) except IntegralTransformError: pass fn = self.function if not fn.is_Add: fn = expand_mul(fn) if fn.is_Add: hints['needeval'] = needeval res = [self.__class__(*([x] + list(self.args[1:]))).doit(**hints) for x in fn.args] extra = [] ress = [] for x in res: if not isinstance(x, tuple): x = [x] ress.append(x[0]) if len(x) > 1: extra += [x[1:]] res = Add(*ress) if not extra: return res try: extra = self._collapse_extra(extra) return tuple([res]) + tuple(extra) except IntegralTransformError: pass if needeval: raise IntegralTransformError( self.__class__._name, self.function, 'needeval') # TODO handle derivatives etc # pull out constant coefficients coeff, rest = fn.as_coeff_mul(self.function_variable) return coeff*self.__class__(*([Mul(*rest)] + list(self.args[1:]))) @property def as_integral(self): return self._as_integral(self.function, self.function_variable, self.transform_variable) def _eval_rewrite_as_Integral(self, *args): return self.as_integral from sympy.solvers.inequalities import _solve_inequality def _simplify(expr, doit): from sympy import powdenest, piecewise_fold if doit: return simplify(powdenest(piecewise_fold(expr), polar=True)) return expr def _noconds_(default): """ This is a decorator generator for dropping convergence conditions. Suppose you define a function ``transform(*args)`` which returns a tuple of the form ``(result, cond1, cond2, ...)``. Decorating it ``@_noconds_(default)`` will add a new keyword argument ``noconds`` to it. If ``noconds=True``, the return value will be altered to be only ``result``, whereas if ``noconds=False`` the return value will not be altered. The default value of the ``noconds`` keyword will be ``default`` (i.e. the argument of this function). """ def make_wrapper(func): from sympy.core.decorators import wraps @wraps(func) def wrapper(*args, **kwargs): noconds = kwargs.pop('noconds', default) res = func(*args, **kwargs) if noconds: return res[0] return res return wrapper return make_wrapper _noconds = _noconds_(False) ########################################################################## # Mellin Transform ########################################################################## def _default_integrator(f, x): return integrate(f, (x, 0, oo)) @_noconds def _mellin_transform(f, x, s_, integrator=_default_integrator, simplify=True): """ Backend function to compute Mellin transforms. """ from sympy import re, Max, Min, count_ops # We use a fresh dummy, because assumptions on s might drop conditions on # convergence of the integral. s = _dummy('s', 'mellin-transform', f) F = integrator(x**(s - 1) * f, x) if not F.has(Integral): return _simplify(F.subs(s, s_), simplify), (-oo, oo), True if not F.is_Piecewise: raise IntegralTransformError('Mellin', f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError( 'Mellin', f, 'integral in unexpected form') def process_conds(cond): """ Turn ``cond`` into a strip (a, b), and auxiliary conditions. 
""" a = -oo b = oo aux = True conds = conjuncts(to_cnf(cond)) t = Dummy('t', real=True) for c in conds: a_ = oo b_ = -oo aux_ = [] for d in disjuncts(c): d_ = d.replace( re, lambda x: x.as_real_imag()[0]).subs(re(s), t) if not d.is_Relational or \ d.rel_op not in ('>', '>=', '<', '<=') \ or d_.has(s) or not d_.has(t): aux_ += [d] continue soln = _solve_inequality(d_, t) if not soln.is_Relational or \ soln.rel_op not in ('>', '>=', '<', '<='): aux_ += [d] continue if soln.lts == t: b_ = Max(soln.gts, b_) else: a_ = Min(soln.lts, a_) if a_ != oo and a_ != b: a = Max(a_, a) elif b_ != -oo and b_ != a: b = Min(b_, b) else: aux = And(aux, Or(*aux_)) return a, b, aux conds = [process_conds(c) for c in disjuncts(cond)] conds = [x for x in conds if x[2] != False] conds.sort(key=lambda x: (x[0] - x[1], count_ops(x[2]))) if not conds: raise IntegralTransformError('Mellin', f, 'no convergence found') a, b, aux = conds[0] return _simplify(F.subs(s, s_), simplify), (a, b), aux class MellinTransform(IntegralTransform): """ Class representing unevaluated Mellin transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Mellin transforms, see the :func:`mellin_transform` docstring. """ _name = 'Mellin' def _compute_transform(self, f, x, s, **hints): return _mellin_transform(f, x, s, **hints) def _as_integral(self, f, x, s): from sympy import Integral return Integral(f*x**(s - 1), (x, 0, oo)) def _collapse_extra(self, extra): from sympy import And, Max, Min a = [] b = [] cond = [] for (sa, sb), c in extra: a += [sa] b += [sb] cond += [c] res = (Max(*a), Min(*b)), And(*cond) if (res[0][0] >= res[0][1]) == True or res[1] == False: raise IntegralTransformError( 'Mellin', None, 'no combined convergence.') return res def mellin_transform(f, x, s, **hints): r""" Compute the Mellin transform `F(s)` of `f(x)`, .. math :: F(s) = \int_0^\infty x^{s-1} f(x) \mathrm{d}x. For all "sensible" functions, this converges absolutely in a strip `a < \operatorname{Re}(s) < b`. The Mellin transform is related via change of variables to the Fourier transform, and also to the (bilateral) Laplace transform. This function returns ``(F, (a, b), cond)`` where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip (as above), and ``cond`` are auxiliary convergence conditions. If the integral cannot be computed in closed form, this function returns an unevaluated :class:`MellinTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=False``, then only `F` will be returned (i.e. not ``cond``, and also not the strip ``(a, b)``). >>> from sympy.integrals.transforms import mellin_transform >>> from sympy import exp >>> from sympy.abc import x, s >>> mellin_transform(exp(-x), x, s) (gamma(s), (0, oo), True) See Also ======== inverse_mellin_transform, laplace_transform, fourier_transform hankel_transform, inverse_hankel_transform """ return MellinTransform(f, x, s).doit(**hints) def _rewrite_sin(m_n, s, a, b): """ Re-write the sine function ``sin(m*s + n)`` as gamma functions, compatible with the strip (a, b). Return ``(gamma1, gamma2, fac)`` so that ``f == fac/(gamma1 * gamma2)``. 
>>> from sympy.integrals.transforms import _rewrite_sin >>> from sympy import pi, S >>> from sympy.abc import s >>> _rewrite_sin((pi, 0), s, 0, 1) (gamma(s), gamma(-s + 1), pi) >>> _rewrite_sin((pi, 0), s, 1, 0) (gamma(s - 1), gamma(-s + 2), -pi) >>> _rewrite_sin((pi, 0), s, -1, 0) (gamma(s + 1), gamma(-s), -pi) >>> _rewrite_sin((pi, pi/2), s, S(1)/2, S(3)/2) (gamma(s - 1/2), gamma(-s + 3/2), -pi) >>> _rewrite_sin((pi, pi), s, 0, 1) (gamma(s), gamma(-s + 1), -pi) >>> _rewrite_sin((2*pi, 0), s, 0, S(1)/2) (gamma(2*s), gamma(-2*s + 1), pi) >>> _rewrite_sin((2*pi, 0), s, S(1)/2, 1) (gamma(2*s - 1), gamma(-2*s + 2), -pi) """ # (This is a separate function because it is moderately complicated, # and I want to doctest it.) # We want to use pi/sin(pi*x) = gamma(x)*gamma(1-x). # But there is one comlication: the gamma functions determine the # inegration contour in the definition of the G-function. Usually # it would not matter if this is slightly shifted, unless this way # we create an undefined function! # So we try to write this in such a way that the gammas are # eminently on the right side of the strip. from sympy import expand_mul, pi, ceiling, gamma, re m, n = m_n m = expand_mul(m/pi) n = expand_mul(n/pi) r = ceiling(-m*a - n.as_real_imag()[0]) # Don't use re(n), does not expand return gamma(m*s + n + r), gamma(1 - n - r - m*s), (-1)**r*pi class MellinTransformStripError(ValueError): """ Exception raised by _rewrite_gamma. Mainly for internal use. """ pass def _rewrite_gamma(f, s, a, b): """ Try to rewrite the product f(s) as a product of gamma functions, so that the inverse Mellin transform of f can be expressed as a meijer G function. Return (an, ap), (bm, bq), arg, exp, fac such that G((an, ap), (bm, bq), arg/z**exp)*fac is the inverse Mellin transform of f(s). Raises IntegralTransformError or MellinTransformStripError on failure. It is asserted that f has no poles in the fundamental strip designated by (a, b). One of a and b is allowed to be None. The fundamental strip is important, because it determines the inversion contour. This function can handle exponentials, linear factors, trigonometric functions. This is a helper function for inverse_mellin_transform that will not attempt any transformations on f. >>> from sympy.integrals.transforms import _rewrite_gamma >>> from sympy.abc import s >>> from sympy import oo >>> _rewrite_gamma(s*(s+3)*(s-1), s, -oo, oo) (([], [-3, 0, 1]), ([-2, 1, 2], []), 1, 1, -1) >>> _rewrite_gamma((s-1)**2, s, -oo, oo) (([], [1, 1]), ([2, 2], []), 1, 1, 1) Importance of the fundamental strip: >>> _rewrite_gamma(1/s, s, 0, oo) (([1], []), ([], [0]), 1, 1, 1) >>> _rewrite_gamma(1/s, s, None, oo) (([1], []), ([], [0]), 1, 1, 1) >>> _rewrite_gamma(1/s, s, 0, None) (([1], []), ([], [0]), 1, 1, 1) >>> _rewrite_gamma(1/s, s, -oo, 0) (([], [1]), ([0], []), 1, 1, -1) >>> _rewrite_gamma(1/s, s, None, 0) (([], [1]), ([0], []), 1, 1, -1) >>> _rewrite_gamma(1/s, s, -oo, None) (([], [1]), ([0], []), 1, 1, -1) >>> _rewrite_gamma(2**(-s+3), s, -oo, oo) (([], []), ([], []), 1/2, 1, 8) """ from itertools import repeat from sympy import (Poly, gamma, Mul, re, RootOf, exp as exp_, E, expand, roots, ilcm, pi, sin, cos, tan, cot, igcd, exp_polar) # Our strategy will be as follows: # 1) Guess a constant c such that the inversion integral should be # performed wrt s'=c*s (instead of plain s). Write s for s'. # 2) Process all factors, rewrite them independently as gamma functions in # argument s, or exponentials of s. # 3) Try to transform all gamma functions s.t. 
they have argument # a+s or a-s. # 4) Check that the resulting G function parameters are valid. # 5) Combine all the exponentials. a_, b_ = S([a, b]) def left(c, is_numer): """ Decide whether pole at c lies to the left of the fundamental strip. """ # heuristically, this is the best chance for us to solve the inequalities c = expand(re(c)) if a_ is None: return c < b_ if b_ is None: return c <= a_ if (c >= b_) is True: return False if (c <= a_) is True: return True if is_numer: return None if a_.free_symbols or b_.free_symbols or c.free_symbols: return None # XXX #raise IntegralTransformError('Inverse Mellin', f, # 'Could not determine position of singularity %s' # ' relative to fundamental strip' % c) raise MellinTransformStripError('Pole inside critical strip?') # 1) s_multipliers = [] for g in f.atoms(gamma): if not g.has(s): continue arg = g.args[0] if arg.is_Add: arg = arg.as_independent(s)[1] coeff, _ = arg.as_coeff_mul(s) s_multipliers += [coeff] for g in f.atoms(sin, cos, tan, cot): if not g.has(s): continue arg = g.args[0] if arg.is_Add: arg = arg.as_independent(s)[1] coeff, _ = arg.as_coeff_mul(s) s_multipliers += [coeff/pi] s_multipliers = [abs(x) for x in s_multipliers if x.is_real] common_coefficient = S(1) for x in s_multipliers: if not x.is_Rational: common_coefficient = x break s_multipliers = [x/common_coefficient for x in s_multipliers] if any(not x.is_Rational for x in s_multipliers): raise NotImplementedError s_multiplier = common_coefficient/reduce(ilcm, [S(x.q) for x in s_multipliers], S(1)) if s_multiplier == common_coefficient: if len(s_multipliers) == 0: s_multiplier = common_coefficient else: s_multiplier = common_coefficient \ *reduce(igcd, [S(x.p) for x in s_multipliers]) exponent = S(1) fac = S(1) f = f.subs(s, s/s_multiplier) fac /= s_multiplier exponent = 1/s_multiplier if a_ is not None: a_ *= s_multiplier if b_ is not None: b_ *= s_multiplier # 2) numer, denom = f.as_numer_denom() numer = Mul.make_args(numer) denom = Mul.make_args(denom) args = list(zip(numer, repeat(True))) + list(zip(denom, repeat(False))) facs = [] dfacs = [] # *_gammas will contain pairs (a, c) representing Gamma(a*s + c) numer_gammas = [] denom_gammas = [] # exponentials will contain bases for exponentials of s exponentials = [] def exception(fact): return IntegralTransformError("Inverse Mellin", f, "Unrecognised form '%s'." % fact) while args: fact, is_numer = args.pop() if is_numer: ugammas, lgammas = numer_gammas, denom_gammas ufacs, lfacs = facs, dfacs else: ugammas, lgammas = denom_gammas, numer_gammas ufacs, lfacs = dfacs, facs def linear_arg(arg): """ Test if arg is of form a*s+b, raise exception if not. """ if not arg.is_polynomial(s): raise exception(fact) p = Poly(arg, s) if p.degree() != 1: raise exception(fact) return p.all_coeffs() # constants if not fact.has(s): ufacs += [fact] # exponentials elif fact.is_Pow or isinstance(fact, exp_): if fact.is_Pow: base = fact.base exp = fact.exp else: base = exp_polar(1) exp = fact.args[0] if exp.is_Integer: cond = is_numer if exp < 0: cond = not cond args += [(base, cond)]*abs(exp) continue elif not base.has(s): a, b = linear_arg(exp) if not is_numer: base = 1/base exponentials += [base**a] facs += [base**b] else: raise exception(fact) # linear factors elif fact.is_polynomial(s): p = Poly(fact, s) if p.degree() != 1: # We completely factor the poly. For this we need the roots. # Now roots() only works in some cases (low degree), and RootOf # only works without parameters. So try both... 
coeff = p.LT()[1] rs = roots(p, s) if len(rs) != p.degree(): rs = RootOf.all_roots(p) ufacs += [coeff] args += [(s - c, is_numer) for c in rs] continue a, c = p.all_coeffs() ufacs += [a] c /= -a # Now need to convert s - c if left(c, is_numer): ugammas += [(S(1), -c + 1)] lgammas += [(S(1), -c)] else: ufacs += [-1] ugammas += [(S(-1), c + 1)] lgammas += [(S(-1), c)] elif isinstance(fact, gamma): a, b = linear_arg(fact.args[0]) if is_numer: if (a > 0 and (left(-b/a, is_numer) is False)) or \ (a < 0 and (left(-b/a, is_numer) is True)): raise NotImplementedError( 'Gammas partially over the strip.') ugammas += [(a, b)] elif isinstance(fact, sin): # We try to re-write all trigs as gammas. This is not in # general the best strategy, since sometimes this is impossible, # but rewriting as exponentials would work. However trig functions # in inverse mellin transforms usually all come from simplifying # gamma terms, so this should work. a = fact.args[0] if is_numer: # No problem with the poles. gamma1, gamma2, fac_ = gamma(a/pi), gamma(1 - a/pi), pi else: gamma1, gamma2, fac_ = _rewrite_sin(linear_arg(a), s, a_, b_) args += [(gamma1, not is_numer), (gamma2, not is_numer)] ufacs += [fac_] elif isinstance(fact, tan): a = fact.args[0] args += [(sin(a, evaluate=False), is_numer), (sin(pi/2 - a, evaluate=False), not is_numer)] elif isinstance(fact, cos): a = fact.args[0] args += [(sin(pi/2 - a, evaluate=False), is_numer)] elif isinstance(fact, cot): a = fact.args[0] args += [(sin(pi/2 - a, evaluate=False), is_numer), (sin(a, evaluate=False), not is_numer)] else: raise exception(fact) fac *= Mul(*facs)/Mul(*dfacs) # 3) an, ap, bm, bq = [], [], [], [] for gammas, plus, minus, is_numer in [(numer_gammas, an, bm, True), (denom_gammas, bq, ap, False)]: while gammas: a, c = gammas.pop() if a != -1 and a != +1: # We use the gamma function multiplication theorem. p = abs(S(a)) newa = a/p newc = c/p assert a.is_Integer for k in range(p): gammas += [(newa, newc + k/p)] if is_numer: fac *= (2*pi)**((1 - p)/2) * p**(c - S(1)/2) exponentials += [p**a] else: fac /= (2*pi)**((1 - p)/2) * p**(c - S(1)/2) exponentials += [p**(-a)] continue if a == +1: plus.append(1 - c) else: minus.append(c) # 4) # TODO # 5) arg = Mul(*exponentials) # for testability, sort the arguments an.sort(key=default_sort_key) ap.sort(key=default_sort_key) bm.sort(key=default_sort_key) bq.sort(key=default_sort_key) return (an, ap), (bm, bq), arg, exponent, fac @_noconds_(True) def _inverse_mellin_transform(F, s, x_, strip, as_meijerg=False): """ A helper for the real inverse_mellin_transform function, this one here assumes x to be real and positive. """ from sympy import (expand, expand_mul, hyperexpand, meijerg, And, Or, arg, pi, re, factor, Heaviside, gamma, Add) x = _dummy('t', 'inverse-mellin-transform', F, positive=True) # Actually, we won't try integration at all. Instead we use the definition # of the Meijer G function as a fairly general inverse mellin transform. 
F = F.rewrite(gamma) for g in [factor(F), expand_mul(F), expand(F)]: if g.is_Add: # do all terms separately ress = [_inverse_mellin_transform(G, s, x, strip, as_meijerg, noconds=False) for G in g.args] conds = [p[1] for p in ress] ress = [p[0] for p in ress] res = Add(*ress) if not as_meijerg: res = factor(res, gens=res.atoms(Heaviside)) return res.subs(x, x_), And(*conds) try: a, b, C, e, fac = _rewrite_gamma(g, s, strip[0], strip[1]) except IntegralTransformError: continue G = meijerg(a, b, C/x**e) if as_meijerg: h = G else: try: h = hyperexpand(G) except NotImplementedError as detail: raise IntegralTransformError( 'Inverse Mellin', F, 'Could not calculate integral') if h.is_Piecewise and len(h.args) == 3: # XXX we break modularity here! h = Heaviside(x - abs(C))*h.args[0].args[0] \ + Heaviside(abs(C) - x)*h.args[1].args[0] # We must ensure that the intgral along the line we want converges, # and return that value. # See [L], 5.2 cond = [abs(arg(G.argument)) < G.delta*pi] # Note: we allow ">=" here, this corresponds to convergence if we let # limits go to oo symetrically. ">" corresponds to absolute convergence. cond += [And(Or(len(G.ap) != len(G.bq), 0 >= re(G.nu) + 1), abs(arg(G.argument)) == G.delta*pi)] cond = Or(*cond) if cond == False: raise IntegralTransformError( 'Inverse Mellin', F, 'does not converge') return (h*fac).subs(x, x_), cond raise IntegralTransformError('Inverse Mellin', F, '') _allowed = None class InverseMellinTransform(IntegralTransform): """ Class representing unevaluated inverse Mellin transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Mellin transforms, see the :func:`inverse_mellin_transform` docstring. """ nargs = 5 _name = 'Inverse Mellin' _none_sentinel = Dummy('None') _c = Dummy('c') def __new__(cls, F, s, x, a, b, **opts): if a is None: a = InverseMellinTransform._none_sentinel if b is None: b = InverseMellinTransform._none_sentinel return IntegralTransform.__new__(cls, F, s, x, a, b, **opts) @property def fundamental_strip(self): a, b = self.args[3], self.args[4] if a is InverseMellinTransform._none_sentinel: a = None if b is InverseMellinTransform._none_sentinel: b = None return a, b def _compute_transform(self, F, s, x, **hints): from sympy import postorder_traversal global _allowed if _allowed is None: from sympy import ( exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth, factorial, rf) _allowed = set( [exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth, factorial, rf]) for f in postorder_traversal(F): if f.is_Function and f.has(s) and f.func not in _allowed: raise IntegralTransformError('Inverse Mellin', F, 'Component %s not recognised.' % f) strip = self.fundamental_strip return _inverse_mellin_transform(F, s, x, strip, **hints) def _as_integral(self, F, s, x): from sympy import Integral, I, oo c = self.__class__._c return Integral(F*x**(-s), (s, c - I*oo, c + I*oo)) def inverse_mellin_transform(F, s, x, strip, **hints): r""" Compute the inverse Mellin transform of `F(s)` over the fundamental strip given by ``strip=(a, b)``. This can be defined as .. math:: f(x) = \int_{c - i\infty}^{c + i\infty} x^{-s} F(s) \mathrm{d}s, for any `c` in the fundamental strip. Under certain regularity conditions on `F` and/or `f`, this recovers `f` from its Mellin transform `F` (and vice versa), for positive real `x`. One of `a` or `b` may be passed as ``None``; a suitable `c` will be inferred. 
If the integral cannot be computed in closed form, this function returns an unevaluated :class:`InverseMellinTransform` object. Note that this function will assume x to be positive and real, regardless of the sympy assumptions! For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. >>> from sympy.integrals.transforms import inverse_mellin_transform >>> from sympy import oo, gamma >>> from sympy.abc import x, s >>> inverse_mellin_transform(gamma(s), s, x, (0, oo)) exp(-x) The fundamental strip matters: >>> f = 1/(s**2 - 1) >>> inverse_mellin_transform(f, s, x, (-oo, -1)) (x/2 - 1/(2*x))*Heaviside(x - 1) >>> inverse_mellin_transform(f, s, x, (-1, 1)) -x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x) >>> inverse_mellin_transform(f, s, x, (1, oo)) (-x/2 + 1/(2*x))*Heaviside(-x + 1) See Also ======== mellin_transform hankel_transform, inverse_hankel_transform """ return InverseMellinTransform(F, s, x, strip[0], strip[1]).doit(**hints) ########################################################################## # Laplace Transform ########################################################################## def _simplifyconds(expr, s, a): r""" Naively simplify some conditions occuring in ``expr``, given that `\operatorname{Re}(s) > a`. >>> from sympy.integrals.transforms import _simplifyconds as simp >>> from sympy.abc import x >>> from sympy import sympify as S >>> simp(abs(x**2) < 1, x, 1) False >>> simp(abs(x**2) < 1, x, 2) False >>> simp(abs(x**2) < 1, x, 0) Abs(x**2) < 1 >>> simp(abs(1/x**2) < 1, x, 1) True >>> simp(S(1) < abs(x), x, 1) True >>> simp(S(1) < abs(1/x), x, 1) False >>> from sympy import Ne >>> simp(Ne(1, x**3), x, 1) True >>> simp(Ne(1, x**3), x, 2) True >>> simp(Ne(1, x**3), x, 0) 1 != x**3 """ from sympy.core.relational import ( StrictGreaterThan, StrictLessThan, Unequality ) from sympy import Abs def power(ex): if ex == s: return 1 if ex.is_Pow and ex.base == s: return ex.exp return None def bigger(ex1, ex2): """ Return True only if |ex1| > |ex2|, False only if |ex1| < |ex2|. Else return None. """ if ex1.has(s) and ex2.has(s): return None if ex1.func is Abs: ex1 = ex1.args[0] if ex2.func is Abs: ex2 = ex2.args[0] if ex1.has(s): return bigger(1/ex2, 1/ex1) n = power(ex2) if n is None: return None if n > 0 and (abs(ex1) <= abs(a)**n) is True: return False if n < 0 and (abs(ex1) >= abs(a)**n) is True: return True def replie(x, y): """ simplify x < y """ if not (x.is_positive or x.func is Abs) \ or not (y.is_positive or y.func is Abs): return (x < y) r = bigger(x, y) if r is not None: return not r return (x < y) def replue(x, y): if bigger(x, y) in (True, False): return True return Unequality(x, y) def repl(ex, *args): if isinstance(ex, bool): return ex return ex.replace(*args) expr = repl(expr, StrictLessThan, replie) expr = repl(expr, StrictGreaterThan, lambda x, y: replie(y, x)) expr = repl(expr, Unequality, replue) return expr @_noconds def _laplace_transform(f, t, s_, simplify=True): """ The backend function for Laplace transforms. 
""" from sympy import (re, Max, exp, pi, Abs, Min, periodic_argument as arg, cos, Wild, symbols, polar_lift) s = Dummy('s') F = integrate(exp(-s*t) * f, (t, 0, oo)) if not F.has(Integral): return _simplify(F.subs(s, s_), simplify), -oo, True if not F.is_Piecewise: raise IntegralTransformError( 'Laplace', f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError( 'Laplace', f, 'integral in unexpected form') def process_conds(conds): """ Turn ``conds`` into a strip and auxiliary conditions. """ a = -oo aux = True conds = conjuncts(to_cnf(conds)) u = Dummy('u', real=True) p, q, w1, w2, w3, w4, w5 = symbols( 'p q w1 w2 w3 w4 w5', cls=Wild, exclude=[s]) for c in conds: a_ = oo aux_ = [] for d in disjuncts(c): m = d.match(abs(arg((s + w3)**p*q, w1)) < w2) if not m: m = d.match(abs(arg((s + w3)**p*q, w1)) <= w2) if not m: m = d.match(abs(arg((polar_lift(s + w3))**p*q, w1)) < w2) if not m: m = d.match(abs(arg((polar_lift(s + w3))**p*q, w1)) <= w2) if m: if m[q] > 0 and m[w2]/m[p] == pi/2: d = re(s + m[w3]) > 0 m = d.match( 0 < cos(abs(arg(s**w1*w5, q))*w2)*abs(s**w3)**w4 - p) if not m: m = d.match(0 < cos(abs( arg(polar_lift(s)**w1*w5, q))*w2)*abs(s**w3)**w4 - p) if m and all(m[wild] > 0 for wild in [w1, w2, w3, w4, w5]): d = re(s) > m[p] d_ = d.replace( re, lambda x: x.expand().as_real_imag()[0]).subs(re(s), t) if not d.is_Relational or \ d.rel_op not in ('>', '>=', '<', '<=') \ or d_.has(s) or not d_.has(t): aux_ += [d] continue soln = _solve_inequality(d_, t) if not soln.is_Relational or \ soln.rel_op not in ('>', '>=', '<', '<='): aux_ += [d] continue if soln.lts == t: raise IntegralTransformError('Laplace', f, 'convergence not in half-plane?') else: a_ = Min(soln.lts, a_) if a_ != oo: a = Max(a_, a) else: aux = And(aux, Or(*aux_)) return a, aux conds = [process_conds(c) for c in disjuncts(cond)] conds2 = [x for x in conds if x[1] != False and x[0] != -oo] if not conds2: conds2 = [x for x in conds if x[1] != False] conds = conds2 def cnt(expr): if isinstance(expr, bool): return 0 return expr.count_ops() conds.sort(key=lambda x: (-x[0], cnt(x[1]))) if not conds: raise IntegralTransformError('Laplace', f, 'no convergence found') a, aux = conds[0] def sbs(expr): if isinstance(expr, bool): return expr return expr.subs(s, s_) if simplify: F = _simplifyconds(F, s, a) aux = _simplifyconds(aux, s, a) return _simplify(F.subs(s, s_), simplify), sbs(a), sbs(aux) class LaplaceTransform(IntegralTransform): """ Class representing unevaluated Laplace transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Laplace transforms, see the :func:`laplace_transform` docstring. """ _name = 'Laplace' def _compute_transform(self, f, t, s, **hints): return _laplace_transform(f, t, s, **hints) def _as_integral(self, f, t, s): from sympy import Integral, exp return Integral(f*exp(-s*t), (t, 0, oo)) def _collapse_extra(self, extra): from sympy import And, Max conds = [] planes = [] for plane, cond in extra: conds.append(cond) planes.append(plane) cond = And(*conds) plane = Max(*planes) if cond == False: raise IntegralTransformError( 'Laplace', None, 'No combined convergence.') return plane, cond def laplace_transform(f, t, s, **hints): r""" Compute the Laplace Transform `F(s)` of `f(t)`, .. math :: F(s) = \int_0^\infty e^{-st} f(t) \mathrm{d}t. For all "sensible" functions, this converges absolutely in a half plane `a < \operatorname{Re}(s)`. 
This function returns ``(F, a, cond)`` where ``F`` is the Laplace transform of ``f``, `\operatorname{Re}(s) > a` is the half-plane of convergence, and ``cond`` are auxiliary convergence conditions. If the integral cannot be computed in closed form, this function returns an unevaluated :class:`LaplaceTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``, only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``). >>> from sympy.integrals import laplace_transform >>> from sympy.abc import t, s, a >>> laplace_transform(t**a, t, s) (s**(-a)*gamma(a + 1)/s, 0, -re(a) < 1) See Also ======== inverse_laplace_transform, mellin_transform, fourier_transform hankel_transform, inverse_hankel_transform """ return LaplaceTransform(f, t, s).doit(**hints) @_noconds_(True) def _inverse_laplace_transform(F, s, t_, plane, simplify=True): """ The backend function for inverse Laplace transforms. """ from sympy import exp, Heaviside, log, expand_complex, Integral, Piecewise from sympy.integrals.meijerint import meijerint_inversion, _get_coeff_exp # There are two strategies we can try: # 1) Use inverse mellin transforms - related by a simple change of variables. # 2) Use the inversion integral. t = Dummy('t', real=True) def pw_simp(*args): """ Simplify a piecewise expression from hyperexpand. """ # XXX we break modularity here! if len(args) != 3: return Piecewise(*args) arg = args[2].args[0].argument coeff, exponent = _get_coeff_exp(arg, t) e1 = args[0].args[0] e2 = args[1].args[0] return Heaviside(1/abs(coeff) - t**exponent)*e1 \ + Heaviside(t**exponent - 1/abs(coeff))*e2 try: f, cond = inverse_mellin_transform(F, s, exp(-t), (None, oo), needeval=True, noconds=False) except IntegralTransformError: f = None if f is None: f = meijerint_inversion(F, s, t) if f is None: raise IntegralTransformError('Inverse Laplace', f, '') if f.is_Piecewise: f, cond = f.args[0] if f.has(Integral): raise IntegralTransformError('Inverse Laplace', f, 'inversion integral of unrecognised form.') else: cond = True f = f.replace(Piecewise, pw_simp) if f.is_Piecewise: # many of the functions called below can't work with piecewise # (b/c it has a bool in args) return f.subs(t, t_), cond u = Dummy('u') def simp_heaviside(arg): a = arg.subs(exp(-t), u) if a.has(t): return Heaviside(arg) rel = _solve_inequality(a > 0, u) if rel.lts == u: k = log(rel.gts) return Heaviside(t + k) else: k = log(rel.lts) return Heaviside(-(t + k)) f = f.replace(Heaviside, simp_heaviside) def simp_exp(arg): return expand_complex(exp(arg)) f = f.replace(exp, simp_exp) # TODO it would be nice to fix cosh and sinh ... simplify messes these # exponentials up return _simplify(f.subs(t, t_), simplify), cond class InverseLaplaceTransform(IntegralTransform): """ Class representing unevaluated inverse Laplace transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Laplace transforms, see the :func:`inverse_laplace_transform` docstring. 
""" nargs = 4 _name = 'Inverse Laplace' _none_sentinel = Dummy('None') _c = Dummy('c') def __new__(cls, F, s, x, plane, **opts): if plane is None: plane = InverseLaplaceTransform._none_sentinel return IntegralTransform.__new__(cls, F, s, x, plane, **opts) @property def fundamental_plane(self): plane = self.args[3] if plane is InverseLaplaceTransform._none_sentinel: plane = None return plane def _compute_transform(self, F, s, t, **hints): return _inverse_laplace_transform(F, s, t, self.fundamental_plane, **hints) def _as_integral(self, F, s, t): from sympy import I, Integral, exp c = self.__class__._c return Integral(exp(s*t)*F, (s, c - I*oo, c + I*oo)) def inverse_laplace_transform(F, s, t, plane=None, **hints): r""" Compute the inverse Laplace transform of `F(s)`, defined as .. math :: f(t) = \int_{c-i\infty}^{c+i\infty} e^{st} F(s) \mathrm{d}s, for `c` so large that `F(s)` has no singularites in the half-plane `\operatorname{Re}(s) > c-\epsilon`. The plane can be specified by argument ``plane``, but will be inferred if passed as None. Under certain regularity conditions, this recovers `f(t)` from its Laplace Transform `F(s)`, for non-negative `t`, and vice versa. If the integral cannot be computed in closed form, this function returns an unevaluated :class:`InverseLaplaceTransform` object. Note that this function will always assume `t` to be real, regardless of the sympy assumption on `t`. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. >>> from sympy.integrals.transforms import inverse_laplace_transform >>> from sympy import exp, Symbol >>> from sympy.abc import s, t >>> a = Symbol('a', positive=True) >>> inverse_laplace_transform(exp(-a*s)/s, s, t) Heaviside(-a + t) See Also ======== laplace_transform hankel_transform, inverse_hankel_transform """ return InverseLaplaceTransform(F, s, t, plane).doit(**hints) ########################################################################## # Fourier Transform ########################################################################## @_noconds_(True) def _fourier_transform(f, x, k, a, b, name, simplify=True): """ Compute a general Fourier-type transform F(k) = a int_-oo^oo exp(b*I*x*k) f(x) dx. For suitable choice of a and b, this reduces to the standard Fourier and inverse Fourier transforms. """ from sympy import exp, I, oo F = integrate(a*f*exp(b*I*x*k), (x, -oo, oo)) if not F.has(Integral): return _simplify(F, simplify), True if not F.is_Piecewise: raise IntegralTransformError(name, f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError(name, f, 'integral in unexpected form') return _simplify(F, simplify), cond class FourierTypeTransform(IntegralTransform): """ Base class for Fourier transforms. Specify cls._a and cls._b. """ def _compute_transform(self, f, x, k, **hints): return _fourier_transform(f, x, k, self.__class__._a, self.__class__._b, self.__class__._name, **hints) def _as_integral(self, f, x, k): from sympy import Integral, exp, I a = self.__class__._a b = self.__class__._b return Integral(a*f*exp(b*I*x*k), (x, -oo, oo)) class FourierTransform(FourierTypeTransform): """ Class representing unevaluated Fourier transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Fourier transforms, see the :func:`fourier_transform` docstring. 
""" _name = 'Fourier' _a = 1 _b = -2*S.Pi def fourier_transform(f, x, k, **hints): r""" Compute the unitary, ordinary-frequency Fourier transform of `f`, defined as .. math:: F(k) = \int_{-\infty}^\infty f(x) e^{-2\pi i x k} \mathrm{d} x. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`FourierTransform` object. For other Fourier transform conventions, see the function :func:`sympy.integrals.transforms._fourier_transform`. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import fourier_transform, exp >>> from sympy.abc import x, k >>> fourier_transform(exp(-x**2), x, k) sqrt(pi)*exp(-pi**2*k**2) >>> fourier_transform(exp(-x**2), x, k, noconds=False) (sqrt(pi)*exp(-pi**2*k**2), True) See Also ======== inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return FourierTransform(f, x, k).doit(**hints) class InverseFourierTransform(FourierTypeTransform): """ Class representing unevaluated inverse Fourier transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Fourier transforms, see the :func:`inverse_fourier_transform` docstring. """ _name = 'Inverse Fourier' _a = 1 _b = 2*S.Pi def inverse_fourier_transform(F, k, x, **hints): r""" Compute the unitary, ordinary-frequency inverse Fourier transform of `F`, defined as .. math:: f(x) = \int_{-\infty}^\infty F(k) e^{2\pi i x k} \mathrm{d} k. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseFourierTransform` object. For other Fourier transform conventions, see the function :func:`sympy.integrals.transforms._fourier_transform`. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import inverse_fourier_transform, exp, sqrt, pi >>> from sympy.abc import x, k >>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x) exp(-x**2) >>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x, noconds=False) (exp(-x**2), True) See Also ======== fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return InverseFourierTransform(F, k, x).doit(**hints) ########################################################################## # Fourier Sine and Cosine Transform ########################################################################## from sympy import sin, cos, sqrt, pi, I, oo @_noconds_(True) def _sine_cosine_transform(f, x, k, a, b, K, name, simplify=True): """ Compute a general sine or cosine-type transform F(k) = a int_0^oo b*sin(x*k) f(x) dx. F(k) = a int_0^oo b*cos(x*k) f(x) dx. For suitable choice of a and b, this reduces to the standard sine/cosine and inverse sine/cosine transforms. 
""" F = integrate(a*f*K(b*x*k), (x, 0, oo)) if not F.has(Integral): return _simplify(F, simplify), True if not F.is_Piecewise: raise IntegralTransformError(name, f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError(name, f, 'integral in unexpected form') return _simplify(F, simplify), cond class SineCosineTypeTransform(IntegralTransform): """ Base class for sine and cosine transforms. Specify cls._a and cls._b and cls._kern. """ def _compute_transform(self, f, x, k, **hints): return _sine_cosine_transform(f, x, k, self.__class__._a, self.__class__._b, self.__class__._kern, self.__class__._name, **hints) def _as_integral(self, f, x, k): from sympy import Integral, exp, I a = self.__class__._a b = self.__class__._b K = self.__class__._kern return Integral(a*f*K(b*x*k), (x, 0, oo)) class SineTransform(SineCosineTypeTransform): """ Class representing unevaluated sine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute sine transforms, see the :func:`sine_transform` docstring. """ _name = 'Sine' _kern = sin _a = sqrt(2)/sqrt(pi) _b = 1 def sine_transform(f, x, k, **hints): r""" Compute the unitary, ordinary-frequency sine transform of `f`, defined as .. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \sin(2\pi x k) \mathrm{d} x. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`SineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import sine_transform, exp >>> from sympy.abc import x, k, a >>> sine_transform(x*exp(-a*x**2), x, k) sqrt(2)*k*exp(-k**2/(4*a))/(4*a**(3/2)) >>> sine_transform(x**(-a), x, k) 2**(-a + 1/2)*k**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + 1/2) See Also ======== fourier_transform, inverse_fourier_transform inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return SineTransform(f, x, k).doit(**hints) class InverseSineTransform(SineCosineTypeTransform): """ Class representing unevaluated inverse sine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse sine transforms, see the :func:`inverse_sine_transform` docstring. """ _name = 'Inverse Sine' _kern = sin _a = sqrt(2)/sqrt(pi) _b = 1 def inverse_sine_transform(F, k, x, **hints): r""" Compute the unitary, ordinary-frequency inverse sine transform of `F`, defined as .. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \sin(2\pi x k) \mathrm{d} k. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseSineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import inverse_sine_transform, exp, sqrt, gamma, pi >>> from sympy.abc import x, k, a >>> inverse_sine_transform(2**((1-2*a)/2)*k**(a - 1)* ... 
gamma(-a/2 + 1)/gamma((a+1)/2), k, x) x**(-a) >>> inverse_sine_transform(sqrt(2)*k*exp(-k**2/(4*a))/(4*sqrt(a)**3), k, x) x*exp(-a*x**2) See Also ======== fourier_transform, inverse_fourier_transform sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return InverseSineTransform(F, k, x).doit(**hints) class CosineTransform(SineCosineTypeTransform): """ Class representing unevaluated cosine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute cosine transforms, see the :func:`cosine_transform` docstring. """ _name = 'Cosine' _kern = cos _a = sqrt(2)/sqrt(pi) _b = 1 def cosine_transform(f, x, k, **hints): r""" Compute the unitary, ordinary-frequency cosine transform of `f`, defined as .. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \cos(2\pi x k) \mathrm{d} x. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`CosineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import cosine_transform, exp, sqrt, cos >>> from sympy.abc import x, k, a >>> cosine_transform(exp(-a*x), x, k) sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)) >>> cosine_transform(exp(-a*sqrt(x))*cos(a*sqrt(x)), x, k) a*exp(-a**2/(2*k))/(2*k**(3/2)) See Also ======== fourier_transform, inverse_fourier_transform, sine_transform, inverse_sine_transform inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return CosineTransform(f, x, k).doit(**hints) class InverseCosineTransform(SineCosineTypeTransform): """ Class representing unevaluated inverse cosine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse cosine transforms, see the :func:`inverse_cosine_transform` docstring. """ _name = 'Inverse Cosine' _kern = cos _a = sqrt(2)/sqrt(pi) _b = 1 def inverse_cosine_transform(F, k, x, **hints): r""" Compute the unitary, ordinary-frequency inverse cosine transform of `F`, defined as .. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \cos(2\pi x k) \mathrm{d} k. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseCosineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import inverse_cosine_transform, exp, sqrt, pi >>> from sympy.abc import x, k, a >>> inverse_cosine_transform(sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)), k, x) exp(-a*x) >>> inverse_cosine_transform(1/sqrt(k), k, x) 1/sqrt(x) See Also ======== fourier_transform, inverse_fourier_transform, sine_transform, inverse_sine_transform cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return InverseCosineTransform(F, k, x).doit(**hints) ########################################################################## # Hankel Transform ########################################################################## @_noconds_(True) def _hankel_transform(f, r, k, nu, name, simplify=True): """ Compute a general Hankel transform .. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r. 
""" from sympy import besselj, oo F = integrate(f*besselj(nu, k*r)*r, (r, 0, oo)) if not F.has(Integral): return _simplify(F, simplify), True if not F.is_Piecewise: raise IntegralTransformError(name, f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError(name, f, 'integral in unexpected form') return _simplify(F, simplify), cond class HankelTypeTransform(IntegralTransform): """ Base class for Hankel transforms. """ nargs = 4 def doit(self, **hints): return self._compute_transform(self.function, self.function_variable, self.transform_variable, self.args[3], **hints) def _compute_transform(self, f, r, k, nu, **hints): return _hankel_transform(f, r, k, nu, self._name, **hints) def _as_integral(self, f, r, k, nu): from sympy import Integral, besselj, oo return Integral(f*besselj(nu, k*r)*r, (r, 0, oo)) @property def as_integral(self): return self._as_integral(self.function, self.function_variable, self.transform_variable, self.args[3]) class HankelTransform(HankelTypeTransform): """ Class representing unevaluated Hankel transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Hankel transforms, see the :func:`hankel_transform` docstring. """ _name = 'Hankel' def hankel_transform(f, r, k, nu, **hints): r""" Compute the Hankel transform of `f`, defined as .. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`HankelTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import hankel_transform, inverse_hankel_transform >>> from sympy import gamma, exp, sinh, cosh >>> from sympy.abc import r, k, m, nu, a >>> ht = hankel_transform(1/r**m, r, k, nu) >>> ht 2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2) >>> inverse_hankel_transform(ht, k, r, nu) r**(-m) >>> ht = hankel_transform(exp(-a*r), r, k, 0) >>> ht a/(k**3*(a**2/k**2 + 1)**(3/2)) >>> inverse_hankel_transform(ht, k, r, 0) exp(-a*r) See Also ======== fourier_transform, inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform inverse_hankel_transform mellin_transform, laplace_transform """ return HankelTransform(f, r, k, nu).doit(**hints) class InverseHankelTransform(HankelTypeTransform): """ Class representing unevaluated inverse Hankel transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Hankel transforms, see the :func:`inverse_hankel_transform` docstring. """ _name = 'Inverse Hankel' def inverse_hankel_transform(F, k, r, nu, **hints): r""" Compute the inverse Hankel transform of `F` defined as .. math:: f(r) = \int_{0}^\infty F_\nu(k) J_\nu(k r) k \mathrm{d} k. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseHankelTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. 
>>> from sympy import hankel_transform, inverse_hankel_transform, gamma >>> from sympy import gamma, exp, sinh, cosh >>> from sympy.abc import r, k, m, nu, a >>> ht = hankel_transform(1/r**m, r, k, nu) >>> ht 2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2) >>> inverse_hankel_transform(ht, k, r, nu) r**(-m) >>> ht = hankel_transform(exp(-a*r), r, k, 0) >>> ht a/(k**3*(a**2/k**2 + 1)**(3/2)) >>> inverse_hankel_transform(ht, k, r, 0) exp(-a*r) See Also ======== fourier_transform, inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform mellin_transform, laplace_transform """ return InverseHankelTransform(F, k, r, nu).doit(**hints)
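# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module above): as the docstrings
# note, a transform with no known closed form comes back as an unevaluated
# transform object rather than raising.
from sympy import Function
from sympy.abc import x, k
from sympy.integrals.transforms import fourier_transform, FourierTransform

g = Function('g')                        # an unspecified function
result = fourier_transform(g(x), x, k)   # no closed form can be computed
print(result)                            # FourierTransform(g(x), x, k)
print(isinstance(result, FourierTransform))  # True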
py
1a332e0de141f48e3704fa01b52c5159ac26186f
#!/usr/bin/env python from collections import OrderedDict import json import numpy from kernel_tuner import tune_kernel from tune_utils import get_kernel_path def tune_expdist(): device = 2 tune_params = OrderedDict() tune_params["block_size_x"] = [32] #[2**i for i in range(5,10)] tune_params["block_size_y"] = [2**i for i in range(6)] tune_params["tile_size_x"] = [2**i for i in range(4)] tune_params["tile_size_y"] = [2**i for i in range(4)] tune_params["use_shared_mem"] = [1] #[0, 1] #setup test input alloc_size = 22000 size = numpy.int32(20000) max_blocks = numpy.int32( numpy.ceil(size / float(numpy.amin(tune_params["block_size_x"]))) * numpy.ceil(size / float(numpy.amin(tune_params["block_size_y"]))) ) ndim = numpy.int32(2) A = numpy.random.randn(alloc_size*ndim).astype(numpy.float64) B = A+0.00001*numpy.random.randn(alloc_size*ndim).astype(numpy.float64) scale_A = numpy.absolute(0.01*numpy.random.randn(alloc_size).astype(numpy.float64)) scale_B = numpy.absolute(0.01*numpy.random.randn(alloc_size).astype(numpy.float64)) cost = numpy.zeros((max_blocks)).astype(numpy.float64) #time the reference function #arguments = [cost, A, B, size, size, ndim, scale_A, scale_B] #with open(get_kernel_path('expdist')+'expdist_c.cpp', 'r') as f: # kernel_string = f.read() #print("CPU timing") #tune_kernel("time_expdist", kernel_string, size, arguments, {"block_size_x": [1]}, # lang="C", compiler_options=['-I'+get_kernel_path('expdist'), '-O3'], device=2) #tune the GPU function print("GPU timing") with open(get_kernel_path('expdist')+'kernels.cu', 'r') as f: kernel_string = f.read() arguments = [A, B, size, size, scale_A, scale_B, cost] cp = ['-O3'] grid_div_x = ["block_size_x", "tile_size_x"] grid_div_y = ["block_size_y", "tile_size_y"] kernel1 = tune_kernel("ExpDist", kernel_string, (size, size), arguments, tune_params, compiler_options=cp, grid_div_x=grid_div_x, grid_div_y=grid_div_y, device=2) with open("expdist.json", 'w') as fp: json.dump(kernel1, fp) best_config1 = min(kernel1[0], key=lambda x:x['time']) nblocks = numpy.int32( numpy.ceil(size / float(best_config1["block_size_x"]*best_config1["tile_size_x"])) * numpy.ceil(size / float(best_config1["block_size_y"]*best_config1["tile_size_y"])) ) tune_params = OrderedDict() tune_params["block_size_x"] = [32*i for i in range(1,33)] arguments = [numpy.zeros(1).astype(numpy.float64), cost, size, size, nblocks] kernel2 = tune_kernel("reduce_cross_term", kernel_string, 1, arguments, tune_params, grid_div_x=[], compiler_options=cp, device=2) best_config2 = min(kernel2[0], key=lambda x:x['time']) print("best GPU configuration, total time=", best_config1['time'] + best_config2['time']) print(best_config1) print(best_config2) if __name__ == "__main__": tune_expdist()
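# ---------------------------------------------------------------------------
# Worked example of the grid bookkeeping above (illustrative only): kernel_tuner
# divides the problem size by the product of the parameters listed in
# grid_div_x / grid_div_y to obtain the grid dimensions, which is exactly what
# the nblocks computation in tune_expdist() mirrors. For one candidate
# configuration:
import math
size, block_size_x, tile_size_x, block_size_y, tile_size_y = 20000, 32, 4, 8, 2
grid_x = math.ceil(size / float(block_size_x * tile_size_x))  # ceil(20000/128) = 157
grid_y = math.ceil(size / float(block_size_y * tile_size_y))  # ceil(20000/16) = 1250
print(grid_x, grid_y, grid_x * grid_y)                        # 157 1250 196250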
py
1a332f3153fd1ccd254cb5e189474306c6357d07
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Libxxf86vm(AutotoolsPackage): """libXxf86vm - Extension library for the XFree86-VidMode X extension.""" homepage = "http://cgit.freedesktop.org/xorg/lib/libXxf86vm" url = "https://www.x.org/archive/individual/lib/libXxf86vm-1.1.4.tar.gz" version('1.1.4', sha256='5108553c378a25688dcb57dca383664c36e293d60b1505815f67980ba9318a99') depends_on('[email protected]:') depends_on('libxext') depends_on('xproto', type='build') depends_on('xextproto', type='build') depends_on('[email protected]:', type='build') depends_on('pkgconfig', type='build') depends_on('util-macros', type='build')
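# ---------------------------------------------------------------------------
# Usage note (Spack CLI, outside this recipe): once this package file sits in a
# Spack repository, the library can be concretized and installed with, e.g.:
#
#   spack spec libxxf86vm
#   spack install [email protected]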
py
1a333055578d8bf8af26f6ee614410047170d93a
from django.contrib import admin from django.forms.models import ModelForm import django from moderation.models import ModeratedObject, MODERATION_DRAFT_STATE,\ MODERATION_STATUS_PENDING, MODERATION_STATUS_REJECTED,\ MODERATION_STATUS_APPROVED from django.utils.translation import ugettext as _ from moderation.forms import BaseModeratedObjectForm from moderation.helpers import automoderate from moderation.diff import get_changes_between_models def approve_objects(modeladmin, request, queryset): for obj in queryset: obj.approve(moderated_by=request.user) approve_objects.short_description = "Approve selected moderated objects" def reject_objects(modeladmin, request, queryset): for obj in queryset: obj.reject(moderated_by=request.user) reject_objects.short_description = "Reject selected moderated objects" def set_objects_as_pending(modeladmin, request, queryset): queryset.update(moderation_status=MODERATION_STATUS_PENDING) set_objects_as_pending.short_description = "Set selected moderated objects "\ "as Pending" class ModerationAdmin(admin.ModelAdmin): admin_integration_enabled = True def get_form(self, request, obj=None): if obj and self.admin_integration_enabled: return self.get_moderated_object_form(obj.__class__) return super(ModerationAdmin, self).get_form(request, obj) def change_view(self, request, object_id, extra_context=None): if self.admin_integration_enabled: self.send_message(request, object_id) return super(ModerationAdmin, self).change_view(request, object_id) def send_message(self, request, object_id): try: obj = self.model.objects.get(pk=object_id) moderated_obj = ModeratedObject.objects.get_for_instance(obj) msg = self.get_moderation_message(moderated_obj.moderation_status, moderated_obj.moderation_reason) except ModeratedObject.DoesNotExist: msg = self.get_moderation_message() self.message_user(request, msg) def save_model(self, request, obj, form, change): obj.save() automoderate(obj, request.user) def get_moderation_message(self, moderation_status=None, reason=None): if moderation_status == MODERATION_STATUS_PENDING: return _(u"Object is not viewable on site, "\ "it will be visible when moderator will accept it") elif moderation_status == MODERATION_STATUS_REJECTED: return _(u"Object has been rejected by moderator, "\ "reason: %s" % reason) elif moderation_status == MODERATION_STATUS_APPROVED: return _(u"Object has been approved by moderator "\ "and is visible on site") elif moderation_status is None: return _("This object is not registered with "\ "the moderation system.") def get_moderated_object_form(self, model_class): class ModeratedObjectForm(BaseModeratedObjectForm): class Meta: model = model_class return ModeratedObjectForm class ModeratedObjectAdmin(admin.ModelAdmin): date_hierarchy = 'date_created' list_display = ('content_object', 'content_type', 'date_created', 'moderation_status', 'moderated_by', 'moderation_date') list_filter = ['content_type', 'moderation_status'] change_form_template = 'moderation/moderate_object.html' change_list_template = 'moderation/moderated_objects_list.html' actions = [reject_objects, approve_objects, set_objects_as_pending] fieldsets = ( ('Object moderation', {'fields': ('moderation_reason',)}), ) def get_actions(self, request): actions = super(ModeratedObjectAdmin, self).get_actions(request) del actions['delete_selected'] return actions def content_object(self, obj): return unicode(obj.changed_object) def queryset(self, request): qs = super(ModeratedObjectAdmin, self).queryset(request) return 
qs.exclude(moderation_state=MODERATION_DRAFT_STATE) def get_moderated_object_form(self, model_class): class ModeratedObjectForm(ModelForm): class Meta: model = model_class return ModeratedObjectForm def change_view(self, request, object_id, extra_context=None): from moderation import moderation moderated_object = ModeratedObject.objects.get(pk=object_id) changed_object = moderated_object.changed_object moderator = moderation.get_moderator(changed_object.__class__) changes = get_changes_between_models( moderated_object.get_object_for_this_type(), changed_object, moderator.fields_exclude).values() if request.POST: admin_form = self.get_form(request, moderated_object)(request.POST) if admin_form.is_valid(): reason = admin_form.cleaned_data['moderation_reason'] if 'approve' in request.POST: moderated_object.approve(request.user, reason) elif 'reject' in request.POST: moderated_object.reject(request.user, reason) extra_context = {'changes': changes, 'django_version': django.get_version()[:3]} return super(ModeratedObjectAdmin, self).change_view(request, object_id, extra_context) admin.site.register(ModeratedObject, ModeratedObjectAdmin)
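# ---------------------------------------------------------------------------
# Usage sketch (hypothetical app and model names, not part of this module): a
# project enables the admin integration implemented by ModerationAdmin above by
# subclassing it for its own moderated model, roughly like this:
#
#   from django.contrib import admin
#   from moderation.admin import ModerationAdmin
#   from myapp.models import Article           # hypothetical moderated model
#
#   class ArticleAdmin(ModerationAdmin):
#       admin_integration_enabled = True        # the default; shown explicitly
#
#   admin.site.register(Article, ArticleAdmin)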
py
1a3330b62b2bcd5323bd52656b3c33cec1cd5937
import sys import collections import operator import itertools from bisect import bisect_left import os import glob import concurrent.futures import math import time import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages import DIcommonfn import DIread import DIreadlib Ent=DIcommonfn.Ent param_set={ "mzML_files", "library", "ms1_ppm", "ms2_ppm", "MS2_score", "sample_info", } param_dict=DIcommonfn.read_param(param_set) mzML_files=sorted(glob.glob(param_dict["mzML_files"])) basename_l=[os.path.basename(x)for x in mzML_files] ms1ppm=float(param_dict['ms1_ppm'])/1e6 MS2_score=float(param_dict["MS2_score"]) def get_sample_info(): sample_info_file=glob.glob(param_dict["sample_info"])[0] sample_type=dict() with open(sample_info_file)as sfile: next(sfile) for line in sfile: if line[0]!='#'and line.rstrip(): lsp=line.rstrip('\n').split('\t') sample_type[lsp[0]]=lsp[1].upper() return sample_type def cos_sim(list1,list2): if len(list1)!=len(list2): print('adf') sys.exit() if sum(list1)<=0 or sum(list2)<=0: return 0 return sum(math.sqrt(x*y) for x,y in zip(list1,list2))/math.sqrt(sum(list1)*sum(list2)) def aveMS1spec(mzML_file): basename0=os.path.basename(mzML_file) print(basename0) ms1_scans,ms2_scans,_=DIread.print_eic_ms(mzML_file) ms1_peaks=sorted((mz,i,ii) for ii,ms1_ in enumerate(ms1_scans) for mz,i in zip(ms1_.mz_l,ms1_.I_l)) avespec=[] while ms1_peaks: maxI=max(ms1_peaks,key=operator.itemgetter(1))[0] pos0=bisect_left(ms1_peaks,(maxI-.005,)) pos1=bisect_left(ms1_peaks,(maxI+.005,)) if len(set(ii for _,_,ii in ms1_peaks[pos0:pos1]))>len(ms1_scans)/3: avespec.append((maxI,sum(i for _,i,_ in ms1_peaks[pos0:pos1])/len(ms1_scans))) del ms1_peaks[pos0:pos1] return sorted(avespec),ms2_scans,basename0 from multiprocessing import freeze_support if __name__ == '__main__': start_time = time.time() print(len(mzML_files),'mzML files') freeze_support() sample_type=get_sample_info() with concurrent.futures.ProcessPoolExecutor(max_workers=9) as executor: avespec_l=list(executor.map(aveMS1spec, mzML_files)) ms1_peaks=sorted((mz,i,ii) for ms1_,_,ii in avespec_l for mz,i in ms1_) conspec=[] while ms1_peaks: maxI=max(ms1_peaks,key=operator.itemgetter(1))[0] pos0=bisect_left(ms1_peaks,(maxI-.005,)) pos1=bisect_left(ms1_peaks,(maxI+.005,)) if len(set(ii for _,_,ii in ms1_peaks[pos0:pos1]if sample_type[ii]!='BLANK'))>=5: conspec.append(maxI) del ms1_peaks[pos0:pos1] lib_ent=DIreadlib.get_cpds() con_tab=dict() con_ms1=dict() for ii,conmz in enumerate(sorted(conspec),1): con_tab[(conmz,ii)]=dict() err_bd=DIcommonfn.bound_ppm(conmz*ms1ppm) pos_0=bisect_left(lib_ent,(conmz-err_bd,)) pos_1=bisect_left(lib_ent,(conmz+err_bd,)) ms1name=[x.replace('\n','---') for _,x in sorted(set((abs(ent.Mmass-conmz),ent.name)for ent in lib_ent[pos_0:pos_1]))] con_ms1[conmz]=ms1name for avespec,ms2_scans,basename0 in avespec_l: pos=bisect_left(ms2_scans,(conmz,)) if pos>0 and ms2_scans[pos][0]-conmz > conmz-ms2_scans[pos-1][0]: pos-=1 expMS2=ms2_scans[pos] top10=sorted(expMS2[1].I_l,reverse=True)[min(len(expMS2[1].I_l)-1,9)] topN=[x for x in zip(expMS2[1].mz_l,expMS2[1].I_l)if x[1]>=top10] topmz=[x for x,_ in topN] topI=[x for _,x in topN] if abs(conmz-expMS2[0])>.51: print('{} {} {} {}'.format(abs(conmz-expMS2[0]),expMS2[0],conmz,basename0)) score_ent=[] for ent in lib_ent[pos_0:pos_1]: ms2_I=[] ent_I=[] xfrag=set() hpeak=0 mpeak=0 fent=sorted([(y,x)for x,y in zip(ent.mz,ent.I)if(ent.charge*conmz-x)>3.3],reverse=True)[:10] for f_I,f_mz in fent: err_bd=.01 
pos0=bisect_left(topmz,f_mz-err_bd) pos1=bisect_left(topmz,f_mz+err_bd,lo=pos0) ent_I.append(f_I) if pos0!=pos1: ms2_I.append(max(topI[pos0:pos1])) for i in range(pos0,pos1): xfrag.add(i) if f_I==fent[0][0]: hpeak=1 mpeak+=1 else: ms2_I.append(0) if hpeak: for nn,(f_mz,f_I) in enumerate(zip(topmz,topI)): if nn not in xfrag and (ent.charge*conmz-f_mz)>3.3: ms2_I.append(f_I) ent_I.append(0) cs=cos_sim(ent_I,ms2_I) score_ent.append((cs,ent,mpeak)) pos0=bisect_left(avespec,(conmz-.005,)) pos1=bisect_left(avespec,(conmz+.005,)) aveI=sum(x for _,x in avespec[pos0:pos1]) ave_mz=(sum(mz*i for mz,i in avespec[pos0:pos1])/aveI if aveI>0 else None) if score_ent: score_ent=max(score_ent) sc=score_ent[0] mpeak=score_ent[2] ent=score_ent[1] exp_mz=expMS2[1].mz_l exp_I=expMS2[1].I_l else: sc=None mpeak=None ent=Ent(conmz,'m/z={:.4f}'.format(conmz),tuple(),tuple(),'',1,None,'') exp_mz=exp_I=tuple() con_tab[(conmz,ii)][basename0]=(aveI,sc,ent,exp_mz,exp_I,ave_mz,mpeak) for basename0 in basename_l: open('ann_{}.txt'.format(basename0),'w') frago=open('quant_frag.txt','w') frago.write('group\tID\talt_IDs\tMS1\tmz\tadduct\tID\tcount\tfrag_m/z\t'+'\t'.join(basename_l)+'\t'+'\t'.join('score_'+x for x in basename_l)+'\t'+'\t'.join('mass_error_'+x for x in basename_l)+'\n') for x,y in sorted(con_tab.items(),key=lambda x:x[0][0]): c=collections.Counter(yy[2] for yy in y.values()) ent,cn=c.most_common(1)[0] identified=(''if all(yy[2].name.startswith('m/z=')for yy in y.values())else'*') id_with_count='{} ({})'.format(ent.name.replace('\n','---'),cn) alt_id_with_count=' --- '.join('{} ({})'.format(ent.name.replace('\n','---'),cn) for ent,cn in c.most_common()[1:]) count_pos=sum(1 for qs in y.values() if qs[0]>0) frago.write('{}\t{}\t{}\t{}\t{:.4f}\t{}\t{}\t{}\t{}'.format(x[1],id_with_count,alt_id_with_count,' --- '.join(con_ms1[x[0]]),x[0],ent.adduct,identified,count_pos,'precursor')) for basename0 in basename_l: qs=y[basename0] frago.write('\t{:.1f}'.format(qs[0])) for basename0 in basename_l: qs=y[basename0] frago.write('\t{:.2f}'.format(qs[1],qs[6])if qs and qs[1] else '\t') for basename0 in basename_l: qs=y[basename0] frago.write('\t{:.3f}'.format(ent.Mmass-qs[5])if qs[1]and qs[5]else '\t') frago.write('\n') if ent.mz: mzML_f=dict() for basename0 in basename_l: qs=y[basename0] mzML_f[basename0]=dict() for f_mz in ent.mz: pos0=bisect_left(qs[3],f_mz-.01) pos1=bisect_left(qs[3],f_mz+.01) mzML_f[basename0][f_mz]=(max(qs[4][pos0:pos1])if pos0<pos1 else 0) maxf=max(mzML_f[basename0].values()) if maxf>0: for f_mz in ent.mz: mzML_f[basename0][f_mz]/=maxf for f_mz,_ in sorted(zip(ent.mz,ent.I),key=operator.itemgetter(1),reverse=True): frago.write('{}\t{}\t{}\t{}\t{:.4f}\t{}\t{}\t{}\t{}'.format(x[1],id_with_count,alt_id_with_count,'',x[0],ent.adduct,identified,sum(1 for basename0 in basename_l if mzML_f[basename0][f_mz]>0),f_mz)) for basename0 in basename_l: frago.write('\t{:.2f}'.format(mzML_f[basename0][f_mz])) frago.write('\n') for basename0 in basename_l: qs=y[basename0] if qs[1]: with open('ann_{}.txt'.format(os.path.basename(basename0)),'a')as ann: ann.write('NAME:\n') ann.write('{}\n'.format(qs[2].name)) ann.write('ADDUCT: {}\n'.format(qs[2].adduct)) ann.write('TARGET_M/Z: {:.6f}\n'.format(x[0])) ann.write('DOT_PRODUCT: {:.3f}\n'.format(qs[1])) ann.write('EXPERIMENTAL_SPECTRUM:\n') for mz,i in sorted(zip(qs[3],qs[4]),key=operator.itemgetter(1),reverse=True): ann.write('{:.6f} {:.2f}\n'.format(mz,i)) ann.write('LIBRARY_SPECTRUM:\n') for mz,i in zip(qs[2].mz,qs[2].I): ann.write('{:.6f} {:.2f}\n'.format(mz,i)) 
ann.write('\n') with PdfPages('aveMS1spec.pdf') as pdf0: for avespec,_,basename0 in avespec_l: plt.figure(figsize=(9, 4)) ax=plt.subplot(1,1,1) ax.set_title('{} ave. MS1'.format(basename0[:-5])) exp_=ax.vlines(x=[mz for mz,_ in avespec], ymin=0, ymax=[i for _,i in avespec], color='black',lw=.5) ax.set_xlabel('m/z')#,fontsize=22) ax.set_ylabel('intensity')#,fontsize=22) pdf0.savefig() plt.close() print("Run time = {:.1f} mins".format(((time.time() - start_time)/60)))
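# ---------------------------------------------------------------------------
# Illustrative check of the square-root-weighted cosine score used above:
# cos_sim() compares two aligned intensity lists with sqrt weighting, so two
# identical spectra score 1.0 and a spectrum scored against all zeros scores 0.
# Standalone restatement of the same logic, for clarity only:
import math as _math
def _cos_sim_demo(list1, list2):
    if sum(list1) <= 0 or sum(list2) <= 0:
        return 0
    return sum(_math.sqrt(x * y) for x, y in zip(list1, list2)) / \
        _math.sqrt(sum(list1) * sum(list2))

print(_cos_sim_demo([10, 5, 1], [10, 5, 1]))  # 1.0
print(_cos_sim_demo([10, 5, 1], [0, 0, 0]))   # 0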
py
1a33328d77f924f83f6c81831600ae3c9221c7c8
#!/usr/bin/env python3 # Copyright (c) 2021 The Elixir Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """RPCs that handle raw transaction packages.""" from decimal import Decimal from io import BytesIO import random from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE from test_framework.test_framework import ElixirTestFramework from test_framework.messages import ( BIP125_SEQUENCE_NUMBER, COIN, CTransaction, CTxInWitness, ) from test_framework.script import ( CScript, OP_TRUE, ) from test_framework.util import ( assert_equal, hex_str_to_bytes, ) class RPCPackagesTest(ElixirTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def assert_testres_equal(self, package_hex, testres_expected): """Shuffle package_hex and assert that the testmempoolaccept result matches testres_expected. This should only be used to test packages where the order does not matter. The ordering of transactions in package_hex and testres_expected must match. """ shuffled_indeces = list(range(len(package_hex))) random.shuffle(shuffled_indeces) shuffled_package = [package_hex[i] for i in shuffled_indeces] shuffled_testres = [testres_expected[i] for i in shuffled_indeces] assert_equal(shuffled_testres, self.nodes[0].testmempoolaccept(shuffled_package)) def run_test(self): self.log.info("Generate blocks to create UTXOs") node = self.nodes[0] self.privkeys = [node.get_deterministic_priv_key().key] self.address = node.get_deterministic_priv_key().address self.coins = [] # The last 100 coinbase transactions are premature for b in node.generatetoaddress(200, self.address)[:100]: coinbase = node.getblock(blockhash=b, verbosity=2)["tx"][0] self.coins.append({ "txid": coinbase["txid"], "amount": coinbase["vout"][0]["value"], "scriptPubKey": coinbase["vout"][0]["scriptPubKey"], }) # Create some transactions that can be reused throughout the test. Never submit these to mempool. self.independent_txns_hex = [] self.independent_txns_testres = [] for _ in range(3): coin = self.coins.pop() rawtx = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}], {self.address : coin["amount"] - Decimal("0.0001")}) signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys) assert signedtx["complete"] testres = node.testmempoolaccept([signedtx["hex"]]) assert testres[0]["allowed"] self.independent_txns_hex.append(signedtx["hex"]) # testmempoolaccept returns a list of length one, avoid creating a 2D list self.independent_txns_testres.append(testres[0]) self.independent_txns_testres_blank = [{ "txid": res["txid"], "wtxid": res["wtxid"]} for res in self.independent_txns_testres] self.test_independent() self.test_chain() self.test_multiple_children() self.test_multiple_parents() self.test_conflicting() self.test_rbf() def chain_transaction(self, parent_txid, parent_value, n=0, parent_locking_script=None): """Build a transaction that spends parent_txid.vout[n] and produces one output with amount = parent_value with a fee deducted. Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created). 
""" node = self.nodes[0] inputs = [{"txid": parent_txid, "vout": n}] my_value = parent_value - Decimal("0.0001") outputs = {self.address : my_value} rawtx = node.createrawtransaction(inputs, outputs) prevtxs = [{ "txid": parent_txid, "vout": n, "scriptPubKey": parent_locking_script, "amount": parent_value, }] if parent_locking_script else None signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys, prevtxs=prevtxs) tx = CTransaction() assert signedtx["complete"] tx.deserialize(BytesIO(hex_str_to_bytes(signedtx["hex"]))) return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex()) def test_independent(self): self.log.info("Test multiple independent transactions in a package") node = self.nodes[0] # For independent transactions, order doesn't matter. self.assert_testres_equal(self.independent_txns_hex, self.independent_txns_testres) self.log.info("Test an otherwise valid package with an extra garbage tx appended") garbage_tx = node.createrawtransaction([{"txid": "00" * 32, "vout": 5}], {self.address: 1}) tx = CTransaction() tx.deserialize(BytesIO(hex_str_to_bytes(garbage_tx))) # Only the txid and wtxids are returned because validation is incomplete for the independent txns. # Package validation is atomic: if the node cannot find a UTXO for any single tx in the package, # it terminates immediately to avoid unnecessary, expensive signature verification. package_bad = self.independent_txns_hex + [garbage_tx] testres_bad = self.independent_txns_testres_blank + [{"txid": tx.rehash(), "wtxid": tx.getwtxid(), "allowed": False, "reject-reason": "missing-inputs"}] self.assert_testres_equal(package_bad, testres_bad) self.log.info("Check testmempoolaccept tells us when some transactions completed validation successfully") coin = self.coins.pop() tx_bad_sig_hex = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}], {self.address : coin["amount"] - Decimal("0.0001")}) tx_bad_sig = CTransaction() tx_bad_sig.deserialize(BytesIO(hex_str_to_bytes(tx_bad_sig_hex))) testres_bad_sig = node.testmempoolaccept(self.independent_txns_hex + [tx_bad_sig_hex]) # By the time the signature for the last transaction is checked, all the other transactions # have been fully validated, which is why the node returns full validation results for all # transactions here but empty results in other cases. 
assert_equal(testres_bad_sig, self.independent_txns_testres + [{ "txid": tx_bad_sig.rehash(), "wtxid": tx_bad_sig.getwtxid(), "allowed": False, "reject-reason": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)" }]) self.log.info("Check testmempoolaccept reports txns in packages that exceed max feerate") coin = self.coins.pop() tx_high_fee_raw = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}], {self.address : coin["amount"] - Decimal("0.999")}) tx_high_fee_signed = node.signrawtransactionwithkey(hexstring=tx_high_fee_raw, privkeys=self.privkeys) assert tx_high_fee_signed["complete"] tx_high_fee = CTransaction() tx_high_fee.deserialize(BytesIO(hex_str_to_bytes(tx_high_fee_signed["hex"]))) testres_high_fee = node.testmempoolaccept([tx_high_fee_signed["hex"]]) assert_equal(testres_high_fee, [ {"txid": tx_high_fee.rehash(), "wtxid": tx_high_fee.getwtxid(), "allowed": False, "reject-reason": "max-fee-exceeded"} ]) package_high_fee = [tx_high_fee_signed["hex"]] + self.independent_txns_hex testres_package_high_fee = node.testmempoolaccept(package_high_fee) assert_equal(testres_package_high_fee, testres_high_fee + self.independent_txns_testres_blank) def test_chain(self): node = self.nodes[0] first_coin = self.coins.pop() # Chain of 25 transactions parent_locking_script = None txid = first_coin["txid"] chain_hex = [] chain_txns = [] value = first_coin["amount"] for _ in range(25): (tx, txhex, value, parent_locking_script) = self.chain_transaction(txid, value, 0, parent_locking_script) txid = tx.rehash() chain_hex.append(txhex) chain_txns.append(tx) self.log.info("Check that testmempoolaccept requires packages to be sorted by dependency") assert_equal(node.testmempoolaccept(rawtxs=chain_hex[::-1]), [{"txid": tx.rehash(), "wtxid": tx.getwtxid(), "package-error": "package-not-sorted"} for tx in chain_txns[::-1]]) self.log.info("Testmempoolaccept a chain of 25 transactions") testres_multiple = node.testmempoolaccept(rawtxs=chain_hex) testres_single = [] # Test accept and then submit each one individually, which should be identical to package test accept for rawtx in chain_hex: testres = node.testmempoolaccept([rawtx]) testres_single.append(testres[0]) # Submit the transaction now so its child should have no problem validating node.sendrawtransaction(rawtx) assert_equal(testres_single, testres_multiple) # Clean up by clearing the mempool node.generate(1) def test_multiple_children(self): node = self.nodes[0] self.log.info("Testmempoolaccept a package in which a transaction has two children within the package") first_coin = self.coins.pop() value = (first_coin["amount"] - Decimal("0.0002")) / 2 # Deduct reasonable fee and make 2 outputs inputs = [{"txid": first_coin["txid"], "vout": 0}] outputs = [{self.address : value}, {ADDRESS_BCRT1_P2WSH_OP_TRUE : value}] rawtx = node.createrawtransaction(inputs, outputs) parent_signed = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys) parent_tx = CTransaction() assert parent_signed["complete"] parent_tx.deserialize(BytesIO(hex_str_to_bytes(parent_signed["hex"]))) parent_txid = parent_tx.rehash() assert node.testmempoolaccept([parent_signed["hex"]])[0]["allowed"] parent_locking_script_a = parent_tx.vout[0].scriptPubKey.hex() child_value = value - Decimal("0.0001") # Child A (_, tx_child_a_hex, _, _) = self.chain_transaction(parent_txid, child_value, 0, parent_locking_script_a) assert not node.testmempoolaccept([tx_child_a_hex])[0]["allowed"] # Child B rawtx_b = 
node.createrawtransaction([{"txid": parent_txid, "vout": 1}], {self.address : child_value}) tx_child_b = CTransaction() tx_child_b.deserialize(BytesIO(hex_str_to_bytes(rawtx_b))) tx_child_b.wit.vtxinwit = [CTxInWitness()] tx_child_b.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])] tx_child_b_hex = tx_child_b.serialize().hex() assert not node.testmempoolaccept([tx_child_b_hex])[0]["allowed"] self.log.info("Testmempoolaccept with entire package, should work with children in either order") testres_multiple_ab = node.testmempoolaccept(rawtxs=[parent_signed["hex"], tx_child_a_hex, tx_child_b_hex]) testres_multiple_ba = node.testmempoolaccept(rawtxs=[parent_signed["hex"], tx_child_b_hex, tx_child_a_hex]) assert all([testres["allowed"] for testres in testres_multiple_ab + testres_multiple_ba]) testres_single = [] # Test accept and then submit each one individually, which should be identical to package testaccept for rawtx in [parent_signed["hex"], tx_child_a_hex, tx_child_b_hex]: testres = node.testmempoolaccept([rawtx]) testres_single.append(testres[0]) # Submit the transaction now so its child should have no problem validating node.sendrawtransaction(rawtx) assert_equal(testres_single, testres_multiple_ab) def create_child_with_parents(self, parents_tx, values, locking_scripts): """Creates a transaction that spends the first output of each parent in parents_tx.""" num_parents = len(parents_tx) total_value = sum(values) inputs = [{"txid": tx.rehash(), "vout": 0} for tx in parents_tx] outputs = {self.address : total_value - num_parents * Decimal("0.0001")} rawtx_child = self.nodes[0].createrawtransaction(inputs, outputs) prevtxs = [] for i in range(num_parents): prevtxs.append({"txid": parents_tx[i].rehash(), "vout": 0, "scriptPubKey": locking_scripts[i], "amount": values[i]}) signedtx_child = self.nodes[0].signrawtransactionwithkey(hexstring=rawtx_child, privkeys=self.privkeys, prevtxs=prevtxs) assert signedtx_child["complete"] return signedtx_child["hex"] def test_multiple_parents(self): node = self.nodes[0] self.log.info("Testmempoolaccept a package in which a transaction has multiple parents within the package") for num_parents in [2, 10, 24]: # Test a package with num_parents parents and 1 child transaction. 
package_hex = [] parents_tx = [] values = [] parent_locking_scripts = [] for _ in range(num_parents): parent_coin = self.coins.pop() value = parent_coin["amount"] (tx, txhex, value, parent_locking_script) = self.chain_transaction(parent_coin["txid"], value) package_hex.append(txhex) parents_tx.append(tx) values.append(value) parent_locking_scripts.append(parent_locking_script) child_hex = self.create_child_with_parents(parents_tx, values, parent_locking_scripts) # Package accept should work with the parents in any order (as long as parents come before child) for _ in range(10): random.shuffle(package_hex) testres_multiple = node.testmempoolaccept(rawtxs=package_hex + [child_hex]) assert all([testres["allowed"] for testres in testres_multiple]) testres_single = [] # Test accept and then submit each one individually, which should be identical to package testaccept for rawtx in package_hex + [child_hex]: testres_single.append(node.testmempoolaccept([rawtx])[0]) # Submit the transaction now so its child should have no problem validating node.sendrawtransaction(rawtx) assert_equal(testres_single, testres_multiple) def test_conflicting(self): node = self.nodes[0] prevtx = self.coins.pop() inputs = [{"txid": prevtx["txid"], "vout": 0}] output1 = {node.get_deterministic_priv_key().address: 50 - 0.00125} output2 = {ADDRESS_BCRT1_P2WSH_OP_TRUE: 50 - 0.00125} # tx1 and tx2 share the same inputs rawtx1 = node.createrawtransaction(inputs, output1) rawtx2 = node.createrawtransaction(inputs, output2) signedtx1 = node.signrawtransactionwithkey(hexstring=rawtx1, privkeys=self.privkeys) signedtx2 = node.signrawtransactionwithkey(hexstring=rawtx2, privkeys=self.privkeys) tx1 = CTransaction() tx1.deserialize(BytesIO(hex_str_to_bytes(signedtx1["hex"]))) tx2 = CTransaction() tx2.deserialize(BytesIO(hex_str_to_bytes(signedtx2["hex"]))) assert signedtx1["complete"] assert signedtx2["complete"] # Ensure tx1 and tx2 are valid by themselves assert node.testmempoolaccept([signedtx1["hex"]])[0]["allowed"] assert node.testmempoolaccept([signedtx2["hex"]])[0]["allowed"] self.log.info("Test duplicate transactions in the same package") testres = node.testmempoolaccept([signedtx1["hex"], signedtx1["hex"]]) assert_equal(testres, [ {"txid": tx1.rehash(), "wtxid": tx1.getwtxid(), "package-error": "conflict-in-package"}, {"txid": tx1.rehash(), "wtxid": tx1.getwtxid(), "package-error": "conflict-in-package"} ]) self.log.info("Test conflicting transactions in the same package") testres = node.testmempoolaccept([signedtx1["hex"], signedtx2["hex"]]) assert_equal(testres, [ {"txid": tx1.rehash(), "wtxid": tx1.getwtxid(), "package-error": "conflict-in-package"}, {"txid": tx2.rehash(), "wtxid": tx2.getwtxid(), "package-error": "conflict-in-package"} ]) def test_rbf(self): node = self.nodes[0] coin = self.coins.pop() inputs = [{"txid": coin["txid"], "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}] fee = Decimal('0.00125000') output = {node.get_deterministic_priv_key().address: 50 - fee} raw_replaceable_tx = node.createrawtransaction(inputs, output) signed_replaceable_tx = node.signrawtransactionwithkey(hexstring=raw_replaceable_tx, privkeys=self.privkeys) testres_replaceable = node.testmempoolaccept([signed_replaceable_tx["hex"]]) replaceable_tx = CTransaction() replaceable_tx.deserialize(BytesIO(hex_str_to_bytes(signed_replaceable_tx["hex"]))) assert_equal(testres_replaceable, [ {"txid": replaceable_tx.rehash(), "wtxid": replaceable_tx.getwtxid(), "allowed": True, "vsize": replaceable_tx.get_vsize(), "fees": { "base": fee }} ]) # 
Replacement transaction is identical except has double the fee replacement_tx = CTransaction() replacement_tx.deserialize(BytesIO(hex_str_to_bytes(signed_replaceable_tx["hex"]))) replacement_tx.vout[0].nValue -= int(fee * COIN) # Doubled fee signed_replacement_tx = node.signrawtransactionwithkey(replacement_tx.serialize().hex(), self.privkeys) replacement_tx.deserialize(BytesIO(hex_str_to_bytes(signed_replacement_tx["hex"]))) self.log.info("Test that transactions within a package cannot replace each other") testres_rbf_conflicting = node.testmempoolaccept([signed_replaceable_tx["hex"], signed_replacement_tx["hex"]]) assert_equal(testres_rbf_conflicting, [ {"txid": replaceable_tx.rehash(), "wtxid": replaceable_tx.getwtxid(), "package-error": "conflict-in-package"}, {"txid": replacement_tx.rehash(), "wtxid": replacement_tx.getwtxid(), "package-error": "conflict-in-package"} ]) self.log.info("Test that packages cannot conflict with mempool transactions, even if a valid BIP125 RBF") node.sendrawtransaction(signed_replaceable_tx["hex"]) testres_rbf_single = node.testmempoolaccept([signed_replacement_tx["hex"]]) # This transaction is a valid BIP125 replace-by-fee assert testres_rbf_single[0]["allowed"] testres_rbf_package = self.independent_txns_testres_blank + [{ "txid": replacement_tx.rehash(), "wtxid": replacement_tx.getwtxid(), "allowed": False, "reject-reason": "txn-mempool-conflict" }] self.assert_testres_equal(self.independent_txns_hex + [signed_replacement_tx["hex"]], testres_rbf_package) if __name__ == "__main__": RPCPackagesTest().main()
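# ---------------------------------------------------------------------------
# Illustrative sketch (pure Python, not part of the test framework): test_chain
# above expects "package-not-sorted" when children precede their parents, i.e.
# a package must be submitted in dependency order. Conceptually:
def sort_package(deps):
    """Order txids so every parent precedes its children (no cycles assumed)."""
    ordered, placed = [], set()
    while len(ordered) < len(deps):
        for txid, parents in deps.items():
            if txid not in placed and parents <= placed:
                ordered.append(txid)
                placed.add(txid)
    return ordered

# hypothetical package: child "c" spends outputs of both "a" and "b"
print(sort_package({"a": set(), "b": set(), "c": {"a", "b"}}))  # ['a', 'b', 'c']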
py
1a33341119be8b886ff2f9176c4ea5f3631c2683
# # This file is part of the MicroPython project, http://micropython.org/ # # The MIT License (MIT) # # Copyright (c) 2017 SummerGift <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # import os from sys import argv import argparse from gen_qstr import make_bytes def qstr_end_idx(str): min = 255 idx = str.find(')') if(idx > -1): min = idx idx = str.find(',') if idx > -1 and idx < min: min = idx idx = str.find(':') if idx > -1 and idx < min: min = idx idx = str.find('=') if idx > -1 and idx < min: min = idx idx = str.find(' ') if idx > -1 and idx < min: min = idx idx = str.find(';') if idx > -1 and idx < min: min = idx idx = str.find('<') if idx > -1 and idx < min: min = idx idx = str.find('}') if idx > -1 and idx < min: min = idx idx = str.find('{') if idx > -1 and idx < min: min = idx idx = str.find('.') if idx > -1 and idx < min: min = idx return min def spec_charactor_filter(string): if ('#' in string): return False if ('/' in string): return False if ('*' in string): return False if ('//' in string): return False if ('(' in string): return False if (')' in string): return False if ('{' in string): return False if ('}' in string): return False if (' ' in string): return False if ('\\' in string): return False if ('}' in string): return False if ('-' in string): return False return True def read_qstr(file,list): fd = open(file) try: for line in fd: idx = 0 for i in range(5): idx = line.find('MP_QSTR_',idx) if (idx >= 0): str = line[idx+len('MP_QSTR_'):-1] lidx = qstr_end_idx(str) qstr = str[0:lidx] if spec_charactor_filter(qstr): if qstr not in list: list.append(qstr) idx = idx +1 else: idx = line.find('Q(') if idx ==0: #print("**:",line+' in '+file) str = line[idx+len('Q('):-1] lidx = qstr_end_idx(str) qstr = str[0:lidx] if spec_charactor_filter(qstr): if qstr not in list: list.append(qstr) break except UnicodeDecodeError as e: print(e) print('in',file) fd.close() def gen_qstr(path,hash_len=1): headerfile = './qstrdefs.generated.h' headerdef = '// This file was automatically generated by auto_generate_qstr.py' + '\n'+ '\n' headerdef = headerdef + '\n' + ' QDEF(MP_QSTRnull, (const byte*)"\\x00\\x00\\x00" "") '+ '\n' try: os.remove(headerfile) except WindowsError as e: print(e) qstr_list = [] total = 0 for (root, dirs, files) in os.walk(path): for file in files: if (file.split(".")[-1] == 'c'): read_qstr(root + '/' + file, qstr_list) if (file.split(".")[-1] == 'h'): read_qstr(root + '/' + file, qstr_list) outfd = open(headerfile, 'wt') outfd.writelines(headerdef) for qstr in 
qstr_list: out = make_bytes(1, hash_len, qstr) outfd.writelines(out + '\n'); # print(out) outfd.close() print('Generate QString count:', len(qstr_list)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-d', '--dir', type=str, default='../', help='txt file') args, unparsed = parser.parse_known_args() gen_qstr(args.dir)
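# ---------------------------------------------------------------------------
# Worked example of the terminator search above (illustrative only):
# qstr_end_idx() returns the index of the first character that can end an
# identifier, so lines such as `Q(hello)` or `MP_QSTR_hello, 5` both yield the
# bare name "hello":
#
#   qstr_end_idx("hello), 5")   ->  5   (')' is the first terminator)
#   "hello), 5"[0:5]            ->  "hello"
#   qstr_end_idx("hello, 5")    ->  5   (',' terminates)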
py
1a333415ef8f6351fa215bb23e6df928890e19a1
from __future__ import unicode_literals from moto.acm import acm_backends from moto.apigateway import apigateway_backends from moto.autoscaling import autoscaling_backends from moto.awslambda import lambda_backends from moto.cloudformation import cloudformation_backends from moto.cloudwatch import cloudwatch_backends from moto.cognitoidentity import cognitoidentity_backends from moto.cognitoidp import cognitoidp_backends from moto.core import moto_api_backends from moto.datapipeline import datapipeline_backends from moto.dynamodb import dynamodb_backends from moto.dynamodb2 import dynamodb_backends2 from moto.dynamodbstreams import dynamodbstreams_backends from moto.ec2 import ec2_backends from moto.ecr import ecr_backends from moto.ecs import ecs_backends from moto.elb import elb_backends from moto.elbv2 import elbv2_backends from moto.emr import emr_backends from moto.events import events_backends from moto.glacier import glacier_backends from moto.glue import glue_backends from moto.iam import iam_backends from moto.instance_metadata import instance_metadata_backends from moto.kinesis import kinesis_backends from moto.kms import kms_backends from moto.logs import logs_backends from moto.opsworks import opsworks_backends from moto.organizations import organizations_backends from moto.polly import polly_backends from moto.rds2 import rds2_backends from moto.redshift import redshift_backends from moto.route53 import route53_backends from moto.s3 import s3_backends from moto.ses import ses_backends from moto.secretsmanager import secretsmanager_backends from moto.sns import sns_backends from moto.sqs import sqs_backends from moto.ssm import ssm_backends from moto.sts import sts_backends from moto.swf import swf_backends from moto.xray import xray_backends from moto.iot import iot_backends from moto.iotdata import iotdata_backends from moto.batch import batch_backends from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends BACKENDS = { 'acm': acm_backends, 'apigateway': apigateway_backends, 'autoscaling': autoscaling_backends, 'batch': batch_backends, 'cloudformation': cloudformation_backends, 'cloudwatch': cloudwatch_backends, 'cognito-identity': cognitoidentity_backends, 'cognito-idp': cognitoidp_backends, 'datapipeline': datapipeline_backends, 'dynamodb': dynamodb_backends, 'dynamodb2': dynamodb_backends2, 'dynamodbstreams': dynamodbstreams_backends, 'ec2': ec2_backends, 'ecr': ecr_backends, 'ecs': ecs_backends, 'elb': elb_backends, 'elbv2': elbv2_backends, 'events': events_backends, 'emr': emr_backends, 'glacier': glacier_backends, 'glue': glue_backends, 'iam': iam_backends, 'moto_api': moto_api_backends, 'instance_metadata': instance_metadata_backends, 'logs': logs_backends, 'kinesis': kinesis_backends, 'kms': kms_backends, 'opsworks': opsworks_backends, 'organizations': organizations_backends, 'polly': polly_backends, 'redshift': redshift_backends, 'rds': rds2_backends, 's3': s3_backends, 's3bucket_path': s3_backends, 'ses': ses_backends, 'secretsmanager': secretsmanager_backends, 'sns': sns_backends, 'sqs': sqs_backends, 'ssm': ssm_backends, 'sts': sts_backends, 'swf': swf_backends, 'route53': route53_backends, 'lambda': lambda_backends, 'xray': xray_backends, 'resourcegroupstaggingapi': resourcegroupstaggingapi_backends, 'iot': iot_backends, 'iot-data': iotdata_backends, } def get_model(name, region_name): for backends in BACKENDS.values(): for region, backend in backends.items(): if region == region_name: models = getattr(backend.__class__, '__models__', {}) if 
name in models: return list(getattr(backend, models[name])())
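# ---------------------------------------------------------------------------
# Usage sketch (assumes this module is importable as moto.backends): BACKENDS
# maps a service name to a {region: backend} dict, so the services that expose
# a backend for a given region can be listed with a comprehension:
#
#   from moto.backends import BACKENDS
#   us_east = sorted(name for name, regions in BACKENDS.items()
#                    if 'us-east-1' in regions)
#   print(us_east)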
py
1a3334977eaa4d646e8290d7128541284bc9cf05
from unittest.mock import patch from django.core.management import call_command from django.db.utils import OperationalError from django.test import TestCase class CommandsTestCase(TestCase): def test_wait_for_db_ready(self): """Test waiting for db when db is available""" with patch('django.db.utils.ConnectionHandler.__getitem__') as gi: gi.return_value = True call_command('wait_for_db') self.assertEqual(gi.call_count, 1) @patch('time.sleep', return_value=None) def test_wait_for_db(self, ts): """Test waiting for db""" with patch('django.db.utils.ConnectionHandler.__getitem__') as gi: gi.side_effect = [OperationalError] * 5 + [True] call_command('wait_for_db') self.assertEqual(gi.call_count, 6)
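# ---------------------------------------------------------------------------
# Hedged sketch (not part of this test module): the wait_for_db command these
# tests exercise is not shown here. A conventional implementation consistent
# with the mocks above (retry on OperationalError, sleep between attempts),
# placed at e.g. core/management/commands/wait_for_db.py (path assumed), would
# look roughly like:
#
#   import time
#   from django.core.management.base import BaseCommand
#   from django.db import connections
#   from django.db.utils import OperationalError
#
#   class Command(BaseCommand):
#       """Pause execution until the database connection is available."""
#       def handle(self, *args, **options):
#           self.stdout.write('Waiting for database...')
#           db_conn = None
#           while not db_conn:
#               try:
#                   db_conn = connections['default']
#               except OperationalError:
#                   self.stdout.write('Database unavailable, waiting 1 second...')
#                   time.sleep(1)
#           self.stdout.write(self.style.SUCCESS('Database available!'))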
py
1a3335ba02d8d10bfb0e85f4f131b1804f47cd82
""" Passwords and secrets tools """ import string import secrets def password_generator(password_type=None, length=None) : """A random password generator password_type(str): the type of password, deciding charset length(int): password length, default 20 chars return(str): a random password raise: ValueError if wrong type """ if length is None : password_length = 20 elif length < 0 : raise ValueError('Length cannot be negative!') else : password_length = int(length) if password_type is None : password_type = 'plaintext' # Available charset _charset = { 'mysql': string.ascii_letters + string.digits + r'!@#$%^&*', 'ssh': string.ascii_letters + string.digits + r'~!@#$%^&*)(}{?+/=][,.><;:`', 'username': string.ascii_letters + string.digits + '_', 'plaintext': string.ascii_letters + string.digits } # Define charset according to password type try : alphabet = _charset[password_type] except KeyError : raise ValueError('Wrong type!') # A password consists of random characters password = ''.join(secrets.choice(alphabet) for i in range(password_length)) # A username starts with a letter if password_type == 'username' : password = secrets.choice(string.ascii_letters) + password[1:] return password def main() : # Parse arguments when executed in terminal from argparse import ArgumentParser from sys import exit parser = ArgumentParser(description='A password generator') parser.add_argument('--length', '-l', type=int, default=None, help='length, default to 20') parser.add_argument('--type', '-t', type=str, default=None, help='password types, including: mysql / ssh / username / plaintext(default)') args = parser.parse_args() try : password_to_show = password_generator(password_type=args.type, length=args.length) except ValueError as e : print('Something went wrong:', e) exit(2) print(password_to_show, end='') if __name__ == '__main__' : main()
py
1a33361d072ab356aa2c82ebc43fcc55472025e9
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Implementation of Neural Net (NN) functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import candidate_sampling_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import variables def log_poisson_loss(targets, log_input, compute_full_loss=False, name=None): """Computes log Poisson loss given `log_input`. Gives the log-likelihood loss between the prediction and the target under the assumption that the target has a Poisson distribution. Caveat: By default, this is not the exact loss, but the loss minus a constant term [log(z!)]. That has no effect for optimization, but does not play well with relative loss comparisons. To compute an approximation of the log factorial term, specify compute_full_loss=True to enable Stirling's Approximation. For brevity, let `c = log(x) = log_input`, `z = targets`. The log Poisson loss is -log(exp(-x) * (x^z) / z!) = -log(exp(-x) * (x^z)) + log(z!) ~ -log(exp(-x)) - log(x^z) [+ z * log(z) - z + 0.5 * log(2 * pi * z)] [ Note the second term is the Stirling's Approximation for log(z!). It is invariant to x and does not affect optimization, though important for correct relative loss comparisons. It is only computed when compute_full_loss == True. ] = x - z * log(x) [+ z * log(z) - z + 0.5 * log(2 * pi * z)] = exp(c) - z * c [+ z * log(z) - z + 0.5 * log(2 * pi * z)] Args: targets: A `Tensor` of the same type and shape as `log_input`. log_input: A `Tensor` of type `float32` or `float64`. compute_full_loss: whether to compute the full loss. If false, a constant term is dropped in favor of more efficient optimization. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `log_input` with the componentwise logistic losses. Raises: ValueError: If `log_input` and `targets` do not have the same shape. """ with ops.name_scope(name, "log_poisson_loss", [log_input, targets]) as name: log_input = ops.convert_to_tensor(log_input, name="log_input") targets = ops.convert_to_tensor(targets, name="targets") try: targets.get_shape().merge_with(log_input.get_shape()) except ValueError: raise ValueError( "log_input and targets must have the same shape (%s vs %s)" % (log_input.get_shape(), targets.get_shape())) result = math_ops.exp(log_input) - log_input * targets if compute_full_loss: # need to create constant tensors here so that their dtypes can be matched # to that of the targets. 
point_five = constant_op.constant(0.5, dtype=targets.dtype) two_pi = constant_op.constant(2 * math.pi, dtype=targets.dtype) stirling_approx = (targets * math_ops.log(targets)) - targets + ( point_five * math_ops.log(two_pi * targets)) zeros = array_ops.zeros_like(targets, dtype=targets.dtype) ones = array_ops.ones_like(targets, dtype=targets.dtype) cond = math_ops.logical_and(targets >= zeros, targets <= ones) result += array_ops.where(cond, zeros, stirling_approx) return result def sigmoid_cross_entropy_with_logits(_sentinel=None, # pylint: disable=invalid-name labels=None, logits=None, name=None): """Computes sigmoid cross entropy given `logits`. Measures the probability error in discrete classification tasks in which each class is independent and not mutually exclusive. For instance, one could perform multilabel classification where a picture can contain both an elephant and a dog at the same time. For brevity, let `x = logits`, `z = labels`. The logistic loss is z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + log(1 + exp(-x)) = x - x * z + log(1 + exp(-x)) For x < 0, to avoid overflow in exp(-x), we reformulate the above x - x * z + log(1 + exp(-x)) = log(exp(x)) - x * z + log(1 + exp(-x)) = - x * z + log(1 + exp(x)) Hence, to ensure stability and avoid overflow, the implementation uses this equivalent formulation max(x, 0) - x * z + log(1 + exp(-abs(x))) `logits` and `labels` must have the same type and shape. Args: _sentinel: Used to prevent positional parameters. Internal, do not use. labels: A `Tensor` of the same type and shape as `logits`. logits: A `Tensor` of type `float32` or `float64`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise logistic losses. Raises: ValueError: If `logits` and `labels` do not have the same shape. """ # pylint: disable=protected-access nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel, labels, logits) # pylint: enable=protected-access with ops.name_scope(name, "logistic_loss", [logits, labels]) as name: logits = ops.convert_to_tensor(logits, name="logits") labels = ops.convert_to_tensor(labels, name="labels") try: labels.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError("logits and labels must have the same shape (%s vs %s)" % (logits.get_shape(), labels.get_shape())) # The logistic loss formula from above is # x - x * z + log(1 + exp(-x)) # For x < 0, a more numerically stable formula is # -x * z + log(1 + exp(x)) # Note that these two expressions can be combined into the following: # max(x, 0) - x * z + log(1 + exp(-abs(x))) # To allow computing gradients at zero, we define custom versions of max and # abs functions. zeros = array_ops.zeros_like(logits, dtype=logits.dtype) cond = (logits >= zeros) relu_logits = array_ops.where(cond, logits, zeros) neg_abs_logits = array_ops.where(cond, -logits, logits) return math_ops.add(relu_logits - logits * labels, math_ops.log1p(math_ops.exp(neg_abs_logits)), name=name) def weighted_cross_entropy_with_logits(targets, logits, pos_weight, name=None): """Computes a weighted cross entropy. 
This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`, allows one to trade off recall and precision by up- or down-weighting the cost of a positive error relative to a negative error. The usual cross-entropy cost is defined as: targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits)) The argument `pos_weight` is used as a multiplier for the positive targets: targets * -log(sigmoid(logits)) * pos_weight + (1 - targets) * -log(1 - sigmoid(logits)) For brevity, let `x = logits`, `z = targets`, `q = pos_weight`. The loss is: qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x)) = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x)) Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow, the implementation uses (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) `logits` and `targets` must have the same type and shape. Args: targets: A `Tensor` of the same type and shape as `logits`. logits: A `Tensor` of type `float32` or `float64`. pos_weight: A coefficient to use on the positive examples. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise weighted logistic losses. Raises: ValueError: If `logits` and `targets` do not have the same shape. """ with ops.name_scope(name, "logistic_loss", [logits, targets]) as name: logits = ops.convert_to_tensor(logits, name="logits") targets = ops.convert_to_tensor(targets, name="targets") try: targets.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError("logits and targets must have the same shape (%s vs %s)" % (logits.get_shape(), targets.get_shape())) # The logistic loss formula from above is # (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x)) # For x < 0, a more numerically stable formula is # (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(x)) - l * x # To avoid branching, we use the combined version # (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) log_weight = 1 + (pos_weight - 1) * targets return math_ops.add( (1 - targets) * logits, log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) + nn_ops.relu(-logits)), name=name) def relu_layer(x, weights, biases, name=None): """Computes Relu(x * weight + biases). Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "nn_relu_layer" is used. Returns: A 2-D Tensor computing relu(matmul(x, weights) + biases). Dimensions typically: batch, out_units. """ with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name: x = ops.convert_to_tensor(x, name="x") weights = ops.convert_to_tensor(weights, name="weights") biases = ops.convert_to_tensor(biases, name="biases") xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases) return nn_ops.relu(xw_plus_b, name=name) def l2_normalize(x, dim, epsilon=1e-12, name=None): """Normalizes along dimension `dim` using an L2 norm. For a 1-D tensor with `dim = 0`, computes output = x / sqrt(max(sum(x**2), epsilon)) For `x` with more dimensions, independently normalizes each 1-D slice along dimension `dim`. Args: x: A `Tensor`. dim: Dimension along which to normalize. 
A scalar or a vector of integers. epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`. name: A name for this operation (optional). Returns: A `Tensor` with the same shape as `x`. """ with ops.name_scope(name, "l2_normalize", [x]) as name: x = ops.convert_to_tensor(x, name="x") square_sum = math_ops.reduce_sum(math_ops.square(x), dim, keep_dims=True) x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon)) return math_ops.multiply(x, x_inv_norm, name=name) def zero_fraction(value, name=None): """Returns the fraction of zeros in `value`. If `value` is empty, the result is `nan`. This is useful in summaries to measure and report sparsity. For example, ```python z = tf.Relu(...) summ = tf.contrib.deprecated.scalar_summary('sparsity', tf.nn.zero_fraction(z)) ``` Args: value: A tensor of numeric type. name: A name for the operation (optional). Returns: The fraction of zeros in `value`, with type `float32`. """ with ops.name_scope(name, "zero_fraction", [value]): value = ops.convert_to_tensor(value, name="value") zero = constant_op.constant(0, dtype=value.dtype, name="zero") return math_ops.reduce_mean( math_ops.cast(math_ops.equal(value, zero), dtypes.float32)) # pylint: disable=redefined-builtin def depthwise_conv2d(input, filter, strides, padding, rate=None, name=None): """Depthwise 2-D convolution. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]` containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. The output has `in_channels * channel_multiplier` channels. In detail, output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di, strides[2] * j + rate[1] * dj, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. If any value in `rate` is greater than 1, we perform atrous depthwise convolution, in which case all values in the `strides` tensor must be equal to 1. Args: input: 4-D with shape `[batch, in_height, in_width, in_channels]`. filter: 4-D with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. strides: 1-D of size 4. The stride of the sliding window for each dimension of `input`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the @{tf.nn.convolution$comment here} rate: 1-D of size 2. The dilation rate in which we sample input values across the `height` and `width` dimensions in atrous convolution. If it is greater than 1, then all values of strides must be 1. name: A name for this operation (optional). 
Returns: A 4-D `Tensor` of shape `[batch, out_height, out_width, in_channels * channel_multiplier].` """ with ops.name_scope(name, "depthwise", [input, filter]) as name: input = ops.convert_to_tensor(input, name="tensor_in") filter = ops.convert_to_tensor(filter, name="filter_in") if rate is None: rate = [1, 1] def op(input_converted, _, padding): return nn_ops.depthwise_conv2d_native( input=input_converted, filter=filter, strides=strides, padding=padding, name=name) return nn_ops.with_space_to_batch( input=input, filter_shape=array_ops.shape(filter), dilation_rate=rate, padding=padding, op=op) # pylint: enable=redefined-builtin # pylint: disable=redefined-builtin,line-too-long def separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, rate=None, name=None): """2-D convolution with separable filters. Performs a depthwise convolution that acts separately on channels followed by a pointwise convolution that mixes channels. Note that this is separability between dimensions `[1, 2]` and `3`, not spatial separability between dimensions `1` and `2`. In detail, output[b, i, j, k] = sum_{di, dj, q, r] input[b, strides[1] * i + di, strides[2] * j + dj, q] * depthwise_filter[di, dj, q, r] * pointwise_filter[0, 0, q * channel_multiplier + r, k] `strides` controls the strides for the depthwise convolution only, since the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. If any value in `rate` is greater than 1, we perform atrous depthwise convolution, in which case all values in the `strides` tensor must be equal to 1. Args: input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`. depthwise_filter: 4-D `Tensor` with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. Contains `in_channels` convolutional filters of depth 1. pointwise_filter: 4-D `Tensor` with shape `[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise filter to mix channels after `depthwise_filter` has convolved spatially. strides: 1-D of size 4. The strides for the depthwise convolution for each dimension of `input`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the @{tf.nn.convolution$comment here} rate: 1-D of size 2. The dilation rate in which we sample input values across the `height` and `width` dimensions in atrous convolution. If it is greater than 1, then all values of strides must be 1. name: A name for this operation (optional). Returns: A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`. Raises: ValueError: If channel_multiplier * in_channels > out_channels, which means that the separable convolution is overparameterized. 
""" with ops.name_scope(name, "separable_conv2d", [input, depthwise_filter, pointwise_filter]) as name: input = ops.convert_to_tensor(input, name="tensor_in") depthwise_filter = ops.convert_to_tensor( depthwise_filter, name="depthwise_filter") pointwise_filter = ops.convert_to_tensor( pointwise_filter, name="pointwise_filter") pointwise_filter_shape = pointwise_filter.get_shape().with_rank(4) pointwise_filter_shape[0].assert_is_compatible_with(1) pointwise_filter_shape[1].assert_is_compatible_with(1) channel_multiplier = depthwise_filter.get_shape().with_rank(4)[3] in_channels = input.get_shape().with_rank(4)[3] out_channels = pointwise_filter_shape[3] if rate is None: rate = [1, 1] # If any of channel numbers is unknown, then the comparison below returns # None. See TensorShape.__gt__(). if channel_multiplier * in_channels > out_channels: raise ValueError("Refusing to perform an overparameterized separable " "convolution: channel_multiplier * in_channels = " "%d * %d = %d > %d = out_channels" % (channel_multiplier, in_channels, channel_multiplier * in_channels, out_channels)) # The layout of the ops in the graph are expected to be as follows: # depthwise_conv2d // Conv2D op corresponding to native deptwise conv. # separable_conv2d // Conv2D op corresponding to the pointwise conv. def op(input_converted, _, padding): return nn_ops.depthwise_conv2d_native( input=input_converted, filter=depthwise_filter, strides=strides, padding=padding, name="depthwise") depthwise = nn_ops.with_space_to_batch( input=input, filter_shape=array_ops.shape(depthwise_filter), dilation_rate=rate, padding=padding, op=op) return nn_ops.conv2d( depthwise, pointwise_filter, [1, 1, 1, 1], padding="VALID", name=name) # pylint: enable=redefined-builtin,line-too-long def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None): """Calculate the sufficient statistics for the mean and variance of `x`. These sufficient statistics are computed using the one pass algorithm on an input that's optionally shifted. See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data Args: x: A `Tensor`. axes: Array of ints. Axes along which to compute mean and variance. shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. keep_dims: produce statistics with the same dimensionality as the input. name: Name used to scope the operations that compute the sufficient stats. Returns: Four `Tensor` objects of the same type as `x`: * the count (number of elements to average over). * the (possibly shifted) sum of the elements in the array. * the (possibly shifted) sum of squares of the elements in the array. * the shift by which the mean must be corrected or None if `shift` is None. """ axes = list(set(axes)) with ops.name_scope(name, "sufficient_statistics", [x, shift]): x = ops.convert_to_tensor(x, name="x") x_shape = x.get_shape() if all(x_shape[d].value is not None for d in axes): counts = 1 for d in axes: counts *= x_shape[d].value counts = constant_op.constant(counts, dtype=x.dtype) else: # shape needs to be inferred at runtime. x_dims = array_ops.gather( math_ops.cast(array_ops.shape(x), x.dtype), axes) counts = math_ops.reduce_prod(x_dims, name="count") if shift is not None: shift = ops.convert_to_tensor(shift, name="shift") m_ss = math_ops.subtract(x, shift) v_ss = math_ops.squared_difference(x, shift) else: # no shift. 
m_ss = x v_ss = math_ops.square(x) m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss") v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss") return counts, m_ss, v_ss, shift def normalize_moments(counts, mean_ss, variance_ss, shift, name=None): """Calculate the mean and variance of based on the sufficient statistics. Args: counts: A `Tensor` containing a the total count of the data (one value). mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly shifted) sum of the elements to average over. variance_ss: A `Tensor` containing the variance sufficient statistics: the (possibly shifted) squared sum of the data to compute the variance over. shift: A `Tensor` containing the value by which the data is shifted for numerical stability, or `None` if no shift was performed. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`. """ with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]): divisor = math_ops.reciprocal(counts, name="divisor") if shift is not None: shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean") mean = math_ops.add(shifted_mean, shift, name="mean") else: # no shift. shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean") mean = shifted_mean variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor), math_ops.square(shifted_mean), name="variance") return (mean, variance) def moments(x, axes, shift=None, name=None, keep_dims=False): """Calculate the mean and variance of `x`. The mean and variance are calculated by aggregating the contents of `x` across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and variance of a vector. Note: for numerical stability, when shift=None, the true mean would be computed and used as shift. When using these moments for batch normalization (see `tf.nn.batch_normalization`): * for so-called "global normalization", used with convolutional filters with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`. * for simple batch normalization pass `axes=[0]` (batch only). Args: x: A `Tensor`. axes: Array of ints. Axes along which to compute mean and variance. shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` in which case the true mean of the data is used as shift. A shift close to the true mean provides the most numerically stable results. name: Name used to scope the operations that compute the moments. keep_dims: produce moments with the same dimensionality as the input. Returns: Two `Tensor` objects: `mean` and `variance`. """ with ops.name_scope(name, "moments", [x, axes, shift]): # The dynamic range of fp16 is too limited to support the collection of # sufficient statistics. As a workaround we simply perform the operations # on 32-bit floats before converting the mean and variance back to fp16 y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x if shift is None: # Compute true mean while keeping the dims for proper broadcasting. shift = array_ops.stop_gradient( math_ops.reduce_mean(y, axes, keep_dims=True)) else: shift = math_ops.cast(shift, y.dtype) counts, m_ss, v_ss, shift = sufficient_statistics( y, axes, shift=shift, keep_dims=keep_dims, name=name) # Reshape shift as needed. 
shift = array_ops.reshape(shift, array_ops.shape(m_ss)) shift.set_shape(m_ss.get_shape()) with ops.control_dependencies([counts, m_ss, v_ss]): mean, variance = normalize_moments(counts, m_ss, v_ss, shift, name=name) if x.dtype == dtypes.float16: return (math_ops.cast(mean, dtypes.float16), math_ops.cast(variance, dtypes.float16)) else: return (mean, variance) def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=False): """Returns the frequency-weighted mean and variance of `x`. Args: x: A tensor. axes: 1-d tensor of int32 values; these are the axes along which to compute mean and variance. frequency_weights: A tensor of positive weights which can be broadcast with x. name: Name used to scope the operation. keep_dims: Produce moments with the same dimensionality as the input. Returns: Two tensors: `weighted_mean` and `weighted_variance`. """ with ops.name_scope(name, "weighted_moments", [x, frequency_weights, axes]): x = ops.convert_to_tensor(x, name="x") frequency_weights = ops.convert_to_tensor( frequency_weights, name="frequency_weights") # Unlike moments(), this just uses a simpler two-pass method. # See comment in moments() WRT precision; it applies here too. needs_cast = x.dtype == dtypes.float16 if needs_cast: x = math_ops.cast(x, dtypes.float32) if frequency_weights.dtype != x.dtype: frequency_weights = math_ops.cast(frequency_weights, x.dtype) # Note that we use keep_dims=True for our reductions regardless of the arg; # this is so that the results remain broadcast-compatible with the inputs. weighted_input_sum = math_ops.reduce_sum( frequency_weights * x, axes, name="weighted_input_sum", keep_dims=True) # The shape of the weights isn't necessarily the same as x's # shape, just broadcast-compatible with it -- so this expression # performs broadcasting to give a per-item weight, with the same # shape as (freqency_weights * x). This avoids having to reason # through all the broadcast logic to compute a correct # sum_of_weights. broadcasted_weights = frequency_weights + array_ops.zeros_like(x) sum_of_weights = math_ops.reduce_sum( broadcasted_weights, axes, name="sum_of_weights", keep_dims=True) divisor = math_ops.reciprocal(sum_of_weights, name="inv_weight_sum") weighted_mean = math_ops.multiply(weighted_input_sum, divisor) # Have the weighted mean; now on to variance: weighted_distsq = math_ops.reduce_sum( frequency_weights * math_ops.squared_difference(x, weighted_mean), axes, name="weighted_distsq", keep_dims=True) weighted_variance = math_ops.multiply(weighted_distsq, divisor) if not keep_dims: weighted_mean = array_ops.squeeze(weighted_mean, squeeze_dims=axes) weighted_variance = array_ops.squeeze( weighted_variance, squeeze_dims=axes) if needs_cast: weighted_mean = math_ops.cast(weighted_mean, dtypes.float16) weighted_variance = math_ops.cast(weighted_variance, dtypes.float16) return weighted_mean, weighted_variance def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None): r"""Batch normalization. As described in http://arxiv.org/abs/1502.03167. 
Normalizes a tensor by `mean` and `variance`, and applies (optionally) a `scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\): \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\) `mean`, `variance`, `offset` and `scale` are all expected to be of one of two shapes: * In all generality, they can have the same number of dimensions as the input `x`, with identical sizes as `x` for the dimensions that are not normalized over (the 'depth' dimension(s)), and dimension 1 for the others which are being normalized over. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=True)` during training, or running averages thereof during inference. * In the common case where the 'depth' dimension is the last dimension in the input tensor `x`, they may be one dimensional tensors of the same size as the 'depth' dimension. This is the case for example for the common `[batch, depth]` layout of fully-connected layers, and `[batch, height, width, depth]` for convolutions. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=False)` during training, or running averages thereof during inference. Args: x: Input `Tensor` of arbitrary dimensionality. mean: A mean `Tensor`. variance: A variance `Tensor`. offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or None. If present, will be added to the normalized tensor. scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or `None`. If present, the scale is applied to the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. name: A name for this operation (optional). Returns: the normalized, scaled, offset tensor. """ with ops.name_scope(name, "batchnorm", [x, mean, variance, scale, offset]): inv = math_ops.rsqrt(variance + variance_epsilon) if scale is not None: inv *= scale return x * inv + (offset - mean * inv if offset is not None else -mean * inv) def fused_batch_norm( x, scale, offset, # pylint: disable=invalid-name mean=None, variance=None, epsilon=0.001, data_format="NHWC", is_training=True, name=None): r"""Batch normalization. As described in http://arxiv.org/abs/1502.03167. Args: x: Input `Tensor` of 4 dimensions. scale: A `Tensor` of 1 dimension for scaling. offset: A `Tensor` of 1 dimension for bias. mean: A `Tensor` of 1 dimension for population mean used for inference. variance: A `Tensor` of 1 dimension for population variance used for inference. epsilon: A small float number added to the variance of x. data_format: The data format for x. Either "NHWC" (default) or "NCHW". is_training: A bool value to specify if the operation is used for training or inference. name: A name for this operation (optional). Returns: y: A 4D Tensor for the normalized, scaled, offsetted x. batch_mean: A 1D Tensor for the mean of x. batch_var: A 1D Tensor for the variance of x. Raises: ValueError: If mean or variance is not None when is_training is True. """ x = ops.convert_to_tensor(x, name="input") scale = ops.convert_to_tensor(scale, name="scale") offset = ops.convert_to_tensor(offset, name="offset") if is_training: if (mean is not None) or (variance is not None): raise ValueError("Both 'mean' and 'variance' must be None " "if is_training is True.") if mean is None: mean = constant_op.constant([]) if variance is None: variance = constant_op.constant([]) # Add 1e-12 to epsilon when epsilon <= 1e-5 to prevent CUDNN exception. 
epsilon = epsilon if epsilon > 1e-5 else epsilon + 1e-12 # pylint: disable=protected-access y, batch_mean, batch_var, _, _ = gen_nn_ops._fused_batch_norm( x, scale, offset, mean, variance, epsilon=epsilon, data_format=data_format, is_training=is_training, name=name) return y, batch_mean, batch_var # pylint: enable=protected-access def batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None): """Batch normalization. This op is deprecated. See `tf.nn.batch_normalization`. Args: t: A 4D input Tensor. m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. name: A name for this operation (optional). Returns: A batch-normalized `t`. """ return batch_normalization(t, m, v, beta, gamma if scale_after_normalization else None, variance_epsilon, name) def _sum_rows(x): """Returns a vector summing up each row of the matrix x.""" # _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is # a matrix. The gradient of _sum_rows(x) is more efficient than # reduce_sum(x, 1)'s gradient in today's implementation. Therefore, # we use _sum_rows(x) in the nce_loss() computation since the loss # is mostly used for training. cols = array_ops.shape(x)[1] ones_shape = array_ops.stack([cols, 1]) ones = array_ops.ones(ones_shape, x.dtype) return array_ops.reshape(math_ops.matmul(x, ones), [-1]) def _compute_sampled_logits(weights, biases, labels, inputs, num_sampled, num_classes, num_true=1, sampled_values=None, subtract_log_q=True, remove_accidental_hits=False, partition_strategy="mod", name=None): """Helper function for nce_loss and sampled_softmax_loss functions. Computes sampled output training logits and labels suitable for implementing e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see sampled_softmax_loss). Note: In the case where num_true > 1, we assign to each target class the target probability 1 / num_true so that the target probabilities sum to 1 per-example. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape `[num_classes, dim]`. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. 
(if None, we default to `log_uniform_candidate_sampler`) subtract_log_q: A `bool`. whether to subtract the log expected count of the labels in the sample to get the logits of the true labels. Default is True. Turn off for Negative Sampling. remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: out_logits, out_labels: `Tensor` objects each with shape `[batch_size, num_true + num_sampled]`, for passing to either `nn.sigmoid_cross_entropy_with_logits` (NCE) or `nn.softmax_cross_entropy_with_logits` (sampled softmax). """ if isinstance(weights, variables.PartitionedVariable): weights = list(weights) if not isinstance(weights, list): weights = [weights] with ops.name_scope(name, "compute_sampled_logits", weights + [biases, inputs, labels]): if labels.dtype != dtypes.int64: labels = math_ops.cast(labels, dtypes.int64) labels_flat = array_ops.reshape(labels, [-1]) # Sample the negative labels. # sampled shape: [num_sampled] tensor # true_expected_count shape = [batch_size, 1] tensor # sampled_expected_count shape = [num_sampled] tensor if sampled_values is None: sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler( true_classes=labels, num_true=num_true, num_sampled=num_sampled, unique=True, range_max=num_classes) # NOTE: pylint cannot tell that 'sampled_values' is a sequence # pylint: disable=unpacking-non-sequence sampled, true_expected_count, sampled_expected_count = sampled_values # pylint: enable=unpacking-non-sequence # labels_flat is a [batch_size * num_true] tensor # sampled is a [num_sampled] int tensor all_ids = array_ops.concat([labels_flat, sampled], 0) # weights shape is [num_classes, dim] all_w = embedding_ops.embedding_lookup( weights, all_ids, partition_strategy=partition_strategy) all_b = embedding_ops.embedding_lookup(biases, all_ids) # true_w shape is [batch_size * num_true, dim] # true_b is a [batch_size * num_true] tensor true_w = array_ops.slice( all_w, [0, 0], array_ops.stack([array_ops.shape(labels_flat)[0], -1])) true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat)) # inputs shape is [batch_size, dim] # true_w shape is [batch_size * num_true, dim] # row_wise_dots is [batch_size, num_true, dim] dim = array_ops.shape(true_w)[1:2] new_true_w_shape = array_ops.concat([[-1, num_true], dim], 0) row_wise_dots = math_ops.multiply( array_ops.expand_dims(inputs, 1), array_ops.reshape(true_w, new_true_w_shape)) # We want the row-wise dot plus biases which yields a # [batch_size, num_true] tensor of true_logits. dots_as_matrix = array_ops.reshape(row_wise_dots, array_ops.concat([[-1], dim], 0)) true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true]) true_b = array_ops.reshape(true_b, [-1, num_true]) true_logits += true_b # Lookup weights and biases for sampled labels. 
# sampled_w shape is [num_sampled, dim] # sampled_b is a [num_sampled] float tensor sampled_w = array_ops.slice( all_w, array_ops.stack([array_ops.shape(labels_flat)[0], 0]), [-1, -1]) sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1]) # inputs has shape [batch_size, dim] # sampled_w has shape [num_sampled, dim] # sampled_b has shape [num_sampled] # Apply X*W'+B, which yields [batch_size, num_sampled] sampled_logits = math_ops.matmul( inputs, sampled_w, transpose_b=True) + sampled_b if remove_accidental_hits: acc_hits = candidate_sampling_ops.compute_accidental_hits( labels, sampled, num_true=num_true) acc_indices, acc_ids, acc_weights = acc_hits # This is how SparseToDense expects the indices. acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1]) acc_ids_2d_int32 = array_ops.reshape( math_ops.cast(acc_ids, dtypes.int32), [-1, 1]) sparse_indices = array_ops.concat([acc_indices_2d, acc_ids_2d_int32], 1, "sparse_indices") # Create sampled_logits_shape = [batch_size, num_sampled] sampled_logits_shape = array_ops.concat( [array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)], 0) if sampled_logits.dtype != acc_weights.dtype: acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype) sampled_logits += sparse_ops.sparse_to_dense( sparse_indices, sampled_logits_shape, acc_weights, default_value=0.0, validate_indices=False) if subtract_log_q: # Subtract log of Q(l), prior probability that l appears in sampled. true_logits -= math_ops.log(true_expected_count) sampled_logits -= math_ops.log(sampled_expected_count) # Construct output logits and labels. The true labels/logits start at col 0. out_logits = array_ops.concat([true_logits, sampled_logits], 1) # true_logits is a float tensor, ones_like(true_logits) is a float tensor # of ones. We then divide by num_true to ensure the per-example labels sum # to 1.0, i.e. form a proper probability distribution. out_labels = array_ops.concat([ array_ops.ones_like(true_logits) / num_true, array_ops.zeros_like(sampled_logits) ], 1) return out_logits, out_labels def nce_loss(weights, biases, labels, inputs, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, partition_strategy="mod", name="nce_loss"): """Computes and returns the noise-contrastive estimation training loss. See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf). Also see our [Candidate Sampling Algorithms Reference](../../extras/candidate_sampling.pdf) Note: By default this uses a log-uniform (Zipfian) distribution for sampling, so your labels must be sorted in order of decreasing frequency to achieve good results. For more details, see @{tf.nn.log_uniform_candidate_sampler}. Note: In the case where `num_true` > 1, we assign to each target class the target probability 1 / `num_true` so that the target probabilities sum to 1 per-example. Note: It would be useful to allow a variable number of target classes per example. We hope to provide this functionality in a future release. For now, if you have a variable number of target classes, you can pad them out to a constant number by either repeating them or by padding with an otherwise unused class. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. 
labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. Whether to remove "accidental hits" where a sampled class equals one of the target classes. If set to `True`, this is a "Sampled Logistic" loss instead of NCE, and we are learning to generate log-odds instead of log probabilities. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf). Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example NCE losses. """ logits, labels = _compute_sampled_logits( weights=weights, biases=biases, labels=labels, inputs=inputs, num_sampled=num_sampled, num_classes=num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = sigmoid_cross_entropy_with_logits( labels=labels, logits=logits, name="sampled_losses") # sampled_losses is batch_size x {true_loss, sampled_losses...} # We sum out true and sampled losses. return _sum_rows(sampled_losses) def sampled_softmax_loss(weights, biases, labels, inputs, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, partition_strategy="mod", name="sampled_softmax_loss"): """Computes and returns the sampled softmax training loss. This is a faster way to train a softmax classifier over a huge number of classes. This operation is for training only. It is generally an underestimate of the full softmax loss. At inference time, you can compute full softmax probabilities with the expression `tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)`. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007) ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-sharded) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. 
(if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is True. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example sampled softmax losses. """ logits, labels = _compute_sampled_logits( weights=weights, biases=biases, labels=labels, inputs=inputs, num_sampled=num_sampled, num_classes=num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = nn_ops.softmax_cross_entropy_with_logits(labels=labels, logits=logits) # sampled_losses is a [batch_size] tensor. return sampled_losses
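

# ---------------------------------------------------------------------------
# Illustrative sketches only -- the two private helpers below are not part of
# the original TensorFlow module. They show one plausible way the ops defined
# above are commonly wired together; the helper names and their arguments are
# made-up examples rather than an established API.


def _example_batch_normalize_last_dim(x, offset=None, scale=None,
                                      variance_epsilon=1e-3):
  """Sketch: batch-normalize `x` over every axis except the last one.

  Assumes `x` is a float `Tensor` laid out as `[batch, ..., depth]`, so the
  reduction axes correspond to the "global normalization" case described in
  the `moments()` and `batch_normalization()` docstrings above.
  """
  reduce_axes = list(range(x.get_shape().ndims - 1))
  # Compute per-depth statistics, then normalize with optional scale/offset.
  mean, variance = moments(x, reduce_axes, keep_dims=False)
  return batch_normalization(x, mean, variance, offset, scale,
                             variance_epsilon)


def _example_mean_nce_loss(class_weights, class_biases, label_ids, activations,
                           num_sampled, num_classes):
  """Sketch: reduce the per-example `nce_loss()` output to a scalar.

  Assumes `label_ids` is an `int64` vector of shape `[batch_size]` (one true
  class per example) and `activations` has shape `[batch_size, dim]`.
  """
  per_example_loss = nce_loss(
      weights=class_weights,
      biases=class_biases,
      labels=array_ops.reshape(label_ids, [-1, 1]),
      inputs=activations,
      num_sampled=num_sampled,
      num_classes=num_classes)
  # nce_loss returns a [batch_size] vector; average it for a training loss.
  return math_ops.reduce_mean(per_example_loss)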
py
1a33362fbe96945bf2dd2f7af454a4bc584d2efa
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .tracked_resource import TrackedResource class StepResource(TrackedResource): """The resource representation of a rollout step. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} :vartype id: str :ivar name: The name of the resource :vartype name: str :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. :vartype type: str :param tags: Resource tags. :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives :type location: str :param properties: Required. The properties that define the step. :type properties: ~azure.mgmt.deploymentmanager.models.StepProperties """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, 'properties': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'StepProperties'}, } def __init__(self, **kwargs): super(StepResource, self).__init__(**kwargs) self.properties = kwargs.get('properties', None)
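

# Illustrative sketch only (not part of the generated code): because this is a
# kwargs-based msrest model, a minimal instance just needs the two required
# fields from the validation map above. ``wait_step_properties`` is a
# hypothetical ``StepProperties`` instance and the location value is a
# placeholder:
#
#   step = StepResource(
#       location='eastus',
#       tags={'env': 'test'},
#       properties=wait_step_properties)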
py
1a33371dcf73e533c4d552754bd9eb80f62e55fd
#!/usr/bin/env python
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2017 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script runs a full scan against an API defined by OpenAPI/Swagger or SOAP
# using ZAP
#
# It can either be run 'standalone', in which case it depends on
# https://pypi.python.org/pypi/python-owasp-zap-v2.4 and Docker, or it can be run
# inside one of the ZAP docker containers. It automatically detects if it is
# running in docker so the parameters are the same.
#
# It currently supports APIs defined by:
#   OpenAPI/Swagger URL
#   OpenAPI/Swagger file
#   SOAP URL
#   SOAP File
# It will exit with codes of:
#   0: Success
#   1: At least 1 FAIL
#   2: At least one WARN and no FAILs
#   3: Any other failure
# By default all alerts found by ZAP will be treated as WARNings.
# You can use the -c or -u parameters to specify a configuration file to override
# this.
# You can generate a template configuration file using the -g parameter. You will
# then need to change 'WARN' to 'FAIL', 'INFO' or 'IGNORE' for the rules you want
# to be handled differently.
# You can also add your own messages for the rules by appending them after a tab
# at the end of each line.
# By default the active scan rules run are hardcoded in the API-Minimal.policy
# file but you can change them by supplying a configuration file with the rules
# you don't want to be run set to IGNORE.
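
# For illustration only: a rule configuration file generated with -g and then
# edited by hand could contain lines like the ones below. Fields are tab
# separated and an optional custom message follows a final tab; the ids and
# names here are placeholders rather than an authoritative list of scan rules.
#
#   10012	WARN	(Example passive scan rule)
#   10013	FAIL	(Example rule treated as a failure)	Custom failure message
#   40014	IGNORE	(Example active scan rule that will not be run)
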
import getopt import json import logging import os import os.path import subprocess import sys import time from datetime import datetime from six.moves.urllib.parse import urljoin from zapv2 import ZAPv2 from zap_common import * class NoUrlsException(Exception): pass config_dict = {} config_msg = {} out_of_scope_dict = {} min_level = 0 # Scan rules that aren't really relevant, eg the examples rules in the alpha set blacklist = ['-1', '50003', '60000', '60001'] # Scan rules that are being addressed in_progress_issues = {} logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') # Hide "Starting new HTTP connection" messages logging.getLogger("requests").setLevel(logging.WARNING) def usage(): print('Usage: zap-api-scan.py -t <target> -f <format> [options]') print(' -t target target API definition, OpenAPI or SOAP, local file or URL, eg https://www.example.com/openapi.json') print(' -f format either openapi or soap') print('Options:') print(' -h print this help message') print(' -c config_file config file to use to INFO, IGNORE or FAIL warnings') print(' -u config_url URL of config file to use to INFO, IGNORE or FAIL warnings') print(' -g gen_file generate default config file(all rules set to WARN)') print(' -r report_html file to write the full ZAP HTML report') print(' -w report_md file to write the full ZAP Wiki(Markdown) report') print(' -x report_xml file to write the full ZAP XML report') print(' -J report_json file to write the full ZAP JSON document') print(' -a include the alpha passive scan rules as well') print(' -d show debug messages') print(' -P specify listen port') print(' -D delay in seconds to wait for passive scanning ') print(' -i default rules not in the config file to INFO') print(' -l level minimum level to show: PASS, IGNORE, INFO, WARN or FAIL, use with -s to hide example URLs') print(' -n context_file context file which will be loaded prior to scanning the target') print(' -p progress_file progress file which specifies issues that are being addressed') print(' -s short output format - dont show PASSes or example URLs') print(' -T max time in minutes to wait for ZAP to start and the passive scan to run') print(' -O the hostname to override in the (remote) OpenAPI spec') print(' -z zap_options ZAP command line options e.g. 
-z "-config aaa=bbb -config ccc=ddd"') print(' --hook path to python file that define your custom hooks') print('') print('For more details see https://github.com/zaproxy/zaproxy/wiki/ZAP-API-Scan') def main(argv): global min_level global in_progress_issues cid = '' context_file = '' progress_file = '' config_file = '' config_url = '' generate = '' port = 0 detailed_output = True report_html = '' report_md = '' report_xml = '' report_json = '' target = '' target_file = '' target_url = '' host_override = '' format = '' zap_alpha = False info_unspecified = False base_dir = '' zap_ip = 'localhost' zap_options = '' delay = 0 timeout = 0 hook_file = None pass_count = 0 warn_count = 0 fail_count = 0 info_count = 0 ignore_count = 0 warn_inprog_count = 0 fail_inprog_count = 0 try: opts, args = getopt.getopt(argv, "t:f:c:u:g:m:n:r:J:w:x:l:hdaijp:sz:P:D:T:O:", ["hook="]) except getopt.GetoptError as exc: logging.warning('Invalid option ' + exc.opt + ' : ' + exc.msg) usage() sys.exit(3) for opt, arg in opts: if opt == '-h': usage() sys.exit(0) elif opt == '-t': target = arg logging.debug('Target: ' + target) elif opt == '-f': format = arg elif opt == '-c': config_file = arg elif opt == '-u': config_url = arg elif opt == '-g': generate = arg elif opt == '-d': logging.getLogger().setLevel(logging.DEBUG) elif opt == '-P': port = int(arg) elif opt == '-D': delay = int(arg) elif opt == '-n': context_file = arg elif opt == '-p': progress_file = arg elif opt == '-r': report_html = arg elif opt == '-J': report_json = arg elif opt == '-w': report_md = arg elif opt == '-x': report_xml = arg elif opt == '-a': zap_alpha = True elif opt == '-i': info_unspecified = True elif opt == '-l': try: min_level = zap_conf_lvls.index(arg) except ValueError: logging.warning('Level must be one of ' + str(zap_conf_lvls)) usage() sys.exit(3) elif opt == '-z': zap_options = arg elif opt == '-s': detailed_output = False elif opt == '-T': timeout = int(arg) elif opt == '-O': host_override = arg elif opt == '--hook': hook_file = arg check_zap_client_version() load_custom_hooks(hook_file) trigger_hook('cli_opts', opts) # Check target supplied and ok if len(target) == 0: usage() sys.exit(3) if format != 'openapi' and format != 'soap': logging.warning('Format must be either \'openapi\' or \'soap\'') usage() sys.exit(3) if running_in_docker(): base_dir = '/zap/wrk/' if config_file or generate or report_html or report_xml or report_json or progress_file or context_file or target_file: # Check directory has been mounted if not os.path.exists(base_dir): logging.warning('A file based option has been specified but the directory \'/zap/wrk\' is not mounted ') usage() sys.exit(3) if target.startswith('http://') or target.startswith('https://'): target_url = target else: # assume its a file if not os.path.exists(base_dir + target): logging.warning('Target must either start with \'http://\' or \'https://\' or be a local file') logging.warning('File does not exist: ' + base_dir + target) usage() sys.exit(3) else: target_file = target # Choose a random 'ephemeral' port and check its available if it wasn't specified with -P option if port == 0: port = get_free_port() logging.debug('Using port: ' + str(port)) if config_file: # load config file from filestore with open(base_dir + config_file) as f: try: load_config(f, config_dict, config_msg, out_of_scope_dict) except ValueError as e: logging.warning("Failed to load config file " + base_dir + config_file + " " + str(e)) sys.exit(3) elif config_url: # load config file from url try: config_data = 
urlopen(config_url).read().decode('UTF-8').splitlines() load_config(config_data, config_dict, config_msg, out_of_scope_dict) except ValueError as e: logging.warning("Failed to read configs from " + config_url + " " + str(e)) sys.exit(3) except: logging.warning('Failed to read configs from ' + config_url) sys.exit(3) if progress_file: # load progress file from filestore with open(base_dir + progress_file) as f: progress = json.load(f) # parse into something more useful... # in_prog_issues = map of vulnid -> {object with everything in} for issue in progress["issues"]: if issue["state"] == "inprogress": in_progress_issues[issue["id"]] = issue if running_in_docker(): try: params = [ '-addonupdate', '-addoninstall', 'pscanrulesBeta'] # In case we're running in the stable container if zap_alpha: params.append('-addoninstall') params.append('pscanrulesAlpha') add_zap_options(params, zap_options) start_zap(port, params) except OSError: logging.warning('Failed to start ZAP :(') sys.exit(3) else: # Not running in docker, so start one mount_dir = '' if context_file: mount_dir = os.path.dirname(os.path.abspath(context_file)) params = ['-addonupdate'] if (zap_alpha): params.extend(['-addoninstall', 'pscanrulesAlpha']) add_zap_options(params, zap_options) try: cid = start_docker_zap('owasp/zap2docker-weekly', port, params, mount_dir) zap_ip = ipaddress_for_cid(cid) logging.debug('Docker ZAP IP Addr: ' + zap_ip) # Copy across the files that may not be in all of the docker images try: subprocess.check_output(['docker', 'exec', '-t', cid, 'mkdir', '-p', '/home/zap/.ZAP_D/scripts/scripts/httpsender/']) cp_to_docker(cid, 'scripts/scripts/httpsender/Alert_on_HTTP_Response_Code_Errors.js', '/home/zap/.ZAP_D/') cp_to_docker(cid, 'scripts/scripts/httpsender/Alert_on_Unexpected_Content_Types.js', '/home/zap/.ZAP_D/') cp_to_docker(cid, 'policies/API-Minimal.policy', '/home/zap/.ZAP_D/') if target_file: cp_to_docker(cid, target_file, '/zap/') except OSError: logging.warning('Failed to copy one of the required files') sys.exit(3) except OSError: logging.warning('Failed to start ZAP in docker :(') sys.exit(3) try: zap = ZAPv2(proxies={'http': 'http://' + zap_ip + ':' + str(port), 'https': 'http://' + zap_ip + ':' + str(port)}) wait_for_zap_start(zap, timeout * 60) trigger_hook('zap_started', zap, target) if context_file: # handle the context file, cant use base_dir as it might not have been set up zap_import_context(zap, '/zap/wrk/' + os.path.basename(context_file)) # Enable scripts zap.script.load('Alert_on_HTTP_Response_Code_Errors.js', 'httpsender', 'Oracle Nashorn', '/home/zap/.ZAP_D/scripts/scripts/httpsender/Alert_on_HTTP_Response_Code_Errors.js') zap.script.enable('Alert_on_HTTP_Response_Code_Errors.js') zap.script.load('Alert_on_Unexpected_Content_Types.js', 'httpsender', 'Oracle Nashorn', '/home/zap/.ZAP_D/scripts/scripts/httpsender/Alert_on_Unexpected_Content_Types.js') zap.script.enable('Alert_on_Unexpected_Content_Types.js') # Import the API defn if format == 'openapi': trigger_hook('importing_openapi', target_url, target_file) if target_url: logging.debug('Import OpenAPI URL ' + target_url) res = zap.openapi.import_url(target, host_override) urls = zap.core.urls() if host_override: target = urljoin(target_url, '//' + host_override) logging.info('Using host override, new target: {0}'.format(target)) else: logging.debug('Import OpenAPI File ' + target_file) res = zap.openapi.import_file(base_dir + target_file) urls = zap.core.urls() if len(urls) > 0: # Choose the first one - will be striping off the path 
below target = urls[0] logging.debug('Using target from imported file: {0}'.format(target)) else: trigger_hook('importing_soap', target_url, target_file) if target_url: logging.debug('Import SOAP URL ' + target_url) res = zap._request(zap.base + 'soap/action/importUrl/', {'url':target}) urls = zap.core.urls() else: logging.debug('Import SOAP File ' + target_file) res = zap._request(zap.base + 'soap/action/importFile/', {'file': base_dir + target_file}) urls = zap.core.urls() if len(urls) > 0: # Choose the first one - will be striping off the path below target = urls[0] logging.debug('Using target from imported file: {0}'.format(target)) logging.info('Number of Imported URLs: ' + str(len(urls))) logging.debug('Import warnings: ' + str(res)) if len(urls) == 0: logging.warning('Failed to import any URLs') # No point continue, there's nothing to scan. raise NoUrlsException() if target.count('/') > 2: old_target = target # The url can include a valid path, but always reset to scan the host target = target[0:target.index('/', 8)+1] logging.debug('Normalised target from {0} to {1}'.format(old_target, target)) # Wait for a delay if specified with -D option if (delay): start_scan = datetime.now() while((datetime.now() - start_scan).seconds < delay ): time.sleep(5) logging.debug('Delay active scan ' + str(delay -(datetime.now() - start_scan).seconds) + ' seconds') # Set up the scan policy scan_policy = 'API-Minimal' if config_dict: # They have supplied a config file, use this to define the ascan rules # Use the default one as the script might not have write access to the one just copied across scan_policy = 'Default Policy' zap.ascan.enable_all_scanners(scanpolicyname=scan_policy) for scanner, state in config_dict.items(): if state == 'IGNORE': # Dont bother checking the result - this will fail for pscan rules zap.ascan.set_scanner_alert_threshold(id=scanner, alertthreshold='OFF', scanpolicyname=scan_policy) zap_active_scan(zap, target, scan_policy) zap_wait_for_passive_scan(zap, timeout * 60) # Print out a count of the number of urls num_urls = len(zap.core.urls()) if num_urls == 0: logging.warning('No URLs found - is the target URL accessible? 
Local services may not be accessible from the Docker container') else: if detailed_output: print('Total of ' + str(num_urls) + ' URLs') alert_dict = zap_get_alerts(zap, target, blacklist, out_of_scope_dict) all_ascan_rules = zap.ascan.scanners('Default Policy') all_pscan_rules = zap.pscan.scanners all_dict = {} for rule in all_pscan_rules: plugin_id = rule.get('id') if plugin_id in blacklist: continue all_dict[plugin_id] = rule.get('name') + ' - Passive/' + rule.get('quality') for rule in all_ascan_rules: plugin_id = rule.get('id') if plugin_id in blacklist: continue all_dict[plugin_id] = rule.get('name') + ' - Active/' + rule.get('quality') if generate: # Create the config file with open(base_dir + generate, 'w') as f: f.write('# zap-api-scan rule configuration file\n') f.write('# Change WARN to IGNORE to ignore rule or FAIL to fail if rule matches\n') f.write('# Active scan rules set to IGNORE will not be run which will speed up the scan\n') f.write('# Only the rule identifiers are used - the names are just for info\n') f.write('# You can add your own messages to each rule by appending them after a tab on each line.\n') for key, rule in sorted(all_dict.iteritems()): f.write(key + '\tWARN\t(' + rule + ')\n') # print out the passing rules pass_dict = {} for rule in all_pscan_rules: plugin_id = rule.get('id') if plugin_id in blacklist: continue if (not alert_dict.has_key(plugin_id)): pass_dict[plugin_id] = rule.get('name') for rule in all_ascan_rules: plugin_id = rule.get('id') if plugin_id in blacklist: continue if not alert_dict.has_key(plugin_id) and not(config_dict.has_key(plugin_id) and config_dict[plugin_id] == 'IGNORE'): pass_dict[plugin_id] = rule.get('name') if min_level == zap_conf_lvls.index("PASS") and detailed_output: for key, rule in sorted(pass_dict.iteritems()): print('PASS: ' + rule + ' [' + key + ']') pass_count = len(pass_dict) if detailed_output: # print out the ignored ascan rules(there will be no alerts for these as they were not run) for rule in all_ascan_rules: plugin_id = rule.get('id') if plugin_id in blacklist: continue if config_dict.has_key(plugin_id) and config_dict[plugin_id] == 'IGNORE': print('SKIP: ' + rule.get('name') + ' [' + plugin_id + ']') # print out the ignored rules ignore_count, not_used = print_rules(alert_dict, 'IGNORE', config_dict, config_msg, min_level, inc_ignore_rules, True, detailed_output, {}) # print out the info rules info_count, not_used = print_rules(alert_dict, 'INFO', config_dict, config_msg, min_level, inc_info_rules, info_unspecified, detailed_output, in_progress_issues) # print out the warning rules warn_count, warn_inprog_count = print_rules(alert_dict, 'WARN', config_dict, config_msg, min_level, inc_warn_rules, not info_unspecified, detailed_output, in_progress_issues) # print out the failing rules fail_count, fail_inprog_count = print_rules(alert_dict, 'FAIL', config_dict, config_msg, min_level, inc_fail_rules, True, detailed_output, in_progress_issues) if report_html: # Save the report write_report(base_dir + report_html, zap.core.htmlreport()) if report_json: # Save the report write_report(base_dir + report_json, zap.core.jsonreport()) if report_md: # Save the report write_report(base_dir + report_md, zap.core.mdreport()) if report_xml: # Save the report write_report(base_dir + report_xml, zap.core.xmlreport()) print('FAIL-NEW: ' + str(fail_count) + '\tFAIL-INPROG: ' + str(fail_inprog_count) + '\tWARN-NEW: ' + str(warn_count) + '\tWARN-INPROG: ' + str(warn_inprog_count) + '\tINFO: ' + str(info_count) + '\tIGNORE: ' + 
str(ignore_count) + '\tPASS: ' + str(pass_count)) trigger_hook('zap_pre_shutdown', zap) # Stop ZAP zap.core.shutdown() except IOError as e: if hasattr(e, 'args') and len(e.args) > 1: errno, strerror = e print("ERROR " + str(strerror)) logging.warning('I/O error(' + str(errno) + '): ' + str(strerror)) else: print("ERROR %s" % e) logging.warning('I/O error: ' + str(e)) dump_log_file(cid) except NoUrlsException: dump_log_file(cid) except: print("ERROR " + str(sys.exc_info()[0])) logging.warning('Unexpected error: ' + str(sys.exc_info()[0])) dump_log_file(cid) if not running_in_docker(): stop_docker(cid) trigger_hook('pre_exit', fail_count, warn_count, pass_count) if fail_count > 0: sys.exit(1) elif warn_count > 0: sys.exit(2) elif pass_count > 0: sys.exit(0) else: sys.exit(3) if __name__ == "__main__": main(sys.argv[1:])
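

# Example invocations for reference (the target URLs and file names below are
# placeholders; see usage() above for the full option list):
#
#   python zap-api-scan.py -t https://www.example.com/openapi.json -f openapi
#   python zap-api-scan.py -t target.wsdl -f soap -c api-rules.conf -r report.html
#
# As implemented in main() above, the process exits with 0 on success, 1 if
# any rule FAILs, 2 if any rule WARNs (and none fail), and 3 on other errors.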
py
1a33388bc27bfdd1462f65b7fca4a55040af44d4
# CONFIG --------------------------------------------------------------------------------------------------------#

# Here are the input and output data paths (Note: you can override wav_path in preprocess.py)
wav_path = '../database/blizzard2013/segmented/small_wavn_lead_trail_silence_removed_16000'
data_path = 'data_blizzard2013_16000/'

# model ids are separate - that way you can use a new tts with an old wavernn and vice versa
# NB: expect undefined behaviour if models were trained on different DSP settings
voc_model_id = 'blizzard2013_mol'
tts_model_id = 'blizzard2013_lsa_smooth_attention_pytorch_multihead'

# set this to True if you are only interested in WaveRNN
ignore_tts = False


# DSP -----------------------------------------------------------------------------------------------------------#

# Settings for all models
sample_rate = 16000
n_fft = 2048
fft_bins = n_fft // 2 + 1
num_mels = 80
hop_length = 200                    # 12.5ms - in line with Tacotron 2 paper
win_length = 800                    # 50ms - same reason as above
fmin = 40
min_level_db = -100
ref_level_db = 20
bits = 9                            # bit depth of signal
mu_law = True                       # Recommended to suppress noise if using raw bits in hp.voc_mode below
peak_norm = False                   # Normalise to the peak of each wav file


# WAVERNN / VOCODER ---------------------------------------------------------------------------------------------#

# Model Hparams
voc_mode = 'MOL'                    # either 'RAW' (softmax on raw bits) or 'MOL' (sample from mixture of logistics)
voc_upsample_factors = (5, 5, 8)    # NB - this needs to correctly factorise hop_length
voc_rnn_dims = 512
voc_fc_dims = 512
voc_compute_dims = 128
voc_res_out_dims = 128
voc_res_blocks = 10

# Training
voc_batch_size = 64
voc_lr = 1e-4
voc_checkpoint_every = 25_000
voc_gen_at_checkpoint = 5           # number of samples to generate at each checkpoint
voc_total_steps = 2_000_000         # Total number of training steps
voc_test_samples = 50               # How many unseen samples to put aside for testing
voc_pad = 2                         # this will pad the input so that the resnet can 'see' wider than input length
voc_seq_len = hop_length * 5        # must be a multiple of hop_length

# Generating / Synthesizing
voc_gen_batched = True              # very fast (realtime+) single utterance batched generation
voc_target = 11_000                 # target number of samples to be generated in each batch entry
voc_overlap = 550                   # number of samples for crossfading between batches


# TACOTRON/TTS --------------------------------------------------------------------------------------------------#

# Model Hparams
tts_r = 1                           # model predicts r frames per output step
tts_embed_dims = 256                # embedding dimension for the graphemes/phoneme inputs
tts_encoder_dims = 128
tts_decoder_dims = 256
tts_postnet_dims = 128
tts_encoder_K = 16
tts_lstm_dims = 512
tts_postnet_K = 8
tts_num_highways = 4
tts_dropout = 0.5
tts_cleaner_names = ['english_cleaners']

# Training
tts_schedule = [(7, 1e-3, 10_000, 8),      # progressive training schedule
                (5, 1e-4, 100_000, 8),     # (r, lr, step, batch_size)
                (2, 1e-4, 180_000, 8),
                (1, 1e-4, 350_000, 8),
                (1, 1e-4, 1000_000, 8)]

tts_max_mel_len = 1250              # if you have a couple of extremely long spectrograms you might want to use this
tts_bin_lengths = True              # bins the spectrogram lengths before sampling in data loader - speeds up training
tts_clip_grad_norm = 1.0            # clips the gradient norm to prevent explosion - set to None if not needed
tts_checkpoint_every = 2_000        # checkpoints the model every X steps
# TODO: tts_phoneme_prob = 0.0      # [0 <-> 1] probability for feeding model phonemes vrs graphemes

# ---------------------------------------------------------------------------------------------------------------#

# reference encoder
ref_enc_filters = [32, 32, 64, 64, 128, 128]
reference_depth = 128

# Global style token
num_gst = 10
num_heads = 4                       # Head number for multi-head attention
style_embed_depth = 256
style_att_dim = 128
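A small consistency sketch for the settings above (not part of the original file; the module name hparams is an assumption): the product of voc_upsample_factors has to equal hop_length, and voc_seq_len is built from hop_length, so both can be checked directly.

# Hypothetical sanity check of the DSP / vocoder hyperparameters defined above.
import numpy as np
import hparams as hp  # assumed module name

assert int(np.prod(hp.voc_upsample_factors)) == hp.hop_length   # 5 * 5 * 8 == 200
assert hp.voc_seq_len % hp.hop_length == 0
print('frame shift: %.1f ms' % (1000.0 * hp.hop_length / hp.sample_rate))   # 12.5 ms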
py
1a3338f019868cdeb353b33227e21dea199d9546
#!/usr/bin/env python3
# encoding: utf-8


async def hello_world():
    return 'Hello, world!'


async def say_hi():
    print(await hello_world())
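A tiny usage sketch (not in the original file): the coroutines above only execute when driven by an event loop, for example via asyncio.run.

# Hypothetical driver for the coroutines defined above (Python 3.7+).
import asyncio

asyncio.run(say_hi())   # prints: Hello, world!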
py
1a33394a5d90653e46b20ff1d7973f6e22e96660
""" The MIT License (MIT) Copyright (c) 2015-present Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import annotations import asyncio import logging import signal import sys import traceback from typing import Any, List, Optional, TYPE_CHECKING, Union import aiohttp from .user import User from .invite import Invite from .template import Template from .widget import Widget from .guild import Guild from .channel import _channel_factory from .enums import ChannelType from .mentions import AllowedMentions from .errors import * from .enums import Status, VoiceRegion from .flags import ApplicationFlags from .gateway import * from .activity import BaseActivity, create_activity from .voice_client import VoiceClient from .http import HTTPClient from .state import ConnectionState from . import utils from .object import Object from .backoff import ExponentialBackoff from .webhook import Webhook from .iterators import GuildIterator from .appinfo import AppInfo __all__ = ( 'Client', ) if TYPE_CHECKING: from .abc import SnowflakeTime log = logging.getLogger(__name__) def _cancel_tasks(loop): tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()} if not tasks: return log.info('Cleaning up after %d tasks.', len(tasks)) for task in tasks: task.cancel() loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True)) log.info('All tasks finished cancelling.') for task in tasks: if task.cancelled(): continue if task.exception() is not None: loop.call_exception_handler({ 'message': 'Unhandled exception during Client.run shutdown.', 'exception': task.exception(), 'task': task }) def _cleanup_loop(loop): try: _cancel_tasks(loop) loop.run_until_complete(loop.shutdown_asyncgens()) finally: log.info('Closing the event loop.') loop.close() class Client: r"""Represents a client connection that connects to Discord. This class is used to interact with the Discord WebSocket and API. A number of options can be passed to the :class:`Client`. Parameters ----------- max_messages: Optional[:class:`int`] The maximum number of messages to store in the internal message cache. This defaults to ``1000``. Passing in ``None`` disables the message cache. .. versionchanged:: 1.3 Allow disabling the message cache and change the default size to ``1000``. loop: Optional[:class:`asyncio.AbstractEventLoop`] The :class:`asyncio.AbstractEventLoop` to use for asynchronous operations. Defaults to ``None``, in which case the default event loop is used via :func:`asyncio.get_event_loop()`. 
connector: :class:`aiohttp.BaseConnector` The connector to use for connection pooling. proxy: Optional[:class:`str`] Proxy URL. proxy_auth: Optional[:class:`aiohttp.BasicAuth`] An object that represents proxy HTTP Basic Authorization. shard_id: Optional[:class:`int`] Integer starting at ``0`` and less than :attr:`.shard_count`. shard_count: Optional[:class:`int`] The total number of shards. application_id: :class:`int` The client's application ID. intents: :class:`Intents` The intents that you want to enable for the session. This is a way of disabling and enabling certain gateway events from triggering and being sent. If not given, defaults to a regularly constructed :class:`Intents` class. .. versionadded:: 1.5 member_cache_flags: :class:`MemberCacheFlags` Allows for finer control over how the library caches members. If not given, defaults to cache as much as possible with the currently selected intents. .. versionadded:: 1.5 chunk_guilds_at_startup: :class:`bool` Indicates if :func:`.on_ready` should be delayed to chunk all guilds at start-up if necessary. This operation is incredibly slow for large amounts of guilds. The default is ``True`` if :attr:`Intents.members` is ``True``. .. versionadded:: 1.5 status: Optional[:class:`.Status`] A status to start your presence with upon logging on to Discord. activity: Optional[:class:`.BaseActivity`] An activity to start your presence with upon logging on to Discord. allowed_mentions: Optional[:class:`AllowedMentions`] Control how the client handles mentions by default on every message sent. .. versionadded:: 1.4 heartbeat_timeout: :class:`float` The maximum numbers of seconds before timing out and restarting the WebSocket in the case of not receiving a HEARTBEAT_ACK. Useful if processing the initial packets take too long to the point of disconnecting you. The default timeout is 60 seconds. guild_ready_timeout: :class:`float` The maximum number of seconds to wait for the GUILD_CREATE stream to end before preparing the member cache and firing READY. The default timeout is 2 seconds. .. versionadded:: 1.4 assume_unsync_clock: :class:`bool` Whether to assume the system clock is unsynced. This applies to the ratelimit handling code. If this is set to ``True``, the default, then the library uses the time to reset a rate limit bucket given by Discord. If this is ``False`` then your system clock is used to calculate how long to sleep for. If this is set to ``False`` it is recommended to sync your system clock to Google's NTP server. .. versionadded:: 1.3 Attributes ----------- ws The websocket gateway the client is currently connected to. Could be ``None``. loop: :class:`asyncio.AbstractEventLoop` The event loop that the client uses for asynchronous operations. 
""" def __init__(self, *, loop=None, **options): self.ws = None self.loop = asyncio.get_event_loop() if loop is None else loop self._listeners = {} self.shard_id = options.get('shard_id') self.shard_count = options.get('shard_count') connector = options.pop('connector', None) proxy = options.pop('proxy', None) proxy_auth = options.pop('proxy_auth', None) unsync_clock = options.pop('assume_unsync_clock', True) self.http = HTTPClient(connector, proxy=proxy, proxy_auth=proxy_auth, unsync_clock=unsync_clock, loop=self.loop) self._handlers = { 'ready': self._handle_ready } self._hooks = { 'before_identify': self._call_before_identify_hook } self._connection = self._get_state(**options) self._connection.shard_count = self.shard_count self._closed = False self._ready = asyncio.Event() self._connection._get_websocket = self._get_websocket self._connection._get_client = lambda: self if VoiceClient.warn_nacl: VoiceClient.warn_nacl = False log.warning("PyNaCl is not installed, voice will NOT be supported") # internals def _get_websocket(self, guild_id=None, *, shard_id=None): return self.ws def _get_state(self, **options): return ConnectionState(dispatch=self.dispatch, handlers=self._handlers, hooks=self._hooks, http=self.http, loop=self.loop, **options) def _handle_ready(self): self._ready.set() @property def latency(self): """:class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds. This could be referred to as the Discord WebSocket protocol latency. """ ws = self.ws return float('nan') if not ws else ws.latency def is_ws_ratelimited(self): """:class:`bool`: Whether the websocket is currently rate limited. This can be useful to know when deciding whether you should query members using HTTP or via the gateway. .. versionadded:: 1.6 """ if self.ws: return self.ws.is_ratelimited() return False @property def user(self): """Optional[:class:`.ClientUser`]: Represents the connected client. ``None`` if not logged in.""" return self._connection.user @property def guilds(self): """List[:class:`.Guild`]: The guilds that the connected client is a member of.""" return self._connection.guilds @property def emojis(self): """List[:class:`.Emoji`]: The emojis that the connected client has.""" return self._connection.emojis @property def cached_messages(self): """Sequence[:class:`.Message`]: Read-only list of messages the connected client has cached. .. versionadded:: 1.1 """ return utils.SequenceProxy(self._connection._messages or []) @property def private_channels(self): """List[:class:`.abc.PrivateChannel`]: The private channels that the connected client is participating on. .. note:: This returns only up to 128 most recent private channels due to an internal working on how Discord deals with private channels. """ return self._connection.private_channels @property def voice_clients(self): """List[:class:`.VoiceProtocol`]: Represents a list of voice connections. These are usually :class:`.VoiceClient` instances. """ return self._connection.voice_clients @property def application_id(self): """Optional[:class:`int`]: The client's application ID. If this is not passed via ``__init__`` then this is retrieved through the gateway when an event contains the data. Usually after :func:`~discord.on_connect` is called. """ return self._connection.application_id @property def application_flags(self) -> ApplicationFlags: """:class:`~discord.ApplicationFlags`: The client's application flags. .. 
versionadded: 2.0 """ return self._connection.application_flags # type: ignore def is_ready(self): """:class:`bool`: Specifies if the client's internal cache is ready for use.""" return self._ready.is_set() async def _run_event(self, coro, event_name, *args, **kwargs): try: await coro(*args, **kwargs) except asyncio.CancelledError: pass except Exception: try: await self.on_error(event_name, *args, **kwargs) except asyncio.CancelledError: pass def _schedule_event(self, coro, event_name, *args, **kwargs): wrapped = self._run_event(coro, event_name, *args, **kwargs) # Schedules the task return asyncio.create_task(wrapped, name=f'discord.py: {event_name}') def dispatch(self, event, *args, **kwargs): log.debug('Dispatching event %s', event) method = 'on_' + event listeners = self._listeners.get(event) if listeners: removed = [] for i, (future, condition) in enumerate(listeners): if future.cancelled(): removed.append(i) continue try: result = condition(*args) except Exception as exc: future.set_exception(exc) removed.append(i) else: if result: if len(args) == 0: future.set_result(None) elif len(args) == 1: future.set_result(args[0]) else: future.set_result(args) removed.append(i) if len(removed) == len(listeners): self._listeners.pop(event) else: for idx in reversed(removed): del listeners[idx] try: coro = getattr(self, method) except AttributeError: pass else: self._schedule_event(coro, method, *args, **kwargs) async def on_error(self, event_method, *args, **kwargs): """|coro| The default error handler provided by the client. By default this prints to :data:`sys.stderr` however it could be overridden to have a different implementation. Check :func:`~discord.on_error` for more details. """ print(f'Ignoring exception in {event_method}', file=sys.stderr) traceback.print_exc() # hooks async def _call_before_identify_hook(self, shard_id, *, initial=False): # This hook is an internal hook that actually calls the public one. # It allows the library to have its own hook without stepping on the # toes of those who need to override their own hook. await self.before_identify_hook(shard_id, initial=initial) async def before_identify_hook(self, shard_id, *, initial=False): """|coro| A hook that is called before IDENTIFYing a session. This is useful if you wish to have more control over the synchronization of multiple IDENTIFYing clients. The default implementation sleeps for 5 seconds. .. versionadded:: 1.4 Parameters ------------ shard_id: :class:`int` The shard ID that requested being IDENTIFY'd initial: :class:`bool` Whether this IDENTIFY is the first initial IDENTIFY. """ if not initial: await asyncio.sleep(5.0) # login state management async def login(self, token): """|coro| Logs in the client with the specified credentials. Parameters ----------- token: :class:`str` The authentication token. Do not prefix this token with anything as the library will do it for you. Raises ------ :exc:`.LoginFailure` The wrong credentials are passed. :exc:`.HTTPException` An unknown HTTP related error occurred, usually when it isn't 200 or the known incorrect credentials passing status code. """ log.info('logging in using static token') await self.http.static_login(token.strip()) async def connect(self, *, reconnect=True): """|coro| Creates a websocket connection and lets the websocket listen to messages from Discord. This is a loop that runs the entire event system and miscellaneous aspects of the library. Control is not resumed until the WebSocket connection is terminated. 
Parameters ----------- reconnect: :class:`bool` If we should attempt reconnecting, either due to internet failure or a specific failure on Discord's part. Certain disconnects that lead to bad state will not be handled (such as invalid sharding payloads or bad tokens). Raises ------- :exc:`.GatewayNotFound` If the gateway to connect to Discord is not found. Usually if this is thrown then there is a Discord API outage. :exc:`.ConnectionClosed` The websocket connection has been terminated. """ backoff = ExponentialBackoff() ws_params = { 'initial': True, 'shard_id': self.shard_id, } while not self.is_closed(): try: coro = DiscordWebSocket.from_client(self, **ws_params) self.ws = await asyncio.wait_for(coro, timeout=60.0) ws_params['initial'] = False while True: await self.ws.poll_event() except ReconnectWebSocket as e: log.info('Got a request to %s the websocket.', e.op) self.dispatch('disconnect') ws_params.update(sequence=self.ws.sequence, resume=e.resume, session=self.ws.session_id) continue except (OSError, HTTPException, GatewayNotFound, ConnectionClosed, aiohttp.ClientError, asyncio.TimeoutError) as exc: self.dispatch('disconnect') if not reconnect: await self.close() if isinstance(exc, ConnectionClosed) and exc.code == 1000: # clean close, don't re-raise this return raise if self.is_closed(): return # If we get connection reset by peer then try to RESUME if isinstance(exc, OSError) and exc.errno in (54, 10054): ws_params.update(sequence=self.ws.sequence, initial=False, resume=True, session=self.ws.session_id) continue # We should only get this when an unhandled close code happens, # such as a clean disconnect (1000) or a bad state (bad token, no sharding, etc) # sometimes, discord sends us 1000 for unknown reasons so we should reconnect # regardless and rely on is_closed instead if isinstance(exc, ConnectionClosed): if exc.code == 4014: raise PrivilegedIntentsRequired(exc.shard_id) from None if exc.code != 1000: await self.close() raise retry = backoff.delay() log.exception("Attempting a reconnect in %.2fs", retry) await asyncio.sleep(retry) # Always try to RESUME the connection # If the connection is not RESUME-able then the gateway will invalidate the session. # This is apparently what the official Discord client does. ws_params.update(sequence=self.ws.sequence, resume=True, session=self.ws.session_id) async def close(self): """|coro| Closes the connection to Discord. """ if self._closed: return await self.http.close() self._closed = True for voice in self.voice_clients: try: await voice.disconnect() except Exception: # if an error happens during disconnects, disregard it. pass if self.ws is not None and self.ws.open: await self.ws.close(code=1000) self._ready.clear() def clear(self): """Clears the internal state of the bot. After this, the bot can be considered "re-opened", i.e. :meth:`is_closed` and :meth:`is_ready` both return ``False`` along with the bot's internal cache cleared. """ self._closed = False self._ready.clear() self._connection.clear() self.http.recreate() async def start(self, token, *, reconnect=True): """|coro| A shorthand coroutine for :meth:`login` + :meth:`connect`. Raises ------- TypeError An unexpected keyword argument was received. """ await self.login(token) await self.connect(reconnect=reconnect) def run(self, *args, **kwargs): """A blocking call that abstracts away the event loop initialisation from you. If you want more control over the event loop then this function should not be used. Use :meth:`start` coroutine or :meth:`connect` + :meth:`login`. 
Roughly Equivalent to: :: try: loop.run_until_complete(start(*args, **kwargs)) except KeyboardInterrupt: loop.run_until_complete(close()) # cancel all tasks lingering finally: loop.close() .. warning:: This function must be the last function to call due to the fact that it is blocking. That means that registration of events or anything being called after this function call will not execute until it returns. """ loop = self.loop try: loop.add_signal_handler(signal.SIGINT, lambda: loop.stop()) loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop()) except NotImplementedError: pass async def runner(): try: await self.start(*args, **kwargs) finally: if not self.is_closed(): await self.close() def stop_loop_on_completion(f): loop.stop() future = asyncio.ensure_future(runner(), loop=loop) future.add_done_callback(stop_loop_on_completion) try: loop.run_forever() except KeyboardInterrupt: log.info('Received signal to terminate bot and event loop.') finally: future.remove_done_callback(stop_loop_on_completion) log.info('Cleaning up tasks.') _cleanup_loop(loop) if not future.cancelled(): try: return future.result() except KeyboardInterrupt: # I am unsure why this gets raised here but suppress it anyway return None # properties def is_closed(self): """:class:`bool`: Indicates if the websocket connection is closed.""" return self._closed @property def activity(self): """Optional[:class:`.BaseActivity`]: The activity being used upon logging in. """ return create_activity(self._connection._activity) @activity.setter def activity(self, value): if value is None: self._connection._activity = None elif isinstance(value, BaseActivity): self._connection._activity = value.to_dict() else: raise TypeError('activity must derive from BaseActivity.') @property def allowed_mentions(self): """Optional[:class:`~discord.AllowedMentions`]: The allowed mention configuration. .. versionadded:: 1.4 """ return self._connection.allowed_mentions @allowed_mentions.setter def allowed_mentions(self, value): if value is None or isinstance(value, AllowedMentions): self._connection.allowed_mentions = value else: raise TypeError(f'allowed_mentions must be AllowedMentions not {value.__class__!r}') @property def intents(self): """:class:`~discord.Intents`: The intents configured for this connection. .. versionadded:: 1.5 """ return self._connection.intents # helpers/getters @property def users(self): """List[:class:`~discord.User`]: Returns a list of all the users the bot can see.""" return list(self._connection._users.values()) def get_channel(self, id): """Returns a channel with the given ID. Parameters ----------- id: :class:`int` The ID to search for. Returns -------- Optional[Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`]] The returned channel or ``None`` if not found. """ return self._connection.get_channel(id) def get_guild(self, id): """Returns a guild with the given ID. Parameters ----------- id: :class:`int` The ID to search for. Returns -------- Optional[:class:`.Guild`] The guild or ``None`` if not found. """ return self._connection._get_guild(id) def get_user(self, id): """Returns a user with the given ID. Parameters ----------- id: :class:`int` The ID to search for. Returns -------- Optional[:class:`~discord.User`] The user or ``None`` if not found. """ return self._connection.get_user(id) def get_emoji(self, id): """Returns an emoji with the given ID. Parameters ----------- id: :class:`int` The ID to search for. Returns -------- Optional[:class:`.Emoji`] The custom emoji or ``None`` if not found. 
""" return self._connection.get_emoji(id) def get_all_channels(self): """A generator that retrieves every :class:`.abc.GuildChannel` the client can 'access'. This is equivalent to: :: for guild in client.guilds: for channel in guild.channels: yield channel .. note:: Just because you receive a :class:`.abc.GuildChannel` does not mean that you can communicate in said channel. :meth:`.abc.GuildChannel.permissions_for` should be used for that. Yields ------ :class:`.abc.GuildChannel` A channel the client can 'access'. """ for guild in self.guilds: yield from guild.channels def get_all_members(self): """Returns a generator with every :class:`.Member` the client can see. This is equivalent to: :: for guild in client.guilds: for member in guild.members: yield member Yields ------ :class:`.Member` A member the client can see. """ for guild in self.guilds: yield from guild.members # listeners/waiters async def wait_until_ready(self): """|coro| Waits until the client's internal cache is all ready. """ await self._ready.wait() def wait_for(self, event, *, check=None, timeout=None): """|coro| Waits for a WebSocket event to be dispatched. This could be used to wait for a user to reply to a message, or to react to a message, or to edit a message in a self-contained way. The ``timeout`` parameter is passed onto :func:`asyncio.wait_for`. By default, it does not timeout. Note that this does propagate the :exc:`asyncio.TimeoutError` for you in case of timeout and is provided for ease of use. In case the event returns multiple arguments, a :class:`tuple` containing those arguments is returned instead. Please check the :ref:`documentation <discord-api-events>` for a list of events and their parameters. This function returns the **first event that meets the requirements**. Examples --------- Waiting for a user reply: :: @client.event async def on_message(message): if message.content.startswith('$greet'): channel = message.channel await channel.send('Say hello!') def check(m): return m.content == 'hello' and m.channel == channel msg = await client.wait_for('message', check=check) await channel.send(f'Hello {msg.author}!') Waiting for a thumbs up reaction from the message author: :: @client.event async def on_message(message): if message.content.startswith('$thumb'): channel = message.channel await channel.send('Send me that \N{THUMBS UP SIGN} reaction, mate') def check(reaction, user): return user == message.author and str(reaction.emoji) == '\N{THUMBS UP SIGN}' try: reaction, user = await client.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError: await channel.send('\N{THUMBS DOWN SIGN}') else: await channel.send('\N{THUMBS UP SIGN}') Parameters ------------ event: :class:`str` The event name, similar to the :ref:`event reference <discord-api-events>`, but without the ``on_`` prefix, to wait for. check: Optional[Callable[..., :class:`bool`]] A predicate to check what to wait for. The arguments must meet the parameters of the event being waited for. timeout: Optional[:class:`float`] The number of seconds to wait before timing out and raising :exc:`asyncio.TimeoutError`. Raises ------- asyncio.TimeoutError If a timeout is provided and it was reached. Returns -------- Any Returns no arguments, a single argument, or a :class:`tuple` of multiple arguments that mirrors the parameters passed in the :ref:`event reference <discord-api-events>`. 
""" future = self.loop.create_future() if check is None: def _check(*args): return True check = _check ev = event.lower() try: listeners = self._listeners[ev] except KeyError: listeners = [] self._listeners[ev] = listeners listeners.append((future, check)) return asyncio.wait_for(future, timeout) # event registration def event(self, coro): """A decorator that registers an event to listen to. You can find more info about the events on the :ref:`documentation below <discord-api-events>`. The events must be a :ref:`coroutine <coroutine>`, if not, :exc:`TypeError` is raised. Example --------- .. code-block:: python3 @client.event async def on_ready(): print('Ready!') Raises -------- TypeError The coroutine passed is not actually a coroutine. """ if not asyncio.iscoroutinefunction(coro): raise TypeError('event registered must be a coroutine function') setattr(self, coro.__name__, coro) log.debug('%s has successfully been registered as an event', coro.__name__) return coro async def change_presence(self, *, activity=None, status=None, afk=False): """|coro| Changes the client's presence. Example --------- .. code-block:: python3 game = discord.Game("with the API") await client.change_presence(status=discord.Status.idle, activity=game) Parameters ---------- activity: Optional[:class:`.BaseActivity`] The activity being done. ``None`` if no currently active activity is done. status: Optional[:class:`.Status`] Indicates what status to change to. If ``None``, then :attr:`.Status.online` is used. afk: Optional[:class:`bool`] Indicates if you are going AFK. This allows the discord client to know how to handle push notifications better for you in case you are actually idle and not lying. Raises ------ :exc:`.InvalidArgument` If the ``activity`` parameter is not the proper type. """ if status is None: status = 'online' status_enum = Status.online elif status is Status.offline: status = 'invisible' status_enum = Status.offline else: status_enum = status status = str(status) await self.ws.change_presence(activity=activity, status=status, afk=afk) for guild in self._connection.guilds: me = guild.me if me is None: continue if activity is not None: me.activities = (activity,) else: me.activities = () me.status = status_enum # Guild stuff def fetch_guilds(self, *, limit: int = 100, before: SnowflakeTime = None, after: SnowflakeTime = None) -> List[Guild]: """Retrieves an :class:`.AsyncIterator` that enables receiving your guilds. .. note:: Using this, you will only receive :attr:`.Guild.owner`, :attr:`.Guild.icon`, :attr:`.Guild.id`, and :attr:`.Guild.name` per :class:`.Guild`. .. note:: This method is an API call. For general usage, consider :attr:`guilds` instead. Examples --------- Usage :: async for guild in client.fetch_guilds(limit=150): print(guild.name) Flattening into a list :: guilds = await client.fetch_guilds(limit=150).flatten() # guilds is now a list of Guild... All parameters are optional. Parameters ----------- limit: Optional[:class:`int`] The number of guilds to retrieve. If ``None``, it retrieves every guild you have access to. Note, however, that this would make it a slow operation. Defaults to ``100``. before: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`] Retrieves guilds before this date or object. If a datetime is provided, it is recommended to use a UTC aware datetime. If the datetime is naive, it is assumed to be local time. after: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`] Retrieve guilds after this date or object. 
If a datetime is provided, it is recommended to use a UTC aware datetime. If the datetime is naive, it is assumed to be local time. Raises ------ :exc:`.HTTPException` Getting the guilds failed. Yields -------- :class:`.Guild` The guild with the guild data parsed. """ return GuildIterator(self, limit=limit, before=before, after=after) async def fetch_template(self, code): """|coro| Gets a :class:`.Template` from a discord.new URL or code. Parameters ----------- code: Union[:class:`.Template`, :class:`str`] The Discord Template Code or URL (must be a discord.new URL). Raises ------- :exc:`.NotFound` The template is invalid. :exc:`.HTTPException` Getting the template failed. Returns -------- :class:`.Template` The template from the URL/code. """ code = utils.resolve_template(code) data = await self.http.get_template(code) return Template(data=data, state=self._connection) # type: ignore async def fetch_guild(self, guild_id): """|coro| Retrieves a :class:`.Guild` from an ID. .. note:: Using this, you will **not** receive :attr:`.Guild.channels`, :attr:`.Guild.members`, :attr:`.Member.activity` and :attr:`.Member.voice` per :class:`.Member`. .. note:: This method is an API call. For general usage, consider :meth:`get_guild` instead. Parameters ----------- guild_id: :class:`int` The guild's ID to fetch from. Raises ------ :exc:`.Forbidden` You do not have access to the guild. :exc:`.HTTPException` Getting the guild failed. Returns -------- :class:`.Guild` The guild from the ID. """ data = await self.http.get_guild(guild_id) return Guild(data=data, state=self._connection) async def create_guild(self, name: str, region: Optional[VoiceRegion] = None, icon: Any = None, *, code: str = None): """|coro| Creates a :class:`.Guild`. Bot accounts in more than 10 guilds are not allowed to create guilds. Parameters ---------- name: :class:`str` The name of the guild. region: :class:`.VoiceRegion` The region for the voice communication server. Defaults to :attr:`.VoiceRegion.us_west`. icon: :class:`bytes` The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit` for more details on what is expected. code: Optional[:class:`str`] The code for a template to create the guild with. .. versionadded:: 1.4 Raises ------ :exc:`.HTTPException` Guild creation failed. :exc:`.InvalidArgument` Invalid icon image format given. Must be PNG or JPG. Returns ------- :class:`.Guild` The guild created. This is not the same guild that is added to cache. """ if icon is not None: icon = utils._bytes_to_base64_data(icon) region = region or VoiceRegion.us_west region_value = region.value if code: data = await self.http.create_from_template(code, name, region_value, icon) else: data = await self.http.create_guild(name, region_value, icon) return Guild(data=data, state=self._connection) # Invite management async def fetch_invite(self, url: Union[Invite, str], *, with_counts: bool = True, with_expiration: bool = True) -> Invite: """|coro| Gets an :class:`.Invite` from a discord.gg URL or ID. .. note:: If the invite is for a guild you have not joined, the guild and channel attributes of the returned :class:`.Invite` will be :class:`.PartialInviteGuild` and :class:`.PartialInviteChannel` respectively. Parameters ----------- url: Union[:class:`.Invite`, :class:`str`] The Discord invite ID or URL (must be a discord.gg URL). with_counts: :class:`bool` Whether to include count information in the invite. This fills the :attr:`.Invite.approximate_member_count` and :attr:`.Invite.approximate_presence_count` fields. 
with_expiration: :class:`bool` Whether to include the expiration date of the invite. This fills the :attr:`.Invite.expires_at` field. .. versionadded:: 2.0 Raises ------- :exc:`.NotFound` The invite has expired or is invalid. :exc:`.HTTPException` Getting the invite failed. Returns -------- :class:`.Invite` The invite from the URL/ID. """ invite_id = utils.resolve_invite(url) data = await self.http.get_invite(invite_id, with_counts=with_counts, with_expiration=with_expiration) return Invite.from_incomplete(state=self._connection, data=data) async def delete_invite(self, invite: Union[Invite, str]) -> None: """|coro| Revokes an :class:`.Invite`, URL, or ID to an invite. You must have the :attr:`~.Permissions.manage_channels` permission in the associated guild to do this. Parameters ---------- invite: Union[:class:`.Invite`, :class:`str`] The invite to revoke. Raises ------- :exc:`.Forbidden` You do not have permissions to revoke invites. :exc:`.NotFound` The invite is invalid or expired. :exc:`.HTTPException` Revoking the invite failed. """ invite_id = utils.resolve_invite(invite) await self.http.delete_invite(invite_id) # Miscellaneous stuff async def fetch_widget(self, guild_id): """|coro| Gets a :class:`.Widget` from a guild ID. .. note:: The guild must have the widget enabled to get this information. Parameters ----------- guild_id: :class:`int` The ID of the guild. Raises ------- :exc:`.Forbidden` The widget for this guild is disabled. :exc:`.HTTPException` Retrieving the widget failed. Returns -------- :class:`.Widget` The guild's widget. """ data = await self.http.get_widget(guild_id) return Widget(state=self._connection, data=data) async def application_info(self): """|coro| Retrieves the bot's application information. Raises ------- :exc:`.HTTPException` Retrieving the information failed somehow. Returns -------- :class:`.AppInfo` The bot's application information. """ data = await self.http.application_info() if 'rpc_origins' not in data: data['rpc_origins'] = None return AppInfo(self._connection, data) async def fetch_user(self, user_id): """|coro| Retrieves a :class:`~discord.User` based on their ID. You do not have to share any guilds with the user to get this information, however many operations do require that you do. .. note:: This method is an API call. If you have :attr:`discord.Intents.members` and member cache enabled, consider :meth:`get_user` instead. Parameters ----------- user_id: :class:`int` The user's ID to fetch from. Raises ------- :exc:`.NotFound` A user with this ID does not exist. :exc:`.HTTPException` Fetching the user failed. Returns -------- :class:`~discord.User` The user you requested. """ data = await self.http.get_user(user_id) return User(state=self._connection, data=data) async def fetch_channel(self, channel_id): """|coro| Retrieves a :class:`.abc.GuildChannel` or :class:`.abc.PrivateChannel` with the specified ID. .. note:: This method is an API call. For general usage, consider :meth:`get_channel` instead. .. versionadded:: 1.2 Raises ------- :exc:`.InvalidData` An unknown channel type was received from Discord. :exc:`.HTTPException` Retrieving the channel failed. :exc:`.NotFound` Invalid Channel ID. :exc:`.Forbidden` You do not have permission to fetch this channel. Returns -------- Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`] The channel from the ID. 
""" data = await self.http.get_channel(channel_id) factory, ch_type = _channel_factory(data['type']) if factory is None: raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(data)) if ch_type in (ChannelType.group, ChannelType.private): channel = factory(me=self.user, data=data, state=self._connection) else: guild_id = int(data['guild_id']) guild = self.get_guild(guild_id) or Object(id=guild_id) channel = factory(guild=guild, state=self._connection, data=data) return channel async def fetch_webhook(self, webhook_id): """|coro| Retrieves a :class:`.Webhook` with the specified ID. Raises -------- :exc:`.HTTPException` Retrieving the webhook failed. :exc:`.NotFound` Invalid webhook ID. :exc:`.Forbidden` You do not have permission to fetch this webhook. Returns --------- :class:`.Webhook` The webhook you requested. """ data = await self.http.get_webhook(webhook_id) return Webhook.from_state(data, state=self._connection) async def create_dm(self, user): """|coro| Creates a :class:`.DMChannel` with this user. This should be rarely called, as this is done transparently for most people. .. versionadded:: 2.0 Parameters ----------- user: :class:`~discord.abc.Snowflake` The user to create a DM with. Returns ------- :class:`.DMChannel` The channel that was created. """ state = self._connection found = state._get_private_channel_by_user(user.id) if found: return found data = await state.http.start_private_message(user.id) return state.add_dm_channel(data)
py
1a3339bc9308d8478520c22fd3600972a27a98dd
import logging import numpy as np import torch from torch.utils.data import Dataset, DataLoader LOGLEVEL = (('debug', logging.DEBUG), ('info', logging.INFO), ('warn', logging.WARN), ('error', logging.ERROR)) LOG = logging.getLogger(__name__) _train_file = 'dataset/train.npz' _test_file = 'dataset/test.npz' _embedding_file = 'dataset/text-embedding.npz' _split_file = 'dataset/train-split.npz' class ToxicTrainData(Dataset): def __init__(self, train_file, train_embedding): super(ToxicTrainData, self).__init__() self.text = train_embedding train_npz = np.load(train_file) self.X = train_npz['X'].astype(np.float32) self.y = train_npz['y'].astype(np.float32) def __len__(self): return len(self.y) def __getitem__(self, idx): return self.text[idx], self.X[idx], self.y[idx] class ToxicTestData(Dataset): def __init__(self, test_file, test_embedding): super(ToxicTestData, self).__init__() self.text = test_embedding test_npz = np.load(test_file) self.X = test_npz['X'].astype(np.float32) def __len__(self): return len(self.X) def __getitem__(self, idx): return self.text[idx], self.X[idx], 0 class ToxicDataStride(Dataset): def __init__(self, dataset, indices, embedding_list): super(ToxicDataStride, self).__init__() self.dataset = dataset self.indices = indices self.embedding_list = embedding_list def __len__(self): return len(self.indices) def __getitem__(self, idx): text, X, y = self.dataset[self.indices[idx]] text = self.embedding_list[text] return torch.from_numpy(text), torch.from_numpy(X), y class ToxicTrainSplitter(object): def __init__(self, dataset, split_file, embedding_list): self.dataset = dataset self.embedding_list = embedding_list self.indices = np.load(split_file)['indices'] def split(self): valid_indices = self.indices[0] train_indices = [] for indices in self.indices[1:]: train_indices += indices train_stride = ToxicDataStride(self.dataset, train_indices, self.embedding_list) valid_stride = ToxicDataStride(self.dataset, valid_indices, self.embedding_list) return train_stride, valid_stride def kfold(self): for i in range(len(self.indices)): valid_indices = self.indices[i] train_indices = [] for j in range(len(self.indices)): if j != i: train_indices += self.indices[j] train_stride = ToxicDataStride(self.dataset, train_indices, self.embedding_list) valid_stride = ToxicDataStride(self.dataset, valid_indices, self.embedding_list) yield train_stride, valid_stride class ToxicTrainLoader(object): def __init__(self, batch_size, cv, n_workers=0): self.batch_size = batch_size self.cv = cv self.n_workers = n_workers embedding_npz = np.load(_embedding_file) train_embedding = embedding_npz['train_embedding'] embedding_list = embedding_npz['embedding_list'] dataset = ToxicTrainData(_train_file, train_embedding) self.splitter = ToxicTrainSplitter(dataset, _split_file, embedding_list) def __call__(self): if self.cv: strides = self.splitter.kfold() for train_stride, valid_stride in strides: train_loader = DataLoader(train_stride, shuffle=True, batch_size=self.batch_size, num_workers=self.n_workers) valid_loader = DataLoader(valid_stride, shuffle=False, batch_size=self.batch_size, num_workers=self.n_workers) yield train_loader, valid_loader else: train_stride, valid_stride = self.splitter.split() train_loader = DataLoader(train_stride, shuffle=True, batch_size=self.batch_size, num_workers=self.n_workers) valid_loader = DataLoader(valid_stride, shuffle=False, batch_size=self.batch_size, num_workers=self.n_workers) yield train_loader, valid_loader class ToxicTestLoader(object): def __init__(self, batch_size, 
n_workers=0, validate=False): self.batch_size = batch_size self.n_workers = n_workers embedding_npz = np.load(_embedding_file) train_embedding = embedding_npz['train_embedding'] test_embedding = embedding_npz['test_embedding'] embedding_list = embedding_npz['embedding_list'] if validate: dataset = ToxicTestData(_train_file, train_embedding) else: dataset = ToxicTestData(_test_file, test_embedding) indices = np.arange(len(dataset)) self.stride = ToxicDataStride(dataset, indices, embedding_list) def __call__(self): return DataLoader(self.stride, batch_size=self.batch_size, num_workers=self.n_workers, shuffle=False) ############################################################ # learning rate scheduler ############################################################ class LRSchedNone(object): """ No learning rate adjustment """ def __init__(self, param_groups, lr): self.param_groups = param_groups self.set_lr(lr) def set_lr(self, lr): for param_group in self.param_groups: param_group['lr'] = lr self.lr = lr def update(self, loss, **kwargs): pass class LRSchedStep(LRSchedNone): """ Learning rate scheduler based on predefine (loss, lr) pairs """ def __init__(self, param_groups, lr, *steps): super(LRSchedStep, self).__init__(param_groups, lr) self.steps = steps def update(self, loss, **kwargs): maximize = kwargs.get('maximize', False) for step_loss, step_lr in self.steps: adjust_needed = (maximize and loss > step_loss) or \ ((not maximize) and loss < step_loss) if adjust_needed and self.lr > step_lr: self.set_lr(step_lr) LOG.info('Update learning rate to {:.5f}'.format(step_lr)) class LRSchedDecay(LRSchedNone): """ Learning rate decay on each epoch """ def __init__(self, param_groups, lr, decay, lr_min=0.0): super(LRSchedDecay, self).__init__(param_groups, lr) self.decay = decay self.lr_min = lr_min def update(self, loss, **kwargs): if self.lr > self.lr_min: lr = max(self.lr*self.decay, self.lr_min) self.set_lr(lr) LOG.debug('Update learning rate to {:.5f}'.format(lr))
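A rough sketch of how the loaders above might be consumed (assumptions: the .npz files under dataset/ exist, and the actual network and loss are defined elsewhere).

# Hypothetical consumer of ToxicTrainLoader; the training step itself is elided.
loader_factory = ToxicTrainLoader(batch_size=256, cv=False)

for train_loader, valid_loader in loader_factory():
    for text, features, target in train_loader:
        # text:     batched embedding tensors looked up from embedding_list
        # features: batched float32 rows from train.npz['X']
        # target:   batched float32 labels from train.npz['y']
        pass   # forward / backward pass would go here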
py
1a3339c48af91b5d8d69355b13723c8733b06975
from base64 import b64encode, b64decode


class Byte:
    def __get__(self, instance, cls):
        return instance._byte

    def __set__(self, instance, value):
        instance._byte = value
        instance._hex = value.hex()
        instance._base64 = b64encode(value)


class Hex:
    def __get__(self, instance, cls):
        return instance._hex

    def __set__(self, instance, value):
        instance._byte = bytes.fromhex(value)
        instance._hex = value
        instance._base64 = b64encode(bytes.fromhex(value))


class Base64:
    def __get__(self, instance, cls):
        return instance._base64

    def __set__(self, instance, value):
        instance._byte = b64decode(value)
        instance._hex = b64decode(value).hex()
        instance._base64 = value


class Data:
    byte = Byte()
    hex = Hex()
    base64 = Base64()

    def __init__(self):
        self._byte = None
        self._hex = None
        self._base64 = None
        self._ascii = None
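A short usage sketch (not in the original file): assigning any one representation to a Data instance keeps the other two in sync through the descriptors above.

# Hypothetical usage of the Data container defined above.
d = Data()
d.hex = '48656c6c6f'      # "Hello" as hex
print(d.byte)             # b'Hello'
print(d.base64)           # b'SGVsbG8='

d.base64 = b'SGVsbG8='
print(d.hex)              # 48656c6c6f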
py
1a3339f876fd3a36251c4678210267a238805eff
import fxcmpy import pandas as pd import numpy as np import datetime as dt from pyti.simple_moving_average import simple_moving_average as sma #from pyti.exponential_moving_average import exponential_moving_average as sma #con = fxcmpy.fxcmpy(config_file='fxcm.cfg') # Allows for printing the whole data frame pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) # df = con.get_candles('EUR/USD', period='D1', start= dt.datetime(2021, 12, 15), end = dt.datetime(2022, 1, 30)) #df_data = con.get_candles('EUR/USD', period='m5', number=10000) # saving the dataframe #df_data.to_csv('EUR_USD.csv') df = pd.read_csv('EUR_USD_M5_SLOW_1000_FAST_100.csv') # Define our pip cost and lot size pip_cost = 1 lot_size = 10 # Define EMA Fast / Slow parameters fast = 100 slow = 1000 # HMA fast and slow calculation df['hma_fast'] = sma(df['askclose'], fast) df['hma_slow'] = sma(df['askclose'], slow) df['sell'] = (df['hma_fast'] < df['hma_slow']) df['buy'] = (df['hma_fast'] > df['hma_slow']) df['signal'] = np.where(df['hma_fast'] < df['hma_slow'], 1, 0) df['position_operation'] = df['signal'].diff() begin_prices_buy = [] end_prices_buy = [] begin_prices_sell = [] end_prices_sell = [] profits = 0 # get open/close price for each open position for sells for i, row in df.iterrows(): if df.loc[i, 'position_operation'] == 1 and df.loc[i, 'sell'] == True and df.loc[i, 'signal'] == 1: begin_prices_sell.append(float(df.loc[i, 'askclose'])) index = i while index < len(df.index): if df.loc[index, 'position_operation'] == -1 and df.loc[index, 'sell'] == False: end_prices_sell.append(float(df.loc[index, 'askclose'])) index = len(df.index) index += 1 # get open/close price for each open position for sells i = 0 for i, row in df.iterrows(): if df.loc[i, 'position_operation'] == -1 and df.loc[i, 'buy'] == True and df.loc[i, 'signal'] == 0: begin_prices_buy.append(float(df.loc[i, 'askclose'])) index = i while index < len(df.index): if df.loc[index, 'position_operation'] == 1 and df.loc[index, 'buy'] == False: end_prices_buy.append(float(df.loc[index, 'askclose'])) index = len(df.index) index += 1 # # Calculating the profit / loss # for i in range(len(begin_prices_buy)): # profit = (end_prices_buy[i] - begin_prices_buy[i]) * 100 * pip_cost * lot_size # profits += profit # print("The return for trade " + str(i + 1) + " is: " + str(int(profit))) # Calculating the profit / loss # for i in range(len(begin_prices_buy)): # profit = (begin_prices_buy[i] - end_prices_buy[i]) * 1000 * pip_cost * lot_size # profits += profit # print("The return for trade BUY" + str(i + 1) + " is: " + str(int(profit))) # Reduce Operations not Concluded begin_prices_sell = begin_prices_sell[:len(end_prices_sell)] begin_prices_buy = begin_prices_buy[:len(end_prices_buy)] # Calculating the profit / loss SELL i = 0 for i in range(len(begin_prices_sell)): profit = (begin_prices_sell[i] - end_prices_sell[i]) * 1000 * pip_cost * lot_size profit = profit - 2 profits += profit print("The return for trade SELL" + str(i + 1) + " is: " + str(int(profit))) i = 0 for i in range(len(begin_prices_buy)): profit = (end_prices_buy[i] - begin_prices_buy[i]) * 1000 * pip_cost * lot_size profits += profit profit = profit - 2 print("The return for trade BUY" + str(i + 1) + " is: " + str(int(profit))) print("Profit: " + str(profits)) import matplotlib.pyplot as plt fig = plt.figure(figsize=(24, 16)) ax1 = fig.add_subplot(111, ylabel='EUR/USD Price') # Plotting market prices and moving averages df['askclose'].plot(ax=ax1, color='r', lw=1.) 
df[['hma_fast', 'hma_slow']].plot(ax=ax1, lw=2.) # Placing purple markers for position entry ax1.plot(df.loc[df.position_operation == 1.0].index, df.hma_fast[df.position_operation == 1.0], 'v', markersize=10, color='red') # Placing black markers for position exit ax1.plot(df.loc[df.position_operation == -1.0].index, df.hma_slow[df.position_operation == -1.0], '^', markersize=10, color='green') # Plotting of returns ax2 = ax1.twinx() ax2.grid(False) ax2.set_ylabel('Profits in $') ax2.plot(df['total'], color='green') plt.show()
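The plotting block above references df['total'], which is never created in this script. Below is a hedged sketch of one way that column might be intended (an assumption about the author's intent, not original code; it would need to run before the plotting section).

# Hypothetical cumulative P&L column so that ax2.plot(df['total']) has data to draw.
# Assumes bar-to-bar returns from holding the current signal (short when signal == 1,
# long otherwise); this is only one plausible reconstruction.
df['returns'] = np.where(df['signal'] == 1,
                         df['askclose'].shift(1) - df['askclose'],    # short leg
                         df['askclose'] - df['askclose'].shift(1))    # long leg
df['total'] = (df['returns'].fillna(0) * 1000 * pip_cost * lot_size).cumsum()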
py
1a333a5b95ba9f41712e6e62fc77a248fca96e63
#!/usr/bin/python # Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Simple client for the Gerrit REST API. Example usage: ./gerrit_client.py [command] [args]"" """ from __future__ import print_function import json import logging import optparse import subcommand import sys if sys.version_info.major == 2: import urlparse from urllib import quote_plus else: from urllib.parse import quote_plus import urllib.parse as urlparse import fix_encoding import gerrit_util import setup_color __version__ = '0.1' def write_result(result, opt): if opt.json_file: with open(opt.json_file, 'w') as json_file: json_file.write(json.dumps(result)) @subcommand.usage('[args ...]') def CMDmovechanges(parser, args): parser.add_option('-p', '--param', dest='params', action='append', help='repeatable query parameter, format: -p key=value') parser.add_option('--destination_branch', dest='destination_branch', help='where to move changes to') (opt, args) = parser.parse_args(args) assert opt.destination_branch, "--destination_branch not defined" for p in opt.params: assert '=' in p, '--param is key=value, not "%s"' % p host = urlparse.urlparse(opt.host).netloc limit = 100 while True: result = gerrit_util.QueryChanges( host, list(tuple(p.split('=', 1)) for p in opt.params), limit=limit, ) for change in result: gerrit_util.MoveChange(host, change['id'], opt.destination_branch) if len(result) < limit: break logging.info("Done") @subcommand.usage('[args ...]') def CMDbranchinfo(parser, args): parser.add_option('--branch', dest='branch', help='branch name') (opt, args) = parser.parse_args(args) host = urlparse.urlparse(opt.host).netloc project = quote_plus(opt.project) branch = quote_plus(opt.branch) result = gerrit_util.GetGerritBranch(host, project, branch) logging.info(result) write_result(result, opt) @subcommand.usage('[args ...]') def CMDbranch(parser, args): parser.add_option('--branch', dest='branch', help='branch name') parser.add_option('--commit', dest='commit', help='commit hash') (opt, args) = parser.parse_args(args) assert opt.project, "--project not defined" assert opt.branch, "--branch not defined" assert opt.commit, "--commit not defined" project = quote_plus(opt.project) host = urlparse.urlparse(opt.host).netloc branch = quote_plus(opt.branch) commit = quote_plus(opt.commit) result = gerrit_util.CreateGerritBranch(host, project, branch, commit) logging.info(result) write_result(result, opt) @subcommand.usage('[args ...]') def CMDchanges(parser, args): parser.add_option('-p', '--param', dest='params', action='append', help='repeatable query parameter, format: -p key=value') parser.add_option('-o', '--o-param', dest='o_params', action='append', help='gerrit output parameters, e.g. 
ALL_REVISIONS') parser.add_option('--limit', dest='limit', type=int, help='maximum number of results to return') parser.add_option('--start', dest='start', type=int, help='how many changes to skip ' '(starting with the most recent)') (opt, args) = parser.parse_args(args) for p in opt.params: assert '=' in p, '--param is key=value, not "%s"' % p result = gerrit_util.QueryChanges( urlparse.urlparse(opt.host).netloc, list(tuple(p.split('=', 1)) for p in opt.params), start=opt.start, # Default: None limit=opt.limit, # Default: None o_params=opt.o_params, # Default: None ) logging.info('Change query returned %d changes.', len(result)) write_result(result, opt) @subcommand.usage('') def CMDabandon(parser, args): parser.add_option('-c', '--change', type=int, help='change number') parser.add_option('-m', '--message', default='', help='reason for abandoning') (opt, args) = parser.parse_args(args) assert opt.change, "-c not defined" result = gerrit_util.AbandonChange( urlparse.urlparse(opt.host).netloc, opt.change, opt.message) logging.info(result) write_result(result, opt) class OptionParser(optparse.OptionParser): """Creates the option parse and add --verbose support.""" def __init__(self, *args, **kwargs): optparse.OptionParser.__init__(self, *args, version=__version__, **kwargs) self.add_option( '--verbose', action='count', default=0, help='Use 2 times for more debugging info') self.add_option('--host', dest='host', help='Url of host.') self.add_option('--project', dest='project', help='project name') self.add_option( '--json_file', dest='json_file', help='output json filepath') def parse_args(self, args=None, values=None): options, args = optparse.OptionParser.parse_args(self, args, values) # Host is always required assert options.host, "--host not defined." levels = [logging.WARNING, logging.INFO, logging.DEBUG] logging.basicConfig(level=levels[min(options.verbose, len(levels) - 1)]) return options, args def main(argv): if sys.hexversion < 0x02060000: print('\nYour python version %s is unsupported, please upgrade.\n' % (sys.version.split(' ', 1)[0],), file=sys.stderr) return 2 dispatcher = subcommand.CommandDispatcher(__name__) return dispatcher.execute(OptionParser(), argv) if __name__ == '__main__': # These affect sys.stdout so do it outside of main() to simplify mocks in # unit testing. fix_encoding.fix_encoding() setup_color.init() try: sys.exit(main(sys.argv[1:])) except KeyboardInterrupt: sys.stderr.write('interrupted\n') sys.exit(1)
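An illustrative invocation of the CLI above; the host, query parameters and output path are placeholders rather than values from the original.

# Hypothetical programmatic call into main(); roughly equivalent to running
#   ./gerrit_client.py changes --host ... -p status=open -p project=... --limit 10
main(['changes',
      '--host', 'https://chromium-review.googlesource.com',
      '-p', 'status=open',
      '-p', 'project=chromium/src',
      '--limit', '10',
      '--json_file', '/tmp/open_changes.json'])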
py
1a333a740d944c8f87416ab6ebd47ce0230f3a4a
#!/usr/bin/env python3
import os

from mowgli.infrastructure import endpoints


def run():
    port = os.environ.get('PORT', 8080)
    endpoints.APP.run(port=port, debug=True, host='0.0.0.0')


if __name__ == '__main__':
    run()
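One detail worth noting about the PORT lookup above (a side note, not original code): values read from the environment are strings, while the 8080 fallback is an int, so a caller may want an explicit int() conversion before handing the port on.

# Hypothetical illustration of the PORT lookup used by run() above.
import os

os.environ['PORT'] = '9090'
print(repr(os.environ.get('PORT', 8080)))   # "'9090'" - a str, not an int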
py
1a333af7347432dc222780c92120f43a5cd1514e
""" Copyright 2021 the CVXPY developers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cvxpy.atoms.elementwise.entr import entr from cvxpy.atoms.elementwise.log import log from cvxpy.atoms.elementwise.maximum import maximum # flake8: noqa: E501 def loggamma(x): """Elementwise log of the gamma function. Implementation has modest accuracy over the full range, approaching perfect accuracy as x goes to infinity. For details on the nature of the approximation, refer to `CVXPY GitHub Issue #228 <https://github.com/cvxpy/cvxpy/issues/228#issuecomment-544281906>`_. """ return maximum( 2.18382 - 3.62887*x, 1.79241 - 2.4902*x, 1.21628 - 1.37035*x, 0.261474 - 0.28904*x, 0.577216 - 0.577216*x, -0.175517 + 0.03649*x, -1.27572 + 0.621514*x, -0.845568 + 0.422784*x, -0.577216*x - log(x), 0.918939 - x - entr(x) - 0.5*log(x), )
py
1a333b14522e341b3b8dceae6bc4ea8b04323b3e
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class VirtualMachineScaleSetRollingUpgradesOperations(object): """VirtualMachineScaleSetRollingUpgradesOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.compute.v2019_07_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def _cancel_initial( self, resource_group_name, # type: str vm_scale_set_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-07-01" # Construct URL url = self._cancel_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.post(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore def begin_cancel( self, resource_group_name, # type: str vm_scale_set_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Cancels the current virtual machine scale set rolling upgrade. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. :type vm_scale_set_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._cancel_initial( resource_group_name=resource_group_name, vm_scale_set_name=vm_scale_set_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore def _start_os_upgrade_initial( self, resource_group_name, # type: str vm_scale_set_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-07-01" # Construct URL url = self._start_os_upgrade_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.post(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _start_os_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore def begin_start_os_upgrade( self, resource_group_name, # type: str vm_scale_set_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Starts a rolling upgrade to move all virtual machine scale set instances to the latest available Platform Image OS version. Instances which are already running the latest available OS version are not affected. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. :type vm_scale_set_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._start_os_upgrade_initial( resource_group_name=resource_group_name, vm_scale_set_name=vm_scale_set_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_start_os_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore def _start_extension_upgrade_initial( self, resource_group_name, # type: str vm_scale_set_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-07-01" # Construct URL url = self._start_extension_upgrade_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.post(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _start_extension_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # type: ignore def begin_start_extension_upgrade( self, resource_group_name, # type: str vm_scale_set_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Starts a rolling upgrade to move all extensions for all virtual machine scale set instances to the latest available extension version. Instances which are already running the latest extension versions are not affected. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. :type vm_scale_set_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._start_extension_upgrade_initial( resource_group_name=resource_group_name, vm_scale_set_name=vm_scale_set_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_start_extension_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # type: ignore def get_latest( self, resource_group_name, # type: str vm_scale_set_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.RollingUpgradeStatusInfo" """Gets the status of the latest virtual machine scale set rolling upgrade. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. 
:type vm_scale_set_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: RollingUpgradeStatusInfo, or the result of cls(response) :rtype: ~azure.mgmt.compute.v2019_07_01.models.RollingUpgradeStatusInfo :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.RollingUpgradeStatusInfo"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-07-01" accept = "application/json" # Construct URL url = self.get_latest.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_latest.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest'} # type: ignore
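# A usage sketch for the operations defined above, not part of the generated
# module. It assumes azure-identity is installed and that the subscription,
# resource group and scale set names below are placeholders for real resources.
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient

client = ComputeManagementClient(DefaultAzureCredential(), subscription_id="<subscription-id>")

# Start a rolling OS upgrade and block until the long-running operation finishes.
poller = client.virtual_machine_scale_set_rolling_upgrades.begin_start_os_upgrade(
    resource_group_name="<resource-group>",
    vm_scale_set_name="<vmss-name>",
)
poller.result()

# Inspect the status of the most recent rolling upgrade.
status = client.virtual_machine_scale_set_rolling_upgrades.get_latest(
    resource_group_name="<resource-group>",
    vm_scale_set_name="<vmss-name>",
)
print(status.running_status)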
py
1a333c1b8112998c74451fb1cd24ec81f6ed56ff
# Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for backend.lib.user.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import mock import endpoints from loaner.web_app.backend.lib import user from loaner.web_app.backend.testing import loanertest class UserTest(loanertest.EndpointsTestCase): def test_get_user_email(self): self.login_user() self.assertEqual(user.get_user_email(), loanertest.USER_EMAIL) def test_get_endpoints_user_email(self): self.login_endpoints_user() self.assertEqual(user.get_user_email(), loanertest.USER_EMAIL) @mock.patch('__main__.user.endpoints.get_current_user') @mock.patch('__main__.user.users.get_current_user') def test_endpoints_error( self, mock_currentuser, mock_currentuser_endpoint): mock_currentuser.return_value = None mock_currentuser_endpoint.side_effect = endpoints.InvalidGetUserCall with self.assertRaises(user.UserNotFound): user.get_user_email() @mock.patch('__main__.user.endpoints.get_current_user') @mock.patch('__main__.user.users.get_current_user') def test_get_user_email_no_user_found( self, mock_currentuser, mock_currentuser_endpoint): mock_currentuser.return_value = None mock_currentuser_endpoint.return_value = None with self.assertRaises(user.UserNotFound): user.get_user_email() if __name__ == '__main__': loanertest.main()
py
1a333c83ac590e71ec9715e3eea8a0e2e1c702db
"""Union-find data structure.""" # based on https://www.ics.uci.edu/~eppstein/PADS/UnionFind.py class UnionFind: """Union-find data structure.""" def __init__(self, n): """Create a new empty union-find structure.""" # we have singletons self.array = list(range(n)) self.size = n # We give the clusters as a dict self.clusters = {i: {i} for i in range(n)} def __getitem__(self, element): """Find and return the name of the set containing the element.""" return self.array[element] def union(self, group1, group2): """Find the sets containing the objects and merge them all.""" if len(self.clusters[group1]) > len(self.clusters[group2]): normal_order = True to_extend = group1 to_delete = group2 else: normal_order = False to_extend = group2 to_delete = group1 # update elements for i in self.clusters[to_delete]: self.array[i] = to_extend # updating the clusters self.clusters[to_extend].update(self.clusters[to_delete]) del self.clusters[to_delete] # which cluster is bigger? return normal_order def get_cluster(self, group): """List of element with id group.""" if group in self.clusters: if self.clusters[group] == set(): print("Something went wrong!") return self.clusters[group] else: return None def move(self, elem, group): """Move element into group.""" elem_group = self.array[elem] self.clusters[elem_group].remove(elem) if self.clusters[elem_group] == set(): del self.clusters[elem_group] self.array[elem] = group if group in self.clusters: self.clusters[group].add(elem) else: self.clusters[group] = {elem} def escape(self, elem, lower, upper): """Move element somewhere else.""" elem_group = self.array[elem] self.clusters[elem_group].remove(elem) for i in range(lower, upper): if i not in self.clusters: # if the cluster is empty self.array[elem] = i self.clusters[i] = {elem} break def __repr__(self): """To print only.""" return str(self.array)+str(self.clusters)
py
1a333f4378a3b5a0a4a343379953ebc5355decce
#!/usr/bin/env python # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This code example updates a creative set by adding a companion creative. To determine which creative sets exist, run get_all_creative_sets.py. The LoadFromStorage method is pulling credentials and properties from a "googleads.yaml" file. By default, it looks for this file in your home directory. For more information, see the "Caching authentication information" section of our README. """ # Import appropriate modules from the client library. from googleads import ad_manager # Set the ID of the creative set to update. CREATIVE_SET_ID = 'INSERT_CREATIVE_SET_ID_HERE' COMPANION_CREATIVE_ID = 'INSERT_COMPANION_CREATIVE_ID_HERE' def main(client, creative_set_id, companion_creative_id): # Initialize appropriate service. creative_set_service = client.GetService('CreativeSetService', version='v201808') # Create statement to select a single creative set by ID. statement = (ad_manager.StatementBuilder(version='v201808') .Where('id = :creativeSetId') .WithBindVariable('creativeSetId', long(creative_set_id))) # Get creative set. response = creative_set_service.getCreativeSetsByStatement( statement.ToStatement()) if 'results' in response and len(response['results']): updated_created_sets = [] for creative_set in response['results']: creative_set['companionCreativeIds'].append(companion_creative_id) updated_created_sets.append(creative_set) # Update the creative sets on the server. creative_sets = creative_set_service.updateCreativeSet(updated_created_sets) # Display results. for creative_set in creative_sets: print (('Creative set with ID "%s", master creative ID "%s", and ' 'companion creative IDs {%s} was updated.') % (creative_set['id'], creative_set['masterCreativeId'], ','.join(creative_set['companionCreativeIds']))) else: print 'No creative sets found to update.' if __name__ == '__main__': # Initialize client object. ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage() main(ad_manager_client, CREATIVE_SET_ID, COMPANION_CREATIVE_ID)
py
1a333f782be742009123a1d36d500946906c8843
'''
Design a data structure that supports all following operations in average O(1) time.
insert(val): Inserts an item val to the set if not already present.
remove(val): Removes an item val from the set if present.
getRandom: Returns a random element from current set of elements. Each element must have the same probability of being returned.
'''
import random


class RandomizedSet:

    def __init__(self):
        """
        Initialize your data structure here.
        A list stores the values and a dict maps each value to its index in
        that list, which keeps insert, remove and getRandom at average O(1).
        """
        self.vals = []
        self.pos = {}

    def insert(self, val: int) -> bool:
        """
        Inserts a value to the set. Returns true if the set did not already contain the specified element.
        """
        if val in self.pos:
            return False
        self.pos[val] = len(self.vals)
        self.vals.append(val)
        return True

    def remove(self, val: int) -> bool:
        """
        Removes a value from the set. Returns true if the set contained the specified element.
        """
        if val not in self.pos:
            return False
        # Overwrite the removed slot with the last value, then drop the tail.
        idx, last = self.pos[val], self.vals[-1]
        self.vals[idx] = last
        self.pos[last] = idx
        self.vals.pop()
        del self.pos[val]
        return True

    def getRandom(self) -> int:
        """
        Get a random element from the set.
        """
        return random.choice(self.vals)


# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
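# A brief demonstration of the class above; the inserted values are arbitrary.
if __name__ == "__main__":
    rs = RandomizedSet()
    print(rs.insert(1))    # True
    print(rs.insert(1))    # False: 1 is already present
    print(rs.insert(2))    # True
    print(rs.remove(1))    # True
    print(rs.getRandom())  # 2, the only remaining element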
py
1a333fef91c1dad74140ba01fe14419155a7ddfb
from flask import Flask, render_template, session, redirect, url_for, flash from flask_bootstrap import Bootstrap from flask_moment import Moment from flask_wtf import Form from wtforms import StringField, SubmitField from wtforms.validators import Required, ValidationError from datetime import datetime app = Flask(__name__) bootstrap = Bootstrap(app) moment = Moment(app) # Example 4.1 requirement # Not creating some crazy string because it's # exposed in this github repo anyways app.config['SECRET_KEY'] = 'key' @app.route('/', methods=['GET', 'POST']) def index(): form = UofTForm() if form.validate_on_submit(): old_name = session.get('name') print(old_name) print(form.name.data) if old_name is not None and old_name != form.name.data: flash('Looks like you have changed your name!') session['name'] = form.name.data session['email'] = form.email.data return redirect(url_for('index')) return render_template( 'index.html', current_time=datetime.utcnow(), form=form, name=session.get('name'), email=session.get('email') ) @app.route('/user/<name>') def user(name): return render_template('user.html', name=name) @app.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404 @app.errorhandler(500) def internal_server_error(e): return render_template('500.html'), 500 class EmailValidator(object): """ Checks for the presence of an '@' symbol. """ def __call__(self, form, field): if '@' not in field.data: message = 'Please include an \'@\' in the email address. \'' + field.data + '\' is missing an \'@\'.' raise ValidationError(message) class UofTForm(Form): name = StringField('What is your name?', validators=[Required()]) email = StringField('What is your UofT Email address?', validators=[EmailValidator()]) submit = SubmitField('Submit') if __name__ == '__main__': app.run(debug=True)
py
1a333ffc30bb85ea4fb6c7390482730634ee7724
#!/usr/bin/env python """ Copyright (c) 2020 NIDDS developers (https://github.com/prasanthc41m/nidds/) See the file 'LICENSE' for copying permission """ import re from core.common import retrieve_content __url__ = "https://cybercrime-tracker.net/all.php" __check__ = "cp.php?m=login" __info__ = "malware" __reference__ = "cybercrime-tracker.net" def fetch(): retval = {} content = retrieve_content(__url__) if __check__ in content: content = content.replace("<br />", '\n') for line in content.split('\n'): line = line.strip() if not line or line.startswith('#') or '(SSL)' in line: continue if '://' in line: line = re.search(r"://(.*)", line).group(1) line = line.rstrip('/') if '/' in line: retval[line] = (__info__, __reference__) line = line.split('/')[0] if ':' in line: line = line.split(':')[0] if re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", line): retval[line] = ("potential malware site", __reference__) else: retval[line] = (__info__, __reference__) return retval
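# A small usage sketch, not part of the module above: fetch() performs a live
# HTTP request through core.common.retrieve_content and returns a dict keyed
# by host/URL with (info, reference) tuples as values; the slice of 10 entries
# is only to keep the output short.
if __name__ == "__main__":
    for item, (info, reference) in sorted(fetch().items())[:10]:
        print("%s -> %s (%s)" % (item, info, reference))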
py
1a3340b637d1687536456407a5e6596ae34d0601
import numpy as np import torch from torch.utils.data import Dataset import h5py def rotate_point_cloud(batch_data): """ Randomly rotate the point clouds to augument the dataset rotation is per shape based along up direction Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, rotated batch of point clouds """ rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): rotation_angle = np.random.uniform() * 2 * np.pi cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]]) shape_pc = batch_data[k, ...] rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) return rotated_data def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05): """ Randomly jitter points. jittering is per point. Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, jittered batch of point clouds """ B, N, C = batch_data.shape assert(clip > 0) jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip) jittered_data += batch_data return jittered_data def center_data(pcs): for pc in pcs: centroid = np.mean(pc, axis=0) pc[:,0]-=centroid[0] pc[:,1]-=centroid[1] pc[:,2]-=centroid[2] return pcs def normalize_data(pcs): for pc in pcs: #get furthest point distance then normalize d = max(np.sum(np.abs(pc)**2,axis=-1)**(1./2)) pc /= d # pc[:,0]/=max(abs(pc[:,0])) # pc[:,1]/=max(abs(pc[:,1])) # pc[:,2]/=max(abs(pc[:,2])) return pcs def load_withmask_h5(h5_filename): f = h5py.File(h5_filename, 'r') data = f['data'][:] label = f['label'][:] mask = f['mask'][:] return data, label, mask def convert_to_binary_mask(masks): binary_masks = [] for i in range(masks.shape[0]): binary_mask = np.ones(masks[i].shape) bg_idx = np.where(masks[i, :] == -1) binary_mask[bg_idx] = 0 binary_masks.append(binary_mask) binary_masks = np.array(binary_masks) return binary_masks class ScanObjectNN(Dataset): def __init__(self, data_dir, center=True, normalize=True, train=False, subsample=None): self.data, self.label, self.mask = load_withmask_h5(data_dir) self.mask = convert_to_binary_mask(self.mask) if center: self.data = center_data(self.data) if normalize: self.data = normalize_data(self.data) self.train = train self.subsample = subsample def __getitem__(self, item): pointcloud = self.data[item][None] label = self.label[item] mask = self.mask[item] if self.train: pointcloud = jitter_point_cloud(pointcloud) pointcloud = rotate_point_cloud(pointcloud) pc_np = pointcloud[0].copy() ma_np = mask.copy() if self.subsample is not None: idx = np.random.choice(pc_np.shape[0], size=self.subsample, replace=False) pc_np = pc_np[idx] ma_np = ma_np[idx] pc = torch.from_numpy(pc_np).type(torch.FloatTensor) ma = torch.from_numpy(ma_np).type(torch.LongTensor) return pc, label, ma def __len__(self): return self.data.shape[0]
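# A usage sketch for the dataset class above; the .h5 path and loader settings
# are illustrative assumptions, not values required by the module.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    dataset = ScanObjectNN("training_objectdataset.h5", train=True, subsample=1024)
    loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=2)
    for points, labels, masks in loader:
        # e.g. points: (16, 1024, 3), labels: (16,), masks: (16, 1024)
        print(points.shape, labels.shape, masks.shape)
        break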
py
1a3341471955484074913367f00ce40dd741554b
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
#  spendfrom.py  # Lists available funds
#  spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#

from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json

BASE_FEE=Decimal("0.001")

def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")

def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    if platform.system() == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    elif platform.system() == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    return os.path.expanduser("~/.bitcoin")

def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try:
                    return self.sechead
                finally:
                    self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))

def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server"""
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 19121 if testnet else 9121
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)

def unlock_wallet(bitcoind):
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")

    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()

def list_available(bitcoind):
    address_summary = dict()

    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash": continue address = pk["addresses"][0] if address in address_summary: address_summary[address]["total"] += vout["value"] address_summary[address]["outputs"].append(output) else: address_summary[address] = { "total" : vout["value"], "outputs" : [output], "account" : address_to_account.get(address, "") } return address_summary def select_coins(needed, inputs): # Feel free to improve this, this is good enough for my simple needs: outputs = [] have = Decimal("0.0") n = 0 while have < needed and n < len(inputs): outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]}) have += inputs[n]["amount"] n += 1 return (outputs, have-needed) def create_tx(bitcoind, fromaddresses, toaddress, amount, fee): all_coins = list_available(bitcoind) total_available = Decimal("0.0") needed = amount+fee potential_inputs = [] for addr in fromaddresses: if addr not in all_coins: continue potential_inputs.extend(all_coins[addr]["outputs"]) total_available += all_coins[addr]["total"] if total_available < needed: sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed)); sys.exit(1) # # Note: # Python's json/jsonrpc modules have inconsistent support for Decimal numbers. # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode # Decimals, I'm casting amounts to float before sending them to bitcoind. # outputs = { toaddress : float(amount) } (inputs, change_amount) = select_coins(needed, potential_inputs) if change_amount > BASE_FEE: # don't bother with zero or tiny change change_address = fromaddresses[-1] if change_address in outputs: outputs[change_address] += float(change_amount) else: outputs[change_address] = float(change_amount) rawtx = bitcoind.createrawtransaction(inputs, outputs) signed_rawtx = bitcoind.signrawtransaction(rawtx) if not signed_rawtx["complete"]: sys.stderr.write("signrawtransaction failed\n") sys.exit(1) txdata = signed_rawtx["hex"] return txdata def compute_amount_in(bitcoind, txinfo): result = Decimal("0.0") for vin in txinfo['vin']: in_info = bitcoind.getrawtransaction(vin['txid'], 1) vout = in_info['vout'][vin['vout']] result = result + vout['value'] return result def compute_amount_out(txinfo): result = Decimal("0.0") for vout in txinfo['vout']: result = result + vout['value'] return result def sanity_test_fee(bitcoind, txdata_hex, max_fee): class FeeError(RuntimeError): pass try: txinfo = bitcoind.decoderawtransaction(txdata_hex) total_in = compute_amount_in(bitcoind, txinfo) total_out = compute_amount_out(txinfo) if total_in-total_out > max_fee: raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out)) tx_size = len(txdata_hex)/2 kb = tx_size/1000 # integer division rounds down if kb > 1 and fee < BASE_FEE: raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes") if total_in < 0.01 and fee < BASE_FEE: raise FeeError("Rejecting no-fee, tiny-amount transaction") # Exercise for the reader: compute transaction priority, and # warn if this is a very-low-priority transaction except FeeError as err: sys.stderr.write((str(err)+"\n")) sys.exit(1) def main(): import optparse parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--from", dest="fromaddresses", default=None, help="addresses to get bitcoins from") parser.add_option("--to", dest="to", default=None, help="address to get send bitcoins to") parser.add_option("--amount", dest="amount", default=None, help="amount to send") parser.add_option("--fee", dest="fee", 
default="0.0", help="fee to include") parser.add_option("--datadir", dest="datadir", default=determine_db_dir(), help="location of bitcoin.conf file with RPC username/password (default: %default)") parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the test network") parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true", help="Don't broadcast the transaction, just create and print the transaction data") (options, args) = parser.parse_args() check_json_precision() config = read_bitcoin_config(options.datadir) if options.testnet: config['testnet'] = True bitcoind = connect_JSON(config) if options.amount is None: address_summary = list_available(bitcoind) for address,info in address_summary.iteritems(): n_transactions = len(info['outputs']) if n_transactions > 1: print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions)) else: print("%s %.8f %s"%(address, info['total'], info['account'])) else: fee = Decimal(options.fee) amount = Decimal(options.amount) while unlock_wallet(bitcoind) == False: pass # Keep asking for passphrase until they get it right txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee) sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01")) if options.dry_run: print(txdata) else: txid = bitcoind.sendrawtransaction(txdata) print(txid) if __name__ == '__main__': main()
py
1a3341baa17728709aa05e5fc48c72dc279e5093
# -*- coding: utf-8 -*- # # # tracker/util/__init__.py # # # MIT License # # Copyright (c) 2018-2019 Brandon Gomes # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # """ MATHUSLA Tracking Utilities. """ # -------------- Tracker Library -------------- # from .core import *
py
1a3341c8cc3a1ec22c36e6d3407f5cc78e748595
# autogenerated grammar from file ../mwlib/resources/docbook-rng-4-4/docbook.rng using: ./build_rng_grammar.py grammar = {'em': {'attributes': [], 'children': []}, 'programlisting': {'attributes': ['revisionflag', 'continuation', 'xml:space', 'xml:base', 'startinglinenumber', 'id', 'width', 'role', 'xreflabel', 'conformance', 'revision', 'remap', 'vendor', 'linenumbering', 'format', 'userlevel', 'arch', 'condition', 'lang', 'language', 'wordsize', 'security', 'os', 'dir'], 'children': ['code', 'keycombo', 'citerefentry', 'guiicon', 'lineannotation', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'modespec', 'medialabel', 'guilabel', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'filename', 'errorcode', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'acronym', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'package', 'varname', 'errortype', 'textobject', 'ooclass', 'guibutton', 'structfield', 'funcsynopsis', 'productnumber', 'authorinitials', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'personname', 'trademark', 'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'co', 'inlinemediaobject', 'glossterm', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'phrase', 'xref', 'author', 'orgname', 'synopsis', 'application', 'emphasis', 'returnvalue', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'biblioref', 'database', 'markup', 'coref', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'varlistentry': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['term', 'listitem']}, 'mediaobject': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['audioobject', 'videoobject', 'caption', 'imageobject', 'imageobjectco', 'textobject', 'objectinfo']}, 'citation': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['code', 'keycombo', 'citerefentry', 'guiicon', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'modespec', 'medialabel', 'guilabel', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'filename', 'errorcode', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'acronym', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'package', 'varname', 'errortype', 'ooclass', 'guibutton', 'structfield', 'funcsynopsis', 'productnumber', 'authorinitials', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'personname', 'trademark', 
'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'inlinemediaobject', 'glossterm', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'phrase', 'xref', 'author', 'orgname', 'synopsis', 'application', 'emphasis', 'returnvalue', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'biblioref', 'database', 'markup', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'imageobject': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['objectinfo', 'imagedata']}, 'phrase': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['code', 'keycombo', 'citerefentry', 'guiicon', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'modespec', 'medialabel', 'guilabel', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'filename', 'errorcode', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'acronym', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'package', 'varname', 'errortype', 'ooclass', 'guibutton', 'structfield', 'funcsynopsis', 'productnumber', 'authorinitials', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'personname', 'trademark', 'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'inlinemediaobject', 'glossterm', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'phrase', 'xref', 'author', 'orgname', 'synopsis', 'application', 'emphasis', 'returnvalue', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'biblioref', 'database', 'markup', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'literallayout': {'attributes': ['revisionflag', 'continuation', 'xml:space', 'xml:base', 'startinglinenumber', 'id', 'width', 'role', 'xreflabel', 'conformance', 'revision', 'remap', 'vendor', 'linenumbering', 'format', 'userlevel', 'arch', 'class', 'condition', 'lang', 'language', 'wordsize', 'security', 'os', 'dir'], 'children': ['code', 'keycombo', 'citerefentry', 'guiicon', 'lineannotation', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'modespec', 'medialabel', 'guilabel', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'filename', 'errorcode', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'acronym', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'package', 'varname', 'errortype', 'textobject', 'ooclass', 'guibutton', 'structfield', 'funcsynopsis', 'productnumber', 'authorinitials', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'personname', 
'trademark', 'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'co', 'inlinemediaobject', 'glossterm', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'phrase', 'xref', 'author', 'orgname', 'synopsis', 'application', 'emphasis', 'returnvalue', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'biblioref', 'database', 'markup', 'coref', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'xref': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'endterm', 'xrefstyle', 'linkend', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': []}, 'span': {'attributes': [], 'children': []}, 'caption': {'attributes': ['revisionflag', 'onmousedown', 'xml:base', 'id', 'style', 'title', 'wordsize', 'role', 'onclick', 'onmousemove', 'conformance', 'revision', 'remap', 'vendor', 'xreflabel', 'onmouseout', 'onkeypress', 'onkeydown', 'onmouseover', 'userlevel', 'arch', 'class', 'condition', 'lang', 'align', 'xml:lang', 'onmouseup', 'onkeyup', 'security', 'ondblclick', 'os', 'dir'], 'children': ['programlisting', 'tip', 'formalpara', 'warning', 'caution', 'literallayout', 'glosslist', 'screenshot', 'simpara', 'segmentedlist', 'note', 'screenco', 'calloutlist', 'blockquote', 'para', 'screen', 'important', 'orderedlist', 'programlistingco', 'bibliolist', 'variablelist', 'simplelist', 'itemizedlist']}, 'section': {'attributes': ['status', 'lang', 'remap', 'vendor', 'os', 'revisionflag', 'conformance', 'label', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['programlisting', 'subtitle', 'simplesect', 'figure', 'cmdsynopsis', 'mediaobject', 'titleabbrev', 'formalpara', 'informalequation', 'warning', 'caution', 'informalexample', 'table', 'fieldsynopsis', 'funcsynopsis', 'literallayout', 'glosslist', 'index', 'msgset', 'bibliography', 'destructorsynopsis', 'title', 'highlights', 'section', 'bridgehead', 'tip', 'segmentedlist', 'informalfigure', 'methodsynopsis', 'note', 'informaltable', 'screenco', 'constructorsynopsis', 'lot', 'toc', 'sectioninfo', 'indexterm', 'abstract', 'calloutlist', 'blockquote', 'screenshot', 'screen', 'graphicco', 'remark', 'qandaset', 'epigraph', 'task', 'important', 'para', 'address', 'orderedlist', 'beginpage', 'classsynopsis', 'sidebar', 'mediaobjectco', 'simpara', 'anchor', 'refentry', 'glossary', 'programlistingco', 'graphic', 'example', 'authorblurb', 'synopsis', 'bibliolist', 'variablelist', 'simplelist', 'itemizedlist', 'equation', 'procedure']}, 'tr': {'attributes': ['style', 'onmousedown', 'onmouseup', 'onmouseout', 'title', 'align', 'onkeypress', 'onkeydown', 'class', 'char', 'bgcolor', 'xml:lang', 'onmousemove', 'onmouseover', 'onclick', 'valign', 'onkeyup', 'ondblclick', 'charoff', 'id'], 'children': ['td', 'th']}, 'informaltable': {'attributes': ['orient', 'revisionflag', 'rowsep', 'tabstyle', 'frame', 'remap', 'onmousedown', 'xml:base', 'tocentry', 'border', 'id', 'style', 'title', 'label', 'pgwide', 'xml:lang', 'onmousemove', 'xreflabel', 'role', 'conformance', 'revision', 'rowheader', 'vendor', 'onclick', 'onmouseout', 'onkeypress', 'rules', 'colsep', 'onkeydown', 'floatstyle', 'cellpadding', 'onmouseover', 'userlevel', 'arch', 'class', 'condition', 'lang', 'cellspacing', 'align', 'shortentry', 'bgcolor', 'summary', 'wordsize', 'onmouseup', 
'onkeyup', 'security', 'ondblclick', 'os', 'width', 'dir'], 'children': ['tgroup', 'colgroup', 'mediaobject', 'tr', 'tbody', 'graphic', 'tfoot', 'textobject', 'blockinfo', 'col', 'thead']}, 'emphasis': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['code', 'keycombo', 'citerefentry', 'guiicon', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'modespec', 'medialabel', 'guilabel', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'filename', 'errorcode', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'acronym', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'package', 'varname', 'errortype', 'ooclass', 'guibutton', 'structfield', 'funcsynopsis', 'productnumber', 'authorinitials', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'personname', 'trademark', 'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'inlinemediaobject', 'glossterm', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'phrase', 'xref', 'author', 'orgname', 'synopsis', 'application', 'emphasis', 'returnvalue', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'biblioref', 'database', 'markup', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'book': {'attributes': ['status', 'lang', 'remap', 'vendor', 'os', 'revisionflag', 'conformance', 'fpi', 'label', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['chapter', 'index', 'subtitle', 'colophon', 'reference', 'title', 'glossary', 'appendix', 'setindex', 'titleabbrev', 'article', 'bookinfo', 'bibliography', 'lot', 'preface', 'dedication', 'toc', 'part']}, 'listitem': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'userlevel', 'wordsize', 'role', 'dir', 'xreflabel', 'override', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['programlisting', 'funcsynopsis', 'figure', 'cmdsynopsis', 'mediaobject', 'mediaobjectco', 'formalpara', 'informalequation', 'warning', 'caution', 'informalexample', 'fieldsynopsis', 'table', 'literallayout', 'glosslist', 'task', 'msgset', 'destructorsynopsis', 'authorblurb', 'highlights', 'bridgehead', 'tip', 'segmentedlist', 'informalfigure', 'methodsynopsis', 'note', 'informaltable', 'screenco', 'constructorsynopsis', 'indexterm', 'abstract', 'calloutlist', 'blockquote', 'screenshot', 'screen', 'graphicco', 'qandaset', 'epigraph', 'important', 'para', 'address', 'orderedlist', 'beginpage', 'classsynopsis', 'sidebar', 'remark', 'simpara', 'anchor', 'programlistingco', 'graphic', 'example', 'synopsis', 'bibliolist', 'variablelist', 'simplelist', 'itemizedlist', 'equation', 'procedure']}, 'sectioninfo': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['contractsponsor', 'isbn', 'contractnum', 'pubdate', 'productnumber', 
'mediaobject', 'address', 'invpartnumber', 'itermset', 'printhistory', 'edition', 'modespec', 'pubsnumber', 'contrib', 'seriesvolnums', 'corpauthor', 'authorgroup', 'artpagenums', 'author', 'orgname', 'volumenum', 'confgroup', 'authorinitials', 'pagenums', 'editor', 'bibliorelation', 'honorific', 'corpname', 'indexterm', 'abstract', 'keywordset', 'subjectset', 'firstname', 'citebiblioid', 'title', 'issuenum', 'collab', 'othercredit', 'corpcredit', 'citetitle', 'biblioset', 'legalnotice', 'bibliomisc', 'date', 'othername', 'surname', 'lineage', 'publisher', 'biblioid', 'graphic', 'publishername', 'copyright', 'subtitle', 'affiliation', 'bibliocoverage', 'issn', 'bibliosource', 'productname', 'authorblurb', 'personname', 'abbrev', 'titleabbrev', 'releaseinfo', 'revhistory']}, 'chapterinfo': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['contractsponsor', 'isbn', 'contractnum', 'pubdate', 'productnumber', 'mediaobject', 'address', 'invpartnumber', 'itermset', 'printhistory', 'edition', 'modespec', 'pubsnumber', 'contrib', 'seriesvolnums', 'corpauthor', 'authorgroup', 'artpagenums', 'author', 'orgname', 'volumenum', 'confgroup', 'authorinitials', 'pagenums', 'editor', 'bibliorelation', 'honorific', 'corpname', 'indexterm', 'abstract', 'keywordset', 'subjectset', 'firstname', 'citebiblioid', 'title', 'issuenum', 'collab', 'othercredit', 'corpcredit', 'citetitle', 'biblioset', 'legalnotice', 'bibliomisc', 'date', 'othername', 'surname', 'lineage', 'publisher', 'biblioid', 'graphic', 'publishername', 'copyright', 'subtitle', 'affiliation', 'bibliocoverage', 'issn', 'bibliosource', 'productname', 'authorblurb', 'personname', 'abbrev', 'titleabbrev', 'releaseinfo', 'revhistory']}, 'math': {'attributes': [], 'children': []}, 'blockquote': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['programlisting', 'funcsynopsis', 'figure', 'cmdsynopsis', 'mediaobject', 'mediaobjectco', 'formalpara', 'informalequation', 'warning', 'caution', 'informalexample', 'fieldsynopsis', 'table', 'blockinfo', 'glosslist', 'literallayout', 'task', 'msgset', 'destructorsynopsis', 'title', 'highlights', 'bridgehead', 'tip', 'segmentedlist', 'informalfigure', 'methodsynopsis', 'note', 'informaltable', 'screenco', 'constructorsynopsis', 'indexterm', 'abstract', 'calloutlist', 'blockquote', 'attribution', 'screenshot', 'screen', 'graphicco', 'qandaset', 'epigraph', 'important', 'para', 'address', 'orderedlist', 'beginpage', 'classsynopsis', 'sidebar', 'remark', 'simpara', 'anchor', 'programlistingco', 'graphic', 'example', 'authorblurb', 'synopsis', 'bibliolist', 'variablelist', 'simplelist', 'itemizedlist', 'equation', 'procedure']}, 'para': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['mediaobjectco', 'code', 'keycombo', 'mediaobject', 'informalequation', 'citerefentry', 'guiicon', 'literallayout', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'calloutlist', 'blockquote', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 
'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'bibliolist', 'modespec', 'medialabel', 'guilabel', 'table', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'segmentedlist', 'filename', 'errorcode', 'informaltable', 'screenco', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'graphicco', 'acronym', 'screen', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'itemizedlist', 'package', 'varname', 'programlistingco', 'errortype', 'example', 'glosslist', 'ooclass', 'guibutton', 'structfield', 'programlisting', 'funcsynopsis', 'productnumber', 'authorinitials', 'address', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'figure', 'personname', 'trademark', 'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'orderedlist', 'graphic', 'equation', 'inlinemediaobject', 'glossterm', 'simplelist', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'warning', 'caution', 'informalexample', 'phrase', 'xref', 'author', 'orgname', 'tip', 'synopsis', 'informalfigure', 'note', 'application', 'emphasis', 'returnvalue', 'variablelist', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'screenshot', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'important', 'biblioref', 'database', 'markup', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'quote': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['code', 'keycombo', 'citerefentry', 'guiicon', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'modespec', 'medialabel', 'guilabel', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'filename', 'errorcode', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'acronym', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'package', 'varname', 'errortype', 'ooclass', 'guibutton', 'structfield', 'funcsynopsis', 'productnumber', 'authorinitials', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'personname', 'trademark', 'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'inlinemediaobject', 'glossterm', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'phrase', 'xref', 'author', 'orgname', 'synopsis', 'application', 'emphasis', 'returnvalue', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'biblioref', 'database', 'markup', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'object': {'attributes': [], 'children': []}, 'bookinfo': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'dir', 'wordsize', 'role', 'condition', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'contents', 'revision'], 'children': ['contractsponsor', 'isbn', 'contractnum', 'pubdate', 'productnumber', 'mediaobject', 'address', 'invpartnumber', 'itermset', 'printhistory', 'edition', 'modespec', 'pubsnumber', 'contrib', 'seriesvolnums', 'corpauthor', 
'authorgroup', 'artpagenums', 'author', 'orgname', 'volumenum', 'confgroup', 'authorinitials', 'pagenums', 'editor', 'bibliorelation', 'honorific', 'corpname', 'indexterm', 'abstract', 'keywordset', 'subjectset', 'firstname', 'citebiblioid', 'title', 'issuenum', 'collab', 'othercredit', 'corpcredit', 'citetitle', 'biblioset', 'legalnotice', 'bibliomisc', 'date', 'othername', 'surname', 'lineage', 'publisher', 'biblioid', 'graphic', 'publishername', 'copyright', 'subtitle', 'affiliation', 'bibliocoverage', 'issn', 'bibliosource', 'productname', 'authorblurb', 'personname', 'abbrev', 'titleabbrev', 'releaseinfo', 'revhistory']}, 'td': {'attributes': ['colspan', 'onmousedown', 'char', 'nowrap', 'id', 'axis', 'style', 'rowspan', 'title', 'bgcolor', 'onmousemove', 'onmouseup', 'valign', 'scope', 'charoff', 'onclick', 'onmouseout', 'onkeypress', 'onkeydown', 'onmouseover', 'height', 'class', 'align', 'headers', 'xml:lang', 'onkeyup', 'ondblclick', 'width', 'abbr'], 'children': ['code', 'keycombo', 'mediaobject', 'citerefentry', 'guiicon', 'literallayout', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'calloutlist', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'bibliolist', 'modespec', 'medialabel', 'guilabel', 'table', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'simpara', 'segmentedlist', 'filename', 'errorcode', 'informaltable', 'screenco', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'acronym', 'screen', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'itemizedlist', 'package', 'varname', 'programlistingco', 'errortype', 'glosslist', 'ooclass', 'guibutton', 'structfield', 'programlisting', 'funcsynopsis', 'productnumber', 'authorinitials', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'personname', 'trademark', 'formalpara', 'literal', 'type', 'olink', 'function', 'option', 'para', 'corpcredit', 'link', 'foreignphrase', 'orderedlist', 'graphic', 'inlinemediaobject', 'glossterm', 'simplelist', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'warning', 'caution', 'phrase', 'xref', 'author', 'orgname', 'tip', 'synopsis', 'note', 'application', 'emphasis', 'returnvalue', 'variablelist', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'screenshot', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'important', 'biblioref', 'database', 'markup', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'ulink': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'url', 'arch', 'xrefstyle', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'revision', 'type', 'id', 'condition', 'conformance'], 'children': ['code', 'keycombo', 'citerefentry', 'guiicon', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'modespec', 'medialabel', 'guilabel', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'filename', 'errorcode', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'acronym', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 
'optional', 'keycode', 'remark', 'package', 'varname', 'errortype', 'ooclass', 'guibutton', 'structfield', 'funcsynopsis', 'productnumber', 'authorinitials', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'personname', 'trademark', 'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'inlinemediaobject', 'glossterm', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'phrase', 'xref', 'author', 'orgname', 'synopsis', 'application', 'emphasis', 'returnvalue', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'biblioref', 'database', 'markup', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'article': {'attributes': ['status', 'lang', 'remap', 'vendor', 'os', 'revisionflag', 'conformance', 'xml:base', 'class', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'parentbook', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['appendix', 'programlisting', 'subtitle', 'simplesect', 'figure', 'cmdsynopsis', 'mediaobject', 'titleabbrev', 'formalpara', 'informalequation', 'warning', 'caution', 'informalexample', 'table', 'fieldsynopsis', 'funcsynopsis', 'literallayout', 'glosslist', 'task', 'msgset', 'bibliography', 'destructorsynopsis', 'title', 'highlights', 'section', 'bridgehead', 'tip', 'segmentedlist', 'informalfigure', 'methodsynopsis', 'note', 'informaltable', 'screenco', 'constructorsynopsis', 'lot', 'ackno', 'sect1', 'indexterm', 'abstract', 'calloutlist', 'blockquote', 'screenshot', 'index', 'screen', 'graphicco', 'remark', 'qandaset', 'epigraph', 'important', 'para', 'address', 'toc', 'orderedlist', 'beginpage', 'classsynopsis', 'sidebar', 'mediaobjectco', 'simpara', 'anchor', 'refentry', 'tocchap', 'programlistingco', 'graphic', 'articleinfo', 'example', 'authorblurb', 'glossary', 'synopsis', 'bibliolist', 'variablelist', 'simplelist', 'itemizedlist', 'equation', 'procedure']}, 'orderedlist': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'continuation', 'spacing', 'dir', 'wordsize', 'role', 'condition', 'xreflabel', 'xml:base', 'userlevel', 'inheritnum', 'security', 'revision', 'arch', 'id', 'numeration', 'conformance'], 'children': ['programlisting', 'funcsynopsis', 'cmdsynopsis', 'mediaobject', 'titleabbrev', 'informalequation', 'fieldsynopsis', 'caution', 'informalexample', 'authorblurb', 'blockinfo', 'literallayout', 'destructorsynopsis', 'title', 'highlights', 'bridgehead', 'tip', 'formalpara', 'informalfigure', 'methodsynopsis', 'note', 'informaltable', 'screenco', 'constructorsynopsis', 'indexterm', 'abstract', 'listitem', 'blockquote', 'screenshot', 'screen', 'graphicco', 'remark', 'important', 'para', 'address', 'beginpage', 'classsynopsis', 'mediaobjectco', 'simpara', 'programlistingco', 'graphic', 'synopsis', 'warning', 'epigraph', 'anchor']}, 'subscript': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['remark', 'symbol', 'replaceable', 'inlinemediaobject', 'emphasis', 'ulink', 'link', 'subscript', 'inlinegraphic', 'anchor', 'olink', 'superscript']}, 'chapter': {'attributes': ['status', 'lang', 'remap', 'vendor', 'os', 'revisionflag', 'conformance', 'label', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['programlisting', 'subtitle', 
'simplesect', 'figure', 'cmdsynopsis', 'mediaobject', 'titleabbrev', 'formalpara', 'sect1', 'informalequation', 'tocchap', 'caution', 'informalexample', 'table', 'fieldsynopsis', 'funcsynopsis', 'literallayout', 'glosslist', 'graphicco', 'index', 'msgset', 'bibliography', 'destructorsynopsis', 'title', 'highlights', 'section', 'bridgehead', 'tip', 'segmentedlist', 'informalfigure', 'methodsynopsis', 'note', 'informaltable', 'screenco', 'constructorsynopsis', 'lot', 'toc', 'indexterm', 'chapterinfo', 'abstract', 'calloutlist', 'blockquote', 'screenshot', 'screen', 'beginpage', 'remark', 'qandaset', 'epigraph', 'task', 'important', 'para', 'address', 'orderedlist', 'classsynopsis', 'sidebar', 'mediaobjectco', 'simpara', 'anchor', 'refentry', 'glossary', 'programlistingco', 'graphic', 'example', 'authorblurb', 'synopsis', 'bibliolist', 'variablelist', 'simplelist', 'itemizedlist', 'warning', 'equation', 'procedure']}, 'term': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['code', 'keycombo', 'citerefentry', 'guiicon', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'methodsynopsis', 'footnoteref', 'inlineequation', 'structname', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'classsynopsis', 'systemitem', 'keycap', 'productname', 'token', 'modespec', 'medialabel', 'guilabel', 'wordasword', 'corpauthor', 'ooexception', 'destructorsynopsis', 'filename', 'errorcode', 'constructorsynopsis', 'computeroutput', 'subscript', 'email', 'acronym', 'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'package', 'varname', 'errortype', 'ooclass', 'guibutton', 'structfield', 'funcsynopsis', 'productnumber', 'authorinitials', 'classname', 'fieldsynopsis', 'prompt', 'inlinegraphic', 'personname', 'trademark', 'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'inlinemediaobject', 'glossterm', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'phrase', 'xref', 'author', 'orgname', 'synopsis', 'application', 'emphasis', 'returnvalue', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'quote', 'symbol', 'beginpage', 'cmdsynopsis', 'action', 'biblioref', 'database', 'markup', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'imagedata': {'attributes': ['revisionflag', 'xml:base', 'id', 'srccredit', 'scale', 'width', 'contentdepth', 'role', 'xreflabel', 'valign', 'conformance', 'revision', 'remap', 'scalefit', 'vendor', 'format', 'userlevel', 'fileref', 'arch', 'condition', 'lang', 'align', 'depth', 'wordsize', 'entityref', 'contentwidth', 'security', 'os', 'dir'], 'children': []}, 'title': {'attributes': ['lang', 'revisionflag', 'remap', 'pagenum', 'os', 'conformance', 'xml:base', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'vendor', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['code', 'keycombo', 'citerefentry', 'guiicon', 'guimenu', 'guisubmenu', 'userinput', 'errortext', 'footnoteref', 'inlineequation', 'structname', 'interfacename', 'mousebutton', 'envar', 'othercredit', 'ulink', 'methodname', 'guimenuitem', 'systemitem', 'keycap', 'productname', 'token', 'modespec', 'medialabel', 'guilabel', 'wordasword', 'corpauthor', 'ooexception', 'filename', 'errorcode', 'computeroutput', 'subscript', 'email', 'acronym', 
'replaceable', 'citetitle', 'exceptionname', 'oointerface', 'revhistory', 'interface', 'optional', 'keycode', 'remark', 'package', 'varname', 'errortype', 'ooclass', 'guibutton', 'structfield', 'productnumber', 'authorinitials', 'classname', 'prompt', 'inlinegraphic', 'personname', 'trademark', 'literal', 'type', 'olink', 'function', 'option', 'corpcredit', 'link', 'foreignphrase', 'inlinemediaobject', 'glossterm', 'property', 'anchor', 'constant', 'menuchoice', 'footnote', 'citation', 'errorname', 'hardware', 'phrase', 'xref', 'author', 'orgname', 'application', 'emphasis', 'returnvalue', 'indexterm', 'parameter', 'firstterm', 'sgmltag', 'quote', 'symbol', 'action', 'biblioref', 'database', 'markup', 'uri', 'abbrev', 'command', 'keysym', 'superscript']}, 'articleinfo': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['contractsponsor', 'isbn', 'contractnum', 'pubdate', 'productnumber', 'mediaobject', 'address', 'invpartnumber', 'itermset', 'printhistory', 'edition', 'modespec', 'pubsnumber', 'contrib', 'seriesvolnums', 'corpauthor', 'authorgroup', 'artpagenums', 'author', 'orgname', 'volumenum', 'confgroup', 'authorinitials', 'pagenums', 'editor', 'bibliorelation', 'honorific', 'corpname', 'indexterm', 'abstract', 'keywordset', 'subjectset', 'firstname', 'citebiblioid', 'title', 'issuenum', 'collab', 'othercredit', 'corpcredit', 'citetitle', 'biblioset', 'legalnotice', 'bibliomisc', 'date', 'othername', 'surname', 'lineage', 'publisher', 'biblioid', 'graphic', 'publishername', 'copyright', 'subtitle', 'affiliation', 'bibliocoverage', 'issn', 'bibliosource', 'productname', 'authorblurb', 'personname', 'abbrev', 'titleabbrev', 'releaseinfo', 'revhistory']}, 'inlinemediaobject': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['audioobject', 'videoobject', 'imageobject', 'imageobjectco', 'textobject', 'objectinfo']}, 'variablelist': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'termlength', 'conformance', 'spacing', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['programlisting', 'funcsynopsis', 'varlistentry', 'cmdsynopsis', 'mediaobject', 'titleabbrev', 'informalequation', 'fieldsynopsis', 'caution', 'informalexample', 'authorblurb', 'blockinfo', 'literallayout', 'destructorsynopsis', 'title', 'highlights', 'bridgehead', 'tip', 'formalpara', 'informalfigure', 'methodsynopsis', 'note', 'informaltable', 'screenco', 'constructorsynopsis', 'indexterm', 'abstract', 'blockquote', 'screenshot', 'screen', 'graphicco', 'remark', 'important', 'para', 'address', 'beginpage', 'classsynopsis', 'mediaobjectco', 'simpara', 'programlistingco', 'graphic', 'synopsis', 'warning', 'epigraph', 'anchor']}, 'itemizedlist': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'spacing', 'mark', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['programlisting', 'funcsynopsis', 'cmdsynopsis', 'mediaobject', 'titleabbrev', 'informalequation', 'fieldsynopsis', 'caution', 'informalexample', 'authorblurb', 'blockinfo', 'literallayout', 'destructorsynopsis', 'title', 'highlights', 'bridgehead', 
'tip', 'formalpara', 'informalfigure', 'methodsynopsis', 'note', 'informaltable', 'screenco', 'constructorsynopsis', 'indexterm', 'abstract', 'listitem', 'blockquote', 'screenshot', 'screen', 'graphicco', 'remark', 'important', 'para', 'address', 'beginpage', 'classsynopsis', 'mediaobjectco', 'simpara', 'programlistingco', 'graphic', 'synopsis', 'warning', 'epigraph', 'anchor']}, 'superscript': {'attributes': ['lang', 'revisionflag', 'remap', 'vendor', 'os', 'conformance', 'wordsize', 'role', 'dir', 'xreflabel', 'userlevel', 'xml:base', 'security', 'arch', 'id', 'condition', 'revision'], 'children': ['remark', 'symbol', 'replaceable', 'inlinemediaobject', 'emphasis', 'ulink', 'link', 'subscript', 'inlinegraphic', 'anchor', 'olink', 'superscript']}}
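# Minimal usage sketch for the element map above, assuming the dict is bound to a
# name such as `schema` (its real name is assigned earlier in the module and is not
# shown here). Each entry lists the attributes an element accepts and the child
# elements it may contain, so validity checks reduce to membership tests:
def is_valid_child(schema, parent, child):
    """Return True if `child` may appear inside `parent` according to the map."""
    entry = schema.get(parent)
    return entry is not None and child in entry['children']

def is_valid_attribute(schema, element, attribute):
    """Return True if `attribute` is declared for `element` according to the map."""
    entry = schema.get(element)
    return entry is not None and attribute in entry['attributes']

# e.g. is_valid_child(schema, 'book', 'chapter') -> True
#      is_valid_attribute(schema, 'ulink', 'url') -> True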
py
1a3341e136842612ef258515e76dfa214c7c16d3
# (c) 2016 James Turner <[email protected]> # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ lookup: aws_service_ip_ranges author: - James Turner <[email protected]> version_added: "2.5" requirements: - must have public internet connectivity short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3. description: - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking. - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service. options: service: description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS' default: null region: description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1' default: null """ EXAMPLES = """ vars: ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}" tasks: - name: "use list return option and iterate as a loop" debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}" # "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 " - name: "Pull S3 IP ranges, and print the default return style" debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}" # "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17" """ RETURN = """ _raw: description: comma-separated list of CIDR ranges """ import json from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError from ansible.module_utils._text import to_native from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): try: resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json') amazon_response = json.load(resp)['prefixes'] except getattr(json.decoder, 'JSONDecodeError', ValueError) as e: # on Python 3+, json.decoder.JSONDecodeError is raised for bad # JSON. On 2.x it's a ValueError raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e)) except HTTPError as e: raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e)) except SSLValidationError as e: raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e)) except URLError as e: raise AnsibleError("Failed to look up IP range service: %s" % to_native(e)) except ConnectionError as e: raise AnsibleError("Error connecting to IP range service: %s" % to_native(e)) if 'region' in kwargs: region = kwargs['region'] amazon_response = (item for item in amazon_response if item['region'] == region) if 'service' in kwargs: service = str.upper(kwargs['service']) amazon_response = (item for item in amazon_response if item['service'] == service) return [item['ip_prefix'] for item in amazon_response]
py
1a3342109cd6d99137b2f5274e09cea2fc509913
# automatically generated by the FlatBuffers compiler, do not modify # namespace: schema import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class Stage(object): __slots__ = ['_tab'] @classmethod def GetRootAsStage(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Stage() x.Init(buf, n + offset) return x # Stage def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # Stage def Name(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.String(o + self._tab.Pos) return None # Stage def Data(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: x = self._tab.Vector(o) x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 x = self._tab.Indirect(x) from schema.Data import Data obj = Data() obj.Init(self._tab.Bytes, x) return obj return None # Stage def DataLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.VectorLen(o) return 0 # Stage def DataIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) return o == 0 # Stage def CmdList(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: a = self._tab.Vector(o) return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) return "" # Stage def CmdListLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.VectorLen(o) return 0 # Stage def CmdListIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) return o == 0 # Stage def TrackTime(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return True # Stage def TrackRam(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # Stage def TrackCpu(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # Stage def TrackGpu(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # Stage def Comment(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) if o != 0: return self._tab.String(o + self._tab.Pos) return None def StageStart(builder): builder.StartObject(8) def StageAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0) def StageAddData(builder, data): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0) def StageStartDataVector(builder, numElems): return builder.StartVector(4, numElems, 4) def StageAddCmdList(builder, cmdList): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(cmdList), 0) def StageStartCmdListVector(builder, numElems): return builder.StartVector(4, numElems, 4) def StageAddTrackTime(builder, trackTime): builder.PrependBoolSlot(3, trackTime, 1) def StageAddTrackRam(builder, trackRam): builder.PrependBoolSlot(4, trackRam, 0) def StageAddTrackCpu(builder, trackCpu): builder.PrependBoolSlot(5, 
trackCpu, 0) def StageAddTrackGpu(builder, trackGpu): builder.PrependBoolSlot(6, trackGpu, 0) def StageAddComment(builder, comment): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(comment), 0) def StageEnd(builder): return builder.EndObject()
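# Minimal usage sketch (illustrative only, not emitted by the FlatBuffers compiler):
# building a Stage with the helper functions above and reading it back through the
# generated accessors. The stage name "train" and the flag values are made-up data.
if __name__ == "__main__":
    builder = flatbuffers.Builder(0)
    name = builder.CreateString("train")   # strings must be created before StageStart()
    StageStart(builder)
    StageAddName(builder, name)
    StageAddTrackRam(builder, True)
    stage = StageEnd(builder)
    builder.Finish(stage)

    buf = builder.Output()
    s = Stage.GetRootAsStage(buf, 0)
    print(s.Name())        # b'train'
    print(s.TrackTime())   # True  (field absent, so the schema default applies)
    print(s.TrackRam())    # True
    print(s.DataLength())  # 0     (no Data entries were added)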
py
1a334465740145751bdb3336aefaa42bf6e0a5a5
#!/usr/bin/env python import sys, traceback import cv2 import os import re import numpy as np import argparse import string import plantcv as pcv def options(): parser = argparse.ArgumentParser(description="Imaging processing with opencv") parser.add_argument("-i", "--image", help="Input image file.", required=True) parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=False) parser.add_argument("-r","--result", help="result file.", required= False ) parser.add_argument("-r2","--coresult", help="result file.", default=None ) parser.add_argument("-w","--writeimg", help="write out images.", default=False, action="store_true") parser.add_argument("-D", "--debug", help="Turn on debug, prints intermediate images.", action=None) args = parser.parse_args() return args ### Main pipeline def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 30, 255, 'light', device, args.debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug) # Fill small objects #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 130, 255, 'light', device, args.debug) device, b_cnt = pcv.binary_threshold(b, 130, 255, 'light', device, args.debug) # Fill small objects #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug) device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug) # Threshold the green-magenta and blue images device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, args.debug) device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, args.debug) # Join the thresholded saturation and blue-yellow images (OR) device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) # Fill small noise device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, args.debug) # Dilate to join small objects with larger ones device, ab_cnt1=pcv.dilate(ab_fill1, 3, 2, device, args.debug) device, ab_cnt2=pcv.dilate(ab_fill1, 3, 2, device, args.debug) # Fill dilated image mask device, ab_cnt3=pcv.fill(ab_cnt2,ab_cnt1,150,device,args.debug) device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, args.debug) device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, args.debug) # Threshold the green-magenta and blue images device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, args.debug) device, masked2b_thresh = 
pcv.binary_threshold(masked2_b, 128, 255, 'light', device, args.debug) device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device, args.debug) # Identify objects device, id_objects,obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug) # Define ROI device, roi1, roi_hierarchy= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 590, 0,-490,-375) # Decide which objects to keep device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############## VIS Analysis ################ outfile=False if args.writeimg==True: outfile=args.outdir+"/"+filename # Find shape properties, output shape image (optional) device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,outfile) # Shape properties relative to user boundary line (optional) device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 384, device,args.debug,outfile) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header,color_data,color_img= pcv.analyze_color(img, args.image, mask, 256, device, args.debug,None,'v','img',300,outfile) # Output shape and color data result=open(args.result,"a") result.write('\t'.join(map(str,shape_header))) result.write("\n") result.write('\t'.join(map(str,shape_data))) result.write("\n") for row in shape_img: result.write('\t'.join(map(str,row))) result.write("\n") result.write('\t'.join(map(str,color_header))) result.write("\n") result.write('\t'.join(map(str,color_data))) result.write("\n") result.write('\t'.join(map(str,boundary_header))) result.write("\n") result.write('\t'.join(map(str,boundary_data))) result.write("\n") result.write('\t'.join(map(str,boundary_img1))) result.write("\n") for row in color_img: result.write('\t'.join(map(str,row))) result.write("\n") result.close() ############################# Use VIS image mask for NIR image######################### # Find matching NIR image if args.coresult is not None: device, nirpath=pcv.get_nir(path,filename,device,args.debug) nir, path1, filename1=pcv.readimage(nirpath) nir2=cv2.imread(nirpath,0) # Flip mask device, f_mask= pcv.flip(mask,"vertical",device,args.debug) device, f_mask= pcv.flip(f_mask,"vertical",device,args.debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.2591687042,0.2591687042, device, args.debug) # position, and crop mask device,newmask=pcv.crop_position_mask(nir,nmask,device,30,7,"top","right",args.debug) # Identify objects device, nir_objects,nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug) ####################################### Analysis ############################################# outfile1=False if args.writeimg==True: outfile1=args.outdir+"/"+filename1 device,nhist_header, nhist_data,nir_imgs= pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256, device,False, args.debug, outfile1) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1) coresult=open(args.coresult,"a") coresult.write('\t'.join(map(str,nhist_header))) 
coresult.write("\n") coresult.write('\t'.join(map(str,nhist_data))) coresult.write("\n") for row in nir_imgs: coresult.write('\t'.join(map(str,row))) coresult.write("\n") coresult.write('\t'.join(map(str,nshape_header))) coresult.write("\n") coresult.write('\t'.join(map(str,nshape_data))) coresult.write("\n") coresult.write('\t'.join(map(str,nir_shape))) coresult.write("\n") coresult.close() if __name__ == '__main__': main()
py
1a33447b87f600241916ca98527bfe7cb44eef5f
""" Luftballon Bewege den Korb unter den Ballon. """ newPage(1000, 1000) oval(300, 400, 500, 500) rect(450, 100, 200, 200)
py
1a334507917b422f9e2263a53da071ad7c9816a8
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document from frappe import _ class TrainingFeedback(Document): def validate(self): training_event = frappe.get_doc("Training Event", self.training_event) if training_event.docstatus != 1: frappe.throw(_('{0} must be submitted').format(_('Training Event'))) def on_submit(self): training_event = frappe.get_doc("Training Event", self.training_event) event_status = None for e in training_event.employees: if e.employee == self.employee: event_status = 'Feedback Submitted' break if event_status: frappe.db.set_value("Training Event", self.training_event, "event_status", event_status)
py
1a3345196f80f6764060ae9acd2c6cf1d5155a47
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Tokenizer class. """ from collections import OrderedDict from ...configuration_utils import PretrainedConfig from ...file_utils import is_sentencepiece_available, is_tokenizers_available from ...utils import logging from ..bart.tokenization_bart import BartTokenizer from ..bert.tokenization_bert import BertTokenizer from ..bert_japanese.tokenization_bert_japanese import BertJapaneseTokenizer from ..bertweet.tokenization_bertweet import BertweetTokenizer from ..blenderbot.tokenization_blenderbot import BlenderbotTokenizer from ..blenderbot_small.tokenization_blenderbot_small import BlenderbotSmallTokenizer from ..convbert.tokenization_convbert import ConvBertTokenizer from ..ctrl.tokenization_ctrl import CTRLTokenizer from ..deberta.tokenization_deberta import DebertaTokenizer from ..distilbert.tokenization_distilbert import DistilBertTokenizer from ..dpr.tokenization_dpr import DPRQuestionEncoderTokenizer from ..electra.tokenization_electra import ElectraTokenizer from ..flaubert.tokenization_flaubert import FlaubertTokenizer from ..fsmt.tokenization_fsmt import FSMTTokenizer from ..funnel.tokenization_funnel import FunnelTokenizer from ..gpt2.tokenization_gpt2 import GPT2Tokenizer from ..herbert.tokenization_herbert import HerbertTokenizer from ..layoutlm.tokenization_layoutlm import LayoutLMTokenizer from ..led.tokenization_led import LEDTokenizer from ..longformer.tokenization_longformer import LongformerTokenizer from ..lxmert.tokenization_lxmert import LxmertTokenizer from ..mobilebert.tokenization_mobilebert import MobileBertTokenizer from ..mpnet.tokenization_mpnet import MPNetTokenizer from ..openai.tokenization_openai import OpenAIGPTTokenizer from ..phobert.tokenization_phobert import PhobertTokenizer from ..prophetnet.tokenization_prophetnet import ProphetNetTokenizer from ..rag.tokenization_rag import RagTokenizer from ..retribert.tokenization_retribert import RetriBertTokenizer from ..roberta.tokenization_roberta import RobertaTokenizer from ..squeezebert.tokenization_squeezebert import SqueezeBertTokenizer from ..tapas.tokenization_tapas import TapasTokenizer from ..transfo_xl.tokenization_transfo_xl import TransfoXLTokenizer from ..wav2vec2.tokenization_wav2vec2 import Wav2Vec2CTCTokenizer from ..xlm.tokenization_xlm import XLMTokenizer from .configuration_auto import ( AlbertConfig, AutoConfig, BartConfig, BertConfig, BertGenerationConfig, BlenderbotConfig, BlenderbotSmallConfig, CamembertConfig, ConvBertConfig, CTRLConfig, DebertaConfig, DebertaV2Config, DistilBertConfig, DPRConfig, ElectraConfig, EncoderDecoderConfig, FlaubertConfig, FSMTConfig, FunnelConfig, GPT2Config, IBertConfig, LayoutLMConfig, LEDConfig, LongformerConfig, LxmertConfig, MarianConfig, MBartConfig, MobileBertConfig, MPNetConfig, MT5Config, OpenAIGPTConfig, PegasusConfig, ProphetNetConfig, RagConfig, ReformerConfig, RetriBertConfig, RobertaConfig, SqueezeBertConfig, T5Config, TapasConfig, TransfoXLConfig, 
Wav2Vec2Config, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLNetConfig, replace_list_option_in_docstrings, ) if is_sentencepiece_available(): from ..albert.tokenization_albert import AlbertTokenizer from ..barthez.tokenization_barthez import BarthezTokenizer from ..bert_generation.tokenization_bert_generation import BertGenerationTokenizer from ..camembert.tokenization_camembert import CamembertTokenizer from ..deberta_v2.tokenization_deberta_v2 import DebertaV2Tokenizer from ..marian.tokenization_marian import MarianTokenizer from ..mbart.tokenization_mbart import MBartTokenizer from ..mt5 import MT5Tokenizer from ..pegasus.tokenization_pegasus import PegasusTokenizer from ..reformer.tokenization_reformer import ReformerTokenizer from ..t5.tokenization_t5 import T5Tokenizer from ..xlm_prophetnet.tokenization_xlm_prophetnet import XLMProphetNetTokenizer from ..xlm_roberta.tokenization_xlm_roberta import XLMRobertaTokenizer from ..xlnet.tokenization_xlnet import XLNetTokenizer else: AlbertTokenizer = None BarthezTokenizer = None BertGenerationTokenizer = None CamembertTokenizer = None DebertaV2Tokenizer = None MarianTokenizer = None MBartTokenizer = None MT5Tokenizer = None PegasusTokenizer = None ReformerTokenizer = None T5Tokenizer = None XLMRobertaTokenizer = None XLNetTokenizer = None XLMProphetNetTokenizer = None if is_tokenizers_available(): from ..albert.tokenization_albert_fast import AlbertTokenizerFast from ..bart.tokenization_bart_fast import BartTokenizerFast from ..barthez.tokenization_barthez_fast import BarthezTokenizerFast from ..bert.tokenization_bert_fast import BertTokenizerFast from ..camembert.tokenization_camembert_fast import CamembertTokenizerFast from ..convbert.tokenization_convbert_fast import ConvBertTokenizerFast from ..distilbert.tokenization_distilbert_fast import DistilBertTokenizerFast from ..dpr.tokenization_dpr_fast import DPRQuestionEncoderTokenizerFast from ..electra.tokenization_electra_fast import ElectraTokenizerFast from ..funnel.tokenization_funnel_fast import FunnelTokenizerFast from ..gpt2.tokenization_gpt2_fast import GPT2TokenizerFast from ..herbert.tokenization_herbert_fast import HerbertTokenizerFast from ..layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast from ..led.tokenization_led_fast import LEDTokenizerFast from ..longformer.tokenization_longformer_fast import LongformerTokenizerFast from ..lxmert.tokenization_lxmert_fast import LxmertTokenizerFast from ..mbart.tokenization_mbart_fast import MBartTokenizerFast from ..mobilebert.tokenization_mobilebert_fast import MobileBertTokenizerFast from ..mpnet.tokenization_mpnet_fast import MPNetTokenizerFast from ..mt5 import MT5TokenizerFast from ..openai.tokenization_openai_fast import OpenAIGPTTokenizerFast from ..pegasus.tokenization_pegasus_fast import PegasusTokenizerFast from ..reformer.tokenization_reformer_fast import ReformerTokenizerFast from ..retribert.tokenization_retribert_fast import RetriBertTokenizerFast from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast from ..squeezebert.tokenization_squeezebert_fast import SqueezeBertTokenizerFast from ..t5.tokenization_t5_fast import T5TokenizerFast from ..xlm_roberta.tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast from ..xlnet.tokenization_xlnet_fast import XLNetTokenizerFast else: AlbertTokenizerFast = None BartTokenizerFast = None BarthezTokenizerFast = None BertTokenizerFast = None CamembertTokenizerFast = None ConvBertTokenizerFast = None DistilBertTokenizerFast = None 
DPRQuestionEncoderTokenizerFast = None ElectraTokenizerFast = None FunnelTokenizerFast = None GPT2TokenizerFast = None HerbertTokenizerFast = None LayoutLMTokenizerFast = None LEDTokenizerFast = None LongformerTokenizerFast = None LxmertTokenizerFast = None MBartTokenizerFast = None MobileBertTokenizerFast = None MPNetTokenizerFast = None MT5TokenizerFast = None OpenAIGPTTokenizerFast = None PegasusTokenizerFast = None ReformerTokenizerFast = None RetriBertTokenizerFast = None RobertaTokenizerFast = None SqueezeBertTokenizerFast = None T5TokenizerFast = None XLMRobertaTokenizerFast = None XLNetTokenizerFast = None logger = logging.get_logger(__name__) TOKENIZER_MAPPING = OrderedDict( [ (RetriBertConfig, (RetriBertTokenizer, RetriBertTokenizerFast)), (T5Config, (T5Tokenizer, T5TokenizerFast)), (MT5Config, (MT5Tokenizer, MT5TokenizerFast)), (MobileBertConfig, (MobileBertTokenizer, MobileBertTokenizerFast)), (DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)), (AlbertConfig, (AlbertTokenizer, AlbertTokenizerFast)), (CamembertConfig, (CamembertTokenizer, CamembertTokenizerFast)), (PegasusConfig, (PegasusTokenizer, PegasusTokenizerFast)), (MBartConfig, (MBartTokenizer, MBartTokenizerFast)), (XLMRobertaConfig, (XLMRobertaTokenizer, XLMRobertaTokenizerFast)), (MarianConfig, (MarianTokenizer, None)), (BlenderbotSmallConfig, (BlenderbotSmallTokenizer, None)), (BlenderbotConfig, (BlenderbotTokenizer, None)), (LongformerConfig, (LongformerTokenizer, LongformerTokenizerFast)), (BartConfig, (BartTokenizer, BartTokenizerFast)), (LongformerConfig, (LongformerTokenizer, LongformerTokenizerFast)), (RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)), (ReformerConfig, (ReformerTokenizer, ReformerTokenizerFast)), (ElectraConfig, (ElectraTokenizer, ElectraTokenizerFast)), (FunnelConfig, (FunnelTokenizer, FunnelTokenizerFast)), (LxmertConfig, (LxmertTokenizer, LxmertTokenizerFast)), (LayoutLMConfig, (LayoutLMTokenizer, LayoutLMTokenizerFast)), (DPRConfig, (DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast)), (SqueezeBertConfig, (SqueezeBertTokenizer, SqueezeBertTokenizerFast)), (BertConfig, (BertTokenizer, BertTokenizerFast)), (OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)), (GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)), (TransfoXLConfig, (TransfoXLTokenizer, None)), (XLNetConfig, (XLNetTokenizer, XLNetTokenizerFast)), (FlaubertConfig, (FlaubertTokenizer, None)), (XLMConfig, (XLMTokenizer, None)), (CTRLConfig, (CTRLTokenizer, None)), (FSMTConfig, (FSMTTokenizer, None)), (BertGenerationConfig, (BertGenerationTokenizer, None)), (DebertaConfig, (DebertaTokenizer, None)), (DebertaV2Config, (DebertaV2Tokenizer, None)), (RagConfig, (RagTokenizer, None)), (XLMProphetNetConfig, (XLMProphetNetTokenizer, None)), (ProphetNetConfig, (ProphetNetTokenizer, None)), (MPNetConfig, (MPNetTokenizer, MPNetTokenizerFast)), (TapasConfig, (TapasTokenizer, None)), (LEDConfig, (LEDTokenizer, LEDTokenizerFast)), (ConvBertConfig, (ConvBertTokenizer, ConvBertTokenizerFast)), (IBertConfig, (RobertaTokenizer, RobertaTokenizerFast)), (Wav2Vec2Config, (Wav2Vec2CTCTokenizer, None)), ] ) # For tokenizers which are not directly mapped from a config NO_CONFIG_TOKENIZER = [ BertJapaneseTokenizer, BertweetTokenizer, HerbertTokenizer, HerbertTokenizerFast, PhobertTokenizer, BarthezTokenizer, BarthezTokenizerFast, ] SLOW_TOKENIZER_MAPPING = { k: (v[0] if v[0] is not None else v[1]) for k, v in TOKENIZER_MAPPING.items() if (v[0] is not None or v[1] is not None) } def 
tokenizer_class_from_name(class_name: str): all_tokenizer_classes = ( [v[0] for v in TOKENIZER_MAPPING.values() if v[0] is not None] + [v[1] for v in TOKENIZER_MAPPING.values() if v[1] is not None] + NO_CONFIG_TOKENIZER ) for c in all_tokenizer_classes: if c.__name__ == class_name: return c class AutoTokenizer: r""" This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when created with the :meth:`AutoTokenizer.from_pretrained` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoTokenizer is designed to be instantiated " "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(SLOW_TOKENIZER_MAPPING) def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): r""" Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary. The tokenizer class to instantiate is selected based on the :obj:`model_type` property of the config object (either passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`: List options Params: pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`): Can be either: - A string, the `model id` of a predefined tokenizer hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g., ``./my_model_directory/``. - A path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (like Bert or XLNet), e.g.: ``./my_model_directory/vocab.txt``. (Not applicable to all derived classes) inputs (additional positional arguments, `optional`): Will be passed along to the Tokenizer ``__init__()`` method. config (:class:`~transformers.PreTrainedConfig`, `optional`) The configuration object used to dertermine the tokenizer class to instantiate. cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force the (re-)download the model weights and configuration files and override the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (:obj:`Dict[str, str]`, `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. revision(:obj:`str`, `optional`, defaults to :obj:`"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git. 
subfolder (:obj:`str`, `optional`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for facebook/rag-token-base), specify it here. use_fast (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to try to load the fast version of the tokenizer. kwargs (additional keyword arguments, `optional`): Will be passed to the Tokenizer ``__init__()`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__()`` for more details. Examples:: >>> from transformers import AutoTokenizer >>> # Download vocabulary from huggingface.co and cache. >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') >>> # Download vocabulary from huggingface.co (user-uploaded) and cache. >>> tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased') >>> # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`) >>> tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/') """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) use_fast = kwargs.pop("use_fast", True) if config.tokenizer_class is not None: tokenizer_class = None if use_fast and not config.tokenizer_class.endswith("Fast"): tokenizer_class_candidate = f"{config.tokenizer_class}Fast" tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) if tokenizer_class is None: tokenizer_class_candidate = config.tokenizer_class tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) if tokenizer_class is None: raise ValueError( "Tokenizer class {} does not exist or is not currently imported.".format(tokenizer_class_candidate) ) return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) # if model is an encoder decoder, the encoder tokenizer class is used by default if isinstance(config, EncoderDecoderConfig): if type(config.decoder) is not type(config.encoder): # noqa: E721 logger.warn( f"The encoder model config class: {config.encoder.__class__} is different from the decoder model " f"config class: {config.decoder.__class}. It is not recommended to use the " "`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder " "specific tokenizer classes." ) config = config.encoder if type(config) in TOKENIZER_MAPPING.keys(): tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)] if tokenizer_class_fast and (use_fast or tokenizer_class_py is None): return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) else: if tokenizer_class_py is not None: return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) else: raise ValueError( "This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed " "in order to use this tokenizer." ) raise ValueError( "Unrecognized configuration class {} to build an AutoTokenizer.\n" "Model type should be one of {}.".format( config.__class__, ", ".join(c.__name__ for c in TOKENIZER_MAPPING.keys()) ) )
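# Minimal usage sketch (illustrative only). tokenizer_class_from_name() resolves the
# class named in a config's `tokenizer_class` field; AutoTokenizer.from_pretrained()
# is the normal entry point, as shown in its docstring.
if __name__ == "__main__":
    # "BertTokenizer" resolves to the slow class; the fast lookup returns None when
    # the `tokenizers` package is not installed.
    print(tokenizer_class_from_name("BertTokenizer"))
    print(tokenizer_class_from_name("GPT2TokenizerFast"))
    # tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)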
py
1a334560d8d22d93fb8ac7f7e63aa55660bd0f6b
import functools from flask import ( Blueprint, flash, g, redirect, render_template, request, session, url_for) from werkzeug.security import check_password_hash, generate_password_hash from flask_blog.db import get_db bp = Blueprint('auth', __name__ , url_prefix='/auth') @bp.route('/register',methods=('GET','POST')) def register(): if request.method=='POST': username = request.form['username'] password = request.form['password'] db = get_db() error = None if not username: error = 'Username is required' elif not password: error = 'Password is required' elif db.execute('SELECT id FROM user WHERE username = ?',(username,)).fetchone() is not None: error = 'User {} is already registered'.format(username) if error is None: db.execute('INSERT INTO user (username,password) VALUES(?,?)', (username,generate_password_hash(password))) db.commit() return redirect(url_for('auth.login')) flash(error) return render_template('auth/register.html') @bp.route('/login',methods=('GET','POST')) def login(): if request.method == 'POST': username = request.form['username'] password = request.form['password'] db = get_db() error = None user =db.execute('SELECT * FROM user WHERE username = ?',(username,)).fetchone() if user is None: error = 'Incorrect username.' elif not check_password_hash(user['password'],password) : error = 'Incorrect password.' if error is None: session.clear() session['user_id']=user['id'] return redirect(url_for('index')) flash(error) return render_template('auth/login.html') @bp.before_app_request def load_logged_in_user(): user_id = session.get('user_id') if user_id is None: g.user =None else: g.user =get_db().execute('SELECT * FROM user WHERE id = ?',(user_id,)).fetchone() @bp.route('/logout') def logout(): session.clear() return redirect(url_for('index')) def login_required(view): @functools.wraps(view) def wrapped_view(**kwargs): if g.user is None: return redirect(url_for('auth.login')) return view(**kwargs) return wrapped_view
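# Minimal usage sketch: login_required() is meant to wrap views defined in other
# blueprints of this application. The blueprint and view below are hypothetical.
#
#   from flask_blog.auth import login_required
#
#   @bp.route('/create', methods=('GET', 'POST'))
#   @login_required
#   def create():
#       ...  # only reachable once load_logged_in_user() has populated g.user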
py
1a33458b86341c84e95a91df1353a8daaf3ee52f
""" spyre.Tools.wfm_writer.py ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tool to write waveform in a .WFM format Authors: Alexandre Bourassa, Kevin Miao Date: 20/04/2016 """ import struct import numpy as _np from datetime import datetime as _dt import time as _t import sys def array_to_ieee_block(analog, marker1, marker2, prepend_length=True): """ Produces a little-endian 4-byte floating point + 1-byte marker representation of analog :param analog: Array of numpy float32 :param marker1: Array of numpy int8 :param marker2: Array of numpy int8 :return: Byte Stream in the WFM format """ num_bytes = 5 * len(analog) num_digit = len(str(num_bytes)) if not marker1.dtype==_np.int8: marker1 = _np.asarray(marker1, dtype=_np.int8) if not marker2.dtype==_np.int8: marker2 = _np.asarray(marker2, dtype=_np.int8) if not analog.dtype == _np.float32: analog = _np.asarray(analog, dtype=_np.float32) points = _np.zeros(len(analog), dtype='<f4, i1') points['f1'] = (marker1 + ((marker2)<<1))<<6 points['f0'] = analog #Makes sure that the byteordering is 'little' if not sys.byteorder == 'little': points = points.newbyteorder('<') bin_all = points.tobytes() if prepend_length: return bytes('#{:d}{:d}'.format(num_digit, num_bytes), encoding='ascii') + bin_all else : return bin_all def iee_block_to_array(block): """ Decodes an iee block into three arrays :param block: iee formatted block :return: analog, marker1, marker2 """ block = block.rstrip() #Check for a '#' if block[0:1] != b'#': raise ValueError("Argument is not a iee formatted block") #Check for that there is the correct number of bytes num_digit = int(block[1:2]) num_bytes = int(block[2:2+num_digit]) block = block[2 + num_digit:] if len(block) != num_bytes: raise ValueError("Argument is not a iee formatted block") n_points = int(num_bytes/5) array = struct.unpack('<'+'fB'*n_points, block) analog = _np.array(array[::2]) marker = _np.array(array[1::2]) print(marker) marker1 = _np.right_shift(_np.bitwise_and(marker, 64), 6) marker2 = _np.right_shift(marker, 7) return analog, marker1, marker2 class AWG_Record(object): def __init__(self, name, data, data_type=None): self.name = name self.data = data if data_type is None: if type(data) == str: self.data_type = 'char' elif type(data) == float: self.data_type = 'double' elif type(data) == bytes : self.data_type = 'bytes' else: self.data_type = 'short' else: if not data_type in ['char', 'double', 'long', 'short', 'bytes']: raise Exception("Invalid data type!") self.data_type= data_type def _get_format_str(self, data, type): if type == 'char': return '%ds'%(len(data)+1) elif type == 'double': return 'd' elif type == 'long': return 'l' elif type == 'short': return 'h' elif type == 'bytes': return '%ds'%(len(data)) else: raise Exception("Invalid data type!") def _get_length(self, data, type): if type == 'char': return len(data) + 1 elif type == 'bytes': return len(data) elif type == 'double': return 8 elif type == 'long': return 4 elif type == 'short': return 2 else :raise Exception("Invalid data type!") def get_bytes(self): fmt = '<ii'+self._get_format_str(self.name, 'char')+self._get_format_str(self.data, self.data_type) name_l = self._get_length(self.name, 'char') data_l = self._get_length(self.data, self.data_type) if self.data_type == 'char': data = bytes(self.data, encoding='ascii') else : data = self.data return struct.pack(fmt, name_l, data_l, bytes(self.name, encoding='ascii'), data) class AWG_File_Writer(object): def __init__(self): self.records = ([],[],[],[],[],[],[],) self.add_record("MAGIC", 5000, 1) 
self.add_record("VERSION", 1, 1) self.wfm = list() self.n_seq_lines = 0 def add_record(self, name, data, group, data_type=None): group -= 1 if not group in range(7): raise Exception("Invalid group!") self.records[group].append(AWG_Record(name, data, data_type=data_type)) def add_waveform(self, name, analog, marker1, marker2): if len(self.wfm)>=32000: raise Exception("Maximum 32000 waveform in .AWG file...") if len(analog)<250: print("WARNING: The AWG will use the software sequencer because this waveform has less than 250 points") data = array_to_ieee_block(analog, marker1, marker2, prepend_length=False) t = _dt.now() tm = [t.year, t.month, t.weekday(), t.day, t.hour, t.minute, t.second, t.microsecond // 1000] self.wfm.append(name) N = len(self.wfm) self.add_record("WAVEFORM_NAME_{}".format(N), name, 5) self.add_record("WAVEFORM_TYPE_{}".format(N), 2, 5) self.add_record("WAVEFORM_LENGTH_{}".format(N), len(analog), 5, data_type='long') self.add_record("WAVEFORM_TIMESTAMP_{}".format(N), struct.pack('<' + 'h' * 8, *tm), 5, data_type='bytes') self.add_record("WAVEFORM_DATA_{}".format(N), data, 5) def add_sequence_line(self, wfm=("", "", "", ""), use_sub_seq = False, sub_seq_name="", repeat_count=0, wait_for_trigger=False, jump_target=0, goto_target=0): if self.n_seq_lines >= 8000: raise Exception("Maximum 8000 lines for main sequence in .AWG file...") N = self.n_seq_lines + 1 if not (len(wfm) == 4): raise Exception("There should be 4 entries in the wfm tuples") if not 65536 >= repeat_count >= 0: raise Exception("Maximum of 65536 for repeat_count") if not use_sub_seq and wfm[0] == wfm[1] == wfm[2] == wfm[3] == "": raise Exception("At least one channel must have non-empty wfm") if use_sub_seq and sub_seq_name=="": raise Exception("sub_seq_name is empty") self.add_record('SEQUENCE_WAIT_{}'.format(N), wait_for_trigger, 6) self.add_record('SEQUENCE_LOOP_{}'.format(N), repeat_count, 6, data_type='long') self.add_record('SEQUENCE_JUMP_{}'.format(N), jump_target, 6) self.add_record('SEQUENCE_GOTO_{}'.format(N), goto_target, 6) # Add the wfm / subseq if use_sub_seq: wfm = ("", "", "", "") else: sub_seq_name = "" for i in range(len(wfm)): if not use_sub_seq: if wfm[i] != "": self.add_record("SEQUENCE_WAVEFORM_NAME_CH_{}_{}".format(i + 1, N), wfm[i], 6) self.add_record("SEQUENCE_IS_SUBSEQ_{}".format(N), int(use_sub_seq), 6, data_type='long') self.add_record("SEQUENCE_SUBSEQ_NAME_{}".format(N), sub_seq_name, 6) self.n_seq_lines += 1 def add_subseq(self, name): ss = Sub_Sequence(name) self.records[6].append(ss) return ss def get_bytes(self): ans = list() for i in range(len(self.records)): group_list = self.records[i] if not i == 6: ans.extend([entry.get_bytes() for entry in group_list]) else: # Special treatement for subseq group subseq_number, cummul_line = 1, 0 for ss in group_list: if len(ss.lines) != 0: ans += ss.get_bytes(subseq_number,cummul_line) subseq_number += 1 cummul_line += len(ss.lines) return b''.join(ans) class Sub_Sequence(object): def __init__(self, name): self.name = name self.lines = list() def add_line(self, wfm=("", "", "", ""), repeat_count=1): if not 65536 >= repeat_count >= 0: raise Exception("Maximum of 65536 for repeat_count") if not (len(wfm) == 4): raise Exception("There should be 4 entries in the wfm tuples") if wfm[0] == wfm[1] == wfm[2] == wfm[3] == "": raise Exception("At least one channel must have non-empty wfm") self.lines.append([repeat_count, wfm]) # line = list() # n = len(self.lines + 1) # line.append(AWG_Record("SUBSEQ_LOOP_{}_{}_{}".format(n,self.o,n), , 
data_type=data_type)) def get_bytes(self, subseq_number, cummul_line): ans = b'' u = cummul_line + 1 o = subseq_number t = _dt.now() tm = [t.year, t.month, t.weekday(), t.day, t.hour, t.minute, t.second, t.microsecond // 1000] rec = [ AWG_Record("SUBSEQ_NAME_{}".format(o), self.name), AWG_Record("SUBSEQ_TIMESTAMP_{}".format(o), struct.pack('<' + 'h' * 8, *tm), data_type='bytes'), AWG_Record("SUBSEQ_LENGTH_{}".format(o), len(self.lines), data_type='long') ] n = 1 for line in self.lines: rec.append(AWG_Record("SUBSEQ_LOOP_{}_{}_{}".format(n,o,u), line[0], data_type='long')) wfm = line[1] for i in range(len(wfm)): if wfm[i] != "": rec.append(AWG_Record("SUBSEQ_WAVEFORM_NAME_CH_{}_{}_{}_{}".format(i + 1, n, o, u), wfm[i])) n += 1 u += 1 for entry in rec: ans += entry.get_bytes() return ans # ----------------------------------- # DEPRECATED # ----------------------------------- def create_wfm(analog, marker1, marker2, clock=None): """ Generate the byte stream for a WFM file given 3 arrays (analog, marker1 and marker2) :param analog: Array of float :param marker1: Array of bool (or 1/0) :param marker2: Array of bool (or 1/0) :param clock: The clock speed that the waveform should be run at :return: Byte Stream in the WFM format """ if not (len(analog) == len(marker1) == len(marker2)): raise ValueError('Mismatched analog and marker lengths') if max(analog) > 1.0 or min(analog) < -1.0: raise ValueError('analog values out of range') header = b'MAGIC 1000\r\n' trailer = bytes('CLOCK {:1.10E}\r\n'.format(clock), encoding='ascii') if clock is not None else b'' body = array_to_iee_block(analog, marker1, marker2) return b''.join((header, body, trailer)) class Sequence(object): def __init__(self): self.seq = [] def add_line(self, ch1_wfm="", ch2_wfm="", ch3_wfm="", ch4_wfm="", repeat_count=0, wait_for_trigger=False, logic_jump_target=0, finished_goto=0): """ This defines a new sequence line to be added to this SEQ file :param ch1_wfm: wfm (or pat) file to be used for CH1 on this line. :param ch2_wfm: wfm (or pat) file to be used for CH2 on this line. :param ch3_wfm: wfm (or pat) file to be used for CH3 on this line. :param ch4_wfm: wfm (or pat) file to be used for CH4 on this line. :param repeat_count: Repeat count for the line. 0 is infinity :param wait_for_trigger: Specify whether or not to wait for a trigger before running the wfm :param logic_jump_target: Line number where to jump upon EVENT IN input or FORCE EVENT triggers. 0 is Off, -1 is next, and -2 is Table-jump :param finished_goto: Line to go after current line. 0 is Next. Maximum 8000. 
:return: """ wait_for_trigger = 1 if bool(wait_for_trigger) else 0 line = '"{}","{}","{}","{}",{},{},{},{},{}\r\n'.format(ch1_wfm, ch2_wfm, ch3_wfm, ch4_wfm,int(repeat_count), wait_for_trigger, 0, int(logic_jump_target), finished_goto) self.seq.append(line) def verify_line(self, line): line = line.strip() args = line.split(",") print(line) print(args) if len(args) != 9: raise Exception("The number of paramter in the line <{}> is incorrect".format(line)) if args[0]==args[1]==args[2]==args[3]=="": raise Exception("At least one channel must have non-empty wfm") if not 0<=int(args[4])<=65536: raise Exception("Invalid repeat_counts (must be 0 for infinity or [1,65536])") if not args[5] in ["0","1"]: raise Exception("wait_for_trigger must be 0 or 1") if not args[6] == "0": raise Exception("goto_one is not implemented and therefore must be set to 0") if not -2<=int(args[7])<=len(self.seq): raise Exception("Invalid logic_jump_target argument (must be in [-2, N] where N is the number of line in the sequence)") if not 0<=int(args[8])<=len(self.seq): raise Exception("Invalid finnished_goto argument (must be in [0, N] where N is the number of line in the sequence)") def get_str(self): s = "MAGIC 3004A\r\nLINES {}".format(len(self.seq)) if len(self.seq)>8000: raise Exception("More than 8000 lines may not work...") for line in self.seq: self.verify_line(line) s+= line return s def get_bytes(self): return bytes(self.get_str() ,encoding='ascii')
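# --- Hedged usage sketch (not part of the original module) ---------------------------
# Shows how the classes above can be combined to write a minimal one-channel .AWG file.
# The waveform name 'ramp' and the output path 'example.awg' are made-up values for
# illustration only.
if __name__ == '__main__':
    n_points = 1000
    analog = _np.linspace(-1.0, 1.0, n_points, dtype=_np.float32)   # full-scale ramp
    marker1 = _np.zeros(n_points, dtype=_np.int8)
    marker2 = _np.ones(n_points, dtype=_np.int8)

    writer = AWG_File_Writer()
    writer.add_waveform('ramp', analog, marker1, marker2)
    writer.add_sequence_line(wfm=('ramp', '', '', ''), repeat_count=1)

    with open('example.awg', 'wb') as f:
        f.write(writer.get_bytes())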
py
1a3345c90dc2e7434958541f422b9b1332fe3d17
from typing import Dict, List, Optional, Union, cast from torch import Tensor, stack from kornia.augmentation import random_generator as rg from kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D _planckian_coeffs = { 'blackbody': Tensor( [ [0.6743, 0.4029, 0.0013], [0.6281, 0.4241, 0.1665], [0.5919, 0.4372, 0.2513], [0.5623, 0.4457, 0.3154], [0.5376, 0.4515, 0.3672], [0.5163, 0.4555, 0.4103], [0.4979, 0.4584, 0.4468], [0.4816, 0.4604, 0.4782], [0.4672, 0.4619, 0.5053], [0.4542, 0.4630, 0.5289], [0.4426, 0.4638, 0.5497], [0.4320, 0.4644, 0.5681], [0.4223, 0.4648, 0.5844], [0.4135, 0.4651, 0.5990], [0.4054, 0.4653, 0.6121], [0.3980, 0.4654, 0.6239], [0.3911, 0.4655, 0.6346], [0.3847, 0.4656, 0.6444], [0.3787, 0.4656, 0.6532], [0.3732, 0.4656, 0.6613], [0.3680, 0.4655, 0.6688], [0.3632, 0.4655, 0.6756], [0.3586, 0.4655, 0.6820], [0.3544, 0.4654, 0.6878], [0.3503, 0.4653, 0.6933], ] ), 'CIED': Tensor( [ [0.5829, 0.4421, 0.2288], [0.5510, 0.4514, 0.2948], [0.5246, 0.4576, 0.3488], [0.5021, 0.4618, 0.3941], [0.4826, 0.4646, 0.4325], [0.4654, 0.4667, 0.4654], [0.4502, 0.4681, 0.4938], [0.4364, 0.4692, 0.5186], [0.4240, 0.4700, 0.5403], [0.4127, 0.4705, 0.5594], [0.4023, 0.4709, 0.5763], [0.3928, 0.4713, 0.5914], [0.3839, 0.4715, 0.6049], [0.3757, 0.4716, 0.6171], [0.3681, 0.4717, 0.6281], [0.3609, 0.4718, 0.6380], [0.3543, 0.4719, 0.6472], [0.3480, 0.4719, 0.6555], [0.3421, 0.4719, 0.6631], [0.3365, 0.4719, 0.6702], [0.3313, 0.4719, 0.6766], [0.3263, 0.4719, 0.6826], [0.3217, 0.4719, 0.6882], ] ), } _planckian_coeffs_ratio = { 'blackbody': stack( ( _planckian_coeffs['blackbody'][:, 0] / _planckian_coeffs['blackbody'][:, 1], _planckian_coeffs['blackbody'][:, 2] / _planckian_coeffs['blackbody'][:, 1], ), 1, ), 'CIED': stack( ( _planckian_coeffs['CIED'][:, 0] / _planckian_coeffs['CIED'][:, 1], _planckian_coeffs['CIED'][:, 2] / _planckian_coeffs['CIED'][:, 1], ), 1, ), } class RandomPlanckianJitter(IntensityAugmentationBase2D): r"""Apply planckian jitter transformation to input tensor. .. image:: _static/img/RandomPlanckianJitter.png This is physics based color augmentation, that creates realistic variations in chromaticity, this can simulate the illumination changes in the scene. See :cite:`zini2022planckian` for more details. Args: mode: 'blackbody' or 'CIED'. select_from: choose a list of jitters to apply from. `blackbody` range [0-24], `CIED` range [0-22] same_on_batch: apply the same transformation across the batch. p: probability that the random erasing operation will be performed. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). Shape: - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)` - Output: :math:`(B, C, H, W)` Note: Input tensor must be float and normalized into [0, 1]. 
Examples: To apply planckian jitter based on mode >>> rng = torch.manual_seed(0) >>> input = torch.randn(1, 3, 2, 2) >>> aug = RandomPlanckianJitter(mode='CIED') >>> aug(input) tensor([[[[ 1.0000, -0.2389], [-1.7740, 0.4628]], <BLANKLINE> [[-1.0845, -1.3986], [ 0.4033, 0.8380]], <BLANKLINE> [[-0.9228, -0.5175], [-0.7654, 0.2335]]]]) To apply planckian jitter on image(s) from list of interested jitters >>> rng = torch.manual_seed(0) >>> input = torch.randn(2, 3, 2, 2) >>> aug = RandomPlanckianJitter(mode='blackbody', select_from=[23, 24, 1, 2]) >>> aug(input) tensor([[[[-1.1258, -1.1524], [-0.2506, -0.4339]], <BLANKLINE> [[ 0.8487, 0.6920], [-0.3160, -2.1152]], <BLANKLINE> [[ 0.4681, -0.1577], [ 1.4437, 0.2660]]], <BLANKLINE> <BLANKLINE> [[[ 0.2465, 1.0000], [-0.2125, -0.1653]], <BLANKLINE> [[ 0.9318, 1.0000], [ 1.0000, 0.0537]], <BLANKLINE> [[ 0.2426, -0.1621], [-0.3302, -0.9093]]]]) """ def __init__( self, mode: str = "blackbody", select_from: Optional[Union[int, List[int]]] = None, same_on_batch: bool = False, p: float = 0.5, keepdim: bool = False, return_transform: Optional[bool] = None, ) -> None: super().__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch, keepdim=keepdim) if isinstance(select_from, int): select_from = [select_from] if select_from is not None: self.register_buffer('pl', _planckian_coeffs_ratio[mode][select_from]) else: self.register_buffer('pl', _planckian_coeffs_ratio[mode]) # the range of the sampling parameters _param_min: float = 0.0 _param_max: float = float(cast(Tensor, self.pl).shape[0]) self._param_generator = cast(rg.PlanckianJitterGenerator, rg.PlanckianJitterGenerator([_param_min, _param_max])) def apply_transform(self, input: Tensor, params: Dict[str, Tensor], transform: Optional[Tensor] = None) -> Tensor: list_idx = params['idx'].tolist() coeffs = cast(Tensor, self.pl)[list_idx] r_w = coeffs[:, 0][..., None, None] b_w = coeffs[:, 1][..., None, None] r = input[..., 0, :, :] * r_w g = input[..., 1, :, :] b = input[..., 2, :, :] * b_w output = stack([r, g, b], -3) return output.clamp(max=1.0)
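# --- Hedged usage sketch (not part of the kornia source above) -----------------------
# Applies the augmentation to a random batch already normalized to [0, 1], as required
# by the Note in the docstring. p=1.0 forces the jitter on every sample and the indices
# in select_from are arbitrary examples.
if __name__ == '__main__':
    import torch

    images = torch.rand(4, 3, 32, 32)                    # B x C x H x W, values in [0, 1]
    aug = RandomPlanckianJitter(mode='blackbody', select_from=[0, 5, 10], p=1.0)
    out = aug(images)
    print(out.shape)                                      # torch.Size([4, 3, 32, 32])
    print(float(out.min()), float(out.max()))             # still inside [0, 1]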
py
1a33465483d7d9398fd071158541178754984e1b
import random

top20 = open("../Dataset/Top20.tsv").read().split("\n")
docs = open("../Dataset/Documents.tsv").read().split("\n")
queries = open("../Dataset/queries.tsv").read().split("\n")
OUT = open("../Dataset/finetune_en.tsv", 'w')

doc_list = {}
for doc in docs:
    if doc != "":
        doc_id = doc.split("\t")[0]
        doc = doc.split("\t")[1]
        doc_list[doc_id] = doc
doc_len = len(doc_list.keys())

query_list = {}
for query in queries:
    if query != "":
        query_id = query.split("\t")[0]
        query_text = query.split("\t")[1]
        query_list[query_id] = query_text

cnt = 0
print(doc_len)
for top20_ in top20:
    if top20_ != "":  # was 'is not ""', which tests identity instead of equality
        cnt += 1
        top20_ = top20_.split("\t")
        query_id = top20_[0]
        doc_id = top20_[1]  ## positive
        #### negative sampling ####
        neg_id = top20_[1]
        while str(neg_id) in top20_:
            neg_id = random.randint(0, doc_len - 1)
            print("aaaaa")
        #print("#########", doc_list[str(neg_id)])
        OUT.write(query_list[query_id] + "\t" + doc_list[doc_id] + "\t" + doc_list[str(neg_id)] + "\n")
print(cnt)
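# --- Hedged illustration (not part of the original script) ---------------------------
# The core of the loop above is rejection sampling of a negative document id that does
# not already appear on the current Top20 line; the same idea isolated on toy data.
def sample_negative(doc_count, line_fields):
    """Draw a doc id in [0, doc_count) whose string form is not among line_fields."""
    neg_id = line_fields[1]                  # start from the positive doc id
    while str(neg_id) in line_fields:
        neg_id = random.randint(0, doc_count - 1)
    return neg_id

if __name__ == '__main__':
    print(sample_negative(10, ['q7', '3']))  # any id in 0..9 except 3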
py
1a3348ab103845580e1d60b364011fb3f9bb8da6
from ncclient import manager eos=manager.connect(host="10.83.28.221", port="830", timeout=30, username="arista", password="arista", hostkey_verify=False) conf = ''' <config xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0"> <system xmlns="http://arista.com/yang/openconfig/system/"> <config> <domain-name operation="delete">abc.xyz</domain-name> </config> <dns> <servers> <server> <address operation="delete">1.1.1.1</address> </server> </servers> </dns> </system> </config> ''' reply = eos.edit_config(target = "running", config = conf, default_operation="none") conf = ''' <config xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0"> <system xmlns="http://arista.com/yang/openconfig/system/"> <aaa> <authentication> <users> <user> <username operation="delete">gnmi</username> </user> </users> </authentication> </aaa> </system> </config> ''' reply = eos.edit_config(target = "running", config = conf, default_operation="none") eos.close_session()
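# --- Hedged follow-up sketch (not part of the original script) -----------------------
# After pushing the two deletes above, the result can be checked by reading the running
# datastore back with a subtree filter. Host and credentials repeat the values used
# above; the filter payload is an assumption about what is worth inspecting.
system_filter = '''
<filter>
  <system xmlns="http://arista.com/yang/openconfig/system/"/>
</filter>
'''

with manager.connect(host="10.83.28.221", port="830", timeout=30,
                     username="arista", password="arista",
                     hostkey_verify=False) as eos:
    reply = eos.get_config(source="running", filter=system_filter)
    print(reply.data_xml)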
py
1a3348aedbef7a97572cebf3f293a4a6a3b7a974
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest class Namespace: def __init__(self, **kwargs): self.__dict__.update(kwargs) @pytest.fixture(scope='session') def cloud_config(): """Provides a configuration object as a proxy to environment variables.""" return Namespace( project=os.environ.get('GCLOUD_PROJECT'), storage_bucket=os.environ.get('CLOUD_STORAGE_BUCKET'), client_secrets=os.environ.get('GOOGLE_CLIENT_SECRETS'), bigtable_cluster=os.environ.get('BIGTABLE_CLUSTER'), bigtable_zone=os.environ.get('BIGTABLE_ZONE')) def get_resource_path(resource, local_path): local_resource_path = os.path.join(local_path, 'resources', *resource) if os.path.exists(local_resource_path): return local_resource_path else: raise EnvironmentError('Resource {} not found.'.format( os.path.join(*resource))) @pytest.fixture(scope='module') def resource(request): """Provides a function that returns the full path to a local or global testing resource""" local_path = os.path.dirname(request.module.__file__) return lambda *args: get_resource_path(args, local_path)
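# --- Hedged usage sketch (not part of the original conftest) -------------------------
# A test module in the same package could consume the fixtures above like this; the
# bucket assertion and the 'data.csv' resource name are made-up examples (pytest does
# not collect tests from conftest.py itself).
def test_cloud_config_is_populated(cloud_config):
    assert cloud_config.project is not None
    assert cloud_config.storage_bucket is not None


def test_reads_local_resource(resource):
    path = resource('data.csv')          # resolves to <test dir>/resources/data.csv
    with open(path) as f:
        assert f.read()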
py
1a334a2505382defba4a5e467e43898796824d3b
# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import logging import os.path import re import subprocess import textwrap from reno import create from reno import scanner from reno.tests import base from reno import utils import fixtures import mock from testtools.content import text_content _SETUP_TEMPLATE = """ import setuptools try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr'], pbr=True) """ _CFG_TEMPLATE = """ [metadata] name = testpkg summary = Test Package [files] packages = testpkg """ class GPGKeyFixture(fixtures.Fixture): """Creates a GPG key for testing. It's recommended that this be used in concert with a unique home directory. """ def setUp(self): super(GPGKeyFixture, self).setUp() tempdir = self.useFixture(fixtures.TempDir()) gnupg_version_re = re.compile('^gpg\s.*\s([\d+])\.([\d+])\.([\d+])') gnupg_version = utils.check_output(['gpg', '--version'], cwd=tempdir.path) for line in gnupg_version[0].split('\n'): gnupg_version = gnupg_version_re.match(line) if gnupg_version: gnupg_version = (int(gnupg_version.group(1)), int(gnupg_version.group(2)), int(gnupg_version.group(3))) break else: if gnupg_version is None: gnupg_version = (0, 0, 0) config_file = tempdir.path + '/key-config' f = open(config_file, 'wt') try: if gnupg_version[0] == 2 and gnupg_version[1] >= 1: f.write(""" %no-protection %transient-key """) f.write(""" %no-ask-passphrase Key-Type: RSA Name-Real: Example Key Name-Comment: N/A Name-Email: [email protected] Expire-Date: 2d Preferences: (setpref) %commit """) finally: f.close() # Note that --quick-random (--debug-quick-random in GnuPG 2.x) # does not have a corresponding preferences file setting and # must be passed explicitly on the command line instead # if gnupg_version[0] == 1: # gnupg_random = '--quick-random' # elif gnupg_version[0] >= 2: # gnupg_random = '--debug-quick-random' # else: # gnupg_random = '' subprocess.check_call( ['gpg', '--gen-key', '--batch', # gnupg_random, config_file], cwd=tempdir.path) class Base(base.TestCase): def _run_git(self, *args): return utils.check_output( ['git'] + list(args), cwd=self.reporoot, ) def _git_setup(self): os.makedirs(self.reporoot) self._run_git('init', '.') self._run_git('config', '--local', 'user.email', '[email protected]') self._run_git('config', '--local', 'user.name', 'reno developer') self._run_git('config', '--local', 'user.signingkey', '[email protected]') def _git_commit(self, message='commit message'): self._run_git('add', '.') self._run_git('commit', '-m', message) def _add_other_file(self, name): with open(os.path.join(self.reporoot, name), 'w') as f: f.write('adding %s\n' % name) self._git_commit('add %s' % name) def _add_notes_file(self, slug='slug', commit=True, legacy=False): n = self.get_note_num() if legacy: basename = '%016x-%s.yaml' % (n, slug) else: basename = '%s-%016x.yaml' % (slug, n) filename = os.path.join(self.reporoot, 'releasenotes', 'notes', basename) create._make_note_file(filename) self._git_commit('add %s' % 
basename) return os.path.join('releasenotes', 'notes', basename) def _make_python_package(self): setup_name = os.path.join(self.reporoot, 'setup.py') with open(setup_name, 'w') as f: f.write(_SETUP_TEMPLATE) cfg_name = os.path.join(self.reporoot, 'setup.cfg') with open(cfg_name, 'w') as f: f.write(_CFG_TEMPLATE) pkgdir = os.path.join(self.reporoot, 'testpkg') os.makedirs(pkgdir) init = os.path.join(pkgdir, '__init__.py') with open(init, 'w') as f: f.write("Test package") self._git_commit('add test package') def setUp(self): super(Base, self).setUp() self.logger = self.useFixture( fixtures.FakeLogger( format='%(levelname)8s %(name)s %(message)s', level=logging.DEBUG, nuke_handlers=True, ) ) # Older git does not have config --local, so create a temporary home # directory to permit using git config --global without stepping on # developer configuration. self.useFixture(fixtures.TempHomeDir()) self.useFixture(GPGKeyFixture()) self.useFixture(fixtures.NestedTempfile()) self.temp_dir = self.useFixture(fixtures.TempDir()).path self.reporoot = os.path.join(self.temp_dir, 'reporoot') self.notesdir = os.path.join(self.reporoot, 'releasenotes', 'notes', ) self._git_setup() self._counter = itertools.count(1) self.get_note_num = lambda: next(self._counter) class BasicTest(Base): def test_non_python_no_tags(self): filename = self._add_notes_file() raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'0.0.0': [filename]}, results, ) def test_python_no_tags(self): self._make_python_package() filename = self._add_notes_file() raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'0.0.0': [filename]}, results, ) def test_note_before_tag(self): filename = self._add_notes_file() self._add_other_file('not-a-release-note.txt') self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'1.0.0': [filename]}, results, ) def test_note_commit_tagged(self): filename = self._add_notes_file() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'1.0.0': [filename]}, results, ) def test_note_commit_after_tag(self): self._make_python_package() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') filename = self._add_notes_file() raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'1.0.0-1': [filename]}, results, ) def test_other_commit_after_tag(self): filename = self._add_notes_file() self._add_other_file('ignore-1.txt') self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') self._add_other_file('ignore-2.txt') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'1.0.0': [filename]}, results, ) def test_multiple_notes_after_tag(self): self._make_python_package() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') f1 = self._add_notes_file() f2 = self._add_notes_file() raw_results = 
scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'1.0.0-2': [f1, f2]}, results, ) def test_multiple_notes_within_tag(self): self._make_python_package() f1 = self._add_notes_file(commit=False) f2 = self._add_notes_file() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'1.0.0': [f1, f2]}, results, ) def test_multiple_tags(self): self._make_python_package() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') f1 = self._add_notes_file() self._run_git('tag', '-s', '-m', 'first tag', '2.0.0') f2 = self._add_notes_file() raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'2.0.0': [f1], '2.0.0-1': [f2], }, results, ) def test_rename_file(self): self._make_python_package() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') f1 = self._add_notes_file('slug1') self._run_git('tag', '-s', '-m', 'first tag', '2.0.0') f2 = f1.replace('slug1', 'slug2') self._run_git('mv', f1, f2) self._git_commit('rename note file') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'2.0.0': [f2], }, results, ) def test_rename_file_sort_earlier(self): self._make_python_package() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') f1 = self._add_notes_file('slug1') self._run_git('tag', '-s', '-m', 'first tag', '2.0.0') f2 = f1.replace('slug1', 'slug0') self._run_git('mv', f1, f2) self._git_commit('rename note file') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'2.0.0': [f2], }, results, ) def test_edit_file(self): self._make_python_package() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') f1 = self._add_notes_file() self._run_git('tag', '-s', '-m', 'first tag', '2.0.0') with open(os.path.join(self.reporoot, f1), 'w') as f: f.write('---\npreamble: new contents for file') self._git_commit('edit note file') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'2.0.0': [f1], }, results, ) def test_legacy_file(self): self._make_python_package() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') f1 = self._add_notes_file('slug1', legacy=True) self._run_git('tag', '-s', '-m', 'first tag', '2.0.0') f2 = f1.replace('slug1', 'slug2') self._run_git('mv', f1, f2) self._git_commit('rename note file') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'2.0.0': [f2], }, results, ) def test_rename_legacy_file_to_new(self): self._make_python_package() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') f1 = self._add_notes_file('slug1', legacy=True) self._run_git('tag', '-s', '-m', 'first tag', '2.0.0') # Rename the file with the new convention of placing the UUID # after the slug instead of before. 
f2 = f1.replace('0000000000000001-slug1', 'slug1-0000000000000001') self._run_git('mv', f1, f2) self._git_commit('rename note file') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'2.0.0': [f2], }, results, ) class MergeCommitTest(Base): def test_1(self): # Create changes on master and in the branch # in order so the history is "normal" n1 = self._add_notes_file() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') self._run_git('checkout', '-b', 'test_merge_commit') n2 = self._add_notes_file() self._run_git('checkout', 'master') self._add_other_file('ignore-1.txt') self._run_git('merge', '--no-ff', 'test_merge_commit') self._add_other_file('ignore-2.txt') self._run_git('tag', '-s', '-m', 'second tag', '2.0.0') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'1.0.0': [n1], '2.0.0': [n2]}, results, ) self.assertEqual( ['2.0.0', '1.0.0'], list(raw_results.keys()), ) def test_2(self): # Create changes on the branch before the tag into which it is # actually merged. self._add_other_file('ignore-0.txt') self._run_git('checkout', '-b', 'test_merge_commit') n1 = self._add_notes_file() self._run_git('checkout', 'master') n2 = self._add_notes_file() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') self._add_other_file('ignore-1.txt') self._run_git('merge', '--no-ff', 'test_merge_commit') self._add_other_file('ignore-2.txt') self._run_git('tag', '-s', '-m', 'second tag', '2.0.0') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'1.0.0': [n2], '2.0.0': [n1]}, results, ) self.assertEqual( ['2.0.0', '1.0.0'], list(raw_results.keys()), ) def test_3(self): # Create changes on the branch before the tag into which it is # actually merged, with another tag in between the time of the # commit and the time of the merge. This should reflect the # order of events described in bug #1522153. self._add_other_file('ignore-0.txt') self._run_git('checkout', '-b', 'test_merge_commit') n1 = self._add_notes_file() self._run_git('checkout', 'master') n2 = self._add_notes_file() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') self._add_other_file('ignore-1.txt') self._run_git('tag', '-s', '-m', 'second tag', '1.1.0') self._run_git('merge', '--no-ff', 'test_merge_commit') self._add_other_file('ignore-2.txt') self._run_git('tag', '-s', '-m', 'third tag', '2.0.0') self._add_other_file('ignore-3.txt') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } # Since the 1.1.0 tag has no notes files, it does not appear # in the output. It's only there to trigger the bug as it was # originally reported. self.assertEqual( {'1.0.0': [n2], '2.0.0': [n1]}, results, ) self.assertEqual( ['2.0.0', '1.0.0'], list(raw_results.keys()), ) def test_4(self): # Create changes on the branch before the tag into which it is # actually merged, with another tag in between the time of the # commit and the time of the merge. This should reflect the # order of events described in bug #1522153. 
self._add_other_file('ignore-0.txt') self._run_git('checkout', '-b', 'test_merge_commit') n1 = self._add_notes_file() self._run_git('checkout', 'master') n2 = self._add_notes_file() self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') self._add_other_file('ignore-1.txt') n3 = self._add_notes_file() self._run_git('tag', '-s', '-m', 'second tag', '1.1.0') self._run_git('merge', '--no-ff', 'test_merge_commit') self._add_other_file('ignore-2.txt') self._run_git('tag', '-s', '-m', 'third tag', '2.0.0') self._add_other_file('ignore-3.txt') raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( {'1.0.0': [n2], '1.1.0': [n3], '2.0.0': [n1]}, results, ) self.assertEqual( ['2.0.0', '1.1.0', '1.0.0'], list(raw_results.keys()), ) class UniqueIdTest(Base): def test_legacy(self): uid = scanner._get_unique_id( 'releasenotes/notes/0000000000000001-slug1.yaml' ) self.assertEqual('0000000000000001', uid) def test_modern(self): uid = scanner._get_unique_id( 'releasenotes/notes/slug1-0000000000000001.yaml' ) self.assertEqual('0000000000000001', uid) class BranchTest(Base): def setUp(self): super(BranchTest, self).setUp() self._make_python_package() self.f1 = self._add_notes_file('slug1') self._run_git('tag', '-s', '-m', 'first tag', '1.0.0') self.f2 = self._add_notes_file('slug2') self._run_git('tag', '-s', '-m', 'first tag', '2.0.0') self._add_notes_file('slug3') self._run_git('tag', '-s', '-m', 'first tag', '3.0.0') def test_files_current_branch(self): self._run_git('checkout', '2.0.0') self._run_git('checkout', '-b', 'stable/2') f21 = self._add_notes_file('slug21') log_text = self._run_git('log') self.addDetail('git log', text_content(log_text)) raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( { '1.0.0': [self.f1], '2.0.0': [self.f2], '2.0.0-1': [f21], }, results, ) def test_files_stable_from_master(self): self._run_git('checkout', '2.0.0') self._run_git('checkout', '-b', 'stable/2') f21 = self._add_notes_file('slug21') self._run_git('checkout', 'master') log_text = self._run_git('log', '--pretty=%x00%H %d', '--name-only', 'stable/2') self.addDetail('git log', text_content(log_text)) raw_results = scanner.get_notes_by_version( self.reporoot, 'releasenotes/notes', 'stable/2', ) results = { k: [f for (f, n) in v] for (k, v) in raw_results.items() } self.assertEqual( { '1.0.0': [self.f1], '2.0.0': [self.f2], '2.0.0-1': [f21], }, results, ) class GetTagsParseTest(base.TestCase): EXPECTED = [ '2.0.0', '1.8.1', '1.8.0', '1.7.1', '1.7.0', '1.6.0', '1.5.0', '1.4.0', '1.3.0', '1.2.0', '1.1.0', '1.0.0', '0.11.2', '0.11.1', '0.11.0', '0.10.1', '0.10.0', '0.9.0', '0.8.0', '0.7.1', '0.7.0', '0.6.0', '0.5.1', '0.5.0', '0.4.2', '0.4.1', '0.4.0', '0.3.2', '0.3.1', '0.3.0', '0.2.5', '0.2.4', '0.2.3', '0.2.2', '0.2.1', '0.2.0', '0.1.3', '0.1.2', '0.1.1', '0.1.0', ] def test_keystoneclient_ubuntu_1_9_1(self): # git 1.9.1 as it produces output on ubuntu for python-keystoneclient # git log --simplify-by-decoration --pretty="%d" tag_list_output = textwrap.dedent(""" (HEAD, origin/master, origin/HEAD, gerrit/master, master) (apu/master) (tag: 2.0.0) (tag: 1.8.1) (tag: 1.8.0) (tag: 1.7.1) (tag: 1.7.0) (tag: 1.6.0) (tag: 1.5.0) (tag: 1.4.0) (uncap-requirements) (tag: 1.3.0) (tag: 1.2.0) (tag: 1.1.0) (tag: 1.0.0) (tag: 0.11.2) (tag: 0.11.1) (tag: 0.11.0) (tag: 0.10.1) (tag: 0.10.0) (tag: 0.9.0) 
(tag: 0.8.0) (tag: 0.7.1) (tag: 0.7.0) (tag: 0.6.0) (tag: 0.5.1) (tag: 0.5.0) (tag: 0.4.2) (tag: 0.4.1) (tag: 0.4.0) (tag: 0.3.2) (tag: 0.3.1) (tag: 0.3.0) (tag: 0.2.5) (tag: 0.2.4) (tag: 0.2.3) (tag: 0.2.2) (tag: 0.2.1) (tag: 0.2.0) (origin/feature/keystone-v3, gerrit/feature/keystone-v3) (tag: 0.1.3) (tag: 0.1.2) (tag: 0.1.1) (tag: 0.1.0) (tag: folsom-1) (tag: essex-rc1) (tag: essex-4) (tag: essex-3) """) with mock.patch('reno.utils.check_output') as co: co.return_value = tag_list_output actual = scanner._get_version_tags_on_branch('reporoot', branch=None) self.assertEqual(self.EXPECTED, actual) def test_keystoneclient_rhel_1_7_1(self): # git 1.7.1 as it produces output on RHEL 6 for python-keystoneclient # git log --simplify-by-decoration --pretty="%d" tag_list_output = textwrap.dedent(""" (HEAD, origin/master, origin/HEAD, master) (tag: 2.0.0) (tag: 1.8.1) (tag: 1.8.0) (tag: 1.7.1) (tag: 1.7.0) (tag: 1.6.0) (tag: 1.5.0) (tag: 1.4.0) (tag: 1.3.0) (tag: 1.2.0) (tag: 1.1.0) (tag: 1.0.0) (tag: 0.11.2) (tag: 0.11.1) (tag: 0.11.0) (tag: 0.10.1) (tag: 0.10.0) (tag: 0.9.0) (tag: 0.8.0) (tag: 0.7.1) (tag: 0.7.0) (tag: 0.6.0) (tag: 0.5.1) (tag: 0.5.0) (tag: 0.4.2) (tag: 0.4.1) (tag: 0.4.0) (tag: 0.3.2) (tag: 0.3.1) (tag: 0.3.0) (tag: 0.2.5) (tag: 0.2.4) (tag: 0.2.3) (tag: 0.2.2) (tag: 0.2.1) (tag: 0.2.0) (tag: 0.1.3) (0.1.2) (tag: 0.1.1) (0.1.0) (tag: folsom-1) (tag: essex-rc1) (essex-4) (essex-3) """) with mock.patch('reno.utils.check_output') as co: co.return_value = tag_list_output actual = scanner._get_version_tags_on_branch('reporoot', branch=None) self.assertEqual(self.EXPECTED, actual)
py
1a334a2b6aa53798b6796483db922a511d31c2f7
import sys import os from argparse import ArgumentParser import copy import glob import numpy as np import json import os import pandas as pd from transformers import AutoModelForCausalLM, AutoTokenizer from urllib.parse import urlparse, urljoin import torch import torch.nn as nn from torch.optim.lr_scheduler import CosineAnnealingLR from pytorch_lightning.core.lightning import LightningModule import pytorch_lightning as pl import tarfile import urllib import wandb #wandb.Api() data = commonsense() from reward_model import Reward from data_loaders import TextDataModule, TextDataset, commonsense from adapters import AdapterLayer, add_adapters, add_adapter_skip from trainer import create_prompt, BaP from pytorch_lightning.loggers import WandbLogger wandb.init() def main(args): wandb_logger = WandbLogger() Text = TextDataModule(args) model = BaP(args) trainer = pl.Trainer(logger=wandb_logger, gpus=1, max_epochs=args.n_epochs, progress_bar_refresh_rate=10, val_check_interval=args.val_check_interval, precision=args.precision, gradient_clip_val=0.5, accumulate_grad_batches=4, log_every_n_steps=1) trainer.fit(model, Text) if __name__ == '__main__': parser = ArgumentParser() # model args parser.add_argument('--self_prune', type = bool, default = False, help='use self-pruning: language model as a reward model') parser.add_argument('--lm_name', type = str, default = 'gpt2-large', help='Name language model') # "EleutherAI/gpt-neo-1.3B" 'sshleifer/tiny-gpt2' parser.add_argument('--use_adapters', type=bool, default = True, help = 'Whether to use adapters') # data loader args parser.add_argument('--batch_size', type=int, default = 4, help = 'Batch size training') parser.add_argument('--val_batch_size', type=int, default = 4, help = 'Batch size validation and test') parser.add_argument('--num_workers', type=int, default = 0, help = 'Number of workers') # trainer args parser.add_argument('--val_check_interval', type=float, default = 1., help = 'Frequency validation set check') parser.add_argument('--precision', type=int, default = 32, help = 'Bit precision') # adapter args parser.add_argument('--reduction_factor', type = int, default = 12, help = 'Reduction factor inner dimension adapters') # babble args parser.add_argument('--num_beams', type = float, default = 16, help='Number of beams') parser.add_argument('--num_return_sequences', type = int, default = 8, help='Number of babbles') parser.add_argument('--max_babble_len', type = int, default = 15, help='Length generated text') # optimizer args parser.add_argument('--lr_init', type = float, default = 1e-3, help='Initial learning rate') parser.add_argument('--lr_min', type = float, default = 1e-4, help='Final learning rate') parser.add_argument('--n_epochs', type = int, default = 4, help='Number of training epochs') parser.add_argument('--scheduler_period', type = int, default = 20, help='Frequency at which the learning rate gets updated') # loss args parser.add_argument('--loss_fn', type = str, default = 'CE', help='Loss function type: in [CE, PPO]') parser.add_argument('--loss_fns_val', type = list, default = ['CE'], help='Validation loss functions') parser.add_argument('--CE_top_k', type = int, default = 1, help='Number of best babbles to train on') parser.add_argument('--beta', type = float, default = 0.1, help='Weight of the regularisation KL term') args = parser.parse_args() main(args)
py
1a334ac6d60533243fa8b1d72d199379452f463e
# Copyright 2021 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np from test_tools.generators import scalar_gen import cunumeric as num def test(): test_values = [1, np.e, np.e ** 2, 0] for a in test_values: for (la, na) in zip(scalar_gen(num, a), scalar_gen(np, a)): assert np.array_equal(num.exp2(la), np.exp2(na)) if __name__ == "__main__": test()
py
1a334c51efad9e84473cac7d9bab92e87f271eba
from .models import Customer, CustomerProduct from .serializers import CustomerSerializer, CustomerProductSerializer from rest_framework import viewsets, permissions class CustomerViewSet(viewsets.ModelViewSet): queryset = Customer.objects.all() serializer_class = CustomerSerializer permission_classes = [permissions.IsAuthenticated] class CustomerProductViewSet(viewsets.ModelViewSet): queryset = CustomerProduct.objects.all() serializer_class = CustomerProductSerializer permission_classes = [permissions.IsAuthenticated]
py
1a334cc46f8eee4ca5d2e467562be7411b3f4aee
# encoding: utf-8 import datetime __all__ = [ 'info', ] def info(): return { 'birthday': datetime.date(1992, 11, 15), 'class': 1, 'family_name_en': u'minegishi', 'family_name_kana': u'みねぎし', 'first_name_en': u'minami', 'first_name_kana': u'みなみ', 'graduate_date': None, 'hometown': u'東京', 'name_en': u'Minegishi Minami', 'name_ja': u'峯岸みなみ', 'name_kana': u'みねぎし みなみ', 'nick': u'みいちゃん', 'team': u'K', }
py
1a334e0fb7efb62931a353e8b60cdcf872458ada
#-*- coding: utf-8 -*- import urllib from xml.etree import ElementTree as ET from choco.utils.unicode import u from modules import module from choco.contrib.constants import ContentType from choco.kakao.response import KakaoResponse WEATHER_URL = "http://weather.service.msn.com/data.aspx?weadergreetype=C&culture=ko-kr&weasearchstr={0}" @module.route(ur'^([가-힣]+)\s{0,}?날씨', re=True, prefix=False) def forecast(request, place): text = u'' try: str_place = u(place) xml = urllib.urlopen(WEATHER_URL.format(str_place)).read() doc = ET.fromstring(xml) current = doc[0][0].attrib text = u"[{0} 날씨]\r\n".format(place) text += u"기준: {0} {1}\r\n".format(current['date'], current['observationtime']) text += u"{0}\r\n".format(current['skytext']) text += u"온도: {0}℃\r\n".format(current['temperature']) text += u"습도: {0}%".format(current['humidity']) except Exception, e: text = u"날씨 데이터를 가져오지 못했습니다." return KakaoResponse(text)
py
1a334e3a9558e9e98315388b0c18009bf80a188b
def es_bis(anio): if anio%4 == 0 and (anio%400 == 0 or not anio%100 == 0): return True else: return False def d01M(anio, dia1): if es_bis(anio): lista = [1, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 336] dia1anio2 = (dia1 + 2)%7 else: lista = [1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335] dia1anio2 = (dia1 + 1)%7 dias = [] for i in lista: dia = (i + dia1 - 1) % 7 dias.append(dia) return(dias, dia1anio2) inicio = 2 total = 0 for i in range(1901, 2000+1): M = d01M(i, inicio) inicio = M[1] total += M[0].count(0) print(total) #171 #[Finished in 0.1s] #http://www.cuandoenelmundo.com/calendario/guatemala/1900
py
1a334f8978ee2fbe757526a5cfdaffbcc4af810c
import os from sandbox.rocky.tf.baselines.nn_baseline import NNBaseline from sandbox.rocky.tf.core.network import ConvNetwork from sandbox.rocky.tf.policies.gaussian_conv_feature_policy import GaussianConvFeaturePolicy from sandbox.rocky.tf.policies.gaussian_conv_policy import GaussianConvPolicy from rllab.baselines.zero_baseline import ZeroBaseline from rllab.envs.normalized_env import normalize from sandbox.rocky.tf.samplers.batch_sampler import BatchSampler from sandbox.rocky.tf.algos.trpo import TRPO from rllab.misc.instrument import stub, run_experiment_lite from sandbox.rocky.tf.envs.base import TfEnv from rllab.envs.gym_env import GymEnv from railrl.algos.icm_trpo_tf import ICM import itertools import tensorflow as tf stub(globals()) # Params range seeds = range(0, 5) for seed in seeds: env = TfEnv(normalize(env=GymEnv('Box3dReachPixel-v11',record_video=False, \ log_dir='/tmp/gym_test',record_log=False))) env_spec = env.spec policy_cnn = ConvNetwork( name="policy_conv_network", input_shape=env_spec.observation_space.shape, output_dim=env_spec.action_space.flat_dim, conv_filters=(64, 64, 64, 32), conv_filter_sizes=((5,5),(3,3),(3,3),(3,3)), conv_strides=(3, 3, 3, 2), conv_pads=('SAME', 'SAME', 'SAME', 'SAME'), hidden_sizes=(256,), hidden_nonlinearity=tf.nn.relu, output_nonlinearity=None, ) baseline_cnn = ConvNetwork( name="baseline_conv_network", input_shape=env_spec.observation_space.shape, output_dim=env_spec.action_space.flat_dim, conv_filters=(64, 64, 64, 32), conv_filter_sizes=((5,5),(3,3),(3,3),(3,3)), conv_strides=(3, 3, 3, 2), conv_pads=('SAME', 'SAME', 'SAME', 'SAME'), hidden_sizes=(256,), hidden_nonlinearity=tf.nn.relu, output_nonlinearity=None, ) policy = GaussianConvFeaturePolicy( "conv_feature_policy", env_spec=env_spec, feature_network=policy_cnn, hidden_sizes=(128,64), clip_action=False, ) baseline = NNBaseline( env_spec=env_spec, feature_network=baseline_cnn, hidden_sizes=(128,64), hidden_nonlinearity=tf.nn.relu, init_lr=0.001, n_itr=5, train_feature_network=True, ) batch_size = 9600 algo = TRPO( env=env, policy=policy, baseline=baseline, batch_size=batch_size, whole_paths=True, max_path_length=1000, n_itr=2000, step_size=0.01, subsample_factor=0.2, sampler_cls=BatchSampler, optimizer_args={ 'num_slices' : 4, } ) icm = ICM( env, algo, '/z/dianchen/box3d/trpo_box3d_pixel_v11_tf_icm_pretrained_cnn_norew_fw0.01_%d'%seed, forward_weight=0.01, external_reward_weight=0.0, init_learning_rate=1e-4, forward_cos=True, replay_pool_size=100000, n_updates_per_iter=1000, normalize_input=True, obs_dtype='uint8', pretrained_icm=True, pretrained_icm_path='/z/dianchen/tfmodel_box3d/icm_supervised_box3dpixel_v11_box_dense_2e3_fw_0.01_lr_5e-4.pkl', ) run_experiment_lite( icm.train(), exp_prefix='trpo_box3d_pixel_v11_tf_icm_pretrained_cnn_norew_fw0.01', n_parallel=12, snapshot_mode="gap", snapshot_gap=200, seed=seed, mode="local" )
py
1a334fffcfb07f9129626d120cf6d65ef482ae07
from flask import Blueprint, render_template, request from geopy.geocoders import Nominatim from geopy import distance import pandas as pd from shapely.geometry import Polygon from shapely.geometry import Point import logging # Constants MKAD_LOCATION = (55.8277252, 37.6387268) # Nominatim geolocation service geolocator = Nominatim(user_agent="myapp.py") # Creating the Blueprint distance_bp = Blueprint('distance_bp', __name__) # Configure the logging logging.basicConfig(filename = 'result.log', level=logging.DEBUG) # Functions def create_polygon(): """Return a polygon created with the set of coordinates that delimit the MKAD area. """ df = pd.read_csv(r'mkad_coordinates.csv') df = df.rename(columns = {'37.842762': 'lat', "55.774558" : 'long'}) polygon_matrix = [[df['long'][ind], df['lat'][ind]] for ind in df.index] return Polygon(polygon_matrix) def get_distance(lat, long): """If the point related to lat and long coordinates is inside MKAD area, distance is not calculated else, it assigns the geodesic distance. """ if(create_polygon().contains(Point(lat, long))): dist = "Address is inside of MKAD" logging.debug("distance: {}".format(dist)) else: # Use geopy to get the geodesic distance dist = distance.distance(MKAD_LOCATION, (lat, long)) logging.debug("distance: {}".format(dist)) return dist def get_location_parameters(loc): """If the address is found with geopy, it obtains the lat, long, address and distance. Otherwise it assigns null values to the previous mentioned variables. """ location = geolocator.geocode(loc) if(location is not None): # If the address exists in database latitude = location.latitude longitude = location.longitude formatted_address = location.address dist = get_distance(latitude, longitude) else: latitude, longitude, formatted_address, dist = null_address() return latitude, longitude, formatted_address, dist def no_input(): """Fills variables for null address.""" return "Null input", "Null input", "Null input","Null input" def null_address(): """Fills variables for an unknown address.""" return "Null address", "Null address", "Null address", "Null address" @distance_bp.route('/result',methods = ['POST', 'GET']) def result(): """The blueprint finds the distance from Moscow Ring Road to an addressed passed to the application in an HTTP request. If the address is located inside the MKAD, the distance is stated this way; if the geolocator is unable to find the address the results are stated as null address; and if the input address is null, the results are stated as null input. """ if request.method == 'POST': location = request.form["location"] if(location.strip()): # If location is not null (latitude, longitude, formatted_address, dist) = get_location_parameters(location) else: (latitude, longitude, formatted_address, dist) = no_input() # Write address and distance into .log file logging.debug("{} distance to Moscow Ring Road: {}".format(formatted_address, dist)) return render_template("result.html", result = formatted_address, Latitude=latitude, longitude=longitude, distance=dist)
py
1a335099db8df28a884f81399e60743025946078
__author__ = 'Abel' # -*- coding: utf-8 -*- from django.db import models class Pais(models.Model): """ Nomenclador de países. Ejemplos: Cuba, Brasil, Venezuela @cvar pais: Representa el nombre completo del país. @cvar activo: Valor booleano que representa si un País está activo o no para ser usado en la aplicación. """ nombre = models.CharField('País', max_length=50) activo = models.BooleanField('Activo', default=True) class Meta: verbose_name = 'País' verbose_name_plural = 'Países' unique_together = ('nombre', 'activo') default_permissions = ('add', 'change', 'delete', 'view') def __str__(self): return self.nombre class Provincia(models.Model): """ Nomenclador de provincias. Ejemplos: Matanzas, Paraíba, Distrito Capital @cvar provincia: Representa el nombre completo de la provincia. @cvar pais: Representa la relación 1:M con País. @cvar activo: Valor booleano que representa si una Provincia está activa o no para ser usado en la aplicación. """ nombre = models.CharField('Provincia', max_length=50) activo = models.BooleanField('Activo', default=True) class Meta: verbose_name = 'Provincia' verbose_name_plural = 'Provincias' unique_together = ('nombre', 'activo') default_permissions = ('add', 'change', 'delete', 'view') def __str__(self): return self.nombre class Municipio(models.Model): """ Nomenclador de municipios. Ejemplos: Colón, Teixeira, Caracas @cvar provincia: Representa el nombre completo de la provincia. @cvar pais: Representa la relación 1:M con Provincia. @cvar activo: Valor booleano que representa si un Municipio está activo o no para ser usado en la aplicación. """ nombre = models.CharField('Municipio', max_length=50) activo = models.BooleanField('Activo', default=True) class Meta: verbose_name = 'Municipio' verbose_name_plural = 'Municipios' unique_together = ('nombre', 'activo') default_permissions = ('add', 'change', 'delete', 'view') def __str__(self): return self.nombre class Ubicacion(models.Model): """ Nomenclador de municipios. Ejemplos: Colón, Teixeira, Caracas @cvar provincia: Representa el nombre completo de la provincia. @cvar pais: Representa la relación 1:M con Provincia. @cvar activo: Valor booleano que representa si un Municipio está activo o no para ser usado en la aplicación. """ # TODO paisId = models.ForeignKey(Pais, verbose_name='País') provinciaId = models.ForeignKey(Provincia, verbose_name='Provincia') municipioId = models.ForeignKey(Municipio, verbose_name='Municipio') activo = models.BooleanField('Activo', default=True) class Meta: verbose_name = 'Ubicación' verbose_name_plural = 'Ubicaciones' unique_together = ('paisId', 'provinciaId', 'municipioId') default_permissions = ('add', 'change', 'delete', 'view') def __str__(self): return u"%s, %s, %s" % (self.municipioId, self.provinciaId, self.paisId) class Nacionalidad(models.Model): """ Nomenclador de nacionalidades. Ejemplos: Cubana, Brasileña, Venezolana @cvar nombre: Representa el nombre completo de la nacionalidad. @cvar activo: Valor booleano que representa si una Nacionalidad está activa o no para ser usado en la aplicación. """ nombre = models.CharField('Nacionalidad', max_length=50) activo = models.BooleanField('Activo', default=True) class Meta: verbose_name = 'Nacionalidad' verbose_name_plural = 'Nacionalidades' unique_together = ('nombre', 'activo') default_permissions = ('add', 'change', 'delete', 'view') def __str__(self): return self.nombre class Sexo(models.Model): """ Nomenclador de sexos. Ejemplos: Masculino, Femenino, Otro. @cvar nombre: Representa el nombre del sexo. 
@cvar activo: Valor booleano que representa si un sexo está activo para ser usado en la aplicación. """ nombre = models.CharField('Sexo', max_length=50) activo = models.BooleanField('Activo', default=True) class Meta: verbose_name = 'Sexo' verbose_name_plural = 'Sexos' unique_together = ('nombre', 'activo') default_permissions = ('add', 'change', 'delete', 'view') def __str__(self): return self.nombre class Etnia(models.Model): """ Nomenclador de etnias. Ejemplos: Blanco, Negro, Mestizo. @cvar nombre: Representa el nombre de la etnia. @cvar activo: Valor booleano que representa si una etnia está activa para ser usado en la aplicación. """ nombre = models.CharField('Etnia', unique=True, max_length=80) activo = models.BooleanField('Activo', default=True) class Meta: verbose_name = 'Etnia' verbose_name_plural = 'Etnias' unique_together = ('nombre', 'activo') default_permissions = ('add', 'change', 'delete', 'view') def __str__(self): return self.nombre class GradoCientifico(models.Model): """ Nomenclador de grados científicos. Ejemplos: Estudiante, Ingeniero, Máster en Ciencias. @cvar gradoCientifico: Representa el nombre completo del grado científico. @cvar abreviatura: Representa la abreviatura del grado científico. @cvar activo: Valor booleano que representa si un ScientificGrade está activo o no para ser usado en el sistema """ gradoCientifico = models.CharField('Grado científico', unique=True, max_length=80) abreviatura = models.CharField('Abreviatura', max_length=5) activo = models.BooleanField('Activo', default=True) class Meta: verbose_name = 'Grado científico' verbose_name_plural = 'Grados científicos' unique_together = ('gradoCientifico', 'abreviatura') default_permissions = ('add', 'change', 'delete', 'view') def __str__(self): return u"%s (%s)" % (self.gradoCientifico, self.abreviatura)
py
1a3351f4bf1d14cfbc0c0adc9e2de5a413afbcd5
#!/usr/bin/python3 # -*- coding: utf-8 -*- # system, numpy import os import time import numpy as np # pytorch, torch vision import torch import torch.optim as optim import torch.nn as nn import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader import torchvision.transforms as transforms from torch.utils.data.sampler import WeightedRandomSampler # user defined import utils from models import SEM_PCYC from logger import Logger, AverageMeter from options import Options from test import validate from data import DataGeneratorPaired, DataGeneratorSketch, DataGeneratorImage np.random.seed(0) def main(): gc.collect() torch.cuda.empty_cache() # Parse options args = Options().parse() print('Parameters:\t' + str(args)) if args.filter_sketch: assert args.dataset == 'Sketchy' if args.split_eccv_2018: assert args.dataset == 'Sketchy_extended' or args.dataset == 'Sketchy' if args.gzs_sbir: args.test = True # Read the config file and config = utils.read_config() path_dataset = config['path_dataset'] path_aux = config['path_aux'] # modify the log and check point paths ds_var = None if '_' in args.dataset: token = args.dataset.split('_') print('-----token-----') print(token) args.dataset = token[0] ds_var = token[1] str_aux = '' if args.split_eccv_2018: str_aux = 'split_eccv_2018' if args.gzs_sbir: str_aux = os.path.join(str_aux, 'generalized') args.semantic_models = sorted(args.semantic_models) model_name = '+'.join(args.semantic_models) root_path = os.path.join(path_dataset, args.dataset) path_sketch_model = os.path.join(path_aux, 'CheckPoints', args.dataset, 'sketch') print('-----path_sketch_model-----') print(path_sketch_model) path_image_model = os.path.join(path_aux, 'CheckPoints', args.dataset, 'image') print('-----path_image_model-----') print(path_image_model) path_cp = os.path.join(path_aux, 'CheckPoints', args.dataset, str_aux, model_name, str(args.dim_out)) print('-----path_cp-----') print(path_cp) path_log = os.path.join(path_aux, 'LogFiles', args.dataset, str_aux, model_name, str(args.dim_out)) path_results = os.path.join(path_aux, 'Results', args.dataset, str_aux, model_name, str(args.dim_out)) files_semantic_labels = [] sem_dim = 0 for f in args.semantic_models: fi = os.path.join(path_aux, 'Semantic', args.dataset, f + '.npy') files_semantic_labels.append(fi) sem_dim += list(np.load(fi, allow_pickle=True).item().values())[0].shape[0] print('Checkpoint path: {}'.format(path_cp)) print('Logger path: {}'.format(path_log)) print('Result path: {}'.format(path_results)) # Parameters for transforming the images transform_image = transforms.Compose([transforms.Resize((args.im_sz, args.im_sz)), transforms.ToTensor()]) transform_sketch = transforms.Compose([transforms.Resize((args.sk_sz, args.sk_sz)), transforms.ToTensor()]) # Load the dataset print('Loading data...', end='') if args.dataset == 'Sketchy': if ds_var == 'extended': photo_dir = 'extended_photo' # photo or extended_photo photo_sd = '' else: photo_dir = 'photo' photo_sd = 'tx_000000000000' sketch_dir = 'sketch' sketch_sd = 'tx_000000000000' splits = utils.load_files_sketchy_zeroshot(root_path=root_path, split_eccv_2018=args.split_eccv_2018, photo_dir=photo_dir, sketch_dir=sketch_dir, photo_sd=photo_sd, sketch_sd=sketch_sd) elif args.dataset == 'TU-Berlin': photo_dir = 'images' sketch_dir = 'sketches' photo_sd = '' sketch_sd = '' splits = utils.load_files_tuberlin_zeroshot(root_path=root_path, photo_dir=photo_dir, sketch_dir=sketch_dir, photo_sd=photo_sd, sketch_sd=sketch_sd) elif args.dataset == 'intersection': 
photo_dir = 'images' sketch_dir = 'sketches' photo_sd = '' sketch_sd = '' splits = utils.load_files_tuberlin_zeroshot(root_path=root_path, photo_dir=photo_dir, sketch_dir=sketch_dir, photo_sd=photo_sd, sketch_sd=sketch_sd) else: raise Exception('Wrong dataset.') # Combine the valid and test set into test set splits['te_fls_sk'] = np.concatenate((splits['va_fls_sk'], splits['te_fls_sk']), axis=0) splits['te_clss_sk'] = np.concatenate((splits['va_clss_sk'], splits['te_clss_sk']), axis=0) splits['te_fls_im'] = np.concatenate((splits['va_fls_im'], splits['te_fls_im']), axis=0) splits['te_clss_im'] = np.concatenate((splits['va_clss_im'], splits['te_clss_im']), axis=0) print('--------args.gzs_sbir-----------') print(args.gzs_sbir) if args.gzs_sbir: perc = 0.2 _, idx_sk = np.unique(splits['tr_fls_sk'], return_index=True) tr_fls_sk_ = splits['tr_fls_sk'][idx_sk] tr_clss_sk_ = splits['tr_clss_sk'][idx_sk] _, idx_im = np.unique(splits['tr_fls_im'], return_index=True) tr_fls_im_ = splits['tr_fls_im'][idx_im] tr_clss_im_ = splits['tr_clss_im'][idx_im] if args.dataset == 'Sketchy' and args.filter_sketch: _, idx_sk = np.unique([f.split('-')[0] for f in tr_fls_sk_], return_index=True) tr_fls_sk_ = tr_fls_sk_[idx_sk] tr_clss_sk_ = tr_clss_sk_[idx_sk] idx_sk = np.sort(np.random.choice(tr_fls_sk_.shape[0], int(perc * splits['te_fls_sk'].shape[0]), replace=False)) idx_im = np.sort(np.random.choice(tr_fls_im_.shape[0], int(perc * splits['te_fls_im'].shape[0]), replace=False)) splits['te_fls_sk'] = np.concatenate((tr_fls_sk_[idx_sk], splits['te_fls_sk']), axis=0) splits['te_clss_sk'] = np.concatenate((tr_clss_sk_[idx_sk], splits['te_clss_sk']), axis=0) splits['te_fls_im'] = np.concatenate((tr_fls_im_[idx_im], splits['te_fls_im']), axis=0) splits['te_clss_im'] = np.concatenate((tr_clss_im_[idx_im], splits['te_clss_im']), axis=0) # class dictionary dict_clss = utils.create_dict_texts(splits['tr_clss_im']) data_train = DataGeneratorPaired(args.dataset, root_path, photo_dir, sketch_dir, photo_sd, sketch_sd, splits['tr_fls_sk'], splits['tr_fls_im'], splits['tr_clss_im'], transforms_sketch=transform_sketch, transforms_image=transform_image) data_valid_sketch = DataGeneratorSketch(args.dataset, root_path, sketch_dir, sketch_sd, splits['va_fls_sk'], splits['va_clss_sk'], transforms=transform_sketch) data_valid_image = DataGeneratorImage(args.dataset, root_path, photo_dir, photo_sd, splits['va_fls_im'], splits['va_clss_im'], transforms=transform_image) data_test_sketch = DataGeneratorSketch(args.dataset, root_path, sketch_dir, sketch_sd, splits['te_fls_sk'], splits['te_clss_sk'], transforms=transform_sketch) data_test_image = DataGeneratorImage(args.dataset, root_path, photo_dir, photo_sd, splits['te_fls_im'], splits['te_clss_im'], transforms=transform_image) print('Done') train_sampler = WeightedRandomSampler(data_train.get_weights(), num_samples=args.epoch_size * args.batch_size, replacement=True) # PyTorch train loader train_loader = DataLoader(dataset=data_train, batch_size=args.batch_size, sampler=train_sampler, num_workers=args.num_workers, pin_memory=True) # PyTorch valid loader for sketch valid_loader_sketch = DataLoader(dataset=data_valid_sketch, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) # PyTorch valid loader for image valid_loader_image = DataLoader(dataset=data_valid_image, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) # PyTorch test loader for sketch test_loader_sketch = DataLoader(dataset=data_test_sketch, 
batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) # PyTorch test loader for image test_loader_image = DataLoader(dataset=data_test_image, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) # Model parameters params_model = dict() # Paths to pre-trained sketch and image models params_model['path_sketch_model'] = path_sketch_model params_model['path_image_model'] = path_image_model # Dimensions params_model['dim_out'] = args.dim_out params_model['sem_dim'] = sem_dim # Number of classes params_model['num_clss'] = len(dict_clss) # Weight (on losses) parameters params_model['lambda_se'] = args.lambda_se params_model['lambda_im'] = args.lambda_im params_model['lambda_sk'] = args.lambda_sk params_model['lambda_gen_cyc'] = args.lambda_gen_cyc params_model['lambda_gen_adv'] = args.lambda_gen_adv params_model['lambda_gen_cls'] = args.lambda_gen_cls params_model['lambda_gen_reg'] = args.lambda_gen_reg params_model['lambda_disc_se'] = args.lambda_disc_se params_model['lambda_disc_sk'] = args.lambda_disc_sk params_model['lambda_disc_im'] = args.lambda_disc_im params_model['lambda_regular'] = args.lambda_regular # Optimizers' parameters params_model['lr'] = args.lr params_model['momentum'] = args.momentum params_model['milestones'] = args.milestones params_model['gamma'] = args.gamma # Files with semantic labels params_model['files_semantic_labels'] = files_semantic_labels # Class dictionary params_model['dict_clss'] = dict_clss print('-------------------model------------------------') print(params_model) # Model sem_pcyc_model = SEM_PCYC(params_model) cudnn.benchmark = True # Logger print('Setting logger...', end='') logger = Logger(path_log, force=True) print('Done') # Check cuda print('Checking cuda...', end='') # Check if CUDA is enabled if args.ngpu > 0 & torch.cuda.is_available(): print('*Cuda exists*...', end='') sem_pcyc_model = sem_pcyc_model.cuda() print('Done') best_map = 0 early_stop_counter = 0 # Epoch for loop if not args.test: print('***Train***') for epoch in range(args.epochs): sem_pcyc_model.scheduler_gen.step() sem_pcyc_model.scheduler_disc.step() sem_pcyc_model.scheduler_ae.step() # train on training set losses = train(train_loader, sem_pcyc_model, epoch, args) # evaluate on validation set, map_ since map is already there print('***Validation***') valid_data = validate(valid_loader_sketch, valid_loader_image, sem_pcyc_model, epoch, args) map_ = np.mean(valid_data['aps@all']) print('mAP@all on validation set after {0} epochs: {1:.4f} (real), {2:.4f} (binary)' .format(epoch + 1, map_, np.mean(valid_data['aps@all_bin']))) del valid_data if map_ > best_map: best_map = map_ early_stop_counter = 0 utils.save_checkpoint({'epoch': epoch + 1, 'state_dict': sem_pcyc_model.state_dict(), 'best_map': best_map}, directory=path_cp) # utils.save_checkpoint({'epoch': epoch + 1, 'state_dict': sem_pcyc_model, 'best_map': # best_map, 'test':'tooning'}, directory=path_cp) # utils.save_checkpoint(sem_pcyc_model, directory=path_cp) else: if args.early_stop == early_stop_counter: break early_stop_counter += 1 # Logger step logger.add_scalar('semantic autoencoder loss', losses['aut_enc'].avg) logger.add_scalar('generator adversarial loss', losses['gen_adv'].avg) logger.add_scalar('generator cycle consistency loss', losses['gen_cyc'].avg) logger.add_scalar('generator classification loss', losses['gen_cls'].avg) logger.add_scalar('generator regression loss', losses['gen_reg'].avg) logger.add_scalar('generator loss', 
losses['gen'].avg) logger.add_scalar('semantic discriminator loss', losses['disc_se'].avg) logger.add_scalar('sketch discriminator loss', losses['disc_sk'].avg) logger.add_scalar('image discriminator loss', losses['disc_im'].avg) logger.add_scalar('discriminator loss', losses['disc'].avg) logger.add_scalar('mean average precision', map_) logger.step() # load the best model yet best_model_file = os.path.join(path_cp, 'model_best.pth') if os.path.isfile(best_model_file): print("Loading best model from '{}'".format(best_model_file)) checkpoint = torch.load(best_model_file) epoch = checkpoint['epoch'] best_map = checkpoint['best_map'] sem_pcyc_model.load_state_dict(checkpoint['state_dict']) print("Loaded best model '{0}' (epoch {1}; mAP@all {2:.4f})".format(best_model_file, epoch, best_map)) print('***Test***') valid_data = validate(test_loader_sketch, test_loader_image, sem_pcyc_model, epoch, args) print('Results on test set: mAP@all = {1:.4f}, Prec@100 = {0:.4f}, mAP@200 = {3:.4f}, Prec@200 = {2:.4f}, ' 'Time = {4:.6f} || mAP@all (binary) = {6:.4f}, Prec@100 (binary) = {5:.4f}, mAP@200 (binary) = {8:.4f}, ' 'Prec@200 (binary) = {7:.4f}, Time (binary) = {9:.6f} ' .format(valid_data['prec@100'], np.mean(valid_data['aps@all']), valid_data['prec@200'], np.mean(valid_data['aps@200']), valid_data['time_euc'], valid_data['prec@100_bin'], np.mean(valid_data['aps@all_bin']), valid_data['prec@200_bin'], np.mean(valid_data['aps@200_bin']) , valid_data['time_bin'])) print('Saving qualitative results...', end='') path_qualitative_results = os.path.join(path_results, 'qualitative_results') utils.save_qualitative_results(root_path, sketch_dir, sketch_sd, photo_dir, photo_sd, splits['te_fls_sk'], splits['te_fls_im'], path_qualitative_results, valid_data['aps@all'], valid_data['sim_euc'], valid_data['str_sim'], save_image=args.save_image_results, nq=args.number_qualit_results, best=args.save_best_results) print('Done') else: print("No best model found at '{}'. Exiting...".format(best_model_file)) exit() def train(train_loader, sem_pcyc_model, epoch, args): # Switch to train mode sem_pcyc_model.train() batch_time = AverageMeter() losses_gen_adv = AverageMeter() losses_gen_cyc = AverageMeter() losses_gen_cls = AverageMeter() losses_gen_reg = AverageMeter() losses_gen = AverageMeter() losses_disc_se = AverageMeter() losses_disc_sk = AverageMeter() losses_disc_im = AverageMeter() losses_disc = AverageMeter() losses_aut_enc = AverageMeter() # Start counting time time_start = time.time() for i, (sk, im, cl) in enumerate(train_loader): # Transfer sk and im to cuda if torch.cuda.is_available(): sk, im = sk.cuda(), im.cuda() # Optimize parameters loss = sem_pcyc_model.module.optimize_params(sk, im, cl) # Store losses for visualization losses_aut_enc.update(loss['aut_enc'].item(), sk.size(0)) losses_gen_adv.update(loss['gen_adv'].item(), sk.size(0)) losses_gen_cyc.update(loss['gen_cyc'].item(), sk.size(0)) losses_gen_cls.update(loss['gen_cls'].item(), sk.size(0)) losses_gen_reg.update(loss['gen_reg'].item(), sk.size(0)) losses_gen.update(loss['gen'].item(), sk.size(0)) losses_disc_se.update(loss['disc_se'].item(), sk.size(0)) losses_disc_sk.update(loss['disc_sk'].item(), sk.size(0)) losses_disc_im.update(loss['disc_im'].item(), sk.size(0)) losses_disc.update(loss['disc'].item(), sk.size(0)) # time time_end = time.time() batch_time.update(time_end - time_start) time_start = time_end if (i + 1) % args.log_interval == 0: print('[Train] Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Gen. 
Loss {loss_gen.val:.4f} ({loss_gen.avg:.4f})\t' 'Disc. Loss {loss_disc.val:.4f} ({loss_disc.avg:.4f})\t' .format(epoch + 1, i + 1, len(train_loader), batch_time=batch_time, loss_gen=losses_gen, loss_disc=losses_disc)) losses = {'aut_enc': losses_aut_enc, 'gen_adv': losses_gen_adv, 'gen_cyc': losses_gen_cyc, 'gen_cls': losses_gen_cls, 'gen_reg': losses_gen_reg, 'gen': losses_gen, 'disc_se': losses_disc_se, 'disc_sk': losses_disc_sk, 'disc_im': losses_disc_im, 'disc': losses_disc} return losses if __name__ == '__main__': main()
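# --- Illustrative sketch, not part of the original script above ---
# The training script draws class-balanced batches by handing per-sample weights
# to a WeightedRandomSampler before building its DataLoader. A minimal,
# self-contained version of that pattern is sketched here; the toy tensors,
# sizes and weights are invented purely for illustration.
import torch
from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler

toy_x = torch.randn(8, 3)                       # 8 samples, 3 features (hypothetical)
toy_y = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1])  # imbalanced labels
# Weight each sample by the inverse frequency of its class.
class_counts = torch.bincount(toy_y).float()
sample_weights = (1.0 / class_counts)[toy_y]
sampler = WeightedRandomSampler(sample_weights, num_samples=16, replacement=True)
loader = DataLoader(TensorDataset(toy_x, toy_y), batch_size=4, sampler=sampler)
for xb, yb in loader:
    pass  # the minority class now appears roughly as often as the majority class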
py
1a335356c63a08d89fbaf07f434bb505292fa304
# -*- coding: utf-8 -*- ''' unit.loader ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Test Salt's loader ''' # Import Python libs from __future__ import absolute_import import inspect import tempfile import shutil import os import collections # Import Salt Testing libs from salttesting import TestCase from salttesting.helpers import ensure_in_syspath ensure_in_syspath('../../') import tests.integration # Import Salt libs # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.ext.six as six from salt.ext.six.moves import range from salt.config import minion_config # pylint: enable=no-name-in-module,redefined-builtin from salt.loader import LazyLoader, _module_dirs, grains class LazyLoaderVirtualEnabledTest(TestCase): ''' Test the base loader of salt. ''' def setUp(self): self.opts = minion_config(None) self.opts['disable_modules'] = ['pillar'] self.opts['grains'] = grains(self.opts) self.loader = LazyLoader(_module_dirs(self.opts, 'modules', 'module'), self.opts, tag='module') def test_basic(self): ''' Ensure that it only loads stuff when needed ''' # make sure it starts empty self.assertEqual(self.loader._dict, {}) # get something, and make sure its a func self.assertTrue(inspect.isfunction(self.loader['test.ping'])) # make sure we only loaded "test" functions for key, val in six.iteritems(self.loader._dict): self.assertEqual(key.split('.', 1)[0], 'test') # make sure the depends thing worked (double check of the depends testing, # since the loader does the calling magically self.assertFalse('test.missing_func' in self.loader._dict) def test_badkey(self): with self.assertRaises(KeyError): self.loader[None] # pylint: disable=W0104 with self.assertRaises(KeyError): self.loader[1] # pylint: disable=W0104 def test_disable(self): self.assertNotIn('pillar.items', self.loader) def test_len_load(self): ''' Since LazyLoader is a MutableMapping, if someone asks for len() we have to load all ''' self.assertEqual(self.loader._dict, {}) len(self.loader) # force a load all self.assertNotEqual(self.loader._dict, {}) def test_iter_load(self): ''' Since LazyLoader is a MutableMapping, if someone asks to iterate we have to load all ''' self.assertEqual(self.loader._dict, {}) # force a load all for key, func in six.iteritems(self.loader): break self.assertNotEqual(self.loader._dict, {}) def test_context(self): ''' Make sure context is shared across modules ''' # make sure it starts empty self.assertEqual(self.loader._dict, {}) # get something, and make sure its a func func = self.loader['test.ping'] func.__globals__['__context__']['foo'] = 'bar' self.assertEqual(self.loader['test.echo'].__globals__['__context__']['foo'], 'bar') self.assertEqual(self.loader['grains.get'].__globals__['__context__']['foo'], 'bar') def test_globals(self): func_globals = self.loader['test.ping'].__globals__ self.assertEqual(func_globals['__grains__'], self.opts.get('grains', {})) self.assertEqual(func_globals['__pillar__'], self.opts.get('pillar', {})) # the opts passed into modules is at least a subset of the whole opts for key, val in six.iteritems(func_globals['__opts__']): self.assertEqual(self.opts[key], val) def test_pack(self): self.loader.pack['__foo__'] = 'bar' func_globals = self.loader['test.ping'].__globals__ self.assertEqual(func_globals['__foo__'], 'bar') def test_virtual(self): self.assertNotIn('test_virtual.ping', self.loader) class LazyLoaderVirtualDisabledTest(TestCase): ''' Test the loader of salt without __virtual__ ''' def setUp(self): self.opts = _config = minion_config(None) self.opts['grains'] = 
grains(self.opts) self.loader = LazyLoader(_module_dirs(self.opts, 'modules', 'module'), self.opts, tag='module', virtual_enable=False) def test_virtual(self): self.assertTrue(inspect.isfunction(self.loader['test_virtual.ping'])) class LazyLoaderWhitelistTest(TestCase): ''' Test the loader of salt with a whitelist ''' def setUp(self): self.opts = _config = minion_config(None) self.loader = LazyLoader(_module_dirs(self.opts, 'modules', 'module'), self.opts, tag='module', whitelist=['test', 'pillar']) def test_whitelist(self): self.assertTrue(inspect.isfunction(self.loader['test.ping'])) self.assertTrue(inspect.isfunction(self.loader['pillar.get'])) self.assertNotIn('grains.get', self.loader) module_template = ''' __load__ = ['test', 'test_alias'] __func_alias__ = dict(test_alias='working_alias') from salt.utils.decorators import depends def test(): return {count} def test_alias(): return True def test2(): return True @depends('non_existantmodulename') def test3(): return True @depends('non_existantmodulename', fallback_function=test) def test4(): return True ''' class LazyLoaderReloadingTest(TestCase): ''' Test the loader of salt with changing modules ''' module_name = 'loadertest' module_key = 'loadertest.test' def setUp(self): self.opts = _config = minion_config(None) self.opts['grains'] = grains(self.opts) self.tmp_dir = tempfile.mkdtemp(dir=tests.integration.TMP) self.count = 0 dirs = _module_dirs(self.opts, 'modules', 'module') dirs.append(self.tmp_dir) self.loader = LazyLoader(dirs, self.opts, tag='module') def tearDown(self): shutil.rmtree(self.tmp_dir) def update_module(self): self.count += 1 with open(self.module_path, 'wb') as fh: fh.write(module_template.format(count=self.count)) fh.flush() os.fsync(fh.fileno()) # flush to disk # pyc files don't like it when we change the original quickly # since the header bytes only contain the timestamp (granularity of seconds) # TODO: don't write them? 
Is *much* slower on re-load (~3x) # https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode try: os.unlink(self.module_path + 'c') except OSError: pass def rm_module(self): os.unlink(self.module_path) os.unlink(self.module_path + 'c') @property def module_path(self): return os.path.join(self.tmp_dir, '{0}.py'.format(self.module_name)) def test_alias(self): ''' Make sure that you can access alias-d modules ''' # ensure it doesn't exist self.assertNotIn(self.module_key, self.loader) self.update_module() self.assertNotIn('{0}.test_alias'.format(self.module_name), self.loader) self.assertTrue(inspect.isfunction(self.loader['{0}.working_alias'.format(self.module_name)])) def test_clear(self): self.assertTrue(inspect.isfunction(self.loader['test.ping'])) self.update_module() # write out out custom module self.loader.clear() # clear the loader dict # force a load of our module self.assertTrue(inspect.isfunction(self.loader[self.module_key])) # make sure we only loaded our custom module # which means that we did correctly refresh the file mapping for k, v in six.iteritems(self.loader._dict): self.assertTrue(k.startswith(self.module_name)) def test_load(self): # ensure it doesn't exist self.assertNotIn(self.module_key, self.loader) self.update_module() self.assertTrue(inspect.isfunction(self.loader[self.module_key])) def test__load__(self): ''' If a module specifies __load__ we should only load/expose those modules ''' self.update_module() # ensure it doesn't exist self.assertNotIn(self.module_key + '2', self.loader) def test__load__and_depends(self): ''' If a module specifies __load__ we should only load/expose those modules ''' self.update_module() # ensure it doesn't exist self.assertNotIn(self.module_key + '3', self.loader) self.assertNotIn(self.module_key + '4', self.loader) def test_reload(self): # ensure it doesn't exist self.assertNotIn(self.module_key, self.loader) # make sure it updates correctly for x in range(1, 3): self.update_module() self.loader.clear() self.assertEqual(self.loader[self.module_key](), self.count) self.rm_module() # make sure that even if we remove the module, its still loaded until a clear self.assertEqual(self.loader[self.module_key](), self.count) self.loader.clear() self.assertNotIn(self.module_key, self.loader) submodule_template = ''' import lib def test(): return ({count}, lib.test()) ''' submodule_lib_template = ''' def test(): return {count} ''' class LazyLoaderSubmodReloadingTest(TestCase): ''' Test the loader of salt with changing modules ''' module_name = 'loadertestsubmod' module_key = 'loadertestsubmod.test' def setUp(self): self.opts = _config = minion_config(None) self.opts['grains'] = grains(self.opts) self.tmp_dir = tempfile.mkdtemp(dir=tests.integration.TMP) os.makedirs(self.module_dir) self.count = 0 self.lib_count = 0 dirs = _module_dirs(self.opts, 'modules', 'module') dirs.append(self.tmp_dir) self.loader = LazyLoader(dirs, self.opts, tag='module') def tearDown(self): shutil.rmtree(self.tmp_dir) def update_module(self): self.count += 1 with open(self.module_path, 'wb') as fh: fh.write(submodule_template.format(count=self.count)) fh.flush() os.fsync(fh.fileno()) # flush to disk # pyc files don't like it when we change the original quickly # since the header bytes only contain the timestamp (granularity of seconds) # TODO: don't write them? 
Is *much* slower on re-load (~3x) # https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode try: os.unlink(self.module_path + 'c') except OSError: pass def rm_module(self): os.unlink(self.module_path) os.unlink(self.module_path + 'c') def update_lib(self): self.lib_count += 1 with open(self.lib_path, 'wb') as fh: fh.write(submodule_lib_template.format(count=self.lib_count)) fh.flush() os.fsync(fh.fileno()) # flush to disk # pyc files don't like it when we change the original quickly # since the header bytes only contain the timestamp (granularity of seconds) # TODO: don't write them? Is *much* slower on re-load (~3x) # https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode try: os.unlink(self.lib_path + 'c') except OSError: pass def rm_lib(self): os.unlink(self.lib_path) os.unlink(self.lib_path + 'c') @property def module_dir(self): return os.path.join(self.tmp_dir, self.module_name) @property def module_path(self): return os.path.join(self.module_dir, '__init__.py') @property def lib_path(self): return os.path.join(self.module_dir, 'lib.py') def test_basic(self): # ensure it doesn't exist self.assertNotIn(self.module_key, self.loader) self.update_module() self.update_lib() self.loader.clear() self.assertIn(self.module_key, self.loader) def test_reload(self): # ensure it doesn't exist self.assertNotIn(self.module_key, self.loader) # update both the module and the lib for x in range(1, 3): self.update_module() self.update_lib() self.loader.clear() self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count)) # update just the module for x in range(1, 3): self.update_module() self.loader.clear() self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count)) # update just the lib for x in range(1, 3): self.update_lib() self.loader.clear() self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count)) self.rm_module() # make sure that even if we remove the module, its still loaded until a clear self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count)) self.loader.clear() self.assertNotIn(self.module_key, self.loader) def test_reload_missing_lib(self): # ensure it doesn't exist self.assertNotIn(self.module_key, self.loader) # update both the module and the lib self.update_module() self.update_lib() self.loader.clear() self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count)) # remove the lib, this means we should fail to load the module next time self.rm_lib() self.loader.clear() self.assertNotIn(self.module_key, self.loader) deep_init_base = ''' import top_lib import top_lib.mid_lib import top_lib.mid_lib.bot_lib def top(): return top_lib.test() def mid(): return top_lib.mid_lib.test() def bot(): return top_lib.mid_lib.bot_lib.test() ''' class LazyLoaderDeepSubmodReloadingTest(TestCase): module_name = 'loadertestsubmoddeep' libs = ('top_lib', 'mid_lib', 'bot_lib') def setUp(self): self.opts = _config = minion_config(None) self.tmp_dir = tempfile.mkdtemp(dir=tests.integration.TMP) os.makedirs(self.module_dir) self.lib_count = collections.defaultdict(int) # mapping of path -> count # bootstrap libs with open(os.path.join(self.module_dir, '__init__.py'), 'w') as fh: fh.write(deep_init_base) fh.flush() os.fsync(fh.fileno()) # flush to disk self.lib_paths = {} dir_path = self.module_dir for lib_name in self.libs: dir_path = os.path.join(dir_path, lib_name) self.lib_paths[lib_name] = dir_path os.makedirs(dir_path) self.update_lib(lib_name) dirs = _module_dirs(self.opts, 'modules', 
'module') dirs.append(self.tmp_dir) self.loader = LazyLoader(dirs, self.opts, tag='module') @property def module_dir(self): return os.path.join(self.tmp_dir, self.module_name) def update_lib(self, lib_name): path = os.path.join(self.lib_paths[lib_name], '__init__.py') self.lib_count[lib_name] += 1 with open(path, 'wb') as fh: fh.write(submodule_lib_template.format(count=self.lib_count[lib_name])) fh.flush() os.fsync(fh.fileno()) # flush to disk # pyc files don't like it when we change the original quickly # since the header bytes only contain the timestamp (granularity of seconds) # TODO: don't write them? Is *much* slower on re-load (~3x) # https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode try: os.unlink(path + 'c') except OSError: pass def tearDown(self): shutil.rmtree(self.tmp_dir) def test_basic(self): self.assertIn('{0}.top'.format(self.module_name), self.loader) def _verify_libs(self): for lib in self.libs: self.assertEqual(self.loader['{0}.{1}'.format(self.module_name, lib.replace('_lib', ''))](), self.lib_count[lib]) def test_reload(self): ''' Make sure that we can reload all libraries of arbitrary depth ''' self._verify_libs() # update them all for lib in self.libs: for x in xrange(5): self.update_lib(lib) self.loader.clear() self._verify_libs()
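# --- Illustrative sketch, not part of the original test module above ---
# The tests above rely on the loader behaving like a mapping that only
# materialises entries on first access, and that loads everything when len()
# or iteration is requested. This is a generic, read-only simplification of
# that access pattern, not Salt's actual LazyLoader implementation.
from collections.abc import Mapping

class TinyLazyMap(Mapping):
    def __init__(self, factories):
        self._factories = factories  # name -> zero-arg callable
        self._dict = {}              # loaded entries only

    def __getitem__(self, key):
        if key not in self._dict:
            self._dict[key] = self._factories[key]()  # load on demand
        return self._dict[key]

    def __iter__(self):
        for key in self._factories:  # iteration forces a full load
            self[key]
        return iter(self._dict)

    def __len__(self):
        return len(self._factories)

lazy = TinyLazyMap({'test.ping': lambda: (lambda: True)})
assert lazy._dict == {}            # nothing loaded yet
assert lazy['test.ping']() is True
assert 'test.ping' in lazy._dict   # loaded after first access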
py
1a3354b8c0f50e8d21146c3b7e9a1ba45502b22e
#!/usr/bin/env python
if __name__ == '__main__':
    import os
    import sys

    if sys.version_info[0:2] < (3, 8):
        raise SystemExit('Python 3.8+ is required')

    root_path = os.path.abspath(os.path.dirname(__file__))

    try:
        import mtp_common

        # NB: this version does not need to be updated unless mtp_common changes significantly
        if mtp_common.VERSION < (10,):
            raise ImportError
    except ImportError:
        try:
            import pkg_resources
        except ImportError:
            raise SystemExit('setuptools and pip are required')
        try:
            pip = pkg_resources.load_entry_point('pip', 'console_scripts', 'pip')
        except pkg_resources.ResolutionError:
            raise SystemExit('setuptools and pip are required')
        print('Pre-installing MTP-common and base requirements')
        pip(['install', '--requirement', f'{root_path}/requirements/base.txt'])

    from mtp_common.build_tasks.executor import Executor
    import mtp_transaction_uploader.build_tasks  # noqa

    exit(Executor(root_path=root_path).run())
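# --- Illustrative sketch, not part of the original script above ---
# The bootstrap resolves pip's console-script entry point through pkg_resources
# and calls it in-process. The same lookup can be done on its own; this variant
# only prints what would be invoked instead of installing anything.
import pkg_resources

try:
    pip_main = pkg_resources.load_entry_point('pip', 'console_scripts', 'pip')
    print('resolved pip entry point:', pip_main)
except pkg_resources.ResolutionError:
    print('pip is not available in this environment')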
py
1a33556991148b25e8e90202d074a583d6fe8122
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any

import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel

from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.warnings import WarningCache

warning_cache = WarningCache()


class _LightningModuleWrapperBase(torch.nn.Module):

    def __init__(self, pl_module: LightningModule):
        """
        Wraps the user's LightningModule and redirects the forward call to the appropriate
        method, either ``training_step``, ``validation_step`` or ``test_step``.
        If the LightningModule is in none of the states `training`, `testing` or `validation`,
        the inputs will be redirected to the
        :meth:`~pytorch_lightning.core.lightning.LightningModule.predict` method.
        Inheriting classes may also modify the inputs or outputs of forward.

        Args:
            pl_module: the model to wrap
        """
        super().__init__()
        self.module = pl_module

    def forward(self, *inputs, **kwargs):
        running_stage = self.module.running_stage

        if running_stage == RunningStage.TRAINING:
            output = self.module.training_step(*inputs, **kwargs)

            # In manual_optimization, we need to prevent DDP reducer as
            # it is done manually in ``LightningModule.manual_backward``
            # `require_backward_grad_sync` will be reset in the
            # ddp_plugin ``post_training_step`` hook
            if not self.module.automatic_optimization:
                self.module.trainer.model.require_backward_grad_sync = False
            warn_if_output_is_none(output, "training_step")

        elif running_stage == RunningStage.TESTING:
            output = self.module.test_step(*inputs, **kwargs)
            warn_if_output_is_none(output, "test_step")

        elif running_stage == RunningStage.EVALUATING:
            output = self.module.validation_step(*inputs, **kwargs)
            warn_if_output_is_none(output, "validation_step")

        else:
            output = self.module.predict(*inputs, **kwargs)

        return output


def warn_if_output_is_none(output: Any, method_name: str) -> None:
    """ Warns user about which method returned None. """
    if output is None:
        warning_cache.warn(f'Your {method_name} returned None. Did you forget to return an output?')


def unwrap_lightning_module(wrapped_model) -> LightningModule:
    model = wrapped_model
    if isinstance(model, (DistributedDataParallel, DataParallel)):
        model = model.module
    if isinstance(model, _LightningModuleWrapperBase):
        model = model.module
    return model
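# --- Illustrative sketch, not part of the original module above ---
# unwrap_lightning_module() simply peels off known wrapper layers via their
# ``.module`` attribute. The same peeling is shown here with a plain
# torch.nn.Module and DataParallel, so it runs without a LightningModule.
import torch
from torch.nn import DataParallel

inner = torch.nn.Linear(4, 2)
wrapped = DataParallel(inner)          # adds one ``.module`` indirection
assert wrapped.module is inner

def unwrap(model):
    # peel wrappers until no ``.module`` indirection is left (simplified)
    while isinstance(model, DataParallel):
        model = model.module
    return model

assert unwrap(wrapped) is inner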
py
1a3356f2835a8a4790aca3c9bcfce06737269b56
# -*- coding: utf-8 -*- """ Created on Thu Jun 2 09:35:39 2016 @author: bing """ # -*- coding: utf-8 -*- """ Created on Fri Mar 25 09:42:22 2016 @author: bing """ import numpy as np import scipy import numba import sys bohr_angstrom = 0.52917721092 hartree_wavenumber = 219474.63 #hartree_wavenumber = scipy.constants.value(u'hartree-inverse meter relationship') / 1e2 Vmin = -24.2288 b = np.array([-6.631e-02, 1.346e-01, -3.300e-02, 6e0, -1.4e01, -1.193e02, 2.290e02, \ 1.110e03, -1.850e03, -3.5e03, 6.0e03]) @numba.autojit def derivs(x): """ Morse potential """ PES = 'pH2' if PES == 'Morse': a, x0 = 1.02, 1.4 De = 0.176 / 100.0 d = (1.0-np.exp(-a*x)) v0 = De*d**2 dv = 2. * De * d * a * np.exp(-a*x) elif PES == 'HO': v0 = x**2/2.0 dv = x #ddv = 2.0 * De * (-d*np.exp(-a*((x-x0)))*a**2 + (np.exp(-a*(x-x0)))**2*a**2) elif PES == 'pH2': dx = 1e-4 v0 = np.zeros(Ntraj) dv = np.zeros(Ntraj) for i in range(Ntraj): v0[i] = vpot(x[i]) dv[i] = ( vpot(x[i] + dx) - v0[i])/dx return v0,dv @numba.autojit def qpot(x,p,r,w): """ Linear Quantum Force : direct polynomial fitting of derivative-log density (amplitude) curve_fit : randomly choose M points and do a nonlinear least-square fitting to a predefined functional form """ #tau = (max(xdata) - min(xdata))/(max(x) - min(x)) #if tau > 0.6: # pass #else: # print('Data points are not sampled well.' Nb = 4 S = np.zeros((Nb,Nb)) for j in range(Nb): for k in range(Nb): S[j,k] = np.dot(x**(j+k), w) bp = np.zeros(Nb) br = np.zeros(Nb) for n in range(Nb): bp[n] = np.dot(x**n * p, w) br[n] = np.dot(x**n * r, w) cp = np.linalg.solve(S,bp) cr = np.linalg.solve(S,br) #unit = np.identity(Nb) #r_approx = cr[0] * unit + cr[1] * x + cr[2] * x**2 + cr[3] * x**3 #p_approx = cp[0] * unit + cp[1] * x + cp[2] * x**2 + cp[3] * x**3 dr = cr[1] + 2. * cr[2] * x + 3. * cr[3] * x**2 #+ 4.0 * cr[4] * x**3 dp = cp[1] + 2. * cp[2] * x + 3. * cp[3] * x**2 #+ 4.0 * cp[4] * x**3 ddr = 2. * cr[2] + 6. * cr[3] * x # + 12.0 * cr[4] * x**2 ddp = 2. * cp[2] + 6. * cp[3] * x #+ 12.0 * cp[4] * x**2 fr = -1./2./am * (2. * r * dp + ddp) fq = 1./2./am * (2. 
* r * dr + ddr) Eu = -1./2./am * np.dot(r**2 + dr,w) return Eu,fq,fr @numba.autojit def sym(V): n = V.shape[-1] for i in range(n): for j in range(i): V[j,i] = V[i,j] return V @numba.autojit def vpot(r): re = 3.47005 De = 24.2288 r = r * bohr_angstrom beta_inf = np.log(2.0 * De / u_LR(re)) s = 0.0 for j in range(11): s += b[j] * y_ref(r,1)**j beta = y_ref(r,6) * beta_inf + (1.0 - y_ref(r,6)) * s vpot = De * (1.0 - u_LR(r)/u_LR(re) * np.exp(- beta * y_eq(r,6)))**2 vpot = vpot + Vmin vpot = vpot / hartree_wavenumber return vpot @numba.autojit def y_eq(r,n): re = 3.47005 y_eq = (r**n - re**n)/(r**n + re**n) return y_eq @numba.autojit def y_ref(r,n): r_ref = 4.60 z = (r**n - r_ref**n)/(r**n + r_ref**n) return z @numba.autojit def u_LR(r): C6 = 5.820364e04 C8 = 2.87052154e05 C10 = 1.80757343e06 z = damp(r,6) * C6/r**6 + damp(r,8) * C8/r**8 + damp(r,10) * C10 / r**10 return z @numba.autojit def damp(r,n): den = 1.10 z = (1.0 - np.exp(-3.30 * den * r / n - 0.423 * (den * r)**2/np.sqrt(float(n))))**(n-1) return z # initialization Ntraj = 4000 a0 = 0.5 x0 = 9.0 x = np.random.randn(Ntraj) #x = np.zeros(Ntraj) #for k in range(Ntraj): # x[k] = np.random.randn() # while x[k] > 3.0: # x[k] = np.random.randn() x = x / np.sqrt(2.0 * a0) + x0 p = np.zeros(Ntraj) r = - a0 * (x-x0) w = np.array([1./Ntraj]*Ntraj) am = 1837.0 Nt = 16000 dt = 1.0 dt2 = dt/2.0 t = 0.0 f = open('traj.dat','w') fe = open('en.out','w') f_MSE = open('rMSE.out','w') nout = 20 # number of trajectories to print fmt = ' {}' * (nout+1) + '\n' Eu = 0. Ndim = 1 # dimensionality of the system fric_cons = 0.0004 # friction constant v0, dv = derivs(x) Eu,fq,fr = qpot(x,p,r,w) for k in range(Nt): t = t + dt p += (- dv + fq) * dt2 - fric_cons * p * dt2 r += fr * dt2 x += p*dt/am # force field Eu, fq, fr = qpot(x,p,r,w) if Eu < 0: print('Error: U = {} should not be negative. \n'.format(Eu)) sys.exit() v0, dv = derivs(x) p += (- dv + fq) * dt2 - fric_cons * p * dt2 r += fr * dt2 f.write(fmt.format(t,*x[0:nout])) Ek = np.dot(p*p,w)/2./am * hartree_wavenumber Ev = np.dot(v0,w) * hartree_wavenumber Eu = Eu * hartree_wavenumber Etot = Ek + Ev + Eu fe.write('{} {} {} {} {} \n'.format(t,Ek,Ev,Eu,Etot)) if k == Nt-1: print('The total energy = {} cm-1. \n'.format(Etot)) fe.close() f.close() #a, x0, De = 1.02, 1.4, 0.176/100 #print('The well depth = {} cm-1. \n'.format(De * hartree_wavenumber)) # #omega = a * np.sqrt(2. * De / am ) #E0 = omega/2. - omega**2/16./De #dE = (Etot-E0) * hartree_wavenumber #print('Exact ground-state energy = {} Hartree. \nEnergy deviation = {} cm-1. \n'.format(E0,dE)) #
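# --- Illustrative sketch, not part of the original script above ---
# The 'Morse' branch of derivs() uses V(x) = De*(1 - exp(-a*x))**2 with the
# analytic derivative dV/dx = 2*De*(1 - exp(-a*x))*a*exp(-a*x). A quick central
# finite-difference check of that derivative, with the same a and De values:
import numpy as np

a_chk, De_chk = 1.02, 0.176 / 100.0

def v_morse(x):
    return De_chk * (1.0 - np.exp(-a_chk * x)) ** 2

def dv_morse(x):
    return 2.0 * De_chk * (1.0 - np.exp(-a_chk * x)) * a_chk * np.exp(-a_chk * x)

xs = np.linspace(0.5, 3.0, 5)
h = 1e-6
numeric = (v_morse(xs + h) - v_morse(xs - h)) / (2.0 * h)
assert np.allclose(numeric, dv_morse(xs), atol=1e-8)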
py
1a33573382b8115b8823296d94266d363a48169f
""" motifscan.config ---------------- MotifScan configuration handler, this configuration manages the paths of genome and motif data files in the file system. """ import os from configparser import ConfigParser, Error from motifscan.exceptions import InvalidConfigFileError, GenomeNotFoundError, \ MotifSetNotFoundError user_rc_path = os.path.expanduser("~/.motifscanrc") user_genome_dir = os.path.expanduser("~/.motifscan/genomes/") user_motif_dir = os.path.expanduser("~/.motifscan/motifs/") class Config: """Configuration handler for MotifScan.""" _sections = ['motifscan', 'genome', 'motif'] def __init__(self, path=None): self.path = path or user_rc_path self._config = ConfigParser(allow_no_value=False) try: self._config.read(self.path) except Error as e: raise InvalidConfigFileError(self.path) from e # set default for all sections for section in self._sections: if not self._config.has_section(section): self._config.add_section(section) # set the default genome root directory if not self._config.has_option('motifscan', 'genome_dir'): self.set_genome_dir(user_genome_dir) # set the default motif root directory if not self._config.has_option('motifscan', 'motif_dir'): self.set_motif_dir(user_motif_dir) def get_genome_dir(self): """Get the genome root directory.""" return self._config.get('motifscan', 'genome_dir') def set_genome_dir(self, path): """Set the specified path as the genome root directory.""" self._config.set('motifscan', 'genome_dir', path) def get_motif_dir(self): """Get the motif root directory.""" return self._config.get('motifscan', 'motif_dir') def set_motif_dir(self, path): """Set the specified path as the motif root directory.""" self._config.set('motifscan', 'motif_dir', path) def list_genome_assemblies(self): """List configured (installed) genome assemblies.""" for name, path in self._config.items('genome'): yield name, path def has_genome_assembly(self, name): """Returns if the specified genome assembly is configured.""" return self._config.has_option('genome', name) def get_genome_path(self, name): """Get the genome path of the specified genome assembly.""" if self._config.has_option('genome', name): return self._config.get('genome', name) else: raise GenomeNotFoundError(name) def set_genome_path(self, name, path): """Set the genome path for the specified genome assembly.""" self._config.set('genome', name, path) def remove_genome_path(self, name): """Remove the specified genome assembly out of the configuration.""" if self._config.has_option('genome', name): return self._config.remove_option('genome', name) else: raise GenomeNotFoundError(name) def list_motif_sets(self): """List configured (installed) motif PFMs sets.""" for name, path in self._config.items('motif'): yield name, path def has_motif_set(self, name): """Returns if the specified motif PFMs set is configured.""" return self._config.has_option('motif', name) def get_motif_path(self, name): """Get the motif path of the specified motif PFMs set.""" if self._config.has_option('motif', name): return self._config.get('motif', name) else: raise MotifSetNotFoundError(name) def set_motif_path(self, name, path): """Set the motif path for the specified motif PFMs set.""" self._config.set('motif', name, path) def remove_motif_path(self, name): """Remove the specified motif PFMs set out of the configuration.""" if self._config.has_option('motif', name): return self._config.remove_option('motif', name) else: raise MotifSetNotFoundError(name) def write(self, path=None): """Save the configuration.""" path = path or self.path 
with open(path, 'w') as f_config: self._config.write(f_config)
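# --- Illustrative sketch, not part of the original module above ---
# A typical round trip through the Config class defined above, assuming the
# motifscan package is importable. The genome name and path are hypothetical,
# and the config is written to a scratch location so ~/.motifscanrc is untouched.
import os
import tempfile
from motifscan.config import Config

tmp_rc = os.path.join(tempfile.mkdtemp(), 'motifscanrc')
config = Config(path=tmp_rc)                            # empty config at a scratch path
config.set_genome_path('hg19', '/data/genomes/hg19')    # hypothetical assembly/path
assert config.has_genome_assembly('hg19')
assert config.get_genome_path('hg19') == '/data/genomes/hg19'
config.write()                                          # saved back to tmp_rc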
py
1a3357f69b0cbeb2849eedf4f9a0341a052a816e
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Face class representing the Vision API's face detection response.""" from enum import Enum from google.cloud.vision.geometry import BoundsBase from google.cloud.vision.likelihood import _get_pb_likelihood from google.cloud.vision.likelihood import Likelihood from google.cloud.vision.geometry import Position class Angles(object): """Angles representing the positions of a face.""" def __init__(self, roll, pan, tilt): self._roll = roll self._pan = pan self._tilt = tilt @classmethod def from_api_repr(cls, angle): """Factory: construct the angles from an Vision API response. :type angle: dict :param angle: Dictionary representation of an angle. :rtype: :class:`~google.cloud.vision.face.Angles` :returns: An `Angles` instance with data parsed from `response`. """ roll = angle['rollAngle'] pan = angle['panAngle'] tilt = angle['tiltAngle'] return cls(roll, pan, tilt) @classmethod def from_pb(cls, angle): """Factory: convert protobuf Angle object to local Angle object. :type angle: :class:`~google.cloud.vision_v1.proto.\ image_annotator_pb2.FaceAnnotation` :param angle: Protobuf ``FaceAnnotation`` response with angle data. :rtype: :class:`~google.cloud.vision.face.Angles` :returns: Instance of ``Angles``. """ roll = angle.roll_angle pan = angle.pan_angle tilt = angle.tilt_angle return cls(roll, pan, tilt) @property def roll(self): """Roll angle of face. :rtype: float :returns: Roll angle of face in degrees. """ return self._roll @property def pan(self): """Pan angle of face. :rtype: float :returns: Pan angle of face in degrees. """ return self._pan @property def tilt(self): """Tilt angle of face. :rtype: float :returns: Tilt angle of face in degrees. """ return self._tilt class Bounds(BoundsBase): """The bounding polygon of the entire face.""" class Emotions(object): """Emotions displayed by the face detected in an image.""" def __init__(self, joy_likelihood, sorrow_likelihood, surprise_likelihood, anger_likelihood): self._joy_likelihood = joy_likelihood self._sorrow_likelihood = sorrow_likelihood self._surprise_likelihood = surprise_likelihood self._anger_likelihood = anger_likelihood @classmethod def from_api_repr(cls, emotions): """Factory: construct ``Emotions`` from Vision API response. :type emotions: dict :param emotions: Response dictionary representing a face. :rtype: :class:`~google.cloud.vision.face.Emotions` :returns: Populated instance of ``Emotions``. """ joy_likelihood = Likelihood[emotions['joyLikelihood']] sorrow_likelihood = Likelihood[emotions['sorrowLikelihood']] surprise_likelihood = Likelihood[emotions['surpriseLikelihood']] anger_likelihood = Likelihood[emotions['angerLikelihood']] return cls(joy_likelihood, sorrow_likelihood, surprise_likelihood, anger_likelihood) @classmethod def from_pb(cls, emotions): """Factory: construct ``Emotions`` from Vision API response. 
:type emotions: :class:`~google.cloud.vision_v1.proto.\ image_annotator_pb2.FaceAnnotation` :param emotions: Response dictionary representing a face with emotions. :rtype: :class:`~google.cloud.vision.face.Emotions` :returns: Populated instance of ``Emotions``. """ joy_likelihood = _get_pb_likelihood(emotions.joy_likelihood) sorrow_likelihood = _get_pb_likelihood(emotions.sorrow_likelihood) surprise_likelihood = _get_pb_likelihood(emotions.surprise_likelihood) anger_likelihood = _get_pb_likelihood(emotions.anger_likelihood) return cls(joy_likelihood, sorrow_likelihood, surprise_likelihood, anger_likelihood) @property def anger(self): """Likelihood of anger in detected face. :rtype: str :returns: String derived from :class:`~google.cloud.vision.face.Likelihood`. """ return self._anger_likelihood @property def joy(self): """Likelihood of joy in detected face. :rtype: str :returns: String derived from :class:`~google.cloud.vision.face.Likelihood`. """ return self._joy_likelihood @property def sorrow(self): """Likelihood of sorrow in detected face. :rtype: str :returns: String derived from :class:`~google.cloud.vision.face.Likelihood`. """ return self._sorrow_likelihood @property def surprise(self): """Likelihood of surprise in detected face. :rtype: str :returns: String derived from :class:`~google.cloud.vision.face.Likelihood`. """ return self._surprise_likelihood class Face(object): """Representation of a face found by the Vision API""" def __init__(self, angles, bounds, detection_confidence, emotions, fd_bounds, headwear_likelihood, image_properties, landmarks, landmarking_confidence): self._angles = angles self._bounds = bounds self._detection_confidence = detection_confidence self._emotions = emotions self._fd_bounds = fd_bounds self._headwear_likelihood = headwear_likelihood self._landmarks = landmarks self._landmarking_confidence = landmarking_confidence self._image_properties = image_properties @classmethod def from_api_repr(cls, face): """Factory: construct an instance of a Face from an API response :type face: dict :param face: Face annotation dict returned from the Vision API. :rtype: :class:`~google.cloud.vision.face.Face` :returns: A instance of `Face` with data parsed from `response`. """ face_data = { 'angles': Angles.from_api_repr(face), 'bounds': Bounds.from_api_repr(face['boundingPoly']), 'detection_confidence': face['detectionConfidence'], 'emotions': Emotions.from_api_repr(face), 'fd_bounds': FDBounds.from_api_repr(face['fdBoundingPoly']), 'headwear_likelihood': Likelihood[face['headwearLikelihood']], 'image_properties': FaceImageProperties.from_api_repr(face), 'landmarks': Landmarks.from_api_repr(face['landmarks']), 'landmarking_confidence': face['landmarkingConfidence'], } return cls(**face_data) @classmethod def from_pb(cls, face): """Factory: construct an instance of a Face from an protobuf response :type face: :class:`~google.cloud.vision_v1.proto.\ image_annotator_pb2.AnnotateImageResponse` :param face: ``AnnotateImageResponse`` from gRPC call. :rtype: :class:`~google.cloud.vision.face.Face` :returns: A instance of `Face` with data parsed from ``response``. 
""" face_data = { 'angles': Angles.from_pb(face), 'bounds': Bounds.from_pb(face.bounding_poly), 'detection_confidence': face.detection_confidence, 'emotions': Emotions.from_pb(face), 'fd_bounds': FDBounds.from_pb(face.fd_bounding_poly), 'headwear_likelihood': _get_pb_likelihood( face.headwear_likelihood), 'image_properties': FaceImageProperties.from_pb(face), 'landmarks': Landmarks.from_pb(face.landmarks), 'landmarking_confidence': face.landmarking_confidence, } return cls(**face_data) @property def anger(self): """Accessor to likelihood that the detected face is angry. :rtype: str :returns: String derived from :class:`~google.cloud.vision.face.Likelihood`. """ return self.emotions.anger @property def angles(self): """Accessor to the pan, tilt and roll angles of a Face. :rtype: :class:`~google.cloud.vision.face.Angles` :returns: Pan, tilt and roll angles of the detected face. """ return self._angles @property def bounds(self): """Accessor to the bounding poly information of the detected face. :rtype: :class:`~google.cloud.vision.face.Bounds` :returns: An instance of ``Bounds`` which has a list of vertices. """ return self._bounds @property def detection_confidence(self): """Face detection confidence score determined by the Vision API. :rtype: float :returns: Float representation of confidence ranging from 0 to 1. """ return self._detection_confidence @property def emotions(self): """Accessor to the possible emotions expressed in the detected face. :rtype: :class:`~google.cloud.vision.face.Emotions` :returns: An instance of ``Emotions`` with joy, sorrow, anger, surprise likelihood. """ return self._emotions @property def fd_bounds(self): """Accessor to the skin area bounding poly of the detected face. :rtype: :class:`~google.cloud.vision.image.FDBounds` :returns: An instance of ``FDBounds`` which has a list of vertices. """ return self._fd_bounds @property def headwear(self): """Headwear likelihood. :rtype: :class:`~google.cloud.vision.face.Likelihood` :returns: String representing the likelihood based on :class:`~google.cloud.vision.face.Likelihood` """ return self._headwear_likelihood @property def image_properties(self): """Image properties from imaged used in face detection. :rtype: :class:`~google.cloud.vision.face.FaceImageProperties` :returns: ``FaceImageProperties`` object with image properties. """ return self._image_properties @property def joy(self): """Likelihood of joy in detected face. :rtype: str :returns: String derived from :class:`~google.cloud.vision.face.Likelihood`. """ return self.emotions.joy @property def landmarks(self): """Accessor to the facial landmarks detected in a face. :rtype: :class:`~google.cloud.vision.face.Landmarks` :returns: ``Landmarks`` object with facial landmarks as properies. """ return self._landmarks @property def landmarking_confidence(self): """Landmarking confidence score determinged by the Vision API. :rtype: float :returns: Float representing the confidence of the Vision API in determining the landmarks on a face. """ return self._landmarking_confidence @property def sorrow(self): """Likelihood of sorrow in detected face. :rtype: str :returns: String derived from :class:`~google.cloud.vision.face.Likelihood`. """ return self.emotions.sorrow @property def surprise(self): """Likelihood of surprise in detected face. :rtype: str :returns: String derived from :class:`~google.cloud.vision.face.Likelihood`. 
""" return self.emotions.surprise class FaceImageProperties(object): """A representation of the image properties from face detection.""" def __init__(self, blurred_likelihood, underexposed_likelihood): self._blurred_likelihood = blurred_likelihood self._underexposed_likelihood = underexposed_likelihood @classmethod def from_api_repr(cls, face): """Factory: construct image properties from image. :type face: dict :param face: Dictionary representation of a ``Face``. :rtype: :class:`~google.cloud.vision.face.FaceImageProperties` :returns: Instance populated with image property data. """ blurred = Likelihood[face['blurredLikelihood']] underexposed = Likelihood[face['underExposedLikelihood']] return cls(blurred, underexposed) @classmethod def from_pb(cls, face): """Factory: construct image properties from image. :type face: :class:`~google.cloud.vision_v1.proto.image_annotator_pb2.\ FaceAnnotation` :param face: Protobuf instace of `Face`. :rtype: :class:`~google.cloud.vision.face.FaceImageProperties` :returns: Instance populated with image property data. """ blurred = _get_pb_likelihood(face.blurred_likelihood) underexposed = _get_pb_likelihood(face.under_exposed_likelihood) return cls(blurred, underexposed) @property def blurred(self): """Likelihood of the image being blurred. :rtype: str :returns: String representation derived from :class:`~google.cloud.vision.face.Position`. """ return self._blurred_likelihood @property def underexposed(self): """Likelihood that the image used for detection was underexposed. :rtype: str :returns: String representation derived from :class:`~google.cloud.vision.face.Position`. """ return self._underexposed_likelihood class LandmarkTypes(Enum): """A representation of the face detection landmark types. See https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#type_1 """ UNKNOWN_LANDMARK = 0 LEFT_EYE = 1 RIGHT_EYE = 2 LEFT_OF_LEFT_EYEBROW = 3 RIGHT_OF_LEFT_EYEBROW = 4 LEFT_OF_RIGHT_EYEBROW = 5 RIGHT_OF_RIGHT_EYEBROW = 6 MIDPOINT_BETWEEN_EYES = 7 NOSE_TIP = 8 UPPER_LIP = 9 LOWER_LIP = 10 MOUTH_LEFT = 11 MOUTH_RIGHT = 12 MOUTH_CENTER = 13 NOSE_BOTTOM_RIGHT = 14 NOSE_BOTTOM_LEFT = 15 NOSE_BOTTOM_CENTER = 16 LEFT_EYE_TOP_BOUNDARY = 17 LEFT_EYE_RIGHT_CORNER = 18 LEFT_EYE_BOTTOM_BOUNDARY = 19 LEFT_EYE_LEFT_CORNER = 20 RIGHT_EYE_TOP_BOUNDARY = 21 RIGHT_EYE_RIGHT_CORNER = 22 RIGHT_EYE_BOTTOM_BOUNDARY = 23 RIGHT_EYE_LEFT_CORNER = 24 LEFT_EYEBROW_UPPER_MIDPOINT = 25 RIGHT_EYEBROW_UPPER_MIDPOINT = 26 LEFT_EAR_TRAGION = 27 RIGHT_EAR_TRAGION = 28 LEFT_EYE_PUPIL = 29 RIGHT_EYE_PUPIL = 30 FOREHEAD_GLABELLA = 31 CHIN_GNATHION = 32 CHIN_LEFT_GONION = 33 CHIN_RIGHT_GONION = 34 class FDBounds(BoundsBase): """The bounding polygon of just the skin portion of the face.""" class Landmark(object): """A face-specific landmark (for example, a face feature, left eye). :type landmark_type: :class:`~google.cloud.vision.face.LandmarkTypes` :param landmark_type: Instance of ``LandmarkTypes``. :type position: :class:`~google.cloud.vision.face.Position` :param position: """ def __init__(self, position, landmark_type): self._position = position self._landmark_type = landmark_type @classmethod def from_api_repr(cls, landmark): """Factory: construct an instance of a Landmark from a response. :type landmark: dict :param landmark: Landmark representation from Vision API. :rtype: :class:`~google.cloud.vision.face.Landmark` :returns: Populated instance of ``Landmark``. 
""" position = Position.from_api_repr(landmark['position']) landmark_type = LandmarkTypes[landmark['type']] return cls(position, landmark_type) @classmethod def from_pb(cls, landmark): """Factory: construct an instance of a Landmark from a response. :type landmark: :class:`~google.cloud.vision_v1.proto.\ image_annotator_pb.FaceAnnotation.Landmark` :param landmark: Landmark representation from Vision API. :rtype: :class:`~google.cloud.vision.face.Landmark` :returns: Populated instance of ``Landmark``. """ position = Position.from_pb(landmark.position) landmark_type = LandmarkTypes(landmark.type) return cls(position, landmark_type) @property def position(self): """Landmark position on face. :rtype: :class:`~google.cloud.vision.face.Position` :returns: Instance of `Position` with landmark coordinates. """ return self._position @property def landmark_type(self): """Landmark type of facial feature. :rtype: str :returns: String representation of facial landmark type. """ return self._landmark_type class Landmarks(object): """Landmarks detected on a face represented as properties. :type landmarks: list :param landmarks: List of :class:`~google.cloud.vision.face.Landmark`. """ def __init__(self, landmarks): for landmark in landmarks: setattr(self, landmark.landmark_type.name.lower(), landmark) @classmethod def from_api_repr(cls, landmarks): """Factory: construct facial landmarks from Vision API response. :type landmarks: dict :param landmarks: JSON face annotation. :rtype: :class:`~google.cloud.vision.face.Landmarks` :returns: Instance of ``Landmarks`` populated with facial landmarks. """ return cls([Landmark.from_api_repr(landmark) for landmark in landmarks]) @classmethod def from_pb(cls, landmarks): """Factory: construct facial landmarks from Vision gRPC response. :type landmarks: :class:`~google.protobuf.internal.containers.\ RepeatedCompositeFieldContainer` :param landmarks: List of facial landmarks. :rtype: :class:`~google.cloud.vision.face.Landmarks` :returns: Instance of ``Landmarks`` populated with facial landmarks. """ return cls([Landmark.from_pb(landmark) for landmark in landmarks])
py
1a33586abaecc3d7eded13ff940979fc0bf1bbf1
""" Modified from https://github.com/allenai/allennlp-models/blob/main/allennlp_models/tagging/models/crf_tagger.py Removed some features that we don't need. """ from typing import Dict, Optional, List, Any, cast from overrides import overrides import torch from allennlp.common.checks import ConfigurationError from allennlp.data import TextFieldTensors, Vocabulary from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder from allennlp.modules import ConditionalRandomField from allennlp.modules.conditional_random_field import allowed_transitions from allennlp.models.model import Model from allennlp.nn import InitializerApplicator import allennlp.nn.util as util from allennlp.training.metrics import SpanBasedF1Measure from allennlpadd.common.util import construct_from_params @Model.register("my_crf_tagger") class CrfTagger(Model): """ The `CrfTagger` encodes a sequence of text with a `Seq2SeqEncoder`, then uses a Conditional Random Field model to predict a tag for each token in the sequence. Registered as a `Model` with name "crf_tagger". # Parameters vocab : `Vocabulary`, required A Vocabulary, required in order to compute sizes for input/output projections. text_field_embedder : `TextFieldEmbedder`, required Used to embed the tokens `TextField` we get as input to the model. encoder : `Seq2SeqEncoder` The encoder that we will use in between embedding tokens and predicting output tags. label_namespace : `str`, optional (default=`labels`) This is needed to compute the SpanBasedF1Measure metric. Unless you did something unusual, the default value should be what you want. label_encoding : `str`, optional (default=`None`) Label encoding to use when calculating span f1 and constraining the CRF at decoding time . Valid options are "BIO", "BIOUL", "IOB1", "BMES". Required if `calculate_span_f1` or `constrain_crf_decoding` is true. include_start_end_transitions : `bool`, optional (default=`True`) Whether to include start and end transition parameters in the CRF. constrain_crf_decoding : `bool`, optional (default=`True`) If `True`, the CRF is constrained at decoding time to produce valid sequences of tags. If this is `True`, then `label_encoding` is required. If `None` and label_encoding is specified, this is set to `True`. If `None` and label_encoding is not specified, it defaults to `False`. dropout: `float`, optional (default=`None`) Dropout probability. verbose_metrics : `bool`, optional (default = `False`) If true, metrics will be returned per label class in addition to the overall statistics. initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`) Used to initialize the model parameters. 
""" def __init__( self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, encoder: Dict[str, Any], label_namespace: str = "labels", label_encoding: str = "BIO", include_start_end_transitions: bool = True, constrain_crf_decoding: bool = True, dropout: Optional[float] = None, verbose_metrics: bool = False, initializer: InitializerApplicator = InitializerApplicator(), **kwargs, ) -> None: super().__init__(vocab, **kwargs) self.label_namespace = label_namespace self.text_field_embedder = text_field_embedder self.num_tags = self.vocab.get_vocab_size(label_namespace) self.encoder = construct_from_params( Seq2SeqEncoder, input_size=text_field_embedder.get_output_dim(), **encoder) self._verbose_metrics = verbose_metrics if dropout: self.dropout = torch.nn.Dropout(dropout) else: self.dropout = None self.tag_projection_layer = torch.nn.Linear(self.encoder.get_output_dim(), self.num_tags) self.label_encoding = label_encoding if constrain_crf_decoding: if not label_encoding: raise ConfigurationError( "constrain_crf_decoding is True, but no label_encoding was specified." ) labels = self.vocab.get_index_to_token_vocabulary(label_namespace) constraints = allowed_transitions(label_encoding, labels) else: constraints = None self.crf = ConditionalRandomField( self.num_tags, constraints, include_start_end_transitions=include_start_end_transitions ) self._f1_metric = SpanBasedF1Measure( vocab, tag_namespace=label_namespace, label_encoding=label_encoding ) initializer(self) @overrides def forward( self, # type: ignore tokens: TextFieldTensors, tags: torch.LongTensor = None, metadata: List[Dict[str, Any]] = None, **kwargs, # to allow for a more general dataset reader that passes args we don't need ) -> Dict[str, torch.Tensor]: """ # Parameters tokens : `TextFieldTensors`, required The output of `TextField.as_array()`, which should typically be passed directly to a `TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer` tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens": Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used for the `TokenIndexers` when you created the `TextField` representing your sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`, which knows how to combine different word representations into a single vector per token in your input. tags : `torch.LongTensor`, optional (default = `None`) A torch tensor representing the sequence of integer gold class labels of shape `(batch_size, num_tokens)`. metadata : `List[Dict[str, Any]]`, optional, (default = `None`) metadata containg the original words in the sentence to be tagged under a 'words' key. # Returns An output dictionary consisting of: logits : `torch.FloatTensor` The logits that are the output of the `tag_projection_layer` mask : `torch.BoolTensor` The text field mask for the input tokens tags : `List[List[int]]` The predicted tags using the Viterbi algorithm. loss : `torch.FloatTensor`, optional A scalar loss to be optimised. Only computed if gold label `tags` are provided. """ embedded_text_input = self.text_field_embedder(tokens, **kwargs) mask = util.get_text_field_mask(tokens) if self.dropout: embedded_text_input = self.dropout(embedded_text_input) encoded_text = self.encoder(embedded_text_input, mask) if self.dropout: encoded_text = self.dropout(encoded_text) logits = self.tag_projection_layer(encoded_text) best_paths = self.crf.viterbi_tags(logits, mask) # Just get the top tags and ignore the scores. 
predicted_tags = cast(List[List[int]], [x[0] for x in best_paths]) output = {"logits": logits, "mask": mask, "tags": predicted_tags} if tags is not None: # Add negative log-likelihood as loss log_likelihood = self.crf(logits, tags, mask) output["loss"] = -log_likelihood # Represent viterbi tags as "class probabilities" that we can # feed into the metrics class_probabilities = logits * 0.0 for i, instance_tags in enumerate(predicted_tags): for j, tag_id in enumerate(instance_tags): class_probabilities[i, j, tag_id] = 1 self._f1_metric(class_probabilities, tags, mask) if metadata is not None: output["words"] = [x["words"] for x in metadata] return output @overrides def make_output_human_readable( self, output_dict: Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]: """ Converts the tag ids to the actual tags. `output_dict["tags"]` is a list of lists of tag_ids, so we use an ugly nested list comprehension. """ def decode_tags(tags): return [ self.vocab.get_token_from_index(tag, namespace=self.label_namespace) for tag in tags ] output_dict["tags"] = [decode_tags(t) for t in output_dict["tags"]] return output_dict @overrides def get_metrics(self, reset: bool = False) -> Dict[str, float]: f1_dict = self._f1_metric.get_metric(reset=reset) if self._verbose_metrics: return f1_dict else: return {x: y for x, y in f1_dict.items() if "overall" in x} default_predictor = "sentence_tagger"
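# A rough sketch of the "model" section of an AllenNLP-style config that would
# select the class above via its "my_crf_tagger" registration. The embedder and
# encoder settings (embedding_dim, "lstm", hidden_size, dropout) are illustrative
# assumptions, not values taken from this project. The "encoder" entry stays a
# plain dict without input_size, since __init__ forwards it to
# construct_from_params, which injects input_size from the text_field_embedder.
_example_model_config = {
    "type": "my_crf_tagger",
    "text_field_embedder": {
        "token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 100}},
    },
    "encoder": {"type": "lstm", "hidden_size": 200, "bidirectional": True},
    "label_encoding": "BIO",
    "dropout": 0.5,
}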
py
1a3358c05e444f743b20f9150511877c85c8ad51
from __future__ import print_function import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from pdb import set_trace as stop def batch_product(iput, mat2): result = None for i in range(iput.size()[0]): op = torch.mm(iput[i], mat2) op = op.unsqueeze(0) if(result is None): result = op else: result = torch.cat((result,op),0) return result.squeeze(2) class rec_attention(nn.Module): # attention with bin context vector per HM and HM context vector def __init__(self,hm,args): super(rec_attention,self).__init__() self.num_directions=2 if args.bidirectional else 1 if (hm==False): self.bin_rep_size=args.bin_rnn_size*self.num_directions else: self.bin_rep_size=args.bin_rnn_size self.bin_context_vector=nn.Parameter(torch.Tensor(self.bin_rep_size,1),requires_grad=True) self.softmax=nn.Softmax(dim=1) self.bin_context_vector.data.uniform_(-0.1, 0.1) def forward(self,iput): alpha=self.softmax(batch_product(iput,self.bin_context_vector)) [batch_size,source_length,bin_rep_size2]=iput.size() repres=torch.bmm(alpha.unsqueeze(2).view(batch_size,-1,source_length),iput) return repres,alpha class recurrent_encoder(nn.Module): # modular LSTM encoder def __init__(self,n_bins,ip_bin_size,hm,args): super(recurrent_encoder,self).__init__() self.bin_rnn_size=args.bin_rnn_size self.ipsize=ip_bin_size self.seq_length=n_bins self.num_directions=2 if args.bidirectional else 1 if (hm==False): self.bin_rnn_size=args.bin_rnn_size else: self.bin_rnn_size=args.bin_rnn_size // 2 self.bin_rep_size=self.bin_rnn_size*self.num_directions self.rnn=nn.LSTM(self.ipsize,self.bin_rnn_size,num_layers=args.num_layers,dropout=args.dropout,bidirectional=args.bidirectional) self.bin_attention=rec_attention(hm,args) def outputlength(self): return self.bin_rep_size def forward(self,single_hm,hidden=None): bin_output, hidden = self.rnn(single_hm,hidden) bin_output = bin_output.permute(1,0,2) hm_rep,bin_alpha = self.bin_attention(bin_output) return hm_rep,bin_alpha class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self class att_chrome(nn.Module): def __init__(self,args): super(att_chrome,self).__init__() self.n_hms=args.n_hms self.n_bins=args.n_bins self.ip_bin_size=1 self.rnn_hms=nn.ModuleList() for i in range(self.n_hms): self.rnn_hms.append(recurrent_encoder(self.n_bins,self.ip_bin_size,False,args)) self.opsize = self.rnn_hms[0].outputlength() self.hm_level_rnn_1=recurrent_encoder(self.n_hms,self.opsize,True,args) self.opsize2=self.hm_level_rnn_1.outputlength() self.diffopsize=2*(self.opsize2) self.fdiff1_1=nn.Linear(self.opsize2,1) def forward(self,iput): bin_a=None level1_rep=None [batch_size,_,_]=iput.size() for hm,hm_encdr in enumerate(self.rnn_hms): hmod=iput[:,:,hm].contiguous() hmod=torch.t(hmod).unsqueeze(2) op,a= hm_encdr(hmod) if level1_rep is None: level1_rep=op bin_a=a else: level1_rep=torch.cat((level1_rep,op),1) bin_a=torch.cat((bin_a,a),1) level1_rep=level1_rep.permute(1,0,2) final_rep_1,hm_level_attention_1=self.hm_level_rnn_1(level1_rep) final_rep_1=final_rep_1.squeeze(1) prediction_m=((self.fdiff1_1(final_rep_1))) return prediction_m args_dict = {'lr': 0.0001, 'model_name': 'attchrome', 'clip': 1, 'epochs': 2, 'batch_size': 10, 'dropout': 0.5, 'cell_1': 'Cell1', 'save_root': 'Results/Cell1', 'data_root': 'data/', 'gpuid': 0, 'gpu': 0, 'n_hms': 5, 'n_bins': 200, 'bin_rnn_size': 32, 'num_layers': 1, 'unidirectional': False, 'save_attention_maps': False, 'attentionfilename': 'beta_attention.txt', 'test_on_saved_model': 
False, 'bidirectional': True, 'dataset': 'Cell1'} att_chrome_args = AttrDict(args_dict) att_chrome_model = att_chrome(att_chrome_args)
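# A minimal smoke test for the model instantiated above, assuming the
# hyper-parameters in args_dict. The input layout (batch, n_bins, n_hms)
# mirrors the indexing used in att_chrome.forward(); the batch size of 8 is an
# arbitrary choice for illustration.
if __name__ == '__main__':
    dummy_batch = torch.zeros(8, att_chrome_args.n_bins, att_chrome_args.n_hms)
    with torch.no_grad():
        predictions = att_chrome_model(dummy_batch)
    print(predictions.shape)  # expected: torch.Size([8, 1])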
py
1a33590146501a5c20b0ff147ebe8e77a6b706a7
# -*- coding: utf-8 -*- import remi import remi.gui as gui from remi.gui import * from threading import Timer import traceback import time import math import epics #from epics import caget, caput, cainfo style_inheritance_dict = {'opacity':'inherit', 'overflow':'inherit', 'background-color':'inherit', 'background-image':'inherit', 'background-position':'inherit', 'background-repeat':'inherit', 'border-color':'inherit', 'border-width':'inherit', 'border-style':'inherit', 'border-radius':'inherit', 'color':'inherit', 'font-family':'inherit', 'font-size':'inherit', 'font-style':'inherit', 'font-weight':'inherit', 'white-space':'inherit', 'letter-spacing':'inherit'} style_inheritance_text_dict = {'opacity':'inherit', 'overflow':'inherit', 'color':'inherit', 'font-family':'inherit', 'font-size':'inherit', 'font-style':'inherit', 'font-weight':'inherit', 'white-space':'inherit', 'letter-spacing':'inherit'} # noinspection PyUnresolvedReferences class EPICSWidget(object): @property @gui.editor_attribute_decorator('WidgetSpecific','The PV name', str, {}) def epics_pv_name(self): return self.__epics_pv_name @epics_pv_name.setter def epics_pv_name(self, v): self.__epics_pv_name = v self.disconnect() try: self.epics_pv = epics.PV(self.__epics_pv_name, auto_monitor=True, callback=self.onChanges, connection_callback=self.onConnectionChange, connection_timeout=2) except: print(traceback.format_exc()) epics_pv = None # here will be stored the PV instance app_instance = None def __del__(self): self.disconnect() def disconnect(self): if self.epics_pv: self.epics_pv.clear_auto_monitor() self.epics_pv.disconnect() @decorate_set_on_listener("(self, emitter, pvname=None, conn=None, chid=None, **kwargs)") @decorate_event def onConnectionChange(self, pvname=None, conn=None, chid=None, **kwargs): #print('ca connection status changed: ', pvname, conn, chid) #Here I use the outline red color to show the unconnected state # of course this can be avoided or changed self.style['outline'] = "1px solid red" if conn: del self.style['outline'] return (pvname, conn, chid, kwargs) @decorate_set_on_listener("(self, emitter, pvname=None, value=None, **kwargs)") @decorate_event def onChanges(self, pvname=None, value=None, **kwargs): #as default I write the value to the widget itself self.set_value(str(value)) return (pvname, value, kwargs) def search_app_instance(self, node): if issubclass(node.__class__, remi.server.App): return node if not hasattr(node, "get_parent"): return None return self.search_app_instance(node.get_parent()) def get_app_instance(self): if self.app_instance==None: self.app_instance = self.search_app_instance(self) return self.app_instance class EPICSBooleanButton(gui.Container, EPICSWidget): """ A Button widget that sets the bit when clicked. 
""" icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC4AAAAuCAYAAABXuSs3AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAKMSURBVGiB7ZqxaxNRGMB/9+7uXZK2aaMVEUWEKNSh4CQIrhWnujiIQqGig7gXF/+B7tLNbp2KQyfBwcVdsdhWJa5WHZKKvZqXe3cO57VNmraJyd0ZuB88LnkvfO/H43sfB18Mz/OCIAhoN4DdZ9IYhrH7bDesSPJlRfHsrc+nmpGK6HGcGQp4clVwsywBMJRSgVKKqWXNpitS1juaS6M+L26ZSCnDE1dKsenaAHy/MUNjtNJ10JG1WYofHvTbtYnPWwKlFLZth+Ke5wGheKP0EXVireugemizz5rt8TyPIAgQe+KDQZO41jptn47RWofiAL7vp+3TMZGrtb9mAxTfP0bnf3QdMPf1Wt/kjiLytVoXRtZnEhHolf+7cB9BJp40mXjSDKz4gXLYHQ6vHtmUD8z7LC+4zAGz00M8PXv4q3Jl4xdTr7vfuUfx9pvP3xnm9v086893WFzZZjFamMzz7rpg7c02d1d72zOWVJn75oNjcDmO4H+JRXz+tKCyEaZKXPQlVcoTw3yZaJ776dpAox/h2xJLjocXUrI02eg5lw8jllRZXPGoYHBqPI7oIQNbx2MRn522KNc1S/9Qnzslpsvps7yws1e/Y6BH8TpTC/XOf766w5U+XdYsx5MmE0+aTDxpMvGkycSTRkTNoEEh8hXRl0Ehch3cEzcMA9M0GbdV2k7Hcj7/G9M098Qty+LhxSrnHDdtt0M5adW5d2ELy7LCU1dKBa7rUqvVqFaruK5LvV5Ptbvc2lV2HIdCoUCpVGJsbIxCoYAVLRSLRaSUKKVQStHaYkmDSFxKiZSSXC6H4zjhvOd5ge/7aK2bRtQkSruXL4TANM2mIYTA0FoHrWmR9h8QIlpTZv/nP6KyI2uh/zMtAAAAAElFTkSuQmCC" @property @gui.editor_attribute_decorator('WidgetSpecific','Specifies if the button is toggle or must reset the value on release', bool, {}) def toggle(self): return self.__toggle @toggle.setter def toggle(self, v): self.__toggle = v self.button.onmouseup.do(self.reset_bit if not self.__toggle else None) @property @editor_attribute_decorator("WidgetSpecific",'''Text content''', str, {}) def text(self): return self.button.get_text() @text.setter def text(self, value): self.button.set_text(value) button = None # The gui.Button widget instance led = None # The led indicator Widget def __init__(self, button_label='epics button', epics_pv_name='', toggle=False, *args, **kwargs): self.color_inactive = 'darkgray' self.color_active = 'rgb(0,255,0)' self.button = gui.Button(button_label, width="100%", height="100%", style=style_inheritance_dict) self.led = gui.Widget(width=15, height=5, style={'position':'absolute', 'left':'2px', 'top':'2px', 'background-color':self.color_inactive}) self.led_status = False default_style = {'position':'absolute','left':'10px','top':'10px', 'background-color':'rgb(4, 90, 188)', 'color':'white'} default_style.update(kwargs.get('style',{})) kwargs['style'] = default_style kwargs['width'] = kwargs['style'].get('width', kwargs.get('width','100px')) kwargs['height'] = kwargs['style'].get('height', kwargs.get('height','100px')) super(EPICSBooleanButton, self).__init__(*args, **kwargs) _style = {'position':'relative'} _style.update(style_inheritance_dict) self.append(gui.Container(children=[self.button, self.led], width="100%", height="100%", style=_style)) self.toggle = toggle self.epics_pv_name = epics_pv_name self.button.onmousedown.do(self.set_bit) def set_bit(self, emitter, *args, **kwargs): self.pressed = True self.written = False value = 1 if self.toggle: value = 0 if self.led_status else 1 self.epics_pv.put(value, callback = (self.put_done if not self.toggle else None) ) def put_done(self, *args, **kwargs): self.written = True #this function gets called when a set_bit is completed and the button is not toggle # and so the value have to be reset if not self.pressed: self.epics_pv.put(0) def reset_bit(self, emitter, x, y, *args, **kwargs): self.pressed = False if self.written: self.epics_pv.put(0) def set_value(self, value): if not self.get_app_instance(): return with self.get_app_instance().update_lock: #this function gets called when the camonitor notifies a change on the PV self.led_status = 
float(value)>0.0 self.led.style.update({'background-color':self.color_active if self.led_status else self.color_inactive}) class EPICSLed(HBox, EPICSWidget): """A Status indicator widget. """ icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC4AAAAuCAYAAABXuSs3AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAIswAACLMBhC+V2wAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAZXSURBVGiBzZrbbxTXGcB/39o7s2svUCzF3lxEDMakrUhCgCZOQs1FEBtVBFlcouAmbxEBU8fQSEj5B/KSItaNi/tMTVUFrARRE0i52U1CEscSKWqDbQwmQQQQSWBt75wZzNcH7xjjG2tsvPyezsyZOfPb2TNnzne+EcaJqua4rlssIotU9ddAAfAQEEke0gVcBdqB/wKfWpZ1QkR+Gs915R5lQ57nrQNeV9VlQGCMTfQCR4DdlmXtFRFnrA5jElfVLGNMhYhsA6L+/m6vm8+//5xTl0/R9mMbV7qv0O11A5AdzCYvkkdhTiFP5T7F8489T3Ywe2Czl1T1T7Zt/0VEEhMu7jjOKhGpBvIBEl6C+jP17Dm9h8YLjbi9bkrtWBkWxTOKKZ9bTtkTZYSDYb/qnKr+IRQK/XNCxFU1bIyJicgbAD1eDzXNNcS+jHG152pKsiORm5VL1XNVbF6wuf8HqOpfbduuulv3GVW8u7v74czMzAZgHkBDewNVh6vovN45LuHB5E/LJ1YSo7Sg1N/V4nne7yKRyA8jnTOiuOM4s0TkE2CWuWnYfnQ7u77eNaHCd4oIFQsreHfZu1gZFkCHqq4IhUIdwx8/DPF4PNeyrCZgTpfbxfp96zly/sh9kx7I4hmL2bt2L1PtqQAdnue9ONydHyKuqmHXdT8FnrlhblCyp4SWH1omQfk2Cx9eyMevfswUewrA15ZlLRrc54eMv8aYGPCMuWlYs3fNpEsDNF9qZu2+tf5ItcAYs2PwMXeIJ4e8NwC2H91O44XGyTEdhuOdx3nn2DsAiMgmx3FWDqzv7yqqmuW67mlgZkN7A2UflE2u6TAIwv5X9vPSrJcAzlqWNdfvMv133BhTAcxMeAm2Ht6aJtU7UZSKgxX0eD0ABcaYTX5dAPrmHsnXOO83v8/56+fTIjocF25coLalFgAReVtVQ5AU9zxvPRBNeAliX8bSZzkCO7/YScJLADziuu4auN1VXgOoP1M/7tf4/eBy92U+av3I33wNIKCqOaq6FKDuP3Xpcrsrdaf73Zar6i8CrusuBjJ6vB6avmtKo9roHO887j+kGcaY4oCIvAjw2fefpTw1TQdur8vJiycBEJFFgWS4xanLp9IqlgrfXPnGL/4yABQCtP3YljahVGm91uoX5wSAHOh7ch90fEdVzQmQjMa73K50OqVE3MQBEJGpY43OHxgC9K17ELEidzk0/SSDC1Q1HgCuAUQj0dHOeSDIi+QBICLXAkAbwOzps9PplBKFOYV+sTUgIv8DeDrv6fQZpcgAx28DqvpvgBcee8GPrh9I7AybokeLAFDVpoBlWSeA3qxgFsUzitNrNwpL85cSzgwD3LRtuykgIj+JyFGA8rnl6bUbhQ1zN/jFf4nIz/44vhug7IkycrNy0yI2GtHsKKvnrPY3d0MykAgGgx8Al8LBMFXPVaVJb2S2FW0jlBkCuGhZ1j5IiouIo6o7ADYv2Ez+tPy0SQ6mYHoBG+dvBEBV3xMRAwOifNu2a4COcDBMrCSG3Nua/4QiCNUl1f7dbrdtu9av6xcXkYSqVgKUFpRSsbBi8k0H8dazb7F85nIAVHXLwGW4IbfVcZxaEdno9rq8/I+XOdZ5bBJVb7Msfxn71+8nmBFERGosy9oysH64Rc9QctFzftzEKf17Kc2XmidNGIYsen5lWdZv/b7tM2RaKyKOZVmlQOsUewqHNhxixcwVk6QMSx5fwsFXD/rSZz3PWzVYGkbIlonIVVVdCZyNWBHq19VT+ZvK+/rACkLVs1UceOWAP31tv3Xr1opIJDJsaDaqSVdXVzQYDB4AFgAc7jhM5aFKzv18bkKlC6YXUF1S3f8gAl95nrdqJGlILXkVMsbsEJFN0Jdt29Wyi51f7Bx3nBrNjrK1aCtvzn/TH/IQkZpgMPjH4brHmMR9HMdZKSJ/pi9zjHPT4cMzH1J3uo4TnScwvaNepx87w2bJ40sof7Kc1XNW9wsD7aq6JRQKHUqlnbEmaEPGmE0i8jbwiL+/x+vh5MWTdyRo425fYBuxIuRl9yVo50XnUfRokT/L87moqu/Ztl07lgzzvabEbdd11wG/B5YDGWNsohf4BPhbMiWe2t81gHEPE6o6zRizWEQWAb8CZgO53P4IIU7fRwhtwLeq2mTbdqOIXB/Pdf8P7oFocYOtZGkAAAAASUVORK5CYII=" @property @editor_attribute_decorator("Geometry",'''Widget width.''', 'css_size', {}) def css_width(self): return self.style.get('width', None) @css_width.setter def css_width(self, value): self.style['width'] = str(value) self._update_size() @property @editor_attribute_decorator("Geometry",'''Widget height.''', 'css_size', {}) def css_height(self): return self.style.get('height', None) @css_height.setter def css_height(self, value): self.style['height'] = str(value) self._update_size() label_value = None # the gui.Label used to show the value 0 or 1 def __init__(self, epics_pv_name='', *args, **kwargs): self.color_inactive = 'darkgray' self.color_active = 'rgb(0,180,0)' default_style = {'position':'absolute','left':'10px','top':'10px', 'color':'white','background-color':self.color_inactive, 'align-items':'center', 'justify-content':'center'} default_style.update(kwargs.get('style',{})) kwargs['style'] = default_style kwargs['width'] = kwargs['style'].get('width', 
kwargs.get('width','50px')) kwargs['height'] = kwargs['style'].get('height', kwargs.get('height','50px')) super(EPICSLed, self).__init__(*args, **kwargs) _style = {'text-align':'center'} _style.update(style_inheritance_text_dict) self.label_value = gui.Label("0", style=_style) self.append(self.label_value) self.epics_pv_name = epics_pv_name def _update_size(self): width = gui.from_pix(self.style.get('width', "100").replace("%","")) height = gui.from_pix(self.style.get('height', "100").replace("%","")) radius = min(width, height)/2 self.style['border-radius'] = gui.to_pix(radius) def set_value(self, value): if not self.get_app_instance(): return with self.get_app_instance().update_lock: _value = float(value) self.label_value.set_text( '1' if _value>0.0 else '0' ) self.style.update({'background-color':self.color_active if _value>0.0 else self.color_inactive}) class EPICSValueMeterWidget(Progress, EPICSWidget): """A simple progress bar indicating a value. """ icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACkAAAApCAYAAACoYAD2AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOwgAADsIBFShKgAAAAG1JREFUWEft1qENgEAMQNEec6AwDMG4jIbC3QZAQuXhvijJf6aVP+mJa9cjiptylmYkxUiKkRQjKUZSjKQYSTGSMvyZt/3M7dux9dx487Lm9vLclP++yWo8N8VIipEUIylGUoykGEkxkmIkI+IGyZcQRHB9PC8AAAAASUVORK5CYII=" def __init__(self, epics_pv_name='', max_value=100, *args, **kwargs): default_style = {'position':'absolute','left':'10px','top':'10px'} default_style.update(kwargs.get('style',{})) kwargs['style'] = default_style kwargs['width'] = kwargs['style'].get('width', kwargs.get('width','100px')) kwargs['height'] = kwargs['style'].get('height', kwargs.get('height','30px')) super(EPICSValueMeterWidget, self).__init__(0, max_value,*args, **kwargs) self.epics_pv_name = epics_pv_name def set_value(self, value): if not self.get_app_instance(): return with self.get_app_instance().update_lock: Progress.set_value(self, value) try: import pygal except: print("It is required to install pygal to use EPICSPlotPV widget") class EPICSPlotPV(gui.Svg, EPICSWidget): """A simple plot bar indicating a value. 
REQUIRES library pygal to be installed """ icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAAAuCAYAAACYlx/0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAJjQAACY0BXC+J+AAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAhJSURBVGiB5ZprTFNbFsd/bWlLX1pABKHjI4ZATBwTMblkNPLBB4IJ0cSMUWYCwUd8xOcl6nxwEtQYYzQTHwh3EmQkOlFx9IMyiXFiMCLjY4YwiRodtT7QqlAKpaXY09Oe+VDbodAXXJXLvf9kJ+fstfY6a6/u9d9r71RGDBQXF2dIkvROJpP5YumONUiSJE+IR1Gj0fTt2rVL97Ud+taorKyU5KPtxNdCZWUlL1++jKkX1wpIT0+X6/X6H+3Ul8abN2+4ePEiFRUVQ2TPnj3D5XIRze/x48fHF4APHz74nE7nyD39SrDb7TgcDgK+SZJEQ0MDy5Yt4/3797x7945oftvtdsZ0Cng8Htxud/DdbrdTV1eH2WwmNzcXq9Ua08aYDoAoiiEB6Orqwmaz0dLSQl5eHjabLaaNMc0BCQkJiKIYzHOn00lubi5Xr16lvr6e48eP/7w5oLe3l76+vmCev379mrlz53Lu3DkMBgMulysmB8QVgJ8qRFHk06dPwXer1cq8efOQyWSAnxRjYUxzgCAIIRzQ2dlJeno6q1evjtvGmOcAj8cTzPO+vj4mTZoUXAEqlQqtVotcHv53HvMc4HA46O/vD+a5KIr09fUF5Xq9nrdv35KcnBx2/E+mDrBYLCMaN3gbHIyUlJSYW+GoB6CxsZHKysoRjRVFEVEUAfD5fEOWekpKCl1dXVFtjCoHdHd3c+fOHRYvXsy9e/dYsGDBsMbLZDLUajV6vR6r1UpaWlrIvp+ZmUlPT0/EWmDUOWD//v2sW7cOk8lERUUFc+bMQaFQxD3e6XSiVCpxOBy8evWK8ePHh+z7Wq2Wx48fR6wFRpUDmpubmT59OtOmTUOpVFJQUEBjY+OwbIiiiE6nQxAEOjs7mTBhQoj8i3GAUqlEoVDEbGazGY/HE1Ovv7+fy5cvU1paGuxbunQpTU1NeL3euL6lUCjwer0YDAY8Hg/d3d2kpaWFyFNTU7HZbCH6g23EFQCDwSBTq9VEa0qlkhMnTnDo0CFUKlVU3ZqaGrZu3YpWqw32aTQaSkpKuHTpUtSxAxvAuHHj8Pl82Gw2MjMzQ+Q6nQ5JklCr1ahUKvbt2xci12g08XGAzWaTXC5XVJ0rV65QUFCAXC7n2LFjrF+/Pqxea2srSqWSqVOnMthmbm4uFy5coKioCIPBENMvl8uFRqOhp6cHi8WCXq8fYlMURVwuFx0dHbS1tXH+/HmKi4sBP4d8EQ5wOBw0NzdTUFDA4sWLkSSJ69evD9ETBIGzZ8+ybt26iLbKysqor6+P67uBKtDtduN0OqOe/F69ekVZWRl3796lvb092B9XAJKTk2UajYZIra6ujs2bN6PVatFoNGzbto0HDx7w/PnzEL3Tp09TWlqK0WiMaCs3N5fOzk56e3sj6gSaJEkYjUbAXxaH0wmkp8ViITs7m71791JVVYVKpUKn08UXAKfTKQmCQLj29OlT3G43WVlZwT6Px8OePXuorq6mvb0dQRB4+PAhDoeD2bNnh7UzsK1du5aampqYem63G41Gg9PpxOv1htUxGo18/PgRs9lMRkYGer2eoqIiamtrcbvd8QVAEAS8Xm/YVl1dzZo1a4b0JyQksHv3bg4ePIjD4aCqqoqNGzdGtDOwmUwmfD4fZrM5qp4gCGi1WhwOR5DlB7ekpCQ6OjqwWq0YjUa8Xi/z589HoVAgiuKP44Bbt24xc+bMiIeNiRMnUl5eTmlpKcXFxVFzdDDKy8upra2NqhO4DbJYLCQlJYXViVQLBI7MCcBfgHtA9QC5DLgANAB3wpXCgiBw9epVTp48SUJC5M0kLy+Po0ePMmPGjKiTGQy9Xo/JZOLFixfMmjUrRPb06VOys7ORy+UkJyfz6NEjTCZT2ACbTCba29vRarVD5IFS2Az8EagFhM+yJcAyYAeEL4Xr6upYsWJFyI1MJEyePDnq1VQkrFq1iv3793P48OFgn8/nY8eOHRw5cgRRFJEkiZcvX5KTkxP2G1qtlvv374f1IVAK1wBJwPIBss3AJeBdOMc6Ozt58eIFeXl5w57UcGAwGJg5cyYtLS3BvsbGRvLy8rhx4wYAarWat2/fkpqaGtZGSkoKra2tTJkyJaxcDnQAf8M/aYAp+FfAiUiO1dTUsGHDhuHPaARYuXIlDQ0N+Hw+BEHg5s2bbN++nSdPngD+AFgsliHngACMRiMdHR0RAxBI3irgDvBr4PfAf4B/BpQGckBbWxtpaWnk5OR8mRnGgSVLltDU1ITNZqOkpIRx48aRlZVFS0sLycnJCILAtGnT0Gg0YcdPmDCBGTNmkJiYGNI/8DjcAvwL+B5YyufcDyDAAT6fj1OnTnHgwIER5fRIsWjRInbu3IlSqWTFihU4nU7y8/O5du0aXq8XvV6P1+uN6FNhYSGiKIblgIH0XY2fCDvws/8QXLt2jYULF6LVar/MzOKEXC5n+fLlwaoPIDs7m8LCQhITEyPmfwBbtmyJbHvA83nADfwADKF2l8vF7du3KSwsHKb7Xwb5+flDtsONGzeiVqtjBiAaBq6AefgD8sNgpfT0dPmZM2fYtGlTXKe0bwmdTseGDRuGVWQFMPhK7Hv8S3/I1vfmzRtJoVCQkZER85JxNJCdnT0iv+x2O7LPz1OBfwC/A+4OVCouLk6TyWT/VQznsm6MwOv1iqPtw5hA4Lzwp0H9RcDfgcxv7dBo4Dv8O8Smz++/Aqz8v3r8RWAL/u3xO6AZiO/e6meGv+JfCfeAxBi6YwLDvRD5N6ACPuAPxC8KvwH6gFLADvxhdN35tkjHXyAF/pH4W0AECkbNo28IJXAbaIRg4QTwZ6ALfxE1ZvE/Xo/9xlOEeIkAAAAASUVORK5CYII=" @property @editor_attribute_decorator("WidgetSpecific",'''Defines the maximum values count.''', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 0, 'step': 1}) def max_values_count(self): return self.values.maxlen @max_values_count.setter def max_values_count(self, value): self.values.maxlen = int(value) def __init__(self, epics_pv_name='', max_values_count=100, *args, **kwargs): w = kwargs.get("style", {}).get("width", kwargs.get("width", 100)) h = 
kwargs.get("style", {}).get("height", kwargs.get("height", 100)) if 'width' in kwargs.keys(): del kwargs["width"] if 'height' in kwargs.keys(): del kwargs["height"] default_style = {'position':'absolute','left':'10px','top':'10px', 'overflow':'hidden', 'background-color':'lightgray', 'margin':'10px'} default_style.update(kwargs.get('style',{})) kwargs['style'] = default_style super(EPICSPlotPV, self).__init__(w, h, *args, **kwargs) self.values = gui.SvgPolyline(max_values_count) self.epics_pv_name = epics_pv_name def set_value(self, value): if not self.get_app_instance(): return with self.get_app_instance().update_lock: self.values.add_coord(time.clock(), float(value)) try: plot = pygal.XY() pairs = [] for i in range(0, len(self.values.coordsX)): pairs.append([self.values.coordsX[i], self.values.coordsY[i]]) plot.add(self.epics_pv_name, pairs) self.add_child("chart", plot.render()) except: self.style['overflow'] = "visible" self.add_child("chart", gui.SvgText(10,10, "Install pygal to use this widget")) class EPICSValueGaugeWidget(Svg, EPICSWidget): """A gauge indicator for an EPICS PV value """ icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAD0AAAAuCAYAAACbIBHcAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAesSURBVGhD3ZpZSFVbGMe/c5rneSYjuJREIPWQ0EtEFOiDFPVQlARNxqUIw8J6KHqIaKILEZUUBdFD0UTRRD6EBRUhQpaJdTOzzLI0s8FSz7rr97m39xyH8kx29A/Lvdba6+y9/usb19p6jIVEEbW1tfL+/XsZO3as1NTUSGVlpfDKgQMH6tXr9Urv3r2lZ8+eUlxcLPHx8c4vowevc40oPn/+LG/fvpWSkhItGzdulK9fvyrhbt26SVlZmXz//l37WAgK9dTUVCksLJTy8nKpqKjQMdFAxEgzwQ8fPkh+fr5ev3z5IvX19dK9e3epqqpSqQKfzyd9+/bVq8fj0dLQ0KBjhw0bJoMGDdI6pF++fClPnjzRRYgkwibNBJkU0kPCqGpdXZ38/PlTfvz4IYMHD5ZevXopUUiNGDFCxo8fr+rdv39/6dOnjy4AizVq1Cj59u2bSh5153c8j/azZ8/k48ePOjZchGzTSDY7O1tmzJgh1dXVKjEmNGDAAC2QQcrtBRJ98OCBpKSk6OKxcDwPm+c5LKC7eHl5eZKcnOz8MniERBrCSDctLU127dql0oPokCFDnBHhAe2BOO9geu7ioR25ubly7tw52b9/v7Z79Oih94JBUKQZin26kr1//7563C1btjgjIgvIs8CvX7/Wd2Meq1evlh07dihhfMHo0aO1HgyCIo3dYl/YWb9+/WT48OE6sWDUOBSg5oS969evy+PHjyUzM1PtGzB9iAejZe1yZDyY0ENYwVZxOBAG0SYMsGuIofJLly7VK46S8IfDI1pQ2uvkfitpbrtxFcLjxo1z7vwZQBYBuPNhfiw8SRC+hfI7/Jb0q1ev9EVIlvATC8CWX7x4oaYFcdqulBEKHv5XaJM03QUFBeokUCFUOpbA/PDubhKEquNnIE+snzx5sjOyJdq0adRlxYoVUlRUFHOEAdED7XNjOW1w/vx5ycjI0HpbaFXSJAKsIldCVGJionMn9oBwCJvkCYcPH1Z737NnjyY3EyZMcEYFolXSb968UTuJi4tzemIbqPiGDRs0lG7fvl3zCNR96NChrfqhFurNToiHjBkzxumJfaDaaCNJEvN3s7R3796pE26OFpJGVUaOHKlOoTMB237+/LlqKBLHmcEBTw4ffwRImqAPOhthgDNDlfHk+CJsHBVnX89C+COANKkd277OCrw5UibEki5DnvTUFaaLJtIMQBU6Iq2MFpA2Eka12YYiaTI3HLM/mkjj8fB2nR3k6IBQRiFjQ9r+Dk1Jk9LduXMnpL1pLIITGaTO0ROk4XXkyJEm4kqaFbl79652dAVAGo9NyNq6daskJCSoQ0PtgYas0tLSTu3AmgP/NG3aNE2uOGHlCArHhr9iMTzWnZtPnz51CXv2ByepkCRcuQXPjnC9HAzg8boa8N4cLADyL2wc9dYNCsG8qzgwf0AakjgvtyB5VN/bmePyrwA5MjQkDEdU21Vzr7sP7WogO3NPVVzBIn0i1R+RNCbF4X40gYQhCBAskqeoTZOqdSSOHz+uPmTq1KmaIz98+NC5E3mwuKg1jkzV2i4EpD0+2xNxBbcPtm9sLHZvLnV1LLf8W1Qkf82e7Qz6H0wqGnj69GmTEyNhIUNjwT0l9+6ZOBuwjd182wy9cYJWDdxJezhULy/n7JUEXcSGOO4bG+yVEGPZxVC33tJTVdU4FuIUt26JpduJ/NM4nwBcuHBBFixY4LQiAw4TTp48qUfC2DJkkTabKs/9q1dN4ty57CvF7sLFVFRw5CAeSNhBStTGOwMZCEPQEtXFsDsYHcNC2WJc6dLnB1eT0mzJaqwG4NSpU7Js2TKnFRnwqTcrK0vVG8LuSan6MKtaZklqqklOSTF5BQU0m/B3ZqZJWrLEZOfmOj1twGZ1prbW+GpqjK+y0vjKy42vtNT4iouNr7DQmPx8Y27fNjm7d6PHLYpNEZ0HhQdLytgFNGfOnHF6GmEzTmMX1SxfvtxOtcFwrmRu3LihN+Pj4/UK9u7da+zOROvTp0+3QqzXerhYuXJlAOGjR486d8JHRkaGWbx4sdm8ebPT04iZM2daBa4wJSUlZtGiRUb448K/vmbNGlNWVqb1devWGbtj0XokkG8lf/HiRZ1EpICUwaNHj0x6errWXdgNh1MzZuHChSYgZLkf5QDunZACwt2MXLlyRebMmaOfWTm6IVzNnz9fd0E4HOrshHJycpxfBA83yeIktzn8v29xyuu15NWrgeYJA84A4PpDTWJICCB07Ngx/XAwF6fph6SkJP10tG/fPpk1a5aOjzSs9J2a6Kde76pVq
8Sqr1y6dEk/fhHQz549K/Tv3LlTrL3rOVOo0mbzDiZOnCjr16/X/yTwB+21a9fKpEmTtO2ODxXMn/kCQiGaNW/ePDl06JB++bDqbT2Jxc2bN83BgwepmurqanPr1i2t37Mx/MCBA+rxQgV2a9+vdezNhg+tu6BNP2BcuHaOw7ILqfXs7GybflRq3cZsc/r0aa03ziaK8Cd9+fLlVklfu3ZN65Eg3R5EnTSAzIkTJ8yUKVOMdWbat2nTJr3STkhIMNbmmxYn2gg47I8WcGD8J+C2bds0S7LvFY6osD/a+A9smRy5I9DmR/mujA6RdGxB5D9hHwJHVeDJjgAAAABJRU5ErkJggg==" @property @editor_attribute_decorator("WidgetSpecific",'''Defines the minimum value.''', float, {'possible_values': '', 'min': -65535, 'max': 65535, 'default': 0, 'step': 1}) def min_value(self): return self.__dict__.get('__min_value',0) @min_value.setter def min_value(self, value): self.__dict__['__min_value'] = value self.text_min_value.set_text(str(value)) @property @editor_attribute_decorator("WidgetSpecific",'''Defines the maximum value.''', float, {'possible_values': '', 'min': -65535, 'max': 65535, 'default': 0, 'step': 1}) def max_value(self): return self.__dict__.get('__max_value',1) @max_value.setter def max_value(self, value): self.__dict__['__max_value'] = value self.text_max_value.set_text(str(value)) indicator = None # a gui.SvgPolygon that indicates the actual value indicator_pin = None # a gui.SvgCircle around which the indicator rotates text_min_value = None # the gui.SvgText min value indicator text_max_value = None # the gui.SvgText max value indicator text_actual_value = None # the gui.SvgText value indicator def __init__(self, epics_pv_name='', min_value=0, max_value=100, *args, **kwargs): w = kwargs.get("style", {}).get("width", kwargs.get("width", 100)) h = kwargs.get("style", {}).get("height", kwargs.get("height", 100)) if 'width' in kwargs.keys(): del kwargs["width"] if 'height' in kwargs.keys(): del kwargs["height"] default_style = {'position':'absolute','left':'10px','top':'10px'} default_style.update(kwargs.get('style',{})) kwargs['style'] = default_style super(EPICSValueGaugeWidget, self).__init__(width=w, height=h, *args, **kwargs) self.epics_pv_name = epics_pv_name #the indicator self.indicator = gui.SvgPolygon(_maxlen=4) self.indicator.set_stroke(width=0.001, color='red') self.indicator.set_fill('red') indicator_pin_radius = 0.05 self.indicator_pin = gui.SvgCircle(0,0.5,indicator_pin_radius) self.indicator_pin.set_fill('black') #the value signs scale = max_value-min_value radius_min = 0.4 radius_max = 0.5 for i in range(0,10): angle = math.pi/9*i #sign = gui.SvgLine(math.cos(angle)*radius_min, radius_max-math.sin(angle)*radius_min, math.cos(angle)*radius_max, radius_max-math.sin(angle)*radius_max) sign = gui.SvgLine(math.cos(angle)*(radius_min - 0.01 + 0.1*(i+1)/10), radius_max-math.sin(angle)*(radius_min - 0.01 + 0.1*(i+1)/10), math.cos(angle)*radius_max, radius_max-math.sin(angle)*radius_max) sign.set_stroke(0.01, 'black') self.append(sign) #subindicators value signs scale = max_value-min_value radius_min = 0.4 radius_max = 0.5 for i in range(0,100): angle = math.pi/99*i #sign = gui.SvgLine(math.cos(angle)*radius_min, radius_max-math.sin(angle)*radius_min, math.cos(angle)*radius_max, radius_max-math.sin(angle)*radius_max) sign = gui.SvgLine(math.cos(angle)*(radius_min - 0.01 + 0.1*(i+10)/100), radius_max-math.sin(angle)*(radius_min - 0.01 + 0.1*(i+10)/100), math.cos(angle)*radius_max, radius_max-math.sin(angle)*radius_max) sign.set_stroke(0.002, 'black') self.append(sign) font_size = 0.1 self.text_min_value = gui.SvgText(-radius_max, 0.5 + font_size + 0.01, str(min_value)) self.text_min_value.style['font-size'] = gui.to_pix(font_size) self.text_min_value.style['text-anchor'] = "start" self.text_max_value = 
gui.SvgText(radius_max, 0.5 + font_size + 0.01, str(max_value)) self.text_max_value.style['font-size'] = gui.to_pix(font_size) self.text_max_value.style['text-anchor'] = "end" self.text_actual_value = gui.SvgText(0, 0.5 + indicator_pin_radius + font_size + 0.01, str(max_value)) self.text_actual_value.style['font-size'] = gui.to_pix(font_size) self.text_actual_value.style['text-anchor'] = "middle" self.text_actual_value.style['font-weight'] = 'bold' self.min_value = min_value self.max_value = max_value self.append([self.indicator, self.indicator_pin, self.text_min_value, self.text_max_value, self.text_actual_value]) self.set_viewbox(-0.5, 0, 1, 0.70) self.value = self.min_value def set_value(self, value): if not self.get_app_instance(): return with self.get_app_instance().update_lock: value = float(value) #min value at left #max value at right #value to radians scale = self.max_value-self.min_value if scale==0.0: return relative_value = value - self.min_value angle = relative_value*math.pi/scale #inversion min at left angle = math.pi - angle radius = 0.5 self.indicator.add_coord(math.cos(angle)*radius, radius-math.sin(angle)*radius) self.indicator.add_coord(math.cos(angle+0.5)*0.04, radius-math.sin(angle+0.5)*0.04) #self.indicator.add_coord(0.02,0.4) self.indicator.add_coord(0,radius) self.indicator.add_coord(math.cos(angle-0.5)*0.04, radius-math.sin(angle-0.5)*0.04) if hasattr(self, "actual_value"): self.text_actual_value.set_text(str(value))
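# A minimal usage sketch, assuming a reachable EPICS IOC: the PV name
# 'TEST:STATUS' is a placeholder, and the remi address/port below are arbitrary
# example values. The widget subscribes to the PV itself, so the app only has
# to place it in a container.
class ExampleEpicsApp(remi.server.App):
    def main(self):
        container = gui.VBox(width=300, height=200)
        container.append(EPICSLed(epics_pv_name='TEST:STATUS', width=50, height=50))
        return container


if __name__ == '__main__':
    remi.server.start(ExampleEpicsApp, address='0.0.0.0', port=8081, start_browser=False)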
py
1a335963305a48cb1b24f6ebff4cacba6c97ab38
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Components.settings") try: from django.core.management import execute_from_command_line except ImportError: # The above import may fail for some other reason. Ensure that the # issue is really that Django is missing to avoid masking other # exceptions on Python 2. try: import django except ImportError: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) raise execute_from_command_line(sys.argv)
py
1a335a755dee6e3359bfc95e75c035aa3d634681
import random import string from functools import reduce from typing import TypeVar, Callable, Iterable, Any, Union, Tuple, Type, List as TList, Set from toolz.itertoolz import cons, groupby from amino import maybe, boolean, Path from amino.tc.base import ImplicitsMeta, Implicits from amino.func import curried, I, call_by_name, tailrec, TailrecResult from amino.util.string import safe_string A = TypeVar('A') B = TypeVar('B') class ListMeta(ImplicitsMeta): def __instancecheck__(self, instance: Any) -> bool: if type(instance) is list: return False else: return super().__instancecheck__(instance) def _rand_str(chars: str, num: int=10) -> str: return ''.join(random.choice(chars) for i in range(num)) class List(TList[A], Implicits, implicits=True, metaclass=ListMeta): def __init__(self, *elements: A) -> None: list.__init__(self, elements) def __getitem__(self, arg): # type: ignore s = super().__getitem__(arg) return List.wrap(s) if isinstance(arg, slice) else s @staticmethod def wrap(l: Iterable[B]) -> 'List[B]': return List(*list(l)) @staticmethod def range(*a: int) -> 'List[int]': return List.wrap(range(*a)) @staticmethod def random_string(num: int=10) -> str: chars = string.ascii_letters + string.digits return _rand_str(chars, num) @staticmethod def random_alpha(num: int=10) -> str: chars = string.ascii_letters return _rand_str(chars, num) @staticmethod def gen(num: int, f: Callable[[], 'List[A]']) -> 'List[A]': return List.range(num) // (lambda a: f()) @staticmethod def lines(data: str) -> 'List[str]': return List.wrap(data.splitlines()) def lift(self, index: int) -> 'maybe.Maybe[A]': return ( (maybe.Just(self[index]) if len(self) > index else maybe.Empty()) if index >= 0 else (maybe.Just(self[index]) if len(self) >= -index else maybe.Empty()) ) def lift_all(self, index: int, *indexes: int) -> 'maybe.Maybe[List[A]]': def folder(z: maybe.Maybe[List[A]], n: List[maybe.Maybe[A]]) -> maybe.Maybe[List[A]]: return n.ap(z / (lambda a: a.cat)) els = List.wrap(indexes) / self.lift init = self.lift(index) / List return els.fold_left(init)(folder) def foreach(self, f: Callable[[A], B]) -> None: for el in self: f(el) def forall(self, f: Callable[[A], bool]) -> 'boolean.Boolean': return boolean.Boolean(all(f(el) for el in self)) def contains(self, value: A) -> 'boolean.Boolean': return boolean.Boolean(value in self) def exists(self, f: Callable[[A], bool]) -> bool: return self.find(f).is_just @property def is_empty(self) -> 'boolean.Boolean': return boolean.Boolean(self.length == 0) empty = is_empty @property def nonempty(self) -> 'boolean.Boolean': return not self.empty @property def length(self) -> int: return len(self) @property def head(self) -> 'maybe.Maybe[A]': return self.lift(0) @property def last(self) -> 'maybe.Maybe[A]': return self.lift(-1) @property def init(self) -> 'maybe.Maybe[List[A]]': return maybe.Empty() if self.empty else maybe.Just(self[:-1]) @property def tail(self) -> 'maybe.Maybe[List[A]]': return maybe.Empty() if self.empty else maybe.Just(self[1:]) @property def uncons(self) -> 'maybe.Maybe[Tuple[A, List[A]]]': return self.head.product(self.tail) detach_head = uncons @property def detach_last(self) -> 'maybe.Maybe[Tuple[A, List[A]]]': return self.last.product(self.init) @property def distinct(self) -> 'List[A]': return self.distinct_by(I) def distinct_by(self, f: Callable[[A], B]) -> 'List[A]': seen: Set[B] = set() def pred(a: A) -> bool: v = f(a) if v in seen: return True else: seen.add(v) return False return List.wrap(x for x in self if not pred(x)) def add(self, other: 
TList[A]) -> 'List[A]': return List.wrap(list.__add__(self, other)) __add__ = add def without(self, el: A) -> 'List[A]': return self.filter(lambda a: a != el) __sub__ = without def split(self, f: Callable[[A], bool]) -> Tuple['List[A]', 'List[A]']: def splitter(z: Tuple[Tuple, Tuple], e: A) -> Tuple[Tuple, Tuple]: l, r = z return (l + (e,), r) if f(e) else (l, r + (e,)) l, r = reduce(splitter, self, ((), (),)) # type: ignore return List.wrap(l), List.wrap(r) def split_type(self, tpe: Union[Type, Tuple[Type[B]]]) -> Tuple['List[B]', 'List[A]']: return self.split(lambda a: isinstance(a, tpe)) def index_of(self, target: A) -> 'maybe.Maybe[int]': return self.index_where(lambda a: a == target) @property def reversed(self) -> 'List[A]': return Lists.wrap(reversed(self)) def mk_string(self, sep: str='') -> str: return sep.join(self / str) @property def join_lines(self) -> str: return self.mk_string('\n') @property def join_comma(self) -> str: return self.mk_string(', ') @property def join_tokens(self) -> str: return self.mk_string(' ') @property def join_dot(self) -> str: return self.mk_string('.') def cons(self, item: A) -> 'List[A]': return List.wrap(cons(item, self)) def cons_m(self, item: 'maybe.Maybe[A]') -> 'List[A]': return item / self.cons | self def cat(self, item: A) -> 'List[A]': return self + List(item) def cat_m(self, item: 'maybe.Maybe[A]') -> 'List[A]': return item / self.cat | self @property def transpose(self) -> 'List[List[A]]': return List.wrap(map(List.wrap, zip(*self))) # type: ignore def take(self, n: int) -> 'List[A]': return self[:n] def take_while(self, pred: Callable[[A], bool]) -> 'List[A]': index = self.index_where(lambda a: not pred(a)) return index / (lambda a: self[:a]) | self def drop(self, n: int) -> 'List[A]': return self[n:] def drop_while(self, pred: Callable[[A], bool]) -> 'List[A]': index = self.index_where(lambda a: not pred(a)) return index / (lambda a: self[a:]) | Nil def drop_while_or_self(self, pred: Callable[[A], bool]) -> 'List[A]': res = self.drop_while(pred) return self if res == Nil else res def drop_right(self, n: int) -> 'List[A]': return self.take(self.length - n) def remove_all(self, els: 'List[A]') -> 'List[A]': return self.filter_not(els.contains) def __repr__(self) -> str: return '{}({})'.format(self.__class__.__name__, ', '.join(map(repr, self))) def __str__(self) -> str: return '[{}]'.format(', '.join(map(safe_string, self))) def __hash__(self) -> int: return hash(tuple(self)) def sort_by(self, f: Callable[[A], bool], reverse: bool=False) -> 'List[A]': return List.wrap(sorted(self, key=f, reverse=reverse)) def sort(self, reverse: bool=False) -> 'List[A]': # type: ignore return self.sort_by(I, reverse) def replace_item(self, a: A, b: A) -> 'List[A]': return self.map(lambda c: b if c == a else c) @curried def replace_where(self, a: A, pred: Callable) -> 'List[A]': return self.map(lambda b: a if pred(b) else b) def __mul__(self, count: int) -> 'List[A]': return List.wrap(super().__mul__(count)) def group_by(self, f: Callable[[A], Any]) -> dict: from amino import Map return Map(groupby(f, self)).valmap(List.wrap) def slice(self, start: int, end: int) -> 'List[A]': return self[start:end] def indent(self, count: int) -> 'List[str]': ws = ' ' * count return self.map(lambda a: f'{ws}{a}') @property def rstrip(self) -> 'List[str]': return self / (lambda a: a.rstrip()) @property def strip_newlines(self) -> 'List[str]': return self / (lambda a: a.replace('\n', '')) chomp = strip_newlines @property def rstrip_newlines(self) -> 'List[str]': return self 
/ (lambda a: a.rstrip('\n')) def collect(self, f: Callable[[A], 'maybe.Maybe[B]']) -> 'List[B]': return self.flat_map(f) def modify_at(self, index: int, mod: Callable[[A], A]) -> 'maybe.Maybe[List[A]]': return self.lift(index).map(lambda a: self[:index].cat(mod(a)) + self[index + 1:]) class Lists: empty: List = List() @staticmethod def wrap(l: Iterable[B]) -> List[B]: return List(*list(l)) @staticmethod def range(*a: int) -> List[int]: return List.wrap(range(*a)) @staticmethod def random_string(num: int=10) -> str: chars = string.ascii_letters + string.digits return _rand_str(chars, num) @staticmethod def random_alpha(num: int=10) -> str: chars = string.ascii_letters return _rand_str(chars, num) @staticmethod def gen(num: int, f: Callable[[], List[A]]) -> List[A]: return List.range(num) // (lambda a: f()) @staticmethod def fill(num: int, value: A) -> List[A]: return Lists.range(num).replace(value) @staticmethod def lines(data: str) -> List[str]: return List.wrap(data.splitlines()) @staticmethod def split(data: str, splitter: str, maxsplit: int=-1) -> List[str]: return List.wrap(data.split(splitter, maxsplit)) @staticmethod def rsplit(data: str, splitter: str, maxsplit: int=-1) -> List[str]: return List.wrap(data.rsplit(splitter, maxsplit)) @staticmethod def tokens(data: str) -> List[str]: return Lists.split(data, ' ') @staticmethod @curried def iff(cond: bool, a: Union[A, Callable[[], A]]) -> List[A]: return List(call_by_name(a)) if cond else List() @staticmethod @curried def iff_l(cond: bool, a: Union[A, Callable[[], A]]) -> List[A]: return call_by_name(a) if cond else List() @staticmethod def file(path: Path) -> List[str]: return Lists.lines(path.read_text()) @staticmethod def find_str_matches(data: str, target: str) -> List[int]: @tailrec def find(current: int, result: List[int]) -> TailrecResult[List[int]]: match = data.find(target, current) return (False, result) if match == -1 else (True, (match + 1, result.cat(match))) return find(0, Nil) Nil = Lists.empty __all__ = ('List', 'Lists', 'Nil')
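# A tiny usage sketch relying only on methods defined above (Lists.range, cat,
# take, drop, head); the commented results follow List.__str__ as implemented
# in this module.
if __name__ == '__main__':
    xs = Lists.range(5).cat(10)
    print(xs)          # [0, 1, 2, 3, 4, 10]
    print(xs.take(3))  # [0, 1, 2]
    print(xs.drop(4))  # [4, 10]
    print(xs.head)     # the first element wrapped in a Maybe (Just(0))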
py
1a335aa769dd7019e68d6ddf1e3df197c1846e7b
''' Created on: 2015 Author: Mizael Martinez '''
py
1a335b66021f84b12f44209a0f5a177e71eaf470
# ------------------------------------------------------------------------------ # Imports # ------------------------------------------------------------------------------ import sys import os # ------------------------------------------------------------------------------ # Parse args # ------------------------------------------------------------------------------ if len(sys.argv) != 2: print 'Usage: $ python gen_aws_design.py ${AWS_HOME}/hdk/cl/examples/spatial_design' sys.exit(0) aws_dir = sys.argv[1] # ------------------------------------------------------------------------------ # Design # ------------------------------------------------------------------------------ # Step 1: Parse SpatialIP.v until we get to module Top and extract # args # We care about the following: # input clock, # input reset, # input [31:0] io_enable, # output [31:0] io_done, # input [31:0] io_scalarIns_0, # output [31:0] io_scalarOuts_0, # The first 4 are always the same, the last 2 can occur multiple times (_0, _1, _2, ..) and need to be counted design_dir = aws_dir + '/design/' top_src = open(design_dir + 'SpatialIP.v') state = 0 num_scalar_in = 0 num_scalar_out = 0 for line in top_src: if 'module SpatialIP(' in line: assert state == 0 state = 1 continue if state == 1: if 'io_scalarIns' in line: num_scalar_in += 1 elif 'io_scalarOuts' in line: num_scalar_out += 1 elif ');' in line: break top_src.close() # Step 2: Replace scalars in two files # cl_dram_dma.sv src = open(design_dir + 'cl_dram_dma.sv_TEMPLATE') dst = open(design_dir + 'cl_dram_dma.sv', 'w') for line in src: if '{{{SPATIAL_INSERT_input_argnum}}}' in line: new_lines = '' for argnum in range(num_scalar_in): line_with_replacements = line line_with_replacements = line_with_replacements.replace('{{{SPATIAL_INSERT_input_argnum}}}', str(argnum)) new_lines += line_with_replacements dst.write(new_lines) elif '{{{SPATIAL_INSERT_output_argnum}}}' in line: new_lines = '' for argnum in range(num_scalar_out): line_with_replacements = line line_with_replacements = line_with_replacements.replace('{{{SPATIAL_INSERT_output_argnum}}}', str(argnum)) new_lines += line_with_replacements dst.write(new_lines) else: dst.write(line) src.close() dst.close() # cl_ocl_slv.sv src = open(design_dir + 'cl_ocl_slv.sv_TEMPLATE') dst = open(design_dir + 'cl_ocl_slv.sv', 'w') for line in src: if '{{{SPATIAL_INSERT_input_argnum}}}' in line: new_lines = '' for argnum in range(num_scalar_in): line_with_replacements = line line_with_replacements = line_with_replacements.replace('{{{SPATIAL_INSERT_input_argnum}}}', str(argnum)) # Replace address of lower 32 bits hex_addr = str(hex(argnum*64 + 65536))[2:] line_with_replacements = line_with_replacements.replace('{{{SPATIAL_INSERT_input_argaddr_5h}}}', hex_addr[:-1]) # Replace address of higher 32 bits hex_addr_plus_0x20 = str(hex(argnum*64 + 32 + 65536))[2:] line_with_replacements = line_with_replacements.replace('{{{SPATIAL_INSERT_input_argaddr_5h_plus_0x20}}}', hex_addr_plus_0x20[:-1]) new_lines += line_with_replacements dst.write(new_lines) elif '{{{SPATIAL_INSERT_output_argnum}}}' in line: new_lines = '' for argnum in range(num_scalar_out): line_with_replacements = line line_with_replacements = line_with_replacements.replace('{{{SPATIAL_INSERT_output_argnum}}}', str(argnum)) # Replace address of lower 32 bits hex_addr = str(hex(argnum*64 + 65536*8))[2:] line_with_replacements = line_with_replacements.replace('{{{SPATIAL_INSERT_output_argaddr_5h}}}', hex_addr[:-1]) # Replace address of higher 32 bits hex_addr_plus_0x20 = 
str(hex(argnum*64 + 32 + 65536*8))[2:] line_with_replacements = line_with_replacements.replace('{{{SPATIAL_INSERT_output_argaddr_5h_plus_0x20}}}', hex_addr_plus_0x20[:-1]) new_lines += line_with_replacements dst.write(new_lines) else: dst.write(line) src.close() dst.close() # Step 3: Use URAMs for SRAM size > 1024 import re p = re.compile(r'WORDS\((\d+)\)') # Pattern to match WORDS(#) src = open(design_dir + 'SpatialIP.v') dst = open(design_dir + 'SpatialIP.v_copy', 'w') for line in src: if 'SRAMVerilogAWS #' in line: # This instantiates an SRAM, so check its size m = p.search(line) assert m num_words = int(m.group(1)) assert num_words > 0 if num_words > 1024:#3136: dst.write(line.replace('SRAMVerilogAWS #', 'SRAMVerilogAWS_U #')) else: dst.write(line) else: dst.write(line) src.close() dst.close() os.system('cp -f ' + design_dir + 'SpatialIP.v ' + design_dir + 'SpatialIP.v.orig') os.system('mv -f ' + design_dir + 'SpatialIP.v_copy ' + design_dir + 'SpatialIP.v')
py
1a335bcb03f4037712a7c85f38025b13fa0362fb
import importlib import datetime as dt import xml.etree.ElementTree as ET from .session import session # Utility Objects and Functions NS = { 'ft': 'http://www.epo.org/fulltext', 'ops': 'http://ops.epo.org', 'ex': 'http://www.epo.org/exchange' } def etree_els_to_text(els) -> str: segments = [' '.join(e.itertext()) for e in els] return "\n".join(segments) def docid_to_inpadoc(doc, model_name='Inpadoc'): klass = getattr(importlib.import_module('patent_client.epo.inpadoc.model'), model_name) date = doc.find('./ex:date', NS) if date is not None: date = dt.datetime.strptime(date.text, '%Y%m%d').date() return klass( doc_type=doc.attrib['document-id-type'], country=doc.find('./ex:country', NS).text, number=doc.find('./ex:doc-number', NS).text, kind_code=doc.find('./ex:kind', NS).text, date=date, ) def parse_family_member(member): family_class = getattr(importlib.import_module('patent_client.epo.inpadoc.model'), 'InpadocFamilyMember') priority_claim_class = getattr(importlib.import_module('patent_client.epo.inpadoc.model'), 'InpadocFamilyPriorityClaim') pub = member.find('.//ex:publication-reference/ex:document-id[@document-id-type="docdb"]', NS) app = member.find('.//ex:application-reference/ex:document-id[@document-id-type="docdb"]', NS) family_id = int(member.attrib['family-id']) priority = member.findall('.//ex:priority-claim', NS) priority_claims = list() for c in priority: doc = docid_to_inpadoc(c.find('.//ex:document-id[@document-id-type="docdb"]', NS), 'Inpadoc') active = c.find('.//ex:priority-active-indicator', NS).text == 'YES' link_type = c.find('.//ex:priority-linkage-type', NS) link_type = link_type.text if link_type is not None else link_type seq = c.attrib['sequence'] kind = c.attrib.get('kind', None) priority_claims.append(priority_claim_class(**{ 'seq': int(seq), 'kind': kind, 'link_type': link_type, 'active': active, 'doc': doc, })) return family_class(**{ 'publication': docid_to_inpadoc(pub, 'InpadocPublication'), 'application': docid_to_inpadoc(app, 'InpadocApplication'), 'priority_claims': priority_claims, 'family_id': family_id, }) # Lookup Functions def lookup_claims(): @property def get(self) -> str: url = f"http://ops.epo.org/3.2/rest-services/published-data/publication/{self.doc_type}/{self.num}/claims" response = session.get(url) claim_els = ET.fromstring(response.text).findall('.//ft:claim-text', NS) return etree_els_to_text(claim_els) return get def lookup_description(): @property def get(self) -> str: url = f"http://ops.epo.org/3.2/rest-services/published-data/publication/{self.doc_type}/{self.num}/description" response = session.get(url) description_els = ET.fromstring(response.text).findall('.//ft:p', NS) return etree_els_to_text(description_els) return get def lookup_family(): @property def get(self): url = f"http://ops.epo.org/3.2/rest-services/family/publication/{self.doc_type}/{self.num}" response = session.get(url) members = ET.fromstring(response.text).findall('.//ops:family-member', NS) return [parse_family_member(m) for m in members] return get
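# The lookup_* factories above produce property objects meant to be attached to
# the Inpadoc model classes in patent_client.epo.inpadoc.model (which expose
# doc_type and num). The commented class below is only an illustrative sketch
# of that pattern with placeholder values, not the real model definition.
#
# class InpadocPublication:
#     doc_type = 'epodoc'      # placeholder reference format
#     num = 'EP1000000'        # placeholder document number
#     claims = lookup_claims()
#     description = lookup_description()
#     family = lookup_family()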
py
1a335f1214995c63cd88490411bd9c139b6044d3
from .version import get_version __version__ = get_version()
py
1a335fc2ce6061ea498e67ace27e349684f63e49
"""Support for Anthem Network Receivers and Processors.""" import logging import voluptuous as vol from homeassistant.components.media_player import ( MediaPlayerDevice, PLATFORM_SCHEMA) from homeassistant.components.media_player.const import ( SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PORT, EVENT_HOMEASSISTANT_STOP, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DOMAIN = 'anthemav' DEFAULT_PORT = 14999 SUPPORT_ANTHEMAV = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, }) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up our socket to the AVR.""" import anthemav host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) device = None _LOGGER.info("Provisioning Anthem AVR device at %s:%d", host, port) def async_anthemav_update_callback(message): """Receive notification from transport that new data exists.""" _LOGGER.info("Received update callback from AVR: %s", message) hass.async_create_task(device.async_update_ha_state()) avr = await anthemav.Connection.create( host=host, port=port, loop=hass.loop, update_callback=async_anthemav_update_callback) device = AnthemAVR(avr, name) _LOGGER.debug("dump_devicedata: %s", device.dump_avrdata) _LOGGER.debug("dump_conndata: %s", avr.dump_conndata) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, device.avr.close) async_add_entities([device]) class AnthemAVR(MediaPlayerDevice): """Entity reading values from Anthem AVR protocol.""" def __init__(self, avr, name): """Initialize entity with transport.""" super().__init__() self.avr = avr self._name = name def _lookup(self, propname, dval=None): return getattr(self.avr.protocol, propname, dval) @property def supported_features(self): """Flag media player features that are supported.""" return SUPPORT_ANTHEMAV @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return name of device.""" return self._name or self._lookup('model') @property def state(self): """Return state of power on/off.""" pwrstate = self._lookup('power') if pwrstate is True: return STATE_ON if pwrstate is False: return STATE_OFF return None @property def is_volume_muted(self): """Return boolean reflecting mute state on device.""" return self._lookup('mute', False) @property def volume_level(self): """Return volume level from 0 to 1.""" return self._lookup('volume_as_percentage', 0.0) @property def media_title(self): """Return current input name (closest we have to media title).""" return self._lookup('input_name', 'No Source') @property def app_name(self): """Return details about current video and audio stream.""" return self._lookup('video_input_resolution_text', '') + ' ' \ + self._lookup('audio_input_name', '') @property def source(self): """Return currently selected input.""" return self._lookup('input_name', "Unknown") @property def source_list(self): """Return all active, configured inputs.""" return self._lookup('input_list', ["Unknown"]) async def async_select_source(self, source): """Change AVR to the designated source (by name).""" self._update_avr('input_name', source) async def async_turn_off(self): 
"""Turn AVR power off.""" self._update_avr('power', False) async def async_turn_on(self): """Turn AVR power on.""" self._update_avr('power', True) async def async_set_volume_level(self, volume): """Set AVR volume (0 to 1).""" self._update_avr('volume_as_percentage', volume) async def async_mute_volume(self, mute): """Engage AVR mute.""" self._update_avr('mute', mute) def _update_avr(self, propname, value): """Update a property in the AVR.""" _LOGGER.info( "Sending command to AVR: set %s to %s", propname, str(value)) setattr(self.avr.protocol, propname, value) @property def dump_avrdata(self): """Return state of avr object for debugging forensics.""" attrs = vars(self) return( 'dump_avrdata: ' + ', '.join('%s: %s' % item for item in attrs.items()))
py
1a3360ccf50c3e8dca9476bcc5a77bebfb9ab526
import os import string import random import tabulate import numpy as np import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Embedding, GRU from tensorflow.keras.losses import sparse_categorical_crossentropy from tensorflow.keras.models import load_model vocab = sorted(set(string.printable)) char_to_ind = {u: i for i, u in enumerate(vocab)} ind_to_char = np.array(vocab) class Generator(object): def __init__(self): self.model = None self.vocab = sorted(set(string.printable)) self.vocab_size = len(self.vocab) self.hparams = {'rnn_neurons' : 256, 'embed_dim' : 64, 'learning_rate' : 1e-4, 'dropout' : 0.3} def _createModel(self, batch_size): model = Sequential() model.add(Embedding(self.vocab_size, self.hparams['embed_dim'],batch_input_shape=[batch_size, None])) model.add(GRU(self.hparams['rnn_neurons'] ,return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform', dropout=self.hparams['dropout'])) model.add(GRU(self.hparams['rnn_neurons'] ,return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform', dropout=self.hparams['dropout'])) model.add(Dense(self.vocab_size)) opt = tf.keras.optimizers.Adam(learning_rate=self.hparams['learning_rate']) model.compile(optimizer=opt, loss=self._sparse_cat_loss) self.model = model def _sparse_cat_loss(self, y_true, y_pred): return sparse_categorical_crossentropy(y_true, y_pred, from_logits=True) def load_weights(self, weight_file_path): ''' Constructs the model and loads the weights Parameters: weight_file_path (str): Path to weights location Returns: None ''' #if os.path.exists(weight_file_path): self._createModel(batch_size = 1) self.model.load_weights(weight_file_path) self.model.build(tf.TensorShape([1, None])) def train(self, data, epochs=1, verbose=1, save_at=5): ''' Trains the model for a given number of epochs Parameters: epochs (int) : number of epochs to train on verbose (bool) : to print loss and epoch number of not to save_at (int) : to save at ever n th epoch Returns: None ''' self._createModel(batch_size = 128) for epoch in range(1, epochs + 1): print('Epoch' + str(epoch) + '/' + str(epochs) ) self.model.fit(data, epochs=1, verbose=verbose) if (epoch + 1) % save_at == 0: self.model.save('model-' + str(epoch) + '-epochs-256-neurons.h5') def predict(self, start_seed, gen_size=100, temp=random.uniform(0, 1)): ''' Generates further texts according to the seed text Parameters: start_seed (str) : seed that model will use to generate further texts gen_size (int) : number of characters to generate 700 - 1000 are the most ideal ones Returns: None ''' if self.model is None: raise ValueError('Model Object cannot be NoneType') num_generate = gen_size input_eval = [char_to_ind[s] for s in start_seed] input_eval = tf.expand_dims(input_eval, 0) text_generated = [] temperature = temp self.model.reset_states() for _ in range(num_generate): predictions = self.model(input_eval) predictions = tf.squeeze(predictions, 0) predictions = predictions / temperature predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy() input_eval = tf.expand_dims([predicted_id], 0) text_generated.append(ind_to_char[predicted_id]) return (start_seed + ''.join(text_generated)) def hyperparams(self): print('Hyper Parameters') print('+--------------------------+') for key, value in self.hparams.items(): print("|{: <13} | {: >10}|".format(key, value)) print('+--------------------------+') def summary(self): self.model.summary() @property def __doc__(self): return ''' 
        Generator object can construct the model, save the weights, load the weights,
        train the model, and make predictions
        ---------------------------------------------------
        Training example :
        model = Generator()                                      # creating an instance of model
        model.train(dataset, epochs = 5, verbose=1, save_at=1)   # training the model
        ----------------------------------------------------
        Continue training from a saved weights file :
        model = Generator()                                      # creating an instance of model
        model.load_weights('model-3-epochs.h5')                  # loading the weights
        model.train(dataset, epochs = 5, verbose=1, save_at=1)   # training the model
        -----------------------------------------------------
        Prediction example :
        model = Generator()                                      # creating an instance of model
        model.load_weights('model-10-epochs.h5')                 # loading the weights
        print(model.predict('hello'))                            # making prediction and printing
        -----------------------------------------------------
        '''
py
1a33634c3b33dc9a3c754c75d3919f14cd5897a9
# -*- coding: utf-8 -*-
"""

    mslib.msui.mss_qt
    ~~~~~~~~~~~~~~~~~

    This module provides Qt helpers for the MSUI: file picker dialogs
    (Qt based or fs_filepicker based, selectable via configuration),
    QVariant conversion helpers, dynamic import of the generated Qt UI
    modules, and a global exception hook.

    This file is part of mss.

    :copyright: Copyright 2017-2018 Joern Ungermann, Reimar Bauer
    :copyright: Copyright 2017-2020 by the mss team, see AUTHORS.
    :license: APACHE-2.0, see LICENSE for details.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

import importlib
import logging
import os
import platform
import sys
import traceback

from fslib.fs_filepicker import getSaveFileName, getOpenFileName, getExistingDirectory
from PyQt5 import QtCore, QtWidgets  # noqa

from mslib.utils import config_loader, FatalUserError


def get_open_filename_qt(*args):
    filename = QtWidgets.QFileDialog.getOpenFileName(*args)
    return filename[0] if isinstance(filename, tuple) else str(filename)


def get_open_filenames_qt(*args):
    """
    To select multiple files simultaneously
    """
    filenames = QtWidgets.QFileDialog.getOpenFileNames(*args)
    return filenames[0] if isinstance(filenames, tuple) else str(filenames)


def get_save_filename_qt(*args):
    filename = QtWidgets.QFileDialog.getSaveFileName(*args)
    return filename[0] if isinstance(filename, tuple) else str(filename)


def get_existing_directory_qt(*args):
    dirname = QtWidgets.QFileDialog.getExistingDirectory(*args)
    return dirname[0] if isinstance(dirname, tuple) else str(dirname)


def get_pickertype(tag, typ):
    default = config_loader(dataset="filepicker_default")
    if typ is None:
        if tag is None:
            typ = default
        else:
            typ = config_loader(dataset=tag)
    return typ


def get_open_filename(parent, title, dirname, filt, pickertag=None, pickertype=None):
    pickertype = get_pickertype(pickertag, pickertype)
    if pickertype == "fs":
        # Pass the caller supplied dialog title instead of a hard coded one.
        filename = getOpenFileName(parent, dirname, filt, title=title)
    elif pickertype in ["qt", "default"]:
        filename = get_open_filename_qt(parent, title, os.path.expanduser(dirname), filt)
    else:
        raise FatalUserError(f"Unknown file picker type '{pickertype}'.")
    logging.debug("Selected '%s'", filename)
    if filename == "":
        filename = None
    return filename


def get_open_filenames(parent, title, dirname, filt, pickertag=None, pickertype=None):
    """
    Opens multiple files simultaneously
    Currently implemented only in kmloverlay_dockwidget.py
    """
    pickertype = get_pickertype(pickertag, pickertype)
    if pickertype in ["qt", "default"]:
        filename = get_open_filenames_qt(parent, title, os.path.expanduser(dirname), filt)
    else:
        raise FatalUserError(f"Unknown file picker type '{pickertype}'.")
    logging.debug("Selected '%s'", filename)
    if filename == "":
        filename = None
    return filename


def get_save_filename(parent, title, filename, filt, pickertag=None, pickertype=None):
    pickertype = get_pickertype(pickertag, pickertype)
    if pickertype == "fs":
        dirname, filename = os.path.split(filename)
        filename = getSaveFileName(
            parent, dirname, filt, title=title, default_filename=filename, show_save_action=True)
    elif pickertype in ["qt", "default"]:
        filename = get_save_filename_qt(parent, title, os.path.expanduser(filename), filt)
    else:
        raise FatalUserError(f"Unknown file picker type '{pickertype}'.")
    logging.debug("Selected '%s'", filename)
    if filename == "":
        filename = None
    return filename


def get_existing_directory(parent, title, defaultdir, pickertag=None, pickertype=None):
    pickertype = get_pickertype(pickertag, pickertype)
    if pickertype == "fs":
        dirname = getExistingDirectory(parent, title=title, fs_url=defaultdir)[0]
    elif pickertype in ["qt", "default"]:
        dirname = get_existing_directory_qt(parent, title, defaultdir)
    else:
        raise FatalUserError(f"Unknown file picker type '{pickertype}'.")
    logging.debug("Selected '%s'", dirname)
    if dirname == "":
        dirname = None
    return dirname


def variant_to_string(variant):
    if isinstance(variant, QtCore.QVariant):
        return str(variant.value())
    return str(variant)


def variant_to_float(variant, locale=QtCore.QLocale()):
    if isinstance(variant, QtCore.QVariant):
        value = variant.value()
    else:
        value = variant

    if isinstance(value, (int, float)):
        return value
    try:
        float_value, ok = locale.toDouble(value)
        if not ok:
            raise ValueError
    except TypeError:  # neither float nor string, try Python conversion
        logging.error("Unexpected type in float conversion: %s=%s", type(value), value)
        float_value = float(value)
    return float_value


# Import all Dialogues from the proper module directory.
for mod in [
        "ui_about_dialog",
        "ui_hexagon_dockwidget",
        "ui_kmloverlay_dockwidget",
        "ui_customize_kml",
        "ui_mainwindow",
        "ui_performance_settings",
        "ui_remotesensing_dockwidget",
        "ui_satellite_dockwidget",
        "ui_sideview_options",
        "ui_sideview_window",
        "ui_tableview_window",
        "ui_topview_mapappearance",
        "ui_topview_window",
        "ui_wms_capabilities",
        "ui_wms_dockwidget",
        "ui_wms_password_dialog"]:
    globals()[mod] = importlib.import_module("mslib.msui.qt5." + mod)

# to store config by QSettings
QtCore.QCoreApplication.setOrganizationName("mss")


# PyQt5 silently aborts on a Python Exception
def excepthook(type_, value, traceback_):
    """
    This dumps the error to console, logging (i.e. logfile), and
    tries to open a MessageBox for GUI users.
    """
    import mslib
    import mslib.utils
    tb = "".join(traceback.format_exception(type_, value, traceback_))
    traceback.print_exception(type_, value, traceback_)
    logging.critical("MSS Version: %s", mslib.__version__)
    logging.critical("Python Version: %s", sys.version)
    logging.critical("Platform: %s (%s)", platform.platform(), platform.architecture())
    logging.critical("Fatal error: %s", tb)

    if type_ is mslib.utils.FatalUserError:
        QtWidgets.QMessageBox.critical(
            None, "fatal error",
            f"Fatal user error in MSS {mslib.__version__} on {platform.platform()}\n"
            f"Python {sys.version}\n"
            f"\n"
            f"{value}")
    else:
        QtWidgets.QMessageBox.critical(
            None, "fatal error",
            f"Fatal error in MSS {mslib.__version__} on {platform.platform()}\n"
            f"Python {sys.version}\n"
            f"\n"
            f"Please report bugs in MSS to https://github.com/Open-MSS/MSS\n"
            f"\n"
            f"Information about the fatal error:\n"
            f"\n"
            f"{tb}")


sys.excepthook = excepthook
py
1a336423f3a96fe6c7d4cf8f0d607d2694e08a8b
# -*- coding: utf-8 -*-

import collections

c1 = collections.Counter(['a', 'b', 'c', 'a', 'b', 'b'])
c2 = collections.Counter('alphabet')

print('C1:', c1)
print('C2:', c2)

print('\nCombined counts:')
print(c1 + c2)

print('\nSubtraction:')
print(c1 - c2)

print('\nIntersection (taking positive minimums):')
print(c1 & c2)

print('\nUnion (taking maximums):')
print(c1 | c2)
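# Notes on the Counter arithmetic demonstrated above:
# - c1 + c2 adds the counts of each element.
# - c1 - c2 subtracts counts but keeps only elements whose result is
#   positive (zero and negative counts are dropped).
# - c1 & c2 keeps elements present in both, with the minimum of the counts.
# - c1 | c2 keeps elements present in either, with the maximum of the counts.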
py
1a3364a2dcb9fc49e737107269692312cdeb5127
#!/usr/bin/env python

import sys
import Bio
from Bio import SeqIO, SeqFeature
from Bio.SeqRecord import SeqRecord
import os

# Copyright(C) 2009 Iddo Friedberg & Ian MC Fleming
# Released under Biopython license. http://www.biopython.org/DIST/LICENSE
# Do not remove this comment


def get_interregions(genbank_path, intergene_length=1):
    seq_record = SeqIO.parse(open(genbank_path), "genbank").__next__()
    cds_list_plus = []
    cds_list_minus = []
    intergenic_records = []
    # Loop over the genome file, get the CDS features on each of the strands
    for feature in seq_record.features:
        if feature.type == 'CDS':
            mystart = feature.location._start.position
            myend = feature.location._end.position
            if feature.strand == -1:
                cds_list_minus.append((mystart, myend, -1))
            elif feature.strand == 1:
                cds_list_plus.append((mystart, myend, 1))
            else:
                sys.stderr.write("No strand indicated %d-%d. Assuming +\n" %
                                 (mystart, myend))
                cds_list_plus.append((mystart, myend, 1))

    for i, pospair in enumerate(cds_list_plus[1:]):
        # Compare current start position to previous end position
        last_end = cds_list_plus[i][1]
        this_start = pospair[0]
        strand = pospair[2]
        if this_start - last_end >= intergene_length:
            intergene_seq = seq_record.seq[last_end:this_start]
            strand_string = "+"
            intergenic_records.append(
                SeqRecord(intergene_seq,
                          id="%s-ign-%d" % (seq_record.name, i),
                          description="%s %d-%d %s" % (seq_record.name,
                                                       last_end + 1,
                                                       this_start,
                                                       strand_string)))
    for i, pospair in enumerate(cds_list_minus[1:]):
        last_end = cds_list_minus[i][1]
        this_start = pospair[0]
        strand = pospair[2]
        if this_start - last_end >= intergene_length:
            intergene_seq = seq_record.seq[last_end:this_start]
            strand_string = "-"
            intergenic_records.append(
                SeqRecord(intergene_seq,
                          id="%s-ign-%d" % (seq_record.name, i),
                          description="%s %d-%d %s" % (seq_record.name,
                                                       last_end + 1,
                                                       this_start,
                                                       strand_string)))
    outpath = os.path.splitext(os.path.basename(genbank_path))[0] + "_ign.fasta"
    SeqIO.write(intergenic_records, open(outpath, "w"), "fasta")


if __name__ == '__main__':
    if len(sys.argv) == 2:
        get_interregions(sys.argv[1])
    elif len(sys.argv) == 3:
        get_interregions(sys.argv[1], int(sys.argv[2]))
    else:
        print("Usage: get_intergenic.py gb_file [intergenic_length]")
        sys.exit(0)
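# Illustrative invocation (the GenBank file name below is a placeholder, not
# shipped with this script):
#
#   python get_intergenic.py NC_000913.gb 100
#
# This writes every intergenic stretch of at least 100 bp, computed per
# strand, to "NC_000913_ign.fasta" in the current working directory.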
py
1a3364d277dd60c64da4170859bcfddd7dbb2f0c
#!/usr/bin/env python3
"""Discord bot for organizing PUGs (pick-up games).
Built for Neotokyo, but should work for any two-team game with even number
of players total.

Usage:
  Commands:
    Commands are prefixed with a character defined by the config value
    "NTBOT_CMD_PREFIX", by default "!", so the command pug becomes "!pug"
    in the Discord chat, and so on.

    - clearpuggers — Empty the PUG queue. Command access can be restricted
      by role(s) with the config value NTBOT_PUG_ADMIN_ROLES.

    - ping — Bot will simply respond with "Pong". Use to test if the bot is
      still online and responsive.

    - ping_puggers — Ping all the players currently in the PUG queue. Can be
      used to manually organize games with smaller than expected number of
      players. Expects a message after the command, eg:
      "!ping_puggers Play 4v4?"

    - pug — Join the PUG queue if there is room.

    - puggers — List players currently in the PUG queue.

    - scramble — Suggest randomly scrambled teams for the last full PUG for
      balancing reasons. Can be repeated until a satisfactory scramble is
      reached.

    - unpug — Leave the PUG queue.

  Config values:
    The config values have been documented as comments in the config.yml
    file itself. For more information, please see the repository at:
    https://github.com/Rainyan/discord-bot-ntpug
"""

from ast import literal_eval
import asyncio
from datetime import datetime, timedelta, timezone
import os
import time
import random

import discord
from discord.ext import commands, tasks
import pendulum
from strictyaml import load, Bool, EmptyList, Float, Int, Map, Seq, Str
from strictyaml.ruamel.comments import CommentedSeq


# May encounter breaking changes otherwise
# NOTE: Discord API "decomissions" are scheduled for April 30, 2022:
# https://github.com/discord/discord-api-docs/discussions/4510
# Probably have to upgrade to pycord 2.X dev branch, or
# some original discord.py project equivalent whenever it releases.
assert discord.version_info.major == 1 and discord.version_info.minor == 7

SCRIPT_NAME = "NT Pug Bot"
SCRIPT_VERSION = "0.14.3"

CFG_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                        "config.yml")
assert os.path.isfile(CFG_PATH)
with open(file=CFG_PATH, mode="r", encoding="utf-8") as f_config:
    YAML_CFG_SCHEMA = Map({
        "NTBOT_SECRET_TOKEN": Str(),
        "NTBOT_CMD_PREFIX": Str(),
        "NTBOT_PUG_CHANNEL": Str(),
        "NTBOT_PLAYERS_REQUIRED_TOTAL": Int(),
        "NTBOT_DEBUG_ALLOW_REQUEUE": Bool(),
        "NTBOT_POLLING_INTERVAL_SECS": Int(),
        "NTBOT_PRESENCE_INTERVAL_SECS": Int(),
        "NTBOT_PUGGER_ROLE": Str(),
        "NTBOT_PUGGER_ROLE_PING_THRESHOLD": Float(),
        "NTBOT_PUGGER_ROLE_PING_MIN_INTERVAL_HOURS": Float(),
        "NTBOT_PUG_ADMIN_ROLES": Seq(Str()) | EmptyList(),
        "NTBOT_IDLE_THRESHOLD_HOURS": Int(),
        "NTBOT_PING_PUGGERS_COOLDOWN_SECS": Float(),
    })
    CFG = load(f_config.read(), YAML_CFG_SCHEMA)
assert CFG is not None


def cfg(key):
    """Returns a bot config value from environment variable or config file,
    in that order. If using an env var, its format has to be constructible
    to the type determined by the config file's strictyaml schema.
    """
    if os.environ.get(key):
        ret_type = type(CFG[key].value)
        if ret_type == CommentedSeq:
            return CFG[key]
        return ret_type(literal_eval(os.environ.get(key)))
    return CFG[key].value


bot = commands.Bot(command_prefix=cfg("NTBOT_CMD_PREFIX"))
NUM_PLAYERS_REQUIRED = cfg("NTBOT_PLAYERS_REQUIRED_TOTAL")
assert NUM_PLAYERS_REQUIRED > 0, "Need positive number of players"
assert NUM_PLAYERS_REQUIRED % 2 == 0, "Need even number of players"
DEBUG_ALLOW_REQUEUE = cfg("NTBOT_DEBUG_ALLOW_REQUEUE")
PUG_CHANNEL_NAME = cfg("NTBOT_PUG_CHANNEL")
BOT_SECRET_TOKEN = cfg("NTBOT_SECRET_TOKEN")
assert 0 <= cfg("NTBOT_PUGGER_ROLE_PING_THRESHOLD") <= 1
PUGGER_ROLE = cfg("NTBOT_PUGGER_ROLE")
assert len(PUGGER_ROLE) > 0

# This is a variable because this text is used for detecting previous PUGs
# when restoring status during restart.
PUG_READY_TITLE = "**PUG is now ready!**"

print(f"Now running {SCRIPT_NAME} v.{SCRIPT_VERSION}", flush=True)


class PugStatus():
    """Object for containing and operating on one Discord server's PUG
    information.
    """
    # pylint: disable=too-many-instance-attributes
    # This might need revisiting, but deal with it for now.
    def __init__(self, guild_channel, players_required=NUM_PLAYERS_REQUIRED,
                 guild_roles=None):
        self.guild_roles = [] if guild_roles is None else guild_roles
        self.guild_channel = guild_channel

        self.jin_players = []
        self.nsf_players = []
        self.prev_puggers = []

        self.players_required_total = players_required
        self.players_per_team = int(self.players_required_total / 2)

        self.last_changed_presence = 0
        self.last_presence = None

        self.lock = asyncio.Lock()

        self.last_role_ping = None

    async def reset(self):
        """Stores the previous puggers, and then resets current pugger queue.
        """
        async with self.lock:
            self.prev_puggers = self.jin_players + self.nsf_players
            self.jin_players.clear()
            self.nsf_players.clear()

    async def player_join(self, player, team=None):
        """If there is enough room in this PUG queue, assigns this player
        to a random team to wait in, until the PUG is ready to be started.
        The specific team rosters can later be shuffled by a !scramble.
        """
        async with self.lock:
            if not DEBUG_ALLOW_REQUEUE and \
                    (player in self.jin_players or player in self.nsf_players):
                return False, (f"{player.mention} You are already queued! "
                               "If you wanted to un-PUG, please use **"
                               f"{bot.command_prefix}unpug** "
                               "instead.")
            if team is None:
                team = random.randint(0, 1)  # flip a coin between jin/nsf
            if team == 0:
                if len(self.jin_players) < self.players_per_team:
                    self.jin_players.append(player)
                    return True, ""
            if len(self.nsf_players) < self.players_per_team:
                self.nsf_players.append(player)
                return True, ""
            return False, (f"{player.mention} Sorry, this PUG is currently "
                           "full!")

    async def reload_puggers(self):
        """Iterate PUG channel's recent message history to figure out who
        should be pugged. This is used both for restoring puggers after a
        bot restart, but also for dropping inactive players from the queue
        after inactivity of "NTBOT_IDLE_THRESHOLD_HOURS" period.
        """
        limit_hrs = cfg("NTBOT_IDLE_THRESHOLD_HOURS")
        assert limit_hrs > 0
        after = pendulum.now().subtract(hours=limit_hrs)
        # Because Pycord 1.7.3 wants non timezone aware "after" date.
        after = datetime.fromisoformat(after.in_timezone("UTC").isoformat())
        after = after.replace(tzinfo=None)

        def is_cmd(msg, cmd):
            """Predicate for whether message equals a specific PUG command.
            """
            return msg.content == f"{bot.command_prefix}{cmd}"

        def is_pug_start(msg):
            """Predicate for whether a message signals PUG start.
            """
            return msg.author.bot and msg.content.startswith(PUG_READY_TITLE)

        backup_nsf = self.nsf_players.copy()
        backup_jin = self.jin_players.copy()
        backup_prev = self.prev_puggers.copy()

        try:
            # First reset the PUG queue, and then replay the pug/unpug traffic
            # within the acceptable "restore_puggers_limit_hours" history
            # range.
            await self.reset()
            # We remove the default max retrieved messages history limit
            # because we need to always retrieve the full order of events
            # here. This can be a slow operation if the channel is heavily
            # congested within the "now-after" search range, but it's
            # acceptable here because this code only runs on bot init, and
            # then once per clear_inactive_puggers() task loop period, which
            # is at most once per hour.
            async for msg in self.guild_channel.history(limit=None,
                                                        after=after,
                                                        oldest_first=True).\
                    filter(lambda msg: any((is_cmd(msg, "pug"),
                                            is_cmd(msg, "unpug"),
                                            is_pug_start(msg)))):
                if is_pug_start(msg):
                    await self.reset()
                elif is_cmd(msg, "pug"):
                    await self.player_join(msg.author)
                else:
                    await self.player_leave(msg.author)
        # Discord frequently HTTP 500's, so need to have pug queue backups.
        # We can also hit a HTTP 429 here, which might be a pycord bug(?)
        # as I don't think we're being unreasonable with the history range.
        except discord.errors.HTTPException as err:
            self.nsf_players = backup_nsf.copy()
            self.jin_players = backup_jin.copy()
            self.prev_puggers = backup_prev.copy()
            raise err

    async def player_leave(self, player):
        """Removes a player from the pugger queue if they were in it.
        """
        async with self.lock:
            num_before = self.num_queued()
            self.jin_players = [p for p in self.jin_players if p != player]
            self.nsf_players = [p for p in self.nsf_players if p != player]
            num_after = self.num_queued()

            left_queue = (num_after != num_before)
            if left_queue:
                return True, ""
            return False, (f"{player.mention} You are not currently in the "
                           "PUG queue")

    def num_queued(self):
        """Returns the number of puggers currently in the PUG queue.
        """
        return len(self.jin_players) + len(self.nsf_players)

    def num_expected(self):
        """Returns the number of puggers expected, total, to start a PUG.
        """
        return self.players_required_total

    def num_more_needed(self):
        """Returns how many more puggers are needed to start a PUG.
        """
        return max(0, self.num_expected() - self.num_queued())

    def is_full(self):
        """Whether the PUG queue is currently full or not.
        """
        return self.num_queued() >= self.num_expected()

    async def start_pug(self):
        """Starts a PUG match.
        """
        async with self.lock:
            if len(self.jin_players) == 0 or len(self.nsf_players) == 0:
                await self.reset()
                return False, "Error: team was empty"
            msg = f"{PUG_READY_TITLE}\n"
            msg += "_Jinrai players:_\n"
            for player in self.jin_players:
                msg += f"{player.mention}, "
            msg = msg[:-2]  # trailing ", "
            msg += "\n_NSF players:_\n"
            for player in self.nsf_players:
                msg += f"{player.mention}, "
            msg = msg[:-2]  # trailing ", "
            msg += ("\n\nTeams unbalanced? Use **"
                    f"{bot.command_prefix}scramble** to suggest new "
                    "random teams.")
            return True, msg

    async def update_presence(self):
        """Updates the bot's status message ("presence").
        This is used for displaying things like the PUG queue status.
        """
        async with self.lock:
            delta_time = int(time.time()) - self.last_changed_presence
            if delta_time < cfg("NTBOT_PRESENCE_INTERVAL_SECS") + 2:
                return

            presence = self.last_presence
            if presence is None:
                presence = {
                    "activity": discord.BaseActivity(),
                    "status": discord.Status.idle
                }

            puggers_needed = self.num_more_needed()

            # Need to keep flipping status because activity update in itself
            # doesn't seem to propagate that well.
            status = discord.Status.idle
            if presence["status"] == status:
                status = discord.Status.online

            if puggers_needed > 0:
                text = f"for {puggers_needed} more pugger"
                if puggers_needed > 1:
                    text += "s"  # plural
                else:
                    text += "!"  # need one more!
                activity = discord.Activity(type=discord.ActivityType.watching,
                                            name=text)
            else:
                text = "a PUG! 🐩"
                activity = discord.Activity(type=discord.ActivityType.playing,
                                            name=text)

            presence["activity"] = activity
            presence["status"] = status

            await bot.change_presence(activity=presence["activity"],
                                      status=presence["status"])
            self.last_presence = presence
            self.last_changed_presence = int(time.time())

    async def role_ping_deltatime(self):
        """Returns a datetime.timedelta of latest role ping, or None if no
        such ping was found.
        """
        after = pendulum.now().subtract(
            hours=cfg("NTBOT_PUGGER_ROLE_PING_MIN_INTERVAL_HOURS"))
        # Because Pycord 1.7.3 wants non timezone aware "after" date.
        after = datetime.fromisoformat(after.in_timezone("UTC").isoformat())
        after = after.replace(tzinfo=None)
        try:
            async for msg in self.guild_channel.history(limit=None,
                                                        after=after,
                                                        oldest_first=False):
                if PUGGER_ROLE in [role.name for role in msg.role_mentions]:
                    # Because Pycord 1.7.3 returns non timezone aware UTC
                    # date, and we need to subtract a timedelta using it.
                    naive_utc_now = datetime.now(timezone.utc)
                    naive_utc_now = naive_utc_now.replace(tzinfo=None)
                    self.last_role_ping = msg.created_at
                    return naive_utc_now - msg.created_at
        except discord.errors.HTTPException as err:
            # If it's not a library error, and we got a HTTP 5xx response,
            # err on the side of caution and treat it as if we found a recent
            # ping by returning a zeroed timedelta, so that the bot will try
            # again later. The Discord API throws server side HTTP 5xx errors
            # pretty much daily, so silently ignoring them here keeps the bot
            # side error logs cleaner since the Discord bugs aren't really
            # actionable for us as the API user.
            if err.code == 0 and str(err.status)[:1] == "5":
                return timedelta()
            raise err
        return None

    async def ping_role(self):
        """Pings the puggers Discord server role, if it's currently allowed.
        Frequency of these pings is restricted to avoid being too spammy.
        """
        async with self.lock:
            if self.num_more_needed() == 0:
                return

            pugger_ratio = self.num_queued() / self.num_expected()
            ping_ratio = cfg("NTBOT_PUGGER_ROLE_PING_THRESHOLD")
            if pugger_ratio < ping_ratio:
                return

            last_ping_dt = await self.role_ping_deltatime()
            hours_limit = cfg("NTBOT_PUGGER_ROLE_PING_MIN_INTERVAL_HOURS")
            if last_ping_dt is not None:
                last_ping_hours = last_ping_dt.total_seconds() / 60 / 60
                if last_ping_hours < hours_limit:
                    return

            for role in self.guild_roles:
                if role.name == PUGGER_ROLE:
                    min_nag_hours = f"{hours_limit:.1f}"
                    min_nag_hours = min_nag_hours.rstrip("0").rstrip(".")
                    msg = (f"{role.mention} Need **"
                           f"{self.num_more_needed()} more puggers** "
                           "for a game!\n_(This is an automatic ping "
                           "to all puggers, because the PUG queue is "
                           f"{(ping_ratio * 100):.0f}% full.\nRest "
                           "assured, I will only ping you once per "
                           f"{min_nag_hours} hours, at most.\n"
                           "If you don't want any of these "
                           "notifications, please consider "
                           "temporarily muting this bot or leaving "
                           f"the {role.mention} server role._)")
                    await self.guild_channel.send(msg)
                    break


pug_guilds = {}


@bot.command(brief="Test if bot is active")
async def ping(ctx):
    """Just a standard Discord bot ping test command for confirming whether
    the bot is online or not.
    """
    await ctx.send("pong")


@bot.command(brief="Join the PUG queue")
async def pug(ctx):
    """Player command for joining the PUG queue.
    """
    if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
        return
    response = ""
    join_success, response = await pug_guilds[ctx.guild].player_join(
        ctx.message.author)
    if join_success:
        response = (f"{ctx.message.author.name} has joined the PUG queue "
                    f"({pug_guilds[ctx.guild].num_queued()} / "
                    f"{pug_guilds[ctx.guild].num_expected()})")
    await ctx.send(f"{response}")


@bot.command(brief="Leave the PUG queue")
async def unpug(ctx):
    """Player command for leaving the PUG queue.
    """
    if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
        return

    leave_success, msg = await pug_guilds[ctx.guild].player_leave(
        ctx.message.author)
    if leave_success:
        msg = (f"{ctx.message.author.name} has left the PUG queue "
               f"({pug_guilds[ctx.guild].num_queued()} / "
               f"{pug_guilds[ctx.guild].num_expected()})")
    await ctx.send(msg)


@bot.command(brief="Empty the server's PUG queue")
async def clearpuggers(ctx):
    """Player command for clearing the PUG queue.
    This can be restricted to Discord guild specific admin roles.
    """
    if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
        return

    # If zero pug admin roles are configured, assume anyone can !clearpuggers
    if len(cfg("NTBOT_PUG_ADMIN_ROLES")) == 0:
        is_allowed = True
    else:
        pug_admin_roles = [role.value for role in cfg("NTBOT_PUG_ADMIN_ROLES")]
        user_roles = [role.name for role in ctx.message.author.roles]
        is_allowed = any(role in pug_admin_roles for role in user_roles)

    if is_allowed:
        await pug_guilds[ctx.guild].reset()
        await ctx.send(f"{ctx.message.author.name} has reset the PUG queue")
    else:
        await ctx.send(f"{ctx.message.author.mention} The PUG queue can only "
                       f"be reset by users with role(s): _{pug_admin_roles}_")


@bot.command(brief="Get new random teams suggestion for the latest PUG")
async def scramble(ctx):
    """Player command for scrambling the latest full PUG queue.
    Can be called multiple times for generating new random teams.
    """
    if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
        return
    msg = ""
    if len(pug_guilds[ctx.guild].prev_puggers) == 0:
        msg = (f"{ctx.message.author.mention} Sorry, no previous PUG found to "
               "scramble")
    else:
        random.shuffle(pug_guilds[ctx.guild].prev_puggers)
        msg = f"{ctx.message.author.name} suggests scrambled teams:\n"
        msg += f"_(random shuffle id: {random_human_readable_phrase()})_\n"
        msg += "_Jinrai players:_\n"
        for i in range(int(len(pug_guilds[ctx.guild].prev_puggers) / 2)):
            msg += f"{pug_guilds[ctx.guild].prev_puggers[i].name}, "
        msg = msg[:-2]  # trailing ", "
        msg += "\n_NSF players:_\n"
        for i in range(int(len(pug_guilds[ctx.guild].prev_puggers) / 2),
                       len(pug_guilds[ctx.guild].prev_puggers)):
            msg += f"{pug_guilds[ctx.guild].prev_puggers[i].name}, "
        msg = msg[:-2]  # trailing ", "
        msg += ("\n\nTeams still unbalanced? Use **"
                f"{bot.command_prefix}scramble** to suggest new random teams.")
    await ctx.send(msg)


@bot.command(brief="List players currently queueing for PUG")
async def puggers(ctx):
    """Player command for listing players currently in the PUG queue.
    """
    if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
        return

    msg = (f"{pug_guilds[ctx.guild].num_queued()} / "
           f"{pug_guilds[ctx.guild].num_expected()} player(s) currently "
           "queued")

    if pug_guilds[ctx.guild].num_queued() > 0:
        all_players_queued = pug_guilds[ctx.guild].jin_players + \
            pug_guilds[ctx.guild].nsf_players
        msg += ": "
        for player in all_players_queued:
            msg += f"{player.name}, "
        msg = msg[:-2]  # trailing ", "
    await ctx.send(msg)


@commands.cooldown(rate=1, per=cfg("NTBOT_PING_PUGGERS_COOLDOWN_SECS"),
                   type=commands.BucketType.user)
@bot.command(brief="Ping all players currently queueing for PUG")
async def ping_puggers(ctx):
    """Player command to ping all players currently inside the PUG queue.
    """
    if ctx.guild not in pug_guilds or not ctx.channel.name == PUG_CHANNEL_NAME:
        # Don't set cooldown for failed invocations.
        ping_puggers.reset_cooldown(ctx)
        return

    pug_admin_roles = [role.value for role in cfg("NTBOT_PUG_ADMIN_ROLES")]
    user_roles = [role.name for role in ctx.message.author.roles]
    is_admin = any(role in pug_admin_roles for role in user_roles)

    # Only admins and players in the queue themselves are allowed to ping queue
    if not is_admin:
        if ctx.message.author not in pug_guilds[ctx.guild].jin_players and \
                ctx.message.author not in pug_guilds[ctx.guild].nsf_players:
            if pug_guilds[ctx.guild].num_queued() == 0:
                await ctx.send(f"{ctx.author.mention} PUG queue is currently "
                               "empty.")
            else:
                await ctx.send(f"{ctx.author.mention} Sorry, to be able to "
                               "ping the PUG queue, you have to be queued "
                               "yourself, or have the role(s): "
                               f"_{pug_admin_roles}_")
            ping_puggers.reset_cooldown(ctx)
            return

    async with pug_guilds[ctx.guild].lock:
        # Comparing <=1 instead of 0 because it makes no sense to ping others
        # if you're the only one currently in the queue.
        if pug_guilds[ctx.guild].num_queued() <= 1:
            await ctx.send(f"{ctx.author.mention} There are no other players "
                           "in the queue to ping!")
            ping_puggers.reset_cooldown(ctx)
            return

    # Require an info message instead of forcing pingees to spend time
    # figuring out why they were pinged. We will construct a jump_url to this
    # message.
    args = ctx.message.content.split(" ", maxsplit=1)
    if len(args) <= 1 or len(args[1].strip()) == 0:
        await ctx.send(f"{ctx.author.mention} Please include a message after "
                       "the command, describing why you pinged the PUG queue.")
        ping_puggers.reset_cooldown(ctx)
        return

    msg = ""
    async with pug_guilds[ctx.guild].lock:
        for player in [p for p in pug_guilds[ctx.guild].jin_players
                       if p != ctx.author]:
            msg += f"{player.mention}, "
        for player in [p for p in pug_guilds[ctx.guild].nsf_players
                       if p != ctx.author]:
            msg += f"{player.mention}, "
        msg = msg[:-2]  # trailing ", "
        msg += (f" User {ctx.author.mention} is pinging the PUG queue: "
                f"{ctx.message.jump_url}")
        await ctx.send(msg)

    # No cooldown for admin pings.
    if is_admin:
        ping_puggers.reset_cooldown(ctx)


def random_human_readable_phrase():
    """Generates a random human readable phrase to work as an identifier.
    Can be used for the !scrambles, to make it easier for players to refer to
    specific scramble permutations via voice chat by using these phrases.
    """
    base_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "static", "phrase_gen")
    with open(file=os.path.join(base_path, "nouns.txt"),
              mode="r", encoding="utf-8") as f_nouns:
        nouns = f_nouns.readlines()
    with open(file=os.path.join(base_path, "adjectives.txt"),
              mode="r", encoding="utf-8") as f_adjs:
        adjectives = f_adjs.readlines()
    phrase = (f"{adjectives[random.randint(0, len(adjectives) - 1)]} "
              f"{nouns[random.randint(0, len(nouns) - 1)]}")
    return phrase.replace("\n", "").lower()


class ErrorHandlerCog(commands.Cog):
    """Helper class for error handling.
    """
    def __init__(self, parent_bot):
        self.bot = parent_bot

    @commands.Cog.listener()
    # pylint: disable=no-self-use
    async def on_command_error(self, ctx, err):
        """Error handler for bot commands.
        """
        # This could be a typo, or a command meant for another bot.
        if isinstance(err, discord.ext.commands.errors.CommandNotFound):
            print(f"Ignoring unknown command: \"{ctx.message.content}\"")
            return
        # This command is on cooldown from being used too often.
        if isinstance(err, discord.ext.commands.errors.CommandOnCooldown):
            # Returns a human readable "<so and so long> before" string.
            retry_after = pendulum.now().diff_for_humans(pendulum.now().add(
                seconds=err.retry_after))
            await ctx.send(f"{ctx.message.author.mention} You're doing it too "
                           f"much! Please wait {retry_after} trying again.")
            return
        # Something else happened! Just raise the error for the logs to catch.
        raise err


class PugQueueCog(commands.Cog):
    """PUG queue main event loop.
    """
    def __init__(self, parent_bot):
        """Acquire lock for asynchronous queue polling, and start the queue
        loop.
        """
        # pylint: disable=no-member
        self.bot = parent_bot
        self.lock = asyncio.Lock()
        self.poll_queue.start()
        self.clear_inactive_puggers.start()

    @tasks.loop(seconds=cfg("NTBOT_POLLING_INTERVAL_SECS"))
    async def poll_queue(self):
        """Poll the PUG queue to see if we're ready to play, and to possibly
        update our status in various ways.

        Iterating and caching per-guild to support multiple Discord channels
        simultaneously using the same bot instance with their own independent
        player pools.
        """
        async with self.lock:
            for guild in bot.guilds:
                for channel in guild.channels:
                    if channel.name != PUG_CHANNEL_NAME:
                        continue
                    if guild not in pug_guilds:
                        pug_guilds[guild] = PugStatus(guild_channel=channel,
                                                      guild_roles=guild.roles)
                        await pug_guilds[guild].reload_puggers()
                    if pug_guilds[guild].is_full():
                        pug_start_success, msg = \
                            await pug_guilds[guild].start_pug()
                        if pug_start_success:
                            # Before starting pug and resetting queue, manually
                            # update presence, so we're guaranteed to have the
                            # presence status fully up-to-date here.
                            pug_guilds[guild].last_changed_presence = 0
                            await pug_guilds[guild].update_presence()
                            # Ping the puggers
                            await channel.send(msg)
                            # And finally reset the queue, so we're ready for
                            # the next PUGs.
                            await pug_guilds[guild].reset()
                    else:
                        await pug_guilds[guild].update_presence()
                    await pug_guilds[guild].ping_role()

    @tasks.loop(hours=1)
    async def clear_inactive_puggers(self):
        """Periodically clear inactive puggers from the queue(s).
        """
        async with self.lock:
            for guild in bot.guilds:
                if guild not in pug_guilds:
                    continue
                if pug_guilds[guild].is_full():
                    continue
                for channel in guild.channels:
                    if channel.name != PUG_CHANNEL_NAME:
                        continue
                    await pug_guilds[guild].reload_puggers()
                    break


bot.add_cog(ErrorHandlerCog(bot))
bot.add_cog(PugQueueCog(bot))
bot.run(BOT_SECRET_TOKEN)