column | dtype | range
hexsha | stringlengths | 40 – 40
size | int64 | 5 – 2.06M
ext | stringclasses | 10 values
lang | stringclasses | 1 value
max_stars_repo_path | stringlengths | 3 – 248
max_stars_repo_name | stringlengths | 5 – 125
max_stars_repo_head_hexsha | stringlengths | 40 – 78
max_stars_repo_licenses | sequencelengths | 1 – 10
max_stars_count | int64 | 1 – 191k
max_stars_repo_stars_event_min_datetime | stringlengths | 24 – 24
max_stars_repo_stars_event_max_datetime | stringlengths | 24 – 24
max_issues_repo_path | stringlengths | 3 – 248
max_issues_repo_name | stringlengths | 5 – 125
max_issues_repo_head_hexsha | stringlengths | 40 – 78
max_issues_repo_licenses | sequencelengths | 1 – 10
max_issues_count | int64 | 1 – 67k
max_issues_repo_issues_event_min_datetime | stringlengths | 24 – 24
max_issues_repo_issues_event_max_datetime | stringlengths | 24 – 24
max_forks_repo_path | stringlengths | 3 – 248
max_forks_repo_name | stringlengths | 5 – 125
max_forks_repo_head_hexsha | stringlengths | 40 – 78
max_forks_repo_licenses | sequencelengths | 1 – 10
max_forks_count | int64 | 1 – 105k
max_forks_repo_forks_event_min_datetime | stringlengths | 24 – 24
max_forks_repo_forks_event_max_datetime | stringlengths | 24 – 24
content | stringlengths | 5 – 2.06M
avg_line_length | float64 | 1 – 1.02M
max_line_length | int64 | 3 – 1.03M
alphanum_fraction | float64 | 0 – 1
count_classes | int64 | 0 – 1.6M
score_classes | float64 | 0 – 1
count_generators | int64 | 0 – 651k
score_generators | float64 | 0 – 1
count_decorators | int64 | 0 – 990k
score_decorators | float64 | 0 – 1
count_async_functions | int64 | 0 – 235k
score_async_functions | float64 | 0 – 1
count_documentation | int64 | 0 – 1.04M
score_documentation | float64 | 0 – 1
d5b52472e7e5df33cf0c5865ffdc86c08a3ea627
1,881
py
Python
dhf_wrapper/base_client.py
Enflow-io/dhf-pay-python
7c32461d3b2a5018151b2a16a0cc0ad6850b88b1
[ "Apache-2.0" ]
null
null
null
dhf_wrapper/base_client.py
Enflow-io/dhf-pay-python
7c32461d3b2a5018151b2a16a0cc0ad6850b88b1
[ "Apache-2.0" ]
null
null
null
dhf_wrapper/base_client.py
Enflow-io/dhf-pay-python
7c32461d3b2a5018151b2a16a0cc0ad6850b88b1
[ "Apache-2.0" ]
null
null
null
from typing import Optional, Callable import requests from requests.auth import AuthBase from requests.exceptions import RequestException class BearerAuth(AuthBase): def __init__(self, token): self.token = token def __call__(self, r): r.headers['Authorization'] = f'Bearer {self.token}' return r class ServiceClient: DEFAULT_MAX_RETRIES = 0 def __init__( self, base_url: str, token: Optional[str] = None, ): self.base_url = base_url.rstrip("/") self.token = token self.session = self._create_client_session() def _dispose(self): """ Class method to close user session """ self.session.close() def _create_client_session(self): """ Class method to create client session """ session = requests.Session() session.auth = self._get_http_auth() return session def _get_http_auth(self): """ Class method to resolve http authentication """ if self.token: return BearerAuth(self.token) def make_full_url(self, path: str) -> str: """ Class method to make full url :param path: str :return: str """ return f"{self.base_url}{path}" def _make_request(self, request: Callable, retries=DEFAULT_MAX_RETRIES, **kwargs) -> dict: """ Class method to make request :param request: Callable :return: dict """ try: with request(**kwargs) as resp: resp.raise_for_status() return resp.json() except RequestException as e: if retries > 0 and e.request.status >= 500: return self._make_request(request=request, retries=retries - 1, **kwargs) else: raise e
25.767123
94
0.576289
1,736
0.922913
0
0
0
0
0
0
456
0.242424
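Reading this first row against the schema: size is 1,881 characters, and the score_* values are consistent with the matching count_* divided by size (1,736 / 1,881 for score_classes, 456 / 1,881 for score_documentation), while an avg_line_length of 25.767123 corresponds to 73 lines. A quick sanity check of that reading, with the values copied from the row above; the count/size relationship is inferred, not documented in the dump itself:

```python
size = 1881  # size column of the first row

assert abs(1736 / size - 0.922913) < 1e-6   # score_classes
assert abs(456 / size - 0.242424) < 1e-6    # score_documentation
assert abs(size / 73 - 25.767123) < 1e-6    # avg_line_length, so about 73 lines
```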
d5b58f8a34e9535374ceecc69e4b47358c97ddb9
1,395
py
Python
flametree/utils.py
Edinburgh-Genome-Foundry/Flametree
a189de5d83ca1eb3526a439320e41df9e2a1162e
[ "MIT" ]
165
2017-02-04T00:40:01.000Z
2021-06-08T03:51:58.000Z
flametree/utils.py
Edinburgh-Genome-Foundry/Flametree
a189de5d83ca1eb3526a439320e41df9e2a1162e
[ "MIT" ]
8
2017-02-10T00:47:09.000Z
2021-05-30T04:38:41.000Z
flametree/utils.py
Edinburgh-Genome-Foundry/Flametree
a189de5d83ca1eb3526a439320e41df9e2a1162e
[ "MIT" ]
19
2017-02-09T17:38:31.000Z
2021-03-23T16:04:32.000Z
import os import shutil from .ZipFileManager import ZipFileManager from .DiskFileManager import DiskFileManager from .Directory import Directory import string printable = set(string.printable) - set("\x0b\x0c") def is_hex(s): return any(c not in printable for c in s) def file_tree(target, replace=False): """Open a connection to a file tree which can be either a disk folder, a zip archive, or an in-memory zip archive. Parameters ---------- target Either the path to a target folder, or a zip file, or '@memory' to write a zip file in memory (at which case a string of the zip file is returned) If the target is already a flametree directory, it is returned as-is. replace If True, will remove the target if it already exists. If False, new files will be written inside the target and some files may be overwritten. """ if isinstance(target, Directory): return target if (not isinstance(target, str)) or is_hex(target): return Directory(file_manager=ZipFileManager(source=target)) elif target == "@memory": return Directory("@memory", file_manager=ZipFileManager("@memory")) elif target.lower().endswith(".zip"): return Directory(target, file_manager=ZipFileManager(target, replace=replace)) else: return Directory(target, file_manager=DiskFileManager(target))
32.44186
86
0.703226
0
0
0
0
0
0
0
0
615
0.44086
d5b74bc11e212074f29e2869fb5c41c2c3cd585b
628
py
Python
audio/audio_client.py
artigianitecnologici/marrtino_apps
b58bf4daa1d06db2f1c8a47be02b29948d41f48d
[ "BSD-4-Clause" ]
null
null
null
audio/audio_client.py
artigianitecnologici/marrtino_apps
b58bf4daa1d06db2f1c8a47be02b29948d41f48d
[ "BSD-4-Clause" ]
null
null
null
audio/audio_client.py
artigianitecnologici/marrtino_apps
b58bf4daa1d06db2f1c8a47be02b29948d41f48d
[ "BSD-4-Clause" ]
null
null
null
import sys import socket import time ip = '127.0.0.1' port = 9001 if (len(sys.argv)>1): ip = sys.argv[1] if (len(sys.argv)>2): port = int(sys.argv[2]) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((ip,port)) sock.send('bip\n\r') data = sock.recv(80) print data sock.send('TTS[it-IT] ciao, come stai?\n\r') data = sock.recv(80) print data sock.send('TTS[en-US] very well, thank you!\n\r') data = sock.recv(80) print data sock.send('TTS default language is english!\n\r') data = sock.recv(80) print data sock.send('bop\n\r') data = sock.recv(80) print data time.sleep(1) sock.close()
14.604651
56
0.66879
0
0
0
0
0
0
0
0
138
0.219745
d5b8242c634dcf60f9e745fdadd1c86fe716bf6e
3,461
py
Python
qmotor/message/matcher.py
yulinfeng000/qmotor
ad3e9eea291f5b87e09fcdd5e42f1eb13d752565
[ "MIT" ]
null
null
null
qmotor/message/matcher.py
yulinfeng000/qmotor
ad3e9eea291f5b87e09fcdd5e42f1eb13d752565
[ "MIT" ]
null
null
null
qmotor/message/matcher.py
yulinfeng000/qmotor
ad3e9eea291f5b87e09fcdd5e42f1eb13d752565
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod from typing import List from .common import ( AtCell, BasicMessage, GroupMessage, FriendMessage, MsgCellType, MessageType, PlainCell, ) from ..utils import is_str_blank, str_contains class MsgMatcher(ABC): def msg_chain_from_ctx(self, ctx): return BasicMessage(ctx.msg).messageChain() def get_cell_type(self, msg_cell): return msg_cell.get("type", None) @abstractmethod def match(self, ctx) -> bool: pass class GroupMsg(MsgMatcher): def match(self, ctx) -> bool: return BasicMessage(ctx.msg).type() == MessageType.GroupMessage class FriendMsg(MsgMatcher): def match(self, ctx) -> bool: return BasicMessage(ctx.msg).type() == MessageType.FriendMessage class TempMsg(MsgMatcher): def match(self, ctx) -> bool: return BasicMessage(ctx.msg).type() == MessageType.TempMessage class AtMsg(GroupMsg): def match(self, ctx) -> bool: if not super().match(ctx): return False msg_chain = self.msg_chain_from_ctx(ctx) return self.get_cell_type(msg_chain[1]) == MsgCellType.At class AtMeMsg(AtMsg): me_qq: int def __init__(self, me_qq) -> None: super(AtMeMsg, self).__init__() self.me_qq = me_qq def match(self, ctx) -> bool: if not super().match(ctx): return False msg_chain = GroupMessage(ctx.msg).messageChain() at = AtCell(msg_chain[1]) return self.me_qq == at.target() class JustAtMeMsg(AtMeMsg): def __init__(self, me_qq) -> None: super(JustAtMeMsg, self).__init__(me_qq) def match(self, ctx) -> bool: if not super().match(ctx): return False msg_chain = self.msg_chain_from_ctx(ctx) plain = PlainCell(msg_chain[2]) return is_str_blank(plain.text()) class AtMeCmdMsg(AtMeMsg): cmd_list: List[str] def __init__(self, me_qq, cmd) -> None: super(AtMeCmdMsg, self).__init__(me_qq) self.cmd_list = cmd def match(self, ctx) -> bool: if not super().match(ctx): return False msg_chain = self.msg_chain_from_ctx(ctx) return str_contains(PlainCell(msg_chain[2]).text(), self.cmd_list) class SpecificFriendMsg(FriendMsg): friend_qq: int def __init__(self, friend_qq) -> None: super(SpecificFriendMsg, self).__init__() self.friend_qq = friend_qq def match(self, ctx) -> bool: if not super().match(ctx): return False return self.friend_qq == FriendMessage(ctx.msg).friend_qq() class SpecificGroupMsg(GroupMsg): group_qq: int def __init__(self, group_qq) -> None: super(SpecificGroupMsg, self).__init__() self.group_qq = group_qq def match(self, ctx) -> bool: if not super().match(ctx): return False return self.group_qq == GroupMessage(ctx.msg).group_qq() if __name__ == "__main__": msg_matcher = JustAtMeMsg(123) class Ctx: def __init__(self, msg) -> None: self.msg = msg msg = { "type": "GroupMessage", "sender": {"id": 123, "nickname": "", "remark": ""}, "messageChain": [ {"type": "Source", "id": 123456, "time": 123456}, {"type": "At", "target": 1234, "display": "@Mirai"}, {"type": "Plain", "text": " "}, ], } print(msg_matcher.match(Ctx(msg)))
25.637037
74
0.612251
2,754
0.795724
0
0
62
0.017914
0
0
165
0.047674
d5b8367e1c83c38e170646eb1abb34d55d607542
240
py
Python
invert-binary-tree/invert-binary-tree.py
Atri10/Leet-code---Atri_Patel
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
[ "MIT" ]
1
2021-10-10T20:21:18.000Z
2021-10-10T20:21:18.000Z
invert-binary-tree/invert-binary-tree.py
Atri10/Leet-code---Atri_Patel
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
[ "MIT" ]
null
null
null
invert-binary-tree/invert-binary-tree.py
Atri10/Leet-code---Atri_Patel
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
[ "MIT" ]
null
null
null
class Solution: def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]: if root: root.left,root.right = self.invertTree(root.right),self.invertTree(root.left) return root return None
40
89
0.641667
240
1
0
0
0
0
0
0
0
0
d5b96915a161658ab58f977d3518461eda8624b2
1,407
py
Python
main/admin.py
sinahmr/childf
4e01f46867425b36b6431713b79debf585d69d37
[ "MIT" ]
null
null
null
main/admin.py
sinahmr/childf
4e01f46867425b36b6431713b79debf585d69d37
[ "MIT" ]
null
null
null
main/admin.py
sinahmr/childf
4e01f46867425b36b6431713b79debf585d69d37
[ "MIT" ]
null
null
null
from django.contrib import admin from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin from django.contrib.auth.models import Group from django.utils.translation import ugettext_lazy as _ from main.models import UserInfo, User, Child, Volunteer, Donor, Letter, Need, PurchaseForInstitute, PurchaseForNeed, \ Activity, OngoingUserInfo @admin.register(User) class UserAdmin(DjangoUserAdmin): class UserInfoInline(admin.TabularInline): model = UserInfo extra = 1 max_num = 1 fieldsets = ( (None, {'fields': ('email', 'password')}), (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}), (_('Important dates'), {'fields': ('last_login', 'date_joined')}), ) add_fieldsets = ( (None, { 'classes': ('wide',), 'fields': ('email', 'password1', 'password2'), }), ) list_display = ('email', 'userinfo', 'is_staff') search_fields = ('email', 'userinfo__first_name', 'userinfo__last_name') ordering = ('email',) inlines = [UserInfoInline] admin.site.unregister(Group) admin.site.register(Child) admin.site.register(Volunteer) admin.site.register(Donor) admin.site.register(Letter) admin.site.register(Need) admin.site.register(PurchaseForInstitute) admin.site.register(PurchaseForNeed) admin.site.register(Activity) admin.site.register(OngoingUserInfo)
31.977273
119
0.687278
714
0.507463
0
0
736
0.523099
0
0
267
0.189765
d5b9d02c239d39cdf1dcff5670b5cc5e359e73a5
2,515
py
Python
hunting/display/render.py
MoyTW/RL_Arena_Experiment
fb79c67576cd4de3e4a58278b4515098f38fb584
[ "MIT" ]
null
null
null
hunting/display/render.py
MoyTW/RL_Arena_Experiment
fb79c67576cd4de3e4a58278b4515098f38fb584
[ "MIT" ]
null
null
null
hunting/display/render.py
MoyTW/RL_Arena_Experiment
fb79c67576cd4de3e4a58278b4515098f38fb584
[ "MIT" ]
null
null
null
import tdl import time import hunting.constants as c class Renderer: def __init__(self, main_console=None, level_display_width=c.SCREEN_WIDTH, level_display_height=c.SCREEN_HEIGHT): if main_console is None: self.main_console = tdl.init(level_display_width, level_display_height, 'From Renderer Default Constructor') else: self.main_console = main_console self.level_display_width = level_display_width self.level_display_height = level_display_height self._level_console = tdl.Console(level_display_width, level_display_height) def _render_level(self, con, level): for x in range(level.width): for y in range(level.height): if level[x][y].blocks is not False: self._level_console.draw_rect(x, y, 1, 1, None, bg=[120, 0, 50]) else: self._level_console.draw_rect(x, y, 1, 1, None, bg=[30, 255, 30]) # TODO: This is pretty hacky! i = 1 for o in level._all_objects: if o.faction == '1': # TODO: Better faction implementation! color = [255, 0, 0] else: color = [0, 0, 255] self._level_console.draw_char(o.x, o.y, i, color) i += 1 con.blit(self._level_console) def render_all(self, level): self._render_level(self.main_console, level) tdl.flush() def clear(self, level): for o in level._all_objects: self._level_console.draw_char(o.x, o.y, ' ') def render_event(self, level, event): if event[c.EVENT_TYPE] == c.MOVEMENT_EVENT: # Clear previous location self._level_console.draw_char(event[c.MOVEMENT_PREV_X], event[c.MOVEMENT_PREV_Y], ' ', bg=[0, 15, 7]) # Retrieve faction and color o = level.get_object_by_id(event[c.OBJ_ID]) if o.faction == '1': # TODO: Better faction implementation! color = [255, 0, 0] else: color = [0, 0, 255] self._level_console.draw_char(event[c.OBJ_X], event[c.OBJ_Y], o.faction, fg=color) elif event[c.EVENT_TYPE] == c.OBJECT_DESTRUCTION_EVENT: self._level_console.draw_char(event[c.OBJ_X], event[c.OBJ_Y], ' ', bg=[0, 15, 7]) # Render self.main_console.blit(self._level_console) tdl.flush() def visualize(level, show_time=1): Renderer().render_all(level) time.sleep(show_time)
36.985294
120
0.603579
2,364
0.93996
0
0
0
0
0
0
216
0.085885
d5ba579f0453b95d1e8c11d5b88d94830943af72
1,732
py
Python
ideas/models.py
neosergio/hackatrix-api
27f0180415efa97bd7345d100b314d8807486b67
[ "Apache-2.0" ]
1
2021-02-12T10:25:28.000Z
2021-02-12T10:25:28.000Z
ideas/models.py
neosergio/hackatrix-api
27f0180415efa97bd7345d100b314d8807486b67
[ "Apache-2.0" ]
7
2020-02-21T00:53:38.000Z
2022-02-10T12:22:53.000Z
ideas/models.py
neosergio/hackatrix-api
27f0180415efa97bd7345d100b314d8807486b67
[ "Apache-2.0" ]
null
null
null
from django.db import models class Idea(models.Model): title = models.CharField(max_length=255, unique=True) description = models.TextField() author = models.OneToOneField('events.Registrant', related_name='author_idea', on_delete=models.CASCADE, blank=True, null=True) written_by = models.ForeignKey('users.User', related_name='written_idea', on_delete=models.CASCADE, blank=True, null=True) event = models.ForeignKey('events.Event', related_name='event_idea', on_delete=models.CASCADE, blank=True, null=True) is_valid = models.BooleanField(default=False) max_number_of_participants = models.PositiveIntegerField(default=7) created_at = models.DateTimeField(auto_now_add=True) modified_at = models.DateTimeField(auto_now=True) is_active = models.BooleanField(default=True) class Meta(): ordering = ['-created_at', '-id'] def __str__(self): return self.title class IdeaTeamMember(models.Model): idea = models.ForeignKey(Idea, related_name='idea_team_member', on_delete=models.CASCADE) member = models.OneToOneField('events.Registrant', related_name='member_idea', on_delete=models.CASCADE) class Meta(): ordering = ['idea'] unique_together = ('idea', 'member') verbose_name = 'Team Member' verbose_name_plural = 'Groups'
39.363636
108
0.560624
1,697
0.979792
0
0
0
0
0
0
193
0.111432
d5ba81a91490ddb0a286042ea3d0c0e723e0af52
2,348
py
Python
section2/out/src/data_prep/SlicesDataset.py
ssheikh85/AIHCND_c3_3d_imaging
6502985d4199244328a683459b4d819090d58f3c
[ "MIT" ]
null
null
null
section2/out/src/data_prep/SlicesDataset.py
ssheikh85/AIHCND_c3_3d_imaging
6502985d4199244328a683459b4d819090d58f3c
[ "MIT" ]
null
null
null
section2/out/src/data_prep/SlicesDataset.py
ssheikh85/AIHCND_c3_3d_imaging
6502985d4199244328a683459b4d819090d58f3c
[ "MIT" ]
null
null
null
""" Module for Pytorch dataset representations """ import torch from torch.utils.data import Dataset class SlicesDataset(Dataset): """ This class represents an indexable Torch dataset which could be consumed by the PyTorch DataLoader class """ def __init__(self, data): self.data = data self.slices = [] for i, d in enumerate(data): for j in range(d["image"].shape[0]): self.slices.append((i, j)) def __getitem__(self, idx): """ This method is called by PyTorch DataLoader class to return a sample with id idx Arguments: idx {int} -- id of sample Returns: Dictionary of 2 Torch Tensors of dimensions [1, W, H] """ slc = self.slices[idx] sample = dict() sample["id"] = idx # You could implement caching strategy here if dataset is too large to fit # in memory entirely # Also this would be the place to call transforms if data augmentation is used # TASK: Create two new keys in the "sample" dictionary, named "image" and "seg" # The values are 3D Torch Tensors with image and label data respectively. # First dimension is size 1, and last two hold the voxel data from the respective # slices. Write code that stores the 2D slice data in the last 2 dimensions of the 3D Tensors. # Your tensor needs to be of shape [1, patch_size, patch_size] # Don't forget that you need to put a Torch Tensor into your dictionary element's value # Hint: your 3D data sits in self.data variable, the id of the 3D volume from data array # and the slice number are in the slc variable. # Hint2: You can use None notation like so: arr[None, :] to add size-1 # dimension to a Numpy array # <YOUR CODE GOES HERE> img = self.data[slc[0]]["image"][slc[1]] sample['image'] = torch.from_numpy(img[None,:]) seg = self.data[slc[0]]["seg"][slc[1]] sample['seg'] = torch.from_numpy(seg[None,:]) return sample def __len__(self): """ This method is called by PyTorch DataLoader class to return number of samples in the dataset Returns: int """ return len(self.slices)
35.044776
103
0.609881
2,244
0.955707
0
0
0
0
0
0
1,514
0.644804
d5bb9bbb0fed4afc892e132a8963124e532f19f2
845
py
Python
zenslackchat/zendesk_webhooks.py
uktrade/zenslackchat
8071757e1ea20a433783c6a7c47f25b046692682
[ "MIT" ]
2
2020-12-30T07:46:12.000Z
2022-02-01T16:37:34.000Z
zenslackchat/zendesk_webhooks.py
uktrade/zenslackchat
8071757e1ea20a433783c6a7c47f25b046692682
[ "MIT" ]
7
2021-04-14T16:17:29.000Z
2022-01-25T11:48:18.000Z
zenslackchat/zendesk_webhooks.py
uktrade/zenslackchat
8071757e1ea20a433783c6a7c47f25b046692682
[ "MIT" ]
1
2021-06-06T09:46:47.000Z
2021-06-06T09:46:47.000Z
from zenslackchat.zendesk_base_webhook import BaseWebHook from zenslackchat.zendesk_email_to_slack import email_from_zendesk from zenslackchat.zendesk_comments_to_slack import comments_from_zendesk class CommentsWebHook(BaseWebHook): """Handle Zendesk Comment Events. """ def handle_event(self, event, slack_client, zendesk_client): """Handle the comment trigger event we have been POSTed. Recover and update the comments with lastest from Zendesk. """ comments_from_zendesk(event, slack_client, zendesk_client) class EmailWebHook(BaseWebHook): """Handle Zendesk Email Events. """ def handle_event(self, event, slack_client, zendesk_client): """Handle an email created issue and create it on slack. """ email_from_zendesk(event, slack_client, zendesk_client)
33.8
72
0.744379
641
0.75858
0
0
0
0
0
0
285
0.337278
d5bbaeac59cde7e794de669fe4ec0942d528fc8d
699
py
Python
Examples/PagesOperations/MovePage.py
groupdocs-merger-cloud/groupdocs-merger-cloud-python-samples
af736c94240eeefef28bd81012c96ab2ea779088
[ "MIT" ]
null
null
null
Examples/PagesOperations/MovePage.py
groupdocs-merger-cloud/groupdocs-merger-cloud-python-samples
af736c94240eeefef28bd81012c96ab2ea779088
[ "MIT" ]
null
null
null
Examples/PagesOperations/MovePage.py
groupdocs-merger-cloud/groupdocs-merger-cloud-python-samples
af736c94240eeefef28bd81012c96ab2ea779088
[ "MIT" ]
null
null
null
# Import modules import groupdocs_merger_cloud from Common import Common # This example demonstrates how to move document page to a new position class MovePage: @classmethod def Run(cls): pagesApi = groupdocs_merger_cloud.PagesApi.from_config(Common.GetConfig()) options = groupdocs_merger_cloud.MoveOptions() options.file_info = groupdocs_merger_cloud.FileInfo("WordProcessing/four-pages.docx") options.output_path = "Output/move-pages.docx" options.page_number = 1 options.new_page_number = 2 result = pagesApi.move(groupdocs_merger_cloud.MoveRequest(options)) print("Output file path = " + result.path)
36.789474
93
0.711016
553
0.79113
0
0
533
0.762518
0
0
164
0.234621
d5bbb325b8069e32756e2756a7150bcc81d9e24f
221
py
Python
src/models/predict_model.py
joseluistello/Regression-Analysis-Apple-Data
85952edd22ba8c382f43357efc510763185fd6d1
[ "MIT" ]
null
null
null
src/models/predict_model.py
joseluistello/Regression-Analysis-Apple-Data
85952edd22ba8c382f43357efc510763185fd6d1
[ "MIT" ]
null
null
null
src/models/predict_model.py
joseluistello/Regression-Analysis-Apple-Data
85952edd22ba8c382f43357efc510763185fd6d1
[ "MIT" ]
null
null
null
y_pred=ml.predict(x_test) print(y_pred) from sklearn.metrics import r2_score r2_score(y_test,y_pred) pred_y_df=pd.DataFrame({'Actual Value':y_test,'Predicted Value':y_pred, 'Difference': y_test-y_pred}) pred_y_df[0:20]
24.555556
101
0.791855
0
0
0
0
0
0
0
0
43
0.19457
d5bd90ba6b204f06ed13dd7eaecdd9ec577e33cb
5,512
py
Python
src/models/utils_func.py
Soufiane-Fartit/cars-prices
8eee8aa168251adab7f4947c45a78752e4145041
[ "MIT" ]
null
null
null
src/models/utils_func.py
Soufiane-Fartit/cars-prices
8eee8aa168251adab7f4947c45a78752e4145041
[ "MIT" ]
null
null
null
src/models/utils_func.py
Soufiane-Fartit/cars-prices
8eee8aa168251adab7f4947c45a78752e4145041
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ This module offers util functions to be called and used in other modules """ from datetime import datetime import os import json import pickle import string import random import numpy as np import pandas as pd from matplotlib import pyplot as plt import seaborn as sns from sklearn import tree def id_generator(size=6, chars=string.ascii_lowercase + string.digits): """GENERATE A RANDOM STRING TO BE USED AS AN ID Args: size (int, optional): size of the string. Defaults to 6. chars (str, optional): charachters to be used to generate the string. Defaults to string.ascii_lowercase+string.digits. Returns: [str]: a random chain of charachters """ return "".join(random.choice(chars) for _ in range(size)) def save_model(path, model): """SAVE MODEL INTO PICKLE FILE Args: path (str): path where to save the model model (binary): the model to be saved """ with open(path, "wb") as file: pickle.dump(model, file) def update_history(models_hist_path, model_id, model_name, model, params): """SAVE METADATA RELATED TO THE TRAINED MODEL INTO THE HISTORY FILE Args: models_hist_path (str): path to the history file model_id (str): unique id of the model model_name (str): model name = "model_"+model_id+".pkl" model (binary): binary file of the model params (dict): dictionnary containing the hyper-parameters used to fit the model """ model_metadata = dict() model_metadata["trained"] = str(datetime.now()) model_metadata["model_type"] = type(model).__name__ model_metadata["model_id"] = model_id model_metadata["params"] = params print(model_metadata) with open(models_hist_path, "r+") as outfile: try: hist = json.load(outfile) hist[model_name] = model_metadata outfile.seek(0) json.dump(hist, outfile, indent=4) except json.decoder.JSONDecodeError: json.dump({model_name: model_metadata}, outfile, indent=4) def update_history_add_eval( models_hist_path, model_id=None, model_name=None, metrics=None ): """ADD EVALUATION METRICS THE HISTORY FILE FOR THE SPECIFIED MODEL Args: models_hist_path (str): path to the history file model_id (str, optional): the id of the model. Defaults to None. model_name (str, optional): the name of the model. Defaults to None. metrics (dict, optional): a dictionnary containing metadata related to the model evaluation. Defaults to None. """ assert ( model_id is not None or model_name is not None ), "At least the model id or name must be given" assert models_hist_path is not None, "You must specify the path to the history file" if not model_name: model_name = "model_" + model_id + ".pkl" eval_metadata = dict() eval_metadata["datetime"] = str(datetime.now()) eval_metadata["metrics"] = metrics with open(models_hist_path, "r+") as outfile: try: hist = json.load(outfile) hist[model_name]["evaluation"] = eval_metadata outfile.seek(0) json.dump(hist, outfile, indent=4) except json.decoder.JSONDecodeError: print("cannot save evaluation metadata") def generate_features_importance_plot(model, features, model_id): """GENERATES A PLOT DESCRIBING FEATURES IMPORTANCE FOR THE MODEL TO MAKE THE PREDICTION. Args: model (tree-based model): a tree based model (decision tree, random forest ...) 
features (pandas dataframe): a table of the features on which we trained the model model_id (str): the unique id of the model """ mean_importances = model.feature_importances_ importances_indices = np.argsort(mean_importances)[::-1] ordered_columns = [features.columns[i] for i in importances_indices] importances = pd.DataFrame( [tree.feature_importances_ for tree in model.estimators_], columns=features.columns, ) importances = importances[ordered_columns] _, ax = plt.subplots(figsize=(12, 8)) sns.boxplot(x="variable", y="value", ax=ax, data=pd.melt(importances)) figure = ax.get_figure() figure.savefig( "models/models-training/run_" + model_id + "/features_importance.png" ) def plot_trees(rf, feature_names, target_names, model_id): """GENERATES A PLOT THAT SHOWS THE DECISION MAKING OF THE TREES Args: rf (model): a tree based model (random forest ...) feature_names (list): names of the columns of the training set target_names (str): name of the target columns model_id (str): unique id of the model """ fn = feature_names cn = target_names fig, axes = plt.subplots(nrows=1, ncols=5, figsize=(10, 2), dpi=900) for index in range(0, 5): tree.plot_tree( rf.estimators_[index], feature_names=fn, class_names=cn, filled=True, ax=axes[index], ) axes[index].set_title("Estimator: " + str(index), fontsize=11) fig.savefig("models/models-training/run_" + model_id + "/Trees.png") def get_id_list(N=6): print (os.getcwd()) print([x[0] for x in os.walk("../../models/models-training")]) return [x[0][-N:] for x in os.walk("../../models/models-training")][1:]
33.815951
90
0.649492
0
0
0
0
0
0
0
0
2,524
0.45791
d5c0292ca1d781849b4c6bb27642731423800d86
7,504
py
Python
modules/finance.py
KpaBap/palbot
38d2b7958e310f45a28cf1b3173967b92f819946
[ "MIT" ]
null
null
null
modules/finance.py
KpaBap/palbot
38d2b7958e310f45a28cf1b3173967b92f819946
[ "MIT" ]
null
null
null
modules/finance.py
KpaBap/palbot
38d2b7958e310f45a28cf1b3173967b92f819946
[ "MIT" ]
null
null
null
import asyncio import discord from discord.ext import commands import re import sqlite3 from urllib.parse import quote as uriquote import html CURR = ["AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR"] class Finance(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def coin(self, ctx, *, line: str): """Look up a cryptocurrency such as Bitcoin Optionally specify a quantity such as `0.6 ETH` Optionally specify a conversion value such as `2 BTC in ETH` or `ETH in CAD`""" coin = await self.parse_coinline(line) if not coin: await ctx.send(f"Unable to find coin {line}") return url = f"https://api.coinmarketcap.com/v1/ticker/{coin['coin']}{coin['currency']}" async with self.bot.session.get(url) as resp: data = await resp.json() data = data[0] cid = data['symbol'].upper() name = data['name'] pUSD = data['price_usd'] pC24 = data['percent_change_24h'] pC1 = data['percent_change_1h'] output = "" if coin.get('cvtto', ''): cvtval = await self.convert_coin(coin, data) if not cvtval: await ctx.send(f"Failed to look up {coin['cvtto']}") return if coin['qty'] == 1: output = "{} {} | Value: {} {} (${} USD) | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, cvtval, coin['cvtto'].upper(), pUSD, pC1, pC24) else: usdfinal = float(pUSD) * coin['qty'] output = "{} {} : {} {} (${:.2f} USD)".format(coin['qty'], cid, cvtval, coin['cvtto'].upper(), usdfinal) else: if coin['qty'] == 1: output = "{} {} | Value: ${} | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, pUSD, pC1, pC24) else: finalprice = float(pUSD) * coin['qty'] output = "{} {} : ${:.2f}".format(coin['qty'], cid, finalprice) if output: await ctx.send(output) async def convert_coin(self, coin, data): if coin['currency']: cvtval = "{:.2f}".format(float(data['price_{}'.format(coin['cvtto'].lower())]) * coin['qty']) else: if not coin['cvtto']: cvtval = '' if coin['cvtto'] == "bitcoin": #api gives us BTC by default cvtval = self.ffstr(float(data['price_btc']) * coin['qty']) coin['cvtto'] = "BTC" else: pUSD = data['price_usd'] url = "https://api.coinmarketcap.com/v1/ticker/{}".format(coin['cvtto']) async with self.bot.session.get(url) as resp: tojson = await resp.json() coin['cvtto'] = tojson[0]['symbol'].upper() toval = float(tojson[0]['price_usd']) cvtval = self.ffstr((float(pUSD) * coin['qty']) / toval) return cvtval def ffstr(self, number): return "{:.8f}".format(float(number)).rstrip('0').rstrip('.') async def parse_coinline(self, line): coinqty = 1 qtycheck = re.search(r"(^(\d*\.)?\d+)\s?(\w.+)", line) if qtycheck: coinqty = float(qtycheck.group(1)) line = qtycheck.group(3).strip() curr = "" cvtto = "" if " in " in line or " to " in line: if " in " in line: coin, cvtto = line.split(" in ") elif " to " in line: coin, cvtto = line.split(" to ") coinid = await self.findcoin(coin) if cvtto.upper() in CURR: curr = "?convert={}".format(cvtto) else: cvtto = await self.findcoin(cvtto) else: coin = line coinid = await self.findcoin(coin) if not coinid: return None return {'coin': coinid, 'qty': coinqty, 'currency': curr, 'cvtto': cvtto} async def findcoin(self, coin): conn = sqlite3.connect("coins.sqlite3") cursor = conn.cursor() result = cursor.execute("SELECT coinid FROM coins WHERE coinid = (?) 
OR symbol = (?)", (coin, coin)).fetchone() if not result: like = "%{}%".format(coin) result = cursor.execute("SELECT coinid FROM coins WHERE name LIKE (?)", [like]).fetchone() if result: return result[0] @commands.command(hidden=True) @commands.is_owner() async def newcoins(self, ctx): conn = sqlite3.connect("coins.sqlite3") cursor = conn.cursor() result = cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='coins';").fetchone() if not result: cursor.execute("CREATE TABLE 'coins' ('symbol' TEXT, 'coinid' TEXT UNIQUE ON CONFLICT REPLACE, 'name' TEXT);") conn.commit() url = "https://api.coinmarketcap.com/v1/ticker/?limit=0" async with self.bot.session.get(url) as resp: data = await resp.json() for coin in data: sym = coin['symbol'].lower() cid = coin['id'].lower() name = coin['name'].lower() cursor.execute("insert into coins values (?, ?, ?)", (sym,cid,name)) conn.commit() conn.close() @commands.command(aliases=['stonks', 'stocks']) async def stock (self, ctx, name: str): """Look up a stock and show its current price, change, etc""" symbol = "" url = f"https://autoc.finance.yahoo.com/autoc?query={uriquote(name)}&region=1&lang=en&guccounter=1" async with self.bot.session.get(url) as resp: data = await resp.json() symbol = data['ResultSet']['Result'][0]['symbol'] if not symbol: await ctx.send(f"Unable to find a stonk named `{name}`") return url = f"http://query1.finance.yahoo.com/v7/finance/quote?symbols={symbol}" async with self.bot.session.get(url) as resp: data = await resp.json() data = data["quoteResponse"]["result"][0] downup = "\N{CHART WITH UPWARDS TREND}" if data['regularMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}" outstr = "{}{}: {} {} :: Today's change: {:.2f} ({:.2f}%) {}" longn = ' ({})'.format(data['shortName']) if 'shortName' in data else '' outstr = outstr.format(data['symbol'], longn, data['regularMarketPrice'], data['currency'], float(data['regularMarketChange']), float(data['regularMarketChangePercent']), downup) if 'postMarketPrice' in data and (data['marketState'] == "CLOSED" or "POST" in data['marketState']): pdu = "\N{CHART WITH UPWARDS TREND}" if data['postMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}" outstr += " :: After Hours: {:.2f} - Change: {:.2f} {}".format(data['postMarketPrice'], data['postMarketChange'], pdu) await ctx.send(html.unescape(outstr)) def setup(bot): bot.add_cog(Finance(bot))
40.344086
166
0.518124
7,043
0.938566
0
0
4,489
0.598214
6,685
0.890858
2,330
0.310501
d5c051b72ce68a91896ab21b2fd4b6e93e7e9a10
174
py
Python
SG_GetDataForClassifier.py
shubha1593/MovieReviewAnalysis
c485eea0c8b35e554027cce7a431212b406e672c
[ "MIT" ]
7
2015-04-01T12:41:55.000Z
2019-08-01T18:13:56.000Z
SG_GetDataForClassifier.py
shubha1593/MovieReviewAnalysis
c485eea0c8b35e554027cce7a431212b406e672c
[ "MIT" ]
null
null
null
SG_GetDataForClassifier.py
shubha1593/MovieReviewAnalysis
c485eea0c8b35e554027cce7a431212b406e672c
[ "MIT" ]
null
null
null
from SG_GetFeatureMatrix import * from SG_VectorY import * featureMatrix = featureMatrixFromReviews() Y = getYVector() def getDataForClassifier() : return featureMatrix, Y
21.75
42
0.804598
0
0
0
0
0
0
0
0
0
0
d5c05a70d2bfb21530d973639155b0914281d250
1,882
py
Python
greenbounty/bounties/migrations/0001_initial.py
Carnales/green-bounty
beb765082b32c096139463bf75ccc1ec3d530692
[ "MIT" ]
1
2021-01-18T21:43:05.000Z
2021-01-18T21:43:05.000Z
greenbounty/bounties/migrations/0001_initial.py
Thinkr3/green-bounty
c74fe79121d211728c9f70ffd87e239c8ba5d131
[ "MIT" ]
1
2021-01-18T06:35:07.000Z
2021-01-18T06:35:07.000Z
greenbounty/bounties/migrations/0001_initial.py
Thinkr3/green-bounty
c74fe79121d211728c9f70ffd87e239c8ba5d131
[ "MIT" ]
2
2021-01-18T06:22:50.000Z
2021-01-18T06:24:22.000Z
# Generated by Django 3.1.4 on 2021-01-17 19:12 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Organization', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=25, null=True)), ('balance', models.FloatField()), ('total', models.FloatField()), ], ), migrations.CreateModel( name='Hunter', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50, null=True)), ('image', models.ImageField(blank=True, null=True, upload_to='')), ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Bounty', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=25, null=True)), ('image', models.ImageField(blank=True, null=True, upload_to='')), ('price', models.FloatField()), ('city', models.CharField(max_length=25, null=True)), ('hunter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='bounties.hunter')), ], ), ]
40.042553
144
0.584485
1,723
0.915515
0
0
0
0
0
0
197
0.104676
d5c06f16c3fcc96993938e0c35fe7c62d8dfa422
8,621
py
Python
nova/tests/virt/docker/test_driver.py
osrg/nova
14b6bc655145c832bd9c822e48f877818e0e53ff
[ "Apache-2.0" ]
null
null
null
nova/tests/virt/docker/test_driver.py
osrg/nova
14b6bc655145c832bd9c822e48f877818e0e53ff
[ "Apache-2.0" ]
null
null
null
nova/tests/virt/docker/test_driver.py
osrg/nova
14b6bc655145c832bd9c822e48f877818e0e53ff
[ "Apache-2.0" ]
null
null
null
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (c) 2013 dotCloud, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import socket import mock from nova import context from nova import exception from nova.openstack.common import jsonutils from nova.openstack.common import units from nova import test from nova.tests import utils import nova.tests.virt.docker.mock_client from nova.tests.virt.test_virt_drivers import _VirtDriverTestCase from nova.virt.docker import hostinfo from nova.virt.docker import network class DockerDriverTestCase(_VirtDriverTestCase, test.TestCase): driver_module = 'nova.virt.docker.DockerDriver' def setUp(self): super(DockerDriverTestCase, self).setUp() self.stubs.Set(nova.virt.docker.driver.DockerDriver, 'docker', nova.tests.virt.docker.mock_client.MockClient()) def fake_setup_network(self, instance, network_info): return self.stubs.Set(nova.virt.docker.driver.DockerDriver, '_setup_network', fake_setup_network) def fake_get_registry_port(self): return 5042 self.stubs.Set(nova.virt.docker.driver.DockerDriver, '_get_registry_port', fake_get_registry_port) # Note: using mock.object.path on class throws # errors in test_virt_drivers def fake_teardown_network(container_id): return self.stubs.Set(network, 'teardown_network', fake_teardown_network) self.context = context.RequestContext('fake_user', 'fake_project') def test_driver_capabilities(self): self.assertFalse(self.connection.capabilities['has_imagecache']) self.assertFalse(self.connection.capabilities['supports_recreate']) #NOTE(bcwaldon): This exists only because _get_running_instance on the # base class will not let us set a custom disk/container_format. 
def _get_running_instance(self, obj=False): instance_ref = utils.get_test_instance(obj=obj) network_info = utils.get_test_network_info() network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \ '1.1.1.1' image_info = utils.get_test_image_info(None, instance_ref) image_info['disk_format'] = 'raw' image_info['container_format'] = 'docker' self.connection.spawn(self.ctxt, jsonutils.to_primitive(instance_ref), image_info, [], 'herp', network_info=network_info) return instance_ref, network_info def test_get_host_stats(self): self.mox.StubOutWithMock(socket, 'gethostname') socket.gethostname().AndReturn('foo') socket.gethostname().AndReturn('bar') self.mox.ReplayAll() self.assertEqual('foo', self.connection.get_host_stats()['host_hostname']) self.assertEqual('foo', self.connection.get_host_stats()['host_hostname']) def test_get_available_resource(self): memory = { 'total': 4 * units.Mi, 'free': 3 * units.Mi, 'used': 1 * units.Mi } disk = { 'total': 50 * units.Gi, 'available': 25 * units.Gi, 'used': 25 * units.Gi } # create the mocks with contextlib.nested( mock.patch.object(hostinfo, 'get_memory_usage', return_value=memory), mock.patch.object(hostinfo, 'get_disk_usage', return_value=disk) ) as ( get_memory_usage, get_disk_usage ): # run the code stats = self.connection.get_available_resource(nodename='test') # make our assertions get_memory_usage.assert_called_once_with() get_disk_usage.assert_called_once_with() expected_stats = { 'vcpus': 1, 'vcpus_used': 0, 'memory_mb': 4, 'memory_mb_used': 1, 'local_gb': 50L, 'local_gb_used': 25L, 'disk_available_least': 25L, 'hypervisor_type': 'docker', 'hypervisor_version': 1000, 'hypervisor_hostname': 'test', 'cpu_info': '?', 'supported_instances': ('[["i686", "docker", "lxc"],' ' ["x86_64", "docker", "lxc"]]') } self.assertEqual(expected_stats, stats) def test_plug_vifs(self): # Check to make sure the method raises NotImplementedError. self.assertRaises(NotImplementedError, self.connection.plug_vifs, instance=utils.get_test_instance(), network_info=None) def test_unplug_vifs(self): # Check to make sure the method raises NotImplementedError. 
self.assertRaises(NotImplementedError, self.connection.unplug_vifs, instance=utils.get_test_instance(), network_info=None) def test_create_container(self, image_info=None): instance_href = utils.get_test_instance() if image_info is None: image_info = utils.get_test_image_info(None, instance_href) image_info['disk_format'] = 'raw' image_info['container_format'] = 'docker' self.connection.spawn(self.context, instance_href, image_info, 'fake_files', 'fake_password') self._assert_cpu_shares(instance_href) def test_create_container_vcpus_2(self, image_info=None): flavor = utils.get_test_flavor(options={ 'name': 'vcpu_2', 'flavorid': 'vcpu_2', 'vcpus': 2 }) instance_href = utils.get_test_instance(flavor=flavor) if image_info is None: image_info = utils.get_test_image_info(None, instance_href) image_info['disk_format'] = 'raw' image_info['container_format'] = 'docker' self.connection.spawn(self.context, instance_href, image_info, 'fake_files', 'fake_password') self._assert_cpu_shares(instance_href, vcpus=2) def _assert_cpu_shares(self, instance_href, vcpus=4): container_id = self.connection.find_container_by_name( instance_href['name']).get('id') container_info = self.connection.docker.inspect_container(container_id) self.assertEqual(vcpus * 1024, container_info['Config']['CpuShares']) def test_create_container_wrong_image(self): instance_href = utils.get_test_instance() image_info = utils.get_test_image_info(None, instance_href) image_info['disk_format'] = 'raw' image_info['container_format'] = 'invalid_format' self.assertRaises(exception.InstanceDeployFailure, self.test_create_container, image_info) @mock.patch.object(network, 'teardown_network') @mock.patch.object(nova.virt.docker.driver.DockerDriver, 'find_container_by_name', return_value={'id': 'fake_id'}) def test_destroy_container(self, byname_mock, teardown_mock): instance = utils.get_test_instance() self.connection.destroy(self.context, instance, 'fake_networkinfo') byname_mock.assert_called_once_with(instance['name']) teardown_mock.assert_called_with('fake_id') def test_get_memory_limit_from_sys_meta_in_object(self): instance = utils.get_test_instance(obj=True) limit = self.connection._get_memory_limit_bytes(instance) self.assertEqual(2048 * units.Mi, limit) def test_get_memory_limit_from_sys_meta_in_db_instance(self): instance = utils.get_test_instance(obj=False) limit = self.connection._get_memory_limit_bytes(instance) self.assertEqual(2048 * units.Mi, limit)
40.85782
79
0.629741
7,522
0.872521
0
0
483
0.056026
0
0
2,047
0.237443
d5c1a9c69d580b85cf1676ca01e443acef7eb239
9,048
py
Python
pyx/tests/test_http.py
l04m33/pyx
b70efec605832ba3c7079e991584db3f5d1da8cb
[ "MIT" ]
2
2015-08-25T11:31:42.000Z
2015-10-16T11:30:15.000Z
pyx/tests/test_http.py
l04m33/pyx
b70efec605832ba3c7079e991584db3f5d1da8cb
[ "MIT" ]
null
null
null
pyx/tests/test_http.py
l04m33/pyx
b70efec605832ba3c7079e991584db3f5d1da8cb
[ "MIT" ]
null
null
null
import unittest import unittest.mock as mock import asyncio import pyx.http as http def create_dummy_message(): msg = http.HttpMessage(None) msg.headers = [ http.HttpHeader('Server', 'Pyx'), http.HttpHeader('Cookie', 'a'), http.HttpHeader('Cookie', 'b'), ] return msg def create_dummy_connection(): loop = asyncio.get_event_loop() reader = asyncio.StreamReader(loop=loop) @asyncio.coroutine def dummy_drain(): yield from asyncio.sleep(0.001) writer = mock.Mock(spec=asyncio.StreamWriter) writer.attach_mock(mock.Mock(wraps=dummy_drain), 'drain') conn = http.HttpConnection(reader, writer) return conn def create_dummy_request(): conn = create_dummy_connection() req = http.HttpRequest(conn) return req class TestHttpMessage(unittest.TestCase): def test_get_header(self): msg = create_dummy_message() self.assertEqual(msg.get_header("server"), ["Pyx"]) self.assertEqual(msg.get_header("SERVER"), ["Pyx"]) self.assertEqual(msg.get_header("pragma"), []) self.assertEqual(msg.get_header("cookie"), ["a", "b"]) self.assertEqual(msg.get_first_header("cookie"), "a") self.assertTrue(msg.get_first_header("pragma") is None) def test_write_headers(self): msg = create_dummy_message() self.assertEqual(msg.write_headers(), ['Server: Pyx', 'Cookie: a', 'Cookie: b']) msg.headers = [] self.assertEqual(msg.write_headers(), []) class TestHttpRequest(unittest.TestCase): def test_parse_req_line(self): req = create_dummy_request() req._parse_req_line(b'POST / HTTP/1.1\r\n') self.assertEqual(req.method, 'POST') self.assertEqual(req.path, '/') self.assertTrue(req.query is None) self.assertEqual(req.protocol, 'HTTP') self.assertEqual(req.version, (1, 1)) req._parse_req_line( b'GET /some/path?some=query&some_other=query HTTP/1.1\r\n') self.assertEqual(req.method, 'GET') self.assertEqual(req.path, '/some/path') self.assertEqual(req.query, 'some=query&some_other=query') with self.assertRaises(http.BadHttpRequestError): req._parse_req_line(b'') with self.assertRaises(http.BadHttpRequestError): req._parse_req_line(b'GET /\r\n') with self.assertRaises(http.BadHttpRequestError): req._parse_req_line(b'GET / GARBAGE\r\n') req._parse_req_line(b'GET / HTTP/1\r\n') self.assertEqual(req.version, (1, 0)) def test_parse_header(self): req = create_dummy_request() req._parse_header(b'Server: Pyx\r\n') self.assertEqual(req.headers, [http.HttpHeader('Server', 'Pyx')]) req.headers = [] with self.assertRaises(http.BadHttpHeaderError): req._parse_header(b'Server\r\n') req.headers = [] req._parse_header(b'Server:\r\n') self.assertEqual(req.headers, [http.HttpHeader('Server', '')]) req.headers = [] req._parse_header(b'Server: \r\n') self.assertEqual(req.headers, [http.HttpHeader('Server', '')]) req.headers = [] req._parse_header(b'Host: some.badasshost.com:8080\r\n') self.assertEqual(req.headers, [http.HttpHeader('Host', 'some.badasshost.com:8080')]) with self.assertRaises(http.BadHttpHeaderError): req._parse_header(b': pyx\r\n') with self.assertRaises(http.BadHttpHeaderError): req._parse_header(b' : pyx') with self.assertRaises(http.BadHttpHeaderError): req._parse_header(b' \t : pyx') def test_parse(self): loop = asyncio.get_event_loop() conn = create_dummy_connection() reader = conn.reader reader.feed_data( b'GET /?q=p&s=t HTTP/1.1\r\n' b'Host: localhost\r\n' b'Connection: Keep-Alive\r\n' b'Pragma: Test\r\n' b' : Test\r\n' b'\r\n') req = loop.run_until_complete(http.HttpRequest.parse(conn)) self.assertEqual(req.method, 'GET') self.assertEqual(req.path, '/') self.assertEqual(req.query, 'q=p&s=t') self.assertEqual(req.protocol, 'HTTP') 
self.assertEqual(req.version, (1, 1)) self.assertEqual(req.headers, [ http.HttpHeader('Host', 'localhost'), http.HttpHeader('Connection', 'Keep-Alive'), http.HttpHeader('Pragma', 'Test'), ]) def test_respond(self): req = create_dummy_request() req.version = (1, 1) resp = req.respond(200) self.assertEqual(resp.code, 200) self.assertEqual(resp.version, (1, 1)) req.version = (1, 0) resp = req.respond(400) self.assertEqual(resp.code, 400) self.assertEqual(resp.version, (1, 0)) class TestHttpResponse(unittest.TestCase): def test_write(self): resp = http.HttpResponse(200, None) resp.headers = [ http.HttpHeader('Server', 'Pyx'), http.HttpHeader('Connection', 'keep-alive') ] self.assertEqual(resp.write(), ['HTTP/1.1 200 OK', 'Server: Pyx', 'Connection: keep-alive', '\r\n']) self.assertEqual(str(resp), 'HTTP/1.1 200 OK\r\n' 'Server: Pyx\r\n' 'Connection: keep-alive\r\n' '\r\n') def test_send(self): loop = asyncio.get_event_loop() req = create_dummy_request() resp = req.respond(200) self.assertEqual(resp.code, 200) self.assertFalse(req.responded) resp.headers = [ http.HttpHeader('Server', 'Pyx'), http.HttpHeader('Content-Length', '100'), http.HttpHeader('Content-Type', 'text/plain'), ] loop.run_until_complete(resp.send()) resp.connection.writer.write.assert_called_with(str(resp).encode()) self.assertTrue(req.responded) def test_send_body(self): loop = asyncio.get_event_loop() req = create_dummy_request() resp = req.respond(200) loop.run_until_complete(resp.send()) self.assertTrue(req.responded) loop.run_until_complete(resp.send_body(b'Yes, this is the body.')) resp.connection.writer.write.assert_called_with(b'Yes, this is the body.') loop.run_until_complete(resp.send_body('This is another string body.')) resp.connection.writer.write.assert_called_with(b'This is another string body.') class DummyResource(http.UrlResource): def get_child(self, key): if key == 'hello': return self elif key == "static": return http.StaticRootResource('.') else: raise http.HttpError(404, '{} not found'.format(key)) class TestUrlResource(unittest.TestCase): def test_traverse(self): res = DummyResource() self.assertEqual(res.traverse(''), res) self.assertEqual(res.traverse('/'), res) self.assertEqual(res.traverse('/hello'), res) with self.assertRaises(http.HttpError): res.traverse('/does/not/exist') sres = res.traverse('/static') self.assertEqual(sres.root, '.') self.assertEqual(sres._build_real_path(), '.') sres = res.traverse('/static/') self.assertEqual(sres._build_real_path(), '.') sres = res.traverse('/static/some/path') self.assertEqual(sres._build_real_path(), './some/path') def test_not_implemented(self): res = http.UrlResource() with self.assertRaises(NotImplementedError): res.traverse('/hello') req = create_dummy_request() with self.assertRaises(NotImplementedError): res.handle_request(req) class TestStaticRootResource(unittest.TestCase): def test_build_real_path(self): res = http.StaticRootResource('local_root') res = res.traverse('/some/long/path/where/ever/it/leads/') self.assertEqual(res._build_real_path(), 'local_root/some/long/path/where/ever/it/leads') res = http.StaticRootResource('local_root') res = res.traverse('/some/../dangerous/path') self.assertEqual(res._build_real_path(), 'local_root/dangerous/path') res = http.StaticRootResource('local_root') res = res.traverse('/some/../../dangerous/path') self.assertEqual(res._build_real_path(), 'local_root/dangerous/path') res = http.StaticRootResource('local_root') res = res.traverse('/some/%2e%2e%2f%2e%2e/dangerous/path') 
self.assertEqual(res._build_real_path(), 'local_root/dangerous/path')
33.511111
92
0.59527
8,228
0.909372
375
0.041446
81
0.008952
0
0
1,507
0.166556
d5c40e739be914cd8694a4a6735e497e975d7778
1,791
py
Python
tests/test_webdriver_chrome.py
kidosoft/splinter
6d5052fd73c0a626299574cea76924e367c67faa
[ "BSD-3-Clause" ]
1
2016-09-21T19:32:47.000Z
2016-09-21T19:32:47.000Z
tests/test_webdriver_chrome.py
kidosoft/splinter
6d5052fd73c0a626299574cea76924e367c67faa
[ "BSD-3-Clause" ]
null
null
null
tests/test_webdriver_chrome.py
kidosoft/splinter
6d5052fd73c0a626299574cea76924e367c67faa
[ "BSD-3-Clause" ]
1
2019-12-02T15:19:07.000Z
2019-12-02T15:19:07.000Z
# -*- coding: utf-8 -*- # Copyright 2013 splinter authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. import os import unittest from splinter import Browser from .fake_webapp import EXAMPLE_APP from .base import WebDriverTests from selenium.common.exceptions import WebDriverException def chrome_installed(): try: Browser("chrome") except WebDriverException: return False return True class ChromeBrowserTest(WebDriverTests, unittest.TestCase): @classmethod def setUpClass(cls): cls.browser = Browser("chrome") @classmethod def tearDownClass(cls): cls.browser.quit() def setUp(self): self.browser.visit(EXAMPLE_APP) def test_attach_file(self): "should provide a way to change file field value" file_path = os.path.join( os.path.abspath(os.path.dirname(__file__)), 'mockfile.txt' ) self.browser.attach_file('file', file_path) self.browser.find_by_name('upload').click() html = self.browser.html self.assertIn('text/plain', html) self.assertIn(open(file_path).read().encode('utf-8'), html) def test_should_support_with_statement(self): with Browser('chrome') as internet: pass class ChromeBrowserFullscreenTest(WebDriverTests, unittest.TestCase): @classmethod def setUpClass(cls): cls.browser = Browser("chrome", fullscreen=True) @classmethod def tearDownClass(cls): cls.browser.quit() def setUp(self): self.browser.visit(EXAMPLE_APP) def test_should_support_with_statement(self): with Browser('chrome', fullscreen=True) as internet: pass
25.225352
69
0.672808
1,288
0.719151
0
0
305
0.170296
0
0
314
0.175321
d5c480f55405e4b344842fed3a1082b875de03dd
1,349
py
Python
main.py
DuskXi/ArkX
7b416ae0c4ec2b383c6f414ed475930dd228909f
[ "Apache-2.0" ]
2
2022-02-18T03:08:38.000Z
2022-03-03T04:20:08.000Z
main.py
DuskXi/ArkX
7b416ae0c4ec2b383c6f414ed475930dd228909f
[ "Apache-2.0" ]
null
null
null
main.py
DuskXi/ArkX
7b416ae0c4ec2b383c6f414ed475930dd228909f
[ "Apache-2.0" ]
null
null
null
import os import json from File.file import File os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' def fileRead(fileName, encoding='utf-8'): with open(fileName, encoding=encoding) as f: return f.read() def main(): from Automation.distributor import Distributor from Performance import recoder from WebInterface import web modelConfig = json.loads(fileRead("config/model.json")) labelsName = json.loads(fileRead("config/labelsName.json")) config = json.loads(fileRead("config/config.json")) # file = File() classifyModel = modelConfig["imageClassificationModel"] # if not file.mergedFile(classifyModel["filePath"], classifyModel["fileName"], classifyModel["files"]): # print("文件合并失败") # print("回车退出") # input() # exit(0) recoder.Recoder.debug = False recoder.Recoder.debugSleepingTime = 60 * 60 recoder.Recoder.initDataSet([modelConfig["objectDetectionModel"]["modelName"], modelConfig["addSanityModel"]["modelName"]], [classifyModel["modelName"]]) # modelConfig["imageClassificationModel"]["filePath"] = os.path.join(classifyModel["filePath"], classifyModel["fileName"]) distributor = Distributor(modelConfig, config["adb_path"], labelsName) web.run(distributor, config) if __name__ == "__main__": main()
30.659091
127
0.681987
0
0
0
0
0
0
0
0
538
0.392988
d5c5f4f5c79da67180264a12457b76158e4ccc4b
4,814
py
Python
tests/test_simple.py
cprogrammer1994/miniglm
696764ff200dd106dd533264ff45a060d5f7b230
[ "MIT" ]
4
2017-11-03T14:48:52.000Z
2019-03-07T03:48:11.000Z
tests/test_simple.py
cprogrammer1994/miniglm
696764ff200dd106dd533264ff45a060d5f7b230
[ "MIT" ]
2
2017-11-27T15:40:01.000Z
2021-01-30T08:40:51.000Z
tests/test_simple.py
cprogrammer1994/miniglm
696764ff200dd106dd533264ff45a060d5f7b230
[ "MIT" ]
3
2017-11-27T15:25:07.000Z
2021-03-02T10:31:30.000Z
import struct

import numpy as np
import pytest

import miniglm


def test_add_vec_vec():
    res = miniglm.add((1.0, 2.0, 3.0), (1.5, 1.8, 1.2))
    np.testing.assert_almost_equal(res, (2.5, 3.8, 4.2))
    assert type(res) is tuple


def test_add_vec_scalar():
    res = miniglm.add((1.0, 2.0, 3.0), 0.5)
    np.testing.assert_almost_equal(res, (1.5, 2.5, 3.5))
    assert type(res) is tuple


def test_sub_vec_vec():
    res = miniglm.sub((5.0, 6.0, 7.0), (1.5, 1.8, 1.2))
    np.testing.assert_almost_equal(res, (3.5, 4.2, 5.8))
    assert type(res) is tuple


def test_sub_vec_scalar():
    res = miniglm.sub((5.0, 6.0, 7.0), 1.5)
    np.testing.assert_almost_equal(res, (3.5, 4.5, 5.5))
    assert type(res) is tuple


def test_mul_vec_vec():
    res = miniglm.mul((5.0, 6.0, 7.0), (1.5, 1.8, 1.2))
    np.testing.assert_almost_equal(res, (7.5, 10.8, 8.4))
    assert type(res) is tuple


def test_mul_vec_scalar():
    res = miniglm.mul((1.0, 2.0, 3.0), 2.0)
    np.testing.assert_almost_equal(res, (2.0, 4.0, 6.0))
    assert type(res) is tuple


def test_cross():
    res = miniglm.cross((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))
    np.testing.assert_almost_equal(res, (-59.45, -5.18, 19.3))
    assert type(res) is tuple


def test_dot_vec():
    res = miniglm.dot((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))
    np.testing.assert_almost_equal(res, 58.83)


def test_dot_quat():
    res = miniglm.dot((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))
    np.testing.assert_almost_equal(res, 58.83)


def test_mix_vec():
    res = miniglm.mix((2.5, 3.4, 4.6), (7.2, 1.1, 3.2), 0.2)
    np.testing.assert_almost_equal(res, (3.44, 2.94, 4.32))
    assert type(res) is tuple


def test_mix_scalar():
    res = miniglm.mix(1.0, 3.0, 0.5)
    np.testing.assert_almost_equal(res, 2.0)


def test_rotate():
    res = miniglm.rotate(miniglm.pi / 3.0, miniglm.norm((0.48, 0.60, 0.64)))
    expected = (0.24, 0.3, 0.32, 0.8660254037844387)
    np.testing.assert_almost_equal(res, expected)
    assert type(res) is tuple


def test_split_quat():
    quat = (0.24, 0.3, 0.32, 0.8660254037844387)
    angle, axis = miniglm.split(quat)
    np.testing.assert_almost_equal(angle, miniglm.pi / 3.0)
    np.testing.assert_almost_equal(axis, (0.48, 0.60, 0.64))
    assert type(axis) is tuple


def test_rotate_x_90_deg():
    res = miniglm.rotate(miniglm.pi / 2.0, (1.0, 0.0, 0.0))
    np.testing.assert_almost_equal(res, (np.sqrt(2.0) / 2.0, 0.0, 0.0, np.sqrt(2.0) / 2.0))


def test_rotate_y_90_deg():
    res = miniglm.rotate(miniglm.pi / 2.0, (0.0, 1.0, 0.0))
    np.testing.assert_almost_equal(res, (0.0, np.sqrt(2.0) / 2.0, 0.0, np.sqrt(2.0) / 2.0))


def test_rotate_z_90_deg():
    res = miniglm.rotate(miniglm.pi / 2.0, (0.0, 0.0, 1.0))
    np.testing.assert_almost_equal(res, (0.0, 0.0, np.sqrt(2.0) / 2.0, np.sqrt(2.0) / 2.0))


def test_norm_vec():
    res = miniglm.norm((48.0, 60.0, 64.0))
    expected = (0.48, 0.60, 0.64)
    np.testing.assert_almost_equal(res, expected)
    assert type(res) is tuple


def test_norm_quat():
    res = miniglm.norm((2.0, 4.0, 8.0, 4.0))
    expected = (0.2, 0.4, 0.8, 0.4)
    np.testing.assert_almost_equal(res, expected)
    assert type(res) is tuple


def test_norm_mat():
    mat = (
        0.074, 0.962, -0.259,
        -0.518, 0.259, 0.814,
        0.851, 0.074, 0.518,
    )
    res = miniglm.norm(mat)
    np.testing.assert_almost_equal(miniglm.det(res), 1.0)
    np.testing.assert_almost_equal(miniglm.cross(res[0:3], res[3:6]), res[6:9])
    np.testing.assert_almost_equal(miniglm.dot(res[0:3], res[3:6]), 0.0)
    np.testing.assert_almost_equal(miniglm.dot(res[3:6], res[6:9]), 0.0)
    np.testing.assert_almost_equal(miniglm.dot(res[0:3], res[6:9]), 0.0)
    assert type(res) is tuple


def test_cast():
    quat = (0.2, 0.4, 0.8, 0.4)
    mat = (-0.6, 0.8, 0.0, -0.48, -0.36, 0.8, 0.64, 0.48, 0.6)
    np.testing.assert_almost_equal(miniglm.cast(quat), mat)
    np.testing.assert_almost_equal(miniglm.cast(mat), quat)
    np.testing.assert_almost_equal(miniglm.cast(miniglm.cast(quat)), quat)
    np.testing.assert_almost_equal(miniglm.cast(miniglm.cast(mat)), mat)


def test_swizzle_vec():
    res = miniglm.swizzle((1.0, 2.0, 3.0), 'yxz')
    np.testing.assert_almost_equal(res, (2.0, 1.0, 3.0))


def test_swizzle_quat():
    res = miniglm.swizzle((0.1, 0.7, 0.5, 0.5), 'wxyz')
    np.testing.assert_almost_equal(res, (0.5, 0.1, 0.7, 0.5))


def test_pack_scalar():
    assert miniglm.pack(1.75) == struct.pack('f', 1.75)


def test_pack_vec():
    vec = (1.0, 2.0, 3.0)
    assert miniglm.pack(vec) == struct.pack('fff', *vec)


def test_pack_quat():
    quat = (0.1, 0.7, 0.5, 0.5)
    assert miniglm.pack(quat) == struct.pack('ffff', *quat)


def test_pack_mat():
    mat = (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
    assert miniglm.pack(mat) == struct.pack('fffffffff', *mat)
28.826347
91
0.623598
0
0
0
0
0
0
0
0
36
0.007478
d5c61844c85a34a814f44efd7ddfec47f1e2a5e5
1,131
py
Python
flaskbb/plugins/news/views.py
konstantin1985/forum
7d4de24ccc932e9764699d89c8cc9d210b7fac7f
[ "BSD-3-Clause" ]
null
null
null
flaskbb/plugins/news/views.py
konstantin1985/forum
7d4de24ccc932e9764699d89c8cc9d210b7fac7f
[ "BSD-3-Clause" ]
null
null
null
flaskbb/plugins/news/views.py
konstantin1985/forum
7d4de24ccc932e9764699d89c8cc9d210b7fac7f
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
from flask import Blueprint, redirect

from flaskbb.utils.helpers import render_template
from .forms import AddForm, DeleteForm
from .models import MyPost
from flaskbb.extensions import db


news = Blueprint("news", __name__, template_folder="templates")


def inject_news_link():
    return render_template("navigation_snippet.html")


@news.route("/")
def index():
    return render_template("index.html", newsposts = MyPost.query.all())


@news.route('/add', methods=['GET', 'POST'])
def add():
    form = AddForm()
    if form.validate_on_submit():
        p = MyPost(name = form.name.data, text = form.text.data)
        db.session.add(p)
        db.session.commit()
        return redirect('/news')
    return render_template('add.html', form=form)


@news.route('/delete', methods=['GET', 'POST'])
def delete():
    form = DeleteForm()
    if form.validate_on_submit():
        p = MyPost.query.filter(MyPost.name == form.name.data).first()
        db.session.delete(p)
        db.session.commit()
        return redirect('/news')
    return render_template('delete.html', form=form)
26.928571
72
0.660477
0
0
0
0
763
0.674624
0
0
154
0.136163
d5c64f687d9f59ed689fc14b8df6d5ee61f23931
23,742
py
Python
stix_shifter_modules/aws_athena/tests/stix_translation/test_aws_athena_json_to_stix.py
nkhetia31/stix-shifter
ace07581cb227fd35e450b2f8871475227a041d0
[ "Apache-2.0" ]
33
2018-05-25T17:07:28.000Z
2019-09-30T10:08:53.000Z
stix_shifter_modules/aws_athena/tests/stix_translation/test_aws_athena_json_to_stix.py
nkhetia31/stix-shifter
ace07581cb227fd35e450b2f8871475227a041d0
[ "Apache-2.0" ]
54
2018-06-01T18:17:24.000Z
2019-09-30T18:36:15.000Z
stix_shifter_modules/aws_athena/tests/stix_translation/test_aws_athena_json_to_stix.py
subbyte/stix-shifter
36d71c172a5fc5b97d872e623753b0dd1bf4fe6c
[ "Apache-2.0" ]
37
2018-07-24T13:29:46.000Z
2019-09-29T19:06:27.000Z
from stix_shifter_utils.stix_translation.src.json_to_stix import json_to_stix_translator from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers from stix_shifter_modules.aws_athena.entry_point import EntryPoint import unittest MODULE = "aws_athena" entry_point = EntryPoint() map_data = entry_point.get_results_translator().map_data data_source = { "type": "identity", "id": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff", "name": "aws_athena", "identity_class": "events" } options = {} class TestAwsResultsToStix(unittest.TestCase): """ class to perform unit test case for Aws Athena logs translate results """ @staticmethod def get_first(itr, constraint): """ return the obj in the itr if constraint is true """ return next( (obj for obj in itr if constraint(obj)), None ) @staticmethod def get_first_of_type(itr, typ): """ to check whether the object belongs to respective stix object """ return TestAwsResultsToStix.get_first(itr, lambda o: isinstance(o, dict) and o.get('type') == typ) def test_common_prop(self): """ to test the common stix object properties """ data = { "guardduty": { "accountid": 979326520502, "region": "us-east-1", "type": "UnauthorizedAccess:EC2/SSHBruteForce", "resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal", "resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104", "resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4", "resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1." "amazonaws.com", "resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a", "resource_instancedetails_networkinterfaces_0_publicip": "18.210.22.128", "resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21", "resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f", "resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13", "resource_instancedetails_imageid": "ami-0015fcaa5516c75ed", "resource_instancedetails_instanceid": "i-031cb81e1f32a36e1", "resource_instancedetails_availabilityzone": "us-east-1f", "service_eventfirstseen": "2020-07-31T06:19:09Z", "service_action_networkconnectionaction_protocol": "TCP", "service_action_networkconnectionaction_remoteportdetails_port": "38420", "service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden", "service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "85.224.242.94", "service_action_networkconnectionaction_remoteipdetails_city_cityname": "\u00d6rebro", "service_action_networkconnectionaction_localportdetails_port": "22", "service_eventlastseen": "2020-09-12T09:19:40Z", "severity": 2, "title": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1.", "arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/" "7ab9d1cb6248e05a0e419a79528761cb", "createdat": "2020-07-31T06:37:13.745Z", "description": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1. 
" "Brute force attacks are used to gain unauthorized access to your instance by " "guessing the SSH password.", "finding_id": "7ab9d1cb6248e05a0e419a79528761cb", "partition": "aws", "resource": { "instancedetails": { "imagedescription": "Provided by Red Hat, Inc.", "instancestate": "running", "instancetype": "t2.large", "launchtime": "2020-09-11T23:16:03Z", "tags": { "0": { "key": "Name", "value": "ArcSight Logger" } } }, "resourcetype": "Instance" }, "schemaversion": 2.0, "service": { "action": { "actiontype": "NETWORK_CONNECTION", "networkconnectionaction": { "connectiondirection": "INBOUND", "localportdetails": { "portname": "SSH" }, "remoteipdetails": { "geolocation": { "lat": "59.2741", "lon": "15.2066" }, "organization": { "asn": "2119", "asnorg": "Telenor Norge AS", "isp": "Telenor Sverige AB", "org": "Telenor Sverige AB" } }, "remoteportdetails": { "portname": "Unknown" } } }, "count": "20", "detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df", "resourcerole": "TARGET", "servicename": "guardduty" }, "updatedat": "2020-09-12T09:25:34.086Z" } } result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options) assert result_bundle['type'] == 'bundle' result_bundle_objects = result_bundle['objects'] result_bundle_identity = result_bundle_objects[0] assert result_bundle_identity['type'] == data_source['type'] assert result_bundle_identity['id'] == data_source['id'] assert result_bundle_identity['name'] == data_source['name'] assert result_bundle_identity['identity_class'] == data_source['identity_class'] observed_data = result_bundle_objects[1] assert observed_data['id'] is not None assert observed_data['type'] == "observed-data" assert observed_data['created_by_ref'] == result_bundle_identity['id'] assert observed_data['created'] is not None assert observed_data['modified'] is not None assert observed_data['number_observed'] is not None def test_vpc_flow_network_json_to_stix(self): """to test network stix object properties""" data = { "vpcflow": { "account": 979326520502, "interfaceid": "eni-04b762de832716892", "sourceaddress": "89.248.172.85", "destinationaddress": "172.31.62.249", "sourceport": 58387, "destinationport": 51289, "protocol": "tcp", "starttime": 1592547796, "endtime": 1592547798, "action": "REJECT", "date": "2020-06-19", "logstatus": "OK", "numbytes": 40, "region": "us-east-1", "version": 2 } } result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options) result_bundle_objects = result_bundle['objects'] result_bundle_identity = result_bundle_objects[0] assert result_bundle_identity['type'] == data_source['type'] observed_data = result_bundle_objects[1] assert 'objects' in observed_data objects = observed_data['objects'] network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic') assert network_obj is not None, 'network-traffic object type not found' assert network_obj.keys() == {'type', 'src_ref', 'dst_ref', 'src_port', 'dst_port', 'protocols', 'start', 'end'} assert network_obj['type'] == 'network-traffic' assert network_obj['src_ref'] == '1' assert network_obj['dst_ref'] == '4' assert network_obj['src_port'] == 58387 assert network_obj['dst_port'] == 51289 assert network_obj['protocols'] == ['tcp'] assert network_obj['start'] == '2020-06-19T06:23:16.000Z' assert network_obj['end'] == '2020-06-19T06:23:18.000Z' def test_vpc_flow_custom_attr_json_to_stix(self): """to test network stix object properties""" data = { 
"vpcflow": { "account": 979326520502, "interfaceid": "eni-04b762de832716892", "sourceaddress": "89.248.172.85", "destinationaddress": "172.31.62.249", "sourceport": 58387, "destinationport": 51289, "protocol": "tcp", "starttime": 1592547796, "endtime": 1592547798, "action": "REJECT", "date": "2020-06-19", "logstatus": "OK", "numbytes": 40, "region": "us-east-1", "version": 2 } } options = {"unmapped_fallback": True} result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options) result_bundle_objects = result_bundle['objects'] result_bundle_identity = result_bundle_objects[0] assert result_bundle_identity['type'] == data_source['type'] observed_data = result_bundle_objects[1] assert 'objects' in observed_data objects = observed_data['objects'] custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena') assert custom_object.keys() == {'type', 'interfaceid', 'date', 'logstatus', 'numbytes', 'region', 'version'} assert custom_object['date'] == '2020-06-19' assert custom_object['logstatus'] == 'OK' assert custom_object['numbytes'] == 40 assert custom_object['region'] == 'us-east-1' assert custom_object['version'] == 2 def test_guardduty_network_json_to_stix(self): """to test network stix object properties""" data = { "guardduty": { "accountid": 979326520502, "region": "us-east-1", "type": "UnauthorizedAccess:EC2/SSHBruteForce", "resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal", "resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104", "resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4", "resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1." "amazonaws.com", "resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a", "resource_instancedetails_networkinterfaces_0_publicip": "18.210.22.128", "resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21", "resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f", "resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13", "resource_instancedetails_imageid": "ami-0015fcaa5516c75ed", "resource_instancedetails_instanceid": "i-031cb81e1f32a36e1", "resource_instancedetails_availabilityzone": "us-east-1f", "service_eventfirstseen": "2020-07-31T06:19:09Z", "service_action_networkconnectionaction_protocol": "TCP", "service_action_networkconnectionaction_remoteportdetails_port": "38420", "service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden", "service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "85.224.242.94", "service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro", "service_action_networkconnectionaction_localportdetails_port": "22", "service_eventlastseen": "2020-09-12T09:19:40Z", "severity": 2, "title": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1.", "arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding" "/7ab9d1cb6248e05a0e419a79528761cb", "createdat": "2020-07-31T06:37:13.745Z", "description": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1. 
" "Brute force attacks are used to gain unauthorized access to your instance by " "guessing the SSH password.", "finding_id": "7ab9d1cb6248e05a0e419a79528761cb", "partition": "aws", "resource": { "instancedetails": { "imagedescription": "Provided by Red Hat, Inc.", "instancestate": "running", "instancetype": "t2.large", "launchtime": "2020-09-11T23:16:03Z", "tags": { "0": { "key": "Name", "value": "ArcSight Logger" } } }, "resourcetype": "Instance" }, "schemaversion": 2.0, "service": { "action": { "actiontype": "NETWORK_CONNECTION", "networkconnectionaction": { "connectiondirection": "INBOUND", "localportdetails": { "portname": "SSH" }, "remoteipdetails": { "geolocation": { "lat": "59.2741", "lon": "15.2066" }, "organization": { "asn": "2119", "asnorg": "Telenor Norge AS", "isp": "Telenor Sverige AB", "org": "Telenor Sverige AB" } }, "remoteportdetails": { "portname": "Unknown" } } }, "count": "20", "detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df", "resourcerole": "TARGET", "servicename": "guardduty" }, "updatedat": "2020-09-12T09:25:34.086Z" } } result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options) result_bundle_objects = result_bundle['objects'] result_bundle_identity = result_bundle_objects[0] assert result_bundle_identity['type'] == data_source['type'] observed_data = result_bundle_objects[1] assert 'objects' in observed_data objects = observed_data['objects'] network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic') assert network_obj is not None, 'network-traffic object type not found' assert network_obj.keys() == {'type', 'dst_port', 'src_ref', 'dst_ref', 'src_port', 'protocols'} assert network_obj['type'] == 'network-traffic' assert network_obj['dst_port'] == 38420 assert network_obj['src_ref'] == '3' assert network_obj['dst_ref'] == '9' assert network_obj['src_port'] == 22 assert network_obj['protocols'] == ['tcp'] def test_guardduty_custom_attr_json_to_stix(self): """to test network stix object properties""" data = { "guardduty": { "accountid": 979326520502, "region": "us-east-1", "type": "UnauthorizedAccess:EC2/SSHBruteForce", "resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal", "resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104", "resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4", "resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1." 
"amazonaws.com", "resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a", "resource_instancedetails_networkinterfaces_0_publicip": "18.210.22.128", "resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21", "resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f", "resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13", "resource_instancedetails_imageid": "ami-0015fcaa5516c75ed", "resource_instancedetails_instanceid": "i-031cb81e1f32a36e1", "resource_instancedetails_availabilityzone": "us-east-1f", "service_eventfirstseen": "2020-07-31T06:19:09Z", "service_action_networkconnectionaction_protocol": "TCP", "service_action_networkconnectionaction_remoteportdetails_port": "38420", "service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden", "service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "85.224.242.94", "service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro", "service_action_networkconnectionaction_localportdetails_port": "22", "service_eventlastseen": "2020-09-12T09:19:40Z", "severity": 2, "title": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1.", "arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/" "7ab9d1cb6248e05a0e419a79528761cb", "createdat": "2020-07-31T06:37:13.745Z", "description": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1." " Brute force attacks are used to gain unauthorized access to your instance by guessing " "the SSH password.", "finding_id": "7ab9d1cb6248e05a0e419a79528761cb", "partition": "aws", "resource": { "instancedetails": { "imagedescription": "Provided by Red Hat, Inc.", "instancestate": "running", "instancetype": "t2.large", "launchtime": "2020-09-11T23:16:03Z", "tags": { "0": { "key": "Name", "value": "ArcSight Logger" } } }, "resourcetype": "Instance" }, "schemaversion": 2.0, "service": { "action": { "actiontype": "NETWORK_CONNECTION", "networkconnectionaction": { "connectiondirection": "INBOUND", "localportdetails": { "portname": "SSH" }, "remoteipdetails": { "geolocation": { "lat": "59.2741", "lon": "15.2066" }, "organization": { "asn": "2119", "asnorg": "Telenor Norge AS", "isp": "Telenor Sverige AB", "org": "Telenor Sverige AB" } }, "remoteportdetails": { "portname": "Unknown" } } }, "count": "20", "detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df", "resourcerole": "TARGET", "servicename": "guardduty" }, "updatedat": "2020-09-12T09:25:34.086Z" } } options = {"unmapped_fallback": True} result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options) result_bundle_objects = result_bundle['objects'] result_bundle_identity = result_bundle_objects[0] assert result_bundle_identity['type'] == data_source['type'] observed_data = result_bundle_objects[1] assert 'objects' in observed_data objects = observed_data['objects'] custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena') assert custom_object.keys() == {'type', 'service_action_networkconnectionaction_remoteipdetails_country_countryname', 'finding_id', 'arn', 'createdat', 'partition', 'resource', 'schemaversion', 'service', 'updatedat'} assert custom_object['arn'] == 'arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed' \ '494f3b7ca56acdc74df/finding/7ab9d1cb6248e05a0e419a79528761cb' assert 
custom_object['finding_id'] == '7ab9d1cb6248e05a0e419a79528761cb' assert custom_object['createdat'] == '2020-07-31T06:37:13.745Z' assert custom_object['partition'] == 'aws' assert custom_object['schemaversion'] == 2.0 assert custom_object['updatedat'] == '2020-09-12T09:25:34.086Z'
52.18022
126
0.539087
23,188
0.976666
0
0
481
0.020259
0
0
10,930
0.460366
d5c68966a759ee86d163e95dee1679657c063de3
2,236
py
Python
Python Spider/xpath/03 login.py
CodingGorit/Coding-with-Python
b0f1d5d704b816a85b0ae57b46d00314de2a67b9
[ "Apache-2.0" ]
1
2020-01-31T15:57:29.000Z
2020-01-31T15:57:29.000Z
Python Spider/xpath/03 login.py
CodingGorit/Coding-with-Python
b0f1d5d704b816a85b0ae57b46d00314de2a67b9
[ "Apache-2.0" ]
null
null
null
Python Spider/xpath/03 login.py
CodingGorit/Coding-with-Python
b0f1d5d704b816a85b0ae57b46d00314de2a67b9
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-
#file: 03 login.py
#@author: Gorit
#@contact: [email protected]
#@time: 2020/1/20 12:44

import requests
from lxml import etree


# Wrapper class that logs in to lmonkey.com and fetches the order page
class lMonKey():
    # Login request URL
    loginUrl = "https://www.lmonkey.com/login"
    # Account center URL
    orderUrl = "https://www.lmonkey.com/my/order"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400"
    }
    # Request object (session)
    req = None
    # token value
    token = ''
    # Order number

    # Initialization method
    def __init__(self):
        # Initialize the request session
        self.req = requests.session()
        if self.getlogin():  # GET login succeeded
            if self.postlogin():  # POST login succeeded
                self.getordder()

    # GET the login page and extract _token
    def getlogin(self):
        # 1. GET the login page, set cookies, and extract _token
        res = self.req.get(url=self.loginUrl, headers=self.headers)
        if res.status_code == 200:
            print("GET page request succeeded")
            html = etree.HTML(res.text)
            self.token = html.xpath("//input[@name='_token']/@value")[0]
            # Find the input tag whose name attribute is "_token" and take its value, i.e. the token
            # input[@name='xxx'] selects the specified tag
            print("Token retrieved successfully")
            return True
        else:
            print("Request error")

    # POST login, set cookies
    def postlogin(self):
        uname = input("Enter your phone number: ")
        passw = input("Enter your password: ")
        data = {
            "_token": self.token,
            "username": uname,
            "password": passw
        }
        # Send the POST request
        res = self.req.post(url=self.loginUrl, headers=self.headers, data=data)
        if res.status_code == 200 or res.status_code == 302:
            print("Login successful!!")
            return True

    def getordder(self):
        # Fetch the order page with a GET request to obtain the default order number
        # Then just parse the data
        res = self.req.get(url=self.orderUrl, headers=self.headers)
        if res.status_code == 200:
            print("Order page request succeeded")
            html = etree.HTML(res.text)
            # Parse the page
            r = html.xpath("//div[@class='avatar-content']/small/text()")
            print(r)
        else:
            print("Page request failed")


obj = lMonKey()
27.268293
175
0.549195
2,388
0.907295
0
0
0
0
0
0
1,247
0.473784
d5c72a3c1f9827cd7d71f3da809f2313db6f0a32
9,730
py
Python
src/gui/MultiplayerPlayerInfo.py
fireclawthefox/AnkandoraLight
05b71e1a2919141cce02cb1aade95fbac682614b
[ "BSD-2-Clause" ]
3
2020-07-31T10:27:06.000Z
2022-01-11T20:28:55.000Z
src/gui/MultiplayerPlayerInfo.py
fireclawthefox/AnkandoraLight
05b71e1a2919141cce02cb1aade95fbac682614b
[ "BSD-2-Clause" ]
null
null
null
src/gui/MultiplayerPlayerInfo.py
fireclawthefox/AnkandoraLight
05b71e1a2919141cce02cb1aade95fbac682614b
[ "BSD-2-Clause" ]
1
2020-07-30T08:23:28.000Z
2020-07-30T08:23:28.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- # This file was created using the DirectGUI Designer from direct.gui import DirectGuiGlobals as DGG from direct.gui.DirectFrame import DirectFrame from direct.gui.DirectLabel import DirectLabel from direct.gui.DirectButton import DirectButton from direct.gui.DirectOptionMenu import DirectOptionMenu from panda3d.core import ( LPoint3f, LVecBase3f, LVecBase4f, TextNode ) class GUI: def __init__(self, rootParent=None): self.frmMain = DirectFrame( frameColor=(1, 1, 1, 1), frameSize=(-1.777778, 1.77777778, -1.1638, 1.1638), hpr=LVecBase3f(0, 0, 0), image='assets/menu/Background.png', pos=LPoint3f(0, 0, 0), image_scale=LVecBase3f(1.77778, 1, 1.1638), image_pos=LPoint3f(0, 0, 0), parent=rootParent, ) self.frmMain.setTransparency(0) self.frmSinglePlayerCreateGame = DirectFrame( borderWidth=(0.01, 0.01), frameColor=(1, 1, 1, 1), frameSize=(-0.65, 0.65, -0.55, 0.55), hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(-0.425, 0, 0), relief=5, parent=self.frmMain, ) self.frmSinglePlayerCreateGame.setTransparency(0) self.pg703 = DirectLabel( hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(0, 0, 0.425), scale=LVecBase3f(0.1, 0.1, 0.1), text='Player Info', text_align=TextNode.A_center, text_scale=(1, 1), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmSinglePlayerCreateGame, ) self.pg703.setTransparency(0) self.pg13803 = DirectButton( hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(-0.35, 0, -0.45), scale=LVecBase3f(0.1, 0.1, 0.1), text='Start', text_align=TextNode.A_center, text_scale=(1, 1), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmSinglePlayerCreateGame, command=base.messenger.send, extraArgs=["multiplayerPlayerInfo_start"], ) self.pg13803.setTransparency(0) self.pg5219 = DirectLabel( hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(-0.6, 0, 0.02), scale=LVecBase3f(0.1, 0.1, 0.1), text='Player Class', text_align=TextNode.A_left, text_scale=(1, 1), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmSinglePlayerCreateGame, ) self.pg5219.setTransparency(0) self.optionPlayerClass = DirectOptionMenu( items=['item1'], frameSize=(0.07500000298023224, 3.012500149011612, -0.11250001192092896, 0.75), hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(0.2, 0, 0.005), scale=LVecBase3f(0.1, 0.1, 0.1), text='item1', cancelframe_frameSize=(-1, 1, -1, 1), cancelframe_hpr=LVecBase3f(0, 0, 0), cancelframe_pos=LPoint3f(0, 0, 0), cancelframe_relief=None, item_frameSize=(0.07500000298023224, 2.4125001430511475, -0.11250001192092896, 0.75), item_hpr=LVecBase3f(0, 0, 0), item_pos=LPoint3f(-0.075, 0, -0.75), item_text='item1', item0_text_align=TextNode.A_left, item0_text_scale=(1, 1), item0_text_pos=(0, 0), item0_text_fg=LVecBase4f(0, 0, 0, 1), item0_text_bg=LVecBase4f(0, 0, 0, 0), item0_text_wordwrap=None, popupMarker_frameSize=(-0.5, 0.5, -0.2, 0.2), popupMarker_hpr=LVecBase3f(0, 0, 0), popupMarker_pos=LPoint3f(2.7125, 0, 0.31875), popupMarker_relief=2, popupMarker_scale=LVecBase3f(0.4, 0.4, 0.4), popupMenu_frameSize=(0, 2.3375001400709152, -0.862500011920929, 0), popupMenu_hpr=LVecBase3f(0, 0, 0), popupMenu_pos=LPoint3f(0, 0, 0), popupMenu_relief='raised', text_align=TextNode.A_left, text_scale=(1, 1), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmSinglePlayerCreateGame, ) self.optionPlayerClass.setTransparency(0) self.btnCancel = DirectButton( hpr=LVecBase3f(0, 
0, 0), pos=LPoint3f(0.325, 0, -0.45), scale=LVecBase3f(0.1, 0.1, 0.1), text='Cancel', text_align=TextNode.A_center, text_scale=(1, 1), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmSinglePlayerCreateGame, command=base.messenger.send, extraArgs=["multiplayerPlayerInfo_cancel"], ) self.btnCancel.setTransparency(0) self.frmPlayerInfo = DirectFrame( borderWidth=(0.01, 0.01), frameColor=(1, 1, 1, 1), frameSize=(-0.5, 0.5, -0.55, 0.55), hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(0.765, 0, 0), relief=3, parent=self.frmMain, ) self.frmPlayerInfo.setTransparency(0) self.lblInfoHeader = DirectLabel( frameColor=(0.8, 0.8, 0.8, 0.0), hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(0, 0, 0.45), scale=LVecBase3f(0.1, 0.1, 0.1), text='Info', text_align=TextNode.A_center, text_scale=(1, 1), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmPlayerInfo, ) self.lblInfoHeader.setTransparency(0) self.frmImageHero = DirectFrame( frameColor=(1, 1, 1, 1), frameSize=(-0.15, 0.15, -0.2, 0.2), hpr=LVecBase3f(0, 0, 0), image='/home/fireclaw/workspace/Ankandora/AnkandoraLight/design/guiGraphics/heroArcher.png', pos=LPoint3f(-0.275, 0, 0.195), image_scale=LVecBase3f(0.15, 1, 0.2), image_pos=LPoint3f(0, 0, 0), parent=self.frmPlayerInfo, ) self.frmImageHero.setTransparency(1) self.lblClassDescription = DirectLabel( frameColor=(0.8, 0.8, 0.8, 0.0), hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(-0.12, 0, 0.31), scale=LVecBase3f(0.1, 0.1, 0.1), text='The archer shoots from afar and gains the first-strike', text_align=TextNode.A_left, text_scale=(0.6, 0.6), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=10.0, parent=self.frmPlayerInfo, ) self.lblClassDescription.setTransparency(0) self.lblHealth = DirectLabel( frameColor=(0.8, 0.8, 0.8, 0.0), hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(-0.28, 0, -0.1), scale=LVecBase3f(0.1, 0.1, 0.1), text='Health', text_align=TextNode.A_center, text_scale=(0.7, 0.7), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmPlayerInfo, ) self.lblHealth.setTransparency(0) self.lblAttack = DirectLabel( frameColor=(0.8, 0.8, 0.8, 0.0), hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(-0.275, 0, -0.285), scale=LVecBase3f(0.1, 0.1, 0.1), text='Attack', text_align=TextNode.A_center, text_scale=(0.7, 0.7), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmPlayerInfo, ) self.lblAttack.setTransparency(0) self.lblHealthValue = DirectLabel( frameColor=(0.8, 0.8, 0.8, 0.0), hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(-0.275, 0, -0.17), scale=LVecBase3f(0.1, 0.1, 0.1), text='7', text_align=TextNode.A_center, text_scale=(0.6, 0.6), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmPlayerInfo, ) self.lblHealthValue.setTransparency(0) self.lblAttackValue = DirectLabel( frameColor=(0.8, 0.8, 0.8, 0.0), hpr=LVecBase3f(0, 0, 0), pos=LPoint3f(-0.275, 0, -0.36), scale=LVecBase3f(0.1, 0.1, 0.1), text='4', text_align=TextNode.A_center, text_scale=(0.6, 0.6), text_pos=(0, 0), text_fg=LVecBase4f(0, 0, 0, 1), text_bg=LVecBase4f(0, 0, 0, 0), text_wordwrap=None, parent=self.frmPlayerInfo, ) self.lblAttackValue.setTransparency(0) def show(self): self.frmMain.show() def hide(self): self.frmMain.hide() def destroy(self): self.frmMain.destroy()
35.126354
104
0.528777
9,295
0.955293
0
0
0
0
0
0
419
0.043063
d5c7e9662e071c24633307f69bc18856ffa49ecf
634
py
Python
publications/time_mag.py
mkoo21/rss-review-scraper
4adde8586ce55d7bb211bcfbb9bcccd1edc8b6a5
[ "BSD-3-Clause" ]
null
null
null
publications/time_mag.py
mkoo21/rss-review-scraper
4adde8586ce55d7bb211bcfbb9bcccd1edc8b6a5
[ "BSD-3-Clause" ]
1
2021-06-01T23:47:57.000Z
2021-06-01T23:47:57.000Z
publications/time_mag.py
mkoo21/rss-review-scraper
4adde8586ce55d7bb211bcfbb9bcccd1edc8b6a5
[ "BSD-3-Clause" ]
null
null
null
from . import FROM_FEED_PUBLISHED_TODAY, STRINGIFY


def filter_by_tag(tag, entries):
    matches = list(filter(
        lambda x: any(list(map(
            lambda y: y.term == tag,
            x.tags
        ))),
        entries
    ))
    if len(matches) == 0:
        return ""
    return "<h2>TIME {} - {} results</h2>".format(tag, len(matches)) + \
        "".join(list(map(lambda x: STRINGIFY(x, 'TIME'), matches)))


def TIME():
    pub_today = FROM_FEED_PUBLISHED_TODAY('https://feeds2.feedburner.com/time/entertainment')
    return filter_by_tag('movies', pub_today) + \
        filter_by_tag('Television', pub_today)
28.818182
93
0.594637
0
0
0
0
0
0
0
0
111
0.175079
d5c8ad01f8962aad9216b71e8846b60294d68306
3,017
py
Python
2020/21/code.py
irobin591/advent-of-code-2019
279c28a2863558bd014b289802fff4b444c5d6cf
[ "MIT" ]
null
null
null
2020/21/code.py
irobin591/advent-of-code-2019
279c28a2863558bd014b289802fff4b444c5d6cf
[ "MIT" ]
null
null
null
2020/21/code.py
irobin591/advent-of-code-2019
279c28a2863558bd014b289802fff4b444c5d6cf
[ "MIT" ]
null
null
null
# Advent of Code 2020
# Day 21
# Author: irobin591

import os
import doctest
import re

re_entry = re.compile(r'^([a-z ]+) \(contains ([a-z, ]*)\)$')

with open(os.path.join(os.path.dirname(__file__), "input.txt"), 'r') as input_file:
    input_data = input_file.read().strip().split('\n')


def part1(input_data):
    """
    >>> part1(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
    5
    """
    # dict['allergen'] = ['asdfa', 'agbsfb']
    allergens = {}
    ingredients = []

    # map strings to allergens
    for entry in input_data:
        r = re_entry.match(entry)
        if not r:
            raise RuntimeError("")
        contents = set(r.group(1).split(' '))
        ingredients.extend(contents)
        for allergen in r.group(2).split(', '):
            if allergen not in allergens:
                allergens[allergen] = contents
            else:
                # only keep already added ingredients
                allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]

    # print(allergens)
    # print(ingredients)

    ingredients_with_allergens = set([y for x in allergens.values() for y in x])

    # print(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))
    return len(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))


def part2(input_data):
    """
    >>> part2(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
    'mxmxvkd,sqjhc,fvjkl'
    """
    # dict['allergen'] = ['asdfa', 'agbsfb']
    allergens = {}
    ingredients = []

    # map strings to allergens
    for entry in input_data:
        r = re_entry.match(entry)
        if not r:
            raise RuntimeError("")
        contents = set(r.group(1).split(' '))
        ingredients.extend(contents)
        for allergen in r.group(2).split(', '):
            if allergen not in allergens:
                allergens[allergen] = list(contents)
            else:
                # only keep already added ingredients
                allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]

    # print(allergens)

    # (allergen, ingredient)
    assigned_allergens = []

    while sum([len(ingreds) for ingreds in allergens.values()]) > 0:
        for allergen in allergens:
            if len(allergens[allergen]) == 1:
                ingredient = allergens[allergen][0]
                assigned_allergens.append((allergen, ingredient))
                for allergen2 in allergens:
                    if ingredient in allergens[allergen2]:
                        allergens[allergen2].remove(ingredient)

    assigned_allergens.sort(key=lambda x: x[0])

    return ",".join([x[1] for x in assigned_allergens])


if __name__ == "__main__":
    doctest.testmod()
    print("Part One: {}".format(part1(input_data)))
    print("Part Two: {}".format(part2(input_data)))
    pass
30.785714
114
0.599271
0
0
0
0
0
0
0
0
808
0.267816
d5c9c3dcfd93144a733bdffa2a7d7a7dc364d51d
2,807
py
Python
tests/test_html_escaping.py
copart/pandoc-mustache
f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa
[ "CC0-1.0" ]
43
2017-12-27T05:57:00.000Z
2022-03-18T10:07:28.000Z
tests/test_html_escaping.py
copart/pandoc-mustache
f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa
[ "CC0-1.0" ]
10
2018-02-07T11:20:37.000Z
2021-04-22T21:44:19.000Z
tests/test_html_escaping.py
copart/pandoc-mustache
f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa
[ "CC0-1.0" ]
8
2018-11-05T13:10:35.000Z
2021-08-30T18:14:02.000Z
""" Test that escaping characters for HTML is disabled. """ import os, subprocess def test_escape_singlequote(tmpdir): # Define empty dictionaries doc = {} template = {} # Prepare file names doc['path'] = tmpdir.join("document.md") template['path'] = tmpdir.join("template.yaml") # Prepare file contents doc['metadata'] = '''--- mustache: {mustachefile} --- ''' doc['mfiles'] = { "mustachefile": template['path'] } doc['text'] = 'Hello {{place}}' template['content'] = "place: world ' universe" # Write contents to files with open(doc['path'].strpath, "a") as myfile: myfile.write(doc['metadata'].format(**doc['mfiles'])) myfile.write(doc['text']) template['path'].write(template['content']) # Run pandoc output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True) # Test output assert output == "Hello world ' universe\n" def test_escape_gt(tmpdir): # Define empty dictionaries doc = {} template = {} # Prepare file names doc['path'] = tmpdir.join("document.md") template['path'] = tmpdir.join("template.yaml") # Prepare file contents doc['metadata'] = '''--- mustache: {mustachefile} --- ''' doc['mfiles'] = { "mustachefile": template['path'] } doc['text'] = 'Hello {{place}}' template['content'] = "place: world > universe" # Write contents to files with open(doc['path'].strpath, "a") as myfile: myfile.write(doc['metadata'].format(**doc['mfiles'])) myfile.write(doc['text']) template['path'].write(template['content']) # Run pandoc output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True) # Test output assert output == "Hello world > universe\n" def test_escape_ampersand(tmpdir): # Define empty dictionaries doc = {} template = {} # Prepare file names doc['path'] = tmpdir.join("document.md") template['path'] = tmpdir.join("template.yaml") # Prepare file contents doc['metadata'] = '''--- mustache: {mustachefile} --- ''' doc['mfiles'] = { "mustachefile": template['path'] } doc['text'] = 'Hello {{place}}' template['content'] = "place: world & universe" # Write contents to files with open(doc['path'].strpath, "a") as myfile: myfile.write(doc['metadata'].format(**doc['mfiles'])) myfile.write(doc['text']) template['path'].write(template['content']) # Run pandoc output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True) # Test output assert output == "Hello world & universe\n"
28.642857
139
0.617385
0
0
0
0
0
0
0
0
1,322
0.470965
d5cb7cb45edf1a90b51258da74fc6a1d2b6758fa
2,761
py
Python
app.py
iandees/microdata2osm
1505b8072880055033ddbb85626fcdb857c97d4e
[ "MIT" ]
1
2019-11-05T16:02:17.000Z
2019-11-05T16:02:17.000Z
app.py
iandees/microdata2osm
1505b8072880055033ddbb85626fcdb857c97d4e
[ "MIT" ]
null
null
null
app.py
iandees/microdata2osm
1505b8072880055033ddbb85626fcdb857c97d4e
[ "MIT" ]
null
null
null
from flask import Flask, jsonify, request
from w3lib.html import get_base_url
import extruct
import requests

app = Flask(__name__)


def extract_osm_tags(data):
    tags = {}

    schema_org_type = data.get('@type')

    if schema_org_type == 'Restaurant':
        tags['amenity'] = 'restaurant'

        serves_cuisine = tags.get('servesCuisine')
        if serves_cuisine:
            cuisine = []
            if 'Burgers' in serves_cuisine:
                cuisine.append('burger')
            if 'Fast Casual' in serves_cuisine:
                tags['amenity'] = 'fast_food'
    elif schema_org_type == 'Hotel':
        tags['tourism'] = 'hotel'
    elif schema_org_type == 'ExerciseGym':
        tags['leisure'] = 'fitness_centre'
    elif schema_org_type == 'BankOrCreditUnion':
        tags['amenity'] = 'bank'
    else:
        return {}

    address = data.get('address', {}).get('streetAddress')
    if address:
        tags['addr:full'] = address

    address = data.get('address', {}).get('addressLocality')
    if address:
        tags['addr:city'] = address

    address = data.get('address', {}).get('addressRegion')
    if address:
        tags['addr:state'] = address

    address = data.get('address', {}).get('postalCode')
    if address:
        tags['postcode'] = address

    address = data.get('address', {}).get('addressCountry')
    if address:
        tags['addr:country'] = address

    brand = data.get('brand')
    if brand:
        tags['brand'] = brand

    name = data.get('name')
    if name:
        tags['name'] = name

    telephone = data.get('telephone')
    if telephone:
        tags['phone'] = telephone

    faxNumber = data.get('faxNumber')
    if faxNumber:
        tags['fax'] = faxNumber

    url = data.get('url')
    if url:
        tags['website'] = url

    return tags


@app.route("/extract")
def extract():
    url = request.args.get('url')

    if not url:
        return jsonify(error="Must specify url parameter"), 400

    app.logger.info("Extracting json-ld from %s", url)
    r = requests.get(url)
    if r.status_code != 200:
        app.logger.info("HTTP %s from %s", r.status_code, url)
        return jsonify(error="Error fetching url"), 502

    base_url = get_base_url(r.text, r.url)
    data = extruct.extract(r.text, base_url=base_url, syntaxes=["json-ld"])
    data = data.get('json-ld')

    output = {}

    suggested_tags = {}
    for entry in data:
        suggested_tags.update(extract_osm_tags(entry))

    output = {
        'status': {
            'url': url,
            'success': len(suggested_tags) > 0,
        },
        'suggested_tags': suggested_tags,
    }

    if request.args.get('include_extracted', type=bool):
        output['extracted'] = data

    return jsonify(output)
25.803738
75
0.589279
0
0
0
0
942
0.341181
0
0
646
0.233973
d5cdc3a0f5e46ad0ab740a282e0265f0e1bb27d5
702
py
Python
dags/simple_python_taskflow_api.py
davemasino/airflow101
f940e169b9c562e3834a201827b615744a99b86d
[ "Apache-2.0" ]
null
null
null
dags/simple_python_taskflow_api.py
davemasino/airflow101
f940e169b9c562e3834a201827b615744a99b86d
[ "Apache-2.0" ]
null
null
null
dags/simple_python_taskflow_api.py
davemasino/airflow101
f940e169b9c562e3834a201827b615744a99b86d
[ "Apache-2.0" ]
null
null
null
""" A simple Python DAG using the Taskflow API. """ import logging import time from datetime import datetime from airflow import DAG from airflow.decorators import task log = logging.getLogger(__name__) with DAG( dag_id='simple_python_taskflow_api', schedule_interval=None, start_date=datetime(2021, 1, 1), catchup=False, tags=['airflow101'], ) as dag: @task(task_id="hello_message") def say_hello(): """Print a hello message""" print("Hello, World!") hello_task = say_hello() @task(task_id="go_to_sleep") def sleep_for_1(): """Go to sleep""" time.sleep(1) sleeping_task = sleep_for_1() hello_task >> sleeping_task
20.057143
43
0.665242
0
0
0
0
217
0.309117
0
0
178
0.253561
d5cdc4a618ee4e3bc14a1bf765626931e9530f36
1,744
py
Python
pyunmarked/roylenichols.py
kenkellner/pyunmarked
485bd96b4ca12a019b478fc19f68f577279ac9b8
[ "MIT" ]
null
null
null
pyunmarked/roylenichols.py
kenkellner/pyunmarked
485bd96b4ca12a019b478fc19f68f577279ac9b8
[ "MIT" ]
null
null
null
pyunmarked/roylenichols.py
kenkellner/pyunmarked
485bd96b4ca12a019b478fc19f68f577279ac9b8
[ "MIT" ]
null
null
null
from . import model
import numpy as np
from scipy import special, stats

class RoyleNicholsModel(model.UnmarkedModel):
    def __init__(self, det_formula, abun_formula, data):
        self.response = model.Response(data.y)
        abun = model.Submodel("Abundance", "abun", abun_formula, np.exp, data.site_covs)
        det = model.Submodel("Detection", "det", det_formula, special.expit, data.obs_covs)
        self.submodels = model.SubmodelDict(abun=abun, det=det)

    def negloglik(self, x, mod, K):
        x = np.array(x)
        beta_abun = x[mod["abun"].index]
        beta_det = x[mod["det"].index]
        y = mod.response.y
        N, J = y.shape
        lam = mod["abun"].predict(beta=beta_abun, interval=False)
        r = mod["det"].predict(beta=beta_det, interval=False).reshape(N, J)
        q = 1 - r

        nll = 0.0
        for i in range(N):
            kvals = range(int(mod.response.Kmin[i]), int(K)+1)
            f = stats.poisson.pmf(kvals, lam[i])
            ymat = np.tile(y[i,], (len(kvals), 1))
            qmat = np.tile(q[i,], (len(kvals), 1))
            kmat = np.tile(kvals, (J, 1)).transpose()
            pmat = 1 - qmat**kmat
            g = stats.binom.logpmf(ymat, 1, pmat).sum(axis=1)
            fg = f * np.exp(g)
            nll -= np.log(fg.sum())

        return nll

    def simulate(self):
        N, J = self.response.y.shape
        lam = self.predict("abun", interval=False)
        q = 1 - self.predict("det", interval=False).reshape(N, J)
        z = np.random.poisson(lam, N)
        zrep = np.tile(z, (J,1)).transpose()
        p = 1 - q**zrep
        y = np.empty((N, J))
        for i in range(N):
            y[i,] = np.random.binomial(1, p[i,], J)
        return y
37.913043
91
0.544151
1,670
0.957569
0
0
0
0
0
0
66
0.037844
d5cdf640db99a0e2d2dcf804807be669d9939f1e
75,933
py
Python
proc_chords_xarray.py
pgriewank/ASR_tools
306a7d92725888485a35f8824433ad7b0451b569
[ "MIT" ]
null
null
null
proc_chords_xarray.py
pgriewank/ASR_tools
306a7d92725888485a35f8824433ad7b0451b569
[ "MIT" ]
null
null
null
proc_chords_xarray.py
pgriewank/ASR_tools
306a7d92725888485a35f8824433ad7b0451b569
[ "MIT" ]
null
null
null
#Contains the functions needed to process both chords and regularized beards # proc_chords is used for chords #proc_beard_regularize for generating beards #proc_pdf saves pdfs of a variable below cloud base #Both have a large overlap, but I split them in two to keep the one script from getting to confusing. import numpy as np import math from netCDF4 import Dataset import os import time as ttiimmee from scipy.interpolate import interp1d from scipy.interpolate import interp2d #from scipy.interpolate import griddata #from mpl_toolkits.axes_grid1 import make_axes_locatable import pickle import sys #sys.path.insert(0, "/home/pgriewank/code/2019-chords-plumes/") #from unionfind import UnionFind from cusize_functions import * #import matplotlib.pyplot as plt import pandas as pd import gc import glob import xarray as xr #turned into a function #removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly #Full list of variables to analyze is unclear, I will try to include everything available, but this might break the memory bank #want to keep the automatic x and y calculation #Scaling shouldn't be needed, as all chord properties should be indepenent of wind direction (right?) #Similarly, no basedefinition is needed, all values are relative to cloud base #Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file. #Changing 3D output #Default is now to always go over x and y directions #TODO #plot_flag disabled for the mean time def proc_chords( date_str='20160611', directory_input='/data/testbed/lasso/sims/', directory_output='/data/testbed/lasso/chords/', data_dim_flag=1, base_percentile = 25, special_name='', chord_times = 0, N_it_min=0, N_it_max=1e9): # plot_curtains_flag: 0 nothing, 1 plots pre regularization plots, currently dissabled # data_dim_flag: 1 = column, 3 = 3D snapshot # chord_times: 0 use Neils values, use values that fit model output exactly with not gap possible # directory_input = '/data/testbed/lasso/sims/' #+date # N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly # N_it_min = start number of iterables, 3D timesteps or column files. Only reall makes sense for 3D to avoid some weird initial fields. time_begin = ttiimmee.time() dz = 25.0 #39.0625 #should be overwritten after the profile data is loaded dx = 25.0 date = date_str n_percentiles = 7 #Number of percentiles percentiles = np.array([5,10,35,50,65,90,95]) #1D clustering parameters in seconds, taken to agree with Lareau if chord_times == 0: t_gap = 20 t_min = 30 t_max = 1200*100 #Made a 100 times longer cell_min = 3 #Minimal number of cells needed per chord # #1D clustering parameters, #set super strict, but goes on for a loooong time as well if chord_times == 1: t_gap = 0. #should be pretty strict, no gaps allowed! t_min = 0.0 t_max = 1e9 cell_min = 3 #Minimal number of cells needed per chord ql_min = 1e-5 #value used to determine existence of cloud z_min = 10 #Index of minimum z_vlvl of the cbl print('looking into date: ',date) if data_dim_flag==1: filename_column = [] #uses glob to get all files which contain column. 
column_files = glob.glob(directory_input+date+'/*column*.nc') for c_file in column_files: filename_column.append(c_file) print('filename column included:',c_file) if data_dim_flag==3: filename_w = directory_input+date+'/w.nc' filename_l = directory_input+date+'/ql.nc' filename_qt = directory_input+date+'/qt.nc' filename_thl = directory_input+date+'/thl.nc' file_w = Dataset(filename_w,read='r') file_ql = Dataset(filename_l,read='r') file_thl = Dataset(filename_thl,read='r') file_qt = Dataset(filename_qt,read='r') [nz, nx, ny] = get_zxy_dimension(filename_l,'ql') filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0] #if date=='bomex': # filename_prof=directory_input+date+'/bomex.default.0000000.nc' file_prof = Dataset(filename_prof,read='r') n_chords = 0 #I will try lists first, which I will then convert to arrays in the end before saving in pandas chord_timesteps = [] chord_length = [] chord_duration = [] chord_time = [] chord_height = [] #percentile of cloud base chord_w = [] chord_w_up = [] #mean over updrafts chord_w_base = [] chord_w_star = [] chord_thl_star = [] chord_qt_star = [] chord_thl = [] chord_thl_25 = [] chord_thl_75 = [] chord_qt = [] chord_qt_25 = [] chord_qt_75 = [] chord_w_flux = [] #Sum of w below #Coming next chord_w_per = np.zeros([0,n_percentiles]) chord_w_per_up = np.zeros([0,n_percentiles]) #This now a bit trickier then for the 3D version. Will have to calculate a vector for the lower time resolution of the profile, #Then latter apply the nearest value to the full 1d time vec #First loading surface variables from default profile print('calculating cbl height from profile file') T = file_prof['thl'][:,0] p = file_prof['p'][:,0]*0.0+99709 qt = file_prof['qt'][:,0] w2 = file_prof['w2'][:,:] thl_prof = file_prof['thl'][:,:] qt_prof = file_prof['qt'][:,:] nz_prof = w2.shape[1] z_prof = file_prof['z'][:] dz = z_prof[1]-z_prof[0] total_surf_buoy_flux = file_prof['bflux'][:,1] total_surf_thl_flux = file_prof['thlflux'][:,1] total_surf_qt_flux = file_prof['qtflux'][:,1] print('dz: ',dz) time_prof = file_prof['time'][:] cbl_1d_prof = time_prof*0.0 #Hack together the Lifting condensation level LCL qt_pressure = p*qt sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 )) #rel_hum = np.asmatrix(qt_pressure/sat_qv)[0] rel_hum = qt_pressure/sat_qv #Dewpoint A = 17.27 B = 237.7 alpha = ((A * (T- 273.15)) / (B + (T-273.15))) alpha = alpha + np.log(rel_hum) dewpoint = (B * alpha) / (A - alpha) dewpoint = dewpoint + 273.15 LCL = 125.*(T-dewpoint) LCL_index = np.floor(LCL/dz) #now calculate the cbl top for each profile time for tt in range(len(time_prof)): w_var = 1.0 z=z_min while w_var > 0.08: z += 1 w_var = w2[tt,z] #w_var = np.var(w_1d[z,:]) #Mimimum of LCL +100 or variance plus 300 m cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt]) #To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens if cbl_1d_prof[tt]>0.6*nz_prof: print('warning, cbl height heigher than 0.6 domain height, could crash regularization later on, timestep: ',tt) cbl_1d_prof[tt] = math.floor(nz*0.6) print('resulting indexes of cbl over time: ',cbl_1d_prof) print('calculated LCL: ',LCL_index) #Now we either iterate over columns or timesteps if data_dim_flag==1: n_iter =len(filename_column) if data_dim_flag==3: n_iter =len(time_prof) #for col in filename_column: n_iter = min(n_iter,N_it_max) for it in range(N_it_min,n_iter): print('n_chords: ',n_chords) time1 = ttiimmee.time() if data_dim_flag ==1: print('loading column: 
',filename_column[it]) file_col = Dataset(filename_column[it],read='r') w_2d = file_col.variables['w'][:] w_2d = w_2d.transpose() ql_2d = file_col.variables['ql'][:] ql_2d = ql_2d.transpose() t_1d = file_col.variables['time'][:] print('t_1d',t_1d) thl_2d = file_col.variables['thl'][:] thl_2d = thl_2d.transpose() qt_2d = file_col.variables['qt'][:] qt_2d = qt_2d.transpose() u_2d = file_col.variables['u'][:] u_2d = u_2d.transpose() v_2d = file_col.variables['v'][:] v_2d = v_2d.transpose() #lets try saving memory by closing files #file_col.close() #The needed cbl height cbl_1d = t_1d*0 #The needed surface_bouyancy_flux bflux_s_1d = t_1d*0 qtflux_s_1d = t_1d*0 thlflux_s_1d = t_1d*0 #Now we go through profile time snapshots and allocate the closest full time values to the profile values dt_2 = (time_prof[1]-time_prof[0])/2 for tt in range(len(time_prof)): cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt] bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt] qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt] thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt] #to get anomalies of thl and qt we subtract the closet mean profile for tt in range(len(time_prof)): #globals().update(locals()) tmp_matrix = thl_2d[:,abs(t_1d-time_prof[tt])<dt_2] tmp_vector = thl_prof[tt,:] #because the vectors don't perfectly align thl_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose() tmp_matrix = qt_2d[:,abs(t_1d-time_prof[tt])<dt_2] tmp_vector = qt_prof[tt,:] #because the vectors don't perfectly align qt_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose() # = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:] if data_dim_flag ==3: if sum(file_prof['ql'][it,:])>0.0: print('loading timestep: ',it) ql_3d = grab_3d_field(file_ql ,it,'ql') w_3d = grab_3d_field(file_w ,it,'w') qt_3d = grab_3d_field(file_qt ,it,'qt') thl_3d = grab_3d_field(file_thl ,it,'thl') #Here we have to do all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector w_2d = np.array(w_3d.reshape((nz,nx*ny))) ql_2d = np.array(ql_3d.reshape((nz,nx*ny))) qt_2d = np.array(qt_3d.reshape((nz,nx*ny))) thl_2d = np.array(thl_3d.reshape((nz,nx*ny))) #Now we do the same thing with the transposed field, use to be an either or, now just add it on w_3d = np.transpose( w_3d, (0, 2, 1)) ql_3d = np.transpose(ql_3d, (0, 2, 1)) qt_3d = np.transpose(qt_3d, (0, 2, 1)) thl_3d = np.transpose(thl_3d, (0, 2, 1)) w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))]) ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))]) thl_2d = np.hstack([thl_2d ,np.array(thl_3d.reshape((nz,nx*ny)))]) qt_2d = np.hstack([qt_2d ,np.array(qt_3d.reshape((nz,nx*ny)))]) #Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though del w_3d del ql_3d del thl_3d del qt_3d #hopefully this helps gc.collect() #Getting anomalies of thl and qt qt_2d[:,:] = (qt_2d.transpose() - qt_prof[it,:]).transpose() thl_2d[:,:] = (thl_2d.transpose() - thl_prof[it,:]).transpose() #to get the fake time vector we load the wind from the profile data, which devided by the grid spacing gives us a fake time resolution #we use the calculated cbl+300 meter or lcl as reference height ref_lvl = cbl_1d_prof[it] u_ref = file_prof['u'][it,ref_lvl] v_ref = file_prof['v'][it,ref_lvl] V_ref = np.sqrt(u_ref**2+v_ref**2) time_resolution = dx/V_ref print('time iterative, V_ref, time_resolution',it, str(V_ref)[:4], 
str(time_resolution)[:4] ) #fake t vector, t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it #dt_1d = t_1d*0 #dt_1d[1:] = t_1d[1:]-t_1d[:-1] else: #If no clouds are present we pass a very short empty fields over to the chord searcher print('skipping timestep: ',it,' cause no clouds') ql_2d = np.zeros((nz,1)) w_2d = np.zeros((nz,1)) thl_2d = np.zeros((nz,1)) qt_2d = np.zeros((nz,1)) t_1d = np.zeros(1) #The needed cbl height, which constant everywhere cbl_1d = t_1d*0 cbl_1d[:] = cbl_1d_prof[it] #The needed surface buoyancy flux, which is constant everywhere bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it] qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it] thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it] time2 = ttiimmee.time() print('loading time:',(time2-time1)*1.0,) ### Detecting lowest cloud cell is within 300 m of CBL nt = len(cbl_1d) cl_base = np.zeros(nt) #Detecting all cloudy cells #Use to have a different method using nans that doesn:t work anymore somehow. Now I just set it really high where there is no cloud. for t in range(nt): if np.max(ql_2d[:,t])>ql_min : cl_base[t]=np.argmax(ql_2d[:,t]>1e-6) else: cl_base[t]=10000000 cl_base=cl_base.astype(int) #Now find c base lower than the max height cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0] cbl_cl_binary = cl_base*0 cbl_cl_binary[cbl_cl_idx]=1 t_cbl_cl=t_1d[cbl_cl_idx] ### Clustering 1D #Now we simply go through all cloudy timesteps and detect chords #If they fulful chord time requirements and have a number of values which fulfills cell_min they are counted as a chord #and their properties are calculatted immediately t_cloudy_idx = 0 #n_chords = 0 chord_idx_list = [] print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns') chord_idx_list = [] while t_cloudy_idx < len(cbl_cl_idx)-1:# and n_curtain<100*it: ####################################GO HERE TO SET MAXIMUM CURTAIN #print(t_chord_begin) t_chord_begin = t_cloudy_idx #now connecting all cloudy indexes #Originally only cared if they fulfilled cloud criteria, but now I also hard coded that neighboring cells always count ##Check if the index of the next cloudy cell is the same as the next index in total, if so the cells are connected while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap): t_cloudy_idx += 1 t_chord_end = t_cloudy_idx #Checking if it fulfils chord criteria regaring time #we also added a minimum height of 100 m to screen out fog/dew stuff at the surface if t_chord_end-t_chord_begin>cell_min: chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]]) ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin] else: chord_z_min = 0 ch_duration = 0 if ch_duration>t_min and ch_duration<t_max and chord_z_min > 4: if t_chord_end-t_chord_begin>cell_min-1: n_chords += 1 #Getting the chord beginning and end idx_beg_chord = cbl_cl_idx[t_chord_begin] idx_end_chord = cbl_cl_idx[t_chord_end] time_beg_chord = t_1d[idx_beg_chord] time_end_chord = t_1d[idx_end_chord] #chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_chord_end])) #list of relevant chord indexes ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end]) #getting V_ref if data_dim_flag==1. 
Is calculated directly from the cloud base speeds if data_dim_flag==1: u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l]) v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l]) V_ref=np.sqrt(u_ref**2+v_ref**2) ### Now appending chord properties chord_timesteps.append(t_chord_end-t_chord_begin) chord_duration.append(ch_duration) chord_length.append(ch_duration*V_ref) tmp_base_height = np.percentile(cl_base[ch_idx_l],base_percentile)*dz chord_height.append(tmp_base_height) #25th percentile of cloud base surf_b_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord]) w_star = (tmp_base_height*surf_b_flux)**(1./3.) surf_qt_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord]) qt_star = surf_qt_flux/w_star surf_thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord]) thl_star = surf_thl_flux/w_star chord_w_star.append(w_star ) chord_thl_star.append(thl_star ) chord_qt_star.append(qt_star ) chord_w_base.append(np.mean(w_2d[cl_base[ch_idx_l],ch_idx_l])) chord_w.append(np.mean(w_2d[cl_base[ch_idx_l]-1,ch_idx_l])) chord_thl.append(np.mean(thl_2d[cl_base[ch_idx_l]-1,ch_idx_l])) #get a fourth and 3/4 of the cloud base cl_base_25_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)/4.) cl_base_75_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)*3./4.) #print ('cl base idx:',np.percentile(cl_base[ch_idx_l],base_percentile),'clbase/4:',cl_base_25_idx[0],'clbase3/4:',cl_base_75_idx[0]) chord_thl_25.append(np.mean(thl_2d[cl_base_25_idx,ch_idx_l])) chord_thl_75.append(np.mean(thl_2d[cl_base_75_idx,ch_idx_l])) chord_qt.append(np.mean(qt_2d[cl_base[ch_idx_l]-1,ch_idx_l])) chord_qt_75.append(np.mean(qt_2d[cl_base_75_idx,ch_idx_l])) chord_qt_25.append(np.mean(qt_2d[cl_base_25_idx,ch_idx_l])) chord_w_flux.append(np.sum(w_2d[cl_base[ch_idx_l]-1,ch_idx_l])) w_base_vec = w_2d[cl_base[ch_idx_l]-1,ch_idx_l] chord_w_up.append(np.mean(w_base_vec[w_base_vec>0.0])) tmp_w_per = np.percentile(w_base_vec,percentiles) if len(w_base_vec[w_base_vec>0.0])>0: tmp_w_per_up = np.percentile(w_base_vec[w_base_vec>0.0],percentiles) else: tmp_w_per_up = np.zeros(n_percentiles) tmp_w_per_up[:] = 'nan' chord_w_per = np.vstack([chord_w_per,tmp_w_per]) chord_w_per_up = np.vstack([chord_w_per,tmp_w_per_up]) if data_dim_flag==1: chord_time.append(np.mean(t_1d[ch_idx_l])) if data_dim_flag==3: chord_time.append(time_prof[it]) t_cloudy_idx += 1 time3 = ttiimmee.time() print('iterable: ',it) print('n_chords: ',n_chords) print('number of time points included: ',len(cbl_cl_idx)) #Does it matter if I turn these from lists to arrays? 
Fuck it, will do it anyway chord_timesteps=np.asarray(chord_timesteps) chord_duration =np.asarray(chord_duration) chord_length =np.asarray(chord_length) chord_height =np.asarray(chord_height) chord_w_base =np.asarray(chord_w_base) chord_w_star =np.asarray(chord_w_star) chord_thl_star =np.asarray(chord_thl_star) chord_qt_star =np.asarray(chord_qt_star) chord_w =np.asarray(chord_w) chord_w_up =np.asarray(chord_w_up) chord_w_flux =np.asarray(chord_w_flux) chord_thl =np.asarray(chord_thl) chord_thl_25 =np.asarray(chord_thl_25) chord_thl_75 =np.asarray(chord_thl_75) chord_qt =np.asarray(chord_qt) chord_qt_25 =np.asarray(chord_qt_25) chord_qt_75 =np.asarray(chord_qt_75) chord_time =np.asarray(chord_time) #Saving print('all chords: ',len(chord_duration)) save_string_base = 'chord_prop_'+date+'_d'+str(data_dim_flag)+'_ct'+str(chord_times) if N_it_min>0: save_string_base = save_string_base+'_Nmin'+str(N_it_min) if N_it_max<1e9: save_string_base = save_string_base+'_Nmax'+str(n_iter) save_string_base = save_string_base+'_'+special_name+'_N'+str(n_chords) filename_chord_panda = directory_output+save_string_base+'.pkl' data_for_panda = list(zip(chord_timesteps,chord_duration,chord_length,chord_height,chord_w_base,chord_w,chord_w_flux,chord_time,chord_w_up,chord_w_per,chord_w_per_up, chord_w_star,chord_thl_star,chord_qt_star, chord_thl,chord_thl_25,chord_thl_75,chord_qt,chord_qt_25,chord_qt_75)) df = pd.DataFrame(data = data_for_panda, columns=['timesteps','duration','length','height','w_base','w','w_flux','time','w up','w per','w per up', 'w star','thl star','qt star', 'thl','thl 25','thl 75','qt','qt 25','qt 75']) df.to_pickle(filename_chord_panda) time_end = ttiimmee.time() print('total run time of proc_chords in minutes: ',(time_end-time_begin)/60.) print(':') print(':') print('chordlength properties saved as panda in ',filename_chord_panda) print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') return #turned into a function #removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly #Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file. #If the input data is a 3D field it will always go over x and y directions #Two different scale_flags added to rotate the curtain to point upwind. #TODO #plot_flag disabled for the mean time def proc_beard_regularize(reg_var = 'w', date_str='20160611', directory_input='/data/testbed/lasso/sims/', directory_output = 'data_curtains/', data_dim_flag=1, base_smoothing_flag=2, plot_curtains_flag = 0, base_percentile = 25, special_name='', scale_flag=2, chord_times = 0, anomaly_flag = 0, N_it_max=1e9, N_it_min=0, size_bin_flag=0, N_bins=12, bin_size = 250, curtain_extra = 1.0, chord_max = 1e9, boundary_scaling_flag = 0 ): # reg_var = variable that will be regularized # plot_curtains_flag: 0 nothing, 1 plots pre and post regularization plots of reg_var # data_dim_flag: 1 = column, 3 = 3D snapshot # time_slice_curtain: 0 only puts out the total sums, 1: adds a seperate output for each time slice, is needed for scale_flag # scale_flag: If 0, nothing, if 1, it scales the output by u/sqrt(u^2+v^2) and flips the vector if u>0. 
Is set to 0 if data_dim_flag==1 # 1 the ref_lvl used is determined from the mean cloud base height # 2, similar to 1 but now using a profile # # base_smoothing_flag: 0 use mix of percentile and cloud base as done my Neil, 1: smooth out base after setting it with running average 2: just use percentile defined by base_percentile # base_percentile: percentile used to find chordlength bottom # chord_times: 0 use Neils values, use values that fit model output exactly with not gap possible # anomaly_flag: 0 use reg_var as it is. 1 use reg_var - profile. Works easiest for 3d output, 1d_flag needs to use the closet mean profile # directory_input = '/data/testbed/lasso/sims/' #+date # N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly # size_bin_flag bins the beards by their chord_lenth. Currently using 8 bins of 250 meters length to get started. The lowest bin should be empty, because we only calculate curtains when at least curtain_min is used # curtain_extra: Regularized chord length before and after in the curtain, default is 1 # chord_max: Maximum number of chords. If data_dim_flag=3 it will jump to the y direction when chord_max/2 is reached # boundary_scaling_flag: 0 nothing, 1 uses the surface fluxes and cloud base height to calculate either w/w*, thl'/thl*, or qt'/qt* time_begin = ttiimmee.time() dz = 25.0 #39.0625 #Is recalculated from the profile file later on dx = 25.0 date = date_str #1D clustering parameters in seconds, taken to agree with Lareau if chord_times == 0: t_gap = 20 t_min = 30 t_max = 120000 cell_min = 3 #Minimal number of cells needed per chord curtain_min = 10 #Minimal number of cells needed to convert into a curtain # #1D clustering parameters, #set super strict if chord_times == 1: t_gap = 0.#No gaps allowed! t_min = 0 t_max = 1e9 cell_min = 10 #Minimal number of cells needed per chord curtain_min = 10 #Minimal number of cells needed per curtain #value used to determine existence of cloud ql_min = 1e-5 z_min = 10 #Index of minimum z_vlvl of the cbl #z_min = 0 #Index of minimum z_vlvl of the cbl #Flag clean up if data_dim_flag==1: scale_flag=0 #Creating dictionary to save all properties settings_dict = { 'reg_var': reg_var, 'date_str':date_str, 'directory_input':directory_input, 'data_dim_flag':data_dim_flag, 'base_smoothing_flag':base_smoothing_flag, 'plot_curtains_flag' :plot_curtains_flag, 'base_percentile':base_percentile, 'special_name':special_name, 'scale_flag':scale_flag, 'chord_times':chord_times, 'anomaly_flag':anomaly_flag, 'N_it_max':N_it_max, 'N_it_min':N_it_min, 'size_bin_flag':size_bin_flag, 'bin_size':bin_size, 'N_bins':N_bins, 'curtain_extra':curtain_extra } #moved to an inner function to avoid issues with global and local variables def func_curtain_reg(input_2d_field): #function regularizes to cloud base #2019-03-20: added smoother to hopefully avoid impact of harsch jumps #2019-03-28: Added simplified version for base_smoothing_flag == 2 which gets rid of 1D pre interpolation #I originally used interp2d, tried griddata but it was a lot slower #Calculating the regularized t axis but for original resolution #It is expected to go a bit beyond -1.5 and 1.5, total width defined by curtain_extra #takes the original time vector, subtracts it by mean time, then scales it by 1/(time_end_chord-time_beg_chord) t_reg_orig = t_1d[idx_beg_curtain:idx_end_curtain]-(time_beg_chord+time_end_chord)/2. 
t_reg_orig = t_reg_orig/(time_end_chord-time_beg_chord) #Now we calculate the new regularized grid with the correct vertical but low/original horizontal/time resolution #mesh_t_low_z_high_x,mesh_t_low_z_high_z = np.meshgrid(t_reg_orig,z_reg_mid) #seems not to be needed var_t_low_z_high = np.zeros([curtain_cells,n_z_reg]) #introducing z_idx_base vector #Assigning reference cloud base where no cloud present z_idx_base=cl_base*1.0+0.0 z_idx_base[:] = z_idx_base_default for i in range(idx_beg_chord,idx_end_chord): if i>idx_beg_chord-1 and i<idx_end_chord and cl_base[i]<cbl_1d[i]: z_idx_base[i] = cl_base[i] #Here the smoother comes into play: #We started with a simple 5 cell running mean, #But now we are making it a function of the chordlength, using a 0.1 running mean if base_smoothing_flag ==1: z_idx_base_smooth = z_idx_base*1.0 N = int(np.floor(idx_end_chord-idx_beg_chord)*0.1) for i in range(idx_beg_chord-N,idx_end_chord+N): z_idx_base_smooth[i] = sum(z_idx_base[i-N:i+N])/(2*N) z_idx_base[:] = z_idx_base_smooth[:] if base_smoothing_flag==2: #just put the percentile back z_idx_base[:] = z_idx_base_default #default version for variable base height if base_smoothing_flag<2: #Now for each of the columns of the original curtain a vertical interpolation is done for i in range(idx_beg_curtain,idx_end_curtain): #assigining column value var_orig_col = input_2d_field[:,i] #Regularizing the z axes so that cloud base is at 1 d_z_tmp = 1.0/z_idx_base[i] nz = var_orig_col.shape[0] z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2 z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz) #HAve to add 0 to the z_reg_orig to enable interpolation z_reg_orig = np.hstack([[0],z_reg_orig]) var_orig_col = np.hstack([var_orig_col[0],var_orig_col]) #1D vertical interpolation to get the right columns and asign them one by one to w_x_low_z_high #f = interp1d(z_reg_orig, var_orig_col, kind='next') f = interp1d(z_reg_orig, var_orig_col, kind='nearest') try: var_reg_inter = f(z_reg_mid) except: print(z_idx_base[i]) print(z_reg_orig) print(z_reg_mid) var_t_low_z_high[i-idx_beg_curtain,:] = var_reg_inter #Now that w_x_low_z_high we have to interpolate 2D onto the rull regularized grid #print(t_reg_orig.shape,z_reg_mid.shape) f = interp2d(t_reg_orig, z_reg_mid, var_t_low_z_high.transpose(), kind='linear') var_curtain = f(t_reg_mid,z_reg_mid) #constant base height version if base_smoothing_flag==2: #Regularizing the z axes so that cloud base is at 1, since z_idx_base is the same everywhere I just use idx_beg_curtain as one. i=idx_beg_curtain d_z_tmp = 1.0/z_idx_base[i] var_orig_2d = input_2d_field[:,idx_beg_curtain:idx_end_curtain] nz = var_orig_2d.shape[0] z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2 z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz) #Have to add 0 to the z_reg_orig to enable interpolation z_reg_orig = np.hstack([[0],z_reg_orig]) var_orig_2d = np.vstack([var_orig_2d[0,:],var_orig_2d]) f = interp2d(t_reg_orig, z_reg_orig,var_orig_2d, kind='linear') var_curtain = f(t_reg_mid,z_reg_mid) return var_curtain #Creating regularized grid. 
d_reg = 0.005 n_z_reg = int(1.5/d_reg) n_t_reg = int((1+2*curtain_extra)/d_reg) t_reg_bound = np.linspace(-0.5-curtain_extra,0.5+curtain_extra ,n_t_reg+1) t_reg_mid = np.linspace(-0.5-curtain_extra+d_reg/2,0.5+curtain_extra-d_reg/2 ,n_t_reg) z_reg_bound = np.linspace(0,1.5 ,n_z_reg+1) z_reg_mid = np.linspace(0+d_reg/2,1.5-d_reg/2 ,n_z_reg) mesh_curtain_t,mesh_curtain_z = np.meshgrid(t_reg_mid,z_reg_mid) var_curtain = np.zeros([n_t_reg,n_z_reg]) var_curtain_sum = np.zeros([n_t_reg,n_z_reg]) var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg]) var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg]) n_curtain = 0 n_curtain_up = 0 n_curtain_dw = 0 if size_bin_flag==1: N_bins = 12 n_curtain_bin = np.zeros([N_bins]) n_curtain_bin_up = np.zeros([N_bins]) n_curtain_bin_dw = np.zeros([N_bins]) var_curtain_bin_sum = np.zeros([N_bins,n_t_reg,n_z_reg]) var_curtain_bin_up_sum = np.zeros([N_bins,n_t_reg,n_z_reg]) var_curtain_bin_dw_sum = np.zeros([N_bins,n_t_reg,n_z_reg]) mid_bin_size = np.linspace(125,-125+N_bins*250,N_bins) print('mid_bin_size',mid_bin_size) print('looking into date: ',date) if data_dim_flag==1: filename_column = [] #uses glob to get all files which contain column. column_files = glob.glob(directory_input+date+'/*column*.nc') for c_file in column_files: filename_column.append(c_file) print('filename column included:',c_file) if data_dim_flag==3: filename_w = directory_input+date+'/w.nc' filename_l = directory_input+date+'/ql.nc' file_w = Dataset(filename_w,read='r') file_ql = Dataset(filename_l,read='r') [nz, nx, ny] = get_zxy_dimension(filename_l,'ql') #getting variable to be regularized filename_var = directory_input+date+'/'+reg_var+'.nc' file_var = Dataset(filename_var,read='r') filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0] #if date=='bomex': # filename_prof=directory_input+date+'/bomex.default.0000000.nc' file_prof = Dataset(filename_prof,read='r') extra_string = '' n_chords = 0 #This now a bit trickier then for the 3D version. 
Will have to calculate a vector for the lower time resolution of the profile, #Then latter apply the nearest value to the full 1d time vec #First loading surface variables from default profile print('calculating cbl height from profile file') T = file_prof['thl'][:,0] p = file_prof['p'][:,0]*0.0+99709 qt = file_prof['qt'][:,0] w2 = file_prof['w2'][:,:] nz_prof = w2.shape[1] var_prof = file_prof[reg_var][:,:] #needed for anomaly processing #Just grabbing this to calculate dz z_prof = file_prof['z'][:] dz = z_prof[1]-z_prof[0] print('dz: ',dz) #for boundary scaling total_surf_buoy_flux = file_prof['bflux'][:,1] total_surf_thl_flux = file_prof['thlflux'][:,1] total_surf_qt_flux = file_prof['qtflux'][:,1] time_prof = file_prof['time'][:] cbl_1d_prof = time_prof*0.0 #Hack together the Lifting condensation level LCL qt_pressure = p*qt sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 )) #rel_hum = np.asmatrix(qt_pressure/sat_qv)[0] rel_hum = qt_pressure/sat_qv #Dewpoint A = 17.27 B = 237.7 alpha = ((A * (T- 273.15)) / (B + (T-273.15))) alpha = alpha + np.log(rel_hum) dewpoint = (B * alpha) / (A - alpha) dewpoint = dewpoint + 273.15 LCL = 125.*(T-dewpoint) LCL_index = np.floor(LCL/dz) #now calculate the cbl top for each profile time for tt in range(len(time_prof)): w_var = 1.0 z=z_min while w_var > 0.08: z += 1 w_var = w2[tt,z] #w_var = np.var(w_1d[z,:]) #Mimimum of LCL +100 or variance plus 300 m cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt]) #To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens if cbl_1d_prof[tt]>0.6*nz_prof: print('warning, cbl height heigher than 0.6 domain height, could crash regularization later on, timestep: ',tt) cbl_1d_prof[tt] = math.floor(nz*0.6) print('resulting indexes of cbl over time: ',cbl_1d_prof) print('calculated LCL: ',LCL_index) #Now we either iterate over columns or timesteps if data_dim_flag==1: n_iter =len(filename_column) if data_dim_flag==3: n_iter =len(time_prof) #Setting curtains for var var_curtain_sum = np.zeros([n_t_reg,n_z_reg]) var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg]) var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg]) n_curtain = 0 n_chord = 0 n_curtain_up = 0 n_curtain_dw = 0 #for col in filename_column: n_iter = min(n_iter,N_it_max) for it in range(N_it_min,n_iter): print('n_chords: ',n_chords) print('n_curtain: ',n_curtain) time1 = ttiimmee.time() if data_dim_flag ==1: print('loading column: ',filename_column[it]) file_col = Dataset(filename_column[it],read='r') w_2d = file_col.variables['w'][:] w_2d = w_2d.transpose() ql_2d = file_col.variables['ql'][:] ql_2d = ql_2d.transpose() t_1d = file_col.variables['time'][:] u_2d = file_col.variables['u'][:] u_2d = u_2d.transpose() v_2d = file_col.variables['v'][:] v_2d = v_2d.transpose() print('t_1d',t_1d) #Load the var file, even if means that we doable load w_2d or ql_2d var_2d = file_col.variables[reg_var][:] var_2d = var_2d.transpose() #The needed cbl height cbl_1d = t_1d*0 bflux_s_1d = t_1d*0 qtflux_s_1d = t_1d*0 thlflux_s_1d= t_1d*0 #Now we go through profile time snapshots and allocate the closest full time values to the profile values dt_2 = (time_prof[1]-time_prof[0])/2 for tt in range(len(time_prof)): cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt] bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt] qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt] thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt] #to get anomalies we subtract the closet mean profile 
if anomaly_flag==1: for tt in range(len(time_prof)): tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2] tmp_vector = var_prof[tt,:] #because the vectors don't perfectly align var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose() # = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:] if data_dim_flag ==3: if sum(file_prof['ql'][it,:])>0.0: print('loading timestep: ',it) ql_3d = grab_3d_field(file_ql ,it,'ql') w_3d = grab_3d_field(file_w ,it,'w') var_3d = grab_3d_field(file_var ,it,reg_var) #Here we have to do all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector w_2d = np.array(w_3d.reshape((nz,nx*ny))) ql_2d = np.array(ql_3d.reshape((nz,nx*ny))) var_2d = np.array(var_3d.reshape((nz,nx*ny))) #Now we do the same thing with the transposed field, use to be an either or, now just add it on w_3d = np.transpose( w_3d, (0, 2, 1)) ql_3d = np.transpose(ql_3d, (0, 2, 1)) var_3d = np.transpose(var_3d, (0, 2, 1)) #globals().update(locals()) w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))]) ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))]) var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))]) #Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though del w_3d del ql_3d del var_3d gc.collect() #Switching to anomalies if anomaly flag is used if anomaly_flag==1: #because the vectors don't perfectly align var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose() #to get the fake time vector we load the wind from the profile data, which devided by the grid spacing gives us a fake time resolution #we use the calculated cbl+300 meter or lcl as reference height ref_lvl = cbl_1d_prof[it] u_ref = file_prof['u'][it,ref_lvl] v_ref = file_prof['v'][it,ref_lvl] V_ref = np.sqrt(u_ref**2+v_ref**2) time_resolution = dx/V_ref print('time iterative, V_ref, time_resolution',it, V_ref, time_resolution ) print('ref_lvl used to determine reference winds',ref_lvl ) #fake t vector, t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it else: #If no clouds are present we pass a very short empty fields over to the chord searcher print('skipping timestep: ',it,' cause no clouds') ql_2d = np.zeros((nz,1)) w_2d = np.zeros((nz,1)) var_2d = np.zeros((nz,1)) t_1d = np.zeros(1) #The needed cbl height, which constant everywhere cbl_1d = t_1d*0 cbl_1d[:] = cbl_1d_prof[it] #The needed surface buoyancy flux, which is constant everywhere bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it] qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it] thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it] time2 = ttiimmee.time() print('loading time:',(time2-time1)*1.0,) ### Detecting lowest cloud cell is within 300 m of CBL nt = len(cbl_1d) cl_base = np.zeros(nt) #Detecting all cloudy cells #Use to have a different method using nans that doesn:t work anymore somehow. Now I just set it really high where there is no cloud. for t in range(nt): if np.max(ql_2d[:,t])>ql_min : cl_base[t]=np.argmax(ql_2d[:,t]>ql_min) else: cl_base[t]=10000000 cl_base=cl_base.astype(int) #Now find c base lower than the max height cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0] cbl_cl_binary = cl_base*0 cbl_cl_binary[cbl_cl_idx]=1 t_cbl_cl=t_1d[cbl_cl_idx] #Scaling between x and y is calculated here if required. 
Is skipped if there are less than 2 timesteps, which is what is assigned when no clouds are present if scale_flag > 0 and t_1d.shape[0]>3: #calculate the profiles of u and v and their scaling u_ref_prof = file_prof['u'][it,:] v_ref_prof = file_prof['v'][it,:] V_ref_prof = np.sqrt(u_ref_prof**2+v_ref_prof**2) scaling_factor_x_prof = u_ref_prof/V_ref_prof scaling_factor_y_prof = v_ref_prof/V_ref_prof #Using the mean cloud base height as the reference lvl ref_idx = np.mean(cl_base[cbl_cl_idx]) if scale_flag == 1: #a new reference level is com scaling_factor_x = scaling_factor_x_prof[int(ref_idx)] scaling_factor_y = scaling_factor_y_prof[int(ref_idx)] print('Scaling flag 1: scaling factor_x: ',scaling_factor_x,' scaling factor_y: ',scaling_factor_y, ' int(ref_idx): ',int(ref_idx)) if scale_flag == 2: #Regularizing the scaling profiles and interpolation them onto the regularized z axis d_z_tmp = 1.0/ref_idx nz = scaling_factor_x_prof.shape[0] z_reg_orig_top = d_z_tmp*nz-d_z_tmp/2 z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz) #HAve to add 0 to the z_reg_orig to enable interpolation z_reg_orig = np.hstack([[0],z_reg_orig]) scaling_factor_x_prof_ext = np.hstack([scaling_factor_x_prof[0],scaling_factor_x_prof]) scaling_factor_y_prof_ext = np.hstack([scaling_factor_y_prof[0],scaling_factor_y_prof]) #1D vertical interpolation to get the right columns and asign them one by one to w_x_low_z_high f_x = interp1d(z_reg_orig, scaling_factor_x_prof_ext, kind='nearest') f_y = interp1d(z_reg_orig, scaling_factor_y_prof_ext, kind='nearest') scaling_factor_x_inter = f_x(z_reg_mid) scaling_factor_y_inter = f_y(z_reg_mid) print('Scaling flag 2:, mean scaling_factor_x_inter: ',np.mean(scaling_factor_x_inter), ' mean scaling_factor_y_inter: ',np.mean(scaling_factor_y_inter)) ### Clustering 1D #Now we simply go through all cloudy timesteps #As long as the difference to the next cloudy timestep is lower than t_gap it counts as the same cloud #As an additional contraint, if the cloudy cells are right next to each other they are always counted as consecutive, not matter the time distance between them. #if the difference is larger than 20s the cloud is over, and a chordlength is created which is a list of all timesteps that below to that chordlength #However if the duration of the chordlength is lower than t_min or higher than t_max seconds it isn't #I added an additional constraint that each chord must include at least cell_min cells, because it is possible to get #Small chord lengths with more than t_min which are mostly gaps. 
t_cloudy_idx = 0 #n_chords = 0 chord_idx_list = [] print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns') while t_cloudy_idx < len(cbl_cl_idx)-1 and n_chords<chord_max: #print('t_chord_begin',t_chord_begin) t_chord_begin = t_cloudy_idx #now connecting all cloudy indexes while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap): t_cloudy_idx += 1 t_chord_end = t_cloudy_idx #print('t_chord_end',t_chord_end) #Checking if it fulfils chord criteria regaring time #we also added a minimum height of 100 m to screen out fog/dew stuff at the surface if t_chord_end-t_chord_begin>cell_min: chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]]) chord_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin] else: chord_z_min = 0 chord_duration = 0 if chord_duration>t_min and chord_duration<t_max and chord_z_min > 4: if t_chord_end-t_chord_begin>cell_min-1: n_chords += 1 #chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_cloudy_idx])) #Here we start the interpolation stuff #Getting the chord beginning and end idx_beg_chord = cbl_cl_idx[t_chord_begin] idx_end_chord = cbl_cl_idx[t_chord_end] time_beg_chord = t_1d[idx_beg_chord] time_end_chord = t_1d[idx_end_chord] #Calculate the beginning and end of the curtain, we add a bit to to each side to make interpolation easy idx_beg_curtain = (np.abs(t_1d - (time_beg_chord-curtain_extra*(time_end_chord-time_beg_chord)))).argmin()-1 idx_end_curtain = (np.abs(t_1d - (time_end_chord+curtain_extra*(time_end_chord-time_beg_chord)))).argmin()+2 idx_end_curtain = min(idx_end_curtain,nt-1) time_beg_curtain = t_1d[idx_beg_curtain] time_end_curtain = t_1d[idx_end_curtain] chord_cells = t_chord_end-t_chord_begin curtain_cells = idx_end_curtain-idx_beg_curtain #If curtain has more than curtain_min cells and curtain tail noes not extend beyond end of 2d field or the beginning extend before #I added 2 cells buffer at the beginning and end, because for the interpolation a bit of overlap is used. 
if idx_end_curtain<nt-2 and idx_beg_curtain>2 and len(cbl_cl_idx[t_chord_begin:t_chord_end])>curtain_min-1: n_curtain += 1 #First thing to do is calculate the chord base using the 25 percentile in agreement with Neil z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]],base_percentile)) #Regularized curtains, I am too lazy to pass on all my variables to func_curtain_reg so I instead made it a nested function var_curtain_tmp = (func_curtain_reg(var_2d)).transpose() if boundary_scaling_flag == 1: #Now adding the boundary scaling using w* surf_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord]) base_height = z_idx_base_default*dz w_star=(base_height*surf_flux)**(1/3) if reg_var=='w': boundary_scaling = w_star if reg_var=='qt': surf_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord]) boundary_scaling = surf_flux/w_star if reg_var=='thl': thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord]) boundary_scaling = surf_flux/w_star var_curtain_tmp = var_curtain_tmp/boundary_scaling #Finally add it to the mean one and track one more curtain #detecting if chord base has a positive or negative w, then adds to the sum of up or downdraft chords w_tmp = w_2d[cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]]-1,cbl_cl_idx[t_chord_begin:t_chord_end]] #print(w_tmp) #Scaling is now added here, #Things are applied twice so that deviding by n it comes out fin #We assume here that n_x and n_y are roughly same #Could be made cleaner later on if scale_flag>0 and data_dim_flag==3: if scale_flag==1: #find out if we need scaling_factor_x or y by seeing if we are in the first or second half if idx_end_curtain<nt/2: scaling_factor = 2*scaling_factor_x else: scaling_factor = 2*scaling_factor_y if scaling_factor>0: var_curtain_tmp = var_curtain_tmp[::-1,:] var_curtain_tmp = abs(scaling_factor) * var_curtain_tmp if scale_flag==2: if idx_end_curtain<nt/2: scaling_factor_prof = 2*scaling_factor_x_inter else: scaling_factor_prof = 2*scaling_factor_y_inter for n_prof in range(scaling_factor_prof.shape[0]): if scaling_factor_prof[n_prof]>0: var_curtain_tmp[:,n_prof] = var_curtain_tmp[::-1,n_prof] var_curtain_tmp [:,n_prof]= abs(scaling_factor_prof[n_prof])*var_curtain_tmp[:,n_prof] #Now adding the var_curtain_tmp to the sums var_curtain_sum = var_curtain_sum+var_curtain_tmp if np.mean(w_tmp)>0.: n_curtain_up += 1 var_curtain_up_sum += var_curtain_tmp elif np.mean(w_tmp)<0.: n_curtain_dw += 1 var_curtain_dw_sum += var_curtain_tmp else: print('wtf how is this zero: ',np.mean(w_tmp),w_tmp) #globals().update(locals()) ############################################################################################################################################### ################## SIZE BINNING ############################################################################################################## ############################################################################################################################################### if size_bin_flag: #getting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds if data_dim_flag==1: ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end]) u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l]) v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l]) V_ref=np.sqrt(u_ref**2+v_ref**2) ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin] chord_length = ch_duration*V_ref #if scale_flag==0: # scaling_factor=1. 
#find index of bin close to mid size bin bin_idx = np.where(np.abs(chord_length-mid_bin_size)<125)[0] if bin_idx.size>0: #print('bin_idx,chord_length',bin_idx,chord_length) n_curtain_bin[bin_idx] += 1 var_curtain_bin_sum[bin_idx,:,:] = var_curtain_bin_sum[bin_idx,:,:] + var_curtain_tmp if np.mean(w_tmp)>0.: n_curtain_bin_up[bin_idx] += 1 var_curtain_bin_up_sum[bin_idx,:,:] += var_curtain_tmp elif np.mean(w_tmp)<0.: n_curtain_bin_dw[bin_idx] += 1 var_curtain_bin_dw_sum[bin_idx,:,:] += var_curtain_tmp else: print('wtf how is this zero: ',np.mean(w_tmp),w_tmp) ############################################################################################################################## #PLOTS ############################################################################################################################## #If the plot flag is set the pre regularization curtains are plotted. if plot_curtains_flag ==1: print('plotting not implemented yet') ############################################################################################################################## #switching to y direction if half of max chords reached ############################################################################################################################## if n_chords == int(chord_max/2): t_cloudy_idx = int(len(cbl_cl_idx)/2) t_cloudy_idx += 1 time3 = ttiimmee.time() print('curtain processing:',(time3-time2)/60.0,'minutes') print(':') print(':') print(':') time_end = ttiimmee.time() print('total run time of proc_beard_regularize in minutes: ',(time_end-time_begin)/60.) print(':') print(':') print(':') #Replacing saving with xarray xr_dataset = xr.Dataset( data_vars = {reg_var :(('regularized height', 'regularized time'), var_curtain_sum.transpose()/n_curtain), reg_var+'_up':(('regularized height', 'regularized time'), var_curtain_up_sum.transpose()/n_curtain_up), reg_var+'_dw':(('regularized height', 'regularized time'), var_curtain_dw_sum.transpose()/n_curtain_dw)}, coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid}) xr_dataset[reg_var].attrs['n']=n_curtain xr_dataset[reg_var+'_up'].attrs['n']=n_curtain_up xr_dataset[reg_var+'_dw'].attrs['n']=n_curtain_dw xr_dataset.attrs = settings_dict #Making save string save_string_base = '_beard_'+date+'_d'+str(data_dim_flag)+'_cb'+str(base_smoothing_flag)+'_an'+str(anomaly_flag)+'_ct'+str(chord_times)+'_ce'+str(int(curtain_extra)) if data_dim_flag==3: save_string_base = save_string_base+'_sf'+str(scale_flag) if N_it_min>0: save_string_base = save_string_base+'_Nmin'+str(N_it_min) if N_it_max<1e9: save_string_base = save_string_base+'_Nmax'+str(n_iter) if boundary_scaling_flag==1: save_string_base = 'star'+save_string_base save_string_base = save_string_base+'_'+special_name+'_N'+str(n_curtain) save_string = directory_output+ reg_var+save_string_base +'.nc' xr_dataset.to_netcdf(save_string) print('saved beard data to '+save_string) if size_bin_flag==1: xr_dataset = xr.Dataset( data_vars = {reg_var :(('regularized height', 'regularized time','length'), var_curtain_bin_sum.transpose()/n_curtain_bin), reg_var+'_up':(('regularized height', 'regularized time','length'), var_curtain_bin_up_sum.transpose()/n_curtain_bin_up), reg_var+'_dw':(('regularized height', 'regularized time','length'), var_curtain_bin_dw_sum.transpose()/n_curtain_bin_dw)}, coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid, 'length':mid_bin_size}) xr_dataset[reg_var].attrs['n'] =n_curtain_bin xr_dataset[reg_var+'_up'].attrs['n'] =n_curtain_bin_up 
xr_dataset[reg_var+'_dw'].attrs['n'] =n_curtain_bin_dw xr_dataset.attrs = settings_dict save_string = directory_output+ reg_var+save_string_base+'_sizebin.nc' xr_dataset.to_netcdf(save_string) print('saved size binned beards to '+save_string) print(':') print(':') print(':') print(':') print(':') return #A simple script which calculates a histogram below the cloud base and saves it #I will try to keep it at least somewhat general with a flexible variable def proc_pdf(reg_var = 'w', date_str='20160611', directory_input ='/data/testbed/lasso/sims/', directory_output ='data_pdfs/', data_dim_flag=3, special_name='', N_it_max=1e9, N_it_min=0, anomaly_flag =0, N_bins=400, base_percentile = 25, boundary_scaling_flag = 1, range_var = [-10,10] ): #We are starting out with histograms of w from -10 to 10 and a 0.1 spacing var_hist_sum=np.zeros(N_bins) date = date_str #value used to determine existence of cloud ql_min = 1e-5 z_min = 10 #Index of minimum z_vlvl of the cbl print('looking into date: ',date) if data_dim_flag==1: filename_column = [] #uses glob to get all files which contain column. column_files = glob.glob(directory_input+date+'/*.column.*.*.*.nc') for c_file in column_files: filename_column.append(c_file) print('filename column included:',c_file) if data_dim_flag==3: filename_w = directory_input+date+'/w.nc' filename_l = directory_input+date+'/ql.nc' file_w = Dataset(filename_w,read='r') file_ql = Dataset(filename_l,read='r') [nz, nx, ny] = get_zxy_dimension(filename_l,'ql') #getting variable to be regularized filename_var = directory_input+date+'/'+reg_var+'.nc' file_var = Dataset(filename_var,read='r') filename_prof=glob.glob(directory_input+date+'/testbed?default?0*.nc')[0] #filename_prof=directory_input+date+'/testbed.default.0000000.nc' if date=='bomex': filename_prof=directory_input+date+'/bomex.default.0000000.nc' file_prof = Dataset(filename_prof,read='r') extra_string = '' #This now a bit trickier then for the 3D version. 
Will have to calculate a vector for the lower time resolution of the profile, #Then latter apply the nearest value to the full 1d time vec #First loading surface variables from default profile print('calculating cbl height from profile file') T = file_prof['thl'][:,0] p = file_prof['p'][:,0]*0.0+99709 qt = file_prof['qt'][:,0] w2 = file_prof['w2'][:,:] nz_prof = w2.shape[1] var_prof = file_prof[reg_var][:,:] #needed for anomaly processing #Just grabbing this to calculate dz z_prof = file_prof['z'][:] dz = z_prof[1]-z_prof[0] print('dz: ',dz) #for boundary scaling total_surf_buoy_flux = file_prof['bflux'][:,1] total_surf_thl_flux = file_prof['thlflux'][:,1] total_surf_qt_flux = file_prof['qtflux'][:,1] time_prof = file_prof['time'][:] cbl_1d_prof = time_prof*0.0 #Hack together the Lifting condensation level LCL qt_pressure = p*qt sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 )) #rel_hum = np.asmatrix(qt_pressure/sat_qv)[0] rel_hum = qt_pressure/sat_qv #Dewpoint A = 17.27 B = 237.7 alpha = ((A * (T- 273.15)) / (B + (T-273.15))) alpha = alpha + np.log(rel_hum) dewpoint = (B * alpha) / (A - alpha) dewpoint = dewpoint + 273.15 LCL = 125.*(T-dewpoint) LCL_index = np.floor(LCL/dz) #now calculate the cbl top for each profile time for tt in range(len(time_prof)): w_var = 1.0 z=z_min while w_var > 0.08: z += 1 w_var = w2[tt,z] #w_var = np.var(w_1d[z,:]) #Mimimum of LCL +100 or variance plus 300 m cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt]) #To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens if cbl_1d_prof[tt]>0.6*nz_prof: print('warning, cbl height heigher than 0.6 domain height, could crash regularization later on, timestep: ',tt) cbl_1d_prof[tt] = math.floor(nz*0.6) print('resulting indexes of cbl over time: ',cbl_1d_prof) print('calculated LCL: ',LCL_index) #Now we either iterate over columns or timesteps if data_dim_flag==1: n_iter =len(filename_column) if data_dim_flag==3: n_iter =len(time_prof) #for col in filename_column: n_iter = min(n_iter,N_it_max) for it in range(N_it_min,n_iter): time1 = ttiimmee.time() if data_dim_flag ==1: print('loading column: ',filename_column[it]) file_col = Dataset(filename_column[it],read='r') w_2d = file_col.variables['w'][:] w_2d = w_2d.transpose() ql_2d = file_col.variables['ql'][:] ql_2d = ql_2d.transpose() t_1d = file_col.variables['time'][:] print('t_1d',t_1d) #Load the var file, even if means that we doable load w_2d or ql_2d var_2d = file_col.variables[reg_var][:] var_2d = var_2d.transpose() #The needed cbl height cbl_1d = t_1d*0 bflux_s_1d = t_1d*0 qtflux_s_1d = t_1d*0 thlflux_s_1d= t_1d*0 #Now we go through profile time snapshots and allocate the closest full time values to the profile values dt_2 = (time_prof[1]-time_prof[0])/2 for tt in range(len(time_prof)): cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt] bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt] qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt] thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt] #to get anomalies we subtract the closet mean profile if anomaly_flag==1: for tt in range(len(time_prof)): tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2] tmp_vector = var_prof[tt,:] #because the vectors don't perfectly align var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose() # = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:] if data_dim_flag ==3: if sum(file_prof['ql'][it,:])>0.0: print('loading timestep: 
',it) ql_3d = grab_3d_field(file_ql ,it,'ql') w_3d = grab_3d_field(file_w ,it,'w') var_3d = grab_3d_field(file_var ,it,reg_var) #Here we have to do all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector w_2d = np.array(w_3d.reshape((nz,nx*ny))) ql_2d = np.array(ql_3d.reshape((nz,nx*ny))) var_2d = np.array(var_3d.reshape((nz,nx*ny))) #Now we do the same thing with the transposed field, use to be an either or, now just add it on w_3d = np.transpose( w_3d, (0, 2, 1)) ql_3d = np.transpose(ql_3d, (0, 2, 1)) var_3d = np.transpose(var_3d, (0, 2, 1)) #globals().update(locals()) w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))]) ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))]) var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))]) #This might save a bit of memory if reg_var == 'w': var_2d = w_2d if reg_var == 'ql': var_2d = ql_2d #Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though del w_3d del ql_3d del var_3d gc.collect() #fake t vector, t_1d = np.linspace(0,2*nx*ny,2*nx*ny) #Switching to anomalies if anomaly flag is used if anomaly_flag==1: #because the vectors don't perfectly align var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose() #to get the fake time vector we load the wind from the profile data, which devided by the grid spacing gives us a fake time resolution #we use the calculated cbl+300 meter or lcl as reference height ref_lvl = cbl_1d_prof[it] else: #If no clouds are present we pass a very short empty fields over to the chord searcher print('skipping timestep: ',it,' cause no clouds') ql_2d = np.zeros((nz,1)) w_2d = np.zeros((nz,1)) var_2d = np.zeros((nz,1)) t_1d = np.zeros(1) #The needed cbl height, which constant everywhere cbl_1d = t_1d*0 cbl_1d[:] = cbl_1d_prof[it] #The needed surface buoyancy flux, which is constant everywhere bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it] qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it] thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it] time2 = ttiimmee.time() print('loading time:',(time2-time1)*1.0,) ### Detecting lowest cloud cell is within 300 m of CBL nt = len(cbl_1d) cl_base = np.zeros(nt) #Detecting all cloudy cells #Use to have a different method using nans that doesn:t work anymore somehow. Now I just set it really high where there is no cloud. for t in range(nt): if np.max(ql_2d[:,t])>ql_min : cl_base[t]=np.argmax(ql_2d[:,t]>ql_min) else: cl_base[t]=10000000 cl_base=cl_base.astype(int) #Now find c base lower than the max height cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0] cbl_cl_binary = cl_base*0 cbl_cl_binary[cbl_cl_idx]=1 print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns') if len(cbl_cl_idx)>0: #Now calculating the var at cloud base var_cl_base=var_2d[cl_base[cbl_cl_idx]-1,cbl_cl_idx] #If boundary scaling is used, the variable is scaled accordingly #Only called if there are any clouds if boundary_scaling_flag == 1 and len(cbl_cl_idx)>1: #First thing to do is calculate the chord base using the 25 percentile in agreement with Neil if data_dim_flag==3: z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx],base_percentile)) # Can't think of a good way to do this, will throw up an error for the mean time. 
if data_dim_flag==1: print('sorry, but I havent implemented star scaling for 1d data') sys.exit() #Now adding the boundary scaling using w* #Is a bit overcooked currently as it only works with 3D data and thus all surface fluxes are the same everywhere. surf_flux = np.mean(bflux_s_1d) base_height = z_idx_base_default*dz w_star=(base_height*surf_flux)**(1/3) if reg_var=='w': boundary_scaling = w_star if reg_var=='qt': surf_flux = np.mean(qtflux_s_1d) boundary_scaling = surf_flux/w_star if reg_var=='thl': thl_flux = np.mean(thlflux_s_1d) boundary_scaling = surf_flux/w_star var_cl_base = var_cl_base/boundary_scaling #Calculating the histogram, and adding it to the total histogram var_hist,bin_edges = np.histogram(var_cl_base,range=range_var,bins=N_bins) var_hist_sum = var_hist_sum+var_hist else: print('no cloudy columns apparently') var_pdf = var_hist_sum save_string_base = '_pdf_'+date+'_d'+str(data_dim_flag)+'_an'+str(anomaly_flag) if N_it_min>0: save_string_base = save_string_base+'_Nmin'+str(N_it_min) if N_it_max<1e9: save_string_base = save_string_base+'_Nmax'+str(n_iter) if boundary_scaling_flag==1: save_string_base = 'star'+save_string_base save_string = directory_output+ reg_var+save_string_base save_string = save_string+'.npz' np.savez(save_string,var_pdf=var_pdf,range_var=range_var) print('saved pdf with ', sum(var_pdf), 'points to '+save_string) print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') print(':') return
40.605882
218
0.563326
0
0
0
0
0
0
0
0
24,414
0.32152
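For orientation, the core of the record above is its 1-D chord detector: consecutive cloudy cloud-base samples are merged into one chord when they are adjacent or closer in time than a gap threshold, and a finished chord is kept only if it spans enough cells and its duration lies inside the allowed window. Below is a minimal, self-contained sketch of that grouping rule (not the author's code; the function name, thresholds, and toy data are illustrative only):

import numpy as np

def find_chords(cloudy_idx, t, t_gap=20.0, t_min=30.0, t_max=120000.0, cell_min=3):
    """Group cloudy time indices into chords.

    Two cloudy samples join the same chord when their indices are adjacent
    or their time separation is below t_gap; a group is kept only if it has
    more than cell_min samples and a duration inside (t_min, t_max).
    """
    chords = []
    i = 0
    while i < len(cloudy_idx) - 1:
        begin = i
        # extend the chord while the next cloudy sample is adjacent or close in time
        while i < len(cloudy_idx) - 1 and (
            cloudy_idx[i + 1] == cloudy_idx[i] + 1
            or t[cloudy_idx[i + 1]] - t[cloudy_idx[i]] < t_gap
        ):
            i += 1
        duration = t[cloudy_idx[i]] - t[cloudy_idx[begin]]
        if i - begin > cell_min and t_min < duration < t_max:
            chords.append(list(cloudy_idx[begin:i]))
        i += 1
    return chords

# toy usage: cloudy samples at 1 s spacing with one long gap in between
t = np.arange(0, 300.0)
cloudy = np.concatenate([np.arange(40, 90), np.arange(200, 260)])
print([len(c) for c in find_chords(cloudy, t)])  # -> [49, 59], i.e. two chords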
d5ce012afb2ebb7c4522ad96e38d4259432b472d
1,656
py
Python
expression-atlas-wf/scripts/dmel_tau_housekeeping.py
jfear/larval_gonad
624a71741864b74e0372f89bdcca578e5cca3722
[ "MIT" ]
1
2019-09-13T13:24:18.000Z
2019-09-13T13:24:18.000Z
expression-atlas-wf/scripts/dmel_tau_housekeeping.py
jfear/larval_gonad
624a71741864b74e0372f89bdcca578e5cca3722
[ "MIT" ]
65
2019-07-24T16:23:08.000Z
2020-03-06T22:18:47.000Z
expression-atlas-wf/scripts/dmel_tau_housekeeping.py
jfear/larval_gonad
624a71741864b74e0372f89bdcca578e5cca3722
[ "MIT" ]
1
2021-06-02T19:09:35.000Z
2021-06-02T19:09:35.000Z
"""D. mel housekeeping genes based on tau. Uses the intersection of w1118 and orgR to create a list of D. mel housekeeping genes. """ import os from functools import partial import pandas as pd from larval_gonad.io import pickle_load, pickle_dump def main(): # Load mapping of YOgn to FBgn annot = pickle_load(snakemake.input.annot[0]) pickle_dump(intersect_fbgns(snakemake.input.male, annot), snakemake.output.male) pickle_dump(intersect_fbgns(snakemake.input.female, annot), snakemake.output.female) def intersect_fbgns(file_names, annot): return list(set.intersection(*list(map(partial(convert_to_fbgn, annot=annot), file_names)))) def convert_to_fbgn(file_name, annot): return set( [ fbgn for fbgn in map(lambda x: annot.get(x, None), pickle_load(file_name)) if fbgn is not None ] ) if __name__ == "__main__": if os.getenv("SNAKE_DEBUG", False): from larval_gonad.debug import snakemake_debug snakemake = snakemake_debug( workdir="expression-atlas-wf", input=dict( male=[ "../output/expression-atlas-wf/tau_housekeeping/w1118_male.pkl", "../output/expression-atlas-wf/tau_housekeeping/orgR_male.pkl", ], female=[ "../output/expression-atlas-wf/tau_housekeeping/w1118_female.pkl", "../output/expression-atlas-wf/tau_housekeeping/orgR_female.pkl", ], annot="../output/expression-atlas-wf/YOgn_to_dmel_ortholog/dmel.pkl", ), ) main()
30.109091
96
0.630435
0
0
0
0
0
0
0
0
524
0.316425
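The housekeeping-gene script above maps each input list of YOgn IDs to FBgn IDs, drops IDs without an ortholog, and intersects the resulting sets. A toy sketch of that idea with an invented mapping and invented IDs:

# Minimal illustration of the map-then-intersect idea; all values are made up.
annot = {"YOgn1": "FBgn01", "YOgn2": "FBgn02", "YOgn3": None}

def to_fbgn(ids, annot):
    # keep only IDs that map to a real FBgn
    return {fbgn for fbgn in (annot.get(i) for i in ids) if fbgn is not None}

id_lists = [["YOgn1", "YOgn2", "YOgn3"], ["YOgn2", "YOgn3"]]
housekeeping = set.intersection(*(to_fbgn(ids, annot) for ids in id_lists))
print(housekeeping)  # -> {'FBgn02'}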
d5ce93a21169fedfe3df6edeca6f8d5d29633b0f
2,226
py
Python
api-server/server/core/key.py
TK-IBM-Call-for-Code-Challange-2021/call-for-code-challenge-2021
7a3d78d4067303d61c4a25d45c0671ae7e984222
[ "MIT" ]
75
2020-07-22T15:24:56.000Z
2022-03-30T08:34:06.000Z
api-server/server/core/key.py
TK-IBM-Call-for-Code-Challange-2021/call-for-code-challenge-2021
7a3d78d4067303d61c4a25d45c0671ae7e984222
[ "MIT" ]
null
null
null
api-server/server/core/key.py
TK-IBM-Call-for-Code-Challange-2021/call-for-code-challenge-2021
7a3d78d4067303d61c4a25d45c0671ae7e984222
[ "MIT" ]
34
2020-07-23T02:54:03.000Z
2022-03-29T09:51:21.000Z
""" Api Key validation """ from typing import Optional from fastapi.security.api_key import APIKeyHeader from fastapi import HTTPException, Security, Depends from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN from server.core.security import verify_key from server.db.mongodb import AsyncIOMotorClient, get_database from server.models.user import User from server.db.crud.user import get_user_by_email from pydantic import EmailStr api_key_scheme = APIKeyHeader(name="X-API-KEY", auto_error=False) email_scheme = APIKeyHeader(name="X-EMAIL-ID", auto_error=False) async def validate_request( api_key: Optional[str] = Security(api_key_scheme), email_id: Optional[EmailStr] = Security(email_scheme), db: AsyncIOMotorClient = Depends(get_database) ) -> Optional[User]: """Validate a request with given email and api key to any endpoint resource """ if api_key is None: raise HTTPException( status_code=HTTP_400_BAD_REQUEST, detail="X-API-KEY is missing", headers={} ) if email_id is None: raise HTTPException( status_code=HTTP_400_BAD_REQUEST, detail="X-EMAIL-ID is missing", headers={} ) user = await get_user_by_email(db, email_id) # verify email & API key if user: api_key = str(user.salt) + str(api_key) if not verify_key(api_key, user.hashed_api_key): # api key mismatch raise HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Access not allowed", headers={} ) if user.disabled: # disabled user raise HTTPException( status_code=HTTP_403_FORBIDDEN, detail="User is disabled", headers={} ) if not user.is_active: # user's email is not verified raise HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Email not verified", headers={} ) # All verified return User(**user.dict()) else: # not a valid email provided raise HTTPException( status_code=HTTP_400_BAD_REQUEST, detail="Unknown Email", headers={} )
33.223881
92
0.666667
0
0
0
0
0
0
1,614
0.725067
383
0.172058
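A dependency like validate_request above is normally attached to individual routes through FastAPI's Depends, which also resolves the nested Security header dependencies. A hypothetical route sketch (the import path, router, and endpoint name are assumptions, not taken from the source):

from fastapi import APIRouter, Depends

from server.core.key import validate_request  # module path assumed from the file path above
from server.models.user import User

router = APIRouter()


@router.get("/profile")
async def read_profile(user: User = Depends(validate_request)):
    # Only reached when the X-EMAIL-ID / X-API-KEY header pair passes validation.
    return user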
d5cee84d7663e55b77b23428667b37ccfb80fbf9
1,253
py
Python
scripts/kconfig-split.py
Osirium/linuxkit
b710224cdf9a8425a7129cdcb84fc1af00f926d7
[ "Apache-2.0" ]
7,798
2017-04-18T15:19:24.000Z
2022-03-30T19:34:42.000Z
scripts/kconfig-split.py
Osirium/linuxkit
b710224cdf9a8425a7129cdcb84fc1af00f926d7
[ "Apache-2.0" ]
1,673
2017-04-18T16:15:20.000Z
2022-03-31T06:14:17.000Z
scripts/kconfig-split.py
Osirium/linuxkit
b710224cdf9a8425a7129cdcb84fc1af00f926d7
[ "Apache-2.0" ]
1,099
2017-04-18T15:19:33.000Z
2022-03-31T20:23:20.000Z
#!/usr/bin/env python

# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig

"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""

import os
import re
import sys

allconfigs = {}

# Parse config files
for config in sys.argv[1:]:
    allconfigs[config] = set()
    for line in open(config):
        m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
        if not m:
            continue
        option, value = m.groups()
        allconfigs[config].add((option, value))

# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
    common &= allconfigs[config]

for config in allconfigs.keys():
    allconfigs[config] -= common

allconfigs["common"] = common

# Generate new splitconfigs
for config in allconfigs.keys():
    f = open("split-" + config, "w")
    for option, value in sorted(list(allconfigs[config])):
        if value == "is not set":
            print >>f, "# CONFIG_%s %s" % (option, value)
        else:
            print >>f, "CONFIG_%s=%s" % (option, value)
    f.close()
27.844444
127
0.651237
0
0
0
0
0
0
0
0
505
0.403033
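The splitconfig script above targets Python 2 (dict.values()[0] indexing, print >> f). The same "factor out the options common to every config" idea can be written in Python 3 as the short sketch below; this is not a drop-in replacement for the script above, and the output-writing step is omitted:

import re
import sys

pattern = re.compile(r"#*\s*CONFIG_(\w+)[\s=](.*)$")

def read_options(path):
    # one (option, value) pair per matching line
    with open(path) as fh:
        return {m.groups() for m in map(pattern.match, fh) if m}

configs = {path: read_options(path) for path in sys.argv[1:]}
common = set.intersection(*configs.values()) if configs else set()
for path, options in configs.items():
    configs[path] = options - common   # keep only the per-config differences
configs["common"] = common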
d5cef9720c8cb2b94870da749da3f4cf31757f01
1,631
py
Python
src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/livy_statement_output.py
Mannan2812/azure-cli-extensions
e2b34efe23795f6db9c59100534a40f0813c3d95
[ "MIT" ]
2
2021-06-05T17:51:26.000Z
2021-11-17T11:17:56.000Z
src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/livy_statement_output.py
Mannan2812/azure-cli-extensions
e2b34efe23795f6db9c59100534a40f0813c3d95
[ "MIT" ]
3
2020-05-27T20:16:26.000Z
2020-07-23T19:46:49.000Z
src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/livy_statement_output.py
Mannan2812/azure-cli-extensions
e2b34efe23795f6db9c59100534a40f0813c3d95
[ "MIT" ]
5
2020-05-09T17:47:09.000Z
2020-10-01T19:52:06.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class LivyStatementOutput(Model):
    """LivyStatementOutput.

    :param status:
    :type status: str
    :param execution_count:
    :type execution_count: int
    :param data:
    :type data: object
    :param ename:
    :type ename: str
    :param evalue:
    :type evalue: str
    :param traceback:
    :type traceback: list[str]
    """

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'execution_count': {'key': 'execution_count', 'type': 'int'},
        'data': {'key': 'data', 'type': 'object'},
        'ename': {'key': 'ename', 'type': 'str'},
        'evalue': {'key': 'evalue', 'type': 'str'},
        'traceback': {'key': 'traceback', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(LivyStatementOutput, self).__init__(**kwargs)
        self.status = kwargs.get('status', None)
        self.execution_count = kwargs.get('execution_count', None)
        self.data = kwargs.get('data', None)
        self.ename = kwargs.get('ename', None)
        self.evalue = kwargs.get('evalue', None)
        self.traceback = kwargs.get('traceback', None)
33.285714
76
0.563458
1,115
0.68363
0
0
0
0
0
0
1,040
0.637646
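Since the generated model above takes its fields as keyword arguments, a small usage sketch, assuming LivyStatementOutput has been imported (the field values are invented):

# Construct the model and read back a couple of its attributes.
output = LivyStatementOutput(
    status="ok",
    execution_count=1,
    data={"text/plain": "3.14"},
)
print(output.status, output.execution_count)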
d5d04044860f90c923e15fee006637515d70252d
6,215
py
Python
src/main.py
mafshar/sub-puppo
20fe5bf3ca3d250d846c545085f748e706c4a33e
[ "MIT" ]
1
2018-03-02T04:24:33.000Z
2018-03-02T04:24:33.000Z
src/main.py
mafshar/sub-puppo
20fe5bf3ca3d250d846c545085f748e706c4a33e
[ "MIT" ]
null
null
null
src/main.py
mafshar/sub-puppo
20fe5bf3ca3d250d846c545085f748e706c4a33e
[ "MIT" ]
null
null
null
#!/usr/bin/env python ''' Notes: - Weak implies weakly supervised learning (4 classes) - Strong implies strongly (fully) superversied learning (10 classes) - frame number is set to 22ms (default); that is the "sweet spot" based on dsp literature - sampling rate is 16kHz (for the MFCC of each track) - Accuracy increases as the test set gets smaller, which implies that a lot of these machine learning models are heavily data-driven (i.e. feed more data for more performance boosts) - Currently, optimal benchmark results are achieved with a test set size of 10 percent of the total data ''' import os import glob import sys import time import warnings warnings.filterwarnings("ignore") import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from processing import mfcc_processing, datasets from deep_models import models from sklearn.model_selection import train_test_split from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.cluster import KMeans from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import normalize input_path = './data/genres/' mfcc_path = './data/processed/mfcc/' have_mfccs = True def normalize_and_split(data, test_size, verbose=False): scaler = MinMaxScaler() features = scaler.fit_transform(data['features']) labels = data['labels'] X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=test_size, random_state=42) norm_data = {} norm_data['X_train'] = X_train norm_data['X_test'] = X_test norm_data['y_train'] = y_train norm_data['y_test'] = y_test if verbose: print 'Training sample feature size:', X_train.shape print 'Training sample label size:', y_train.shape print 'Test sample feature size:', X_test.shape print 'Test sample label size:', y_test.shape return norm_data def svm_classifier(data, test_size, weak=False, verbose=False): norm_data = normalize_and_split(data, test_size, verbose) X_train = norm_data['X_train'] X_test = norm_data['X_test'] y_train = norm_data['y_train'] y_test = norm_data['y_test'] tic = time.time() svm_clf = SVC(C=10000, kernel='poly', degree=3, tol=0.0001, max_iter=5000, decision_function_shape='ovr') if weak \ else SVC(C=10000, kernel='poly', degree=6, tol=0.01, max_iter=5000, decision_function_shape='ovr') svm_clf.fit(X_train, y_train) print 'TEST ACCURACY:', svm_clf.score(X_test, y_test) toc = time.time() if verbose: print '\ttime taken for SVM classifier to run is', toc-tic return def knn_classifier(data, test_size, weak=False, verbose=False): norm_data = normalize_and_split(data, test_size, verbose) X_train = norm_data['X_train'] X_test = norm_data['X_test'] y_train = norm_data['y_train'] y_test = norm_data['y_test'] tic = time.time() knn_clf = KNeighborsClassifier(n_neighbors=3, weights='distance', p=1, n_jobs=-1) if weak \ else KNeighborsClassifier(n_neighbors=8, weights='distance', p=1, n_jobs=-1) knn_clf.fit(X_train, y_train) print 'TEST ACCURACY:', knn_clf.score(X_test, y_test) toc = time.time() if verbose: print '\ttime taken for KNN classifier to run is', toc-tic return def mfcc_nn_model(num_epochs, test_size, weak=False, verbose=False): tic = time.time() tensorize = datasets.ToTensor() dataset = None net = None if weak: dataset = datasets.MfccDatasetWeak(mfcc_path, tensorize) net = models.MfccNetWeak() else: dataset = datasets.MfccDataset(mfcc_path, tensorize) net = models.MfccNet() trainloader, testloader = datasets.train_test_dataset_split(dataset) criterion = nn.CrossEntropyLoss() optimizer = 
optim.SGD(net.parameters(), lr=0.1, momentum=0.8) for epoch in range(num_epochs): running_loss = 0.0 for i, data in enumerate(trainloader, 0): inputs, labels = data # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward optimizer.step() # print statistics running_loss += loss.item() if verbose and i % 5 == 0: # print every 2000 mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 correct = 0 total = 0 with torch.no_grad(): for data in testloader: inputs, labels = data outputs = net(inputs) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print 'TEST ACCURACY:', 1. * correct / total toc = time.time() if verbose: print '\ttime taken for Mfcc NN to run is', toc-tic return if __name__ == '__main__': mfccs = None data = None if not have_mfccs: have_mfccs = True print 'calculating mfccs...' mfccs = mfcc_processing.write_mfccs(input_path, mfcc_path, True) else : print 'retrieving mfccs...' mfccs = mfcc_processing.read_mfccs(mfcc_path, True) data = mfcc_processing.featurize_data(mfccs, weak=True, verbose=True) print weak = False if weak: data = mfcc_processing.featurize_data(mfccs, weak=True, verbose=True) print svm_classifier(data, test_size=0.10, weak=True, verbose=True) print knn_classifier(data, test_size=0.10, weak=True, verbose=True) print mfcc_nn_model(num_epochs=10, test_size=0.10, weak=True, verbose=True) else: data = mfcc_processing.featurize_data(mfccs, weak=False, verbose=True) print svm_classifier(data, test_size=0.10, weak=False, verbose=True) print knn_classifier(data, test_size=0.10, weak=False, verbose=True) print mfcc_nn_model(num_epochs=10, test_size=0.10, weak=False, verbose=True)
32.710526
187
0.665809
0
0
0
0
0
0
0
0
1,295
0.208367
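The training loop in the record above follows the standard PyTorch step order: zero the gradients, run the forward pass, compute the loss, call backward(), and step the optimizer. A minimal, runnable sketch of that pattern with stand-in model, data, and hyper-parameters (13 MFCC features and 10 genre classes are used only for illustration):

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(13, 10)                    # stand-in classifier: 13 features -> 10 classes
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.8)

inputs = torch.randn(8, 13)                  # one mini-batch of feature vectors
labels = torch.randint(0, 10, (8,))          # integer class labels

optimizer.zero_grad()                        # clear gradients from the previous step
loss = criterion(model(inputs), labels)      # forward pass + loss
loss.backward()                              # backward pass accumulates gradients
optimizer.step()                             # parameter update
print(float(loss))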
d5d07c6912264faadbd6b41b6918a6a30e91f2bc
8,638
py
Python
plugins/Operations/Crypto/blowfish_encrypt_dialog.py
nmantani/FileInsight-plugins
a6b036672e4c72ed06678729a86293212b7213db
[ "BSD-2-Clause", "CC0-1.0", "MIT" ]
120
2015-02-28T14:49:12.000Z
2022-03-27T07:13:24.000Z
plugins/Operations/Crypto/blowfish_encrypt_dialog.py
nmantani/FileInsight-plugins
a6b036672e4c72ed06678729a86293212b7213db
[ "BSD-2-Clause", "CC0-1.0", "MIT" ]
null
null
null
plugins/Operations/Crypto/blowfish_encrypt_dialog.py
nmantani/FileInsight-plugins
a6b036672e4c72ed06678729a86293212b7213db
[ "BSD-2-Clause", "CC0-1.0", "MIT" ]
17
2016-04-04T15:53:03.000Z
2021-12-10T18:07:59.000Z
# # Blowfish encrypt - Encrypt selected region with Blowfish # # Copyright (c) 2019, Nobutaka Mantani # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import binascii import re import sys import time import tkinter import tkinter.ttk import tkinter.messagebox try: import Cryptodome.Cipher.Blowfish import Cryptodome.Util.Padding except ImportError: exit(-1) # PyCryptodome is not installed # Print selected items def encrypt(data, root, cm, ckt, ek, cit, ei): blowfish_mode = {"ECB":Cryptodome.Cipher.Blowfish.MODE_ECB, "CBC":Cryptodome.Cipher.Blowfish.MODE_CBC, "CFB":Cryptodome.Cipher.Blowfish.MODE_CFB, "OFB":Cryptodome.Cipher.Blowfish.MODE_OFB, "CTR":Cryptodome.Cipher.Blowfish.MODE_CTR} mode = cm.get() key_type = ckt.get() key = ek.get() iv_type = cit.get() iv = ei.get() if key_type == "Hex": if re.match("^([0-9A-Fa-f]{2})+$", key): key = binascii.a2b_hex(key) else: tkinter.messagebox.showerror("Error:", message="Key is not in hex format.") return else: key = key.encode() if mode in ["CBC", "CFB", "OFB", "CTR"] and iv_type == "Hex": if re.match("^([0-9A-Fa-f]{2})+$", iv): iv = binascii.a2b_hex(iv) else: tkinter.messagebox.showerror("Error:", message="IV is not in hex format.") return else: iv = iv.encode() if mode in ["CBC", "CFB", "OFB", "CTR"] and len(iv) != Cryptodome.Cipher.Blowfish.block_size: tkinter.messagebox.showerror("Error:", message="IV size must be %d bytes." % Cryptodome.Cipher.Blowfish.block_size) return key_length = len(key) if key_length < 4 or key_length > 56: tkinter.messagebox.showerror("Error:", message="Key size must be in the range from 4 bytes and 56 bytes.") return try: if mode == "CFB": cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv, segment_size=Cryptodome.Cipher.Blowfish.block_size * 8) elif mode in ["CBC", "OFB"]: cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv) elif mode == "CTR": # The first seven bytes of IV are used as nonce and the last byte is used as initial_value (compatible with CyberChef). 
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], nonce=iv[0:7], initial_value=iv[7]) else: cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode]) if mode in ["ECB", "CBC"]: data = Cryptodome.Util.Padding.pad(data, Cryptodome.Cipher.Blowfish.block_size) d = cipher.encrypt(data) except Exception as e: tkinter.messagebox.showerror("Error:", message=e) root.quit() exit(1) # Not decrypted sys.stdout.buffer.write(d) root.quit() exit(0) # Decrypted successfully def combo_mode_selected(root, cm, cit, ei, lc): mode = cm.get() if mode == "ECB": cit.configure(state = "disabled") ei.configure(state = "disabled") else: cit.configure(state = "readonly") ei.configure(state = "normal") if mode == "CTR": lc.grid() else: lc.grid_remove() # Receive data data = sys.stdin.buffer.read() # Create input dialog root = tkinter.Tk() root.title("Blowfish encrypt") root.protocol("WM_DELETE_WINDOW", (lambda r=root: r.quit())) label_mode = tkinter.Label(root, text="Mode:") label_mode.grid(row=0, column=0, padx=5, pady=5, sticky="w") combo_mode = tkinter.ttk.Combobox(root, width=5, state="readonly") combo_mode["values"] = ("ECB", "CBC", "CFB", "OFB", "CTR") combo_mode.current(0) combo_mode.grid(row=0, column=1, padx=5, pady=5, sticky="w") label_key_type = tkinter.Label(root, text="Key type:") label_key_type.grid(row=1, column=0, padx=5, pady=5, sticky="w") combo_key_type = tkinter.ttk.Combobox(root, width=5, state="readonly") combo_key_type["values"] = ("Text", "Hex") combo_key_type.current(0) combo_key_type.grid(row=1, column=1, padx=5, pady=5) label_key = tkinter.Label(root, text="Key:") label_key.grid(row=1, column=2, padx=5, pady=5, sticky="w") entry_key = tkinter.Entry(width=32) entry_key.grid(row=1, column=3, padx=5, pady=5, sticky="w") entry_key.focus() # Focus to this widget label_iv_type = tkinter.Label(root, text="IV type:") label_iv_type.grid(row=2, column=0, padx=5, pady=5, sticky="w") combo_iv_type = tkinter.ttk.Combobox(root, width=5, state="readonly") combo_iv_type["values"] = ("Text", "Hex") combo_iv_type.current(0) combo_iv_type.grid(row=2, column=1, padx=5, pady=5) label_iv = tkinter.Label(root, text="IV:") label_iv.grid(row=2, column=2, padx=5, pady=5, sticky="w") entry_iv = tkinter.Entry(width=32) entry_iv.grid(row=2, column=3, padx=5, pady=5, sticky="w") button = tkinter.Button(root, text="OK", command=(lambda data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))) button.grid(row=3, column=0, padx=5, pady=5, columnspan=4) label_ctr = tkinter.Label(root, text="Note:\nThe first seven bytes of IV are used as the nonce and the last one\nbyte is used as the initial value of the counter (compatible with\nCyberChef).", justify="left") label_ctr.grid(row=4, column=0, padx=5, pady=5, columnspan=4, sticky="w") label_ctr.grid_remove() # Set callback functions combo_mode.bind('<<ComboboxSelected>>', lambda event, root=root, cm=combo_mode, cit=combo_iv_type, ei=entry_iv, lc=label_ctr: combo_mode_selected(root, cm, cit, ei, lc)) combo_mode.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)) combo_key_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)) entry_key.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, 
ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)) combo_iv_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)) entry_iv.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)) button.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)) # These are disabled in the initial state (ECB mode) combo_iv_type.configure(state = "disabled") entry_iv.configure(state = "disabled") # Adjust window position sw = root.winfo_screenwidth() sh = root.winfo_screenheight() root.update_idletasks() # Necessary to get width and height of the window ww = root.winfo_width() wh = root.winfo_height() root.geometry('+%d+%d' % ((sw/2) - (ww/2), (sh/2) - (wh/2))) root.mainloop() exit(1) # Not decrypted
44.297436
210
0.678629
0
0
0
0
0
0
0
0
2,705
0.313151
d5d16bd87f7bfb96643e0e75dbd1d494645de558
5,734
py
Python
dns/rdtypes/IN/IPSECKEY.py
preo/dnspython
465785f85f87508209117264c677080e901e957c
[ "0BSD" ]
null
null
null
dns/rdtypes/IN/IPSECKEY.py
preo/dnspython
465785f85f87508209117264c677080e901e957c
[ "0BSD" ]
null
null
null
dns/rdtypes/IN/IPSECKEY.py
preo/dnspython
465785f85f87508209117264c677080e901e957c
[ "0BSD" ]
null
null
null
# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import cStringIO import struct import dns.exception import dns.inet import dns.name class IPSECKEY(dns.rdata.Rdata): """IPSECKEY record @ivar precedence: the precedence for this key data @type precedence: int @ivar gateway_type: the gateway type @type gateway_type: int @ivar algorithm: the algorithm to use @type algorithm: int @ivar gateway: the public key @type gateway: None, IPv4 address, IPV6 address, or domain name @ivar key: the public key @type key: string @see: RFC 4025""" __slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key'] def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm, gateway, key): super(IPSECKEY, self).__init__(rdclass, rdtype) if gateway_type == 0: if gateway != '.' and not gateway is None: raise SyntaxError('invalid gateway for gateway type 0') gateway = None elif gateway_type == 1: # check that it's OK junk = dns.inet.inet_pton(dns.inet.AF_INET, gateway) elif gateway_type == 2: # check that it's OK junk = dns.inet.inet_pton(dns.inet.AF_INET6, gateway) elif gateway_type == 3: pass else: raise SyntaxError('invalid IPSECKEY gateway type: %d' % gateway_type) self.precedence = precedence self.gateway_type = gateway_type self.algorithm = algorithm self.gateway = gateway self.key = key def to_text(self, origin=None, relativize=True, **kw): if self.gateway_type == 0: gateway = '.' 
elif self.gateway_type == 1: gateway = self.gateway elif self.gateway_type == 2: gateway = self.gateway elif self.gateway_type == 3: gateway = str(self.gateway.choose_relativity(origin, relativize)) else: raise ValueError('invalid gateway type') return '%d %d %d %s %s' % (self.precedence, self.gateway_type, self.algorithm, gateway, dns.rdata._base64ify(self.key)) def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True): precedence = tok.get_uint8() gateway_type = tok.get_uint8() algorithm = tok.get_uint8() if gateway_type == 3: gateway = tok.get_name().choose_relativity(origin, relativize) else: gateway = tok.get_string() chunks = [] while 1: t = tok.get().unescape() if t.is_eol_or_eof(): break if not t.is_identifier(): raise dns.exception.SyntaxError chunks.append(t.value) b64 = ''.join(chunks) key = b64.decode('base64_codec') return cls(rdclass, rdtype, precedence, gateway_type, algorithm, gateway, key) from_text = classmethod(from_text) def to_wire(self, file, compress = None, origin = None): header = struct.pack("!BBB", self.precedence, self.gateway_type, self.algorithm) file.write(header) if self.gateway_type == 0: pass elif self.gateway_type == 1: file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway)) elif self.gateway_type == 2: file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway)) elif self.gateway_type == 3: self.gateway.to_wire(file, None, origin) else: raise ValueError('invalid gateway type') file.write(self.key) def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None): if rdlen < 3: raise dns.exception.FormError header = struct.unpack('!BBB', wire[current : current + 3]) gateway_type = header[1] current += 3 rdlen -= 3 if gateway_type == 0: gateway = None elif gateway_type == 1: gateway = dns.inet.inet_ntop(dns.inet.AF_INET, wire[current : current + 4]) current += 4 rdlen -= 4 elif gateway_type == 2: gateway = dns.inet.inet_ntop(dns.inet.AF_INET6, wire[current : current + 16]) current += 16 rdlen -= 16 elif gateway_type == 3: (gateway, cused) = dns.name.from_wire(wire[: current + rdlen], current) current += cused rdlen -= cused else: raise dns.exception.FormError('invalid IPSECKEY gateway type') key = wire[current : current + rdlen].unwrap() return cls(rdclass, rdtype, header[0], gateway_type, header[2], gateway, key) from_wire = classmethod(from_wire)
38.743243
81
0.591734
4,859
0.847401
0
0
0
0
0
0
1,472
0.256714
d5d20f7a81be3ee7ffae45e074584da66ec78259
210
py
Python
multistream_select/__init__.py
Projjol/py-multistream-select
624becaaeefa0a76d6841e27fbf7dea3240d2fe0
[ "MIT" ]
null
null
null
multistream_select/__init__.py
Projjol/py-multistream-select
624becaaeefa0a76d6841e27fbf7dea3240d2fe0
[ "MIT" ]
null
null
null
multistream_select/__init__.py
Projjol/py-multistream-select
624becaaeefa0a76d6841e27fbf7dea3240d2fe0
[ "MIT" ]
null
null
null
__version = '0.1.0'
__all__ = ['MultiStreamSelect', 'hexify']
__author__ = 'Natnael Getahun ([email protected])'
__name__ = 'multistream'


from .multistream import MultiStreamSelect
from .utils import hexify
26.25
52
0.766667
0
0
0
0
0
0
0
0
86
0.409524
d5d2163f998824781f4cf67aa89ebfc2260b9f51
42,648
py
Python
python/input_reader.py
dagesundholm/DAGE
0d0ef1d3e74ba751ca4d288db9f1ac7f9a822138
[ "MIT" ]
3
2018-03-29T08:48:57.000Z
2020-02-16T22:40:22.000Z
python/input_reader.py
dagesundholm/DAGE
0d0ef1d3e74ba751ca4d288db9f1ac7f9a822138
[ "MIT" ]
null
null
null
python/input_reader.py
dagesundholm/DAGE
0d0ef1d3e74ba751ca4d288db9f1ac7f9a822138
[ "MIT" ]
1
2019-04-08T14:40:57.000Z
2019-04-08T14:40:57.000Z
"""---------------------------------------------------------------------------------* * Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, * * Sergio Losilla, Elias Toivanen, Jonas Juselius * * * * Permission is hereby granted, free of charge, to any person obtaining a copy * * of this software and associated documentation files (the "Software"), to deal * * in the Software without restriction, including without limitation the rights * * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * * copies of the Software, and to permit persons to whom the Software is * * furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in all* * copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * * SOFTWARE. * *----------------------------------------------------------------------------------""" # Input file reader import os import sys import xml.etree.ElementTree as ET import numpy, ast from .generate_objects import SettingsGenerator from collections import OrderedDict class InputProgrammingError(Exception): pass class InputXML(object): tag_type = 'input' definition_tag = 'input_definition' def __init__(self, filename = None, \ definition_filename = None,\ input_object = None,\ parent_object = None,\ definition = None, \ directory = None): if (input_object is not None): self.root = input_object elif filename is not None: if definition_filename is None: definition_filename = os.path.dirname(os.path.realpath(__file__))+"/input_parameters.xml" if os.path.exists(filename): self.tree = ET.parse(filename) self.root = self.tree.getroot() else: print("Path for definition file: '{}' does not exist".format(filename)) else: self.root = None self.parent_object = parent_object if directory is not None: self.directory = directory elif filename is not None and os.path.exists(filename): self.directory = os.path.dirname(filename) elif self.parent_object is not None: self.directory = self.parent_object.directory else: self.directory = None if definition is not None: self.definition = definition elif definition_filename is not None: if os.path.exists(definition_filename): definition = ET.parse(definition_filename) self.definition = definition.getroot() else: sys.exit("Input definition filename does not exist: {}".format(definition_filename)) elif self.parent_object is not None: definition = self.parent_object.definition.find(self.definition_tag) if definition is not None: self.definition = definition else: sys.exit("Definition tag '{}' not found from parent definition tree", self.definition_tag) else: sys.exit("Definition tag input not given.") self.retrieve() def prepare(self): """ Prepare the input to have all things required to call the Fortran interface """ self.parse() self.handle_folders() self.fill_id_values() kwargs = OrderedDict() self.get_interface_argument_values(kwargs) return kwargs def form_new_directory_path(self, path_text, original_directory = None): """ Creates a new directory path from 'path_text' and 
'original_directory' and validate that it exists. Returns the new path. """ if original_directory is not None: complete_path = os.path.join(original_directory, path_text) else: complete_path = path_text directory_path = os.path.dirname(complete_path) # check if the path exists if not os.path.exists(directory_path): raise Exception("Error: '{}' tag path '{}' does not exist".format(self.tag_type, complete_path)) return directory_path def retrieve_path(self, path_text, directory): """ Retrieves content of xml file at path 'path_text' to and store it to 'parameter_name' atribute of 'self'. """ if directory is not None: complete_path = os.path.join(directory, path_text) else: complete_path = path_text # check if the path exists if os.path.exists(complete_path): tree = ET.parse(complete_path) return tree.getroot() else: raise Exception("Error: '{}' tag path '{}' does not exist".format(self.tag_type, complete_path)) def retrieve(self): """ Retrieves content to the tag from external file(s), if the tag has attribute or child named 'path' and/or 'extends_path'. """ if self.root is not None: # check if current tag has an attribute or child with # name 'path' path_text = InputXML.read_tag_or_attribute_value(self.root, 'path') # try to retrieve the content from path_text if path_text is not None and path_text != "": try: self.root = self.retrieve_path(path_text, self.directory) self.directory = self.form_new_directory_path(path_text, self.directory) except Exception as e: sys.exit(str(e)) # check if current tag has an attribute or child with # name 'extends_path' path_text = InputXML.read_tag_or_attribute_value(self.root, 'extends_path') self.extends_roots = [] self.extends_directories = [] directory = self.directory while path_text is not None: # try to retrieve the content from path_text try: self.extends_roots.append(self.retrieve_path(path_text, directory)) self.extends_directories.append(self.form_new_directory_path(path_text, directory)) except Exception as e: sys.exit(str(e)) # prepare for the next loop by getting the next extends path and corresponding directory directory = self.extends_directories[-1] path_text = InputXML.read_tag_or_attribute_value(self.extends_roots[-1], 'extends_path') def fill_id_values(self): """ Finds the id for each parameter where reference is made with name and fills it to the correct place """ for parameter_name in self.parameter_values: if parameter_name.endswith("_id"): # check if the tag has value that is not 0, in that case # we are not finding the value if self.get_parameter_value(parameter_name) == 0: tagtype = parameter_name[:parameter_name.rfind('_')] name_tag_found = tagtype+"_name" in self.parameter_values if name_tag_found: name = self.parameter_values[tagtype+"_name"] if name is not None and name != "": id_value = self.get_tagid_for_name(tagtype, name) if id_value != -1: self.parameter_values[parameter_name] = id_value for child in self.children: child.fill_id_values() def get_tagid_for_name(self, tagtype, name): if self.parent_object is not None: for child in self.parent_object.children: if hasattr(child, 'tag_type') and child.tag_type == tagtype and hasattr(child, 'name') and child.name == name: return child.id return -1 def get_parameter_definition(self, parameter_name): """ Retrieve the parameter definition for parameter name 'parameter_name'. 
""" for parameter_definition in self.definition.findall('parameter'): if parameter_definition.attrib['name'] == parameter_name: return parameter_definition return None def get_definition_tag(self, tag_name): """ Retrieve the definition tag for a tag with name = tag_name """ definition = self.definition.find('{}'.format(tag_name)) return definition def _parse_children(self, root, directory): """ Parse children of root xml-tag 'root' and store them as children in the 'self'. Note: this function is a subfunctionality of function 'parse' and it should not be used independently. """ for tag in root: if tag.tag not in self.parameter_values: # try to find the correct definition tag by using the "*_input"-format definition = self.definition.find('{}_input'.format(tag.tag)) # if the input definition was not found, try to find the definition from # the '<class>'-tags if definition is None: definition_found = False for definition_tag in self.definition.findall('class'): if definition_tag.attrib['name'] == tag.tag: definition = definition_tag definition_found = True break if not definition_found: print("Warning: Found unknown tag with name '{}'. Ignoring.".format(tag.tag)) continue else: child = InputXML(parent_object = self, definition = definition, input_object = tag, directory = directory) else: if tag.tag == 'settings': child = SettingsXML(parent_object = self, definition = definition, input_object = tag, directory = directory) elif tag.tag == 'structure': child = StructureXML(parent_object = self, definition = definition, input_object = tag, directory = directory) elif tag.tag == 'basis_set': child = BasisSetXML(parent_object = self, definition = definition, input_object = tag, directory = directory) elif tag.tag == 'action': child = ActionXML(parent_object = self, definition = definition, input_object = tag, directory = directory) elif tag.tag == 'scf_energetics': child = SCFEnergeticsXML(parent_object = self, definition = definition, input_object = tag, directory = directory) self.children.append(child) self.child_definitions.append(tag.tag) self.add_counters(child) child.parse() def parse(self): """ Parse paremeters and child xml-tags of the root-xml tags stored in self.root and self.extends_roots. Stores the found child-xml classes to 'self.children' and the parameter values to 'self.parameter_values'. The corresponding definitions are stored to 'self.child_definitions' and 'self.parameter_definitions', respectively. User must note that this function is recursive as it calls 'parse' for all found children in '_parse_children' calls. 
""" self.parameter_values = OrderedDict() self.parameter_definitions = OrderedDict() self.children = [] self.child_definitions = [] # handle the parameters first for parameter_definition in self.definition.findall('parameter'): if SettingsGenerator.is_valid_parameter(parameter_definition): self.set_parameter_value(parameter_definition, self.read_parameter_value(parameter_definition)) self.parameter_definitions[parameter_definition.attrib['name']] = parameter_definition if parameter_definition.attrib['name'] == 'name': self.name = self.parameter_values['name'] else: print("PARAMETER is not valid", parameter_definition.attrib['name']) # if the object has extends_root, then parse the children from it # and store them to 'self' if hasattr(self, 'extends_roots') and self.extends_roots is not None\ and hasattr(self, 'extends_directories') and self.extends_directories is not None: for i, extends_root in enumerate(self.extends_roots): self._parse_children(extends_root, self.extends_directories[i]) # parse the children from the xml-root of this object and store them # to 'self' if self.root is not None: self._parse_children(self.root, self.directory) # add the tag classes that are not found in the input file, just to # input the default values. for definition_tag in self.definition.findall('class'): if definition_tag.attrib['name'] not in self.child_definitions: child = InputXML(parent_object = self, definition = definition_tag) self.children.append(child) child.parse() def handle_folders(self): """ Creates missing folders and replaces relative paths with non-relative ones """ for parameter_name in self.parameter_values: if parameter_name in ['output_folder', 'input_folder', 'folder_path']: if self.parameter_values[parameter_name] is not None: # convert the non absolute paths to absolute ones if not os.path.isabs(self.parameter_values[parameter_name]): # join the directory of the file with the input directory path = os.path.join(self.directory, self.parameter_values[parameter_name]) # make the path more readable by removing extra slashes and dots self.parameter_values[parameter_name] = os.path.normpath(path) # if the output folder does not exist, create it if parameter_name == 'output_folder' and not os.path.exists(self.parameter_values[parameter_name]): os.makedirs(self.parameter_values[parameter_name]) for child in self.children: child.handle_folders() def get_interface_argument_values(self, argument_values, parameter_definitions = {}, abbreviation = None, counter_present = False): """ This function converts the values of the parameters to a form suitable for the Fortran interface. The converted values are stored to input-output dictionary 'arguments_values'. """ if 'abbreviation' in self.definition.attrib: abbreviation = self.definition.attrib['abbreviation'] for parameter_name in self.parameter_values: if SettingsGenerator.generate_fortran(self.parameter_definitions[parameter_name]): if abbreviation is not None: argument_key = "{}_{}".format(abbreviation, parameter_name) else: argument_key = parameter_name if counter_present: # Check if the parameter value is None. If the value is None, the # parameter is not present in the input file, and the default # value of the parameter is not specified. 
if self.parameter_values[parameter_name] is not None: if argument_key in argument_values and argument_values[argument_key] is not None: argument_values[argument_key].append(self.parameter_values[parameter_name]) else: argument_values[argument_key] = [self.parameter_values[parameter_name]] parameter_definitions[argument_key] = self.parameter_definitions[parameter_name] else: if argument_key not in parameter_definitions: argument_values[argument_key] = None parameter_definitions[argument_key] = self.parameter_definitions[parameter_name] else: if argument_key in argument_values: print("Warning: Found two (or more) arguments for the same parameter: {}".format(argument_key)) else: argument_values[argument_key] = self.parameter_values[parameter_name] parameter_definitions[argument_key] = self.parameter_definitions[parameter_name] for child in self.children: if 'global_index_counter' in child.definition.attrib or 'local_index_counter' in child.definition.attrib or 'counters' in child.definition.attrib: counter_present = True if SettingsGenerator.generate_fortran(child.definition): child.get_interface_argument_values(argument_values, parameter_definitions, abbreviation = abbreviation, counter_present = counter_present) # if we are at the root, convert the values with type list to numpy arrays if self.parent_object is None: for argument_key in list(argument_values): # the string lists need some special attention: if parameter_definitions[argument_key].attrib['type'].startswith('string') and type(argument_values[argument_key]) == list: temp = numpy.empty((256, len(argument_values[argument_key])+1), dtype="c") for j, value in enumerate(argument_values[argument_key]): temp[:, j] = "{0:{width}}".format(argument_values[argument_key][j], width=256) argument_values[argument_key] = numpy.array(temp, dtype="c").T elif type(argument_values[argument_key]) == list: temp_array = numpy.array(argument_values[argument_key], order='F').T shape = temp_array.shape if len(shape) == 3: new_shape = (shape[0], shape[1], shape[2]+1) elif len(shape) == 2: new_shape = (shape[0], shape[1]+1) else: new_shape = (shape[0]+1) new_array = numpy.empty(new_shape, order='F') if len(shape) == 3: new_array[:, :, :shape[2]] = temp_array[:, :, :] elif len(shape) == 2: new_array[:, :shape[1]] = temp_array[:, :] else: new_array[:shape[0]] = temp_array[:] argument_values[argument_key] = new_array elif argument_values[argument_key] is None: del argument_values[argument_key] def add_counters(self, child): """ Add all the counter values for the child object 'child' of 'self' by one """ if 'global_index_counter' in child.definition.attrib: success = self.add_counter_value(child.definition.attrib['global_index_counter']) if not success: print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['global_index_counter'])) else: child.id = self.get_counter_value(child.definition.attrib['global_index_counter']) if 'local_index_counter' in child.definition.attrib: success = self.add_counter_value(child.definition.attrib['local_index_counter']) if not success: print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['local_index_counter'])) if 'counters' in child.definition.attrib: success = self.add_counter_value(child.definition.attrib['counters']) if not success: print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['counters'])) def add_counter_value(self, counter_name): """ Add value of counter parameter with name=='counter_name' by one. 
If the counter is not found in the local object, it is seached from the parent objects. """ if counter_name in self.parameter_values: if self.parameter_values[counter_name] is None: self.parameter_values[counter_name] = 0 self.parameter_values[counter_name] += 1 return True else: if self.parent_object is not None: return self.parent_object.add_counter_value(counter_name) else: return False def get_counter_value(self, counter_name): """ Get the value of a counter with name 'counter_name'. If the counter is not found in the local object, it is seached from the parent objects. """ if counter_name in self.parameter_values: return self.parameter_values[counter_name] else: if self.parent_object is not None: return self.parent_object.get_counter_value(counter_name) else: return -1 def set_parameter_value(self, parameter_definition, value): """ Set an arbitrary value 'value' for the parameter with definition 'parameter_definition'. """ # convert the value to right data type and check that it is valid final_value = self.convert_argument_value(value, parameter_definition) # check that value is within given limits self.check_value_range(final_value, parameter_definition) # set the parameter value self.parameter_values[parameter_definition.attrib['name']] = final_value @staticmethod def read_tag_or_attribute_value(root, name): """ Reads the value of a tag or attribute with name 'name' in an xml. If attribute or tag is not found, None is returned. """ value = None if root is not None: tag = root.find(name) if tag is not None: value = tag.text elif name in root.attrib: value = root.attrib[name] return value def read_parameter_value(self, parameter_definition): """ Read the value of the parameter first from the values of the XML-element, secondarily from the objects we are extending from and thirdly from the default value of the parameter definition. """ value = InputXML.read_tag_or_attribute_value(self.root, parameter_definition.attrib['name']) # if value is not found at root, then use the value from extends roots if value is None and hasattr(self, 'extends_roots') and self.extends_roots is not None: for extends_root in self.extends_roots: value = InputXML.read_tag_or_attribute_value(extends_root, parameter_definition.attrib['name']) # if value is found, break the iteration if value is not None: break # fall back to default value/or None if one is not specified if value is None: if 'default' in parameter_definition.attrib: value = parameter_definition.attrib['default'] return value def get_parameter_value(self, parameter_name): """ Get the value of the parameter from the parsed parameters. If the parameter is not found an InputProgrammingError is raised. """ if hasattr(self, 'parameter_values') and parameter_name in self.parameter_values: return self.parameter_values[parameter_name] else: raise InputProgrammingError("Accessed parameter: '{}' is not in the values ".format(parameter_name)+ \ "of the object. Have you perfomed 'parse' for the object?") def parameter_values_are_equal(self, other, parameter_name): """ Compare the values of parameter with name 'parameter_name' for two objects of the same type. 
""" # check that the input objects are of same type if type(self) != type(other): raise InputProgrammingError("The objects compared with parameter_values_are_equal"+ " are not of same type.") # get the values for both input objects self_value = self.get_parameter_value(parameter_name) other_value = other.get_parameter_value(parameter_name) if isinstance(self_value, list) or isinstance(self_value, numpy.ndarray): if len(self_value) != len(other_value): return False for i in range(len(self_value)): if type(self_value[i]) == float or type(self_value[i]) == numpy.float64 or type(self_value[i]) == numpy.float32 or type(self_value[i]) == numpy.float16: if abs(self_value[i] - other_value[i]) > 1e-10: return False elif self_value[i] != other_value[i]: return False return True else: return self_value == other_value def all_parameter_values_are_equal(self, other): """ Check if all parameter values of 'self' and 'other' are equal """ for parameter_name in self.parameter_values: if not self.parameter_values_are_equal(other, parameter_name): return False return True def is_of_same_type_as(self, other): """ Check if self is of same type as other """ return type(self) == type(other) \ and self.definition.attrib['name'] == other.definition.attrib['name'] def children_are_equal(self, other): """ Check if children of 'self' and 'other' are equal with definition and value """ for child in self.children: equal_found = False # go through all the children and check if there is equal for other_child in other.children: if child == other_child: equal_found = True # if not, the children cannot be equal if not equal_found: return False return True def __eq__(self, other): """ Check if two InputXML objects are equal with each other """ return self.is_of_same_type_as(other)\ and self.all_parameter_values_are_equal(other)\ and self.children_are_equal(other) def __ne__(self, other): return not self.__eq__(other) def read_array_values(self, value_text, argument_type): is_number = argument_type.startswith("int") or \ argument_type.startswith("float") or \ argument_type.startswith("double") # try to evaluate the molecular orbitals as dict try: dictionary = ast.literal_eval("{"+ value_text +"}") size = max(dictionary.keys()) # init array of size if is_number: result = [0] * size else: result = [None] * size for key in dictionary: # convert the indexing from the 1-starting to 0-starting result[key-1] = dictionary[key] except: try: result = ast.literal_eval("["+ value_text +"]") except: raise Exception("Bad form of array, should have a list or a dictionary, value is: {}.".format(value_text)) return result def convert_argument_value(self, value_text, parameter_definition): argument_type = parameter_definition.attrib['type'] if SettingsGenerator.has_options(parameter_definition): value_text = self.get_option_value(value_text, parameter_definition) if SettingsGenerator.is_array(parameter_definition): if value_text is None: value = None else: # do the parsing of the input array (could also be a dictionary), which # has to be changed to a list array_values = self.read_array_values(value_text, argument_type) # get the final size of the result array from the parameter definition size = int(parameter_definition.attrib['shape']) value = numpy.zeros(size) try: for i, arg in enumerate(array_values): if argument_type.startswith('int'): value[i] = int(arg) if argument_type.startswith('float'): value[i] = float(arg) if argument_type.startswith('double'): value[i] = float(arg) if argument_type.startswith('string'): if 
SettingsGenerator.generate_fortran(parameter_definition): value[i] = str(arg) else: value[i] = str(arg) if argument_type.startswith('bool'): if arg.lower() == 'false': value[i] = False elif arg.lower() == 'true': value[i] = True else: value[i] = bool(arg) except ValueError: sys.exit('Error: parameter with type \'{}\' and name \'{}\' has invalid value: \'{}\''.format(argument_type, parameter_definition.attrib['name'], value_text)) else: try: if value_text is None: value = None elif argument_type.startswith('int'): value = int(value_text) elif argument_type.startswith('float'): value = float(value_text) elif argument_type.startswith('double'): value = float(value_text) elif argument_type.startswith('string'): if SettingsGenerator.generate_fortran(parameter_definition): value = str(value_text) else: value = str(value_text) elif argument_type.startswith('bool'): if value_text.lower() == 'false': value = False elif value_text.lower() == 'true': value = True else: value = bool(arg) except ValueError: sys.exit('Error: parameter with type \'{}\' and name \'{}\' has invalid value: \'{}\''.format(argument_type, parameter_definition.attrib['name'], value_text)) return value def check_value_range(self, value, parameter_definition): if value is not None: if 'minval' in parameter_definition.attrib: minval = parameter_definition.attrib['minval'] if value < float(minval): sys.exit('Error: argument with name {} and value {} is smaller than the smallest allowed value: {}', parameter_definition.attrib['name'], value, float(minval)) if 'maxval' in parameter_definition.attrib: maxval = parameter_definition.attrib['maxval'] if value > float(maxval): sys.exit('Error: argument with name {} and value {} is larger than the largest allowed value: {}', parameter_definition.attrib['name'], value, float(maxval)) def get_option_value(self, value_text, parameter_definition): options = parameter_definition.findall('option') result = None if len(options) > 0: valid_options = "" for option in options: if 'value' in option.attrib and value_text == option.attrib['value']: return value_text elif 'text_value' in option.attrib and value_text == option.attrib['text_value']: return option.attrib['value'] else: valid_options += ("{}: {} ".format(option.attrib['value'], option.attrib['text_value'])) sys.exit('Error: The value "{}" for argument with name "{}" is not within allowed options: {} '.format(value_text, parameter_definition.attrib['name'], valid_options)) def get_root_object(self): if self.parent_object is None: return self else: return self.parent_object.get_root_object() class SCFEnergeticsXML(InputXML): tag_type = 'scf_energetics' definition_tag = 'scf_energetics_input' class ActionXML(InputXML): tag_type = 'action' definition_tag = 'action_input' def parse(self): super(ActionXML, self).parse() self.handle_output_files() def handle_output_files(self): """ Reads in the output files and creates the corresponding objects to the tree """ if 'output_folder' in self.parameter_values: scf_energetics_filename = \ os.path.join(self.parameter_values['output_folder'], "scf_energetics.xml") root_object = self.get_root_object() # if scf energetics file exists, parse it and add as a child of the root # and set it as the input scf energetics of the action if os.path.exists(os.path.join(self.directory, scf_energetics_filename)): scf_energetics_definition = root_object.definition.find('scf_energetics_input') scf_energetics = SCFEnergeticsXML(parent_object = root_object, \ definition = scf_energetics_definition) scf_energetics.root = 
scf_energetics.retrieve_path(scf_energetics_filename, scf_energetics.directory) root_object.children.append(scf_energetics) root_object.child_definitions.append('scf_energetics') root_object.add_counters(scf_energetics) scf_energetics.parse() scf_energetics_id_definition = self.get_parameter_definition('scf_energetics_id') self.set_parameter_value(scf_energetics_id_definition, scf_energetics.id) structure_filename = \ os.path.join(self.parameter_values['output_folder'], "structure.xml") # if structure file exists, parse it and add it as a child of the root # and set it as the input structure of the action if os.path.exists(os.path.join(self.directory, structure_filename)): structure_definition = root_object.definition.find('structure_input') structure = StructureXML(parent_object = root_object, \ definition = structure_definition) structure.root = structure.retrieve_path(structure_filename, structure.directory) root_object.children.append(structure) root_object.child_definitions.append('structure') root_object.add_counters(structure) structure.parse() structure_id_definition = self.get_parameter_definition('structure_id') self.set_parameter_value(structure_id_definition, structure.id) class BasisSetXML(InputXML): tag_type = 'basis_set' definition_tag = 'basis_set_input' class SettingsXML(InputXML): tag_type = 'settings' definition_tag = 'settings_input' class StructureXML(InputXML): tag_type = 'structure' definition_tag = 'structure_input' atom_types = {'H':1, 'He':2, 'Li':3, 'Be':4, 'B':5, 'C':6, 'N':7, 'O':8, 'F':9, 'Ne':10, 'Na': 11, 'Mg':12, 'Al':13, 'Si':14, 'P':15, 'S':16, 'Cl':17, 'Ar':18} def read_input(self): charge = self.root.find('charge') # read relative charge if (charge is not None): self.charge = int(charge.text) else: self.charge = 0 # read coordinates and atom types self.coordinates = [] self.types = [] self.charges = [] # first read atom coordinates in 'atom' tags for i, atom in enumerate(self.root.findall('atom')): self.read_atom_coordinates_and_type(atom) # then read atoms in 'atoms' tags for i, atoms in enumerate(self.root.findall('atoms')): self.read_atoms_coordinates_and_types(atoms) def read_atom_coordinates_and_type(self, atom): result = [0.0, 0.0, 0.0] x = atom.find('x') if (x is not None): result[0] = float(x.text) y = atom.find('y') if (y is not None): result[1] = float(y.text) z = atom.find('z') if (z is not None): result[2] = float(z.text) xyz = atom.find('xyz') atom_type = self.read_atom_type(atom) if (xyz is not None): xyz_text = xyz.text.strip().split(" ") if (len(xyz_text) == 4): atom_type = get_atom_type(xyz_text[0]) atom_charge = get_atom_charge(xyz_text[0]) result[0] = float(xyz_text[1]) result[1] = float(xyz_text[2]) result[2] = float(xyz_text[3]) else: sys.exit("Error: Too many or too few coordinates in 'atom'->'xyz' -tag.") self.coordinates.append(result) self.types.append(atom_type) self.charges.append(atom_charge) def get_atom_type(self, atom_type_text): return int(self.atom_types[atom_type_text]) def get_atom_charge(self, atom_type_text): return float(self.atom_types[atom_type_text]) def read_atom_type(self, atom): if 'type' in atom.attrib: return atom.attrib['type'] else: sys.exit("Error: The mandatory attribute 'type' not found in 'atom'-tag") def read_atoms_coordinates_and_types(self, atoms): xyz = atoms.find('xyz') coordinates = [] types = [] charges = [] if (xyz is not None): xyz_lines = xyz.text.splitlines() for xyz in xyz_lines: xyz_text = xyz.strip().split(" ") xyz_coord = [0.0, 0.0, 0.0] # ignore empty lines if (len(xyz_text) == 1 and 
xyz_text[0] == ""): continue elif (len(xyz_text) == 4): types.append(self.get_atom_type(xyz_text[0])) charges.append(self.get_atom_charge(xyz_text[0])) xyz_coord[0] = float(xyz_text[1]) xyz_coord[1] = float(xyz_text[2]) xyz_coord[2] = float(xyz_text[3]) coordinates.append(xyz_coord) else: sys.exit("Error: Too many or too few coordinates in 'atoms'->'xyz' -line.") self.coordinates.extend(coordinates) self.types.extend(types) self.charges.extend(charges) if __name__ == "__main__": if len(sys.argv) <= 1: print("Give the input file name as an input.") else: inp = InputXML(filename = sys.argv[1], definition_filename = os.path.dirname(os.path.realpath(__file__))+"/input_parameters.xml") import dage_fortran dage_fortran.python_interface.run(**inp.prepare())
47.020948
179
0.559487
40,078
0.939739
0
0
480
0.011255
0
0
11,432
0.268055
d5d27a9aec4e8518393324c6681b93cf4f6993a5
506
py
Python
tests/test_mate_hashes_methods.py
MacHu-GWU/pathlib_mate-project
5b8f5441e681730d02209211cce7f46986147418
[ "MIT" ]
9
2017-09-07T21:21:43.000Z
2020-10-11T09:47:24.000Z
tests/test_mate_hashes_methods.py
MacHu-GWU/pathlib_mate-project
5b8f5441e681730d02209211cce7f46986147418
[ "MIT" ]
2
2018-10-16T14:30:26.000Z
2020-12-05T02:40:46.000Z
tests/test_mate_hashes_methods.py
MacHu-GWU/pathlib_mate-project
5b8f5441e681730d02209211cce7f46986147418
[ "MIT" ]
2
2017-09-05T14:06:01.000Z
2021-06-29T15:31:13.000Z
# -*- coding: utf-8 -*-

import pytest
from pathlib_mate.pathlib2 import Path


class TestHashesMethods(object):
    def test(self):
        p = Path(__file__)
        assert len({
            p.md5, p.get_partial_md5(nbytes=1 << 20),
            p.sha256, p.get_partial_sha256(nbytes=1 << 20),
            p.sha512, p.get_partial_sha512(nbytes=1 << 20),
        }) == 3


if __name__ == "__main__":
    import os

    basename = os.path.basename(__file__)
    pytest.main([basename, "-s", "--tb=native"])
23
59
0.592885
290
0.573123
0
0
0
0
0
0
50
0.098814
d5d2a60bb0dcf9c3c7f564f0707f97c252020d5c
4,183
py
Python
tools/lib/auth.py
shoes22/openpilot
a965de3c96a53b67d106cfa775e3407db82dd0e1
[ "MIT" ]
121
2019-03-27T06:34:51.000Z
2021-06-15T14:37:29.000Z
tools/lib/auth.py
shoes22/openpilot
a965de3c96a53b67d106cfa775e3407db82dd0e1
[ "MIT" ]
54
2019-04-11T08:51:58.000Z
2021-06-13T17:04:22.000Z
tools/lib/auth.py
shoes22/openpilot
a965de3c96a53b67d106cfa775e3407db82dd0e1
[ "MIT" ]
139
2019-07-16T07:25:05.000Z
2021-06-09T11:27:53.000Z
#!/usr/bin/env python3
"""
Usage::

  usage: auth.py [-h] [{google,apple,github,jwt}] [jwt]

  Login to your comma account

  positional arguments:
    {google,apple,github,jwt}
    jwt

  optional arguments:
    -h, --help  show this help message and exit

Examples::

  ./auth.py  # Log in with google account
  ./auth.py github  # Log in with GitHub Account
  ./auth.py jwt ey......hw  # Log in with a JWT from https://jwt.comma.ai, for use in CI
"""

import argparse
import sys
import pprint
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any, Dict
from urllib.parse import parse_qs, urlencode

from tools.lib.api import APIError, CommaApi, UnauthorizedError
from tools.lib.auth_config import set_token, get_token

PORT = 3000


class ClientRedirectServer(HTTPServer):
  query_params: Dict[str, Any] = {}


class ClientRedirectHandler(BaseHTTPRequestHandler):
  def do_GET(self):
    if not self.path.startswith('/auth'):
      self.send_response(204)
      return

    query = self.path.split('?', 1)[-1]
    query = parse_qs(query, keep_blank_values=True)
    self.server.query_params = query

    self.send_response(200)
    self.send_header('Content-type', 'text/plain')
    self.end_headers()
    self.wfile.write(b'Return to the CLI to continue')

  def log_message(self, format, *args):  # pylint: disable=redefined-builtin
    pass  # this prevent http server from dumping messages to stdout


def auth_redirect_link(method):
  provider_id = {
    'google': 'g',
    'apple': 'a',
    'github': 'h',
  }[method]

  params = {
    'redirect_uri': f"https://api.comma.ai/v2/auth/{provider_id}/redirect/",
    'state': f'service,localhost:{PORT}',
  }

  if method == 'google':
    params.update({
      'type': 'web_server',
      'client_id': '45471411055-ornt4svd2miog6dnopve7qtmh5mnu6id.apps.googleusercontent.com',
      'response_type': 'code',
      'scope': 'https://www.googleapis.com/auth/userinfo.email',
      'prompt': 'select_account',
    })
    return 'https://accounts.google.com/o/oauth2/auth?' + urlencode(params)
  elif method == 'github':
    params.update({
      'client_id': '28c4ecb54bb7272cb5a4',
      'scope': 'read:user',
    })
    return 'https://github.com/login/oauth/authorize?' + urlencode(params)
  elif method == 'apple':
    params.update({
      'client_id': 'ai.comma.login',
      'response_type': 'code',
      'response_mode': 'form_post',
      'scope': 'name email',
    })
    return 'https://appleid.apple.com/auth/authorize?' + urlencode(params)
  else:
    raise NotImplementedError(f"no redirect implemented for method {method}")


def login(method):
  oauth_uri = auth_redirect_link(method)

  web_server = ClientRedirectServer(('localhost', PORT), ClientRedirectHandler)
  print(f'To sign in, use your browser and navigate to {oauth_uri}')
  webbrowser.open(oauth_uri, new=2)

  while True:
    web_server.handle_request()
    if 'code' in web_server.query_params:
      break
    elif 'error' in web_server.query_params:
      print('Authentication Error: "%s". Description: "%s" ' % (
        web_server.query_params['error'],
        web_server.query_params.get('error_description')), file=sys.stderr)
      break

  try:
    auth_resp = CommaApi().post('v2/auth/', data={'code': web_server.query_params['code'], 'provider': web_server.query_params['provider']})
    set_token(auth_resp['access_token'])
  except APIError as e:
    print(f'Authentication Error: {e}', file=sys.stderr)


if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='Login to your comma account')
  parser.add_argument('method', default='google', const='google', nargs='?', choices=['google', 'apple', 'github', 'jwt'])
  parser.add_argument('jwt', nargs='?')

  args = parser.parse_args()
  if args.method == 'jwt':
    if args.jwt is None:
      print("method JWT selected, but no JWT was provided")
      exit(1)

    set_token(args.jwt)
  else:
    login(args.method)

  try:
    me = CommaApi(token=get_token()).get('/v1/me')
    print("Authenticated!")
    pprint.pprint(me)
  except UnauthorizedError:
    print("Got invalid JWT")
    exit(1)
28.650685
140
0.672962
667
0.159455
0
0
0
0
0
0
1,739
0.41573
d5d313602da6567472c45152b7f1fb43db070947
901
py
Python
datedfolder.py
IgorRidanovic/flapi
7eb35cc670a5d1a06b01fb13982ffa63345369de
[ "MIT" ]
3
2020-09-21T13:07:05.000Z
2021-01-29T19:44:02.000Z
datedfolder.py
IgorRidanovic/flapi
7eb35cc670a5d1a06b01fb13982ffa63345369de
[ "MIT" ]
null
null
null
datedfolder.py
IgorRidanovic/flapi
7eb35cc670a5d1a06b01fb13982ffa63345369de
[ "MIT" ]
null
null
null
#! /usr/bin/env python
# -*- coding: utf-8 -*-

'''
Create a Baselight folder with current date and time stamp.
You must refresh the Job Manager after running the script.
Copyright (c) 2020 Igor Riđanović, Igor [at] hdhead.com, www.metafide.com
'''

import flapi
from getflapi import getflapi
from datetime import datetime


def make_dated_folder(ip, scene, foldername):
    conn, msg = getflapi()
    jobman = conn.JobManager
    stamp = datetime.now().strftime('_%d-%b-%Y_%H.%M.%S')

    try:
        jobman.create_folder(ip, scene, foldername + stamp)
    except flapi.FLAPIException:
        print 'Could not create a folder.'
        return False

    # Cleanup
    conn.close()


if __name__=='__main__':
    conn, msg = getflapi()
    print msg + '\n'

    ip = 'localhost'
    currentScene = 'Test01'
    folderName = 'MyFolder'

    make_dated_folder(ip, currentScene, folderName)
23.710526
73
0.662597
0
0
0
0
0
0
0
0
347
0.384275
d5d51d8a99234145a06442d575334e8b8cd54c32
4,762
py
Python
elastica/wrappers/callbacks.py
zhidou2/PyElastica
0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6
[ "MIT" ]
71
2020-04-15T17:02:42.000Z
2022-03-26T04:53:51.000Z
elastica/wrappers/callbacks.py
zhidou2/PyElastica
0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6
[ "MIT" ]
59
2020-05-15T03:51:46.000Z
2022-03-28T13:53:01.000Z
elastica/wrappers/callbacks.py
zhidou2/PyElastica
0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6
[ "MIT" ]
57
2020-06-17T20:34:02.000Z
2022-03-16T08:09:54.000Z
__doc__ = """
CallBacks
-----------

Provides the callBack interface to collect data over time (see `callback_functions.py`).
"""

from elastica.callback_functions import CallBackBaseClass


class CallBacks:
    """
    CallBacks class is a wrapper for calling callback functions, set by the user. If the user
    wants to collect data from the simulation, the simulator class has to be derived from the
    CallBacks class.

    Attributes
    ----------
    _callbacks: list
        List of call back classes defined for rod-like objects.
    """

    def __init__(self):
        self._callbacks = []
        super(CallBacks, self).__init__()

    def collect_diagnostics(self, system):
        """
        This method calls user-defined call-back classes for a
        user-defined system or rod-like object. You need to input the
        system or rod-like object that you want to collect data from.

        Parameters
        ----------
        system: object
            System is a rod-like object.

        Returns
        -------

        """
        sys_idx = self._get_sys_idx_if_valid(system)

        # Create _Constraint object, cache it and return to user
        _callbacks = _CallBack(sys_idx)
        self._callbacks.append(_callbacks)

        return _callbacks

    def _finalize(self):
        # From stored _CallBack objects, instantiate the boundary conditions
        # inplace : https://stackoverflow.com/a/1208792

        # dev : the first index stores the rod index to collect data.
        # Technically we can use another array but it its one more book-keeping
        # step. Being lazy, I put them both in the same array
        self._callbacks[:] = [
            (callback.id(), callback(self._systems[callback.id()]))
            for callback in self._callbacks
        ]

        # Sort from lowest id to highest id for potentially better memory access
        # _callbacks contains list of tuples. First element of tuple is rod number and
        # following elements are the type of boundary condition such as
        # [(0, MyCallBack), (1, MyVelocityCallBack), ... ]
        # Thus using lambda we iterate over the list of tuples and use rod number (x[0])
        # to sort callbacks.
        self._callbacks.sort(key=lambda x: x[0])

        self._callBack(time=0.0, current_step=0)

    # TODO: same as above naming of _callBack function
    def _callBack(self, time, current_step: int, *args, **kwargs):
        for sys_id, callback in self._callbacks:
            callback.make_callback(
                self._systems[sys_id], time, current_step, *args, **kwargs
            )


class _CallBack:
    """
    CallBack wrapper private class

    Attributes
    ----------
    _sys_idx: rod object index
    _callback_cls: list
    *args
        Variable length argument list.
    **kwargs
        Arbitrary keyword arguments.
    """

    def __init__(self, sys_idx: int):
        """

        Parameters
        ----------
        sys_idx: int
        """
        self._sys_idx = sys_idx
        self._callback_cls = None
        self._args = ()
        self._kwargs = {}

    def using(self, callback_cls, *args, **kwargs):
        """
        This method is a wrapper to set which callback class is used to
        collect data from user defined rod-like object.

        Parameters
        ----------
        callback_cls: object
            User defined callback class.
        *args
            Variable length argument list
        **kwargs
            Arbitrary keyword arguments.

        Returns
        -------

        """
        assert issubclass(
            callback_cls, CallBackBaseClass
        ), "{} is not a valid call back. Did you forget to derive from CallBackClass?".format(
            callback_cls
        )
        self._callback_cls = callback_cls
        self._args = args
        self._kwargs = kwargs
        return self

    def id(self):
        return self._sys_idx

    def __call__(self, *args, **kwargs):
        """Constructs a callback functions after checks

        Parameters
        ----------
        args
        kwargs

        Returns
        -------

        """
        if not self._callback_cls:
            raise RuntimeError(
                "No callback provided to act on rod id {0}"
                "but a callback was registered. Did you forget to call"
                "the `using` method".format(self.id())
            )

        try:
            return self._callback_cls(*self._args, **self._kwargs)
        except (TypeError, IndexError):
            raise TypeError(
                r"Unable to construct callback class.\n"
                r"Did you provide all necessary callback properties?"
            )
29.214724
94
0.584208
4,567
0.959051
0
0
0
0
0
0
2,779
0.583578
d5d580ea25dd4fecd8cfeb8103bdbe268c389416
2,961
py
Python
vitrage/evaluator/template_data.py
HoonMinJeongUm/Hunmin-vitrage
37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6
[ "Apache-2.0" ]
null
null
null
vitrage/evaluator/template_data.py
HoonMinJeongUm/Hunmin-vitrage
37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6
[ "Apache-2.0" ]
null
null
null
vitrage/evaluator/template_data.py
HoonMinJeongUm/Hunmin-vitrage
37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6
[ "Apache-2.0" ]
null
null
null
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from collections import namedtuple

ActionSpecs = namedtuple(
    'ActionSpecs', ['id', 'type', 'targets', 'properties'])

EdgeDescription = namedtuple('EdgeDescription', ['edge', 'source', 'target'])

ENTITY = 'entity'
RELATIONSHIP = 'relationship'


class Scenario(object):
    def __init__(self, id, version, condition, actions, subgraphs, entities,
                 relationships, enabled=False):
        self.id = id
        self.version = version
        self.condition = condition
        self.actions = actions
        self.subgraphs = subgraphs
        self.entities = entities
        self.relationships = relationships
        self.enabled = enabled

    def __eq__(self, other):
        return self.id == other.id and \
            self.condition == other.condition and \
            self.actions == other.actions and \
            self.subgraphs == other.subgraphs and \
            self.entities == other.entities and \
            self.relationships == other.relationships


# noinspection PyAttributeOutsideInit
class TemplateData(object):
    def __init__(self, name, template_type, version, entities,
                 relationships, scenarios):
        self.name = name
        self.template_type = template_type
        self.version = version
        self.entities = entities
        self.relationships = relationships
        self.scenarios = scenarios

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, template_name):
        self._name = template_name

    @property
    def template_type(self):
        return self._template_type

    @template_type.setter
    def template_type(self, template_type):
        self._template_type = template_type

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, version):
        self._version = version

    @property
    def entities(self):
        return self._entities

    @entities.setter
    def entities(self, entities):
        self._entities = entities

    @property
    def relationships(self):
        return self._relationships

    @relationships.setter
    def relationships(self, relationships):
        self._relationships = relationships

    @property
    def scenarios(self):
        return self._scenarios

    @scenarios.setter
    def scenarios(self, scenarios):
        self._scenarios = scenarios
27.933962
77
0.667342
2,094
0.707194
0
0
942
0.318136
0
0
701
0.236744
d5d5b53df6261a4974bd6d3bb678fc4435a6413e
15,032
py
Python
scripts/summarize-kmer-counts.py
rpetit3/anthrax-metagenome-study
b4a6f2c4d49b57aeae898afd6a95c8f6cb437945
[ "MIT" ]
null
null
null
scripts/summarize-kmer-counts.py
rpetit3/anthrax-metagenome-study
b4a6f2c4d49b57aeae898afd6a95c8f6cb437945
[ "MIT" ]
null
null
null
scripts/summarize-kmer-counts.py
rpetit3/anthrax-metagenome-study
b4a6f2c4d49b57aeae898afd6a95c8f6cb437945
[ "MIT" ]
null
null
null
#! /usr/bin/env python3
"""Parse through the simulated sequencing group specific kmer counts."""
import argparse as ap
from collections import OrderedDict
import glob
import gzip
import os
import sys
import time

import numpy as np
import multiprocessing as mp

SAMPLES = OrderedDict()
KMERS = {}
HAMMING = OrderedDict()

SAMPLE_COLS = [
    'sample', 'is_bcg', 'is_ba', 'has_lethal', 'simulated_coverage', 'group',
    'total_kmers', 'tp', 'tn', 'fp', 'fn',
    'kmer_cov_min', 'kmer_cov_mean', 'kmer_cov_median', 'kmer_cov_max',
    'non_zero_kmer_cov_min', 'non_zero_kmer_cov_mean',
    'non_zero_kmer_cov_median', 'non_zero_kmer_cov_max'
]

KMER_COLS = [
    'kmer', 'simulated_coverage', 'group', 'hamming_distance',
    'tp', 'tn', 'fp', 'fn',
    'group_kmer_cov_min', 'group_kmer_cov_mean',
    'group_kmer_cov_median', 'group_kmer_cov_max',
    'non_zero_group_kmer_cov_min', 'non_zero_group_kmer_cov_mean',
    'non_zero_group_kmer_cov_median', 'non_zero_group_kmer_cov_max',
    'outgroup_kmer_cov_min', 'outgroup_kmer_cov_mean',
    'outgroup_kmer_cov_median', 'outgroup_kmer_cov_max',
    'non_zero_outgroup_kmer_cov_min', 'non_zero_outgroup_kmer_cov_mean',
    'non_zero_outgroup_kmer_cov_median', 'non_zero_outgroup_kmer_cov_max'
]


def get_group_status(sample, group):
    """Return if a sample is within a group or not."""
    within_group = None
    if group == 'ba':
        within_group = True if SAMPLES[sample]['is_ba'] == 'True' else False
    elif group == 'bcg':
        within_group = True if SAMPLES[sample]['is_bcg'] == 'True' else False
    else:  # lef
        within_group = True if SAMPLES[sample]['has_lethal'] else False
    return within_group


def get_coverage_stats(coverage):
    """Return summary stats of a set of coverages."""
    non_zero = [c for c in coverage if c]
    np_array = np.array(coverage)
    non_zero_array = np.array(non_zero)
    return {
        'min': min(coverage) if coverage else 0,
        'median': int(np.median(np_array)) if coverage else 0,
        'mean': "{0:.4f}".format(np.mean(np_array)) if coverage else 0,
        'max': max(coverage) if coverage else 0,
        'non_zero_min': min(non_zero_array) if non_zero else 0,
        'non_zero_median': int(np.median(non_zero_array)) if non_zero else 0,
        'non_zero_mean': int(round(np.mean(non_zero_array))) if non_zero else 0,
        'non_zero_max': max(non_zero_array) if non_zero else 0,
    }


def reverse_complement(seq):
    """Reverse complement a DNA sequence."""
    complement = {
        'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G',
        'a': 't', 't': 'a', 'g': 'c', 'c': 'g'
    }
    return ''.join([complement[b] for b in seq[::-1]])


def parse_counts(counts, sample, coverage, group, skip_kmers=False,
                 filter_kmers=False):
    """Parse kmer counts."""
    within_group = get_group_status(sample, group)
    sample_row = {'coverages': [], 'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0}
    with gzip.open(counts, 'r') as count_handle:
        for line in count_handle:
            kmer, count = line.decode().rstrip().split()
            count = int(count)
            parse = True

            if filter_kmers:
                parse = kmer in KMERS or reverse_complement(kmer) in KMERS
            elif not skip_kmers:
                if kmer not in KMERS:
                    kmer = reverse_complement(kmer)
                if within_group:
                    KMERS[kmer][coverage]['group_coverages'].append(count)
                    if count:
                        KMERS[kmer][coverage]['tp'] += 1
                    else:
                        KMERS[kmer][coverage]['fn'] += 1
                else:
                    KMERS[kmer][coverage]['outgroup_coverages'].append(count)
                    if count:
                        KMERS[kmer][coverage]['fp'] += 1
                    else:
                        KMERS[kmer][coverage]['tn'] += 1

            if parse:
                sample_row['coverages'].append(count)
                if within_group:
                    if count:
                        sample_row['tp'] += 1
                    else:
                        sample_row['fn'] += 1
                else:
                    if count:
                        sample_row['fp'] += 1
                    else:
                        sample_row['tn'] += 1

    coverage_stats = get_coverage_stats(sample_row['coverages'])
    SAMPLES[sample]['results'].append({
        'simulated_coverage': coverage,
        'within_group': within_group,
        'tp': sample_row['tp'],
        'tn': sample_row['tn'],
        'fp': sample_row['fp'],
        'fn': sample_row['fn'],
        'kmer_cov_min': coverage_stats['min'],
        'kmer_cov_mean': coverage_stats['mean'],
        'kmer_cov_median': coverage_stats['median'],
        'kmer_cov_max': coverage_stats['max'],
        'non_zero_kmer_cov_min': coverage_stats['non_zero_min'],
        'non_zero_kmer_cov_mean': coverage_stats['non_zero_mean'],
        'non_zero_kmer_cov_median': coverage_stats['non_zero_median'],
        'non_zero_kmer_cov_max': coverage_stats['non_zero_max'],
    })


def parse_kmers(kmers, coverages, skip_kmers=False, has_hamming=True):
    with open(kmers, 'r') as kmer_handle:
        for line in kmer_handle:
            if line.startswith(">"):
                line = line.rstrip().replace(">", "")
                kmer, distance = line.split("-")
                if not has_hamming:
                    distance = False
                KMERS[kmer] = OrderedDict()
                HAMMING[kmer] = distance
                if not skip_kmers:
                    for coverage in coverages:
                        KMERS[kmer][coverage] = {
                            'group_coverages': [], 'outgroup_coverages': [],
                            'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0
                        }


def parse_summary(summary):
    """Parse Summary file."""
    cols = None
    with open(summary, 'r') as summary_handle:
        # Column Names:
        # accession, gi, is_bcg, is_ba, species, genome_size, description
        for line in summary_handle:
            line = line.rstrip()
            if line.startswith('#'):
                cols = line.replace('#', '').split('\t')
            else:
                row = dict(zip(cols, line.split('\t')))
                SAMPLES[row['accession']] = row
                if row['accession'] == 'NZ_CP009941':
                    # NZ_CP009941 - Bacillus cereus w/ lef on chromosome
                    SAMPLES[row['accession']]['has_lethal'] = True
                else:
                    SAMPLES[row['accession']]['has_lethal'] = False
                SAMPLES[row['accession']]['results'] = []


def print_sample_summary(file_output):
    """Print the final per sample summaries."""
    with open(file_output, 'w') as output_handle:
        output_handle.write(("\t".join(SAMPLE_COLS)))
        output_handle.write("\n")
        for sample in SAMPLES:
            if SAMPLES[sample]['results']:
                for result in SAMPLES[sample]['results']:
                    row = {
                        'sample': sample,
                        'is_bcg': SAMPLES[sample]['is_bcg'],
                        'is_ba': SAMPLES[sample]['is_ba'],
                        'has_lethal': SAMPLES[sample]['has_lethal'],
                        'simulated_coverage': result['simulated_coverage'],
                        'group': args.group,
                        'within_group': result['within_group'],
                        'total_kmers': total_kmers,
                        'tp': result['tp'],
                        'tn': result['tn'],
                        'fp': result['fp'],
                        'fn': result['fn'],
                        'kmer_cov_min': result['kmer_cov_min'],
                        'kmer_cov_mean': result['kmer_cov_mean'],
                        'kmer_cov_median': result['kmer_cov_median'],
                        'kmer_cov_max': result['kmer_cov_max'],
                        'non_zero_kmer_cov_min': result['non_zero_kmer_cov_min'],
                        'non_zero_kmer_cov_mean': result['non_zero_kmer_cov_mean'],
                        'non_zero_kmer_cov_median': result['non_zero_kmer_cov_median'],
                        'non_zero_kmer_cov_max': result['non_zero_kmer_cov_max']
                    }
                    output_handle.write(("\t".join([
                        str(row[col]) for col in SAMPLE_COLS
                    ])))
                    output_handle.write("\n")


def print_kmer_summary(file_output):
    """Print the final per kmer summaries."""
    with open(file_output, 'w') as output_handle:
        output_handle.write(("\t".join(KMER_COLS)))
        output_handle.write("\n")
        for kmer, coverages in KMERS.items():
            for coverage in coverages:
                within_group = get_coverage_stats(
                    KMERS[kmer][coverage]['group_coverages']
                )
                outgroup = get_coverage_stats(
                    KMERS[kmer][coverage]['outgroup_coverages']
                )
                row = {
                    'kmer': kmer,
                    'simulated_coverage': coverage,
                    'group': args.group,
                    'hamming_distance': HAMMING[kmer],
                    'tp': KMERS[kmer][coverage]['tp'],
                    'tn': KMERS[kmer][coverage]['tn'],
                    'fp': KMERS[kmer][coverage]['fp'],
                    'fn': KMERS[kmer][coverage]['fn'],
                    'group_kmer_cov_min': within_group['min'],
                    'group_kmer_cov_mean': within_group['mean'],
                    'group_kmer_cov_median': within_group['median'],
                    'group_kmer_cov_max': within_group['max'],
                    'non_zero_group_kmer_cov_min': within_group['non_zero_min'],
                    'non_zero_group_kmer_cov_mean': within_group['non_zero_mean'],
                    'non_zero_group_kmer_cov_median': within_group['non_zero_median'],
                    'non_zero_group_kmer_cov_max': within_group['non_zero_max'],
                    'outgroup_kmer_cov_min': outgroup['min'],
                    'outgroup_kmer_cov_mean': outgroup['mean'],
                    'outgroup_kmer_cov_median': outgroup['median'],
                    'outgroup_kmer_cov_max': outgroup['max'],
                    'non_zero_outgroup_kmer_cov_min': outgroup['non_zero_min'],
                    'non_zero_outgroup_kmer_cov_mean': outgroup['non_zero_mean'],
                    'non_zero_outgroup_kmer_cov_median': outgroup['non_zero_median'],
                    'non_zero_outgroup_kmer_cov_max': outgroup['non_zero_max'],
                }
                output_handle.write(("\t".join([
                    str(row[col]) for col in KMER_COLS
                ])))
                output_handle.write("\n")


def read_lines(input_file):
    """Return lines in a text file as a list."""
    lines = []
    with open(input_file, 'r') as input_handle:
        for line in input_handle:
            lines.append(line.rstrip())
    return lines


def parse_filter_kmers(kmers):
    with open(kmers, 'r') as kmer_handle:
        for line in kmer_handle:
            if line.startswith(">"):
                line = line.rstrip().replace(">", "")
                KMERS[line.split("-")[0]] = True


if __name__ == '__main__':
    parser = ap.ArgumentParser(
        prog='summarize-kmer-counts.py',
        conflict_handler='resolve',
        description=("Summarize kmer counts of each simulation.")
    )

    parser.add_argument('summary', type=str, metavar="SUMMARY",
                        help='Summary of Bacillus genomes.')
    parser.add_argument('directory', type=str, metavar="SIMUALTION_DIR",
                        help='Directory with group specific 31-mer counts.')
    parser.add_argument('group', type=str, metavar="GROUP",
                        help='Which group to parse (ba, bcg or lef).')
    parser.add_argument('kmers', type=str, metavar="KMERS",
                        help='Group specific k-mers.')
    parser.add_argument('coverages', type=str, metavar="COVERAGES",
                        help=('Coverages to subsample to.'))
    parser.add_argument('outdir', type=str, metavar="OUTDIR",
                        help='Directory to output to.')
    parser.add_argument('--cpu', default=1, type=int, metavar="INT",
                        help='Number of cores to use (Default: 1)')
    parser.add_argument('--single_sample', type=str, metavar="STR",
                        help='Process a single sample.')
    parser.add_argument('--skip_kmers', action='store_true', default=False,
                        help='Skip kmer processing.')
    parser.add_argument('--filter', action='store_true', default=False,
                        help='Filter counts based on input kmers.')

    args = parser.parse_args()
    if args.group not in ['ba', 'bcg', 'lef']:
        raise Exception("GROUPS must be 'ba', 'bcg' or 'lef'")

    coverages = read_lines(args.coverages)
    print("Parsing Summary")
    parse_summary(args.summary)

    print("Parsing Kmers")
    if args.filter:
        print("Filtering Kmers")
        args.skip_kmers = True
        parse_filter_kmers(args.kmers)
    else:
        print("Parsing Kmers")
        parse_kmers(args.kmers, coverages, skip_kmers=args.skip_kmers,
                    has_hamming=False if args.group == 'lef' else True)
    total_kmers = len(KMERS)

    current = 1
    samples = list(SAMPLES.keys())
    if args.single_sample:
        samples = [args.single_sample]
    total = len(samples)
    for sample in samples:
        path = "{0}/{1}".format(args.directory, sample)
        if os.path.exists(path):
            print("Working on {0} ({1} of {2})".format(sample, current, total))
            current += 1
            count_files = sorted(glob.glob(
                "{0}/*-{1}.txt.gz".format(path, args.group)
            ))
            for count_file in count_files:
                coverage = os.path.basename(count_file).split('-')[1]
                parse_counts(count_file, sample, coverage, args.group,
                             skip_kmers=args.skip_kmers,
                             filter_kmers=args.filter)

    print("Output sample summary")
    if args.single_sample:
        print_sample_summary("{0}/count-summary-{1}-{2}.txt".format(
            args.outdir, args.single_sample, args.group
        ))
    else:
        print_sample_summary("{0}/count-summary-sample-{1}.txt".format(
            args.outdir, args.group
        ))

    if not args.skip_kmers:
        print("Output kmer summary")
        if args.single_sample:
            print_kmer_summary("{0}/count-summary-kmer-{1}-{2}.txt".format(
                args.outdir, args.single_sample, args.group
            ))
        else:
            print_kmer_summary("{0}/count-summary-kmer-{1}.txt".format(
                args.outdir, args.group
            ))
40.408602
87
0.549494
0
0
0
0
0
0
0
0
4,349
0.289316
d5d6cadbdf0418e7793af6364477d1005bd12ded
327
py
Python
movies/exceptions.py
te0dor/netguru-movies
8e2cc4585851ad31794ec9e6a3e4dd70cc0980c5
[ "MIT" ]
null
null
null
movies/exceptions.py
te0dor/netguru-movies
8e2cc4585851ad31794ec9e6a3e4dd70cc0980c5
[ "MIT" ]
null
null
null
movies/exceptions.py
te0dor/netguru-movies
8e2cc4585851ad31794ec9e6a3e4dd70cc0980c5
[ "MIT" ]
null
null
null
from marshmallow.exceptions import ValidationError


class ObjectDoesNotExist(Exception):
    """Exception if not found results"""
    pass


class CommunicationError(Exception):
    """Exception for diferents problem with communications."""
    pass


__all__ = ('ValidationError', 'ObjectDoesNotExist', 'CommunicationError')
21.8
73
0.75841
194
0.593272
0
0
0
0
0
0
151
0.461774
d5d747b80a8ea5e6c6c092c35a44d7f1c0635eb8
117
py
Python
music_api/apps/music_app/admin.py
fejiroofficial/Simple_music
2dd9dcf8e5c7374e29dcf96987c053eebf1cba2a
[ "MIT" ]
null
null
null
music_api/apps/music_app/admin.py
fejiroofficial/Simple_music
2dd9dcf8e5c7374e29dcf96987c053eebf1cba2a
[ "MIT" ]
8
2019-12-04T23:40:12.000Z
2022-02-10T07:58:28.000Z
music_api/apps/music_app/admin.py
fejiroofficial/simple_music
2dd9dcf8e5c7374e29dcf96987c053eebf1cba2a
[ "MIT" ]
null
null
null
from django.contrib import admin

from .models import Songs

admin.site.register(Songs)

# Register your models here.
16.714286
32
0.794872
0
0
0
0
0
0
0
0
28
0.239316
d5d9540eff941a339f643e59edbea5708ee6a194
2,354
py
Python
scripts/generate_image_series.py
JIC-Image-Analysis/senescence-in-field
f310e34df377eb807423c38cf27d1ade0782f5a2
[ "MIT" ]
null
null
null
scripts/generate_image_series.py
JIC-Image-Analysis/senescence-in-field
f310e34df377eb807423c38cf27d1ade0782f5a2
[ "MIT" ]
null
null
null
scripts/generate_image_series.py
JIC-Image-Analysis/senescence-in-field
f310e34df377eb807423c38cf27d1ade0782f5a2
[ "MIT" ]
null
null
null
# Draw image time series for one or more plots

from jicbioimage.core.image import Image

import dtoolcore

import click

from translate_labels import rack_plot_to_image_plot
from image_utils import join_horizontally, join_vertically


def identifiers_where_match_is_true(dataset, match_function):

    return [i for i in dataset.identifiers if match_function(i)]


def generate_image_series_for_plot(rack, plot):

    n_image, n_plot = rack_plot_to_image_plot(rack, plot)
    # n_image, n_plot = 55, 24

    print "{}_{}".format(n_image, n_plot)

    dataset_uri = 'file:/Users/hartleym/data_intermediate/separate_plots'
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)

    plot_number_overlay = dataset.get_overlay('plot_number')
    ordering_overlay = dataset.get_overlay('ordering')
    date_overlay = dataset.get_overlay('date')

    def is_match(i):
        try:
            ordering_as_int = int(ordering_overlay[i])
        except TypeError:
            return False

        if ordering_as_int != n_image:
            return False

        if int(plot_number_overlay[i]) != n_plot:
            return False

        return True

    identifiers = identifiers_where_match_is_true(dataset, is_match)

    def sort_identifiers_by_date(identifiers):
        dates_and_identifiers = [(date_overlay[i], i) for i in identifiers]
        sorted_dates_and_identifiers = sorted(dates_and_identifiers)
        _, sorted_identifiers = zip(*sorted_dates_and_identifiers)
        return(sorted_identifiers)

    sorted_identifiers = sort_identifiers_by_date(identifiers)

    def identifiers_to_joined_image(identifiers):
        images = []
        for identifier in identifiers:
            image_fpath = dataset.item_content_abspath(identifier)
            image = Image.from_file(image_fpath)
            images.append(image)
        return join_horizontally(images)

    result = identifiers_to_joined_image(sorted_identifiers)

    output_fname = 'example_from_tobin.png'
    with open(output_fname, 'wb') as fh:
        fh.write(result.png())


@click.command()
def main():
    # Early leaf senescence
    # generate_image_series_for_plot(3, 16)
    # generate_image_series_for_plot(7, 9)
    # generate_image_series_for_plot(9, 1)

    # Late leaf senescence
    generate_image_series_for_plot(7, 15)


if __name__ == '__main__':
    main()
26.75
75
0.712404
0
0
0
0
257
0.109176
0
0
361
0.153356
d5d98de44dcdd3336c05cb1bd2a44010685446b0
4,505
py
Python
pytpp/properties/response_objects/system_status.py
Venafi/pytpp
42af655b2403b8c9447c86962abd4aaa0201f646
[ "MIT" ]
4
2022-02-04T23:58:55.000Z
2022-02-15T18:53:08.000Z
pytpp/properties/response_objects/system_status.py
Venafi/pytpp
42af655b2403b8c9447c86962abd4aaa0201f646
[ "MIT" ]
null
null
null
pytpp/properties/response_objects/system_status.py
Venafi/pytpp
42af655b2403b8c9447c86962abd4aaa0201f646
[ "MIT" ]
null
null
null
from pytpp.properties.response_objects.dataclasses import system_status
from pytpp.tools.helpers.date_converter import from_date_string


class SystemStatus:
    @staticmethod
    def Engine(response_object: dict):
        if not isinstance(response_object, dict):
            response_object = {}
        return system_status.Engine(
            dn=response_object.get('DN'),
            display_name=response_object.get('DisplayName'),
            guid=response_object.get('Guid'),
            id=response_object.get('Id'),
            name=response_object.get('Name'),
        )

    @staticmethod
    def Services(response_object: dict):
        if not isinstance(response_object, dict):
            response_object = {}
        return system_status.Services(
            vplatform=SystemStatus.Service(response_object.get('vPlatform')),
            log_server=SystemStatus.Service(response_object.get('logServer')),
            iis=SystemStatus.Service(response_object.get('iis')),
        )

    @staticmethod
    def Service(response_object: dict):
        if not isinstance(response_object, dict):
            response_object = {}
        return system_status.Service(
            modules=response_object.get('modules'),
            time_since_first_seen=from_date_string(response_object.get('timeSinceFirstSeen'), duration_format=True),
            time_since_last_seen=from_date_string(response_object.get('timeSinceLastSeen'), duration_format=True),
            status=response_object.get('Status'),
        )

    @staticmethod
    def SystemStatus(response_object: dict):
        if not isinstance(response_object, dict):
            response_object = {}
        return system_status.SystemStatus(
            engine_name=response_object.get('engineName'),
            services=SystemStatus.Services(response_object.get('services')),
            version=response_object.get('version'),
        )

    @staticmethod
    def Task(response_object: dict):
        if not isinstance(response_object, dict):
            response_object = {}
        return system_status.Task(
            display_name=response_object.get('DisplayName'),
            name=response_object.get('Name'),
            start_time=from_date_string(response_object.get('StartTime')),
            stop_time=from_date_string(response_object.get('StopTime')),
            warning_count=response_object.get('WarningCount'),
        )

    @staticmethod
    def UpgradeInfo(response_object: dict):
        if not isinstance(response_object, dict):
            response_object = {}
        return system_status.UpgradeInfo(
            id=response_object.get('Id'),
            start_time=from_date_string(response_object.get('StartTime')),
            versions=response_object.get('Versions'),
        )

    @staticmethod
    def UpgradeStatus(response_object: dict):
        if not isinstance(response_object, dict):
            response_object = {}
        return system_status.UpgradeStatus(
            engine=SystemStatus.Engine(response_object.get('Engine')),
            status=response_object.get('Status'),
            upgrade_start_time=from_date_string(response_object.get('UpgradeStartTime')),
            upgrade_stop_time=from_date_string(response_object.get('UpgradeStopTime')),
            tasks_completed=[SystemStatus.Task(t) for t in response_object.get('TasksCompleted', [])],
            tasks_pending=[SystemStatus.Task(t) for t in response_object.get('TasksPending', [])],
            tasks_running=[SystemStatus.Task(t) for t in response_object.get('TasksRunning', [])],
        )

    @staticmethod
    def UpgradeSummary(response_object: dict):
        if not isinstance(response_object, dict):
            response_object = {}
        return system_status.UpgradeSummary(
            status=response_object.get('Status'),
            upgrade_start_time=from_date_string(response_object.get('UpgradeStartTime')),
            upgrade_stop_time=from_date_string(response_object.get('UpgradeStopTime')),
            completed_tasks=response_object.get('CompletedTasks'),
            target_version=response_object.get('TargetVersion'),
            engines_complete=response_object.get('EnginesComplete'),
            engines_running=response_object.get('EnginesRunning'),
            engines_blocked=response_object.get('EnginesBlocked'),
            engines_in_error=response_object.get('EnginesInError'),
            engines_pending_install=response_object.get('EnginesPendingInstall'),
        )
42.102804
116
0.668368
4,366
0.969145
0
0
4,300
0.954495
0
0
483
0.107214
d5d9b42548010e4777afbfec7a0536b09a13b146
1,883
py
Python
src/data/dataModule.py
mikkelfo/Title-prediction-from-abstract
45c9b64c963ae9b00c6b34a3f2b9f7c25496350e
[ "MIT" ]
null
null
null
src/data/dataModule.py
mikkelfo/Title-prediction-from-abstract
45c9b64c963ae9b00c6b34a3f2b9f7c25496350e
[ "MIT" ]
null
null
null
src/data/dataModule.py
mikkelfo/Title-prediction-from-abstract
45c9b64c963ae9b00c6b34a3f2b9f7c25496350e
[ "MIT" ]
null
null
null
from typing import Optional

import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, random_split
from transformers import T5Tokenizer

from src.data.PaperDataset import PaperDataset


class ArvixDataModule(pl.LightningDataModule):
    def __init__(self, config: str = "src/data/config.yaml") -> None:
        super().__init__()
        self.config = OmegaConf.load(config)

    def prepare_data(self) -> None:
        # Add tokenizing
        tokenizer = T5Tokenizer.from_pretrained("t5-base")
        titles, abstracts = torch.load("data/processed/data.pt").T
        #titles, abstracts = torch.load("data/processed/data.pt").T

        tokenized_abstracts = tokenizer.batch_encode_plus(
            abstracts, padding=True, truncation=True, return_tensors="pt"
        )
        tokenized_titles = tokenizer.batch_encode_plus(
            titles, padding=True, truncation=True, return_tensors="pt"
        )

        self.data = PaperDataset(tokenized_abstracts, tokenized_titles)

    def setup(self, stage: Optional[str] = None):
        train, val, test = random_split(
            self.data,
            [self.config.n_train, self.config.n_val, self.config.n_test],
            generator=torch.Generator().manual_seed(1337),
        )

        if stage == "fit" or stage is None:
            self.train_set = train
            self.val_set = val

        if stage == "test":
            self.test_set = test

    def train_dataloader(self) -> DataLoader:
        return DataLoader(self.train_set, batch_size=32, num_workers=4)

    def val_dataloader(self) -> DataLoader:
        return DataLoader(self.val_set, batch_size=32, num_workers=4)

    def test_dataloader(self) -> DataLoader:
        return DataLoader(self.test_set, batch_size=32, num_workers=4)


if __name__ == "__main__":
    dm = ArvixDataModule()
32.465517
73
0.669676
1,580
0.839087
0
0
0
0
0
0
159
0.08444
d5d9d4fd434e21de06a534a9b7ddf3881191564e
10,573
py
Python
shs/gui/RootFrame.py
ansobolev/shs
7a5f61bd66fe1e8ae047a4d3400b055175a53f4e
[ "MIT" ]
1
2016-06-22T13:30:25.000Z
2016-06-22T13:30:25.000Z
shs/gui/RootFrame.py
ansobolev/shs
7a5f61bd66fe1e8ae047a4d3400b055175a53f4e
[ "MIT" ]
1
2017-12-01T04:49:45.000Z
2017-12-01T04:49:45.000Z
shs/gui/RootFrame.py
ansobolev/shs
7a5f61bd66fe1e8ae047a4d3400b055175a53f4e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

import os
import sys
import time
import subprocess
import wx
import ConfigParser

from wx.lib.mixins.listctrl import getListCtrlSelection
from wx.lib.pubsub import pub

from gui.RootGUI import RootGUI
from StepsDialog import StepsDialog
from PlotFrame import PlotFuncFrame, PlotCorrFrame
import interface
import mbox


class RootFrame(RootGUI):
    calcs = []
    plot_frame = None

    def __init__(self, *args, **kwds):
        super(RootFrame, self).__init__(*args, **kwds)
        # set root
        self.root = self.set_root()
        # initialize choices
        self.propChoices = interface.dataClasses()
        calc_data_types = self.propChoices.types()
        calc_data_classes = self.propChoices.classes(calc_data_types[0])
        corr_classes = self.propChoices.classes("Histogram")
        self.propType.SetItems(calc_data_types)
        self.propChoice.SetItems(calc_data_classes)
        self.xCorr.SetItems(corr_classes)
        self.yCorr.SetItems(corr_classes)
        self.propType.SetSelection(0)
        self.propChoice.SetSelection(0)
        self.xCorr.SetSelection(0)
        self.yCorr.SetSelection(0)
        # initialize calc tree
        self.build_tree(self.root, self.typeRBox.GetItemLabel(self.typeRBox.GetSelection()))
        # initialize calc list
        self.calcList.InsertColumn(0, 'Directory', width=180)
        self.calcList.InsertColumn(1, 'Type', width=70)
        self.calcList.InsertColumn(2, 'NSteps', width=100)

    def set_root(self):
        """
        Sets root directory fr GUI based on config file
        :return: Root directory
        """
        config_dir = os.path.expanduser("~/.local/shs")
        config_file = os.path.join(config_dir, "shs_gui.cfg")
        # check the file and create one if it's not there
        if not os.path.isfile(config_file):
            os.makedirs(config_dir)
            open(config_file, 'w').close()
        config = ConfigParser.ConfigParser()
        config.read(config_file)
        # if config exists and has needed option
        if config.has_option("general", "root_dir"):
            return config.get("general", "root_dir")
        # make config
        if not config.has_section("general"):
            config.add_section("general")
        dlg = wx.DirDialog(self, "Select root directory")
        if dlg.ShowModal() == wx.ID_OK:
            root_dir = dlg.GetPath()
            config.set("general", "root_dir", root_dir)
        else:
            sys.exit(1)
        with open(config_file, 'w') as f:
            config.write(f)
        return root_dir

    def build_tree(self, root, calc_type):
        """
        Adds a new root element and then its children
        :param root: root directory for the tree
        :param calc_type: calculation type
        """
        self.calcTree.DeleteAllItems()
        r = len(root.split(os.sep))
        ids = {root: self.calcTree.AddRoot(root)}
        for (dir_path, dir_names, file_names) in os.walk(root):
            if interface.isCalcOfType(calc_type, dn=dir_names, fn=file_names):
                # find the number of steps in MDE file, quickly
                nsteps = interface.GetNumMDESteps(dir_path)
                ancdirs = dir_path.split(os.sep)[r:]
                if nsteps is not None:
                    ancdirs[-1] += ' [%i]' % nsteps
                ad = root
                for ancdir in ancdirs:
                    d = os.path.join(ad, ancdir)
                    if not d in ids:
                        ids[d] = self.calcTree.AppendItem(ids[ad], ancdir)
                        self.calcTree.SortChildren(ids[ad])
                    ad = d

    def get_selection_dir(self):
        item = self.calcTree.GetSelection()
        parent = self.calcTree.GetItemParent(item)
        path = [self.calcTree.GetItemText(item)]
        while parent.IsOk():
            path.append(self.calcTree.GetItemText(parent))
            parent = self.calcTree.GetItemParent(parent)
        # calculation directory
        calc_dir = os.sep.join(path[::-1]).split()[0]
        return calc_dir
        # return os.sep.join((self.root, calc_dir))

    def onSelChange(self, event):
        # calculation type
        ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
        # calculation directory
        cdir = self.get_selection_dir()
        if interface.isCalcOfType(ctype, dir=cdir):
            self.enqueueBtn.Enable()
        else:
            self.enqueueBtn.Enable(False)

    def propTypeChange(self, event):
        # property type
        pt_num = self.propType.GetSelection()
        pt = self.propType.GetItems()[pt_num]
        self.propChoice.SetItems(self.propChoices.classes(pt))
        self.propChoice.SetSelection(0)

    def typeChange(self, event):
        ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
        self.build_tree(self.root, ctype)

    def upBtnPress(self, event):
        # selection indices
        sind = getListCtrlSelection(self.calcList)
        if sind:
            # number of deleted strings
            ds = 0
            for si in sind:
                self.calcs.pop(si - ds)
                self.calcList.DeleteItem(si - ds)
                ds += 1
            return 0
        return 1

    def downBtnPress(self, event):
        # current list count
        clc = self.calcList.GetItemCount()
        # calculation type
        ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
        # calculation directory
        cdir = self.get_selection_dir()
        if not interface.isCalcOfType(ctype, dir=cdir):
            mbox.NoResults(cdir, ctype)
            return 1
        # init steps range
        r = None
        if ctype in ('.output', '.ANI'):
            # enter dialog
            dlg = StepsDialog(None)
            if dlg.ShowModal() == wx.ID_OK:
                r = dlg.GetRange()
            dlg.Destroy()
        self.calcs.append(interface.getCalc(cdir, ctype, r))
        self.calcList.InsertStringItem(clc, cdir[len(self.root)+1:])
        self.calcList.SetStringItem(clc, 1, ctype)
        self.calcList.SetStringItem(clc, 2, str(len(r)) if r is not None else '')
        return 0

    def on_enqueue_press(self, _):
        from sshutils import getMount, getDevice, getRemoteDir
        # on which device are we?
        calc_dir = self.get_selection_dir()
        mount_path = getMount(calc_dir)
        device_name, device_type = getDevice(mount_path)
        if 'ssh' in device_type:
            user, host_dir = device_name.split('@')
            hostname, remote_mount_path = host_dir.split(':')
            remote_dir = getRemoteDir(calc_dir, mount_path, remote_mount_path)
            self.enqueue_remote(remote_dir, hostname, user)
        else:
            self.enqueue_local(calc_dir)

    @staticmethod
    def enqueue_local(calc_dir):
        """
        Enqueue a task on a local filesystem
        :param calc_dir: calculation directory on a local filesystem
        :return: error_code (0 is OK)
        """
        import distutils.spawn
        # find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
        if distutils.spawn.find_executable('qstat') is not None:
            q = 'pbs'
        elif distutils.spawn.find_executable('sinfo') is not None:
            q = 'slurm'
        else:
            mbox.JobSubmit(None, ())
            return -1
        comm = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q, q + '.sh'))
        submit = subprocess.Popen(['/bin/bash', comm, '-d=' + calc_dir],
                                  stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        mbox.JobSubmit(q, submit.communicate())

    @staticmethod
    def enqueue_remote(calc_dir, host, user):
        """
        Enqueue a task on a remote filesystem
        :param calc_dir: calculation directory on a remote filesystem
        :param host: host where to enqueue a task
        :param user: user of a remote system who enqueues a task
        :return: error code (0 is OK)
        """
        from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand
        ssh = getSSHClient(host, user)
        # find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
        q = getQueue(ssh)
        if q is None:
            mbox.JobSubmit(None, ())
            return None
        # queue putter on a local machine
        local_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q))
        putter = q + '.sh'
        sftp = copyFile(ssh, putter, local_dir, calc_dir)
        remote_file = os.path.join(calc_dir, putter)
        stdout, stderr = runCommand(ssh, 'bash ' + remote_file + ' -d=' + calc_dir)
        mbox.JobSubmit(q, ('\n'.join(stdout.readlines()), '\n'.join(stderr.readlines())))
        removeFile(sftp, remote_file)
        ssh.close()

    def plotBtnPress(self, event):
        if self.noteBook.GetSelection() == 0:
            self.plot_property()
        else:
            self.plot_correlation()

    def plot_property(self):
        # plot options - get all the data to plot
        ptype = self.propType.GetItems()[self.propType.GetSelection()]
        pchoice = self.propChoice.GetItems()[self.propChoice.GetSelection()]
        data_class = self.propChoices.dataClass(ptype, pchoice)
        leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
        t1 = time.clock()
        plot_data = interface.getData(ptype, data_class, leg,
                                      [self.calcs[i] for i in getListCtrlSelection(self.calcList)])
        self.SetStatusText('Calculation time: %7.2f s.' % (time.clock() - t1))
        msg = plot_data
        try:
            self.plot_frame.Raise()
        except (AttributeError, wx.PyDeadObjectError):
            self.plot_frame = PlotFuncFrame(self)
            self.plot_frame.Show()
        pub.sendMessage('data.plot', message=msg)

    def plot_correlation(self):
        # correlate options - get all the data to plot
        xchoice = self.xCorr.GetSelection()
        ychoice = self.yCorr.GetSelection()
        leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
        data, info = interface.getCorr(xchoice, ychoice,
                                       [self.calcs[i] for i in getListCtrlSelection(self.calcList)])
        msg = [leg, data, info]
        try:
            self.plot_frame.Raise()
        except (AttributeError, wx.PyDeadObjectError):
            self.plot_frame = PlotCorrFrame(self)
            self.plot_frame.Show()
        pub.sendMessage('corr.plot', message=msg)
39.01476
120
0.608909
10,229
0.967464
0
0
2,037
0.192661
0
0
1,876
0.177433
d5da19776d7a24ff632b755eb644da772dbdd1cc
6,063
py
Python
saleor/order/migrations/0015_auto_20170206_0407.py
acabezasg/urpi-master
7c9cd0fbe6d89dad70652482712ca38b21ba6f84
[ "BSD-3-Clause" ]
6
2019-01-06T08:39:20.000Z
2022-03-04T18:07:47.000Z
saleor/order/migrations/0015_auto_20170206_0407.py
valentine217/saleor
323963748e6a2702265ec6635b930a234abde4f5
[ "BSD-3-Clause" ]
5
2021-03-09T16:22:37.000Z
2022-02-10T19:10:03.000Z
saleor/order/migrations/0015_auto_20170206_0407.py
valentine217/saleor
323963748e6a2702265ec6635b930a234abde4f5
[ "BSD-3-Clause" ]
1
2020-12-26T10:25:37.000Z
2020-12-26T10:25:37.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 10:07
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_prices.models


class Migration(migrations.Migration):

    dependencies = [
        ('order', '0014_auto_20161028_0955'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='deliverygroup',
            options={'verbose_name': 'Delivery Group', 'verbose_name_plural': 'Delivery Groups'},
        ),
        migrations.AlterModelOptions(
            name='order',
            options={'ordering': ('-last_status_change',), 'verbose_name': 'Order', 'verbose_name_plural': 'Orders'},
        ),
        migrations.AlterModelOptions(
            name='ordereditem',
            options={'verbose_name': 'Ordered item', 'verbose_name_plural': 'Ordered items'},
        ),
        migrations.AlterModelOptions(
            name='orderhistoryentry',
            options={'ordering': ('date',), 'verbose_name': 'Order history entry', 'verbose_name_plural': 'Order history entries'},
        ),
        migrations.AlterModelOptions(
            name='ordernote',
            options={'verbose_name': 'Order note', 'verbose_name_plural': 'Order notes'},
        ),
        migrations.AlterModelOptions(
            name='payment',
            options={'ordering': ('-pk',), 'verbose_name': 'Payment', 'verbose_name_plural': 'Payments'},
        ),
        migrations.AlterField(
            model_name='deliverygroup',
            name='last_updated',
            field=models.DateTimeField(auto_now=True, null=True, verbose_name='last updated'),
        ),
        migrations.AlterField(
            model_name='deliverygroup',
            name='shipping_method_name',
            field=models.CharField(blank=True, default=None, editable=False, max_length=255, null=True, verbose_name='shipping method name'),
        ),
        migrations.AlterField(
            model_name='deliverygroup',
            name='tracking_number',
            field=models.CharField(blank=True, default='', max_length=255, verbose_name='tracking number'),
        ),
        migrations.AlterField(
            model_name='order',
            name='billing_address',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='account.Address', verbose_name='billing address'),
        ),
        migrations.AlterField(
            model_name='order',
            name='discount_amount',
            field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='discount amount'),
        ),
        migrations.AlterField(
            model_name='order',
            name='discount_name',
            field=models.CharField(blank=True, default='', max_length=255, verbose_name='discount name'),
        ),
        migrations.AlterField(
            model_name='order',
            name='shipping_address',
            field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='account.Address', verbose_name='shipping address'),
        ),
        migrations.AlterField(
            model_name='order',
            name='total_net',
            field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='total net'),
        ),
        migrations.AlterField(
            model_name='order',
            name='total_tax',
            field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='total tax'),
        ),
        migrations.AlterField(
            model_name='order',
            name='tracking_client_id',
            field=models.CharField(blank=True, editable=False, max_length=36, verbose_name='tracking client id'),
        ),
        migrations.AlterField(
            model_name='order',
            name='user_email',
            field=models.EmailField(blank=True, default='', editable=False, max_length=254, verbose_name='user email'),
        ),
        migrations.AlterField(
            model_name='order',
            name='voucher',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='discount.Voucher', verbose_name='voucher'),
        ),
        migrations.AlterField(
            model_name='ordereditem',
            name='delivery_group',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='items', to='order.DeliveryGroup', verbose_name='delivery group'),
        ),
        migrations.AlterField(
            model_name='ordereditem',
            name='stock',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.Stock', verbose_name='stock'),
        ),
        migrations.AlterField(
            model_name='orderhistoryentry',
            name='comment',
            field=models.CharField(blank=True, default='', max_length=100, verbose_name='comment'),
        ),
        migrations.AlterField(
            model_name='orderhistoryentry',
            name='order',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='order.Order', verbose_name='order'),
        ),
        migrations.AlterField(
            model_name='orderhistoryentry',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user'),
        ),
        migrations.AlterField(
            model_name='payment',
            name='order',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='payments', to='order.Order', verbose_name='order'),
        ),
    ]
45.586466
181
0.626752
5,811
0.958436
0
0
0
0
0
0
1,441
0.237671
d5dac56aef00dcc6cc7a0b56db80a25f82caafb4
3,357
py
Python
testrail_client/api/configurations.py
tonybearpan/testrail-lib
267070bd017bb1d80ac40e1b84ea40dc2c2e3956
[ "MIT" ]
null
null
null
testrail_client/api/configurations.py
tonybearpan/testrail-lib
267070bd017bb1d80ac40e1b84ea40dc2c2e3956
[ "MIT" ]
null
null
null
testrail_client/api/configurations.py
tonybearpan/testrail-lib
267070bd017bb1d80ac40e1b84ea40dc2c2e3956
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from .base import TestRailAPIBase


class Config(TestRailAPIBase):
    """
    Use the following API methods to request details
    about configurations and to create or modify configurations.
    """
    def __repr__(self):
        return '<TestRailAPI config>'

    def get(self, project_id):
        """
        Returns a list of available configurations,
        grouped by configuration groups (requires TestRail 3.1 or later).
        :param project_id: The ID of the project
        """
        return self._get('get_configs/{}'.format(project_id))

    def add(self, config_group_id, name=''):
        """
        Creates a new configuration (requires TestRail 5.2 or later).
        :param config_group_id: The ID of the configuration group the configuration should be added to
        :param name: str, The name of the configuration (required)
        """
        return self._post('add_config/{}'.format(config_group_id),
                          json=dict(name=name))

    def update(self, config_group_id, name=''):
        """
        Updates an existing configuration (requires TestRail 5.2 or later).
        :param config_group_id: The ID of the configuration group the configuration should be added to
        :param name: str, The name of the configuration (required)
        """
        return self._post('update_config/{}'.format(config_group_id),
                          json=dict(name=name))

    def delete(self, config_id):
        """
        Deletes an existing configuration (requires TestRail 5.2 or later).
        :param config_id:
        """
        return self._post('delete_config/{}'.format(config_id))

    def add_group(self, project_id, name=''):
        """
        Creates a new configuration group (requires TestRail 5.2 or later).
        :param project_id: The ID of the project the configuration group should be added to
        :param name: The name of the configuration group (required)
        """
        return self._post('add_config_group/{}'.format(project_id),
                          json=dict(name=name))

    def update_group(self, config_group_id, name):
        """
        Updates an existing configuration group (requires TestRail 5.2 or later).
        :param config_group_id: The ID of the configuration group
        :param name: The name of the configuration group
        """
        return self._post('update_config_group/{}'.format(config_group_id),
                          json=dict(name=name))

    def delete_group(self, config_group_id):
        """
        Deletes an existing configuration (requires TestRail 5.2 or later).
        :param config_group_id: The ID of the configuration
        """
        return self._post('delete_config_group/{}'.format(config_group_id))

    def priority(self):
        """
        Returns a list of available priorities.
        """
        return self._get('get_priorities')

    def template(self, project_id):
        """
        Returns a list of available templates (requires TestRail 5.2 or later).
        :param project_id:The ID of the project
        """
        return self._get('get_templates/{}'.format(project_id))
36.48913
165
0.597557
3,128
0.931784
0
0
0
0
0
0
2,186
0.651177
d5db929efd07ee76ec7142cbf1f91188279ae61e
1,104
py
Python
tests/asserts_wrapper.py
QARancher/k8s_client
b290caa5db12498ed9fbb2c972ab20141ff2c401
[ "Unlicense" ]
null
null
null
tests/asserts_wrapper.py
QARancher/k8s_client
b290caa5db12498ed9fbb2c972ab20141ff2c401
[ "Unlicense" ]
4
2020-05-05T14:42:33.000Z
2020-05-10T08:15:28.000Z
tests/asserts_wrapper.py
QARancher/k8s_client
b290caa5db12498ed9fbb2c972ab20141ff2c401
[ "Unlicense" ]
null
null
null
def assert_not_none(actual_result, message=""):
    if not message:
        message = f"{actual_result} resulted with None"
    assert actual_result, message


def assert_equal(actual_result, expected_result, message=""):
    if not message:
        message = f"{actual_result} is not equal to expected " \
                  f"result {expected_result}"
    assert actual_result == expected_result, message


def assert_in_list(searched_list, wanted_element, message=""):
    if not message:
        message = f"Failed to find '{wanted_element}' in list {searched_list}"
    assert wanted_element in searched_list, message


def assert_not_in_list(searched_list, unwanted_element, message=""):
    if not message:
        message = f"'{unwanted_element}' found in list {searched_list} \n " \
                  f"although it should not be"
    assert unwanted_element not in searched_list, message


def assert_of_type(wanted_type, wanted_object, message=""):
    if not message:
        message = f"{wanted_object} is not of type: {wanted_type}"
    assert isinstance(wanted_object, wanted_type), message
35.612903
78
0.701087
0
0
0
0
0
0
0
0
311
0.281703
d5dbc4975d95ff84c0f9a2e3857f8af1ed9103e1
40,713
py
Python
tools/proto/transaction_pb2.py
ctring/Detock
a1171a511d9cd1f79cc3a8d54ec17f759d088de4
[ "MIT" ]
null
null
null
tools/proto/transaction_pb2.py
ctring/Detock
a1171a511d9cd1f79cc3a8d54ec17f759d088de4
[ "MIT" ]
null
null
null
tools/proto/transaction_pb2.py
ctring/Detock
a1171a511d9cd1f79cc3a8d54ec17f759d088de4
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: proto/transaction.proto """Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='proto/transaction.proto', package='slog', syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x17proto/transaction.proto\x12\x04slog\"1\n\x0eMasterMetadata\x12\x0e\n\x06master\x18\x01 \x01(\r\x12\x0f\n\x07\x63ounter\x18\x02 \x01(\r\"\x81\x01\n\nValueEntry\x12\r\n\x05value\x18\x01 \x01(\x0c\x12\x11\n\tnew_value\x18\x02 \x01(\x0c\x12\x1b\n\x04type\x18\x03 \x01(\x0e\x32\r.slog.KeyType\x12(\n\x08metadata\x18\x04 \x01(\x0b\x32\x14.slog.MasterMetadataH\x00\x42\n\n\x08optional\"C\n\rKeyValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12%\n\x0bvalue_entry\x18\x02 \x01(\x0b\x32\x10.slog.ValueEntry\"j\n\x14TransactionEventInfo\x12%\n\x05\x65vent\x18\x01 \x01(\x0e\x32\x16.slog.TransactionEvent\x12\x0c\n\x04time\x18\x02 \x01(\x03\x12\x0f\n\x07machine\x18\x03 \x01(\x05\x12\x0c\n\x04home\x18\x04 \x01(\x05\"\x8c\x03\n\x13TransactionInternal\x12\n\n\x02id\x18\x01 \x01(\x04\x12#\n\x04type\x18\x02 \x01(\x0e\x32\x15.slog.TransactionType\x12\x0c\n\x04home\x18\x03 \x01(\x05\x12\x1b\n\x13\x63oordinating_server\x18\x04 \x01(\r\x12\x11\n\ttimestamp\x18\x05 \x01(\x03\x12\x1b\n\x13involved_partitions\x18\x06 \x03(\r\x12\x19\n\x11\x61\x63tive_partitions\x18\x07 \x03(\r\x12\x18\n\x10involved_regions\x18\x08 \x03(\r\x12*\n\x06\x65vents\x18\t \x03(\x0b\x32\x1a.slog.TransactionEventInfo\x12\'\n\x1fmh_depart_from_coordinator_time\x18\n \x01(\x03\x12\x1e\n\x16mh_arrive_at_home_time\x18\x0b \x01(\x03\x12!\n\x19mh_enter_local_batch_time\x18\x0c \x01(\x03\x12\x1c\n\x14global_log_positions\x18\r \x03(\x03\"H\n\x11RemasterProcedure\x12\x12\n\nnew_master\x18\x01 \x01(\r\x12\x1f\n\x17is_new_master_lock_only\x18\x02 \x01(\x08\"\x19\n\tProcedure\x12\x0c\n\x04\x61rgs\x18\x01 \x03(\x0c\"1\n\nProcedures\x12#\n\nprocedures\x18\x01 \x03(\x0b\x32\x0f.slog.Procedure\"\xb1\x02\n\x0bTransaction\x12+\n\x08internal\x18\x01 \x01(\x0b\x32\x19.slog.TransactionInternal\x12 \n\x04\x63ode\x18\x02 \x01(\x0b\x32\x10.slog.ProceduresH\x00\x12+\n\x08remaster\x18\x03 \x01(\x0b\x32\x17.slog.RemasterProcedureH\x00\x12!\n\x04keys\x18\x04 \x03(\x0b\x32\x13.slog.KeyValueEntry\x12\x14\n\x0c\x64\x65leted_keys\x18\x05 \x03(\x0c\x12\'\n\x06status\x18\x06 \x01(\x0e\x32\x17.slog.TransactionStatus\x12#\n\nabort_code\x18\x07 \x01(\x0e\x32\x0f.slog.AbortCode\x12\x14\n\x0c\x61\x62ort_reason\x18\x08 
\x01(\tB\t\n\x07program*L\n\x0fTransactionType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0bSINGLE_HOME\x10\x01\x12\x1b\n\x17MULTI_HOME_OR_LOCK_ONLY\x10\x02*@\n\x11TransactionStatus\x12\x0f\n\x0bNOT_STARTED\x10\x00\x12\r\n\tCOMMITTED\x10\x01\x12\x0b\n\x07\x41\x42ORTED\x10\x02*7\n\tAbortCode\x12\t\n\x05OTHER\x10\x00\x12\x10\n\x0cRATE_LIMITED\x10\x01\x12\r\n\tRESTARTED\x10\x02*\x1e\n\x07KeyType\x12\x08\n\x04READ\x10\x00\x12\t\n\x05WRITE\x10\x01*\xde\x06\n\x10TransactionEvent\x12\x07\n\x03\x41LL\x10\x00\x12\x10\n\x0c\x45NTER_SERVER\x10\x01\x12\x1c\n\x18\x45XIT_SERVER_TO_FORWARDER\x10\x02\x12\x13\n\x0f\x45NTER_FORWARDER\x10\x03\x12\x1f\n\x1b\x45XIT_FORWARDER_TO_SEQUENCER\x10\x04\x12(\n$EXIT_FORWARDER_TO_MULTI_HOME_ORDERER\x10\x05\x12\x1c\n\x18\x45NTER_MULTI_HOME_ORDERER\x10\x06\x12%\n!ENTER_MULTI_HOME_ORDERER_IN_BATCH\x10\x07\x12$\n EXIT_MULTI_HOME_ORDERER_IN_BATCH\x10\x08\x12\x1b\n\x17\x45XIT_MULTI_HOME_ORDERER\x10\t\x12\x13\n\x0f\x45NTER_SEQUENCER\x10\n\x12.\n*EXPECTED_WAIT_TIME_UNTIL_ENTER_LOCAL_BATCH\x10\x0b\x12\x15\n\x11\x45NTER_LOCAL_BATCH\x10\x0c\x12\x1c\n\x18\x45NTER_SEQUENCER_IN_BATCH\x10\r\x12\x1b\n\x17\x45XIT_SEQUENCER_IN_BATCH\x10\x0e\x12\x1e\n\x1a\x45NTER_LOG_MANAGER_IN_BATCH\x10\x0f\x12\x1b\n\x17\x45NTER_LOG_MANAGER_ORDER\x10\x10\x12\x14\n\x10\x45XIT_LOG_MANAGER\x10\x11\x12\x13\n\x0f\x45NTER_SCHEDULER\x10\x12\x12\x16\n\x12\x45NTER_SCHEDULER_LO\x10\x13\x12\x16\n\x12\x45NTER_LOCK_MANAGER\x10\x14\x12\x15\n\x11\x44\x45\x41\x44LOCK_DETECTED\x10\x15\x12\x0e\n\nDISPATCHED\x10\x16\x12\x13\n\x0f\x44ISPATCHED_FAST\x10\x17\x12\x13\n\x0f\x44ISPATCHED_SLOW\x10\x18\x12\x1e\n\x1a\x44ISPATCHED_SLOW_DEADLOCKED\x10\x19\x12\x10\n\x0c\x45NTER_WORKER\x10\x1a\x12\x14\n\x10GOT_REMOTE_READS\x10\x1b\x12\x1f\n\x1bGOT_REMOTE_READS_DEADLOCKED\x10\x1c\x12\x0f\n\x0b\x45XIT_WORKER\x10\x1d\x12\x14\n\x10RETURN_TO_SERVER\x10\x1e\x12\x19\n\x15\x45XIT_SERVER_TO_CLIENT\x10\x1f\x62\x06proto3' ) _TRANSACTIONTYPE = _descriptor.EnumDescriptor( name='TransactionType', full_name='slog.TransactionType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SINGLE_HOME', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='MULTI_HOME_OR_LOCK_ONLY', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1252, serialized_end=1328, ) _sym_db.RegisterEnumDescriptor(_TRANSACTIONTYPE) TransactionType = enum_type_wrapper.EnumTypeWrapper(_TRANSACTIONTYPE) _TRANSACTIONSTATUS = _descriptor.EnumDescriptor( name='TransactionStatus', full_name='slog.TransactionStatus', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='NOT_STARTED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='COMMITTED', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ABORTED', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1330, serialized_end=1394, ) 
_sym_db.RegisterEnumDescriptor(_TRANSACTIONSTATUS) TransactionStatus = enum_type_wrapper.EnumTypeWrapper(_TRANSACTIONSTATUS) _ABORTCODE = _descriptor.EnumDescriptor( name='AbortCode', full_name='slog.AbortCode', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='OTHER', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='RATE_LIMITED', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='RESTARTED', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1396, serialized_end=1451, ) _sym_db.RegisterEnumDescriptor(_ABORTCODE) AbortCode = enum_type_wrapper.EnumTypeWrapper(_ABORTCODE) _KEYTYPE = _descriptor.EnumDescriptor( name='KeyType', full_name='slog.KeyType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='READ', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='WRITE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1453, serialized_end=1483, ) _sym_db.RegisterEnumDescriptor(_KEYTYPE) KeyType = enum_type_wrapper.EnumTypeWrapper(_KEYTYPE) _TRANSACTIONEVENT = _descriptor.EnumDescriptor( name='TransactionEvent', full_name='slog.TransactionEvent', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='ALL', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_SERVER', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXIT_SERVER_TO_FORWARDER', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_FORWARDER', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXIT_FORWARDER_TO_SEQUENCER', index=4, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXIT_FORWARDER_TO_MULTI_HOME_ORDERER', index=5, number=5, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_MULTI_HOME_ORDERER', index=6, number=6, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_MULTI_HOME_ORDERER_IN_BATCH', index=7, number=7, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXIT_MULTI_HOME_ORDERER_IN_BATCH', index=8, number=8, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXIT_MULTI_HOME_ORDERER', index=9, number=9, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_SEQUENCER', index=10, number=10, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXPECTED_WAIT_TIME_UNTIL_ENTER_LOCAL_BATCH', index=11, number=11, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_LOCAL_BATCH', index=12, number=12, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_SEQUENCER_IN_BATCH', index=13, number=13, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXIT_SEQUENCER_IN_BATCH', index=14, number=14, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_LOG_MANAGER_IN_BATCH', index=15, number=15, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_LOG_MANAGER_ORDER', index=16, number=16, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXIT_LOG_MANAGER', index=17, number=17, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_SCHEDULER', index=18, number=18, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_SCHEDULER_LO', index=19, number=19, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_LOCK_MANAGER', index=20, number=20, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DEADLOCK_DETECTED', index=21, number=21, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DISPATCHED', index=22, number=22, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DISPATCHED_FAST', index=23, number=23, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DISPATCHED_SLOW', index=24, number=24, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DISPATCHED_SLOW_DEADLOCKED', index=25, number=25, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ENTER_WORKER', index=26, number=26, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='GOT_REMOTE_READS', index=27, number=27, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='GOT_REMOTE_READS_DEADLOCKED', index=28, number=28, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXIT_WORKER', index=29, number=29, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='RETURN_TO_SERVER', index=30, number=30, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXIT_SERVER_TO_CLIENT', index=31, number=31, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], 
containing_type=None, serialized_options=None, serialized_start=1486, serialized_end=2348, ) _sym_db.RegisterEnumDescriptor(_TRANSACTIONEVENT) TransactionEvent = enum_type_wrapper.EnumTypeWrapper(_TRANSACTIONEVENT) UNKNOWN = 0 SINGLE_HOME = 1 MULTI_HOME_OR_LOCK_ONLY = 2 NOT_STARTED = 0 COMMITTED = 1 ABORTED = 2 OTHER = 0 RATE_LIMITED = 1 RESTARTED = 2 READ = 0 WRITE = 1 ALL = 0 ENTER_SERVER = 1 EXIT_SERVER_TO_FORWARDER = 2 ENTER_FORWARDER = 3 EXIT_FORWARDER_TO_SEQUENCER = 4 EXIT_FORWARDER_TO_MULTI_HOME_ORDERER = 5 ENTER_MULTI_HOME_ORDERER = 6 ENTER_MULTI_HOME_ORDERER_IN_BATCH = 7 EXIT_MULTI_HOME_ORDERER_IN_BATCH = 8 EXIT_MULTI_HOME_ORDERER = 9 ENTER_SEQUENCER = 10 EXPECTED_WAIT_TIME_UNTIL_ENTER_LOCAL_BATCH = 11 ENTER_LOCAL_BATCH = 12 ENTER_SEQUENCER_IN_BATCH = 13 EXIT_SEQUENCER_IN_BATCH = 14 ENTER_LOG_MANAGER_IN_BATCH = 15 ENTER_LOG_MANAGER_ORDER = 16 EXIT_LOG_MANAGER = 17 ENTER_SCHEDULER = 18 ENTER_SCHEDULER_LO = 19 ENTER_LOCK_MANAGER = 20 DEADLOCK_DETECTED = 21 DISPATCHED = 22 DISPATCHED_FAST = 23 DISPATCHED_SLOW = 24 DISPATCHED_SLOW_DEADLOCKED = 25 ENTER_WORKER = 26 GOT_REMOTE_READS = 27 GOT_REMOTE_READS_DEADLOCKED = 28 EXIT_WORKER = 29 RETURN_TO_SERVER = 30 EXIT_SERVER_TO_CLIENT = 31 _MASTERMETADATA = _descriptor.Descriptor( name='MasterMetadata', full_name='slog.MasterMetadata', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='master', full_name='slog.MasterMetadata.master', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='counter', full_name='slog.MasterMetadata.counter', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=33, serialized_end=82, ) _VALUEENTRY = _descriptor.Descriptor( name='ValueEntry', full_name='slog.ValueEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='value', full_name='slog.ValueEntry.value', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='new_value', full_name='slog.ValueEntry.new_value', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='type', full_name='slog.ValueEntry.type', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='metadata', full_name='slog.ValueEntry.metadata', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='optional', full_name='slog.ValueEntry.optional', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=85, serialized_end=214, ) _KEYVALUEENTRY = _descriptor.Descriptor( name='KeyValueEntry', full_name='slog.KeyValueEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='key', full_name='slog.KeyValueEntry.key', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value_entry', full_name='slog.KeyValueEntry.value_entry', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=216, serialized_end=283, ) _TRANSACTIONEVENTINFO = _descriptor.Descriptor( name='TransactionEventInfo', full_name='slog.TransactionEventInfo', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='event', full_name='slog.TransactionEventInfo.event', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='time', full_name='slog.TransactionEventInfo.time', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='machine', full_name='slog.TransactionEventInfo.machine', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='home', full_name='slog.TransactionEventInfo.home', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=285, serialized_end=391, ) _TRANSACTIONINTERNAL = _descriptor.Descriptor( name='TransactionInternal', full_name='slog.TransactionInternal', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='id', full_name='slog.TransactionInternal.id', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='type', full_name='slog.TransactionInternal.type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='home', full_name='slog.TransactionInternal.home', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='coordinating_server', full_name='slog.TransactionInternal.coordinating_server', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='timestamp', full_name='slog.TransactionInternal.timestamp', index=4, number=5, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='involved_partitions', full_name='slog.TransactionInternal.involved_partitions', index=5, number=6, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='active_partitions', full_name='slog.TransactionInternal.active_partitions', index=6, number=7, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='involved_regions', full_name='slog.TransactionInternal.involved_regions', index=7, number=8, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='events', 
full_name='slog.TransactionInternal.events', index=8, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mh_depart_from_coordinator_time', full_name='slog.TransactionInternal.mh_depart_from_coordinator_time', index=9, number=10, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mh_arrive_at_home_time', full_name='slog.TransactionInternal.mh_arrive_at_home_time', index=10, number=11, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mh_enter_local_batch_time', full_name='slog.TransactionInternal.mh_enter_local_batch_time', index=11, number=12, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='global_log_positions', full_name='slog.TransactionInternal.global_log_positions', index=12, number=13, type=3, cpp_type=2, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=394, serialized_end=790, ) _REMASTERPROCEDURE = _descriptor.Descriptor( name='RemasterProcedure', full_name='slog.RemasterProcedure', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='new_master', full_name='slog.RemasterProcedure.new_master', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='is_new_master_lock_only', full_name='slog.RemasterProcedure.is_new_master_lock_only', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=792, serialized_end=864, ) _PROCEDURE = _descriptor.Descriptor( name='Procedure', full_name='slog.Procedure', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='args', 
full_name='slog.Procedure.args', index=0, number=1, type=12, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=866, serialized_end=891, ) _PROCEDURES = _descriptor.Descriptor( name='Procedures', full_name='slog.Procedures', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='procedures', full_name='slog.Procedures.procedures', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=893, serialized_end=942, ) _TRANSACTION = _descriptor.Descriptor( name='Transaction', full_name='slog.Transaction', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='internal', full_name='slog.Transaction.internal', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='code', full_name='slog.Transaction.code', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='remaster', full_name='slog.Transaction.remaster', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keys', full_name='slog.Transaction.keys', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='deleted_keys', full_name='slog.Transaction.deleted_keys', index=4, number=5, type=12, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='status', full_name='slog.Transaction.status', index=5, number=6, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='abort_code', full_name='slog.Transaction.abort_code', index=6, number=7, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='abort_reason', full_name='slog.Transaction.abort_reason', index=7, number=8, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='program', full_name='slog.Transaction.program', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=945, serialized_end=1250, ) _VALUEENTRY.fields_by_name['type'].enum_type = _KEYTYPE _VALUEENTRY.fields_by_name['metadata'].message_type = _MASTERMETADATA _VALUEENTRY.oneofs_by_name['optional'].fields.append( _VALUEENTRY.fields_by_name['metadata']) _VALUEENTRY.fields_by_name['metadata'].containing_oneof = _VALUEENTRY.oneofs_by_name['optional'] _KEYVALUEENTRY.fields_by_name['value_entry'].message_type = _VALUEENTRY _TRANSACTIONEVENTINFO.fields_by_name['event'].enum_type = _TRANSACTIONEVENT _TRANSACTIONINTERNAL.fields_by_name['type'].enum_type = _TRANSACTIONTYPE _TRANSACTIONINTERNAL.fields_by_name['events'].message_type = _TRANSACTIONEVENTINFO _PROCEDURES.fields_by_name['procedures'].message_type = _PROCEDURE _TRANSACTION.fields_by_name['internal'].message_type = _TRANSACTIONINTERNAL _TRANSACTION.fields_by_name['code'].message_type = _PROCEDURES _TRANSACTION.fields_by_name['remaster'].message_type = _REMASTERPROCEDURE _TRANSACTION.fields_by_name['keys'].message_type = _KEYVALUEENTRY _TRANSACTION.fields_by_name['status'].enum_type = _TRANSACTIONSTATUS _TRANSACTION.fields_by_name['abort_code'].enum_type = _ABORTCODE _TRANSACTION.oneofs_by_name['program'].fields.append( _TRANSACTION.fields_by_name['code']) _TRANSACTION.fields_by_name['code'].containing_oneof = _TRANSACTION.oneofs_by_name['program'] _TRANSACTION.oneofs_by_name['program'].fields.append( _TRANSACTION.fields_by_name['remaster']) _TRANSACTION.fields_by_name['remaster'].containing_oneof = _TRANSACTION.oneofs_by_name['program'] DESCRIPTOR.message_types_by_name['MasterMetadata'] = _MASTERMETADATA DESCRIPTOR.message_types_by_name['ValueEntry'] = _VALUEENTRY DESCRIPTOR.message_types_by_name['KeyValueEntry'] = _KEYVALUEENTRY DESCRIPTOR.message_types_by_name['TransactionEventInfo'] = _TRANSACTIONEVENTINFO DESCRIPTOR.message_types_by_name['TransactionInternal'] = _TRANSACTIONINTERNAL DESCRIPTOR.message_types_by_name['RemasterProcedure'] = _REMASTERPROCEDURE DESCRIPTOR.message_types_by_name['Procedure'] = _PROCEDURE DESCRIPTOR.message_types_by_name['Procedures'] = _PROCEDURES DESCRIPTOR.message_types_by_name['Transaction'] = _TRANSACTION DESCRIPTOR.enum_types_by_name['TransactionType'] = _TRANSACTIONTYPE DESCRIPTOR.enum_types_by_name['TransactionStatus'] = _TRANSACTIONSTATUS DESCRIPTOR.enum_types_by_name['AbortCode'] = _ABORTCODE DESCRIPTOR.enum_types_by_name['KeyType'] = _KEYTYPE 
DESCRIPTOR.enum_types_by_name['TransactionEvent'] = _TRANSACTIONEVENT _sym_db.RegisterFileDescriptor(DESCRIPTOR) MasterMetadata = _reflection.GeneratedProtocolMessageType('MasterMetadata', (_message.Message,), { 'DESCRIPTOR' : _MASTERMETADATA, '__module__' : 'proto.transaction_pb2' # @@protoc_insertion_point(class_scope:slog.MasterMetadata) }) _sym_db.RegisterMessage(MasterMetadata) ValueEntry = _reflection.GeneratedProtocolMessageType('ValueEntry', (_message.Message,), { 'DESCRIPTOR' : _VALUEENTRY, '__module__' : 'proto.transaction_pb2' # @@protoc_insertion_point(class_scope:slog.ValueEntry) }) _sym_db.RegisterMessage(ValueEntry) KeyValueEntry = _reflection.GeneratedProtocolMessageType('KeyValueEntry', (_message.Message,), { 'DESCRIPTOR' : _KEYVALUEENTRY, '__module__' : 'proto.transaction_pb2' # @@protoc_insertion_point(class_scope:slog.KeyValueEntry) }) _sym_db.RegisterMessage(KeyValueEntry) TransactionEventInfo = _reflection.GeneratedProtocolMessageType('TransactionEventInfo', (_message.Message,), { 'DESCRIPTOR' : _TRANSACTIONEVENTINFO, '__module__' : 'proto.transaction_pb2' # @@protoc_insertion_point(class_scope:slog.TransactionEventInfo) }) _sym_db.RegisterMessage(TransactionEventInfo) TransactionInternal = _reflection.GeneratedProtocolMessageType('TransactionInternal', (_message.Message,), { 'DESCRIPTOR' : _TRANSACTIONINTERNAL, '__module__' : 'proto.transaction_pb2' # @@protoc_insertion_point(class_scope:slog.TransactionInternal) }) _sym_db.RegisterMessage(TransactionInternal) RemasterProcedure = _reflection.GeneratedProtocolMessageType('RemasterProcedure', (_message.Message,), { 'DESCRIPTOR' : _REMASTERPROCEDURE, '__module__' : 'proto.transaction_pb2' # @@protoc_insertion_point(class_scope:slog.RemasterProcedure) }) _sym_db.RegisterMessage(RemasterProcedure) Procedure = _reflection.GeneratedProtocolMessageType('Procedure', (_message.Message,), { 'DESCRIPTOR' : _PROCEDURE, '__module__' : 'proto.transaction_pb2' # @@protoc_insertion_point(class_scope:slog.Procedure) }) _sym_db.RegisterMessage(Procedure) Procedures = _reflection.GeneratedProtocolMessageType('Procedures', (_message.Message,), { 'DESCRIPTOR' : _PROCEDURES, '__module__' : 'proto.transaction_pb2' # @@protoc_insertion_point(class_scope:slog.Procedures) }) _sym_db.RegisterMessage(Procedures) Transaction = _reflection.GeneratedProtocolMessageType('Transaction', (_message.Message,), { 'DESCRIPTOR' : _TRANSACTION, '__module__' : 'proto.transaction_pb2' # @@protoc_insertion_point(class_scope:slog.Transaction) }) _sym_db.RegisterMessage(Transaction) # @@protoc_insertion_point(module_scope)
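The content above is protoc-generated descriptor boilerplate for the slog transaction messages and is not meant to be read directly. A minimal usage sketch of the generated classes, assuming the module is importable as proto.transaction_pb2 (as its __module__ strings suggest) and that the standard protobuf Python runtime is installed; field and enum names are taken from the descriptors above:

from proto import transaction_pb2 as tx_pb

txn = tx_pb.Transaction()
txn.internal.id = 42                    # TransactionInternal.id (uint64)
txn.internal.type = tx_pb.SINGLE_HOME   # top-level TransactionType enum constant
txn.status = tx_pb.COMMITTED            # top-level TransactionStatus enum constant

entry = txn.keys.add()                  # repeated KeyValueEntry
entry.key = b"account/1"
entry.value_entry.value = b"100"
entry.value_entry.type = tx_pb.WRITE    # KeyType enum constant

data = txn.SerializeToString()          # wire-format bytes
decoded = tx_pb.Transaction.FromString(data)
assert decoded.internal.id == 42 and decoded.keys[0].key == b"account/1"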
42.277259
4,079
0.759315
0
0
0
0
0
0
0
0
9,048
0.222239
d5dc3b0ac30486b996b5ad01fe0ad1a247834e86
1,411
py
Python
srl/simulation_test.py
google/simple-reinforcement-learning
9bdac29427cd5c556d7ea7531b807645f043aae3
[ "Apache-2.0" ]
60
2017-01-10T06:35:11.000Z
2020-12-19T07:33:40.000Z
srl/simulation_test.py
google/simple-reinforcement-learning
9bdac29427cd5c556d7ea7531b807645f043aae3
[ "Apache-2.0" ]
null
null
null
srl/simulation_test.py
google/simple-reinforcement-learning
9bdac29427cd5c556d7ea7531b807645f043aae3
[ "Apache-2.0" ]
29
2017-01-11T22:15:36.000Z
2022-03-17T02:17:37.000Z
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import unittest

from srl import movement
from srl import simulation
from srl import world


class TestSimulation(unittest.TestCase):
    def test_in_terminal_state(self):
        w = world.World.parse('@^')
        sim = simulation.Simulation(world.Static(w))
        self.assertFalse(sim.in_terminal_state)
        sim.act(movement.ACTION_RIGHT)
        self.assertTrue(sim.in_terminal_state)

    def test_act_accumulates_score(self):
        w = world.World.parse('@.')
        sim = simulation.Simulation(world.Static(w))
        sim.act(movement.ACTION_RIGHT)
        sim.act(movement.ACTION_LEFT)
        self.assertEqual(-2, sim.score)

    def test_to_array(self):
        w = world.World.parse('$.@^#')
        sim = simulation.Simulation(world.Static(w))
        self.assertTrue(
            (np.array([[2, 3, 4, 5, 1]], dtype=np.int8) == sim.to_array())
            .all())
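A small usage sketch of the API these tests exercise, assuming the srl package from this repository is importable; the world-string characters ('@' for the agent start, '^' for a square that ends the episode) are inferred only from the tests above:

from srl import movement, simulation, world

w = world.World.parse('@^')                    # per test_in_terminal_state
sim = simulation.Simulation(world.Static(w))
while not sim.in_terminal_state:
    sim.act(movement.ACTION_RIGHT)             # each act() also updates sim.score
print(sim.score, sim.in_terminal_state)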
32.068182
74
0.722892
722
0.511694
0
0
0
0
0
0
577
0.40893
d5dc76ad37d386c3045e8ed5404e25dd2364d605
26,564
py
Python
src/xmltollvm.py
Tejvinder/thesis-ghidra
2e59bc48d6bb820ecf6b390e5cf5893fc6ea0216
[ "MIT" ]
101
2019-10-22T09:48:19.000Z
2022-03-30T07:03:40.000Z
src/xmltollvm.py
Tejvinder/thesis-ghidra
2e59bc48d6bb820ecf6b390e5cf5893fc6ea0216
[ "MIT" ]
4
2020-03-06T14:18:47.000Z
2021-11-05T04:10:59.000Z
src/xmltollvm.py
Tejvinder/thesis-ghidra
2e59bc48d6bb820ecf6b390e5cf5893fc6ea0216
[ "MIT" ]
15
2019-10-22T13:12:39.000Z
2022-03-04T20:08:06.000Z
from llvmlite import ir import xml.etree.ElementTree as et int32 = ir.IntType(32) int64 = ir.IntType(64) int1 = ir.IntType(1) void_type = ir.VoidType() function_names = [] registers, functions, uniques, extracts = {}, {}, {}, {} internal_functions = {} memory = {} flags = ["ZF", "CF", "OF", "SF"] pointers = ["RSP", "RIP", "RBP", "EBP", "ESP"] def lift(filename): root = et.parse(filename).getroot() module = ir.Module(name="lifted") for register in root.find('globals').findall('register'): if register.get('name') in flags: var = ir.GlobalVariable(module, ir.IntType(1), register.get('name')) var.initializer = ir.Constant(ir.IntType(1), None) var.linkage = 'internal' registers[register.get('name')] = var elif register.get('name') in pointers: var = ir.GlobalVariable(module, ir.PointerType(ir.IntType(8)), register.get('name')) var.initializer = ir.Constant(ir.PointerType(ir.IntType(8)), None) var.linkage = 'internal' registers[register.get('name')] = var else: var = ir.GlobalVariable(module, ir.IntType(8 * int(register.get('size'))), register.get('name')) var.initializer = ir.Constant(ir.IntType(8 * int(register.get('size'))), None) var.linkage = 'internal' registers[register.get('name')] = var for memory_location in root.find('memory').findall('memory'): var = ir.GlobalVariable(module, ir.IntType(8 * int(memory_location.get('size'))), memory_location.get('name')) var.initializer = ir.Constant(ir.IntType(8 * int(memory_location.get('size'))), None) var.linkage = 'internal' memory[memory_location.get('name')] = var func_return = ir.VoidType() fnty = ir.FunctionType(func_return, []) ir_func = ir.Function(module, fnty, "intra_function_branch") internal_functions["intra_function_branch"] = ir_func func_return = ir.VoidType() fnty = ir.FunctionType(func_return, []) ir_func = ir.Function(module, fnty, "call_indirect") internal_functions["call_indirect"] = ir_func func_return = ir.VoidType() fnty = ir.FunctionType(func_return, []) ir_func = ir.Function(module, fnty, "bit_extraction") internal_functions["bit_extraction"] = ir_func for function in root.findall('function'): name = function.get('name') x = 1 while name in function_names: name = name + "_" + str(x) x += 1 function_names.append(name) address = function.get('address') functions[address] = [build_function(name, module), function] for address in functions: ir_func, function = functions[address] populate_func(ir_func, function) return module def populate_func(ir_func, function): builders, blocks = build_cfg(function, ir_func) if blocks == {}: return populate_cfg(function, builders, blocks) def build_function(name, module): func_return = ir.VoidType() fnty = ir.FunctionType(func_return, []) ir_func = ir.Function(module, fnty, name) return ir_func def build_cfg(function, ir_func): builders, blocks = {}, {} instructions = function.find("instructions") if instructions: block = ir_func.append_basic_block("entry") blocks["entry"] = block builders["entry"] = ir.IRBuilder(block) for instruction in instructions: address = instruction.find("address").text block = ir_func.append_basic_block(address) blocks[address] = block builders[address] = ir.IRBuilder(block) return builders, blocks # noinspection DuplicatedCode def populate_cfg(function, builders, blocks): builder = builders["entry"] stack_size = 10 * 1024 * 1024 stack = builder.alloca(ir.IntType(8), stack_size, name="stack") stack_top = builder.gep(stack, [ir.Constant(int64, stack_size - 8)], name="stack_top") builder.store(stack_top, registers["RSP"]) builder.branch(list(blocks.values())[1]) 
block_iterator = 1 instr = 0 quiter = False for instruction in function.find("instructions"): if quiter: break address = instruction.find("address").text if address in builders: builder = builders[address] pcodes = instruction.find("pcodes") pc = 0 no_branch = True for pcode in pcodes: pc += 1 mnemonic = pcode.find("name") if mnemonic.text == "COPY": output = pcode.find("output") if output.text in flags and pcode.find("input_0").get("storage") == "constant": source = ir.Constant(ir.IntType(1), int(pcode.find("input_0").text, 0)) else: source = fetch_input_varnode(builder, pcode.find("input_0")) update_output(builder, pcode.find("output"), source) elif mnemonic.text == "LOAD": input_1 = pcode.find("input_1") output = pcode.find("output") rhs = fetch_input_varnode(builder, input_1) if input_1.get("storage") == "unique" and output.get("storage") == "unique": # This is incorrect. This is treating it as a copy, should load the memory address in the input 1 update_output(builder, output, rhs) else: if input_1.text in pointers: rhs = builder.gep(rhs, [ir.Constant(int64, 0)]) result = builder.load(rhs) update_output(builder, output, result) elif mnemonic.text == "STORE": input_1 = pcode.find("input_1") # target input_2 = pcode.find("input_2") # source rhs = fetch_input_varnode(builder, input_2) lhs = fetch_output_varnode(input_1) lhs2 = builder.gep(lhs, [ir.Constant(int64, 0)]) if lhs2.type != rhs.type.as_pointer(): lhs2 = builder.bitcast(lhs2, rhs.type.as_pointer()) builder.store(rhs, lhs2) elif mnemonic.text == "BRANCH": value = pcode.find("input_0").text[2:-2] if value in functions: target = functions[value][0] builder.call(target, []) elif value in blocks: target = blocks[value] builder.branch(target) no_branch = False else: # weird jump into some label in another function # might be solved with callbr instruction? 
builder.call(internal_functions["intra_function_branch"], []) elif mnemonic.text == "CBRANCH": true_target = blocks[pcode.find("input_0").text[2:-2]] false_target = list(blocks.values())[block_iterator + 1] condition = fetch_input_varnode(builder, pcode.find("input_1")) no_branch = False builder.cbranch(condition, true_target, false_target) elif mnemonic.text == "BRANCHIND": no_branch = False target = fetch_input_varnode(builder, pcode.find("input_0")) if not target.type.is_pointer: target = builder.inttoptr(target, target.type.as_pointer()) builder.branch_indirect(target) elif mnemonic.text == "CALL": target = functions[pcode.find("input_0").text[2:-2]][0] builder.call(target, []) elif mnemonic.text == "CALLIND": # target = pcode.find("input_0").text[2:-2] builder.call(internal_functions["call_indirect"], []) elif mnemonic.text == "USERDEFINED": raise Exception("Not implemented") elif mnemonic.text == "RETURN": input_1 = pcode.find("input_1") no_branch = False if input_1 is None: builder.ret_void() else: raise Exception("Return value being passed") elif mnemonic.text == "PIECE": raise Exception("PIECE operation needs to be tested") elif mnemonic.text == "SUBPIECE": output = pcode.find("output") input_0 = pcode.find("input_0") input_1 = pcode.find("input_1") if input_1.text == "0x0": val = fetch_input_varnode(builder, input_0) result = builder.trunc(val, ir.IntType(int(output.get("size")) * 8)) update_output(builder, output, result) else: builder.call(internal_functions['bit_extraction'], []) elif mnemonic.text == "INT_EQUAL": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs) result = builder.icmp_unsigned('==', lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_NOTEQUAL": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs) result = builder.icmp_unsigned('!=', lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_LESS": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs) result = builder.icmp_unsigned('<', lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_SLESS": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs) result = builder.icmp_signed('<', lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_LESSEQUAL": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs) result = builder.icmp_unsigned('<=', lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_SLESS_EQUAL": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs) result = builder.icmp_signed('<=', lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_ZEXT": rhs = fetch_input_varnode(builder, pcode.find("input_0")) if rhs.type.is_pointer: rhs = builder.ptrtoint(rhs, rhs.type.pointee) output = 
builder.zext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8)) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_SEXT": rhs = fetch_input_varnode(builder, pcode.find("input_0")) if rhs.type.is_pointer: rhs = builder.ptrtoint(rhs, rhs.type.pointee) output = builder.sext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8)) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_ADD": input_0 = pcode.find("input_0") input_1 = pcode.find("input_1") lhs = fetch_input_varnode(builder, input_0) rhs = fetch_input_varnode(builder, input_1) target = ir.IntType(int(pcode.find("output").get("size")) * 8) if input_0.text in pointers and input_1.get("storage") == "constant": result = builder.gep(lhs, [ir.Constant(int64, int(input_1.text, 16))]) else: lhs, rhs = int_check_inputs(builder, lhs, rhs, target) result = builder.add(lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_SUB": input_0 = pcode.find("input_0") input_1 = pcode.find("input_1") lhs = fetch_input_varnode(builder, input_0) rhs = fetch_input_varnode(builder, input_1) target = ir.IntType(int(pcode.find("output").get("size")) * 8) if input_0.text in pointers and input_1.get("storage") == "constant": result = builder.gep(lhs, [ir.Constant(int64, -int(input_1.text, 16))]) else: lhs, rhs = int_check_inputs(builder, lhs, rhs, target) result = builder.sub(lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_CARRY": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs) result = builder.uadd_with_overflow(lhs, rhs) result = builder.extract_value(result, 1) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_SCARRY": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs) result = builder.sadd_with_overflow(lhs, rhs) result = builder.extract_value(result, 1) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_SBORROW": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs) result = builder.sadd_with_overflow(lhs, rhs) result = builder.extract_value(result, 1) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_2COMP": val = fetch_input_varnode(builder, pcode.find("input_0")) result = builder.not_(val) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_NEGATE": val = fetch_input_varnode(builder, pcode.find("input_0")) result = builder.neg(val) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "INT_XOR": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = int_check_inputs(builder, lhs, rhs, target) output = builder.xor(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_AND": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = int_check_inputs(builder, lhs, rhs, target) output = builder.and_(lhs, rhs) 
update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_OR": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = int_check_inputs(builder, lhs, rhs, target) output = builder.or_(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_LEFT": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = check_shift_inputs(builder, lhs, rhs, target) output = builder.shl(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_RIGHT": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = check_shift_inputs(builder, lhs, rhs, target) output = builder.lshr(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_SRIGHT": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = check_shift_inputs(builder, lhs, rhs, target) output = builder.ashr(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_MULT": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = int_check_inputs(builder, lhs, rhs, target) output = builder.mul(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_DIV": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = int_check_inputs(builder, lhs, rhs, target) output = builder.div(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_REM": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = int_check_inputs(builder, lhs, rhs, target) output = builder.urem(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_SDIV": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = int_check_inputs(builder, lhs, rhs, target) output = builder.sdiv(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "INT_SREM": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) target = ir.IntType(int(pcode.find("output").get("size")) * 8) lhs, rhs = int_check_inputs(builder, lhs, rhs, target) output = builder.srem(lhs, rhs) update_output(builder, pcode.find("output"), output) elif mnemonic.text == "BOOL_NEGATE": lhs = fetch_input_varnode(builder, pcode.find("input_0")) result = builder.neg(lhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "BOOL_XOR": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) 
result = builder.xor(lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "BOOL_AND": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) result = builder.and_(lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "BOOL_OR": lhs = fetch_input_varnode(builder, pcode.find("input_0")) rhs = fetch_input_varnode(builder, pcode.find("input_1")) result = builder.or_(lhs, rhs) update_output(builder, pcode.find("output"), result) elif mnemonic.text == "FLOAT_EQUAL": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_NOTEQUAL": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_LESS": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_LESSEQUAL": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_ADD": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_SUB": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_MULT": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_DIV": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_NEG": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_ABS": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_SQRT": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_CEIL": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_FLOOR": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_ROUND": raise Exception("Not implemented") elif mnemonic.text == "FLOAT_NAN": raise Exception("Not implemented") elif mnemonic.text == "INT2FLOAT": raise Exception("Not implemented") elif mnemonic.text == "FLOAT2FLOAT": raise Exception("Not implemented") elif mnemonic.text == "TRUNC": raise Exception("Not implemented") elif mnemonic.text == "CPOOLREF": raise Exception("Not implemented") elif mnemonic.text == "NEW": raise Exception("Not implemented") elif mnemonic.text == "MULTIEQUAL": raise Exception("Not implemented") elif mnemonic.text == "INDIRECT": raise Exception("Not implemented") elif mnemonic.text == "PTRADD": raise Exception("Not implemented") elif mnemonic.text == "PTRSUB": raise Exception("Not implemented") elif mnemonic.text == "CAST": raise Exception("Not implemented") else: raise Exception("Not a standard pcode instruction") block_iterator += 1 instr += 1 if block_iterator < len(blocks) and no_branch: builder.branch(list(blocks.values())[block_iterator]) def fetch_input_varnode(builder, name): var_type = name.get("storage") var_size = int(name.get("size")) * 8 if var_type == "register": return builder.load(registers[name.text]) elif var_type == "unique": if name.text not in list(uniques.keys()): raise Exception("Temporary variable referenced before defined") return uniques[name.text] elif var_type == "constant": var = ir.Constant(ir.IntType(var_size), int(name.text, 0)) return var elif var_type == "memory": return memory[name.text] def update_output(builder, name, output): var_type = name.get("storage") if var_type == "register": reg = registers[name.text] if reg.type != output.type.as_pointer(): reg = builder.bitcast(reg, output.type.as_pointer()) builder.store(output, reg) elif var_type == "unique": uniques[name.text] = output def fetch_output_varnode(name): var_type = name.get("storage") if var_type == "register": return registers[name.text] elif var_type == "unique": if name.text not in uniques: uniques[name.text] = None return uniques[name.text] def int_check_inputs(builder, lhs, rhs, 
target): if lhs.type != target: if lhs.type.is_pointer: lhs2 = lhs lhs = builder.ptrtoint(lhs, target) if lhs2 == rhs: rhs = lhs if rhs.type != target and lhs != rhs: if rhs.type.is_pointer: rhs = builder.ptrtoint(rhs, target) return lhs, rhs def check_shift_inputs(builder, lhs, rhs, target): if lhs.type != target: if lhs.type.is_pointer: lhs = builder.ptrtoint(lhs, target) else: lhs = builder.zext(lhs, target) if rhs.type != target: if rhs.type.is_pointer: rhs = builder.ptrtoint(rhs, target) else: rhs = builder.zext(rhs, target) return lhs, rhs def int_comparison_check_inputs(builder, lhs, rhs): # For integer comparison operations. We assume rhs is the correct type. if lhs.type.is_pointer: lhs = builder.ptrtoint(lhs, rhs.type) return lhs, rhs
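The lifter above leans on a small set of llvmlite idioms: registers and memory locations become module-level globals, every Ghidra function becomes a void LLVM function, and each instruction address gets its own basic block filled through an ir.IRBuilder. A minimal sketch of that pattern in isolation (the names RAX and example_func are illustrative, not part of xmltollvm.py):

from llvmlite import ir

int64 = ir.IntType(64)
module = ir.Module(name="lifted_example")

# Model a register as an internal global, as lift() does above
rax = ir.GlobalVariable(module, int64, "RAX")
rax.initializer = ir.Constant(int64, 0)
rax.linkage = "internal"

# One void function with an entry block, filled by an IRBuilder
fnty = ir.FunctionType(ir.VoidType(), [])
func = ir.Function(module, fnty, "example_func")
builder = ir.IRBuilder(func.append_basic_block("entry"))

# An INT_ADD-style pcode step: RAX = RAX + 1
value = builder.load(rax)
builder.store(builder.add(value, ir.Constant(int64, 1)), rax)
builder.ret_void()

print(module)   # textual LLVM IR for the whole module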
49.932331
118
0.571074
0
0
0
0
0
0
0
0
3,472
0.130703
d5dc93546bee372b907de208f03583a6f68c3b62
925
py
Python
modules/WPSeku/modules/discovery/generic/wplisting.py
Farz7/Darkness
4f3eb5fee3d8a476d001ad319ca22bca274eeac9
[ "MIT" ]
18
2020-04-24T06:50:23.000Z
2022-03-14T08:00:38.000Z
modules/WPSeku/modules/discovery/generic/wplisting.py
Farz7/Darkness
4f3eb5fee3d8a476d001ad319ca22bca274eeac9
[ "MIT" ]
null
null
null
modules/WPSeku/modules/discovery/generic/wplisting.py
Farz7/Darkness
4f3eb5fee3d8a476d001ad319ca22bca274eeac9
[ "MIT" ]
5
2020-06-28T16:21:22.000Z
2022-01-30T14:17:32.000Z
#/usr/bin/env python
# -*- Coding: UTF-8 -*-
#
# WPSeku: Wordpress Security Scanner
#
# @url: https://github.com/m4ll0k/WPSeku
# @author: Momo Outaadi (M4ll0k)

import re
from lib import wphttp
from lib import wpprint


class wplisting:
    chk = wphttp.UCheck()
    out = wpprint.wpprint()

    def __init__(self,agent,proxy,redir,time,url,cookie):
        self.url = url
        self.cookie = cookie
        self.req = wphttp.wphttp(
            agent=agent,proxy=proxy,
            redir=redir,time=time
        )

    def run(self):
        paths = ['/wp-admin','/wp-includes','/wp-content/uploads',
                 '/wp-content/plugins','/wp-content/themes']
        try:
            for path in paths:
                url = wplisting.chk.path(self.url,path)
                resp = self.req.send(url,c=self.cookie)
                if resp.status_code == 200 and resp._content != None:
                    if resp.url == url:
                        wplisting.out.plus('Dir {} listing enabled under: {}'.format(path,resp.url))
        except Exception,e:
            pass
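Note that this module is Python 2 code (the `except Exception,e:` clause) and depends on WPSeku's internal lib.wphttp and lib.wpprint helpers. A rough standalone Python 3 sketch of the same directory-listing check using plain requests; the target URL is a placeholder and the "Index of" body check is a common heuristic, not WPSeku's exact logic:

import requests

target = 'http://example.com'
paths = ['/wp-admin', '/wp-includes', '/wp-content/uploads',
         '/wp-content/plugins', '/wp-content/themes']
for path in paths:
    resp = requests.get(target + path, allow_redirects=True, timeout=10)
    # An open index typically answers 200 with an "Index of" page instead of redirecting away
    if resp.status_code == 200 and 'Index of' in resp.text:
        print('Dir {} listing enabled under: {}'.format(path, resp.url))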
24.342105
82
0.656216
705
0.762162
0
0
0
0
0
0
274
0.296216
d5deac526ab7e57ca5c90998d8462e6ef3d52eff
350
py
Python
tw2/jit/widgets/__init__.py
toscawidgets/tw2.jit
c5e8059975115385f225029ba5c7380673524122
[ "MIT" ]
1
2020-01-12T05:11:24.000Z
2020-01-12T05:11:24.000Z
tw2/jit/widgets/__init__.py
toscawidgets/tw2.jit
c5e8059975115385f225029ba5c7380673524122
[ "MIT" ]
null
null
null
tw2/jit/widgets/__init__.py
toscawidgets/tw2.jit
c5e8059975115385f225029ba5c7380673524122
[ "MIT" ]
null
null
null
from tw2.jit.widgets.chart import (AreaChart, BarChart, PieChart)
from tw2.jit.widgets.graph import (ForceDirectedGraph, RadialGraph)
from tw2.jit.widgets.tree import (SpaceTree, HyperTree, Sunburst,
                                  Icicle, TreeMap)
from tw2.jit.widgets.ajax import AjaxRadialGraph
from tw2.jit.widgets.sqla import SQLARadialGraph
43.75
67
0.742857
0
0
0
0
0
0
0
0
0
0
d5dfc52594a99b2ee5b9d8578f257b3fdecb0fcf
4,726
py
Python
bot.py
tiianprb/TikTok-Downloader-Bot
91b6fd64d5a151c3e439772c69850a18b7562ceb
[ "MIT" ]
null
null
null
bot.py
tiianprb/TikTok-Downloader-Bot
91b6fd64d5a151c3e439772c69850a18b7562ceb
[ "MIT" ]
null
null
null
bot.py
tiianprb/TikTok-Downloader-Bot
91b6fd64d5a151c3e439772c69850a18b7562ceb
[ "MIT" ]
null
null
null
import json, requests, os, shlex, asyncio, uuid, shutil
from typing import Tuple
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery

# Configs
API_HASH = os.environ['API_HASH']
APP_ID = int(os.environ['APP_ID'])
BOT_TOKEN = os.environ['BOT_TOKEN']
downloads = './downloads/{}/'

#Button
START_BUTTONS=[
    [
        InlineKeyboardButton('Source', url='https://github.com/X-Gorn/TikTokDL'),
        InlineKeyboardButton('Project Channel', url='https://t.me/xTeamBots'),
    ],
    [InlineKeyboardButton('Author', url='https://t.me/xgorn')],
]

DL_BUTTONS=[
    [
        InlineKeyboardButton('No Watermark', callback_data='nowm'),
        InlineKeyboardButton('Watermark', callback_data='wm'),
    ],
    [InlineKeyboardButton('Audio', callback_data='audio')],
]

# Running bot
xbot = Client('TikTokDL', api_id=APP_ID, api_hash=API_HASH, bot_token=BOT_TOKEN)

# Helpers
# Thanks to FridayUB
async def run_cmd(cmd: str) -> Tuple[str, str, int, int]:
    args = shlex.split(cmd)
    process = await asyncio.create_subprocess_exec(
        *args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    stdout, stderr = await process.communicate()
    return (
        stdout.decode("utf-8", "replace").strip(),
        stderr.decode("utf-8", "replace").strip(),
        process.returncode,
        process.pid,
    )

# Start
@xbot.on_message(filters.command('start') & filters.private)
async def _start(bot, update):
    await update.reply_text(f"I'm TikTokDL!\nYou can download tiktok video/audio using this bot", True, reply_markup=InlineKeyboardMarkup(START_BUTTONS))

# Downloader for tiktok
@xbot.on_message(filters.regex(pattern='.*http.*') & filters.private)
async def _tiktok(bot, update):
    url = update.text
    session = requests.Session()
    resp = session.head(url, allow_redirects=True)
    if not 'tiktok.com' in resp.url:
        return
    await update.reply('Select the options below', True, reply_markup=InlineKeyboardMarkup(DL_BUTTONS))

# Callbacks
@xbot.on_callback_query()
async def _callbacks(bot, cb: CallbackQuery):
    if cb.data == 'nowm':
        dirs = downloads.format(uuid.uuid4().hex)
        os.makedirs(dirs)
        cbb = cb
        update = cbb.message.reply_to_message
        await cb.message.delete()
        url = update.text
        session = requests.Session()
        resp = session.head(url, allow_redirects=True)
        if '?' in resp.url:
            tt = resp.url.split('?', 1)[0]
        else:
            tt = resp.url
        ttid = dirs+tt.split('/')[-1]
        r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
        result = r.text
        rs = json.loads(result)
        link = rs['result']['nowm']
        resp = session.head(link, allow_redirects=True)
        r = requests.get(resp.url, allow_redirects=True)
        open(f'{ttid}.mp4', 'wb').write(r.content)
        await bot.send_video(update.chat.id, f'{ttid}.mp4',)
        shutil.rmtree(dirs)
    elif cb.data == 'wm':
        dirs = downloads.format(uuid.uuid4().hex)
        os.makedirs(dirs)
        cbb = cb
        update = cbb.message.reply_to_message
        await cb.message.delete()
        url = update.text
        session = requests.Session()
        resp = session.head(url, allow_redirects=True)
        if '?' in resp.url:
            tt = resp.url.split('?', 1)[0]
        else:
            tt = resp.url
        ttid = dirs+tt.split('/')[-1]
        r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
        result = r.text
        rs = json.loads(result)
        link = rs['result']['wm']
        resp = session.head(link, allow_redirects=True)
        r = requests.get(resp.url, allow_redirects=True)
        open(f'{ttid}.mp4', 'wb').write(r.content)
        await bot.send_video(update.chat.id, f'{ttid}.mp4',)
        shutil.rmtree(dirs)
    elif cb.data == 'audio':
        dirs = downloads.format(uuid.uuid4().hex)
        os.makedirs(dirs)
        cbb = cb
        update = cbb.message.reply_to_message
        await cb.message.delete()
        url = update.text
        session = requests.Session()
        resp = session.head(url, allow_redirects=True)
        if '?' in resp.url:
            tt = resp.url.split('?', 1)[0]
        else:
            tt = resp.url
        ttid = dirs+tt.split('/')[-1]
        r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
        result = r.text
        rs = json.loads(result)
        link = rs['result']['wm']
        resp = session.head(link, allow_redirects=True)
        r = requests.get(resp.url, allow_redirects=True)
        open(f'{ttid}.mp4', 'wb').write(r.content)
        cmd = f'ffmpeg -i "{ttid}.mp4" -vn -ar 44100 -ac 2 -ab 192 -f mp3 "{ttid}.mp3"'
        await run_cmd(cmd)
        await bot.send_audio(update.chat.id, f'{ttid}.mp3',)
        shutil.rmtree(dirs)

xbot.run()
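The audio branch above shells out to ffmpeg through the run_cmd() helper. A standalone sketch of just that step, assuming ffmpeg is on PATH; the file names are placeholders, and the deprecated `-ab 192` flag is written as the current `-b:a 192k` bitrate option:

import asyncio
import shlex

async def run_cmd(cmd: str):
    # Same idea as the bot's helper: split the command and run it without a shell
    process = await asyncio.create_subprocess_exec(
        *shlex.split(cmd),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await process.communicate()
    return stdout.decode('utf-8', 'replace'), stderr.decode('utf-8', 'replace'), process.returncode

async def main():
    _, err, code = await run_cmd('ffmpeg -y -i "video.mp4" -vn -ar 44100 -ac 2 -b:a 192k -f mp3 "audio.mp3"')
    print('ffmpeg exited with', code)

asyncio.run(main())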
33.757143
152
0.643039
0
0
0
0
3,223
0.681972
3,492
0.738891
857
0.181337
d5e12ba6cbfd755e451e70540ba00bbbd7d6bc8c
24,254
py
Python
frontend-gui/rpanel.py
skyu0221/660-iot
d31f973c93871bfa8122f1b83364d0147d402e9e
[ "Apache-2.0" ]
null
null
null
frontend-gui/rpanel.py
skyu0221/660-iot
d31f973c93871bfa8122f1b83364d0147d402e9e
[ "Apache-2.0" ]
8
2021-03-19T01:36:06.000Z
2022-03-12T00:22:43.000Z
frontend-gui/rpanel.py
skyu0221/660-iot
d31f973c93871bfa8122f1b83364d0147d402e9e
[ "Apache-2.0" ]
null
null
null
import wx import wx.adv import random import util import config import time import datetime import threading import requests import json from functools import partial class ReqeusterThread(threading.Thread): # https://www.oreilly.com/library/view/python-cookbook/0596001673/ch06s03.html def __init__(self, name, parent_thread, parent_panel): threading.Thread.__init__(self, name=name) self._stopevent = threading.Event() self.parent_panel = parent_panel self.parent_thread = parent_thread def run(self): while (not self._stopevent.is_set()) and self.parent_thread.is_alive(): print("hello") # print(self.parent_panel.info_widget_dict) # print(self.parent_panel.info) # chnage to real time end = datetime.datetime.now() start = end - datetime.timedelta(minutes=1) self.parent_panel.info["start"] = util.convert_to_GMT_zone(start) self.parent_panel.info["end"] = util.convert_to_GMT_zone(end) self.parent_panel._send_request(self.parent_panel.info) self._stopevent.wait(5.0) def join(self, timeout=None): self._stopevent.set() print("thread stop") threading.Thread.join(self, timeout) class RightPanel(wx.Panel): def __init__(self, parent, info={}): wx.Panel.__init__(self, parent=parent) self.drop_down_menu_ID = None self.result_visual_ID = None self.info = info self._init_UI() def _init_UI(self): self.SetBackgroundColour("#BAB86C") font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT) font.SetPointSize(20) vbox = wx.BoxSizer(wx.VERTICAL) hbox1 = wx.BoxSizer(wx.HORIZONTAL) # add question label st1 = wx.StaticText(self, label='Question') st1.SetFont(font) hbox1.Add(st1, proportion=2, flag=wx.RIGHT, border=10) # add drop down menu question_list = [ "1. How many people are in the building?", "2. How many people are in a specific room?", "3. Where is someone?", # "4. Which room has someone visited?", "4. What is the utilization of a specific room?" 
] drop_down_menu = wx.ComboBox(self, choices=question_list) hbox1.Add(drop_down_menu, proportion=8, flag=wx.TOP, border=5) vbox1 = wx.BoxSizer(wx.VERTICAL) # add result label # st2 = wx.StaticText(self, label='Result') # st2.SetFont(font) # vbox1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1) # add canvas panel # canvas_panel = CanvasPanel(self) # vbox1.Add(canvas_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10) result_panel = ResultPanel(self) # result_panel.SetBackgroundColour("#000000") vbox1.Add(result_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10) vbox.Add(hbox1, proportion=1, flag=wx.EXPAND|wx.ALL, border=10) vbox.Add(vbox1, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10) self.SetSizer(vbox) # listen combo drop_down_menu.Bind(wx.EVT_COMBOBOX, partial(self.on_selection, combo_box=drop_down_menu, panel=result_panel)) def on_selection(self, event, combo_box, panel): # print(self.drop_down_menu.GetValue()) print(combo_box.GetValue()) panel.init_question_UI(combo_box.GetValue()[0]) # st2 = wx.StaticText(self, label=combo_box.GetValue()) # st2.SetFont(font) # sizer1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1) class ResultPanel(wx.Panel): def __init__(self, parent): wx.Panel.__init__(self, parent) # self._init_UI() self._q_dict = {"1": self._q1_panel, "2": self._q2_panel, "3": self._q3_panel, # "4": self._q4_panel, "4": self._q5_panel,} self.info_widget_dict = {"feeder": {}, "consumer": {}} self.worker = None self.server = config.SERVER self._set_font() def _set_font(self): self.font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT) self.font.SetPointSize(12) self.font.MakeBold() def init_question_UI(self, q_idx): # clean the panel for child in self.GetChildren(): child.Destroy() # stop the worker if self.worker: # print("the worker has been stop") self.worker.join() self.worker = None self.info_widget_dict["feeder"].clear() self.info_widget_dict["consumer"].clear() decorate_panel = self._q_dict[q_idx] decorate_panel() def add_date_time_picker_layout(self): vbox = wx.BoxSizer(wx.VERTICAL) hbox1 = wx.BoxSizer(wx.HORIZONTAL) hbox2 = wx.BoxSizer(wx.HORIZONTAL) hbox3 = wx.BoxSizer(wx.HORIZONTAL) # Start start_label = wx.StaticText(self, label="START TIME") start_label.SetFont(self.font) dpc1 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime) tpc1 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime) hbox1.Add(start_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4) hbox1.Add(dpc1, proportion=3, flag=wx.RIGHT, border=5) hbox1.Add(tpc1, proportion=3, flag=wx.RIGHT, border=5) vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5) # End end_label = wx.StaticText(self, label="END TIME") end_label.SetFont(self.font) dpc2 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime) tpc2 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime) hbox2.Add(end_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4) hbox2.Add(dpc2, proportion=3, flag=wx.RIGHT, border=5) hbox2.Add(tpc2, proportion=3, flag=wx.RIGHT, border=5) vbox.Add(hbox2, proportion=0, flag=wx.ALL, border=5) # Real time box real_label = wx.StaticText(self, label="REAL TIME") real_label.SetFont(self.font) cb = wx.CheckBox(self) hbox3.Add(real_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4) hbox3.Add(cb, proportion=3, flag=wx.RIGHT|wx.TOP, border=5) vbox.Add(hbox3, proportion=0, flag=wx.ALL, border=5) self.info_widget_dict["feeder"]["start_date"] = dpc1 self.info_widget_dict["feeder"]["start_time"] = tpc1 self.info_widget_dict["feeder"]["end_date"] = dpc2 
self.info_widget_dict["feeder"]["end_time"] = tpc2 self.info_widget_dict["feeder"]["real_time"] = cb # self.SetBackgroundColour("#000000") # r = lambda: random.randint(0,255) # color = '#%02X%02X%02X' % (r(),r(),r()) return vbox def _add_confirm_button(self, sizer, question_index): """ question_index => {1, 2, 3, 4} """ comfirm_btn = wx.Button(self, id=-1, label="Confirm") sizer.Add(comfirm_btn, proportion=0, flag=wx.TOP|wx.LEFT, border=5) # self.Bind(wx.EVT_BUTTON, self.OnClick, comfirm_btn) self.Bind(wx.EVT_BUTTON, lambda event: self.OnClick(event, question_index), comfirm_btn) def _add_result_label(self, sizer): result_label = wx.StaticText(self, label="RESULT") font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT) font.SetPointSize(20) font.MakeBold() result_label.SetFont(font) sizer.Add(result_label, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL, border=20) def OnClick(self, event, question_index): info = {} # handle date and time if question_index in [1, 2, 3, 4]: start_date = self.info_widget_dict["feeder"]["start_date"].GetValue() start_time = self.info_widget_dict["feeder"]["start_time"].GetValue() end_date = self.info_widget_dict["feeder"]["end_date"].GetValue() end_time = self.info_widget_dict["feeder"]["end_time"].GetValue() info["start"] = util.combine_datetime(start_date, start_time) info["end"] = util.combine_datetime(end_date, end_time) # print("start time = {}".format(info["start"])) # print("end time = {}".format(info["end"])) if_real_time = self.info_widget_dict["feeder"]["real_time"].GetValue() if question_index == 1: # requester send request to server pass elif question_index == 2: # requester send request to server room = self.info_widget_dict["feeder"]["room_select"].GetValue() print(room) info["room"] = room elif question_index == 3: # requester send request to server name = self.info_widget_dict["feeder"]["name_select"].GetValue() print(name) info["name"] = name else: # question_index = 4 name = self.info_widget_dict["feeder"]["name_select"].GetValue() print(name) info["name"] = name else: # question_index == 5 if_real_time = False date = self.info_widget_dict["feeder"]["date_picker"].GetValue() time = self.info_widget_dict["feeder"]["time_picker"].GetValue() room = self.info_widget_dict["feeder"]["room_select"].GetValue() info["date"] = util.combine_datetime(date, time) info["room"] = room # requester send request to server info["question_index"] = question_index self.info = info if if_real_time: if not self.worker: self.worker = ReqeusterThread(name="question_{}_requester".format(question_index), parent_thread=threading.currentThread(), parent_panel=self) self.worker.start() print("start worker") else: # first check if the worker is working if self.worker: self.worker.join() self.worker = None self._send_request(info) def _request_handle(self, url, body={}, params={}, METHOD="post"): # https://stackoverflow.com/questions/15900338/python-request-post-with-param-data print("url", url) print("body", body) print("params", params) resp = {} if METHOD == "post": r = requests.post(url, data=body) else: r = requests.get(url, params=params) print(r.status_code) if r.status_code == 200: resp = r.json() print(resp) print(type(resp)) return resp def _send_request(self, info): question_index = int(info["question_index"]) if question_index == 1: ## get ## url = self.server + "/people_building/" body = {"start": info["start"], "end": info["end"]} # body = {'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'} response = self._request_handle(url=url, body=body, 
METHOD="post") try: occu = str(response['count']) except: occu = str(0) ## received## self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu) elif question_index == 2: ## get ## url = self.server + "/people_room/" body = {"room": info["room"], "start": info["start"], "end": info["end"], # 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00' } response = self._request_handle(url=url, body=body, METHOD="post") try: occu = str(response['count']) occupancy_info = response['occupancy_info'] except: occu = str(0) occupancy_info = [] ## received ## self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu) nlb = self.info_widget_dict["consumer"]["name_list"] nlb.Clear() for name in occupancy_info: nlb.Append(name) elif question_index == 3: ## get ## url = self.server + "/person_room/" body = {"name": info["name"], "start": info["start"], "end": info["end"], # 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00' } response = self._request_handle(url=url, body=body, METHOD="post") try: room_list = response['room'] count = str(len(room_list)) except: count = str(0) room_list = [] ## received ## self.info_widget_dict["consumer"]["count_label"].SetLabel(count) rlb = self.info_widget_dict["consumer"]["room_list"] rlb.Clear() for name in room_list: rlb.Append(name) elif question_index == 4: ## get ## url = self.server + "question/4" body = {"name": info["name"], # "start_time": info["start"], # "end_time": info["end"], "time": info["start"], } response = self._request_handle(url=url, body=body, METHOD="post") count = str(random.randint(0, 20)) room_list = ["Room_1_1_140", "Room_1_1_141"] ## received ## self.info_widget_dict["consumer"]["count_label"].SetLabel(count) rlb = self.info_widget_dict["consumer"]["room_list"] rlb.Clear() for name in room_list: rlb.Append(name) elif question_index == 5: ## get ## url = self.server + "/utilization/" body = {"room": info["room"], "date": info["date"], # 'date': '2020-04-05 20:00:00' } response = self._request_handle(url=url, body=body, METHOD="post") # self.request_handle(url, body, METHOD="post") try: response = json.loads(response) utilization = "{:.2f}".format(response["utilization"]*100) + "%" except: utilization = "0%" ## received## self.info_widget_dict["consumer"]["utilization_label"].SetLabel(utilization) def _q1_panel(self): print("q1") main_vbox = self.add_date_time_picker_layout() # confirm button self._add_confirm_button(main_vbox, 1) # add result label self._add_result_label(main_vbox) # add result widget hbox = wx.BoxSizer(wx.HORIZONTAL) label = wx.StaticText(self, label="Occupancy") label.SetFont(self.font) hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) occu_label = wx.StaticText(self, label="__") occu_label.SetFont(self.font) hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5) self.info_widget_dict["consumer"]["occu_label"] = occu_label self.SetSizer(main_vbox) # https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small self.Fit() self.GetParent().SendSizeEvent() def _q2_panel(self): print("q2") main_vbox = self.add_date_time_picker_layout() # Room Info room_hbox = wx.BoxSizer(wx.HORIZONTAL) room_label = wx.StaticText(self, label="Room") room_label.SetFont(self.font) room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) room_list = [ "", "Room_1_1_140", "Room_1_1_141", "Room_1_1_142", "Room_1_1_143", "Room_1_1_144", "Room_1_1_150", "Room_1_1_184"] room_combobox = 
wx.ComboBox(self, choices=room_list) room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5) # room_info = wx.TextCtrl(self) # room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5) main_vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5) # confirm button self._add_confirm_button(main_vbox, 2) # add result label self._add_result_label(main_vbox) # add widget infomation to dict self.info_widget_dict["feeder"]["room_select"] = room_combobox # add result widget # add count hbox = wx.BoxSizer(wx.HORIZONTAL) label = wx.StaticText(self, label="Occupancy") label.SetFont(self.font) hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) occu_label = wx.StaticText(self, label="__") occu_label.SetFont(self.font) hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5) # add name list namelb = wx.ListBox(self) main_vbox.Add(namelb, proportion=0, flag=wx.ALL, border=5) self.info_widget_dict["consumer"]["occu_label"] = occu_label self.info_widget_dict["consumer"]["name_list"] = namelb self.SetSizer(main_vbox) # https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small self.Fit() self.GetParent().SendSizeEvent() def _q3_panel(self): print("q3") vbox = self.add_date_time_picker_layout() hbox1 = wx.BoxSizer(wx.HORIZONTAL) name_label = wx.StaticText(self, label="Name") name_label.SetFont(self.font) hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) name_text_ctrl = wx.TextCtrl(self) name_text_ctrl.AppendText('Please enter unique name') hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5) vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5) # confirm button self._add_confirm_button(vbox, 3) # add result label self._add_result_label(vbox) # add widget infomation to dict self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl # add result widget # add count hbox = wx.BoxSizer(wx.HORIZONTAL) label = wx.StaticText(self, label="Room Count") label.SetFont(self.font) hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) occu_label = wx.StaticText(self, label="__") occu_label.SetFont(self.font) hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5) # add name list roomlb = wx.ListBox(self) vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5) self.info_widget_dict["consumer"]["count_label"] = occu_label self.info_widget_dict["consumer"]["room_list"] = roomlb self.SetSizer(vbox) # https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small self.Fit() self.GetParent().SendSizeEvent() def _q4_panel(self): print("q4") main_vbox = self.add_date_time_picker_layout() hbox1 = wx.BoxSizer(wx.HORIZONTAL) name_label = wx.StaticText(self, label="Name") name_label.SetFont(self.font) hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) name_text_ctrl = wx.TextCtrl(self) name_text_ctrl.AppendText('Please enter unique name') hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5) main_vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5) # confirm button self._add_confirm_button(main_vbox, 4) # add result label self._add_result_label(main_vbox) # add widget infomation to dict self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl # add result widget # add count hbox = wx.BoxSizer(wx.HORIZONTAL) label = wx.StaticText(self, label="Room Count") label.SetFont(self.font) hbox.Add(label, proportion=2, 
flag=wx.TOP|wx.RIGHT, border=5) occu_label = wx.StaticText(self, label="__") occu_label.SetFont(self.font) hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5) # add name list roomlb = wx.ListBox(self) main_vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5) self.info_widget_dict["consumer"]["count_label"] = occu_label self.info_widget_dict["consumer"]["room_list"] = roomlb self.SetSizer(main_vbox) # https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small self.Fit() self.GetParent().SendSizeEvent() def _q5_panel(self): print("q5") vbox = wx.BoxSizer(wx.VERTICAL) # datetime date_hbox = wx.BoxSizer(wx.HORIZONTAL) date_label = wx.StaticText(self, label="Datetime") date_label.SetFont(self.font) dpc = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime) tpc = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime) date_hbox.Add(date_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4) date_hbox.Add(dpc, proportion=3, flag=wx.RIGHT, border=5) date_hbox.Add(tpc, proportion=3, flag=wx.RIGHT, border=5) vbox.Add(date_hbox, proportion=0, flag=wx.ALL, border=5) # Room Info room_hbox = wx.BoxSizer(wx.HORIZONTAL) room_label = wx.StaticText(self, label="Room") room_label.SetFont(self.font) room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) room_list = [ "", "Room_1_1_140", "Room_1_1_141", "Room_1_1_142", "Room_1_1_143", "Room_1_1_144", "Room_1_1_150", "Room_1_1_184"] room_combobox = wx.ComboBox(self, choices=room_list) room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5) vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5) # confirm button self._add_confirm_button(vbox, 5) # add result label self._add_result_label(vbox) # add widget infomation to dict self.info_widget_dict["feeder"]["date_picker"] = dpc self.info_widget_dict["feeder"]["time_picker"] = tpc self.info_widget_dict["feeder"]["room_select"] = room_combobox # add result widget hbox = wx.BoxSizer(wx.HORIZONTAL) label = wx.StaticText(self, label="Utilization") label.SetFont(self.font) hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) occu_label = wx.StaticText(self, label="__") occu_label.SetFont(self.font) hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5) vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5) self.info_widget_dict["consumer"]["utilization_label"] = occu_label self.SetSizer(vbox) # https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small self.Fit() self.GetParent().SendSizeEvent()
34.599144
158
0.580564
24,075
0.99262
0
0
0
0
0
0
5,125
0.211305
d5e19c75c00ba0d6d2f1c4a0eb15f229a98c4904
7,259
py
Python
webapp/search.py
henchan/memfinity
3860985e29b203f0569f60eea68ffb22aaf34b1f
[ "MIT" ]
null
null
null
webapp/search.py
henchan/memfinity
3860985e29b203f0569f60eea68ffb22aaf34b1f
[ "MIT" ]
null
null
null
webapp/search.py
henchan/memfinity
3860985e29b203f0569f60eea68ffb22aaf34b1f
[ "MIT" ]
null
null
null
"""High-level search API. This module implements application-specific search semantics on top of App Engine's search API. There are two chief operations: querying for entities, and managing entities in the search facility. Add and remove Card entities in the search facility: insert_cards([models.Card]) delete_cards([models.Card]) Query for Card entities: query_cards(query_string, limit=20) -> search.SearchResults The results items will have the following fields: user_key, user_nickname, front, back, info, tag (repeated), added, modified, source_url The query_string is free-form, as a user would enter it, and passes through a custom query processor before the query is submitted to App Engine. Notably, pass @username to restrict the query to entities authored by username, and #tag to restrict the query to only documents matching the given tag. Multiple @usernames or #tags result in an OR query. """ import re from google.appengine.api import search from google.appengine.ext import ndb QUERY_LIMIT = 20 CARD_INDEX_NAME = 'cards' # Increase this value when _card2doc changes its format so that # queries can determine the data available on returned documents. CARD_DOCUMENT_VERSION = '1' # Ensure we're under the 2000 character limit from # https://developers.google.com/appengine/docs/python/search/query_strings MAX_QUERY_LEN = 200 # TODO(chris): it would be better if this module didn't know about # specific entity types, but instead defined a protocol to get # metadata from an entity and generate a document. def insert_cards(cards): """Insert or update models.Card entities in the search facility.""" # TODO(chris): should we allow more than 200 cards per call? assert len(cards) <= 200, len(cards) card_docs = map(_card2doc, cards) index = search.Index(name=CARD_INDEX_NAME) index.put(card_docs) def delete_cards(cards): """Delete models.Card entities from the search facility.""" index = search.Index(name=CARD_INDEX_NAME) card_doc_ids = map(_card2docid, cards) index.delete(card_doc_ids) def query_cards(query_str, limit=QUERY_LIMIT, web_safe_cursor=None, ids_only=False, user_key=None): """Return the search.SearchResults for a query. ids_only is useful because the returned document IDs are url-safe keys for models.Card entities. """ if web_safe_cursor: cursor = search.Cursor(web_safe_string=web_safe_cursor) else: cursor = None index = search.Index(name=CARD_INDEX_NAME) query_processor = _QueryProcessor( query_str, name_field='user_nickname', tag_field='tag', private_field='private', user_key_field='user_key', query_options=search.QueryOptions(limit=limit, cursor=cursor, ids_only=ids_only), user_key=user_key) search_results = index.search(query_processor.query()) # TODO(chris): should this return partially-instantiated # models.Card instances instead of leaking implementation details # like we do now? return search_results def _card2doc(card): # TODO(chris): should we include all fields that would be needed # for rendering a search results item to avoid entity lookup? tag_fields = [search.AtomField(name='tag', value=tag) for tag in card.tags] doc = search.Document( doc_id=_card2docid(card), fields=[ search.AtomField(name='doc_version', value=CARD_DOCUMENT_VERSION), search.AtomField(name='user_key', value=card.user_key.urlsafe()), # TODO(chris): is user_nickname always a direct-match # shortname, e.g., @chris? search.AtomField(name='user_nickname', value=card.user_nickname), # TODO(chris): support HtmlField for richer cards? 
search.TextField(name='front', value=card.front), search.TextField(name='back', value=card.back), search.TextField(name='info', value=card.info), search.DateField(name='added', value=card.added), search.DateField(name='modified', value=card.modified), search.AtomField(name='source_url', value=card.source_url), search.AtomField(name='private', value="1" if card.private else "0"), ] + tag_fields) return doc def _card2docid(card): # We set the search.Document's ID to the entity key it mirrors. return card.key.urlsafe() def _sanitize_user_input(query_str): # The search API puts special meaning on certain inputs and we # don't want to expose the internal query language to users so # we strictly restrict inputs. The rules are: # # Allowed characters for values are [a-zA-Z0-9._-]. # @name is removed and 'name' values returned as a list. # #tag is removed and 'tag' values returned as a list. terms, names, tags = [], [], [] for token in query_str.split(): # TODO(chris): allow international characters. sane_token = re.sub(r'[^a-zA-Z0-9._-]+', '', token) if sane_token: if sane_token in ('AND', 'OK'): continue # ignore special search keywords elif token.startswith('@'): names.append(sane_token) elif token.startswith('#'): tags.append(sane_token) else: terms.append(sane_token) return terms, names, tags class _QueryProcessor(object): """Simple queries, possibly with @name and #tag tokens. name_field is the field @name tokens should apply to. tag_field is the name of the field #tag tokens should apply to. """ def __init__(self, query_str, name_field, tag_field, private_field, user_key_field, query_options=None, user_key=None): self.query_str = query_str self.name_field = name_field self.tag_field = tag_field self.private_field = private_field self.user_key_field = user_key_field self.query_options = query_options self.user_key = user_key def _sanitize_user_input(self): query_str = self.query_str[:MAX_QUERY_LEN] return _sanitize_user_input(query_str) def _build_query_string(self): terms, names, tags = self._sanitize_user_input() # Our simply query logic is to OR together all terms from the # user, then AND in the name or tag filters (plus a privacy clause). parts = [] if terms: parts.append(' OR '.join(terms)) if names: parts.append('%s: (%s)' % (self.name_field, ' OR '.join(names))) if tags: parts.append('%s: (%s)' % (self.tag_field, ' OR '.join(tags))) # Don't return cards that other users have marked private... privacy = '%s: 0' % self.private_field if self.user_key: # ... but always show the user their own cards in results. privacy += ' OR %s: (%s)' % (self.user_key_field, self.user_key) parts.append('(' + privacy + ')') return ' AND '.join(parts) def query(self): query = search.Query( query_string=self._build_query_string(), options=self.query_options) return query
37.035714
81
0.667998
1,823
0.251137
0
0
0
0
0
0
3,270
0.450475
d5e280ff84ed8b441621c5c137faf53691c8d37c
3,422
py
Python
Bot/Bot/board.py
Baidi96/AI-Agent-for-Light-Rider
6ae0cd4ea07248751c0f015ed74123ae3dec33d1
[ "MIT" ]
1
2019-12-18T08:24:22.000Z
2019-12-18T08:24:22.000Z
Bot/Bot/board.py
Baidi96/AI-Agent-for-Light-Rider
6ae0cd4ea07248751c0f015ed74123ae3dec33d1
[ "MIT" ]
null
null
null
Bot/Bot/board.py
Baidi96/AI-Agent-for-Light-Rider
6ae0cd4ea07248751c0f015ed74123ae3dec33d1
[ "MIT" ]
null
null
null
import copy
import sys

PLAYER1, PLAYER2, EMPTY, BLOCKED = [0, 1, 2, 3]
S_PLAYER1, S_PLAYER2, S_EMPTY, S_BLOCKED = ['0', '1', '.', 'x']

CHARTABLE = [(PLAYER1, S_PLAYER1), (PLAYER2, S_PLAYER2), (EMPTY, S_EMPTY), (BLOCKED, S_BLOCKED)]
DIRS = [
    ((-1, 0), "up"),
    ((1, 0), "down"),
    ((0, 1), "right"),
    ((0, -1), "left")
]


# The information of the whole grid
class Board:
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.cell = [[EMPTY for col in range(0, width)] for row in range(0, height)]

    def parse_cell_char(self, players, row, col, char):
        result = -1
        if char == S_PLAYER1:
            players[0].row = row
            players[0].col = col
        elif char == S_PLAYER2:
            players[1].row = row
            players[1].col = col
        for (i, symbol) in CHARTABLE:
            if symbol == char:
                result = i
                break
        return result

    def parse_cell(self, players, row, col, data):
        cell = []
        for char in data:
            item = self.parse_cell_char(players, row, col, char)
            cell.append(item)
        return cell

    def parse(self, players, data):
        cells = data.split(',')
        col = 0
        row = 0
        for cell in cells:
            if col >= self.width:
                col = 0
                row += 1
            self.cell[row][col] = self.parse_cell(players, row, col, cell)
            col += 1

    def in_bounds(self, row, col):
        return row >= 0 and col >= 0 and col < self.width and row < self.height

    def is_legal(self, row, col, my_id):
        enemy_id = my_id ^ 1
        return (self.in_bounds(row, col)) and (not BLOCKED == self.cell[row][col]) and (not enemy_id == self.cell[row][col])

    def is_legal_tuple(self, loc, my_id):
        # my_id parameter added: is_legal() requires it, the original call omitted it
        row, col = loc
        return self.is_legal(row, col, my_id)

    def get_adjacent(self, row, col, my_id):
        # my_id parameter added: is_legal() requires it, the original call omitted it
        result = []
        for (o_row, o_col), _ in DIRS:
            t_row, t_col = o_row + row, o_col + col
            if self.is_legal(t_row, t_col, my_id):
                result.append((t_row, t_col))
        return result

    def legal_moves(self, my_id, players):
        my_player = players[my_id]
        result = []
        for ((o_row, o_col), order) in DIRS:
            t_row = my_player.row + o_row
            t_col = my_player.col + o_col
            if self.is_legal(t_row, t_col, my_id):
                result.append(((o_row, o_col), order))
        return result

    def update_cell(self, row, col, data):
        self.cell[row][col] = data

    def output_cell(self, cell):
        done = False
        for (i, symbol) in CHARTABLE:
            if i == cell:
                if not done:
                    sys.stderr.write(symbol)
                done = True
                break
        if not done:
            sys.stderr.write("!")
            done = True

    def output(self):
        for row in self.cell:
            sys.stderr.write("\n")
            for cell in row:
                self.output_cell(cell)
        sys.stderr.write("\n")
        sys.stderr.flush()

    def tostring(self):
        res = ""
        for row in range(self.height):  # range instead of xrange for Python 3 compatibility
            for col in range(self.width):
                res += str(self.cell[row][col])
                res += ","
        return res
29.247863
124
0.504383
3,050
0.891292
0
0
0
0
0
0
88
0.025716
d5e2b128cd1d2cb827ad4460d329a4ebc4a12998
884
py
Python
baekjoon/1012.py
wonnerky/coteMaster
360e491e6342c1ee42ff49750b838a2ead865613
[ "Apache-2.0" ]
null
null
null
baekjoon/1012.py
wonnerky/coteMaster
360e491e6342c1ee42ff49750b838a2ead865613
[ "Apache-2.0" ]
null
null
null
baekjoon/1012.py
wonnerky/coteMaster
360e491e6342c1ee42ff49750b838a2ead865613
[ "Apache-2.0" ]
null
null
null
import sys
sys.setrecursionlimit(10000)


def dfs(r, c):
    global visit
    visit[r][c] = True
    mov = [(-1, 0), (0, -1), (1, 0), (0, 1)]
    for i in range(4):
        dr, dc = mov[i]
        nr, nc = r + dr, c + dc
        if 0 <= nr < N and 0 <= nc < M and visit[nr][nc] == False and board[nr][nc] == 1:
            dfs(nr, nc)


T = int(input())
for _ in range(T):
    M, N, K = map(int, input().split())
    board = [[0] * M for _ in range(N)]
    for _ in range(K):
        c, r = map(int, input().split())
        board[r][c] = 1
    visit = [[False] * M for _ in range(N)]
    cnt = 0
    for r in range(N):
        for c in range(M):
            if not visit[r][c] and board[r][c] == 1:
                cnt += 1
                dfs(r, c)
    # Debug output of the visited matrix; commented out so that only the count is printed.
    # for ele in visit:
    #     print(ele)
    # print()
    print(cnt)
27.625
90
0.417421
0
0
0
0
0
0
0
0
0
0
d5e2b817212060ef7c5fee7505c4febd057adc71
5,827
py
Python
collection/cp/algorithms-master/python/binary_tree.py
daemonslayer/Notebook
a9880be9bd86955afd6b8f7352822bc18673eda3
[ "Apache-2.0" ]
1
2019-03-24T13:12:01.000Z
2019-03-24T13:12:01.000Z
collection/cp/algorithms-master/python/binary_tree.py
daemonslayer/Notebook
a9880be9bd86955afd6b8f7352822bc18673eda3
[ "Apache-2.0" ]
null
null
null
collection/cp/algorithms-master/python/binary_tree.py
daemonslayer/Notebook
a9880be9bd86955afd6b8f7352822bc18673eda3
[ "Apache-2.0" ]
null
null
null
""" Binary Tree and basic properties 1. In-Order Traversal 2. Pre-Order Traversal 3. Post-Order Traversal 4. Level-Order Traversal """ from collections import deque class BinaryTree(object): """ Representation of a general binary tree data: value of element left: Left subtree right: Right subtree """ def __init__(self, data, left=None, right=None): if data is None: raise ValueError('data cannot be null') self.data = data self.left = left self.right = right def insert(self, data): raise NotImplementedError('Method insert is not Implemented') def delete(self, data): raise NotImplementedError('Method delete is not implemented') def inorder_traversal(self, write=True): """ Return list of node data as inorder traversal. If write is True then print as well. This is a iterative tree inorder traversal. Algorithm: 1. Create a stack of nodes node_stack 2. Mark root as current 3. While current is not none or node_stack is not empty a. While current is not empty push current to nde_stack and reassign current to current->left b. If current is empty and node_stack is not empty then pop the top of stack and print that node c. mark current as poped_node->right """ traversal_lis = [] node_stack = [] current = self while current or node_stack: while current: node_stack.append(current) current = current.left if node_stack: node = node_stack.pop() traversal_lis.append(node.data) current = node.right if write: for item in traversal_lis: print(item, end=' ') return traversal_lis def preorder_traversal(self, write=True): """ Return list of node data as preorder traversal. If write is true then print as well. Algorithm: 1. Create stack of nodes as node_stack 2. Mark root as current 3. While current is not none or node_stack is not empty a. While current is not empty i. Push current to node_stack ii. Add current->data to traversal_list iii. Reassign current to current->left b. If node_stack is not empty then pop the topmost node from node_stack and assign current to poped_node->right """ traversal_lis = [] node_stack = [] current = self while current or node_stack: while current: node_stack.append(current) traversal_lis.append(current.data) current = current.left if node_stack: node = node_stack.pop() current = node.right if write: for item in traversal_lis: print(item, end=' ') return traversal_lis def postorder_traversal(self, write=True): """ Return list of node data as postorder traversal. If write is true then print as well. Algorithm: 1. Create stack of nodes as node_stack 2. Mark root as current 3. While current is not None or node_stack is not empty a. While current is not None i. Push current to node_stack ii. Append current->data to traversal_list iii. Reassign current as current->right !IMPORTANT: Here we're iterating on current-right as we're doing postorder traversal b. If node_stack is not empty then pop top node and assign poped_node->left to current """ traversal_lis = [] node_stack = [] current = self while current or node_stack: while current: node_stack.append(current) traversal_lis.append(current.data) current = current.right if node_stack: node = node_stack.pop() current = node.left if write: for item in traversal_lis: print(item, end=' ') return traversal_lis def levelorder_traversal(self, write=True): """ Return list of node data as level order traversal. If write is true then print as well. Algorithm: 1. Maintain a queue of nodes to process as node_queue 2. Push root to node_queue 3. While node_queue is not empty a. Get top node of node_queue as top b. Push top->data to traversal_list c. 
Append top->left and top->right into node_queue if they are not null """ traversal_list = [] node_queue = deque() node_queue.append(self) while node_queue: top = node_queue.popleft() traversal_list.append(top.data) if top.left: node_queue.append(top.left) if top.right: node_queue.append(top.right) if write: for item in traversal_list: print(item, end=' ') return traversal_list def main(): """ Tree Structure: 1 / \ 2 3 / \ 4 5 """ tree = BinaryTree(1) tree.left = BinaryTree(2) tree.right = BinaryTree(3) tree.left.left = BinaryTree(4) tree.left.right = BinaryTree(5) assert tree.inorder_traversal(write=False) == [4, 2, 5, 1, 3] assert tree.preorder_traversal(write=False) == [1, 2, 4, 5, 3] assert tree.postorder_traversal(write=False) == [1, 3, 2, 5, 4] assert tree.levelorder_traversal(write=False) == [1, 2, 3, 4, 5] if __name__ == '__main__': main()
33.488506
120
0.579372
5,069
0.869916
0
0
0
0
0
0
2,801
0.480693
d5e30ec5517ff0e5f54798d022557ddc8306de32
445
py
Python
custom_components/vaddio_conferenceshot/const.py
rohankapoorcom/vaddio_conferenceshot
71744710df10f77e21e9e7568e3f6c7175b0d11d
[ "Apache-2.0" ]
null
null
null
custom_components/vaddio_conferenceshot/const.py
rohankapoorcom/vaddio_conferenceshot
71744710df10f77e21e9e7568e3f6c7175b0d11d
[ "Apache-2.0" ]
null
null
null
custom_components/vaddio_conferenceshot/const.py
rohankapoorcom/vaddio_conferenceshot
71744710df10f77e21e9e7568e3f6c7175b0d11d
[ "Apache-2.0" ]
null
null
null
import voluptuous as vol

import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PATH, CONF_USERNAME

DOMAIN = "vaddio_conferenceshot"

DATA_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
    }
)

SERVICE_RECALL_PRESET = "move_to_preset"
ATTR_PRESET_ID = "preset"
24.722222
82
0.750562
0
0
0
0
0
0
0
0
47
0.105618
d5e3869d32d3fe51b72766bc724a95897a33b8c9
32,841
py
Python
lightonml/opu.py
lightonai/lightonml
451327cccecdca4e8ec65df30f30d3fd8ad2194f
[ "Apache-2.0" ]
27
2021-02-24T15:37:20.000Z
2022-01-12T00:28:22.000Z
lightonml/opu.py
lightonai/lightonml
451327cccecdca4e8ec65df30f30d3fd8ad2194f
[ "Apache-2.0" ]
4
2021-02-26T12:58:21.000Z
2021-09-10T09:54:49.000Z
lightonml/opu.py
lightonai/lightonml
451327cccecdca4e8ec65df30f30d3fd8ad2194f
[ "Apache-2.0" ]
9
2021-02-26T15:58:32.000Z
2021-06-21T09:18:48.000Z
# Copyright (c) 2020 LightOn, All Rights Reserved. # This file is subject to the terms and conditions defined in # file 'LICENSE.txt', which is part of this source code package. """ This module contains the OPU class """ import time from math import sqrt import pkg_resources from lightonml.encoding.base import NoEncoding, NoDecoding import warnings from typing import Optional, Union, Tuple, TYPE_CHECKING import numpy as np from contextlib import ExitStack import attr import inspect import lightonml from lightonml.internal.config import get_host_option, opu_version from lightonml.internal import config, output_roi, utils, types from lightonml.internal.user_input import OpuUserInput, InputTraits from lightonml.internal.simulated_device import SimulatedOpuDevice from lightonml.context import ContextArray from lightonml.internal.settings import OpuSettings, TransformSettings from lightonml.internal.runner import TransformRunner, FitTransformRunner from lightonml.internal.types import InputRoiStrategy, IntOrTuple, TransformOutput, AcqState from lightonml.types import OutputRescaling # Import lightonopu only for typechecking, as it's an optional module and may not be present if TYPE_CHECKING: from lightonopu.internal.device import OpuDevice # noinspection PyPep8Naming class OPU: """Interface to the OPU. .. math:: \\mathbf{y} = \\lvert \\mathbf{R} \\mathbf{x} \\rvert^2 \\mbox{ (non-linear transform, the default)} .. math:: \\mathbf{y} = \\mathbf{R}\\mathbf{x} \\mbox{ (linear transform)} Main methods are `transform`, `linear_transform`, `fit1d` and `fit2d`, and accept NumPy arrays or PyTorch tensors. The non-linear transform (`transform`) is a native operation for the OPU, and performs at a higher speed than `linear_transform`. Acquiring/releasing hardware device resources is done by open/close and a context-manager interface. Unless `open_at_init=False`, these resources are acquired automatically at init. If another process or kernel has not released the resources, an error will be raised, call `close()` or shutdown the kernel on the OPU object to release it. Parameters ---------- n_components : int, dimensionality of the target projection space. opu_device : OpuDevice or SimulatedOpuDevice, optional optical processing unit instance linked to a physical or simulated device. If not provided, a device is properly instantiated. If opu_device is of type SimulatedOpuDevice, the random matrix is generated at __init__, using max_n_features and n_components max_n_features: int, optional maximum number of binary features that the OPU will transform used only if opu_device is a SimulatedOpuDevice, in order to initiate the random matrix config_file : str, optional path to the configuration file (for dev purpose) config_override: dict, optional for override of the config_file (for dev purpose) verbose_level: int, optional deprecated, use lightonml.set_verbose_level() instead .. seealso:: `lightonml.set_verbose_level` input_roi_strategy: types.InputRoiStrategy, optional describes how to display the features on the input device .. seealso:: `lightonml.internal.types.InputRoiStrategy` open_at_init: bool, optional forces the setting of acquiring hardware resource at init. 
If not provided, follow system's setting (usually True) disable_pbar: bool, optional disable display of the progress bar when verbose_level is set to 1 simulated: bool, optional performs the random projection using CPU, in case no OPU is available on your machine the random matrix is then generated at __init__, using max_n_features and n_components rescale: types.OutputRescaling, optional, output rescaling method for `linear_transform`. Ignored by `transform`. .. seealso:: `lightonml.types.OutputRescaling` Attributes ---------- n_components: int dimensionality of the target projection space. rescale: types.OutputRescaling, output rescaling method for `linear_transform`. Ignored by `transform`. max_n_features: int maximum number of binary features that the OPU will transform writeable only if opu_device is a SimulatedOpuDevice, in order to initiate or resize the random matrix device: OpuDevice or SimulatedOpuDevice underlying hardware that performs transformation (read-only) input_roi_strategy: types.InputRoiStrategy, optional describes how to display the features on the input device """ def __init__(self, n_components: int = 200000, opu_device: Optional[Union["OpuDevice", SimulatedOpuDevice]] = None, max_n_features: int = 1000, config_file: str = "", config_override: dict = None, verbose_level: int = -1, input_roi_strategy: types.InputRoiStrategy = types.InputRoiStrategy.full, open_at_init: bool = None, disable_pbar=False, simulated=False, rescale: Union[OutputRescaling, str] = OutputRescaling.variance): self.__opu_config = None self.__config_file = config_file self.__config_override = config_override self._max_n_features = max_n_features self.disable_pbar = disable_pbar self.rescale = rescale # Get trace and print functions if verbose_level != -1: warnings.warn("Verbose level arg will removed in 1.3, " "Use lightonml.set_verbose_level instead", DeprecationWarning) lightonml.set_verbose_level(verbose_level) else: verbose_level = lightonml.get_verbose_level() self._debug = lightonml.get_debug_fn() self._trace = lightonml.get_trace_fn() self._print = lightonml.get_print_fn() no_config_msg = "No configuration files for the OPU was found on this machine.\n" \ "You may want to run the OPU in a simulated manner, by passing the " \ "simulated argument to True at init.\n" \ "See https://docs.lighton.ai/notes/get_started.html#Simulating-an-OPU " \ "for more details.\n" \ "See also https://lighton.ai/products for getting access to our technology." 
if simulated and opu_device is not None: raise ValueError("simulated and opu_device arguments are conflicting") # Device init, or take the one passed as input if opu_device: if type(opu_device).__name__ not in ["SimulatedOpuDevice", "OpuDevice"]: raise TypeError("opu_device must be of type SimulatedOpuDevice or OpuDevice") self.device = opu_device elif simulated: self.device = SimulatedOpuDevice() else: # Instantiate device directly from lightonopu.internal.device import OpuDevice if not self.__config_file and not config.host_has_opu_config(): # Looks like there's no OPU on this host as we didn't find configuration files raise RuntimeError(no_config_msg) opu_type = self.config["type"] frametime_us = self.config["input"]["frametime_us"] exposure_us = self.config["output"]["exposure_us"] seq_nb_prelim = self.config.get("sequence_nb_prelim", 0) name = self.config["name"] self.device = OpuDevice(opu_type, frametime_us, exposure_us, seq_nb_prelim, None, verbose_level, name) self._base_frametime_us = self.device.frametime_us self._base_exposure_us = self.device.exposure_us if self._s.simulated: # build the random matrix if not done already self._resize_rnd_matrix(max_n_features, n_components) else: # Make sure lightonopu is at 1.4.1 or later, needed for linear_reconstruction pkg_resources.require("lightonopu>=1.4.1") # initialize linear_reconstruction library from lightonopu import linear_reconstruction linear_reconstruction.init(np.prod(self.device.input_shape)) self._output_roi = output_roi.OutputRoi(self.device.output_shape_max, self.device.output_roi_strategy, self._s.allowed_roi, self._s.min_n_components) # This also sets the output ROI self.n_components = n_components self.input_roi_strategy = input_roi_strategy # Runner initialized when entering fit self._runner = None # type: Optional[TransformRunner] # ExitStack for device acquisition, initialized when entering fit self._acq_stack = ExitStack() self._trace("OPU initialized") # Open at init, unless relevant host.json option is False if open_at_init is None: open_at_init = get_host_option("lightonml_open_at_init", True) if open_at_init: self.open() def _tr_settings(self, no_input=False, **override) -> TransformSettings: """Returns transform settings for feeding to TransformRunner""" init = TransformSettings(self.input_roi_strategy, self.n_components) settings = attr.evolve(init, **override) if no_input and self.input_roi_strategy is InputRoiStrategy.auto: # If no input_roi, replace auto by full strategy settings.input_roi_strategy = InputRoiStrategy.full assert settings.input_roi is None return settings def fit1d(self, X=None, n_features: int = None, packed: bool = False, online=False, **override): """ Configure OPU transform for 1d vectors The function can be either called with input vector, for fitting OPU parameters to it, or just vector dimensions, with ``n_features``. When input is bit-packed the packed flag must be set to True. When input vectors must be transformed one by one, performance will be improved with the online flag set to True. 
Parameters ---------- X: np.ndarray or torch.Tensor Fit will be made on this vector to optimize transform parameters n_features: int Number of features for the input, necessary if X parameter isn't provided packed: bool Set to true if the input vectors will be already bit-packed online: bool, optional Set to true if the transforms will be made one vector after the other defaults to False override: dict, optional keyword args for overriding transform settings (advanced parameters) """ return self.__fit(X, n_features, packed, online, False, **override) def fit2d(self, X=None, n_features: Tuple[int, int] = None, packed: bool = False, online=False, **override): """ Configure OPU transform for 2d vectors The function can be either called with input vector, for fitting OPU parameters to it, or just vector dimensions, with `n_features`. When input is bit-packed the packed flag must be set to True. Number of features must be then provided with `n_features` When input vectors must be transformed one by one, performance will be improved with the online flag set to True. Parameters ---------- X: np.ndarray or torch.Tensor a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not n_features: tuple(int) Number of features for the input, necessary if X parameter isn't provided, or if input is bit-packed packed: bool, optional whether the input data is in bit-packed representation if True, each input vector is assumed to be a 1d array, and the "real" number of features must be provided as n_features defaults to False online: bool, optional Set to true if the transforms will be made one vector after the other defaults to False override: dict, optional keyword args for overriding transform settings (advanced parameters) """ return self.__fit(X, n_features, packed, online, True, **override) def transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput: """ Performs the nonlinear random projections of one or several input vectors. The `fit1d` or `fit2d` method must be called before, for setting vector dimensions or online option. If you need to transform one vector after each other, add `online=True` in the fit function. Parameters ---------- X: np.ndarray or torch.Tensor input vector, or batch of input vectors. Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`. encoder_cls: encoder.base.BaseTransformer, optional class or instance of class that transform the input into binary vectors to be processed by the opu. decoder_cls: encoder.base.BaseTransformer, optional class or instance of class that transforms the output of the opu back into the appropriate format. 
Returns ------- Y: np.ndarray or torch.Tensor complete array of nonlinear random projections of X, of size self.n_components If input is an ndarray, type is actually ContextArray, with a context attribute to add metadata """ assert self._runner, "Call fit1d or fit2d before transform" assert self.device.active, "OPU device isn't active, use opu.open() or \"with opu:\"" if inspect.isclass(encoder_cls): encoder = encoder_cls() else: encoder = encoder_cls X_enc = encoder.transform(X) user_input = OpuUserInput.from_traits(X_enc, self._runner.traits) self._debug(str(user_input)) if user_input.is_batch and not self._s.simulated: # With batch input start acquisition first assert self.device.acq_state.value != AcqState.online.value, \ "Can't transform a batch of vectors when acquisition is" \ " in online mode, only single vectors" with self.device.acquiring(n_images=self._s.n_samples_by_pass): out = self._runner.transform(user_input) else: out = self._runner.transform(user_input) return self._post_transform(out, user_input, encoder, decoder_cls) def linear_transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput: """ Do a linear transform of X, for Nitro (non-linear) photonic cores. Parameters ---------- X: np.ndarray or torch.Tensor input vector, or batch of input vectors. Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`. encoder_cls: encoding.base.BaseTransformer, optional class or instance of class that transform the input into binary vectors to be processed by the opu. decoder_cls: encoding.base.BaseTransformer, optional class or instance of class that transforms the output of the opu back into the appropriate format. Returns ------- Y: np.ndarray or torch.Tensor complete array of nonlinear random projections of X, of size self.n_components If input is an ndarray, type is actually ContextArray, with a context attribute to add metadata """ assert self._runner, "Call fit1d or fit2d before linear_transform" traits = self._runner.traits if traits.packed: # TODO implement for packed raise RuntimeError("Linear transform isn't yet implemented for packed input :/") if inspect.isclass(encoder_cls): encoder = encoder_cls() else: encoder = encoder_cls X_enc = encoder.transform(X) user_input = OpuUserInput.from_traits(X_enc, traits) _, result_ctx = self._raw_linear_transform(X_enc, traits, user_input) # Decoding, add context, and optional convert back to torch if needed output = self._post_transform(result_ctx, user_input, encoder, decoder_cls) # Rescale the output, intentionally after the decoding step if self.rescale is OutputRescaling.variance: n_features = user_input.n_features_s output = output / (self._s.stdev * sqrt(n_features)) elif self.rescale is OutputRescaling.norm: output = output / (self._s.stdev * sqrt(self.n_components)) return output def transform1d(self, *args, **kwargs): raise RuntimeError("transform1d is deprecated, you must now use fit1d and transform") def transform2d(self, *args, **kwargs): raise RuntimeError("transform2d is deprecated, you must now use fit2d and transform") def fit_transform1d(self, X, packed: bool = False, **override) -> ContextArray: """Performs the nonlinear random projections of 1d input vector(s). This function is the one-liner equivalent of `fit1d` and `transform` calls. .. warning:: when making several transform calls, prefer calling `fit1d` and then `transform`, or you might encounter an inconsistency in the transformation matrix. 
The input data can be bit-packed, where ``n_features = 8*X.shape[-1]`` Otherwise ``n_features = X.shape[-1]`` If tqdm module is available, it is used for progress display Parameters ---------- X: np.ndarray or torch.Tensor a 1d input vector, or batch of 1d input_vectors, binary encoded, packed or not batch can be 1d or 2d. In all cases ``output.shape[:-1] = X.shape[:-1]`` packed: bool, optional whether the input data is in bit-packed representation defaults to False override: keyword args for overriding transform settings (advanced parameters) Returns ------- Y: np.ndarray or torch.Tensor complete array of nonlinear random projections of X, of size self.n_components If input is an ndarray, type is actually ContextArray, with a context attribute to add metadata """ self.fit1d(X, None, packed, False, **override) return self.transform(X) def fit_transform2d(self, X, packed: bool = False, n_2d_features=None, **override) -> ContextArray: """Performs the nonlinear random projections of 2d input vector(s). This function is the one-liner equivalent of `fit2d` and `transform` calls. .. warning:: when making several transform calls, prefer calling `fit2d` and then `transform`, or you might encounter an inconsistency in the transformation matrix. If tqdm module is available, it is used for progress display Parameters ---------- X: np.ndarray or torch.Tensor a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not packed: bool, optional whether the input data is in bit-packed representation if True, each input vector is assumed to be a 1d array, and the "real" number of features must be provided as n_2d_features defaults to False n_2d_features: list, tuple or np.ndarray of length 2 If the input is bit-packed, specifies the shape of each input vector. Not needed if the input isn't bit-packed. override: keyword args for overriding transform settings (advanced parameters) Returns ------- Y: np.ndarray or torch.Tensor complete array of nonlinear random projections of X, of size self.n_components If input is an ndarray, type is actually ContextArray, with a context attribute to add metadata """ self.fit2d(X, n_2d_features, packed, False, **override) return self.transform(X) def __fit(self, X, n_features: IntOrTuple, packed: bool, online: bool, is_2d_features: bool, **override): """Internal working of the fitXd calls Instantiates a TransformRunner, and start online acq if needs be. """ if X is not None: # Input is provided, do the fit with user input user_input = OpuUserInput.from_input(X, packed, is_2d_features, n_features) tr_settings = self._tr_settings(no_input=False, **override) self._runner = FitTransformRunner(self._s, tr_settings, user_input, device=self.device, disable_pbar=self.disable_pbar) else: # Only dimensions are provided, no fitting happens on input assert n_features, "either input vector or n_features must be specified" # tr_settings has no input_roi, since it uses X to compute it tr_settings = self._tr_settings(no_input=True, **override) traits = InputTraits(n_features, packed) self._runner = TransformRunner(self._s, tr_settings, traits, device=self.device, disable_pbar=self.disable_pbar) self._acq_stack.close() if online: if self._s.no_single_transform: raise RuntimeError("Online transform isn't available with this OPU") # Start acquisition only if online. Batch transform start their own. self._acq_stack.enter_context(self.device.acquiring(online=True)) @staticmethod def _post_transform(output, user_input, encoder, decoder_cls): """Final steps after transform 1. 
reshape 2. decode the output 3. convert to tensor if user input was tensor """ output = user_input.reshape_output(output) # If encoder has get_params method, it's for transmitting it to decoder init if inspect.isclass(decoder_cls): if hasattr(encoder, "get_params"): decoder = decoder_cls(**encoder.get_params()) else: decoder = decoder_cls() else: decoder = decoder_cls output = decoder.transform(output) if user_input.is_tensor: # noinspection PyPackageRequirements,PyUnresolvedReferences import torch return torch.from_numpy(output) else: return output def _raw_linear_transform(self, X, traits=None, user_input=None): """ Do linear_transform of X, and return both raw OPU output and decoded output in a tuple """ if traits is None: assert self._runner, "Call fit1d or fit2d before linear_transform" traits = self._runner.traits if user_input is None: user_input = OpuUserInput.from_traits(X, traits) if self._s.simulated: prepared_X = X else: assert self.device.acq_state.value != AcqState.online.value, \ "Can't do linear transform when acquisition is" \ " in online mode, only single vectors" assert self._runner.t.input_roi_strategy == InputRoiStrategy.full, \ "ROI strategy must be full for linear_transform to be correct.\n" \ "Set input_roi_strategy attribute to InputRoiStrategy.full." # X2 is now numpy 2D, whatever the initial shape and the type (torch or numpy) X2 = user_input.reshape_input(raveled_features=True, leave_single_dim=True) try: import lightonopu.linear_reconstruction as reconstruction except ImportError: raise RuntimeError("Need a lightonopu version with linear_reconstruction module") start = time.time() prepared_X = reconstruction.encode_batch(X2) self._trace(f"Encoding time {time.time() - start} s") # Restore the dimension after batch encoding to something suitable for formatting prepared_X = user_input.unravel_features(prepared_X) # Run the OPU transform prepared_input = OpuUserInput.from_traits(prepared_X, traits) start = time.time() with self.device.acquiring(n_images=self._s.n_samples_by_pass): rp_opu = self._runner.transform(prepared_input, linear=True) self._trace(f"Transform time {time.time() - start} s") if self._s.simulated: result_ctx = rp_opu else: # Decoding forgets about the context, re-add it to result afterwards start = time.time() result = reconstruction.decode_batch(rp_opu) self._trace(f"Decoding time {time.time() - start} s") result_ctx = ContextArray(result, rp_opu.context) return rp_opu, result_ctx def __enter__(self): """Context manager interface that acquires hardware resources used by the OPU device.""" self.__active_before_enter = self.device.active self.open() return self def __exit__(self, *args): # Don't close if OPU was already active if not self.__active_before_enter: self.close() def open(self): """Acquires hardware resources used by the OPU device .. 
seealso:: `close()` or use the context manager interface for closing at the end af an indent block """ if self.device.active: return self.device.open() # initial reservation for giving batch transforms a buffer ready to use self.device.reserve(self._s.n_samples_by_pass) if self._s.detect_trigger: # Detect trigger issue, and take action if needed issue = utils.detect_trigger_issue(self.device) if issue: # noinspection PyProtectedMember,PyUnresolvedReferences self.device._OpuDevice__opu.nb_prelim = 1 self._debug("trigger issue detected, workaround applied") else: self._debug("trigger issue not detected") self._debug("OPU opened") def close(self): """Releases hardware resources used by the OPU device""" self._acq_stack.close() self.device.close() self._debug("OPU closed") @property def config(self): """Returns the internal configuration object""" # Load it when asked first time if not self.__opu_config: self.__opu_config = config.load_config(self.__config_file, self._trace) if self.__config_override is not None: utils.recurse_update(self.__opu_config, self.__config_override) return self.__opu_config @property def rescale(self): return self._rescale @rescale.setter def rescale(self, value): # If str it's the enum value if isinstance(value, str): self._rescale = OutputRescaling[value.lower()] else: assert isinstance(value, OutputRescaling) self._rescale = value @property def max_n_components(self): return self._output_roi.max_components @property def n_components(self) -> int: return self._n_components @n_components.setter def n_components(self, value: int): if self._s.simulated: self._resize_rnd_matrix(self.max_n_features, value) else: self.device.output_roi = self._output_roi.compute_roi(value) # We used to call device.reserve here, but moved to device.acquiring() self._n_components = value @property def max_n_features(self) -> int: return self._s.max_n_features @max_n_features.setter def max_n_features(self, value: int): if not self._s.simulated: raise AttributeError("max_n_feature can't be set if device is real") self._resize_rnd_matrix(value, self._n_components) self._max_n_features = value @property def _s(self) -> OpuSettings: """Returns immutable settings associated with the OPU Settings are immutable (attrs frozen), so generate it at each call. 
Performance impact is negligible""" # Get default value pass_default = attr.fields(OpuSettings).n_samples_by_pass.default # Common settings to both simulated and base kwargs = {"input_shape": self.device.input_shape, "output_max_shape": self.device.output_shape_max, "frametime_us": self._base_frametime_us, "exposure_us": self._base_exposure_us} if isinstance(self.device, SimulatedOpuDevice): # Notice we never query self.config here, in order not to # need a configuration file for simulated device return OpuSettings(max_n_features=self._max_n_features, n_samples_by_pass=pass_default, simulated=True, **kwargs ) return OpuSettings( max_n_features=int(np.prod(self.device.input_shape)), # Will use defaults of OpuSettings if not found n_samples_by_pass=self.config.get("n_samples_by_pass", pass_default), min_batch_size=self.config["input"].get("minimum_batch_size", 0), allowed_roi=self.config["output"].get("allowed_roi"), # min_n_components is linked to the minimum output size min_n_components=self.config["output"].get("minimum_output_size", 0), ones_range=self.config["ones_range"], n_tries=self.config.get("n_transform_tries", 5), detect_trigger=self.config.get("detect_trigger_issue", False), no_single_transform=self.config.get("no_single_transform", False), stdev=self.config["output"].get("stdev", 1.), **kwargs) def _resize_rnd_matrix(self, n_features: int, n_components: int): """Resize device's random matrix""" assert isinstance(self.device, SimulatedOpuDevice) rnd_mat = self.device.random_matrix if rnd_mat is None or rnd_mat.shape != (n_features, n_components): self._print("OPU: computing the random matrix... ", end='', flush=True) self.device.build_random_matrix(n_features, n_components) self._print("OK") def version(self, devices=False): """Returns a multi-line string containing name and versions of the OPU""" version = [] # Build OPU name if not self._s.simulated: version.append(opu_version(self.__opu_config)) # module version version.append(f"lightonml version {lightonml.__version__}") try: # noinspection PyUnresolvedReferences import lightonopu version.append(f"lightonopu version {lightonopu.__version__}") except ImportError: pass if devices: version.append(self.device.versions()) return '\n'.join(version) def __getstate__(self): state = self.__dict__.copy() # Remove logging functions, they can't be pickled state.pop("_debug") state.pop("_trace") state.pop("_print") # acq stack can't be pickled, will be restored state.pop("_acq_stack") # If acquisition is ongoing, close it if not self._s.simulated: state["__online_acq"] = self.device.acq_state.value == AcqState.online.value self._acq_stack.close() # Device itself is closed on pickling return state def __setstate__(self, state): self.__dict__.update(state) # Restore logging functions removed at getstate self._debug = lightonml.get_debug_fn() self._trace = lightonml.get_trace_fn() self._print = lightonml.get_print_fn() self._acq_stack = ExitStack() # Restore online acquisition if it was the case if state.get("__online_acq", False): self._acq_stack.enter_context(self.device.acquiring(online=True))
44.319838
114
0.645595
31,548
0.960628
0
0
4,431
0.134923
0
0
16,797
0.511464
d5e4c8d6143747e9fa0113815e838834d857b208
1,022
py
Python
example/shovel/bar.py
demiurgestudios/shovel
3db497164907d3765fae182959147d19064671c7
[ "MIT" ]
202
2015-01-12T13:47:29.000Z
2022-02-09T19:13:36.000Z
example/shovel/bar.py
demiurgestudios/shovel
3db497164907d3765fae182959147d19064671c7
[ "MIT" ]
14
2017-04-09T17:04:53.000Z
2021-05-16T11:08:34.000Z
example/shovel/bar.py
demiurgestudios/shovel
3db497164907d3765fae182959147d19064671c7
[ "MIT" ]
22
2015-09-11T18:35:10.000Z
2021-05-16T11:04:56.000Z
from shovel import task


@task
def hello(name='Foo'):
    '''Prints "Hello, " followed by the provided name.

    Examples:
        shovel bar.hello
        shovel bar.hello --name=Erin
        http://localhost:3000/bar.hello?Erin'''
    print('Hello, %s' % name)


@task
def args(*args):
    '''Echoes back all the args you give it.

    This exists mostly to demonstrate the fact that shovel
    is compatible with variable argument functions.

    Examples:
        shovel bar.args 1 2 3 4
        http://localhost:3000/bar.args?1&2&3&4'''
    for arg in args:
        print('You said "%s"' % arg)


@task
def kwargs(**kwargs):
    '''Echoes back all the kwargs you give it.

    This exists mostly to demonstrate that shovel is
    compatible with keyword argument functions.

    Examples:
        shovel bar.kwargs --foo=5 --bar 5 --howdy hey
        http://localhost:3000/bar.kwargs?foo=5&bar=5&howdy=hey'''
    for key, val in kwargs.items():
        print('You said "%s" => "%s"' % (key, val))
27.621622
65
0.614481
0
0
0
0
993
0.971624
0
0
779
0.762231
d5e5a12f0690f68a0f2da693b51965dfe681eeea
22,938
py
Python
scripts/external_libs/scapy-2.4.3/scapy/config.py
timgates42/trex-core
efe94752fcb2d0734c83d4877afe92a3dbf8eccd
[ "Apache-2.0" ]
956
2015-06-24T15:04:55.000Z
2022-03-30T06:25:04.000Z
scripts/external_libs/scapy-2.4.3/scapy/config.py
angelyouyou/trex-core
fddf78584cae285d9298ef23f9f5c8725e16911e
[ "Apache-2.0" ]
782
2015-09-20T15:19:00.000Z
2022-03-31T23:52:05.000Z
scripts/external_libs/scapy-2.4.3/scapy/config.py
angelyouyou/trex-core
fddf78584cae285d9298ef23f9f5c8725e16911e
[ "Apache-2.0" ]
429
2015-06-27T19:34:21.000Z
2022-03-23T11:02:51.000Z
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) Philippe Biondi <[email protected]> # This program is published under a GPLv2 license """ Implementation of the configuration object. """ from __future__ import absolute_import from __future__ import print_function import functools import os import re import time import socket import sys from scapy import VERSION, base_classes from scapy.consts import DARWIN, WINDOWS, LINUX, BSD, SOLARIS from scapy.error import log_scapy, warning, ScapyInvalidPlatformException from scapy.modules import six from scapy.themes import NoTheme, apply_ipython_style ############ # Config # ############ class ConfClass(object): def configure(self, cnf): self.__dict__ = cnf.__dict__.copy() def __repr__(self): return str(self) def __str__(self): s = "" keys = self.__class__.__dict__.copy() keys.update(self.__dict__) keys = sorted(keys) for i in keys: if i[0] != "_": r = repr(getattr(self, i)) r = " ".join(r.split()) wlen = 76 - max(len(i), 10) if len(r) > wlen: r = r[:wlen - 3] + "..." s += "%-10s = %s\n" % (i, r) return s[:-1] class Interceptor(object): def __init__(self, name=None, default=None, hook=None, args=None, kargs=None): self.name = name self.intname = "_intercepted_%s" % name self.default = default self.hook = hook self.args = args if args is not None else [] self.kargs = kargs if kargs is not None else {} def __get__(self, obj, typ=None): if not hasattr(obj, self.intname): setattr(obj, self.intname, self.default) return getattr(obj, self.intname) @staticmethod def set_from_hook(obj, name, val): int_name = "_intercepted_%s" % name setattr(obj, int_name, val) def __set__(self, obj, val): setattr(obj, self.intname, val) self.hook(self.name, val, *self.args, **self.kargs) def _readonly(name): default = Conf.__dict__[name].default Interceptor.set_from_hook(conf, name, default) raise ValueError("Read-only value !") ReadOnlyAttribute = functools.partial( Interceptor, hook=(lambda name, *args, **kwargs: _readonly(name)) ) ReadOnlyAttribute.__doc__ = "Read-only class attribute" class ProgPath(ConfClass): universal_open = "open" if DARWIN else "xdg-open" pdfreader = universal_open psreader = universal_open svgreader = universal_open dot = "dot" display = "display" tcpdump = "tcpdump" tcpreplay = "tcpreplay" hexedit = "hexer" tshark = "tshark" wireshark = "wireshark" ifconfig = "ifconfig" class ConfigFieldList: def __init__(self): self.fields = set() self.layers = set() @staticmethod def _is_field(f): return hasattr(f, "owners") def _recalc_layer_list(self): self.layers = {owner for f in self.fields for owner in f.owners} def add(self, *flds): self.fields |= {f for f in flds if self._is_field(f)} self._recalc_layer_list() def remove(self, *flds): self.fields -= set(flds) self._recalc_layer_list() def __contains__(self, elt): if isinstance(elt, base_classes.Packet_metaclass): return elt in self.layers return elt in self.fields def __repr__(self): return "<%s [%s]>" % (self.__class__.__name__, " ".join(str(x) for x in self.fields)) # noqa: E501 class Emphasize(ConfigFieldList): pass class Resolve(ConfigFieldList): pass class Num2Layer: def __init__(self): self.num2layer = {} self.layer2num = {} def register(self, num, layer): self.register_num2layer(num, layer) self.register_layer2num(num, layer) def register_num2layer(self, num, layer): self.num2layer[num] = layer def register_layer2num(self, num, layer): self.layer2num[layer] = num def __getitem__(self, item): if isinstance(item, 
base_classes.Packet_metaclass): return self.layer2num[item] return self.num2layer[item] def __contains__(self, item): if isinstance(item, base_classes.Packet_metaclass): return item in self.layer2num return item in self.num2layer def get(self, item, default=None): return self[item] if item in self else default def __repr__(self): lst = [] for num, layer in six.iteritems(self.num2layer): if layer in self.layer2num and self.layer2num[layer] == num: dir = "<->" else: dir = " ->" lst.append((num, "%#6x %s %-20s (%s)" % (num, dir, layer.__name__, layer._name))) for layer, num in six.iteritems(self.layer2num): if num not in self.num2layer or self.num2layer[num] != layer: lst.append((num, "%#6x <- %-20s (%s)" % (num, layer.__name__, layer._name))) lst.sort() return "\n".join(y for x, y in lst) class LayersList(list): def __init__(self): list.__init__(self) self.ldict = {} def __repr__(self): return "\n".join("%-20s: %s" % (l.__name__, l.name) for l in self) def register(self, layer): self.append(layer) if layer.__module__ not in self.ldict: self.ldict[layer.__module__] = [] self.ldict[layer.__module__].append(layer) def layers(self): result = [] # This import may feel useless, but it is required for the eval below import scapy # noqa: F401 for lay in self.ldict: doc = eval(lay).__doc__ result.append((lay, doc.strip().split("\n")[0] if doc else lay)) return result class CommandsList(list): def __repr__(self): s = [] for l in sorted(self, key=lambda x: x.__name__): doc = l.__doc__.split("\n")[0] if l.__doc__ else "--" s.append("%-20s: %s" % (l.__name__, doc)) return "\n".join(s) def register(self, cmd): self.append(cmd) return cmd # return cmd so that method can be used as a decorator def lsc(): """Displays Scapy's default commands""" print(repr(conf.commands)) class CacheInstance(dict, object): __slots__ = ["timeout", "name", "_timetable", "__dict__"] def __init__(self, name="noname", timeout=None): self.timeout = timeout self.name = name self._timetable = {} def flush(self): self.__init__(name=self.name, timeout=self.timeout) def __getitem__(self, item): if item in self.__slots__: return object.__getattribute__(self, item) val = dict.__getitem__(self, item) if self.timeout is not None: t = self._timetable[item] if time.time() - t > self.timeout: raise KeyError(item) return val def get(self, item, default=None): # overloading this method is needed to force the dict to go through # the timetable check try: return self[item] except KeyError: return default def __setitem__(self, item, v): if item in self.__slots__: return object.__setattr__(self, item, v) self._timetable[item] = time.time() dict.__setitem__(self, item, v) def update(self, other): for key, value in six.iteritems(other): # We only update an element from `other` either if it does # not exist in `self` or if the entry in `self` is older. 
if key not in self or self._timetable[key] < other._timetable[key]: dict.__setitem__(self, key, value) self._timetable[key] = other._timetable[key] def iteritems(self): if self.timeout is None: return six.iteritems(self.__dict__) t0 = time.time() return ((k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501 def iterkeys(self): if self.timeout is None: return six.iterkeys(self.__dict__) t0 = time.time() return (k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501 def __iter__(self): return six.iterkeys(self.__dict__) def itervalues(self): if self.timeout is None: return six.itervalues(self.__dict__) t0 = time.time() return (v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501 def items(self): if self.timeout is None: return dict.items(self) t0 = time.time() return [(k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501 def keys(self): if self.timeout is None: return dict.keys(self) t0 = time.time() return [k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501 def values(self): if self.timeout is None: return list(six.itervalues(self)) t0 = time.time() return [v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501 def __len__(self): if self.timeout is None: return dict.__len__(self) return len(self.keys()) def summary(self): return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout) # noqa: E501 def __repr__(self): s = [] if self: mk = max(len(k) for k in six.iterkeys(self.__dict__)) fmt = "%%-%is %%s" % (mk + 1) for item in six.iteritems(self.__dict__): s.append(fmt % item) return "\n".join(s) class NetCache: def __init__(self): self._caches_list = [] def add_cache(self, cache): self._caches_list.append(cache) setattr(self, cache.name, cache) def new_cache(self, name, timeout=None): c = CacheInstance(name=name, timeout=timeout) self.add_cache(c) def __delattr__(self, attr): raise AttributeError("Cannot delete attributes") def update(self, other): for co in other._caches_list: if hasattr(self, co.name): getattr(self, co.name).update(co) else: self.add_cache(co.copy()) def flush(self): for c in self._caches_list: c.flush() def __repr__(self): return "\n".join(c.summary() for c in self._caches_list) def _version_checker(module, minver): """Checks that module has a higher version that minver. params: - module: a module to test - minver: a tuple of versions """ # We could use LooseVersion, but distutils imports imp which is deprecated version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?' version_tags = re.match(version_regexp, module.__version__) if not version_tags: return False version_tags = version_tags.group(1).split(".") version_tags = tuple(int(x) for x in version_tags) return version_tags >= minver def isCryptographyValid(): """ Check if the cryptography library is present, and if it is recent enough for most usages in scapy (v1.7 or later). 
""" try: import cryptography except ImportError: return False return _version_checker(cryptography, (1, 7)) def isCryptographyRecent(): """ Check if the cryptography library is recent (2.0 and later) """ try: import cryptography except ImportError: return False return _version_checker(cryptography, (2, 0)) def isCryptographyAdvanced(): """ Check if the cryptography library is present, and if it supports X25519, ChaCha20Poly1305 and such (v2.0 or later). """ try: from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey # noqa: E501 X25519PrivateKey.generate() except Exception: return False else: return True def isPyPy(): """Returns either scapy is running under PyPy or not""" try: import __pypy__ # noqa: F401 return True except ImportError: return False def _prompt_changer(attr, val): """Change the current prompt theme""" try: sys.ps1 = conf.color_theme.prompt(conf.prompt) except Exception: pass try: apply_ipython_style(get_ipython()) except NameError: pass def _set_conf_sockets(): """Populate the conf.L2Socket and conf.L3Socket according to the various use_* parameters """ from scapy.main import _load if conf.use_bpf and not BSD: Interceptor.set_from_hook(conf, "use_bpf", False) raise ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !") if not conf.use_pcap and SOLARIS: Interceptor.set_from_hook(conf, "use_pcap", True) raise ScapyInvalidPlatformException( "Scapy only supports libpcap on Solaris !" ) # we are already in an Interceptor hook, use Interceptor.set_from_hook if conf.use_pcap or conf.use_dnet: try: from scapy.arch.pcapdnet import L2pcapListenSocket, L2pcapSocket, \ L3pcapSocket except (OSError, ImportError): warning("No libpcap provider available ! pcap won't be used") Interceptor.set_from_hook(conf, "use_pcap", False) else: conf.L3socket = L3pcapSocket conf.L3socket6 = functools.partial(L3pcapSocket, filter="ip6") conf.L2socket = L2pcapSocket conf.L2listen = L2pcapListenSocket # Update globals _load("scapy.arch.pcapdnet") return if conf.use_bpf: from scapy.arch.bpf.supersocket import L2bpfListenSocket, \ L2bpfSocket, L3bpfSocket conf.L3socket = L3bpfSocket conf.L3socket6 = functools.partial(L3bpfSocket, filter="ip6") conf.L2socket = L2bpfSocket conf.L2listen = L2bpfListenSocket # Update globals _load("scapy.arch.bpf") return if LINUX: from scapy.arch.linux import L3PacketSocket, L2Socket, L2ListenSocket conf.L3socket = L3PacketSocket conf.L3socket6 = functools.partial(L3PacketSocket, filter="ip6") conf.L2socket = L2Socket conf.L2listen = L2ListenSocket # Update globals _load("scapy.arch.linux") return if WINDOWS: from scapy.arch.windows import _NotAvailableSocket from scapy.arch.windows.native import L3WinSocket, L3WinSocket6 conf.L3socket = L3WinSocket conf.L3socket6 = L3WinSocket6 conf.L2socket = _NotAvailableSocket conf.L2listen = _NotAvailableSocket # No need to update globals on Windows return from scapy.supersocket import L3RawSocket from scapy.layers.inet6 import L3RawSocket6 conf.L3socket = L3RawSocket conf.L3socket6 = L3RawSocket6 def _socket_changer(attr, val): if not isinstance(val, bool): raise TypeError("This argument should be a boolean") dependencies = { # Things that will be turned off "use_pcap": ["use_bpf"], "use_bpf": ["use_pcap"], } restore = {k: getattr(conf, k) for k in dependencies} del restore[attr] # This is handled directly by _set_conf_sockets if val: # Only if True for param in dependencies[attr]: Interceptor.set_from_hook(conf, param, False) try: _set_conf_sockets() except (ScapyInvalidPlatformException, ImportError) 
as e: for key, value in restore.items(): Interceptor.set_from_hook(conf, key, value) if isinstance(e, ScapyInvalidPlatformException): raise def _loglevel_changer(attr, val): """Handle a change of conf.logLevel""" log_scapy.setLevel(val) class Conf(ConfClass): """This object contains the configuration of Scapy. session : filename where the session will be saved interactive_shell : can be "ipython", "python" or "auto". Default: Auto stealth : if 1, prevents any unwanted packet to go out (ARP, DNS, ...) checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received # noqa: E501 if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks) # noqa: E501 if 2, strictly checks that they are equals checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks) # noqa: E501 checkIPinIP: if True, checks that IP-in-IP layers match. If False, do not check IP layers that encapsulates another IP layer check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation # noqa: E501 iff : selects the default output interface for srp() and sendp(). default:"eth0") # noqa: E501 verb : level of verbosity, from 0 (almost mute) to 3 (verbose) promisc : default mode for listening socket (to get answers if you spoof on a lan) # noqa: E501 sniff_promisc : default mode for sniff() filter : bpf filter added to every sniffing socket to exclude traffic from analysis # noqa: E501 histfile : history file padding : includes padding in disassembled packets except_filter : BPF filter for packets to ignore debug_match : when 1, store received packet that are not matched into debug.recv # noqa: E501 route : holds the Scapy routing table and provides methods to manipulate it warning_threshold : how much time between warnings from the same place ASN1_default_codec: Codec used by default for ASN1 objects mib : holds MIB direct access dictionary resolve : holds list of fields for which resolution should be done noenum : holds list of enum fields for which conversion to string should NOT be done # noqa: E501 AS_resolver: choose the AS resolver class to use extensions_paths: path or list of paths where extensions are to be looked for contribs : a dict which can be used by contrib layers to store local configuration # noqa: E501 debug_tls:When 1, print some TLS session secrets when they are computed. recv_poll_rate: how often to check for new packets. Defaults to 0.05s. 
""" version = ReadOnlyAttribute("version", VERSION) session = "" interactive = False interactive_shell = "" stealth = "not implemented" iface = None iface6 = None layers = LayersList() commands = CommandsList() dot15d4_protocol = None # Used in dot15d4.py logLevel = Interceptor("logLevel", log_scapy.level, _loglevel_changer) checkIPID = False checkIPsrc = True checkIPaddr = True checkIPinIP = True check_TCPerror_seqack = False verb = 2 prompt = Interceptor("prompt", ">>> ", _prompt_changer) promisc = True sniff_promisc = 1 raw_layer = None raw_summary = False default_l2 = None l2types = Num2Layer() l3types = Num2Layer() L3socket = None L3socket6 = None L2socket = None L2listen = None BTsocket = None USBsocket = None min_pkt_size = 60 bufsize = 2**16 histfile = os.getenv('SCAPY_HISTFILE', os.path.join(os.path.expanduser("~"), ".scapy_history")) padding = 1 except_filter = "" debug_match = False debug_tls = False wepkey = "" cache_iflist = {} route = None # Filed by route.py route6 = None # Filed by route6.py auto_fragment = True debug_dissector = False color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer) warning_threshold = 5 prog = ProgPath() resolve = Resolve() noenum = Resolve() emph = Emphasize() use_pypy = ReadOnlyAttribute("use_pypy", isPyPy()) use_pcap = Interceptor( "use_pcap", os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y"), _socket_changer ) # XXX use_dnet is deprecated use_dnet = os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y") use_bpf = Interceptor("use_bpf", False, _socket_changer) use_npcap = False ipv6_enabled = socket.has_ipv6 extensions_paths = "." stats_classic_protocols = [] stats_dot11_protocols = [] temp_files = [] netcache = NetCache() geoip_city = None # can, tls, http are not loaded by default load_layers = ['bluetooth', 'bluetooth4LE', 'dhcp', 'dhcp6', 'dns', 'dot11', 'dot15d4', 'eap', 'gprs', 'hsrp', 'inet', 'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp', 'llmnr', 'lltd', 'mgcp', 'mobileip', 'netbios', 'netflow', 'ntp', 'ppi', 'ppp', 'pptp', 'radius', 'rip', 'rtp', 'sctp', 'sixlowpan', 'skinny', 'smb', 'snmp', 'tftp', 'vrrp', 'vxlan', 'x509', 'zigbee'] contribs = dict() crypto_valid = isCryptographyValid() crypto_valid_recent = isCryptographyRecent() crypto_valid_advanced = crypto_valid_recent and isCryptographyAdvanced() fancy_prompt = True auto_crop_tables = True recv_poll_rate = 0.05 def __getattr__(self, attr): # Those are loaded on runtime to avoid import loops if attr == "manufdb": from scapy.data import MANUFDB return MANUFDB if attr == "ethertypes": from scapy.data import ETHER_TYPES return ETHER_TYPES if attr == "protocols": from scapy.data import IP_PROTOS return IP_PROTOS if attr == "services_udp": from scapy.data import UDP_SERVICES return UDP_SERVICES if attr == "services_tcp": from scapy.data import TCP_SERVICES return TCP_SERVICES return object.__getattr__(self, attr) if not Conf.ipv6_enabled: log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.") # noqa: E501 for m in ["inet6", "dhcp6"]: if m in Conf.load_layers: Conf.load_layers.remove(m) conf = Conf() def crypto_validator(func): """ This a decorator to be used for any method relying on the cryptography library. # noqa: E501 Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'. """ def func_in(*args, **kwargs): if not conf.crypto_valid: raise ImportError("Cannot execute crypto-related method! " "Please install python-cryptography v1.7 or later.") # noqa: E501 return func(*args, **kwargs) return func_in
33.584187
122
0.624161
15,639
0.681794
0
0
203
0.00885
0
0
5,969
0.260223
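The scapy/config.py record above wires options such as use_pcap, use_bpf and prompt through the Interceptor descriptor, whose setter runs a hook on every assignment. The following is a minimal standalone sketch of that hook-on-set pattern; it does not import Scapy, and HookedAttribute, DemoConf and _on_change are illustrative names only.

# Standalone sketch (not Scapy API): a data descriptor whose __set__ runs a
# hook, mirroring the Interceptor pattern in the record above.
class HookedAttribute(object):
    def __init__(self, name, default, hook):
        self.name = name
        self.attr = "_hooked_%s" % name
        self.default = default
        self.hook = hook

    def __get__(self, obj, objtype=None):
        # Fall back to the default until the attribute is first assigned.
        return getattr(obj, self.attr, self.default)

    def __set__(self, obj, value):
        setattr(obj, self.attr, value)
        self.hook(self.name, value)   # side effect on every assignment


def _on_change(name, value):
    print("%s changed to %r" % (name, value))


class DemoConf(object):
    verb = HookedAttribute("verb", 2, _on_change)


demo = DemoConf()
print(demo.verb)   # 2 (default; hook not called yet)
demo.verb = 3      # prints: verb changed to 3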
d5e70f438163ee68472f800dcc1f45bfb446e30f
5,797
py
Python
tests/base/test_server.py
Prodigy123/rasa_nlu_zh
b85717063a493f6b148504ee550a0642c6c379ae
[ "Apache-2.0" ]
4
2017-07-20T03:06:29.000Z
2021-04-20T03:25:17.000Z
tests/base/test_server.py
imsakshi/rasa_nlu
6dafc37825b99139248fdea9e9745f416734d4dd
[ "Apache-2.0" ]
null
null
null
tests/base/test_server.py
imsakshi/rasa_nlu
6dafc37825b99139248fdea9e9745f416734d4dd
[ "Apache-2.0" ]
2
2017-10-03T00:56:22.000Z
2018-08-15T10:41:41.000Z
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import import tempfile import pytest import time from treq.testing import StubTreq from rasa_nlu.config import RasaNLUConfig import json import io from tests import utilities from tests.utilities import ResponseTest from rasa_nlu.server import RasaNLU @pytest.fixture(scope="module") def app(tmpdir_factory): """ This fixture makes use of the IResource interface of the Klein application to mock Rasa HTTP server. :param component_builder: :return: """ _, nlu_log_file = tempfile.mkstemp(suffix="_rasa_nlu_logs.json") _config = { 'write': nlu_log_file, 'port': -1, # unused in test app "pipeline": "keyword", "path": tmpdir_factory.mktemp("projects").strpath, "server_model_dirs": {}, "data": "./data/demo-restaurants.json", "emulate": "wit", "max_training_processes": 1 } config = RasaNLUConfig(cmdline_args=_config) rasa = RasaNLU(config, testing=True) return StubTreq(rasa.app.resource()) @pytest.fixture def rasa_default_train_data(): with io.open('data/examples/rasa/demo-rasa.json', encoding='utf-8') as train_file: return json.loads(train_file.read()) @pytest.inlineCallbacks def test_root(app): response = yield app.get("http://dummy_uri/") content = yield response.text() assert response.code == 200 and content.startswith("hello") @pytest.inlineCallbacks def test_status(app): response = yield app.get("http://dummy_uri/status") rjs = yield response.json() assert response.code == 200 and "available_projects" in rjs assert "default" in rjs["available_projects"] @pytest.inlineCallbacks def test_config(app): response = yield app.get("http://dummy_uri/config") assert response.code == 200 @pytest.inlineCallbacks def test_version(app): response = yield app.get("http://dummy_uri/version") rjs = yield response.json() assert response.code == 200 and "version" in rjs @pytest.mark.parametrize("response_test", [ ResponseTest( "http://dummy_uri/parse?q=hello", [{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}] ), ResponseTest( "http://dummy_uri/parse?query=hello", [{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}] ), ResponseTest( "http://dummy_uri/parse?q=hello ńöñàśçií", [{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello ńöñàśçií"}] ), ResponseTest( "http://dummy_uri/parse?q=", [{"entities": {}, "confidence": 0.0, "intent": None, "_text": ""}] ), ]) @pytest.inlineCallbacks def test_get_parse(app, response_test): response = yield app.get(response_test.endpoint) rjs = yield response.json() assert response.code == 200 assert len(rjs) == 1 assert all(prop in rjs[0] for prop in ['entities', 'intent', '_text', 'confidence']) @pytest.mark.parametrize("response_test", [ ResponseTest( "http://dummy_uri/parse", [{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}], payload={"q": "hello"} ), ResponseTest( "http://dummy_uri/parse", [{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}], payload={"query": "hello"} ), ResponseTest( "http://dummy_uri/parse", [{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello ńöñàśçií"}], payload={"q": "hello ńöñàśçií"} ), ]) @pytest.inlineCallbacks def test_post_parse(app, response_test): response = yield app.post(response_test.endpoint, data=json.dumps(response_test.payload), content_type='application/json') rjs = yield response.json() assert response.code == 200 assert len(rjs) == 1 assert all(prop 
in rjs[0] for prop in ['entities', 'intent', '_text', 'confidence']) @utilities.slowtest @pytest.inlineCallbacks def test_post_train(app, rasa_default_train_data): response = app.post("http://dummy_uri/train", data=json.dumps(rasa_default_train_data), content_type='application/json') time.sleep(3) app.flush() response = yield response rjs = yield response.json() assert response.code == 404, "A project name to train must be specified" assert "error" in rjs @utilities.slowtest @pytest.inlineCallbacks def test_post_train_internal_error(app, rasa_default_train_data): response = app.post("http://dummy_uri/train?project=test", data=json.dumps({"data": "dummy_data_for_triggering_an_error"}), content_type='application/json') time.sleep(3) app.flush() response = yield response rjs = yield response.json() assert response.code == 500, "The training data format is not valid" assert "error" in rjs @pytest.inlineCallbacks def test_model_hot_reloading(app, rasa_default_train_data): query = "http://dummy_uri/parse?q=hello&project=my_keyword_model" response = yield app.get(query) assert response.code == 404, "Project should not exist yet" train_u = "http://dummy_uri/train?project=my_keyword_model&pipeline=keyword" response = app.post(train_u, data=json.dumps(rasa_default_train_data), content_type='application/json') time.sleep(3) app.flush() response = yield response assert response.code == 200, "Training should end successfully" response = yield app.get(query) assert response.code == 200, "Project should now exist after it got trained"
32.205556
104
0.656719
0
0
2,893
0.496652
5,369
0.921717
0
0
1,825
0.313305
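The rasa_nlu test module above exercises a stubbed HTTP server through parametrized ResponseTest cases. Below is a framework-free sketch of that parametrization style; Case and fake_get are stand-ins invented for the sketch and are not part of rasa_nlu, treq or Klein.

# Sketch only: plain pytest parametrization in the style of the ResponseTest
# cases above, with a fake client instead of StubTreq/twisted.
import pytest


class Case(object):
    def __init__(self, endpoint, expected, payload=None):
        self.endpoint = endpoint
        self.expected = expected
        self.payload = payload


def fake_get(endpoint):
    # Pretend the server answers every query with one wit-style parse result.
    query = endpoint.split("q=", 1)[-1]
    return [{"_text": query, "intent": "greet", "entities": {}, "confidence": 1.0}]


@pytest.mark.parametrize("case", [
    Case("/parse?q=hello",
         [{"_text": "hello", "intent": "greet", "entities": {}, "confidence": 1.0}]),
])
def test_get_parse_sketch(case):
    result = fake_get(case.endpoint)
    assert len(result) == 1
    assert all(k in result[0] for k in ("entities", "intent", "_text", "confidence"))
    assert result == case.expected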
d5e7507528f57c95fde0e247aa2531f1d8579112
15,277
py
Python
bugsnag/configuration.py
ForroKulcs/bugsnag-python
107c1add31a2202cc08ef944aa00ab96996b247a
[ "MIT" ]
null
null
null
bugsnag/configuration.py
ForroKulcs/bugsnag-python
107c1add31a2202cc08ef944aa00ab96996b247a
[ "MIT" ]
null
null
null
bugsnag/configuration.py
ForroKulcs/bugsnag-python
107c1add31a2202cc08ef944aa00ab96996b247a
[ "MIT" ]
null
null
null
import os import platform import socket import sysconfig from typing import List, Any, Tuple, Union import warnings from bugsnag.sessiontracker import SessionMiddleware from bugsnag.middleware import DefaultMiddleware, MiddlewareStack from bugsnag.utils import (fully_qualified_class_name, validate_str_setter, validate_bool_setter, validate_iterable_setter, validate_required_str_setter) from bugsnag.delivery import (create_default_delivery, DEFAULT_ENDPOINT, DEFAULT_SESSIONS_ENDPOINT) from bugsnag.uwsgi import warn_if_running_uwsgi_without_threads try: from contextvars import ContextVar _request_info = ContextVar('bugsnag-request', default=None) # type: ignore except ImportError: from bugsnag.utils import ThreadContextVar _request_info = ThreadContextVar('bugsnag-request', default=None) # type: ignore # noqa: E501 __all__ = ('Configuration', 'RequestConfiguration') class Configuration: """ Global app-level Bugsnag configuration settings. """ def __init__(self): self.api_key = os.environ.get('BUGSNAG_API_KEY', None) self.release_stage = os.environ.get("BUGSNAG_RELEASE_STAGE", "production") self.notify_release_stages = None self.auto_notify = True self.send_code = True self.send_environment = False self.asynchronous = True self.delivery = create_default_delivery() self.lib_root = sysconfig.get_path('purelib') self.project_root = os.getcwd() self.app_type = None self.app_version = None self.params_filters = ["password", "password_confirmation", "cookie", "authorization"] self.ignore_classes = [ "KeyboardInterrupt", "django.http.Http404", "django.http.response.Http404", ] self.endpoint = DEFAULT_ENDPOINT self.session_endpoint = DEFAULT_SESSIONS_ENDPOINT self.auto_capture_sessions = True self.traceback_exclude_modules = [] self.middleware = MiddlewareStack() self.internal_middleware = MiddlewareStack() self.internal_middleware.append(DefaultMiddleware) self.internal_middleware.append(SessionMiddleware) self.proxy_host = None if not os.getenv("DYNO"): self.hostname = socket.gethostname() else: self.hostname = None self.runtime_versions = {"python": platform.python_version()} def configure(self, api_key=None, app_type=None, app_version=None, asynchronous=None, auto_notify=None, auto_capture_sessions=None, delivery=None, endpoint=None, hostname=None, ignore_classes=None, lib_root=None, notify_release_stages=None, params_filters=None, project_root=None, proxy_host=None, release_stage=None, send_code=None, send_environment=None, session_endpoint=None, traceback_exclude_modules=None): """ Validate and set configuration options. Will warn if an option is of an incorrect type. 
""" if api_key is not None: self.api_key = api_key if app_type is not None: self.app_type = app_type if app_version is not None: self.app_version = app_version if asynchronous is not None: self.asynchronous = asynchronous if auto_notify is not None: self.auto_notify = auto_notify if auto_capture_sessions is not None: self.auto_capture_sessions = auto_capture_sessions if delivery is not None: self.delivery = delivery if endpoint is not None: self.endpoint = endpoint if hostname is not None: self.hostname = hostname if ignore_classes is not None: self.ignore_classes = ignore_classes if lib_root is not None: self.lib_root = lib_root if notify_release_stages is not None: self.notify_release_stages = notify_release_stages if params_filters is not None: self.params_filters = params_filters if project_root is not None: self.project_root = project_root if proxy_host is not None: self.proxy_host = proxy_host if release_stage is not None: self.release_stage = release_stage if send_code is not None: self.send_code = send_code if send_environment is not None: self.send_environment = send_environment if session_endpoint is not None: self.session_endpoint = session_endpoint if traceback_exclude_modules is not None: self.traceback_exclude_modules = traceback_exclude_modules return self def get(self, name): """ Get a single configuration option """ warnings.warn('Using get() to retrieve a Configuration property is ' + 'deprecated in favor of referencing properties directly', DeprecationWarning) return getattr(self, name) @property def api_key(self): """ Unique application identifier """ return self._api_key @api_key.setter # type: ignore @validate_required_str_setter def api_key(self, value: str): self._api_key = value @property def app_type(self): """ Category for the current application or task """ return self._app_type @app_type.setter # type: ignore @validate_str_setter def app_type(self, value: str): self._app_type = value @property def app_version(self): """ Release version of the current application """ return self._app_version @app_version.setter # type: ignore @validate_str_setter def app_version(self, value: str): self._app_version = value @property def asynchronous(self): """ If API requests should be sent asynchronously """ return self._asynchronous @asynchronous.setter # type: ignore @validate_bool_setter def asynchronous(self, value: bool): self._asynchronous = value if value: warn_if_running_uwsgi_without_threads() @property def auto_capture_sessions(self): """ If sessions should be automatically detected and delivered from web request integrations """ return self._auto_capture_sessions @auto_capture_sessions.setter # type: ignore @validate_bool_setter def auto_capture_sessions(self, value: bool): self._auto_capture_sessions = value @property def auto_notify(self): """ If uncaught exceptions should be automatically captured and reported """ return self._auto_notify @auto_notify.setter # type: ignore @validate_bool_setter def auto_notify(self, value: bool): self._auto_notify = value @property def delivery(self): """ Transport mechanism used to make API requests. Implement the Delivery interface to customize how requests are sent. """ return self._delivery @delivery.setter # type: ignore def delivery(self, value): if hasattr(value, 'deliver') and callable(value.deliver): self._delivery = value else: message = ('delivery should implement Delivery interface, got ' + '{0}. 
This will be an error in a future release.') warnings.warn(message.format(type(value).__name__), RuntimeWarning) @property def endpoint(self): """ Event API endpoint. Set this property if using Bugsnag On-Premise. >>> config = Configuration() >>> config.endpoint = 'https://notify.bugsnag.example.co' """ return self._endpoint @endpoint.setter # type: ignore @validate_required_str_setter def endpoint(self, value: str): self._endpoint = value @property def hostname(self): """ The host name of the application server. This value is automatically detected for Heroku applications and included in event device metadata. """ return self._hostname @hostname.setter # type: ignore @validate_str_setter def hostname(self, value: str): self._hostname = value @property def ignore_classes(self): """ Fully qualified class names which should be ignored when capturing uncaught exceptions and other events. KeyboardInterrupt and Http404 exceptions are ignored by default. """ return self._ignore_classes @ignore_classes.setter # type: ignore @validate_iterable_setter def ignore_classes(self, value: Union[List[str], Tuple[str]]): self._ignore_classes = value @property def lib_root(self): """ The path to the Python library. Any traceback frame which contains lib_root as a prefix is considered out-of-project. The prefix is also stripped to make file names easier to read. """ return self._lib_root @lib_root.setter # type: ignore @validate_str_setter def lib_root(self, value: str): self._lib_root = value @property def notify_release_stages(self): """ A list of release_stage values which are permitted to capture and send events and sessions. By default this value is None and all events and sessions are delivered. """ return self._notify_release_stages @notify_release_stages.setter # type: ignore @validate_iterable_setter def notify_release_stages(self, value: List[str]): self._notify_release_stages = value @property def params_filters(self): """ A list of filters applied to event metadata to prevent the values from being sent in events. By default the following keys are filtered: * authorization * cookie * password * password_confirmation """ return self._params_filters @params_filters.setter # type: ignore @validate_iterable_setter def params_filters(self, value: List[str]): self._params_filters = value @property def project_root(self): """ The working directory containing the application source code. Traceback file paths which contain this prefix are considered a part of the project. This prefix is also stripped to increase file name readability in traceback lines. """ return self._project_root @project_root.setter # type: ignore @validate_str_setter def project_root(self, value: str): self._project_root = value @property def proxy_host(self): """ The host name of the proxy to use to deliver requests, if any """ return self._proxy_host @proxy_host.setter # type: ignore @validate_str_setter def proxy_host(self, value: str): self._proxy_host = value @property def release_stage(self): """ The development phase of the deployed application. This value is used to differentiate events which occur in production vs development or staging environments. 
""" return self._release_stage @release_stage.setter # type: ignore @validate_str_setter def release_stage(self, value: str): self._release_stage = value @property def send_code(self): """ If the source code lines immediately surrounding traceback locations should be sent with events """ return self._send_code @send_code.setter # type: ignore @validate_bool_setter def send_code(self, value: bool): self._send_code = value @property def send_environment(self): """ If the request environment should be automatically collected and attached to events """ return self._send_environment @send_environment.setter # type: ignore @validate_bool_setter def send_environment(self, value: bool): self._send_environment = value @property def session_endpoint(self): """ Sessions API endpoint. Set this property if using Bugsnag On-Premise. >>> config = Configuration() >>> config.session_endpoint = 'https://sessions.bugsnag.example.co' """ return self._session_endpoint @session_endpoint.setter # type: ignore @validate_required_str_setter def session_endpoint(self, value: str): self._session_endpoint = value @property def traceback_exclude_modules(self): """ Modules which should be stripped from event tracebacks entirely """ return self._traceback_exclude_modules @traceback_exclude_modules.setter # type: ignore @validate_iterable_setter def traceback_exclude_modules(self, value: List[str]): self._traceback_exclude_modules = value def should_notify(self) -> bool: return self.notify_release_stages is None or \ (isinstance(self.notify_release_stages, (tuple, list)) and self.release_stage in self.notify_release_stages) def should_ignore(self, exception: BaseException) -> bool: return self.ignore_classes is not None and \ fully_qualified_class_name(exception) in self.ignore_classes class RequestConfiguration: """ Per-request Bugsnag configuration settings. """ @classmethod def get_instance(cls): """ Get this thread's instance of the RequestConfiguration. """ try: instance = _request_info.get() except LookupError: instance = None if instance is None: instance = RequestConfiguration() _request_info.set(instance) # type: ignore return instance @classmethod def clear(cls): """ Clear this thread's instance of the RequestConfiguration. """ _request_info.set(None) def __init__(self): self.context = None self.grouping_hash = None self.user = {} self.metadata = {} # legacy fields self.user_id = None self.extra_data = {} self.request_data = {} self.environment_data = {} self.session_data = {} def get(self, name) -> Any: """ Get a single configuration option """ return getattr(self, name) def configure(self, **options): """ Set one or more configuration settings. """ for name, value in options.items(): setattr(self, name, value) return self @property def meta_data(self) -> Any: warnings.warn('RequestConfiguration.meta_data has been renamed to ' + '"metadata"', DeprecationWarning) return self.metadata
31.695021
99
0.636185
14,287
0.935197
0
0
8,563
0.560516
0
0
4,459
0.291877
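The bugsnag/configuration.py record above stores the per-request RequestConfiguration in a ContextVar and creates it lazily in get_instance(). Here is a standalone sketch of that per-context singleton built only on the standard contextvars module; RequestState is an illustrative name, not Bugsnag's API.

# Sketch of the get_instance()/clear() pattern used above, on top of
# contextvars (Python 3.7+); not the bugsnag package itself.
from contextvars import ContextVar

_request_info = ContextVar("request-info", default=None)


class RequestState(object):
    def __init__(self):
        self.metadata = {}

    @classmethod
    def get_instance(cls):
        instance = _request_info.get()
        if instance is None:
            instance = cls()          # created lazily, once per context
            _request_info.set(instance)
        return instance

    @classmethod
    def clear(cls):
        _request_info.set(None)


state = RequestState.get_instance()
state.metadata["request"] = {"path": "/checkout"}
assert RequestState.get_instance() is state   # same object within this context
RequestState.clear()
assert RequestState.get_instance() is not state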
d5e7f6433ef2aafee2885217cc2a65201e60c31e
587
py
Python
secret_injector/secret.py
failk8s/failk8s-operator
457890a09a2551b9002eec73386b11a37469569f
[ "Apache-2.0" ]
null
null
null
secret_injector/secret.py
failk8s/failk8s-operator
457890a09a2551b9002eec73386b11a37469569f
[ "Apache-2.0" ]
null
null
null
secret_injector/secret.py
failk8s/failk8s-operator
457890a09a2551b9002eec73386b11a37469569f
[ "Apache-2.0" ]
null
null
null
import kopf  from .functions import global_logger, reconcile_secret   @kopf.on.event("", "v1", "secrets") def injector_secret_event(type, event, logger, **_):     obj = event["object"]     namespace = obj["metadata"]["namespace"]     name = obj["metadata"]["name"]      # If the secret already exists (indicated by type being None) or the     # secret is added or modified later, do a full reconciliation to     # ensure that, if it now matches, the secret will be injected.      with global_logger(logger):         if type in (None, "ADDED", "MODIFIED"):             reconcile_secret(name, namespace, obj)
30.894737
67
0.67632
0
0
0
0
516
0.879046
0
0
251
0.427598
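The kopf handler in secret_injector/secret.py above reconciles only on None, ADDED or MODIFIED events. The snippet below checks that filtering logic with a fake event dict and a stub reconcile function; it does not import kopf, and should_reconcile and handle are names invented for the sketch.

# Illustrative only: the event-type filter from the handler above, pulled out
# so it can be exercised without a Kubernetes cluster or kopf installed.
def should_reconcile(event_type):
    return event_type in (None, "ADDED", "MODIFIED")


def handle(event_type, event, reconcile):
    obj = event["object"]
    meta = obj["metadata"]
    if should_reconcile(event_type):
        reconcile(meta["name"], meta["namespace"], obj)


calls = []
fake_obj = {"metadata": {"name": "db-creds", "namespace": "apps"}}
handle("ADDED", {"object": fake_obj}, lambda name, ns, obj: calls.append((name, ns)))
handle("DELETED", {"object": fake_obj}, lambda name, ns, obj: calls.append((name, ns)))
assert calls == [("db-creds", "apps")]   # DELETED events are ignored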
d5e86c6edc684a9da3a98d63325e3f3c6ab77abb
25,390
py
Python
src/py/gee/utils.py
openforis/collectearthonline
1af48e373c393a1d8c48b17472f6aa6c41f65769
[ "MIT" ]
null
null
null
src/py/gee/utils.py
openforis/collectearthonline
1af48e373c393a1d8c48b17472f6aa6c41f65769
[ "MIT" ]
null
null
null
src/py/gee/utils.py
openforis/collectearthonline
1af48e373c393a1d8c48b17472f6aa6c41f65769
[ "MIT" ]
null
null
null
import datetime import os import ee import math import sys import json from ee.ee_exception import EEException from gee.inputs import getLandsat, getS1 ########## Helper functions ########## def initialize(ee_account='', ee_key_path=''): try: if ee_account and ee_key_path and os.path.exists(ee_key_path): credentials = ee.ServiceAccountCredentials(ee_account, ee_key_path) ee.Initialize(credentials) else: ee.Initialize() except Exception as e: print(e) def getReducer(reducer): reducerName = reducer.lower() if(reducerName == 'min'): return ee.Reducer.min() elif (reducerName == 'max'): return ee.Reducer.max() elif (reducerName == 'mean'): return ee.Reducer.mean() elif (reducerName == 'mode'): return ee.Reducer.mode() elif (reducerName == 'first'): return ee.Reducer.first() elif (reducerName == 'last'): return ee.Reducer.last() elif (reducerName == 'sum'): return ee.Reducer.sum() else: return ee.Reducer.median() def reduceIC(imageCollection, reducer): reducerName = reducer.lower() if(reducerName == 'min'): return imageCollection.min() elif (reducerName == 'max'): return imageCollection.max() elif (reducerName == 'mean'): return imageCollection.mean() elif (reducerName == 'mode'): return imageCollection.mode() elif (reducerName == 'mosaic'): return imageCollection.mosaic() elif (reducerName == 'first'): return imageCollection.first() elif (reducerName == 'sum'): return imageCollection.sum() else: return imageCollection.median() def safeParseJSON(val): if isinstance(val, dict): return val else: try: return json.loads(val) except Exception as e: try: return json.loads(val.replace("'", "\"")) except Exception as e: return {} ########## Helper routes ########## def listAvailableBands(name, assetType): eeImage = None if assetType == "imageCollection": eeImage = ee.ImageCollection(name).first() else: eeImage = ee.Image(name) return { 'bands': eeImage.bandNames().getInfo(), 'imageName': name } ########## ee.Image ########## def imageToMapId(image, visParams): eeImage = ee.Image(image) mapId = eeImage.getMapId(visParams) # TODO, just return URL so the routes are easier to deduce whats being returned. return { 'url': mapId['tile_fetcher'].url_format } ########## ee.ImageCollection ########## def imageCollectionToMapId(assetId, visParams, reducer, startDate, endDate): eeCollection = ee.ImageCollection(assetId) if (startDate and endDate): eeFilterDate = ee.Filter.date(startDate, endDate) eeCollection = eeCollection.filter(eeFilterDate) reducedImage = ee.Image(reduceIC(eeCollection, reducer)) return imageToMapId(reducedImage, visParams) # TODO, should we allow user to select first cloud free image again? 
def firstCloudFreeImageInMosaicToMapId(assetId, visParams, startDate, endDate): skipCloudMask = False eeCollection = ee.ImageCollection(assetId) lowerAsset = assetId.lower() if("b2" not in visParams["bands"].lower()): skipCloudMask = True elif ("lc8" in lowerAsset): skipCloudMask = False elif ("le7" in lowerAsset): skipCloudMask = False elif ("lt5" in lowerAsset): skipCloudMask = False else: skipCloudMask = True if (startDate and endDate): eeFilterDate = ee.Filter.date(startDate, endDate) eeCollection = eeCollection.filter(eeFilterDate) eeFirstImage = ee.Image(eeCollection.mosaic()) try: if(skipCloudMask == False): sID = '' if ("lc8" in lowerAsset): sID = 'OLI_TIRS' elif ("le7" in lowerAsset): sID = 'ETM' elif ("lt5" in lowerAsset): sID = 'TM' scored = ee.Algorithms.Landsat.simpleCloudScore( eeFirstImage.set('SENSOR_ID', sID)) mask = scored.select(['cloud']).lte(20) masked = eeFirstImage.updateMask(mask) values = imageToMapId(masked, visParams) else: values = imageToMapId(eeFirstImage, visParams) except EEException as ine: imageToMapId(eeFirstImage, visParams) return values ########## ee.FeatureCollection ########## def getFeatureCollectionTileUrl(featureCollection, field, matchID, visParams): fc = ee.FeatureCollection(featureCollection) single = fc.filter(ee.Filter.equals(field, matchID)) mapId = ee.Image().paint(single, 0, 2).getMapId(visParams) return mapId['tile_fetcher'].url_format ########## Pre defined ee.ImageCollection ########## # Index Image Collection def lsMaskClouds(img, cloudThresh=10): score = ee.Image(1.0) # Clouds are reasonably bright in the blue band. blue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide( ee.Number(0.3).subtract(ee.Number(0.1))) score = score.min(blue_rescale) # Clouds are reasonably bright in all visible bands. visible = img.select('red').add( img.select('green')).add(img.select('blue')) visible_rescale = visible.subtract(ee.Number(0.2)).divide( ee.Number(0.8).subtract(ee.Number(0.2))) score = score.min(visible_rescale) # Clouds are reasonably bright in all infrared bands. infrared = img.select('nir').add( img.select('swir1')).add(img.select('swir2')) infrared_rescale = infrared.subtract(ee.Number(0.3)).divide( ee.Number(0.8).subtract(ee.Number(0.3))) score = score.min(infrared_rescale) # Clouds are reasonably cool in temperature. temp_rescale = img.select('temp').subtract(ee.Number(300)).divide( ee.Number(290).subtract(ee.Number(300))) score = score.min(temp_rescale) # However, clouds are not snow. ndsi = img.normalizedDifference(['green', 'swir1']) ndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide( ee.Number(0.6).subtract(ee.Number(0.8))) score = score.min(ndsi_rescale).multiply(100).byte() mask = score.lt(cloudThresh).rename(['cloudMask']) img = img.updateMask(mask) return img.addBands(score) def s2MaskClouds(img): qa = img.select('QA60') # Bits 10 and 11 are clouds and cirrus, respectively. cloudBitMask = int(math.pow(2, 10)) cirrusBitMask = int(math.pow(2, 11)) # clear if both flags set to zero. 
clear = qa.bitwiseAnd(cloudBitMask).eq(0).And( qa.bitwiseAnd(cirrusBitMask).eq(0)) return img.divide(10000).updateMask(clear).set('system:time_start', img.get('system:time_start')) def bandPassAdjustment(img): keep = img.select(['temp']) bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2'] # linear regression coefficients for adjustment gain = ee.Array([[0.977], [1.005], [0.982], [1.001], [1.001], [0.996]]) bias = ee.Array([[-0.00411], [-0.00093], [0.00094], [-0.00029], [-0.00015], [-0.00097]]) # Make an Array Image, with a 2-D Array per pixel. arrayImage2D = img.select(bands).toArray().toArray(1) # apply correction factors and reproject array to geographic image componentsImage = ee.Image(gain).multiply(arrayImage2D).add(ee.Image(bias)) \ .arrayProject([0]).arrayFlatten([bands]).float() # .set('system:time_start',img.get('system:time_start')); return keep.addBands(componentsImage) def getLandSatMergedCollection(): sensorBandDictLandsatTOA = {'L8': [1, 2, 3, 4, 5, 9, 6], 'L7': [0, 1, 2, 3, 4, 5, 7], 'L5': [0, 1, 2, 3, 4, 5, 6], 'L4': [0, 1, 2, 3, 4, 5, 6], 'S2': [1, 2, 3, 7, 11, 10, 12]} bandNamesLandsatTOA = ['blue', 'green', 'red', 'nir', 'swir1', 'temp', 'swir2'] metadataCloudCoverMax = 100 lt4 = ee.ImageCollection('LANDSAT/LT4_L1T_TOA') \ .filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \ .select(sensorBandDictLandsatTOA['L4'], bandNamesLandsatTOA).map(lsMaskClouds) lt5 = ee.ImageCollection('LANDSAT/LT5_L1T_TOA') \ .filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \ .select(sensorBandDictLandsatTOA['L5'], bandNamesLandsatTOA).map(lsMaskClouds) le7 = ee.ImageCollection('LANDSAT/LE7_L1T_TOA') \ .filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \ .select(sensorBandDictLandsatTOA['L7'], bandNamesLandsatTOA).map(lsMaskClouds) lc8 = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \ .filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \ .select(sensorBandDictLandsatTOA['L8'], bandNamesLandsatTOA).map(lsMaskClouds) s2 = ee.ImageCollection('COPERNICUS/S2') \ .filterMetadata('CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax) \ .map(s2MaskClouds).select(sensorBandDictLandsatTOA['S2'], bandNamesLandsatTOA) \ .map(bandPassAdjustment) return ee.ImageCollection(lt4.merge(lt5).merge(le7).merge(lc8).merge(s2)) def filteredImageNDVIToMapId(startDate, endDate): def calcNDVI(img): return img.expression('(i.nir - i.red) / (i.nir + i.red)', {'i': img}).rename(['NDVI']) \ .set('system:time_start', img.get('system:time_start')) eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate) colorPalette = 'c9c0bf,435ebf,eee8aa,006400' visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette} eviImage = ee.Image(eeCollection.map(calcNDVI).mean()) return imageToMapId(eviImage, visParams) def filteredImageEVIToMapId(startDate, endDate): def calcEVI(img): return img.expression('2.5 * (i.nir - i.red) / (i.nir + 6.0 * i.red - 7.5 * i.blue + 1)', {'i': img}).rename(['EVI']) \ .set('system:time_start', img.get('system:time_start')) eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate) colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16' visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette} eviImage = ee.Image(eeCollection.map(calcEVI).mean()) return imageToMapId(eviImage, visParams) def filteredImageEVI2ToMapId(startDate, endDate): def calcEVI2(img): return img.expression('2.5 * (i.nir - i.red) / (i.nir + 2.4 * i.red + 1)', {'i': 
img}).rename(['EVI2']) \ .set('system:time_start', img.get('system:time_start')) eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate) colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16' visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette} eviImage = ee.Image(eeCollection.map(calcEVI2).mean()) return imageToMapId(eviImage, visParams) def filteredImageNDMIToMapId(startDate, endDate): def calcNDMI(img): return img.expression('(i.nir - i.swir1) / (i.nir + i.swir1)', {'i': img}).rename(['NDMI']) \ .set('system:time_start', img.get('system:time_start')) eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate) colorPalette = '0000FE,2E60FD,31B0FD,00FEFE,50FE00,DBFE66,FEFE00,FFBB00,FF6F00,FE0000' visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette} eviImage = ee.Image(eeCollection.map(calcNDMI).mean()) return imageToMapId(eviImage, visParams) def filteredImageNDWIToMapId(startDate, endDate): def calcNDWI(img): return img.expression('(i.green - i.nir) / (i.green + i.nir)', {'i': img}).rename(['NDWI']) \ .set('system:time_start', img.get('system:time_start')) eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate) colorPalette = '505050,E8E8E8,00FF33,003300' visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette} eviImage = ee.Image(eeCollection.map(calcNDWI).mean()) return imageToMapId(eviImage, visParams) def filteredImageByIndexToMapId(startDate, endDate, index): lowerIndex = index.lower() if (lowerIndex == 'ndvi'): return filteredImageNDVIToMapId(startDate, endDate) elif (lowerIndex == 'evi'): return filteredImageEVIToMapId(startDate, endDate) elif (lowerIndex == 'evi2'): return filteredImageEVI2ToMapId(startDate, endDate) elif (lowerIndex == 'ndmi'): return filteredImageNDMIToMapId(startDate, endDate) elif (lowerIndex == 'ndwi'): return filteredImageNDWIToMapId(startDate, endDate) def filteredImageCompositeToMapId(assetId, visParams, startDate, endDate, metadataCloudCoverMax, simpleCompositeVariable): eeCollection = ee.ImageCollection(assetId) if (startDate and endDate): eeCollection = eeCollection.filterDate(startDate, endDate) eeCollection.filterMetadata( 'CLOUD_COVER', 'less_than', metadataCloudCoverMax ) eeMosaicImage = ee.Algorithms.Landsat.simpleComposite( eeCollection, simpleCompositeVariable, 10, 40, True ) return imageToMapId(eeMosaicImage, visParams) def filteredSentinelComposite(visParams, startDate, endDate, metadataCloudCoverMax): def cloudScore(img): def rescale(img, exp, thresholds): return img.expression(exp, {'img': img}).subtract(thresholds[0]).divide(thresholds[1] - thresholds[0]) score = ee.Image(1.0) score = score.min(rescale(img, 'img.B2', [0.1, 0.3])) score = score.min(rescale(img, 'img.B4 + img.B3 + img.B2', [0.2, 0.8])) score = score.min( rescale(img, 'img.B8 + img.B11 + img.B12', [0.3, 0.8])) ndsi = img.normalizedDifference(['B3', 'B11']) return score.min(rescale(ndsi, 'img', [0.8, 0.6])) def cloudScoreS2(img): rescale = img.divide(10000) score = cloudScore(rescale).multiply(100).rename('cloudscore') return img.addBands(score) sentinel2 = ee.ImageCollection('COPERNICUS/S2') f2017s2 = sentinel2.filterDate(startDate, endDate).filterMetadata( 'CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax) m2017s2 = f2017s2.map(cloudScoreS2) m2017s3 = m2017s2.median() return imageToMapId(m2017s3, visParams) def filteredSentinelSARComposite(visParams, startDate, endDate): def toNatural(img): return 
ee.Image(10).pow(img.divide(10)) def addRatioBands(img): # not using angle band vv = img.select('VV') vh = img.select('VH') vv_vh = vv.divide(vh).rename('VV/VH') vh_vv = vh.divide(vv).rename('VH/VV') return vv.addBands(vh).addBands(vv_vh).addBands(vh_vv) sentinel1 = ee.ImageCollection('COPERNICUS/S1_GRD') sentinel1 = sentinel1.filterDate(startDate, endDate) \ .filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VV')) \ .filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VH')) \ .filter(ee.Filter.eq('instrumentMode', 'IW')) sentinel1 = sentinel1.map(toNatural) sentinel1 = sentinel1.map(addRatioBands) median = sentinel1.median() return imageToMapId(median, visParams) ########## Time Series ########## def getTimeSeriesByCollectionAndIndex(assetId, indexName, scale, coords, startDate, endDate, reducer): geometry = None indexCollection = None if isinstance(coords[0], list): geometry = ee.Geometry.Polygon(coords) else: geometry = ee.Geometry.Point(coords) if indexName != None: indexCollection = ee.ImageCollection(assetId).filterDate( startDate, endDate).select(indexName) else: indexCollection = ee.ImageCollection( assetId).filterDate(startDate, endDate) def getIndex(image): theReducer = getReducer(reducer) if indexName != None: indexValue = image.reduceRegion( theReducer, geometry, scale).get(indexName) else: indexValue = image.reduceRegion(theReducer, geometry, scale) date = image.get('system:time_start') indexImage = ee.Image().set( 'indexValue', [ee.Number(date), indexValue]) return indexImage def getClipped(image): return image.clip(geometry) clippedcollection = indexCollection.map(getClipped) indexCollection1 = clippedcollection.map(getIndex) indexCollection2 = indexCollection1.aggregate_array('indexValue') return indexCollection2.getInfo() def getTimeSeriesByIndex(indexName, scale, coords, startDate, endDate, reducer): bandsByCollection = { 'LANDSAT/LC08/C01/T1_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'], 'LANDSAT/LC08/C01/T2_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'], 'LANDSAT/LE07/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'], 'LANDSAT/LE07/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'], 'LANDSAT/LT05/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'], 'LANDSAT/LT05/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'], 'LANDSAT/LT04/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'], 'LANDSAT/LT04/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'] } indexes = { 'NDVI': '(nir - red) / (nir + red)', 'EVI': '2.5 * (nir - red) / (nir + 6.0 * red - 7.5 * blue + 1)', 'EVI2': '2.5 * (nir - red) / (nir + 2.4 * red + 1)', 'NDMI': '(nir - swir1) / (nir + swir1)', 'NDWI': '(green - nir) / (green + nir)', 'NBR': '(nir - swir2) / (nir + swir2)', 'LSAVI': '((nir - red) / (nir + red + 0.5)) * (1 + 0.5)' } def create(name): def maskClouds(image): def isSet(types): """ https://landsat.usgs.gov/collectionqualityband """ typeByValue = { 'badPixels': 15, 'cloud': 16, 'shadow': 256, 'snow': 1024, 'cirrus': 4096 } anySet = ee.Image(0) for Type in types: anySet = anySet.Or(image.select( 'BQA').bitwiseAnd(typeByValue[Type]).neq(0)) return anySet return image.updateMask(isSet(['badPixels', 'cloud', 'shadow', 'cirrus']).Not()) def toIndex(image): bands = bandsByCollection[name] return image.expression(indexes[indexName], { 'blue': image.select(bands[0]), 'green': image.select(bands[1]), 'red': image.select(bands[2]), 'nir': image.select(bands[3]), 'swir1': image.select(bands[4]), 'swir2': image.select(bands[5]), }).clamp(-1, 1).rename(['index']) def toIndexWithTimeStart(image): time = 
image.get('system:time_start') image = maskClouds(image) return toIndex(image).set('system:time_start', time) # if startDate and endDate: return ee.ImageCollection(name).filterDate(startDate, endDate).filterBounds(geometry).map(toIndexWithTimeStart, True) else: return ee.ImageCollection(name).filterBounds(geometry).map(toIndexWithTimeStart, True) def reduceRegion(image): theReducer = getReducer(reducer) reduced = image.reduceRegion( theReducer, geometry=geometry, scale=scale, maxPixels=1e6) return ee.Feature(None, { 'index': reduced.get('index'), 'timeIndex': [image.get('system:time_start'), reduced.get('index')] }) geometry = None if isinstance(coords[0], list) or isinstance(coords[0], tuple): geometry = ee.Geometry.Polygon(coords) else: geometry = ee.Geometry.Point(coords) collection = ee.ImageCollection([]) for name in bandsByCollection: collection = collection.merge(create(name)) return ee.ImageCollection(ee.ImageCollection(collection).sort('system:time_start').distinct('system:time_start')) \ .map(reduceRegion) \ .filterMetadata('index', 'not_equals', None) \ .aggregate_array('timeIndex') \ .getInfo() ########## Degradation########## def getDegradationTileUrlByDateS1(geometry, date, visParams): imDate = datetime.datetime.strptime(date, "%Y-%m-%d") befDate = imDate - datetime.timedelta(days=1) aftDate = imDate + datetime.timedelta(days=1) if isinstance(geometry[0], list): geometry = ee.Geometry.Polygon(geometry) else: geometry = ee.Geometry.Point(geometry) sentinel1Data = getS1({ "targetBands": ['VV', 'VH', 'VV/VH'], 'region': geometry}) start = befDate.strftime('%Y-%m-%d') end = aftDate.strftime('%Y-%m-%d') selectedImage = sentinel1Data.filterDate(start, end).first() selectedImage = ee.Image(selectedImage) mapparams = selectedImage.getMapId(visParams) return mapparams['tile_fetcher'].url_format def getDegradationPlotsByPointS1(geometry, start, end): if isinstance(geometry[0], list): geometry = ee.Geometry.Polygon(geometry) else: geometry = ee.Geometry.Point(geometry) sentinel1Data = getS1({ "targetBands": ['VV', 'VH', 'VV/VH'], 'region': geometry }).filterDate(start, end) def myimageMapper(img): theReducer = ee.Reducer.mean() indexValue = img.reduceRegion(theReducer, geometry, 30) date = img.get('system:time_start') visParams = {'bands': ['VV', 'VH', 'ratioVVVH'], 'min': [-15, -25, .40], 'max': [0, -10, 1], 'gamma': 1.6} indexImage = ee.Image().set( 'indexValue', [ee.Number(date), indexValue]) return indexImage lsd = sentinel1Data.map(myimageMapper, True) indexCollection2 = lsd.aggregate_array('indexValue') values = indexCollection2.getInfo() return values def getDegradationTileUrlByDate(geometry, date, visParams): imDate = datetime.datetime.strptime(date, "%Y-%m-%d") startDate = imDate - datetime.timedelta(days=1) endDate = imDate + datetime.timedelta(days=1) if isinstance(geometry[0], list): geometry = ee.Geometry.Polygon(geometry) else: geometry = ee.Geometry.Point(geometry) landsatData = getLandsat({ "start": startDate.strftime('%Y-%m-%d'), "end": endDate.strftime('%Y-%m-%d'), "targetBands": ['RED', 'GREEN', 'BLUE', 'SWIR1', 'NIR'], "region": geometry, "sensors": {"l4": False, "l5": False, "l7": False, "l8": True} }) selectedImage = landsatData.first() unmasked = ee.Image(selectedImage).multiply(10000).toInt16().unmask() mapparams = unmasked.getMapId(visParams) return mapparams['tile_fetcher'].url_format def getDegradationPlotsByPoint(geometry, start, end, band): if isinstance(geometry[0], list): geometry = ee.Geometry.Polygon(geometry) else: geometry = ee.Geometry.Point(geometry) 
landsatData = getLandsat({ "start": start, "end": end, "targetBands": [band], "region": geometry, "sensors": {"l4": True, "l5": True, "l7": True, "l8": True} }) def myImageMapper(img): theReducer = ee.Reducer.mean() indexValue = img.reduceRegion(theReducer, geometry, 30) date = img.get('system:time_start') indexImage = ee.Image().set( 'indexValue', [ee.Number(date), indexValue] ) return indexImage lsd = landsatData.map(myImageMapper, True) indexCollection2 = lsd.aggregate_array('indexValue') values = indexCollection2.getInfo() return values ########## Stats ########## def getStatistics(extent): extentGeom = ee.Geometry.Polygon(extent) elev = ee.Image('USGS/GTOPO30') minmaxElev = elev.reduceRegion( ee.Reducer.minMax(), extentGeom, 1000, maxPixels=500000000) minElev = minmaxElev.get('elevation_min').getInfo() maxElev = minmaxElev.get('elevation_max').getInfo() ciesinPopGrid = ee.Image('CIESIN/GPWv4/population-count/2020') popDict = ciesinPopGrid.reduceRegion( ee.Reducer.sum(), extentGeom, maxPixels=500000000) pop = popDict.get('population-count').getInfo() pop = int(pop) return { 'minElev': minElev, 'maxElev': maxElev, 'pop': pop }
38.704268
130
0.605593
0
0
0
0
0
0
0
0
4,657
0.183419
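The gee/utils.py record above maps spectral indices (NDVI, EVI, EVI2, NDMI, NDWI) onto Earth Engine expressions; the underlying arithmetic is plain band algebra. The sketch below evaluates NDVI = (nir - red) / (nir + red) for a single hypothetical pixel without Earth Engine; the reflectance values are made up for illustration.

# Worked example of the NDVI expression used in filteredImageNDVIToMapId,
# computed on plain floats instead of ee.Image bands.
def ndvi(nir, red):
    return (nir - red) / (nir + red)

pixel = {"nir": 0.45, "red": 0.10}       # hypothetical surface reflectances
value = ndvi(pixel["nir"], pixel["red"])
print(round(value, 3))                   # 0.636 -> vegetated pixel; bare soil and water trend toward 0 or below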
d5e8cedec4a5704ab1636f88d9b806e93b86ff8a
1,186
py
Python
userManagement/management/urls.py
shubhamguptaorg/user_managementl
ad98e0e4886d9b0547b05ae424c10d8f6268d470
[ "MIT" ]
null
null
null
userManagement/management/urls.py
shubhamguptaorg/user_managementl
ad98e0e4886d9b0547b05ae424c10d8f6268d470
[ "MIT" ]
4
2021-03-19T03:22:44.000Z
2022-03-11T23:58:10.000Z
userManagement/management/urls.py
shubhamguptaorg/user_managementl
ad98e0e4886d9b0547b05ae424c10d8f6268d470
[ "MIT" ]
null
null
null
from django.contrib import admin from django.urls import path,include from django.views.generic import TemplateView from .views import Index,SignUp,UserDashboard,AdminDashboard,logout,showAdminData,deleteuser,activeUser,deactiveUser,UserDetailEdit,uploadImage # from .views import Index,UserDashboard,SignUp,AdminDashboard app_name='management' urlpatterns = [ # path('',homepage,name="index"), path('',Index.as_view(), name='index'), path('signup',SignUp.as_view(),name="signup"), path('userdashboard',UserDashboard.as_view(),name="userDashboard"), path('admindashboard',AdminDashboard.as_view(),name="adminDashboard"), path('admindashboard/showuserdata/',showAdminData.as_view(),name='showAdminData'), path('admindashboard/showuserdata/deleteuser/<userId>',deleteuser,name='deleteuser'), path('admindashboard/showuserdata/activeUser/<userId>', activeUser, name='activeUser'), path('admindashboard/showuserdata/deactiveUser/<userId>', deactiveUser, name='deactiveUser'), path('uploadimage/',uploadImage,name="uploadImage"), path('editUserDetail/',UserDetailEdit.as_view(),name='userEditDetail'), path('logout',logout,name='logout') ]
49.416667
143
0.764755
0
0
0
0
0
0
0
0
502
0.423272
d5e96b9312873b5f396a18010caddd4d11bd8888
16,962
py
Python
sickbeard/lib/hachoir_parser/container/riff.py
Branlala/docker-sickbeardfr
3ac85092dc4cc8a4171fb3c83e9682162245e13e
[ "MIT" ]
null
null
null
sickbeard/lib/hachoir_parser/container/riff.py
Branlala/docker-sickbeardfr
3ac85092dc4cc8a4171fb3c83e9682162245e13e
[ "MIT" ]
null
null
null
sickbeard/lib/hachoir_parser/container/riff.py
Branlala/docker-sickbeardfr
3ac85092dc4cc8a4171fb3c83e9682162245e13e
[ "MIT" ]
null
null
null
# -*- coding: UTF-8 -*- """ RIFF parser, able to parse: * AVI video container * WAV audio container * CDA file Documents: - libavformat source code from ffmpeg library http://ffmpeg.mplayerhq.hu/ - Video for Windows Programmer's Guide http://www.opennet.ru/docs/formats/avi.txt - What is an animated cursor? http://www.gdgsoft.com/anituner/help/aniformat.htm Authors: * Aurélien Jacobs * Mickaël KENIKSSI * Victor Stinner Changelog: * 2007-03-30: support ACON (animated icons) * 2006-08-08: merge AVI, WAV and CDA parsers into RIFF parser * 2006-08-03: creation of CDA parser by Mickaël KENIKSSI * 2005-06-21: creation of WAV parser by Victor Stinner * 2005-06-08: creation of AVI parser by Victor Stinner and Aurélien Jacobs Thanks to: * Wojtek Kaniewski (wojtekka AT logonet.com.pl) for its CDA file format information """ from lib.hachoir_parser import Parser from lib.hachoir_core.field import (FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile) from lib.hachoir_core.tools import alignValue, humanDuration from lib.hachoir_core.endian import LITTLE_ENDIAN from lib.hachoir_core.text_handler import filesizeHandler, textHandler from lib.hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name from lib.hachoir_parser.image.ico import IcoFile from datetime import timedelta def parseText(self): yield String(self, "text", self["size"].value, strip=" \0", truncate="\0", charset="ISO-8859-1") def parseRawFormat(self, size): yield RawBytes(self, "raw_format", size) def parseVideoFormat(self, size): yield UInt32(self, "video_size", "Video format: Size") yield UInt32(self, "width", "Video format: Width") yield UInt32(self, "height", "Video format: Height") yield UInt16(self, "panes", "Video format: Panes") yield UInt16(self, "depth", "Video format: Depth") yield UInt32(self, "tag1", "Video format: Tag1") yield UInt32(self, "img_size", "Video format: Image size") yield UInt32(self, "xpels_meter", "Video format: XPelsPerMeter") yield UInt32(self, "ypels_meter", "Video format: YPelsPerMeter") yield UInt32(self, "clr_used", "Video format: ClrUsed") yield UInt32(self, "clr_important", "Video format: ClrImportant") def parseAudioFormat(self, size): yield Enum(UInt16(self, "codec", "Audio format: Codec id"), audio_codec_name) yield UInt16(self, "channel", "Audio format: Channels") yield UInt32(self, "sample_rate", "Audio format: Sample rate") yield UInt32(self, "bit_rate", "Audio format: Bit rate") yield UInt16(self, "block_align", "Audio format: Block align") if size >= 16: yield UInt16(self, "bits_per_sample", "Audio format: Bits per sample") if size >= 18: yield UInt16(self, "ext_size", "Audio format: Size of extra information") if size >= 28: # and self["a_channel"].value > 2 yield UInt16(self, "reserved", "Audio format: ") yield UInt32(self, "channel_mask", "Audio format: channels placement bitmask") yield UInt32(self, "subformat", "Audio format: Subformat id") def parseAVIStreamFormat(self): size = self["size"].value strtype = self["../stream_hdr/stream_type"].value TYPE_HANDLER = { "vids": (parseVideoFormat, 40), "auds": (parseAudioFormat, 16) } handler = parseRawFormat if strtype in TYPE_HANDLER: info = TYPE_HANDLER[strtype] if info[1] <= size: handler = info[0] for field in handler(self, size): yield field def parseAVIStreamHeader(self): if self["size"].value != 56: raise ParserError("Invalid stream header size") yield String(self, "stream_type", 4, "Stream type four character code", charset="ASCII") field = String(self, 
"fourcc", 4, "Stream four character code", strip=" \0", charset="ASCII") if self["stream_type"].value == "vids": yield Enum(field, video_fourcc_name, lambda text: text.upper()) else: yield field yield UInt32(self, "flags", "Stream flags") yield UInt16(self, "priority", "Stream priority") yield String(self, "language", 2, "Stream language", charset="ASCII", strip="\0") yield UInt32(self, "init_frames", "InitialFrames") yield UInt32(self, "scale", "Time scale") yield UInt32(self, "rate", "Divide by scale to give frame rate") yield UInt32(self, "start", "Stream start time (unit: rate/scale)") yield UInt32(self, "length", "Stream length (unit: rate/scale)") yield UInt32(self, "buf_size", "Suggested buffer size") yield UInt32(self, "quality", "Stream quality") yield UInt32(self, "sample_size", "Size of samples") yield UInt16(self, "left", "Destination rectangle (left)") yield UInt16(self, "top", "Destination rectangle (top)") yield UInt16(self, "right", "Destination rectangle (right)") yield UInt16(self, "bottom", "Destination rectangle (bottom)") class RedBook(FieldSet): """ RedBook offset parser, used in CD audio (.cda) file """ def createFields(self): yield UInt8(self, "frame") yield UInt8(self, "second") yield UInt8(self, "minute") yield PaddingBytes(self, "notused", 1) def formatSerialNumber(field): """ Format an disc serial number. Eg. 0x00085C48 => "0008-5C48" """ sn = field.value return "%04X-%04X" % (sn >> 16, sn & 0xFFFF) def parseCDDA(self): """ HSG address format: number of 1/75 second HSG offset = (minute*60 + second)*75 + frame + 150 (from RB offset) HSG length = (minute*60 + second)*75 + frame (from RB length) """ yield UInt16(self, "cda_version", "CD file version (currently 1)") yield UInt16(self, "track_no", "Number of track") yield textHandler(UInt32(self, "disc_serial", "Disc serial number"), formatSerialNumber) yield UInt32(self, "hsg_offset", "Track offset (HSG format)") yield UInt32(self, "hsg_length", "Track length (HSG format)") yield RedBook(self, "rb_offset", "Track offset (Red-book format)") yield RedBook(self, "rb_length", "Track length (Red-book format)") def parseWAVFormat(self): size = self["size"].value if size not in (16, 18): self.warning("Format with size of %s bytes is not supported!" 
% size) yield Enum(UInt16(self, "codec", "Audio codec"), audio_codec_name) yield UInt16(self, "nb_channel", "Number of audio channel") yield UInt32(self, "sample_per_sec", "Sample per second") yield UInt32(self, "byte_per_sec", "Average byte per second") yield UInt16(self, "block_align", "Block align") yield UInt16(self, "bit_per_sample", "Bits per sample") def parseWAVFact(self): yield UInt32(self, "nb_sample", "Number of samples in audio stream") def parseAviHeader(self): yield UInt32(self, "microsec_per_frame", "Microsecond per frame") yield UInt32(self, "max_byte_per_sec", "Maximum byte per second") yield NullBytes(self, "reserved", 4) # Flags yield NullBits(self, "reserved[]", 4) yield Bit(self, "has_index") yield Bit(self, "must_use_index") yield NullBits(self, "reserved[]", 2) yield Bit(self, "is_interleaved") yield NullBits(self, "reserved[]", 2) yield Bit(self, "trust_cktype") yield NullBits(self, "reserved[]", 4) yield Bit(self, "was_capture_file") yield Bit(self, "is_copyrighted") yield NullBits(self, "reserved[]", 14) yield UInt32(self, "total_frame", "Total number of frames in the video") yield UInt32(self, "init_frame", "Initial frame (used in interleaved video)") yield UInt32(self, "nb_stream", "Number of streams") yield UInt32(self, "sug_buf_size", "Suggested buffer size") yield UInt32(self, "width", "Width in pixel") yield UInt32(self, "height", "Height in pixel") yield UInt32(self, "scale") yield UInt32(self, "rate") yield UInt32(self, "start") yield UInt32(self, "length") def parseODML(self): yield UInt32(self, "total_frame", "Real number of frame of OpenDML video") padding = self["size"].value - 4 if 0 < padding: yield NullBytes(self, "padding[]", padding) class AVIIndexEntry(FieldSet): size = 16*8 def createFields(self): yield String(self, "tag", 4, "Tag", charset="ASCII") yield UInt32(self, "flags") yield UInt32(self, "start", "Offset from start of movie data") yield UInt32(self, "length") def parseIndex(self): while not self.eof: yield AVIIndexEntry(self, "index[]") class Chunk(FieldSet): TAG_INFO = { # This dictionnary is edited by RiffFile.validate() "LIST": ("list[]", None, "Sub-field list"), "JUNK": ("junk[]", None, "Junk (padding)"), # Metadata "INAM": ("title", parseText, "Document title"), "IART": ("artist", parseText, "Artist"), "ICMT": ("comment", parseText, "Comment"), "ICOP": ("copyright", parseText, "Copyright"), "IENG": ("author", parseText, "Author"), "ICRD": ("creation_date", parseText, "Creation date"), "ISFT": ("producer", parseText, "Producer"), "IDIT": ("datetime", parseText, "Date time"), # TODO: Todo: see below # "strn": Stream description # TWOCC code, movie/field[]/tag.value[2:4]: # "db": "Uncompressed video frame", # "dc": "Compressed video frame", # "wb": "Audio data", # "pc": "Palette change" } subtag_info = { "INFO": ("info", "File informations"), "hdrl": ("headers", "Headers"), "strl": ("stream[]", "Stream header list"), "movi": ("movie", "Movie stream"), "odml": ("odml", "ODML"), } def __init__(self, *args, **kw): FieldSet.__init__(self, *args, **kw) self._size = (8 + alignValue(self["size"].value, 2)) * 8 tag = self["tag"].value if tag in self.TAG_INFO: self.tag_info = self.TAG_INFO[tag] if tag == "LIST": subtag = self["subtag"].value if subtag in self.subtag_info: info = self.subtag_info[subtag] self.tag_info = (info[0], None, info[1]) self._name = self.tag_info[0] self._description = self.tag_info[2] else: self.tag_info = ("field[]", None, None) def createFields(self): yield String(self, "tag", 4, "Tag", charset="ASCII") yield 
filesizeHandler(UInt32(self, "size", "Size")) if not self["size"].value: return if self["tag"].value == "LIST": yield String(self, "subtag", 4, "Sub-tag", charset="ASCII") handler = self.tag_info[1] while 8 < (self.size - self.current_size)/8: field = self.__class__(self, "field[]") yield field if (field.size/8) % 2 != 0: yield UInt8(self, "padding[]", "Padding") else: handler = self.tag_info[1] if handler: for field in handler(self): yield field else: yield RawBytes(self, "raw_content", self["size"].value) padding = self.seekBit(self._size) if padding: yield padding def createDescription(self): tag = self["tag"].display return u"Chunk (tag %s)" % tag class ChunkAVI(Chunk): TAG_INFO = Chunk.TAG_INFO.copy() TAG_INFO.update({ "strh": ("stream_hdr", parseAVIStreamHeader, "Stream header"), "strf": ("stream_fmt", parseAVIStreamFormat, "Stream format"), "avih": ("avi_hdr", parseAviHeader, "AVI header"), "idx1": ("index", parseIndex, "Stream index"), "dmlh": ("odml_hdr", parseODML, "ODML header"), }) class ChunkCDDA(Chunk): TAG_INFO = Chunk.TAG_INFO.copy() TAG_INFO.update({ 'fmt ': ("cdda", parseCDDA, "CD audio informations"), }) class ChunkWAVE(Chunk): TAG_INFO = Chunk.TAG_INFO.copy() TAG_INFO.update({ 'fmt ': ("format", parseWAVFormat, "Audio format"), 'fact': ("nb_sample", parseWAVFact, "Number of samples"), 'data': ("audio_data", None, "Audio stream data"), }) def parseAnimationHeader(self): yield UInt32(self, "hdr_size", "Size of header (36 bytes)") if self["hdr_size"].value != 36: self.warning("Animation header with unknown size (%s)" % self["size"].value) yield UInt32(self, "nb_frame", "Number of unique Icons in this cursor") yield UInt32(self, "nb_step", "Number of Blits before the animation cycles") yield UInt32(self, "cx") yield UInt32(self, "cy") yield UInt32(self, "bit_count") yield UInt32(self, "planes") yield UInt32(self, "jiffie_rate", "Default Jiffies (1/60th of a second) if rate chunk not present") yield Bit(self, "is_icon") yield NullBits(self, "padding", 31) def parseAnimationSequence(self): while not self.eof: yield UInt32(self, "icon[]") def formatJiffie(field): sec = float(field.value) / 60 return humanDuration(timedelta(seconds=sec)) def parseAnimationRate(self): while not self.eof: yield textHandler(UInt32(self, "rate[]"), formatJiffie) def parseIcon(self): yield SubFile(self, "icon_file", self["size"].value, parser_class=IcoFile) class ChunkACON(Chunk): TAG_INFO = Chunk.TAG_INFO.copy() TAG_INFO.update({ 'anih': ("anim_hdr", parseAnimationHeader, "Animation header"), 'seq ': ("anim_seq", parseAnimationSequence, "Animation sequence"), 'rate': ("anim_rate", parseAnimationRate, "Animation sequence"), 'icon': ("icon[]", parseIcon, "Icon"), }) class RiffFile(Parser): PARSER_TAGS = { "id": "riff", "category": "container", "file_ext": ("avi", "cda", "wav", "ani"), "min_size": 16*8, "mime": (u"video/x-msvideo", u"audio/x-wav", u"audio/x-cda"), # FIXME: Use regex "RIFF.{4}(WAVE|CDDA|AVI )" "magic": ( ("AVI LIST", 8*8), ("WAVEfmt ", 8*8), ("CDDAfmt ", 8*8), ("ACONanih", 8*8), ), "description": "Microsoft RIFF container" } VALID_TYPES = { "WAVE": (ChunkWAVE, u"audio/x-wav", u"Microsoft WAVE audio", ".wav"), "CDDA": (ChunkCDDA, u"audio/x-cda", u"Microsoft Windows audio CD file (cda)", ".cda"), "AVI ": (ChunkAVI, u"video/x-msvideo", u"Microsoft AVI video", ".avi"), "ACON": (ChunkACON, u"image/x-ani", u"Microsoft Windows animated cursor", ".ani"), } endian = LITTLE_ENDIAN def validate(self): if self.stream.readBytes(0, 4) != "RIFF": return "Wrong signature" if self["type"].value not in 
self.VALID_TYPES: return "Unknown RIFF content type" return True def createFields(self): yield String(self, "signature", 4, "AVI header (RIFF)", charset="ASCII") yield filesizeHandler(UInt32(self, "filesize", "File size")) yield String(self, "type", 4, "Content type (\"AVI \", \"WAVE\", ...)", charset="ASCII") # Choose chunk type depending on file type try: chunk_cls = self.VALID_TYPES[self["type"].value][0] except KeyError: chunk_cls = Chunk # Parse all chunks up to filesize while self.current_size < self["filesize"].value*8+8: yield chunk_cls(self, "chunk[]") if not self.eof: yield RawBytes(self, "padding[]", (self.size-self.current_size)/8) def createMimeType(self): try: return self.VALID_TYPES[self["type"].value][1] except KeyError: return None def createDescription(self): tag = self["type"].value if tag == "AVI ": desc = u"Microsoft AVI video" if "headers/avi_hdr" in self: header = self["headers/avi_hdr"] desc += ": %ux%u pixels" % (header["width"].value, header["height"].value) microsec = header["microsec_per_frame"].value if microsec: desc += ", %.1f fps" % (1000000.0 / microsec) if "total_frame" in header and header["total_frame"].value: delta = timedelta(seconds=float(header["total_frame"].value) * microsec) desc += ", " + humanDuration(delta) return desc else: try: return self.VALID_TYPES[tag][2] except KeyError: return u"Microsoft RIFF container" def createContentSize(self): size = (self["filesize"].value + 8) * 8 return min(size, self.stream.size) def createFilenameSuffix(self): try: return self.VALID_TYPES[self["type"].value][3] except KeyError: return ".riff"
38.55
103
0.614432
7,825
0.461217
9,419
0.555169
0
0
0
0
6,627
0.390605
d5eae8227c1380d3fce1267b4a1949ca968db82b
1,041
py
Python
Utils.py
MartinEngen/NaiveBayesianClassifier
a28813708a4d2adcdcd629e6d4d8b4f438a9c799
[ "MIT" ]
null
null
null
Utils.py
MartinEngen/NaiveBayesianClassifier
a28813708a4d2adcdcd629e6d4d8b4f438a9c799
[ "MIT" ]
null
null
null
Utils.py
MartinEngen/NaiveBayesianClassifier
a28813708a4d2adcdcd629e6d4d8b4f438a9c799
[ "MIT" ]
null
null
null
import os
import re


def get_subfolder_paths(folder_relative_path: str) -> list:
    """
    Gets all subfolders of a given path
    :param folder_relative_path: Relative path of folder to find subfolders of
    :return: list of relative paths to any subfolders
    """
    return [f.path for f in os.scandir(folder_relative_path) if f.is_dir()]


def get_group_name(group_path: str) -> str:
    return group_path.split("\\")[-1]


def replace_unwanted_characters(line: str) -> str:
    return re.sub(
        r'([^\s\w]|_)+',
        u' ',
        line.replace('\n', ' ').replace('\t', ' '),
        flags=re.UNICODE
    )


def clean_document(document_file) -> list:
    document = document_file.read().lower().split("\n\n")
    cleaned_lines = list(map(replace_unwanted_characters, document[1:]))
    # lambda x, y: x + y, a, b
    list_of_lines = map(lambda x: x.split(" "), cleaned_lines)
    flattened_list_of_lines = [val for sublist in list_of_lines for val in sublist]
    return filter(lambda x: x != '', flattened_list_of_lines)
30.617647
83
0.659942
0
0
0
0
0
0
0
0
258
0.247839
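A minimal usage sketch for the helpers in the Utils.py record above, assuming a corpus folder with one subfolder per class and plain-text documents whose header is separated from the body by a blank line; the folder and file names here are hypothetical, not part of the original file:

from Utils import clean_document, get_subfolder_paths

# Hypothetical corpus layout: one subfolder per class, plain-text documents inside.
for group_path in get_subfolder_paths("corpus"):
    with open(group_path + "/sample.txt", encoding="utf-8") as handle:
        tokens = list(clean_document(handle))  # clean_document returns a lazy filter object
        print(group_path, len(tokens))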
d5eb56662663b212c6709a52f8fbe61a75880b3c
800
py
Python
tools/ldbc_benchmark/neo4j/load_scripts/time_index.py
carlboudreau007/ecosys
d415143837a85ceb6213a0f0588128a86a4a3984
[ "Apache-2.0" ]
245
2018-04-07T00:14:56.000Z
2022-03-28T05:51:35.000Z
tools/ldbc_benchmark/neo4j/load_scripts/time_index.py
carlboudreau007/ecosys
d415143837a85ceb6213a0f0588128a86a4a3984
[ "Apache-2.0" ]
47
2018-04-02T16:41:22.000Z
2022-03-24T01:40:46.000Z
tools/ldbc_benchmark/neo4j/load_scripts/time_index.py
carlboudreau007/ecosys
d415143837a85ceb6213a0f0588128a86a4a3984
[ "Apache-2.0" ]
140
2018-08-09T15:54:47.000Z
2022-03-30T12:44:48.000Z
from datetime import datetime

with open('/home/neo4j/neo4j-community-3.5.1/logs/debug.log', 'r') as log:
    begin = []
    end = []
    for line in log:
        if 'Index population started' in line:
            begin.append(line[:23])
        elif 'Index creation finished' in line:
            end.append(line[:23])

if len(begin) == 0 or len(begin) > 9:
    print("Something went wrong. Please check debug.log")
elif len(begin) != len(end):
    print("{}/{} Done. Please come back later.".format(len(end), len(begin)))
else:
    elapsed_time = 0
    for i in range(0,9):
        begin_tmp = datetime.strptime(begin[i], '%Y-%m-%d %H:%M:%S.%f')
        end_tmp = datetime.strptime(end[i],'%Y-%m-%d %H:%M:%S.%f')
        elapsed_time += (end_tmp-begin_tmp).total_seconds()
    print("Done in {} s".format(elapsed_time))
34.782609
77
0.6175
0
0
0
0
0
0
0
0
245
0.30625
d5ec93a99d9c113668c2693c8d65499328f692cd
1,489
py
Python
zf-setup.py
Ziki2001/new-school-sdk
b606e666888e1c9813e2f1a6a64bbede3744026e
[ "MIT" ]
null
null
null
zf-setup.py
Ziki2001/new-school-sdk
b606e666888e1c9813e2f1a6a64bbede3744026e
[ "MIT" ]
null
null
null
zf-setup.py
Ziki2001/new-school-sdk
b606e666888e1c9813e2f1a6a64bbede3744026e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
'''
:file: setup.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2021/09/20 11:11:54
'''
from os import path
from setuptools import setup, find_packages

basedir = path.abspath(path.dirname(__file__))

with open(path.join(basedir, "README.md"), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name="zf-school-sdk",
    author="farmer.chillax",
    version="1.3.2",
    license='MIT',
    author_email="[email protected]",
    description="zf School SDK for Python",
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/Farmer-chong/new-school-sdk',
    packages=find_packages(),
    # package_data={},
    package_data={"school_sdk": ['check_code/model.pkl']},
    include_package_data=True,
    platforms='any',
    zip_safe=False,
    install_requires=[
        'requests',
        'pyquery',
        'bs4',
        'Pillow',
        'fake-headers',
        'torch',
        'torchvision',
    ],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.8',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)

# python zf-setup.py bdist_wheel sdist
# twine upload dist/*
26.589286
70
0.620551
0
0
0
0
0
0
0
0
784
0.526528
d5ecb68fc8ba51b00e1a946759c8f1a77d41211f
1,635
py
Python
RunIt/airt/poker_cards.py
antx-code/funcode
a8a9b99274e169562771b488a3a9551277ef4b99
[ "MIT" ]
3
2021-09-27T08:07:07.000Z
2022-03-11T04:46:30.000Z
RunIt/airt/poker_cards.py
antx-code/funcode
a8a9b99274e169562771b488a3a9551277ef4b99
[ "MIT" ]
null
null
null
RunIt/airt/poker_cards.py
antx-code/funcode
a8a9b99274e169562771b488a3a9551277ef4b99
[ "MIT" ]
null
null
null
# Square 方片 => sq => RGB蓝色(Blue)
# Plum 梅花 => pl => RGB绿色(Green)
# Spade 黑桃 => sp => RGB黑色(Black)
# Heart 红桃 => he => RGB红色(Red)

init_poker = {
    'local': {
        'head': [-1, -1, -1],
        'mid': [-1, -1, -1, -1, -1],
        'tail': [-1, -1, -1, -1, -1],
        'drop': [-1, -1, -1, -1],
        'hand': [-1, -1, -1]
    },
    'player1': {
        'head': [-1, -1, -1],
        'mid': [-1, -1, -1, -1, -1],
        'tail': [-1, -1, -1, -1, -1],
        'drop': [-1, -1, -1, -1],
        'hand': [-1, -1, -1]
    },
    'player2': {
        'head': [-1, -1, -1],
        'mid': [-1, -1, -1, -1, -1],
        'tail': [-1, -1, -1, -1, -1],
        'drop': [-1, -1, -1, -1],
        'hand': [-1, -1, -1]
    }
}

# Square
Blue = {
    '2': 0,
    '3': 1,
    '4': 2,
    '5': 3,
    '6': 4,
    '7': 5,
    '8': 6,
    '9': 7,
    '10': 8,
    'J': 9,
    'Q': 10,
    'K': 11,
    'A': 12
}

# Plum
Green = {
    '2': 13,
    '3': 14,
    '4': 15,
    '5': 16,
    '6': 17,
    '7': 18,
    '8': 19,
    '9': 20,
    '10': 21,
    'J': 22,
    'Q': 23,
    'K': 24,
    'A': 25
}

# Heart
Red = {
    '2': 26,
    '3': 27,
    '4': 28,
    '5': 29,
    '6': 30,
    '7': 31,
    '8': 32,
    '9': 33,
    '10': 34,
    'J': 35,
    'Q': 36,
    'K': 37,
    'A': 38
}

# Spade
Black = {
    '2': 39,
    '3': 40,
    '4': 41,
    '5': 42,
    '6': 43,
    '7': 44,
    '8': 45,
    '9': 46,
    '10': 47,
    'J': 48,
    'Q': 49,
    'K': 50,
    'A': 51
}

POKER_SCOPE = [
    '2',
    '3',
    '4',
    '5',
    '6',
    '7',
    '8',
    '9',
    '10',
    'J',
    'Q',
    'K',
    'A'
]
14.469027
40
0.263609
0
0
0
0
0
0
0
0
529
0.317337
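A small illustrative helper built on the suit dictionaries in the poker_cards.py record above; the module name and the card_index function are assumptions made for this sketch, not part of the original file:

from poker_cards import Black, Blue, Green, Red

# Map the file's suit codes (sq/pl/he/sp) to their lookup tables.
SUITS = {'sq': Blue, 'pl': Green, 'he': Red, 'sp': Black}


def card_index(suit: str, rank: str) -> int:
    """Return the 0-51 card id for a suit code and a rank from POKER_SCOPE."""
    return SUITS[suit][rank]


assert card_index('sq', '2') == 0   # first diamond
assert card_index('sp', 'A') == 51  # last spade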
d5edbea518993ed30402ca6ed7151f569ce035ff
42
py
Python
main.py
reflective21/iportfolio
39db626a9754c1df44ac698f3d8988fdc4e7c6d5
[ "MIT" ]
null
null
null
main.py
reflective21/iportfolio
39db626a9754c1df44ac698f3d8988fdc4e7c6d5
[ "MIT" ]
null
null
null
main.py
reflective21/iportfolio
39db626a9754c1df44ac698f3d8988fdc4e7c6d5
[ "MIT" ]
null
null
null
name = "David Asiru Adetomiwa" print(name)
21
30
0.761905
0
0
0
0
0
0
0
0
23
0.547619
d5edd2119227be04c5621c163a6292b04c441de0
10,716
py
Python
tcex/services/api_service.py
kdeltared/tcex
818c0d09256764f871e42d9ca5916f92d941d882
[ "Apache-2.0" ]
null
null
null
tcex/services/api_service.py
kdeltared/tcex
818c0d09256764f871e42d9ca5916f92d941d882
[ "Apache-2.0" ]
null
null
null
tcex/services/api_service.py
kdeltared/tcex
818c0d09256764f871e42d9ca5916f92d941d882
[ "Apache-2.0" ]
null
null
null
"""TcEx Framework API Service module.""" # standard library import json import sys import threading import traceback from io import BytesIO from typing import Any from .common_service import CommonService class ApiService(CommonService): """TcEx Framework API Service module.""" def __init__(self, tcex: object): """Initialize the Class properties. Args: tcex: Instance of TcEx. """ super().__init__(tcex) # properties self._metrics = {'Errors': 0, 'Requests': 0, 'Responses': 0} # config callbacks self.api_event_callback = None @property def command_map(self) -> dict: """Return the command map for the current Service type.""" command_map = super().command_map command_map.update({'runservice': self.process_run_service_command}) return command_map def format_query_string(self, params: dict) -> str: """Convert name/value array to a query string. Args: params: The query params for the request. Returns: str: The query params reformatted as a string. """ query_string = [] try: for q in params: query_string.append(f'''{q.get('name')}={q.get('value')}''') except AttributeError as e: self.log.error( f'feature=api-service, event=bad-params-provided, params={params}, error="""{e})"""' ) self.log.trace(traceback.format_exc()) return '&'.join(query_string) def format_request_headers(self, headers: dict) -> dict: """Convert name/value array to a headers dict. Args: headers: The dict of key/value header data. Returns: dict: The restructured header data. """ headers_ = {} try: for h in headers: # TODO: either support tuple or csv list of values # headers_.setdefault(h.get('name').lower(), []).append(h.get('value')) headers_.setdefault(h.get('name').lower(), str(h.get('value'))) except AttributeError as e: self.log.error( f'feature=api-service, event=bad-headers-provided, ' f'headers={headers}, error="""{e})"""' ) self.log.trace(traceback.format_exc()) return headers_ def format_response_headers(self, headers: dict) -> dict: """Convert name/value array to a query string. Args: headers: The dict header data to be converted to key/value pairs. Returns: dict: The restructured header data. """ headers_ = [] try: for h in headers: headers_.append({'name': h[0], 'value': h[1]}) except AttributeError as e: self.log.error( f'feature=api-service, event=bad-headers-provided, ' f'headers={headers}, error="""{e})"""' ) self.log.trace(traceback.format_exc()) return headers_ def process_run_service_response(self, *args, **kwargs) -> None: """Handle service event responses. 
('200 OK', [('content-type', 'application/json'), ('content-length', '103')]) """ self.log.info('feature=api-service, event=response-received, status=waiting-for-body') kwargs.get('event').wait(30) # wait for thread event - (set on body write) self.log.trace(f'feature=api-service, event=response, args={args}') try: status_code, status = args[0].split(' ', 1) response = { 'bodyVariable': 'response.body', 'command': 'Acknowledged', 'headers': self.format_response_headers(args[1]), 'requestKey': kwargs.get('request_key'), # pylint: disable=cell-var-from-loop 'status': status, 'statusCode': status_code, 'type': 'RunService', } self.log.info('feature=api-service, event=response-sent') self.message_broker.publish(json.dumps(response), self.args.tc_svc_client_topic) self.increment_metric('Responses') except Exception as e: self.log.error( f'feature=api-service, event=failed-creating-response-body, error="""{e}"""' ) self.log.trace(traceback.format_exc()) self.increment_metric('Errors') def process_run_service_command(self, message: dict) -> None: """Process the RunService command. .. code-block:: python :linenos: :lineno-start: 1 { "command": "RunService", "apiToken": "abc123", "bodyVariable": "request.body", "headers": [ { key/value pairs } ], "method": "GET", "queryParams": [ { key/value pairs } ], "requestKey": "123abc", "userConfig": [{ "name": "tlpExportSetting", "value": "TLP:RED" }], } Args: message: The message payload from the server topic. """ # register config apiToken (before any logging) self.token.register_token( self.thread_name, message.get('apiToken'), message.get('expireSeconds') ) self.log.info(f'feature=api-service, event=runservice-command, message="{message}"') # thread event used to block response until body is written event = threading.Event() # process message request_key: str = message.get('requestKey') body = None try: # read body from redis body_variable: str = message.pop('bodyVariable', None) if body_variable is not None: body: Any = self.key_value_store.read(request_key, body_variable) if body is not None: # for API service the data in Redis is not b64 encoded body = BytesIO(body) except Exception as e: self.log.error(f'feature=api-service, event=failed-reading-body, error="""{e}"""') self.log.trace(traceback.format_exc()) headers: dict = self.format_request_headers(message.pop('headers')) method: str = message.pop('method') params: dict = message.pop('queryParams') path: str = message.pop('path') try: environ = { 'wsgi.errors': sys.stderr, 'wsgi.input': body, 'wsgi.multithread': True, 'wsgi.multiprocess': False, 'wsgi.run_once': True, 'wsgi.url_scheme': 'https', 'wsgi.version': (1, 0), 'PATH_INFO': path, 'QUERY_STRING': self.format_query_string(params), 'REMOTE_ADDR': message.get('remoteAddress', ''), # 'REMOTE_HOST': message.get('remoteAddress', ''), 'REQUEST_METHOD': method.upper(), 'SCRIPT_NAME': '/', 'SERVER_NAME': '', 'SERVER_PORT': '', 'SERVER_PROTOCOL': 'HTTP/1.1', } # Add user config for TAXII or other service that supports the data type environ['user_config'] = message.get('userConfig', []) # add headers if headers.get('content-type') is not None: environ['CONTENT_TYPE'] = headers.pop('content-type') # add content length if headers.get('content-length') is not None: environ['CONTENT_LENGTH'] = headers.pop('content-length') for header, value in headers.items(): environ[f'HTTP_{header}'.upper()] = value # make values from message available in env in camel # case (e.g., falcon -> req.env.get('request_url)) for key, value in message.items(): if key not in 
environ and self.tcex.utils.camel_to_snake(key) not in environ: environ[self.tcex.utils.camel_to_snake(key)] = value self.log.trace(f'feature=api-service, environ={environ}') self.increment_metric('Requests') except Exception as e: self.log.error(f'feature=api-service, event=failed-building-environ, error="""{e}"""') self.log.trace(traceback.format_exc()) self.increment_metric('Errors') return # stop processing def response_handler(*args, **kwargs): # pylint: disable=unused-argument """Handle WSGI Response""" kwargs['event'] = event # add event to kwargs for blocking kwargs['request_key'] = request_key self.service_thread( name='response-handler', target=self.process_run_service_response, args=args, kwargs=kwargs, ) if callable(self.api_event_callback): try: body_data: Any = self.api_event_callback( # pylint: disable=not-callable environ, response_handler ) # process body body = '' if hasattr(body_data, 'read'): body = body_data.read() elif isinstance(body_data, list): for bd in body_data: if hasattr(bd, 'read'): body += bd.read() elif isinstance(bd, bytes): body += bd.decode() elif isinstance(bd, list): for b in bd: self.log.error(f'unhandled type - {type(b)}') else: self.log.error(f'unhandled type - {type(body)}') self.log.error(f'unhandled type dir - {dir(body)}') # write body to Redis self.key_value_store.create(request_key, 'response.body', body) # set thread event to True to trigger response self.log.info('feature=api-service, event=response-body-written') event.set() except Exception as e: self.log.error( f'feature=api-service, event=api-event-callback-failed, error="""{e}""".' ) self.log.trace(traceback.format_exc()) self.increment_metric('Errors') # unregister config apiToken self.token.unregister_token(self.thread_name)
38.271429
100
0.535741
10,507
0.980496
0
0
257
0.023983
0
0
4,400
0.410601
d5ee43eaf3c3033dcd289654572ab9b3e0e7b99a
1,540
py
Python
mmpose/core/optimizer/builder.py
vsatyakumar/mmpose
2fffccb19dad3b59184b41be94653f75523b8585
[ "Apache-2.0" ]
1
2021-05-06T08:40:13.000Z
2021-05-06T08:40:13.000Z
mmpose/core/optimizer/builder.py
CV-IP/mmpose
3ef8e6dbbeb6262b7ed6c51faa74b83c23f4c6a1
[ "Apache-2.0" ]
null
null
null
mmpose/core/optimizer/builder.py
CV-IP/mmpose
3ef8e6dbbeb6262b7ed6c51faa74b83c23f4c6a1
[ "Apache-2.0" ]
null
null
null
from mmcv.runner import build_optimizer


def build_optimizers(model, cfgs):
    """Build multiple optimizers from configs.

    If `cfgs` contains several dicts for optimizers, then a dict for each
    constructed optimizers will be returned.
    If `cfgs` only contains one optimizer config, the constructed optimizer
    itself will be returned.

    For example,

    1) Multiple optimizer configs:

    .. code-block:: python

        optimizer_cfg = dict(
            model1=dict(type='SGD', lr=lr),
            model2=dict(type='SGD', lr=lr))

    The return dict is
    ``dict('model1': torch.optim.Optimizer, 'model2': torch.optim.Optimizer)``

    2) Single optimizer config:

    .. code-block:: python

        optimizer_cfg = dict(type='SGD', lr=lr)

    The return is ``torch.optim.Optimizer``.

    Args:
        model (:obj:`nn.Module`): The model with parameters to be optimized.
        cfgs (dict): The config dict of the optimizer.

    Returns:
        dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`:
            The initialized optimizers.
    """
    optimizers = {}
    if hasattr(model, 'module'):
        model = model.module
    # determine whether 'cfgs' has several dicts for optimizers
    if all(isinstance(v, dict) for v in cfgs.values()):
        for key, cfg in cfgs.items():
            cfg_ = cfg.copy()
            module = getattr(model, key)
            optimizers[key] = build_optimizer(module, cfg_)
        return optimizers
    else:
        return build_optimizer(model, cfgs)
29.056604
78
0.635065
0
0
0
0
0
0
0
0
1,074
0.697403
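A brief usage sketch for build_optimizers from the record above, assuming a model with generator and discriminator submodules; the module shapes and learning rates are illustrative only:

import torch.nn as nn

from mmpose.core.optimizer.builder import build_optimizers


class GAN(nn.Module):
    def __init__(self):
        super().__init__()
        self.generator = nn.Linear(8, 8)      # illustrative submodule
        self.discriminator = nn.Linear(8, 1)  # illustrative submodule


# One config per submodule, so a dict of optimizers is returned.
cfgs = dict(
    generator=dict(type='Adam', lr=1e-4),
    discriminator=dict(type='Adam', lr=4e-4),
)
optimizers = build_optimizers(GAN(), cfgs)
print(sorted(optimizers))  # ['discriminator', 'generator']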
d5eeb5cec1758e31f96e4ef111f8b5ec32383697
1,189
py
Python
register/views.py
angel-vazquez25/My-Backlog-Handler
60880cfc6bcc5a7fb2d5c752c11bdfe741f76531
[ "MIT" ]
3
2021-05-11T20:56:41.000Z
2022-03-15T01:26:13.000Z
register/views.py
angel-vazquez25/My-Backlog-Handler
60880cfc6bcc5a7fb2d5c752c11bdfe741f76531
[ "MIT" ]
null
null
null
register/views.py
angel-vazquez25/My-Backlog-Handler
60880cfc6bcc5a7fb2d5c752c11bdfe741f76531
[ "MIT" ]
1
2021-05-19T15:46:32.000Z
2021-05-19T15:46:32.000Z
import datetime

from django.contrib.auth import logout
from django.shortcuts import render, redirect
from .forms import RegisterForm
from django.http import HttpResponse
from django.contrib.auth.forms import AuthenticationForm
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect
from django.contrib import messages


# Create your views here.
def register(response):
    if response.user.is_authenticated:
        return redirect("homepage")
    else:
        if response.method == "POST":
            form = RegisterForm(response.POST)
            if form.is_valid():
                new_user = form.save()
                # messages.info(response, "Thanks for registering. You are now logged in.")
                new_user = authenticate(username=form.cleaned_data['username'],
                                        password=form.cleaned_data['password1'],
                                        )
                login(response, new_user)
                return redirect("homepage")
        else:
            form = RegisterForm()
        return render(response, "register/register.html", {"form": form})
33.971429
91
0.641716
0
0
0
0
0
0
0
0
177
0.148865
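A hedged sketch of how the register view in the record above would typically be wired into a URLconf; the route name and project layout are assumptions, not taken from the record:

# urls.py (illustrative)
from django.urls import path

from register import views

urlpatterns = [
    path("register/", views.register, name="register"),
]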
d5eefeb4c414f13bc2793346ebb57b29f5de79db
572
py
Python
forum/migrations/0001_initial.py
Aerodlyn/mu
2c3b95e5a83d0f651dd8ad287b471803e1fec3a1
[ "MIT" ]
1
2021-06-25T22:27:39.000Z
2021-06-25T22:27:39.000Z
forum/migrations/0001_initial.py
Aerodlyn/mu
2c3b95e5a83d0f651dd8ad287b471803e1fec3a1
[ "MIT" ]
1
2022-03-12T00:55:31.000Z
2022-03-12T00:55:31.000Z
forum/migrations/0001_initial.py
Aerodlyn/mu
2c3b95e5a83d0f651dd8ad287b471803e1fec3a1
[ "MIT" ]
null
null
null
# Generated by Django 3.1.7 on 2021-03-26 01:27

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Community',
            fields=[
                ('name', models.CharField(max_length=64, primary_key=True, serialize=False)),
                ('description', models.TextField()),
                ('private', models.BooleanField(default=False)),
                ('slug', models.SlugField()),
            ],
        ),
    ]
23.833333
93
0.552448
479
0.837413
0
0
0
0
0
0
92
0.160839
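For reference, a sketch of the model definition this initial migration corresponds to, reconstructed from the migration's field list; the app's actual models.py is not part of the record:

from django.db import models


class Community(models.Model):
    name = models.CharField(max_length=64, primary_key=True)
    description = models.TextField()
    private = models.BooleanField(default=False)
    slug = models.SlugField()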
d5efef002e68abbec6057f8677301ab26bdc9a66
16,846
py
Python
custom_train.py
shirley-wu/text_to_table
44cb100b8ff2543b5b4efe1461502c00c34ef846
[ "MIT" ]
3
2022-03-17T05:55:23.000Z
2022-03-30T08:34:14.000Z
custom_train.py
shirley-wu/text_to_table
44cb100b8ff2543b5b4efe1461502c00c34ef846
[ "MIT" ]
1
2022-03-30T09:04:54.000Z
2022-03-30T09:04:54.000Z
custom_train.py
shirley-wu/text_to_table
44cb100b8ff2543b5b4efe1461502c00c34ef846
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a new model on one or across multiple GPUs. """ import collections import logging import math import os import sys import numpy as np import torch from fairseq import ( checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils, ) from fairseq import meters from fairseq.checkpoint_utils import checkpoint_paths from fairseq.data import iterators from fairseq.file_io import PathManager from fairseq.logging import metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.train") class Saver: def __init__(self): self.best = None self.keep_best = [] def save_checkpoint(self, args, trainer, epoch_itr, val_loss): # only one worker should attempt to create the required dir if args.distributed_rank == 0: os.makedirs(args.save_dir, exist_ok=True) prev_best = val_loss if self.best is None else self.best if val_loss is not None: best_function = max if args.maximize_best_checkpoint_metric else min self.best = best_function(val_loss, prev_best) if args.no_save: return trainer.consolidate_optimizer() if not trainer.is_data_parallel_master: return def is_better(a, b): return a >= b if args.maximize_best_checkpoint_metric else a <= b write_timer = meters.StopwatchMeter() write_timer.start() epoch = epoch_itr.epoch end_of_epoch = epoch_itr.end_of_epoch() updates = trainer.get_num_updates() suffix = getattr(args, "checkpoint_suffix", "") checkpoint_conds = collections.OrderedDict() save_epoch_checkpoint = ( end_of_epoch and not args.no_epoch_checkpoints and epoch % args.save_interval == 0 ) checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = save_epoch_checkpoint checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( not save_epoch_checkpoint and args.save_interval_updates > 0 and updates % args.save_interval_updates == 0 ) checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( self.best is None or is_better(val_loss, self.best) ) checkpoint_conds[ "checkpoint_last{}.pt".format(suffix) ] = not args.no_last_checkpoints extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} if self.best is not None: extra_state.update({"best": self.best}) if args.keep_best_checkpoints > 0 and (len(self.keep_best) < args.keep_best_checkpoints or ( val_loss is not None and not is_better(self.keep_best[-1][0], val_loss))): ckpt_name = "checkpoint{}{}.best_{:.4f}.pt".format(epoch, suffix, val_loss) if save_epoch_checkpoint \ else "checkpoint_{}_{}{}.best_{:.4f}.pt".format(epoch, updates, suffix, val_loss) checkpoint_conds[ckpt_name] = True self.keep_best.append((val_loss, ckpt_name)) self.keep_best = sorted(self.keep_best) checkpoints = [ os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond ] if len(checkpoints) > 0: trainer.save_checkpoint(checkpoints[0], extra_state) for cp in checkpoints[1:]: PathManager.copy(checkpoints[0], cp, overwrite=True) write_timer.stop() logger.info( "saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( checkpoints[0], epoch, updates, 
val_loss, write_timer.sum ) ) if not end_of_epoch and args.keep_interval_updates > 0: # remove old checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths( args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt" ) for old_chk in checkpoints[args.keep_interval_updates:]: if os.path.lexists(old_chk): os.remove(old_chk) if args.keep_last_epochs > 0: # remove old epoch checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt") for old_chk in checkpoints[args.keep_last_epochs:]: if os.path.lexists(old_chk): os.remove(old_chk) if len(self.keep_best) > args.keep_best_checkpoints: for _, x in self.keep_best[args.keep_best_checkpoints:]: x = os.path.join(args.save_dir, x) if os.path.lexists(x): os.remove(x) self.keep_best = self.keep_best[:args.keep_best_checkpoints] def main(args): saver = Saver() utils.import_user_module(args) assert ( args.max_tokens is not None or args.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" metrics.reset() np.random.seed(args.seed) utils.set_torch_seed(args.seed) if distributed_utils.is_master(args): checkpoint_utils.verify_checkpoint_directory(args.save_dir) # Print args logger.info(args) # Setup task, e.g., translation, language modeling, etc. task = tasks.setup_task(args) # Load valid dataset (we load training data below, based on the latest checkpoint) for valid_sub_split in args.valid_subset.split(","): task.load_dataset(valid_sub_split, combine=False, epoch=1) # Build model and criterion model = task.build_model(args) criterion = task.build_criterion(args) logger.info(model) logger.info("task: {} ({})".format(args.task, task.__class__.__name__)) logger.info("model: {} ({})".format(args.arch, model.__class__.__name__)) logger.info( "criterion: {} ({})".format(args.criterion, criterion.__class__.__name__) ) logger.info( "num. model params: {} (num. 
trained: {})".format( sum(p.numel() for p in model.parameters()), sum(p.numel() for p in model.parameters() if p.requires_grad), ) ) # (optionally) Configure quantization if args.quantization_config_path is not None: quantizer = quantization_utils.Quantizer( config_path=args.quantization_config_path, max_epoch=args.max_epoch, max_update=args.max_update, ) else: quantizer = None # Build trainer if args.model_parallel_size == 1: trainer = Trainer(args, task, model, criterion, quantizer) else: trainer = MegatronTrainer(args, task, model, criterion) logger.info( "training on {} devices (GPUs/TPUs)".format(args.distributed_world_size) ) logger.info( "max tokens per GPU = {} and max sentences per GPU = {}".format( args.max_tokens, args.batch_size ) ) # Load the latest checkpoint if one is available and restore the # corresponding train iterator extra_state, epoch_itr = checkpoint_utils.load_checkpoint( args, trainer, # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) # Train until the learning rate gets too small max_epoch = args.max_epoch or math.inf lr = trainer.get_lr() train_meter = meters.StopwatchMeter() train_meter.start() while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch: # train for one epoch valid_losses, should_stop = train(args, trainer, task, epoch_itr, saver) if should_stop: break # only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) epoch_itr = trainer.get_train_iterator( epoch_itr.next_epoch_idx, # sharded data: get train iterator for next epoch load_dataset=task.has_sharded_data("train"), # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) train_meter.stop() logger.info("done training in {:.1f} seconds".format(train_meter.sum)) def should_stop_early(args, valid_loss): # skip check if no validation was done in the current epoch if valid_loss is None: return False if args.patience <= 0: return False def is_better(a, b): return a > b if args.maximize_best_checkpoint_metric else a < b prev_best = getattr(should_stop_early, "best", None) if prev_best is None or is_better(valid_loss, prev_best): should_stop_early.best = valid_loss should_stop_early.num_runs = 0 return False else: should_stop_early.num_runs += 1 if should_stop_early.num_runs >= args.patience: logger.info( "early stop since valid performance hasn't improved for last {} runs".format( args.patience ) ) return True else: return False @metrics.aggregate("train") def train(args, trainer, task, epoch_itr, saver): """Train the model for one epoch and return validation losses.""" # Initialize data iterator itr = epoch_itr.next_epoch_itr( fix_batches_to_gpus=args.fix_batches_to_gpus, shuffle=(epoch_itr.next_epoch_idx > args.curriculum), ) update_freq = ( args.update_freq[epoch_itr.epoch - 1] if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1] ) itr = iterators.GroupedIterator(itr, update_freq) if getattr(args, "tpu", False): itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=args.log_format, log_interval=args.log_interval, epoch=epoch_itr.epoch, tensorboard_logdir=( args.tensorboard_logdir if distributed_utils.is_master(args) else None ), default_log_format=("tqdm" if not args.no_progress_bar else "simple"), ) trainer.begin_epoch(epoch_itr.epoch) valid_losses = [None] valid_subsets = args.valid_subset.split(",") should_stop = False num_updates = trainer.get_num_updates() for i, samples in 
enumerate(progress): with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function( "train_step-%d" % i ): log_output = trainer.train_step(samples) if log_output is not None: # not OOM, overflow, ... # log mid-epoch stats num_updates = trainer.get_num_updates() if num_updates % args.log_interval == 0: stats = get_training_stats(metrics.get_smoothed_values("train_inner")) progress.log(stats, tag="train_inner", step=num_updates) # reset mid-epoch stats after each log interval # the end-of-epoch stats will still be preserved metrics.reset_meters("train_inner") end_of_epoch = not itr.has_next() valid_losses, should_stop = validate_and_save( args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver ) if should_stop: break # log end-of-epoch stats logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch)) stats = get_training_stats(metrics.get_smoothed_values("train")) progress.print(stats, tag="train", step=num_updates) # reset epoch-level meters metrics.reset_meters("train") return valid_losses, should_stop def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver): num_updates = trainer.get_num_updates() max_update = args.max_update or math.inf do_save = ( (end_of_epoch and epoch_itr.epoch % args.save_interval == 0) or num_updates >= max_update or ( args.save_interval_updates > 0 and num_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates >= args.validate_after_updates ) ) do_validate = ( (not end_of_epoch and do_save) # validate during mid-epoch saves or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0) or num_updates >= max_update or ( args.validate_interval_updates > 0 and num_updates > 0 and num_updates % args.validate_interval_updates == 0 ) ) and not args.disable_validation # Validate valid_losses = [None] if do_validate: valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets, saver) # Stopping conditions should_stop = ( should_stop_early(args, valid_losses[0]) or num_updates >= max_update or ( args.stop_time_hours > 0 and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours ) ) # Save checkpoint if do_save or should_stop: logger.info("begin save checkpoint") saver.save_checkpoint(args, trainer, epoch_itr, valid_losses[0]) return valid_losses, should_stop def get_training_stats(stats): stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0) return stats def validate(args, trainer, task, epoch_itr, subsets, saver): """Evaluate the model on the validation set(s) and return the losses.""" if args.fixed_validation_seed is not None: # set fixed seed for every validation utils.set_torch_seed(args.fixed_validation_seed) trainer.begin_valid_epoch(epoch_itr.epoch) valid_losses = [] for subset in subsets: logger.info('begin validation on "{}" subset'.format(subset)) # Initialize data iterator itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False) if getattr(args, "tpu", False): itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=args.log_format, log_interval=args.log_interval, epoch=epoch_itr.epoch, prefix=f"valid on '{subset}' subset", tensorboard_logdir=( args.tensorboard_logdir if distributed_utils.is_master(args) else None ), default_log_format=("tqdm" if not args.no_progress_bar else "simple"), ) # create a new root metrics aggregator so validation metrics # don't pollute other aggregators (e.g., train meters) with metrics.aggregate(new_root=True) as agg: for sample in 
progress: trainer.valid_step(sample) # log validation stats stats = get_valid_stats(args, trainer, agg.get_smoothed_values(), saver) progress.print(stats, tag=subset, step=trainer.get_num_updates()) valid_losses.append(stats[args.best_checkpoint_metric]) return valid_losses def get_valid_stats(args, trainer, stats, saver): stats["num_updates"] = trainer.get_num_updates() if hasattr(saver.save_checkpoint, "best"): key = "best_{0}".format(args.best_checkpoint_metric) best_function = max if args.maximize_best_checkpoint_metric else min stats[key] = best_function( saver.save_checkpoint.best, stats[args.best_checkpoint_metric] ) return stats def cli_main(modify_parser=None): parser = options.get_training_parser() args = options.parse_args_and_arch(parser, modify_parser=modify_parser) if args.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(args, main) else: distributed_utils.call_main(args, main) if __name__ == "__main__": cli_main()
36.306034
114
0.633919
4,408
0.261664
0
0
2,531
0.150243
0
0
2,899
0.172088
d5eff585130a0defb51fd844556d3dea1143c55d
18,862
py
Python
src/ucar/unidata/idv/resources/python/griddiag.py
JessicaWiedemeier/IDV
e5f67c755cc95f8ad2123bdc45a91f0e5eca0d64
[ "CNRI-Jython" ]
1
2021-06-09T11:24:48.000Z
2021-06-09T11:24:48.000Z
src/ucar/unidata/idv/resources/python/griddiag.py
JessicaWiedemeier/IDV
e5f67c755cc95f8ad2123bdc45a91f0e5eca0d64
[ "CNRI-Jython" ]
null
null
null
src/ucar/unidata/idv/resources/python/griddiag.py
JessicaWiedemeier/IDV
e5f67c755cc95f8ad2123bdc45a91f0e5eca0d64
[ "CNRI-Jython" ]
null
null
null
""" This is the doc for the Grid Diagnostics module. These functions are based on the grid diagnostics from the GEneral Meteorological PAcKage (GEMPAK). Note that the names are case sensitive and some are named slightly different from GEMPAK functions to avoid conflicts with Jython built-ins (e.g. str). <P> In the following operators, scalar operands are named S<sub>n</sub> and vector operands are named V<sub>n</sub>. Lowercase u and v refer to the grid relative components of a vector. """ def GRAVITY(): """ Gravity constant """ return DerivedGridFactory.GRAVITY; # Math functions def atn2(S1,S2,WA=0): """ Wrapper for atan2 built-in <div class=jython> ATN2 (S1, S2) = ATAN ( S1 / S2 )<br> WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR) </div> """ return GridMath.atan2(S1,S2,WA) def add(S1,S2,WA=0): """ Addition <div class=jython> ADD (S1, S2) = S1 + S2<br> WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR) </div> """ return GridMath.add(S1,S2,WA) def mul(S1,S2,WA=0): """ Multiply <div class=jython> MUL (S1, S2) = S1 * S2<br> WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR) </div> """ return GridMath.multiply(S1,S2,WA) def quo(S1,S2,WA=0): """ Divide <div class=jython> QUO (S1, S2) = S1 / S2<br> WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR) </div> """ return GridMath.divide(S1,S2,WA) def sub(S1,S2,WA=0): """ Subtract <div class=jython> SUB (S1, S2) = S1 - S2<br> WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR) </div> """ return GridMath.subtract(S1,S2,WA) # Scalar quantities def adv(S,V): """ Horizontal Advection, negative by convention <div class=jython> ADV ( S, V ) = - ( u * DDX (S) + v * DDY (S) ) </div> """ return -add(mul(ur(V),ddx(S)),mul(vr(V),ddy(S))) def avg(S1,S2): """ Average of 2 scalars <div class=jython> AVG (S1, S2) = ( S1 + S2 ) / 2 </div> """ return add(S1,S2)/2 def avor(V): """ Absolute Vorticity <div class=jython> AVOR ( V ) = VOR ( V ) + CORL(V) </div> """ relv = vor(V) return add(relv,corl(relv)) def circs(S, D=2): """ <div class=jython> Apply a circular aperature smoothing to the grid points. The weighting function is the circular aperature diffraction function. D is the radius of influence in grid increments, increasing D increases the smoothing. (default D=2) </div> """ return GridUtil.smooth(S, "CIRC", int(D)) def corl(S): """ Coriolis Parameter for all points in a grid <div class=jython> CORL = TWO_OMEGA*sin(latr) </div> """ return DerivedGridFactory.createCoriolisGrid(S) def cress(S, D=2): """ <div class=jython> Apply a Cressman smoothing to the grid points. The smoothed value is given by a weighted average of surrounding grid points. D is the radius of influence in grid increments, increasing D increases the smoothing. 
(default D=2) </div> """ return GridUtil.smooth(S, "CRES", int(D)) def cros(V1,V2): """ Vector cross product magnitude <div class=jython> CROS ( V1, V2 ) = u1 * v2 - u2 * v1 </div> """ return sub(mul(ur(V1),vr(V2)),mul(ur(V2),vr(V1))) def ddx(S): """ Take the derivative with respect to the domain's X coordinate """ return GridMath.ddx(S); def ddy(S): """ Take the derivative with respect to the domain's Y coordinate """ return GridMath.ddy(S); def defr(V): """ Total deformation <div class=jython> DEF ( V ) = ( STRD (V) ** 2 + SHR (V) ** 2 ) ** .5 </div> """ return mag(strd(V),shr(V)) def div(V): """ Horizontal Divergence <div class=jython> DIV ( V ) = DDX ( u ) + DDY ( v ) </div> """ return add(ddx(ur(V)),ddy(vr(V))) def dirn(V): """ North relative direction of a vector <div class=jython> DIRN ( V ) = DIRR ( un(v), vn(v) ) </div> """ return dirr(DerivedGridFactory.createTrueFlowVector(V)) def dirr(V): """ Grid relative direction of a vector """ return DerivedGridFactory.createVectorDirection(V) def dot(V1,V2): """ Vector dot product <div class=jython> DOT ( V1, V2 ) = u1 * u2 + v1 * v2 </div> """ product = mul(V1,V2) return add(ur(product),vr(product)) def gwfs(S, N=6): """ <div class=jython> Horizontal smoothing using normally distributed weights with theoretical response of 1/e for N * delta-x wave. Increasing N increases the smoothing. (default N=6) </div> """ return GridUtil.smooth(S, "GWFS", int(N)) def jcbn(S1,S2): """ Jacobian Determinant <div class=jython> JCBN ( S1, S2 ) = DDX (S1) * DDY (S2) - DDY (S1) * DDX (S2) </div> """ return sub(mul(ddx(S1),ddy(S2)),mul(ddy(S1),ddx(S2))) def latr(S): """ Latitudue all points in a grid """ return DerivedGridFactory.createLatitudeGrid(S) def lap(S): """ Laplacian operator <div class=jython> LAP ( S ) = DIV ( GRAD (S) ) </div> """ grads = grad(S) return div(grads) def lav(S,level1=None,level2=None, unit=None): """ Layer Average of a multi layer grid <div class=jython> LAV ( S ) = ( S (level1) + S (level2) ) / 2. </div> """ if level1 == None: return GridMath.applyFunctionOverLevels(S, GridMath.FUNC_AVERAGE) else: return layerAverage(S,level1,level2, unit) def ldf(S,level1,level2, unit=None): """ Layer Difference <div class=jython> LDF ( S ) = S (level1) - S (level2) </div> """ return layerDiff(S,level1,level2, unit); def mag(*a): """ Magnitude of a vector """ if (len(a) == 1): return DerivedGridFactory.createVectorMagnitude(a[0]); else: return DerivedGridFactory.createVectorMagnitude(a[0],a[1]); def mixr(temp,rh): """ Mixing Ratio from Temperature, RH (requires pressure domain) """ return DerivedGridFactory.createMixingRatio(temp,rh) def relh(temp,mixr): """ Create Relative Humidity from Temperature, mixing ratio (requires pressure domain) """ return DerivedGridFactory.createRelativeHumidity(temp,mixr) def pvor(S,V): """ Potetial Vorticity (usually from theta and wind) """ return DerivedGridFactory.createPotentialVorticity(S,V) def rects(S, D=2): """ <div class=jython> Apply a rectangular aperature smoothing to the grid points. The weighting function is the product of the rectangular aperature diffraction function in the x and y directions. D is the radius of influence in grid increments, increasing D increases the smoothing. 
(default D=2) </div> """ return GridUtil.smooth(S, "RECT", int(D)) def savg(S): """ Average over whole grid <div class=jython> SAVG ( S ) = average of all non-missing grid point values </div> """ return GridMath.applyFunctionToLevels(S, GridMath.FUNC_AVERAGE) def savs(S): """ Average over grid subset <div class=jython> SAVS ( S ) = average of all non-missing grid point values in the subset area </div> """ return savg(S) def sdiv(S,V): """ Horizontal Flux Divergence <div class=jython> SDIV ( S, V ) = S * DIV ( V ) + DOT ( V, GRAD ( S ) ) </div> """ return add(mul(S,(div(V))) , dot(V,grad(S))) def shr(V): """ Shear Deformation <div class=jython> SHR ( V ) = DDX ( v ) + DDY ( u ) </div> """ return add(ddx(vr(V)),ddy(ur(V))) def sm5s(S): """ Smooth a scalar grid using a 5-point smoother <div class=jython> SM5S ( S ) = .5 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) + S (i-1,j) + S (i,j-1) ) </div> """ return GridUtil.smooth(S, "SM5S") def sm9s(S): """ Smooth a scalar grid using a 9-point smoother <div class=jython> SM9S ( S ) = .25 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) + S (i-1,j) + S (i,j-1) ) + .0625 * ( S (i+1,j+1) + S (i+1,j-1) + S (i-1,j+1) + S (i-1,j-1) ) </div> """ return GridUtil.smooth(S, "SM9S") def strd(V): """ Stretching Deformation <div class=jython> STRD ( V ) = DDX ( u ) - DDY ( v ) </div> """ return sub(ddx(ur(V)),ddy(vr(V))) def thta(temp): """ Potential Temperature from Temperature (requires pressure domain) """ return DerivedGridFactory.createPotentialTemperature(temp) def thte(temp,rh): """ Equivalent Potential Temperature from Temperature and Relative humidity (requires pressure domain) """ return DerivedGridFactory.createEquivalentPotentialTemperature(temp,rh) def un(V): """ North relative u component """ return ur(DerivedGridFactory.createTrueFlowVector(V)) def ur(V): """ Grid relative u component """ return DerivedGridFactory.getUComponent(V) def vn(V): """ North relative v component """ return vr(DerivedGridFactory.createTrueFlowVector(V)) def vor(V): """ Relative Vorticity <div class=jython> VOR ( V ) = DDX ( v ) - DDY ( u ) </div> """ return sub(ddx(vr(V)),ddy(ur(V))) def vr(V): """ Grid relative v component """ return DerivedGridFactory.getVComponent(V) def xav(S): """ Average along a grid row <div class=jython> XAV (S) = ( S (X1) + S (X2) + ... + S (KXD) ) / KNT KXD = number of points in row KNT = number of non-missing points in row XAV for a row is stored at every point in that row. </div> """ return GridMath.applyFunctionToAxis(S, GridMath.FUNC_AVERAGE, GridMath.AXIS_X) def xsum(S): """ Sum along a grid row <div class=jython> XSUM (S) = ( S (X1) + S (X2) + ... + S (KXD) ) KXD = number of points in row XSUM for a row is stored at every point in that row. </div> """ return GridMath.applyFunctionToAxis(S, GridMath.FUNC_SUM, GridMath.AXIS_X) def yav(S): """ Average along a grid column <div class=jython> YAV (S) = ( S (Y1) + S (Y2) + ... + S (KYD) ) / KNT KYD = number of points in column KNT = number of non-missing points in column </div> """ return GridMath.applyFunctionToAxis(S, GridMath.FUNC_AVERAGE, GridMath.AXIS_Y) def ysum(S): """ Sum along a grid column <div class=jython> YSUM (S) = ( S (Y1) + S (Y2) + ... + S (KYD) ) KYD = number of points in row YSUM for a column is stored at every point in that column. </div> """ return GridMath.applyFunctionToAxis(S, GridMath.FUNC_SUM, GridMath.AXIS_Y) def zav(S): """ Average across the levels of a grid at all points <div class=jython> ZAV (S) = ( S (Z1) + S (Z2) + ... 
+ S (KZD) ) / KNT KZD = number of levels KNT = number of non-missing points in column </div> """ return GridMath.applyFunctionToLevels(S, GridMath.FUNC_AVERAGE) def zsum(S): """ Sum across the levels of a grid at all points <div class=jython> ZSUM (S) = ( S (Z1) + S (Z2) + ... + S (KZD) ) KZD = number of levels ZSUM for a vertical column is stored at every point </div> """ return GridMath.applyFunctionOverLevels(S, GridMath.FUNC_SUM) def wshr(V, Z, top, bottom): """ Magnitude of the vertical wind shear in a layer <div class=jython> WSHR ( V ) = MAG [ VLDF (V) ] / LDF (Z) </div> """ dv = mag(vldf(V,top,bottom)) dz = ldf(Z,top,bottom) return quo(dv,dz) # Vector output def age(obs,geo): """ Ageostrophic wind <div class=jython> AGE ( S ) = [ u (OBS) - u (GEO(S)), v (OBS) - v (GEO(S)) ] </div> """ return sub(obs,geo) def circv(S, D=2): """ <div class=jython> Apply a circular aperature smoothing to the grid points. The weighting function is the circular aperature diffraction function. D is the radius of influence in grid increments, increasing D increases the smoothing. (default D=2) </div> """ return GridUtil.smooth(S, "CIRC", int(D)) def cresv(S, D=2): """ <div class=jython> Apply a Cressman smoothing to the grid points. The smoothed value is given by a weighted average of surrounding grid points. D is the radius of influence in grid increments, increasing D increases the smoothing. (default D=2) </div> """ return GridUtil.smooth(S, "CRES", int(D)) def dvdx(V): """ Partial x derivative of a vector <div class=jython> DVDX ( V ) = [ DDX (u), DDX (v) ] </div> """ return vecr(ddx(ur(V)), ddx(vr(V))) def dvdy(V): """ Partial x derivative of a vector <div class=jython> DVDY ( V ) = [ DDY (u), DDY (v) ] </div> """ return vecr(ddy(ur(V)), ddy(vr(V))) def frnt(S,V): """ Frontogenesis function from theta and the wind <div class=jython> FRNT ( THTA, V ) = 1/2 * MAG ( GRAD (THTA) ) * ( DEF * COS (2 * BETA) - DIV ) <p> Where: BETA = ASIN ( (-DDX (THTA) * COS (PSI) <br> - DDY (THTA) * SIN (PSI))/ <br> MAG ( GRAD (THTA) ) ) <br> PSI = 1/2 ATAN2 ( SHR / STR ) <br> </div> """ shear = shr(V) strch = strd(V) psi = .5*atn2(shear,strch) dxt = ddx(S) dyt = ddy(S) cosd = cos(psi) sind = sin(psi) gradt = grad(S) mgradt = mag(gradt) a = -cosd*dxt-sind*dyt beta = asin(a/mgradt) frnto = .5*mgradt*(defr(V)*cos(2*beta)-div(V)) return frnto def geo(z): """ geostrophic wind from height <div class=jython> GEO ( S ) = [ - DDY (S) * const / CORL, DDX (S) * const / CORL ] </div> """ return DerivedGridFactory.createGeostrophicWindVector(z) def grad(S): """ Gradient of a scalar <div class=jython> GRAD ( S ) = [ DDX ( S ), DDY ( S ) ] </div> """ return vecr(ddx(S),ddy(S)) def gwfv(V, N=6): """ <div class=jython> Horizontal smoothing using normally distributed weights with theoretical response of 1/e for N * delta-x wave. Increasing N increases the smoothing. (default N=6) </div> """ return gwfs(V, N) def inad(V1,V2): """ Inertial advective wind <div class=jython> INAD ( V1, V2 ) = [ DOT ( V1, GRAD (u2) ), DOT ( V1, GRAD (v2) ) ] </div> """ return vecr(dot(V1,grad(ur(V2))),dot(V1,grad(vr(V2)))) def qvec(S,V): """ Q-vector at a level ( K / m / s ) <div class=jython> QVEC ( S, V ) = [ - ( DOT ( DVDX (V), GRAD (S) ) ), - ( DOT ( DVDY (V), GRAD (S) ) ) ] where S can be any thermal paramenter, usually THTA. 
</div> """ grads = grad(S) qvecu = newName(-dot(dvdx(V),grads),"qvecu") qvecv = newName(-dot(dvdy(V),grads),"qvecv") return vecr(qvecu,qvecv) def qvcl(THTA,V): """ Q-vector ( K / m / s ) <div class=jython> QVCL ( THTA, V ) = ( 1/( D (THTA) / DP ) ) * [ ( DOT ( DVDX (V), GRAD (THTA) ) ), ( DOT ( DVDY (V), GRAD (THTA) ) ) ] </div> """ dtdp = GridMath.partial(THTA,2) gradt = grad(THTA) qvecudp = newName(quo(dot(dvdx(V),gradt),dtdp),"qvecudp") qvecvdp = newName(quo(dot(dvdy(V),gradt),dtdp),"qvecvdp") return vecr(qvecudp,qvecvdp) def rectv(S, D=2): """ <div class=jython> Apply a rectangular aperature smoothing to the grid points. The weighting function is the product of the rectangular aperature diffraction function in the x and y directions. D is the radius of influence in grid increments, increasing D increases the smoothing. (default D=2) </div> """ return GridUtil.smooth(S, "RECT", int(D)) def sm5v(V): """ Smooth a scalar grid using a 5-point smoother (see sm5s) """ return sm5s(V) def sm9v(V): """ Smooth a scalar grid using a 9-point smoother (see sm9s) """ return sm9s(V) def thrm(S, level1, level2, unit=None): """ Thermal wind <div class=jython> THRM ( S ) = [ u (GEO(S)) (level1) - u (GEO(S)) (level2), v (GEO(S)) (level1) - v (GEO(S)) (level2) ] </div> """ return vldf(geo(S),level1,level2, unit) def vadd(V1,V2): """ add the components of 2 vectors <div class=jython> VADD (V1, V2) = [ u1+u2, v1+v2 ] </div> """ return add(V1,V2) def vecn(S1,S2): """ Make a true north vector from two components <div class=jython> VECN ( S1, S2 ) = [ S1, S2 ] </div> """ return makeTrueVector(S1,S2) def vecr(S1,S2): """ Make a vector from two components <div class=jython> VECR ( S1, S2 ) = [ S1, S2 ] </div> """ return makeVector(S1,S2) def vlav(V,level1,level2, unit=None): """ calculate the vector layer average <div class=jython> VLDF(V) = [(u(level1) - u(level2))/2, (v(level1) - v(level2))/2] </div> """ return layerAverage(V, level1, level2, unit) def vldf(V,level1,level2, unit=None): """ calculate the vector layer difference <div class=jython> VLDF(V) = [u(level1) - u(level2), v(level1) - v(level2)] </div> """ return layerDiff(V,level1,level2, unit) def vmul(V1,V2): """ Multiply the components of 2 vectors <div class=jython> VMUL (V1, V2) = [ u1*u2, v1*v2 ] </div> """ return mul(V1,V2) def vquo(V1,V2): """ Divide the components of 2 vectors <div class=jython> VQUO (V1, V2) = [ u1/u2, v1/v2 ] </div> """ return quo(V1,V2) def vsub(V1,V2): """ subtract the components of 2 vectors <div class=jython> VSUB (V1, V2) = [ u1-u2, v1-v2 ] </div> """ return sub(V1,V2) def LPIndex(u, v, z, t, top, bottom, unit): """ calculate the wind shear between discrete layers <div class=jython> LP = 7.268DUDZ + 0.718DTDN + 0.318DUDN - 2.52 </div> """ Z = windShear(u, v, z, top, bottom, unit)*7.268 uwind = getSliceAtLevel(u, top) vwind = getSliceAtLevel(v, top) temp = newUnit(getSliceAtLevel(t, top), "temperature", "celsius") HT = sqrt(ddx(temp)*ddx(temp) + ddy(temp)*ddy(temp))*0.718 HU = (ddx(vwind) + ddy(uwind))*0.318 L = add(noUnit(Z), add(noUnit(HU), noUnit(HT))) L = (L - 2.520)*(-0.59) P= 1.0/(1.0 + GridMath.applyFunctionOverGridsExt(L,"exp")) LP = setLevel(P ,top, unit) return LP def EllrodIndex(u, v, z, top, bottom, unit): """ calculate the wind shear between discrete layers <div class=jython> EI = VWS X ( DEF + DIV) </div> """ VWS = windShear(u, v, z, top, bottom, unit)*100.0 # uwind = getSliceAtLevel(u, top) vwind = getSliceAtLevel(v, top) DIV = (ddx(uwind) + ddy(vwind))* (-1.0) # DSH = ddx(vwind) + ddy(uwind) DST = 
ddx(uwind) - ddy(vwind) DEF = sqrt(DSH * DSH + DST * DST) EI = mul(noUnit(VWS), add(noUnit(DEF), noUnit(DIV))) return setLevel(EI, top, unit)
26.75461
89
0.584721
0
0
0
0
0
0
0
0
12,392
0.656982
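The grid diagnostics in the record above (vor, div, shr, strd, ...) delegate their horizontal derivatives to the IDV GridMath/GridUtil classes, so they are not runnable on their own. As a rough, self-contained illustration of the same relative-vorticity formula VOR(V) = DDX(v) - DDY(u), a plain NumPy sketch with an assumed uniform grid spacing could look like the following; the spacings and the test wind field are invented for the example and are not part of the record.

# Minimal NumPy sketch of VOR(V) = DDX(v) - DDY(u) with centered differences.
# dx, dy (metres) and the solid-body test field are illustrative assumptions.
import numpy as np

def relative_vorticity(u, v, dx=1000.0, dy=1000.0):
    """Return dv/dx - du/dy on a regular 2-D grid (y, x ordering)."""
    dvdx = np.gradient(v, dx, axis=1)
    dudy = np.gradient(u, dy, axis=0)
    return dvdx - dudy

if __name__ == "__main__":
    y, x = np.meshgrid(np.arange(50) * 1000.0, np.arange(50) * 1000.0, indexing="ij")
    u = -0.0001 * y          # solid-body-like rotation
    v = 0.0001 * x
    print(relative_vorticity(u, v).mean())   # ~2e-4 s^-1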
d5effb4acc4b4904be8e5099e47cd060230843fe
2,376
py
Python
app.py
DevilBit/Twitter-Bot
6f1b285aeb5faf37906d575775a927e69a5321d6
[ "MIT" ]
null
null
null
app.py
DevilBit/Twitter-Bot
6f1b285aeb5faf37906d575775a927e69a5321d6
[ "MIT" ]
null
null
null
app.py
DevilBit/Twitter-Bot
6f1b285aeb5faf37906d575775a927e69a5321d6
[ "MIT" ]
1
2021-03-08T20:05:23.000Z
2021-03-08T20:05:23.000Z
from selenium import webdriver #to get the browser from selenium.webdriver.common.keys import Keys #to send key to browser import getpass #to get password safely import time #to pause the program #a calss to store all twetter related objects and functions class twitter_bot: def __init__(self, username, password): self.username = username self.password = password self.bot = webdriver.Firefox() #login function def login(self): bot = self.bot bot.get('https://twitter.com/login') #sleep to wait for the browser to get the website time.sleep(3) email = bot.find_element_by_class_name('js-username-field') #get the email field password = bot.find_element_by_class_name('js-password-field') #get the password field #clear the email and password field just in case of autofill email.clear() password.clear() #fill in email field email.send_keys(self.username) time.sleep(2) #fill in password field password.send_keys(self.password) time.sleep(2) #click the login button bot.find_element_by_class_name("EdgeButtom--medium").click() time.sleep(3) def like_tweet(self, search): bot = self.bot #use keyword to search bot.get('https://twitter.com/search?q=' + search + '&src=typd') bot.implicitly_wait(3) #get posts for i in range(0, 30): bot.execute_script('window.scrollTo(0, document.body.scrollHeight)') time.sleep(10) tweets = bot.find_elements_by_class_name('tweet') links = [element.get_attribute('data-permalink-path') for element in tweets] #like posts for link in links: bot.get('https://twitter.com/' + link) try: bot.find_element_by_class_name('HeartAnimation').click() time.sleep(10) except Exception as ex: time.sleep(60) if __name__ == '__main__': username = input('Email: ') password = getpass.getpass('Password: ') search = input('Please enter keyword: ') user = twitter_bot(username, password) user.login() time.sleep(10) user.like_tweet(search)
34.941176
95
0.603114
1,842
0.775253
0
0
0
0
0
0
734
0.308923
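The bot in the record above paces itself with fixed time.sleep() calls around each page load. A commonly used alternative is Selenium's explicit-wait pattern; the sketch below shows only that pattern, and the 'js-username-field' class name is copied from the record as an assumption (it may no longer exist on the live site).

# Hedged sketch of an explicit wait replacing a fixed sleep before login.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_login_form(driver, timeout=15):
    """Block until the username field is present, or raise TimeoutException."""
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.CLASS_NAME, "js-username-field"))
    )

if __name__ == "__main__":
    driver = webdriver.Firefox()
    try:
        driver.get("https://twitter.com/login")
        email_field = wait_for_login_form(driver)
        email_field.send_keys("user@example.com")
    finally:
        driver.quit()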
d5f13f54fb0bf75e7d45a4d1bb426a38fb3fb255
3,403
py
Python
visualization.py
shyhyawJou/GradCAM-pytorch
8159f077552fc71055fe97c17bf8544d32cc8b0f
[ "Apache-2.0" ]
null
null
null
visualization.py
shyhyawJou/GradCAM-pytorch
8159f077552fc71055fe97c17bf8544d32cc8b0f
[ "Apache-2.0" ]
null
null
null
visualization.py
shyhyawJou/GradCAM-pytorch
8159f077552fc71055fe97c17bf8544d32cc8b0f
[ "Apache-2.0" ]
null
null
null
import torch import torch.nn as nn from torch.nn import functional as F from PIL import Image import cv2 as cv from matplotlib import cm import numpy as np class GradCAM: """ #### Args: layer_name: module name (not child name), if None, will use the last layer before average pooling , default is None """ def __init__(self, model, device, layer_name=None, close_some_grad=True): if layer_name is None: layer_name = self.get_layer_name(model) if layer_name is None: raise ValueError( "There is no global average pooling layer, plz specify 'layer_name'" ) for n, m in model.named_children(): if close_some_grad: m.requires_grad_(False) for sub_n, sub_m in m.named_modules(): if '.'.join((n, sub_n)) == layer_name: sub_m.register_forward_hook(self.forward_hook) sub_m.register_full_backward_hook(self.backward_hook) m.requires_grad_(True) break model = model.to(device) self.model = model self.device = device self.feature_maps = {} self.gradients = {} def get_heatmap(self, img, img_tensor): self.model.zero_grad() img_tensor = img_tensor.to(self.device) outputs = self.model(img_tensor) _, pred_label = outputs.max(1) # outputs shape = 1x2 outputs[0][pred_label].backward() with torch.no_grad(): feature_maps = self.feature_maps["output"] # "gradients" is a tuple with one item grad_weights = self.gradients["output"][0] h, w = grad_weights.size()[-2:] grad_weights = grad_weights.sum((2,3), True) / (h * w) cam = (grad_weights * feature_maps).sum(1) F.relu(cam, True) cam = cam / cam.max() * 255 cam = cam.to(dtype=torch.uint8, device="cpu") cam = cam.numpy().transpose(1,2,0) cam = cv.resize(cam, img.size[:2], interpolation=4) cam = np.uint8(255 * cm.get_cmap("jet")(cam.squeeze())) if not isinstance(img, np.ndarray): img = np.asarray(img) img_size = img.shape[:2][::-1] # w, h overlay = np.uint8(0.6*img + 0.4 * cam[:,:,:3]) overlay = Image.fromarray(overlay) if overlay.size != img_size: overlay = overlay.resize(img_size, Image.BILINEAR) return outputs.detach(), overlay def get_layer_name(self, model): layer_name = None for n, m in model.named_children(): for sub_n, sub_m in m.named_modules(): if isinstance(sub_m, (nn.AdaptiveAvgPool2d, nn.AvgPool2d)): layer_name = tmp tmp = '.'.join((n, sub_n)) return layer_name def forward_hook(self, module, x, y): #self.feature_maps["input"] = x self.feature_maps["output"] = y def backward_hook(self, module, x, y): #self.gradients["input"] = x self.gradients["output"] = y self.gradients["output"] = y
34.72449
85
0.528945
3,210
0.943285
0
0
0
0
0
0
449
0.131942
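The get_heatmap() method in the record above implements the core Grad-CAM weighting: channel weights are the spatial average of the gradients, and the map is ReLU of the weighted sum of feature maps. The stand-alone sketch below mirrors just that step; the tensor shapes are illustrative.

# Minimal sketch of the Grad-CAM weighting used in get_heatmap() above.
import torch
import torch.nn.functional as F

def gradcam_map(feature_maps: torch.Tensor, gradients: torch.Tensor) -> torch.Tensor:
    # feature_maps, gradients: (N, C, H, W)
    weights = gradients.mean(dim=(2, 3), keepdim=True)   # per-channel weights (N, C, 1, 1)
    cam = (weights * feature_maps).sum(dim=1)            # weighted sum over channels (N, H, W)
    return F.relu(cam)

if __name__ == "__main__":
    A = torch.rand(1, 8, 7, 7)      # stand-in feature maps
    dYdA = torch.rand(1, 8, 7, 7)   # stand-in gradients of the score w.r.t. A
    print(gradcam_map(A, dYdA).shape)   # torch.Size([1, 7, 7])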
d5f302c5d8d693812839ea69e155909e598db642
19,149
py
Python
frame_2D_alg/alternative versions/intra_blob_xy.py
Mechachleopteryx/CogAlg
723104e1f57010e52f1dc249ba53ba58db0a991b
[ "MIT" ]
null
null
null
frame_2D_alg/alternative versions/intra_blob_xy.py
Mechachleopteryx/CogAlg
723104e1f57010e52f1dc249ba53ba58db0a991b
[ "MIT" ]
null
null
null
frame_2D_alg/alternative versions/intra_blob_xy.py
Mechachleopteryx/CogAlg
723104e1f57010e52f1dc249ba53ba58db0a991b
[ "MIT" ]
null
null
null
''' 2D version of 1st-level algorithm is a combination of frame_blobs, intra_blob, and comp_P: optional raster-to-vector conversion. intra_blob recursively evaluates each blob for two forks of extended internal cross-comparison and sub-clustering: der+: incremental derivation cross-comp in high-variation edge areas of +vg: positive deviation of gradient triggers comp_g, rng+: incremental range cross-comp in low-variation flat areas of +v--vg: positive deviation of negated -vg triggers comp_r. Each adds a layer of sub_blobs per blob. Please see diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/intra_blob_2_fork_scheme.png Blob structure, for all layers of blob hierarchy: root_dert__, Dert = I, iDy, iDx, G, Dy, Dx, M, S (area), Ly (vertical dimension) # I: input, (iDy, iDx): angle of input gradient, G: gradient, (Dy, Dx): vertical and lateral Ds, M: match sign, box, # y0, yn, x0, xn dert__, # box of derts, each = i, idy, idx, g, dy, dx, m stack_[ stack_params, Py_ [(P_params, dert_)]]: refs down blob formation tree, in vertical (horizontal) order # next fork: fcr, # flag comp rng, also clustering criterion in dert and Dert: g in der+ fork, i+m in rng+ fork? fig, # flag input is gradient rdn, # redundancy to higher layers rng, # comp range sub_layers # [sub_blobs ]: list of layers across sub_blob derivation tree # deeper layers are nested, multiple forks: no single set of fork params? ''' from collections import deque, defaultdict from class_cluster import ClusterStructure, NoneType from class_bind import AdjBinder from frame_blobs_yx import assign_adjacents from intra_comp_g import comp_g, comp_r from itertools import zip_longest from class_stream import BlobStreamer from utils import pairwise import numpy as np # from comp_P_draft import comp_P_blob # filters, All *= rdn: ave = 50 # fixed cost per dert, from average m, reflects blob definition cost, may be different for comp_a? 
aveB = 50 # fixed cost per intra_blob comp and clustering class CDeepP(ClusterStructure): I = int G = int Dy = int Dx = int M = int iDy = int iDx = int L = int x0 = int sign = NoneType class CDeepStack(ClusterStructure): I = int G = int Dy = int Dx = int M = int iDy = int iDx = int S = int Ly = int y0 = int Py_ = list blob = object down_connect_cnt = int sign = NoneType class CDeepBlob(ClusterStructure): Dert = dict box = list stack_ = list sign = NoneType open_stacks = int root_dert__ = object dert__ = object mask = object adj_blobs = list fopen = bool margin = list fcr = bool fig = bool rdn = float rng = int Ls = int # for visibility and next-fork rdn sub_layers = list # -------------------------------------------------------------------------------------------------------------- # functions, ALL WORK-IN-PROGRESS: def intra_blob(blob, rdn, rng, fig, fcr, **kwargs): # recursive input rng+ | der+ cross-comp within blob # fig: flag input is g | p, fcr: flag comp over rng+ | der+ if kwargs.get('render', None) is not None: # stop rendering sub-blobs when blob is too small if blob.Dert['S'] < 100: kwargs['render'] = False spliced_layers = [] # to extend root_blob sub_layers ext_dert__, ext_mask = extend_dert(blob) if fcr: dert__, mask = comp_r(ext_dert__, fig, fcr, ext_mask) # -> m sub_blobs else: dert__, mask = comp_g(ext_dert__, ext_mask) # -> g sub_blobs: if dert__[0].shape[0] > 2 and dert__[0].shape[1] > 2 and False in mask: # min size in y and x, least one dert in dert__ sub_blobs = cluster_derts(dert__, mask, ave * rdn, fcr, fig, **kwargs) # fork params: blob.fcr = fcr blob.fig = fig blob.rdn = rdn blob.rng = rng blob.Ls = len(sub_blobs) # for visibility and next-fork rdn blob.sub_layers = [sub_blobs] # 1st layer of sub_blobs for sub_blob in sub_blobs: # evaluate for intra_blob comp_g | comp_r: G = blob.Dert['G']; adj_G = blob.adj_blobs[2] borrow = min(abs(G), abs(adj_G) / 2) # or adjacent M if negative sign? if sub_blob.sign: if sub_blob.Dert['M'] - borrow > aveB * rdn: # M - (intra_comp value lend to edge blob) # comp_r fork: blob.sub_layers += intra_blob(sub_blob, rdn + 1 + 1 / blob.Ls, rng * 2, fig=fig, fcr=1, **kwargs) # else: comp_P_ elif sub_blob.Dert['G'] + borrow > aveB * rdn: # G + (intra_comp value borrow from flat blob) # comp_g fork: blob.sub_layers += intra_blob(sub_blob, rdn + 1 + 1 / blob.Ls, rng=rng, fig=1, fcr=0, **kwargs) # else: comp_P_ spliced_layers = [spliced_layers + sub_layers for spliced_layers, sub_layers in zip_longest(spliced_layers, blob.sub_layers, fillvalue=[])] return spliced_layers def cluster_derts(dert__, mask, Ave, fcr, fig, render=False): # similar to frame_to_blobs if fcr: # comp_r output; form clustering criterion: if fig: crit__ = dert__[0] + dert__[6] - Ave # eval by i + m, accum in rng; dert__[:,:,0] if not transposed else: crit__ = Ave - dert__[3] # eval by -g, accum in rng else: # comp_g output crit__ = dert__[6] - Ave # comp_g output eval by m, or clustering is always by m? 
root_dert__ = dert__ # derts after the comps operation, which is the root_dert__ dert__ = [*zip(*dert__)] # transpose dert__ into shape [y, params, x] sub_blobs = [] # from form_blob: stack_ = deque() # buffer of running vertical stacks of Ps stack_binder = AdjBinder(CDeepStack) if render: streamer = BlobStreamer(CDeepBlob, crit__, mask) if render: streamer = BlobStreamer(CDeepBlob, crit__, mask) for y, dert_ in enumerate(dert__): # in height, first and last row are discarded; print(f'Processing intra line {y}...') # if False in mask[i]: # [y,x,params], there is at least one dert in line P_binder = AdjBinder(CDeepP) # binder needs data about clusters of the same level P_ = form_P_(zip(*dert_), crit__[y], mask[y], P_binder) # horizontal clustering, adds a row of Ps if render: render = streamer.update_blob_conversion(y, P_) # if return False, stop rendering P_ = scan_P_(P_, stack_, root_dert__, sub_blobs, P_binder) # vertical clustering, adds up_connects per P and down_connect_cnt per stack stack_ = form_stack_(P_, root_dert__, sub_blobs, y) stack_binder.bind_from_lower(P_binder) while stack_: # frame ends, last-line stacks are merged into their blobs: form_blob(stack_.popleft(), root_dert__, sub_blobs) blob_binder = AdjBinder(CDeepBlob) blob_binder.bind_from_lower(stack_binder) assign_adjacents(blob_binder) # add adj_blobs to each blob # sub_blobs = find_adjacent(sub_blobs) if render: # rendering mode after blob conversion streamer.end_blob_conversion(y) return sub_blobs # clustering functions: # ------------------------------------------------------------------------------------------------------------------- def form_P_(dert_, crit_, mask_, binder): # segment dert__ into P__, in horizontal ) vertical order P_ = deque() # row of Ps sign_ = crit_ > 0 x0 = 0 try: while mask_[x0]: # skip until not masked next(dert_) x0 += 1 except IndexError: return P_ # the whole line is masked, return an empty P I, iDy, iDx, G, Dy, Dx, M, L = *next(dert_), 1 # initialize P params _sign = sign_[x0] _mask = mask_[x0] # mask bit per dert for x, (i, idy, idx, g, dy, dx, m) in enumerate(dert_, start=x0+1): # loop left to right in each row of derts mask = mask_[x] if ~mask: # current dert is not masked sign = sign_[x] if ~_mask and sign != _sign: # prior dert is not masked and sign changed # pack P P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L,x0=x0, sign=_sign) P_.append(P) # initialize P params: I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x elif _mask: I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x # current dert is masked elif ~_mask: # prior dert is not masked # pack P P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L, x0=x0, sign=_sign) P_.append(P) # initialize P params: (redundant) # I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x + 1 if ~mask: # accumulate P params: I += i iDy += idy iDx += idx G += g Dy += dy Dx += dx M += m L += 1 _sign = sign # prior sign _mask = mask if ~_mask: # terminate and pack last P in a row if prior dert is unmasked P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L, x0=x0, sign=_sign) P_.append(P) for _P, P in pairwise(P_): if _P.x0 + _P.L == P.x0: # check if Ps are adjacents binder.bind(_P, P) return P_ def scan_P_(P_, stack_, root_dert__, sub_blobs, binder): # merge P into higher-row stack of Ps with same sign and x_coord overlap next_P_ = deque() # to recycle P + up_connect_ that finished scanning _P, will be converted into next_stack_ if P_ and stack_: # if both input row and higher row have 
any Ps / _Ps left P = P_.popleft() # load left-most (lowest-x) input-row P stack = stack_.popleft() # higher-row stacks _P = stack.Py_[-1] # last element of each stack is higher-row P up_connect_ = [] # list of same-sign x-overlapping _Ps per P while True: # while both P_ and stack_ are not empty x0 = P.x0 # first x in P xn = x0 + P.L # first x beyond P _x0 = _P.x0 # first x in _P _xn = _x0 + _P.L # first x beyond _P if stack.G > 0: # check for overlaps in 8 directions, else a blob may leak through its external blob if _x0 - 1 < xn and x0 < _xn + 1: # x overlap between loaded P and _P if P.sign == stack.sign: # sign match stack.down_connect_cnt += 1 up_connect_.append(stack) # buffer P-connected higher-row stacks into P' up_connect_ else: binder.bind(_P, P) else: # -G, check for orthogonal overlaps only: 4 directions, edge blobs are more selective if _x0 < xn and x0 < _xn: # x overlap between loaded P and _P if P.sign == stack.sign: # sign match stack.down_connect_cnt += 1 up_connect_.append(stack) # buffer P-connected higher-row stacks into P' up_connect_ else: binder.bind(_P, P) if (xn < _xn or # _P overlaps next P in P_ xn == _xn and stack.sign): # sign taken accounted next_P_.append((P, up_connect_)) # recycle _P for the next run of scan_P_ up_connect_ = [] if P_: P = P_.popleft() # load next P else: # terminate loop if stack.down_connect_cnt != 1: # terminate stack, merge it into up_connects' blobs form_blob(stack, root_dert__, sub_blobs) break else: # no next-P overlap if stack.down_connect_cnt != 1: # terminate stack, merge it into up_connects' blobs form_blob(stack, root_dert__, sub_blobs) if stack_: # load stack with next _P stack = stack_.popleft() _P = stack.Py_[-1] else: # no stack left: terminate loop next_P_.append((P, up_connect_)) break while P_: # terminate Ps and stacks that continue at row's end next_P_.append((P_.popleft(), [])) # no up_connect while stack_: form_blob(stack_.popleft(), root_dert__, sub_blobs) # down_connect_cnt always == 0 return next_P_ # each element is P + up_connect_ refs def form_stack_(P_, root_dert__, sub_blobs, y): # Convert or merge every P into its stack of Ps, merge blobs next_stack_ = deque() # converted to stack_ in the next run of scan_P_ while P_: P, up_connect_ = P_.popleft() I, G, Dy, Dx, M, iDy, iDx, L, x0, s = P.unpack() xn = x0 + L # next-P x0 if not up_connect_: # initialize new stack for each input-row P that has no connections in higher row: blob = CDeepBlob(Dert=dict(I=0, G=0, Dy=0, Dx=0, M=0, iDy=0, iDx=0, S=0, Ly=0), box=[y, x0, xn], stack_=[], sign=s, open_stacks=1) new_stack = CDeepStack(I=I, G=G, Dy=0, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1, y0=y, Py_=[P], blob=blob, down_connect_cnt=0, sign=s) new_stack.hid = blob.id blob.stack_.append(new_stack) else: if len(up_connect_) == 1 and up_connect_[0].down_connect_cnt == 1: # P has one up_connect and that up_connect has one down_connect=P: merge P into up_connect stack: new_stack = up_connect_[0] new_stack.accumulate(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1) new_stack.Py_.append(P) # Py_: vertical buffer of Ps new_stack.down_connect_cnt = 0 # reset down_connect_cnt blob = new_stack.blob else: # if > 1 up_connects, or 1 up_connect that has > 1 down_connect_cnt: blob = up_connect_[0].blob # initialize new_stack with up_connect blob: new_stack = CDeepStack(I=I, G=G, Dy=0, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1, y0=y, Py_=[P], blob=blob, down_connect_cnt=0, sign=s) new_stack.hid = blob.id blob.stack_.append(new_stack) if len(up_connect_) > 1: # merge blobs of all 
up_connects if up_connect_[0].down_connect_cnt == 1: # up_connect is not terminated form_blob(up_connect_[0], root_dert__, sub_blobs) # merge stack of 1st up_connect into its blob for up_connect in up_connect_[1:len(up_connect_)]: # merge blobs of other up_connects into blob of 1st up_connect if up_connect.down_connect_cnt == 1: form_blob(up_connect, root_dert__, sub_blobs) if not up_connect.blob is blob: merged_blob = up_connect.blob I, G, Dy, Dx, M, iDy, iDx, S, Ly = merged_blob.Dert.values() accum_Dert(blob.Dert, I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=S, Ly=Ly) blob.open_stacks += merged_blob.open_stacks blob.box[0] = min(blob.box[0], merged_blob.box[0]) # extend box y0 blob.box[1] = min(blob.box[1], merged_blob.box[1]) # extend box x0 blob.box[2] = max(blob.box[2], merged_blob.box[2]) # extend box xn for stack in merged_blob.stack_: if not stack is up_connect: stack.blob = blob # blobs in other up_connects are references to blob in the first up_connect. stack.hid = blob.id blob.stack_.append(stack) # buffer of merged root stacks. up_connect.blob = blob up_connect.hid = blob.id blob.stack_.append(up_connect) blob.open_stacks -= 1 # overlap with merged blob. blob.box[1] = min(blob.box[1], x0) # extend box x0 blob.box[2] = max(blob.box[2], xn) # extend box xn P.hid = new_stack.id # assign higher cluster id for P next_stack_.append(new_stack) return next_stack_ def form_blob(stack, root_dert__, sub_blobs): # increment blob with terminated stack, check for blob termination I, G, Dy, Dx, M, iDy, iDx, S, Ly, y0, Py_, blob, down_connect_cnt, sign = stack.unpack() accum_Dert(blob.Dert, I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=S, Ly=Ly) # terminated stack is merged into continued or initialized blob (all connected stacks): blob.open_stacks += down_connect_cnt - 1 # incomplete stack cnt + terminated stack down_connect_cnt - 1: stack itself # open stacks contain Ps of a current row and may be extended with new x-overlapping Ps in next run of scan_P_ if blob.open_stacks == 0: # if number of incomplete stacks == 0 # blob is terminated and packed in blob root: last_stack = stack y0, x0, xn = blob.box yn = last_stack.y0 + last_stack.Ly mask = np.ones((yn - y0, xn - x0), dtype=bool) # mask box, then unmask Ps: for stack in blob.stack_: for y, P in enumerate(stack.Py_, start=stack.y0 - y0): x_start = P.x0 - x0 x_stop = x_start + P.L mask[y, x_start:x_stop] = False fopen = 0 # flag: blob on frame boundary if x0 == 0 or xn == root_dert__[0].shape[1] or y0 == 0 or yn == root_dert__[0].shape[0]: fopen = 1 blob.root_dert__ = root_dert__ blob.box = (y0, yn, x0, xn) blob.dert__ = [derts[y0:yn, x0:xn] for derts in root_dert__] blob.mask = mask blob.adj_blobs = [[], 0, 0] blob.fopen = fopen sub_blobs.append(blob) def extend_dert(blob): # extend dert borders (+1 dert to boundaries) y0, yn, x0, xn = blob.box # extend dert box: rY, rX = blob.root_dert__[0].shape # higher dert size # determine pad size y0e = max(0, y0 - 1) yne = min(rY, yn + 1) x0e = max(0, x0 - 1) xne = min(rX, xn + 1) # e is for extended # take ext_dert__ from part of root_dert__ ext_dert__ = [derts[y0e:yne, x0e:xne] if derts is not None else None for derts in blob.root_dert__] # pad mask: top, btm, left, right. 1 or 0 at boundaries mask = np.pad(blob.mask, ((y0 - y0e, yne - yn), (x0 - x0e, xne - xn)), mode='constant', constant_values=True) return ext_dert__, mask def accum_Dert(Dert: dict, **params) -> None: Dert.update({param: Dert[param] + value for param, value in params.items()})
44.740654
144
0.576584
784
0.040942
0
0
0
0
0
0
7,051
0.368218
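intra_blob() in the record above merges per-blob sub_layers of unequal depth level by level using zip_longest(fillvalue=[]). The toy example below isolates that splicing idiom with made-up blob names, since the surrounding classes are not importable from this dump.

# Small illustration of the layer-splicing idiom used in intra_blob() above.
from itertools import zip_longest

def splice(spliced_layers, sub_layers):
    # concatenate layer i of both lists; missing layers contribute nothing
    return [a + b for a, b in zip_longest(spliced_layers, sub_layers, fillvalue=[])]

if __name__ == "__main__":
    layers = []
    layers = splice(layers, [["blob_a1"], ["blob_a2"]])   # blob A has two layers
    layers = splice(layers, [["blob_b1"]])                # blob B has one layer
    print(layers)   # [['blob_a1', 'blob_b1'], ['blob_a2']]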
d5f33371ef4b57ee6f5f8e58e37840bbabd0819e
10,275
py
Python
examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_terrain_randomizer.py
felipeek/bullet3
6a59241074720e9df119f2f86bc01765917feb1e
[ "Zlib" ]
9,136
2015-01-02T00:41:45.000Z
2022-03-31T15:30:02.000Z
examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_terrain_randomizer.py
felipeek/bullet3
6a59241074720e9df119f2f86bc01765917feb1e
[ "Zlib" ]
2,424
2015-01-05T08:55:58.000Z
2022-03-30T19:34:55.000Z
examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_terrain_randomizer.py
felipeek/bullet3
6a59241074720e9df119f2f86bc01765917feb1e
[ "Zlib" ]
2,921
2015-01-02T10:19:30.000Z
2022-03-31T02:48:42.000Z
"""Generates a random terrain at Minitaur gym environment reset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os, inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(os.path.dirname(currentdir)) parentdir = os.path.dirname(os.path.dirname(parentdir)) os.sys.path.insert(0, parentdir) import itertools import math import enum import numpy as np from pybullet_envs.minitaur.envs import env_randomizer_base _GRID_LENGTH = 15 _GRID_WIDTH = 10 _MAX_SAMPLE_SIZE = 30 _MIN_BLOCK_DISTANCE = 0.7 _MAX_BLOCK_LENGTH = _MIN_BLOCK_DISTANCE _MIN_BLOCK_LENGTH = _MAX_BLOCK_LENGTH / 2 _MAX_BLOCK_HEIGHT = 0.05 _MIN_BLOCK_HEIGHT = _MAX_BLOCK_HEIGHT / 2 class PoissonDisc2D(object): """Generates 2D points using Poisson disk sampling method. Implements the algorithm described in: http://www.cs.ubc.ca/~rbridson/docs/bridson-siggraph07-poissondisk.pdf Unlike the uniform sampling method that creates small clusters of points, Poisson disk method enforces the minimum distance between points and is more suitable for generating a spatial distribution of non-overlapping objects. """ def __init__(self, grid_length, grid_width, min_radius, max_sample_size): """Initializes the algorithm. Args: grid_length: The length of the bounding square in which points are sampled. grid_width: The width of the bounding square in which points are sampled. min_radius: The minimum distance between any pair of points. max_sample_size: The maximum number of sample points around a active site. See details in the algorithm description. """ self._cell_length = min_radius / math.sqrt(2) self._grid_length = grid_length self._grid_width = grid_width self._grid_size_x = int(grid_length / self._cell_length) + 1 self._grid_size_y = int(grid_width / self._cell_length) + 1 self._min_radius = min_radius self._max_sample_size = max_sample_size # Flattern the 2D grid as an 1D array. The grid is used for fast nearest # point searching. self._grid = [None] * self._grid_size_x * self._grid_size_y # Generate the first sample point and set it as an active site. first_sample = np.array(np.random.random_sample(2)) * [grid_length, grid_width] self._active_list = [first_sample] # Also store the sample point in the grid. self._grid[self._point_to_index_1d(first_sample)] = first_sample def _point_to_index_1d(self, point): """Computes the index of a point in the grid array. Args: point: A 2D point described by its coordinates (x, y). Returns: The index of the point within the self._grid array. """ return self._index_2d_to_1d(self._point_to_index_2d(point)) def _point_to_index_2d(self, point): """Computes the 2D index (aka cell ID) of a point in the grid. Args: point: A 2D point (list) described by its coordinates (x, y). Returns: x_index: The x index of the cell the point belongs to. y_index: The y index of the cell the point belongs to. """ x_index = int(point[0] / self._cell_length) y_index = int(point[1] / self._cell_length) return x_index, y_index def _index_2d_to_1d(self, index2d): """Converts the 2D index to the 1D position in the grid array. Args: index2d: The 2D index of a point (aka the cell ID) in the grid. Returns: The 1D position of the cell within the self._grid array. """ return index2d[0] + index2d[1] * self._grid_size_x def _is_in_grid(self, point): """Checks if the point is inside the grid boundary. Args: point: A 2D point (list) described by its coordinates (x, y). Returns: Whether the point is inside the grid. 
""" return (0 <= point[0] < self._grid_length) and (0 <= point[1] < self._grid_width) def _is_in_range(self, index2d): """Checks if the cell ID is within the grid. Args: index2d: The 2D index of a point (aka the cell ID) in the grid. Returns: Whether the cell (2D index) is inside the grid. """ return (0 <= index2d[0] < self._grid_size_x) and (0 <= index2d[1] < self._grid_size_y) def _is_close_to_existing_points(self, point): """Checks if the point is close to any already sampled (and stored) points. Args: point: A 2D point (list) described by its coordinates (x, y). Returns: True iff the distance of the point to any existing points is smaller than the min_radius """ px, py = self._point_to_index_2d(point) # Now we can check nearby cells for existing points for neighbor_cell in itertools.product(xrange(px - 1, px + 2), xrange(py - 1, py + 2)): if not self._is_in_range(neighbor_cell): continue maybe_a_point = self._grid[self._index_2d_to_1d(neighbor_cell)] if maybe_a_point is not None and np.linalg.norm(maybe_a_point - point) < self._min_radius: return True return False def sample(self): """Samples new points around some existing point. Removes the sampling base point and also stores the new jksampled points if they are far enough from all existing points. """ active_point = self._active_list.pop() for _ in xrange(self._max_sample_size): # Generate random points near the current active_point between the radius random_radius = np.random.uniform(self._min_radius, 2 * self._min_radius) random_angle = np.random.uniform(0, 2 * math.pi) # The sampled 2D points near the active point sample = random_radius * np.array([np.cos(random_angle), np.sin(random_angle)]) + active_point if not self._is_in_grid(sample): continue if self._is_close_to_existing_points(sample): continue self._active_list.append(sample) self._grid[self._point_to_index_1d(sample)] = sample def generate(self): """Generates the Poisson disc distribution of 2D points. Although the while loop looks scary, the algorithm is in fact O(N), where N is the number of cells within the grid. When we sample around a base point (in some base cell), new points will not be pushed into the base cell because of the minimum distance constraint. Once the current base point is removed, all future searches cannot start from within the same base cell. Returns: All sampled points. The points are inside the quare [0, grid_length] x [0, grid_width] """ while self._active_list: self.sample() all_sites = [] for p in self._grid: if p is not None: all_sites.append(p) return all_sites class TerrainType(enum.Enum): """The randomzied terrain types we can use in the gym env.""" RANDOM_BLOCKS = 1 TRIANGLE_MESH = 2 class MinitaurTerrainRandomizer(env_randomizer_base.EnvRandomizerBase): """Generates an uneven terrain in the gym env.""" def __init__(self, terrain_type=TerrainType.TRIANGLE_MESH, mesh_filename="robotics/reinforcement_learning/minitaur/envs/testdata/" "triangle_mesh_terrain/terrain9735.obj", mesh_scale=None): """Initializes the randomizer. Args: terrain_type: Whether to generate random blocks or load a triangle mesh. mesh_filename: The mesh file to be used. The mesh will only be loaded if terrain_type is set to TerrainType.TRIANGLE_MESH. mesh_scale: the scaling factor for the triangles in the mesh file. """ self._terrain_type = terrain_type self._mesh_filename = mesh_filename self._mesh_scale = mesh_scale if mesh_scale else [1.0, 1.0, 0.3] def randomize_env(self, env): """Generate a random terrain for the current env. Args: env: A minitaur gym environment. 
""" if self._terrain_type is TerrainType.TRIANGLE_MESH: self._load_triangle_mesh(env) if self._terrain_type is TerrainType.RANDOM_BLOCKS: self._generate_convex_blocks(env) def _load_triangle_mesh(self, env): """Represents the random terrain using a triangle mesh. It is possible for Minitaur leg to stuck at the common edge of two triangle pieces. To prevent this from happening, we recommend using hard contacts (or high stiffness values) for Minitaur foot in sim. Args: env: A minitaur gym environment. """ env.pybullet_client.removeBody(env.ground_id) terrain_collision_shape_id = env.pybullet_client.createCollisionShape( shapeType=env.pybullet_client.GEOM_MESH, fileName=self._mesh_filename, flags=1, meshScale=self._mesh_scale) env.ground_id = env.pybullet_client.createMultiBody( baseMass=0, baseCollisionShapeIndex=terrain_collision_shape_id, basePosition=[0, 0, 0]) def _generate_convex_blocks(self, env): """Adds random convex blocks to the flat ground. We use the Possion disk algorithm to add some random blocks on the ground. Possion disk algorithm sets the minimum distance between two sampling points, thus voiding the clustering effect in uniform N-D distribution. Args: env: A minitaur gym environment. """ poisson_disc = PoissonDisc2D(_GRID_LENGTH, _GRID_WIDTH, _MIN_BLOCK_DISTANCE, _MAX_SAMPLE_SIZE) block_centers = poisson_disc.generate() for center in block_centers: # We want the blocks to be in front of the robot. shifted_center = np.array(center) - [2, _GRID_WIDTH / 2] # Do not place blocks near the point [0, 0], where the robot will start. if abs(shifted_center[0]) < 1.0 and abs(shifted_center[1]) < 1.0: continue half_length = np.random.uniform(_MIN_BLOCK_LENGTH, _MAX_BLOCK_LENGTH) / (2 * math.sqrt(2)) half_height = np.random.uniform(_MIN_BLOCK_HEIGHT, _MAX_BLOCK_HEIGHT) / 2 box_id = env.pybullet_client.createCollisionShape( env.pybullet_client.GEOM_BOX, halfExtents=[half_length, half_length, half_height]) env.pybullet_client.createMultiBody( baseMass=0, baseCollisionShapeIndex=box_id, basePosition=[shifted_center[0], shifted_center[1], half_height])
35.309278
98
0.706667
9,480
0.922628
0
0
0
0
0
0
4,851
0.472117
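The PoissonDisc2D class in the record above exists to guarantee one property: no two sampled block centers closer than min_radius (0.7 grid units on a 15 x 10 grid). The brute-force checker below states that property directly; it is a companion sketch, not part of the randomizer, and the uniform points are only there to show a distribution that violates it.

# Brute-force check of the Poisson-disk property the sampler above guarantees.
import itertools
import numpy as np

def respects_min_distance(points, min_radius):
    """True iff every pair of 2-D points is at least min_radius apart."""
    for p, q in itertools.combinations(points, 2):
        if np.linalg.norm(np.asarray(p) - np.asarray(q)) < min_radius:
            return False
    return True

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    uniform = rng.random((100, 2)) * [15.0, 10.0]   # plain uniform sampling clusters points
    print(respects_min_distance(uniform, 0.7))      # almost certainly False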
d5f35dd267171d89db5d5ed7c57d46dbcf723ae2
2,502
py
Python
polecat/db/sql/expression/values.py
furious-luke/polecat
7be5110f76dc42b15c922c1bb7d49220e916246d
[ "MIT" ]
4
2019-08-10T12:56:12.000Z
2020-01-21T09:51:20.000Z
polecat/db/sql/expression/values.py
furious-luke/polecat
7be5110f76dc42b15c922c1bb7d49220e916246d
[ "MIT" ]
71
2019-04-09T05:39:21.000Z
2020-05-16T23:09:24.000Z
polecat/db/sql/expression/values.py
furious-luke/polecat
7be5110f76dc42b15c922c1bb7d49220e916246d
[ "MIT" ]
null
null
null
from functools import partial from polecat.db.query import query as query_module from psycopg2.sql import SQL, Placeholder from .expression import Expression class Values(Expression): def __init__(self, values, relation=None): self.values = values self.relation = relation self.keyword = 'VALUES' def to_sql(self): if isinstance(self.values, query_module.Values): get_values_sql = partial( self.get_values_sql_from_values, self.values ) else: get_values_sql = partial( self.get_values_sql_from_dict, self.values ) return self.get_values_sql(get_values_sql) def get_values_sql(self, get_values_sql): values_sql, values_args = get_values_sql() joined_sql = SQL(', ').join( SQL('({})').format( SQL(', ').join(row_sql) ) for row_sql in values_sql ) return SQL('%s {}' % self.keyword).format(joined_sql), values_args def get_values_sql_from_values(self, values): column_values_sql = [] column_values = () for row in values.iter_rows(): row_values_sql = [] for column_name, column_value in row: value_sql, value = self.value_to_sql(column_value, column_name) row_values_sql.append(value_sql) column_values += value column_values_sql.append(row_values_sql) return column_values_sql, column_values def get_values_sql_from_dict(self, values_dict): column_values_sql = [] column_values = () for column_name, column_value in values_dict.items(): value_sql, value = self.value_to_sql(column_value, column_name) column_values_sql.append(value_sql) column_values += value return (column_values_sql,), column_values def value_to_sql(self, value, column_name=None): if isinstance(value, Expression): sql, args = value.to_sql() return SQL('{}').format(sql), args else: if self.relation and column_name: column = self.relation.get_column(column_name) value = column.to_db_value(value) return Placeholder(), (value,) def iter_column_names(self): if isinstance(self.values, dict): return self.values.keys() else: return self.values.iter_column_names()
34.75
79
0.61311
2,339
0.934852
0
0
0
0
0
0
33
0.013189
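The Values expression in the record above composes a VALUES clause out of psycopg2.sql pieces: one parenthesised group of Placeholder() per row, joined by commas, with the literal values collected separately for execute(). The sketch below shows the same composition pattern; the table and column identifiers and the INSERT wrapper are added here for a complete statement and are not part of the record's class.

# Composition pattern only; a real call needs a live connection for execute().
from psycopg2.sql import SQL, Identifier, Placeholder

rows = [{"name": "alpha", "rank": 1}, {"name": "beta", "rank": 2}]
columns = list(rows[0])

values_sql = SQL(", ").join(
    SQL("({})").format(SQL(", ").join(Placeholder() for _ in columns))
    for _ in rows
)
query = SQL("INSERT INTO {} ({}) VALUES {}").format(
    Identifier("example_table"),
    SQL(", ").join(Identifier(c) for c in columns),
    values_sql,
)
args = tuple(row[c] for row in rows for c in columns)

# cursor.execute(query, args) would render as:
# INSERT INTO "example_table" ("name", "rank") VALUES (%s, %s), (%s, %s)
print(args)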
d5f3f84aa262b2485923b0060a6795013deae56c
1,292
py
Python
python/day3p1.py
swilcox/2019adventofcode
b67261aae74805ba8c2f4b72f09dd79277224ebb
[ "MIT" ]
1
2020-01-18T18:24:18.000Z
2020-01-18T18:24:18.000Z
python/day3p1.py
swilcox/2019adventofcode
b67261aae74805ba8c2f4b72f09dd79277224ebb
[ "MIT" ]
null
null
null
python/day3p1.py
swilcox/2019adventofcode
b67261aae74805ba8c2f4b72f09dd79277224ebb
[ "MIT" ]
null
null
null
# 2019 advent day 3 MOVES = { 'R': (lambda x: (x[0], x[1] + 1)), 'L': (lambda x: (x[0], x[1] - 1)), 'U': (lambda x: (x[0] + 1, x[1])), 'D': (lambda x: (x[0] - 1, x[1])), } def build_route(directions: list) -> list: current_location = (0, 0) route = [] for d in directions: direction, amount = d[0], int(d[1:]) for _ in range(amount): current_location = MOVES[direction](current_location) route.append(current_location) return route def find_intersections(r1: list, r2: list) -> set: return set(r1).intersection(set(r2)) def find_shortest_manhattan_distance(points: set) -> int: return min((abs(p[0]) + abs(p[1])) for p in points) #R1 = 'R75,D30,R83,U83,L12,D49,R71,U7,L72' #R2 = 'U62,R66,U55,R34,D71,R55,D58,R83' #R1 = 'R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51' #R2 = 'U98,R91,D20,R16,D67,R40,U7,R15,U6,R7' def main(): #route1 = build_route(R1.split(',')) #route2 = build_route(R2.split(',')) with open('day3input.txt') as f: line1, line2 = f.readlines() route1 = build_route(line1.strip().split(',')) route2 = build_route(line2.strip().split(',')) print(find_shortest_manhattan_distance(find_intersections(route1, route2))) if __name__ == "__main__": main()
26.367347
79
0.600619
0
0
0
0
0
0
0
0
310
0.239938
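The record above traces each wire, intersects the visited points, and takes the smallest Manhattan distance. The self-contained check below applies the same approach to the first sample pair that appears commented out in the record; per the Advent of Code 2019 day-3 examples, the expected answer for that pair is 159.

# Compact re-statement of the approach, checked against the documented sample.
MOVES = {"R": (0, 1), "L": (0, -1), "U": (1, 0), "D": (-1, 0)}

def trace(path):
    y, x, seen = 0, 0, set()
    for step in path.split(","):
        dy, dx = MOVES[step[0]]
        for _ in range(int(step[1:])):
            y, x = y + dy, x + dx
            seen.add((y, x))
    return seen

if __name__ == "__main__":
    wire1 = trace("R75,D30,R83,U83,L12,D49,R71,U7,L72")
    wire2 = trace("U62,R66,U55,R34,D71,R55,D58,R83")
    crossings = wire1 & wire2
    print(min(abs(y) + abs(x) for y, x in crossings))   # 159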
d5f42d830df55813fe6234674e4d597dccbd7f59
1,054
py
Python
examples/demo/python/catalog.py
JavDomGom/mist
83ae9f67df61ff2387a7d424cff0f8591a6a645f
[ "Apache-2.0" ]
1
2021-04-23T17:13:31.000Z
2021-04-23T17:13:31.000Z
examples/demo/python/catalog.py
JavDomGom/mist
83ae9f67df61ff2387a7d424cff0f8591a6a645f
[ "Apache-2.0" ]
null
null
null
examples/demo/python/catalog.py
JavDomGom/mist
83ae9f67df61ff2387a7d424cff0f8591a6a645f
[ "Apache-2.0" ]
null
null
null
import asyncio async def searchDomains(domain, q): domains = [] proc = await asyncio.create_subprocess_shell(f"dnsrecon -d {domain} -t crt", stdout=asyncio.subprocess.PIPE) line = True while line: line = (await proc.stdout.readline()).decode('utf-8') fields = line.split() if len(fields)>1 and fields[1]=="A": if q: await q.put(fields[2]) domains.append(fields[2]) return domains async def findOpenPorts(ip, ports, q): openPorts = [] proc = await asyncio.create_subprocess_shell(f"nmap -p {ports} --open {ip}",stdout=asyncio.subprocess.PIPE) line = True while line: line = (await proc.stdout.readline()).decode('utf-8') fields = line.split() if len(fields)>1 and fields[1]=="open": openPort = fields[0].split("/") if q: await q.put({"ip": ip, "port": openPort[0], "protocol": openPort[1]}) openPorts.append({"port": openPort[0], "protocol": openPort[1]}) return openPorts
36.344828
112
0.588235
0
0
0
0
0
0
1,035
0.981973
122
0.11575
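searchDomains() and findOpenPorts() in the record above push results onto an optional asyncio queue as they parse tool output, so a consumer can react before the subprocess finishes. The sketch below shows only that hand-off pattern; the fake producer stands in for the dnsrecon/nmap subprocess calls, which need those tools installed, and the sentinel convention is an assumption of this example.

# Queue hand-off pattern assumed by the q parameter above, with a fake producer.
import asyncio

async def fake_scan(q: asyncio.Queue):
    for host in ("198.51.100.10", "198.51.100.11"):
        await asyncio.sleep(0.1)     # pretend we are parsing tool output
        await q.put(host)
    await q.put(None)                # sentinel: scan finished

async def consumer(q: asyncio.Queue):
    while (item := await q.get()) is not None:
        print("found:", item)

async def main():
    q = asyncio.Queue()
    await asyncio.gather(fake_scan(q), consumer(q))

if __name__ == "__main__":
    asyncio.run(main())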
d5f5577604a264eefbdbdf102a315e607e68f2da
15,156
py
Python
tests/api/v3_1_0/test_security_groups_acls.py
CiscoISE/ciscoisesdk
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
[ "MIT" ]
36
2021-05-18T16:24:19.000Z
2022-03-05T13:44:41.000Z
tests/api/v3_1_0/test_security_groups_acls.py
CiscoISE/ciscoisesdk
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
[ "MIT" ]
15
2021-06-08T19:03:37.000Z
2022-02-25T14:47:33.000Z
tests/api/v3_1_0/test_security_groups_acls.py
CiscoISE/ciscoisesdk
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
[ "MIT" ]
6
2021-06-10T09:32:01.000Z
2022-01-12T08:34:39.000Z
# -*- coding: utf-8 -*- """IdentityServicesEngineAPI security_groups_acls API fixtures and tests. Copyright (c) 2021 Cisco and/or its affiliates. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import pytest from fastjsonschema.exceptions import JsonSchemaException from ciscoisesdk.exceptions import MalformedRequest from ciscoisesdk.exceptions import ciscoisesdkException from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match') def is_valid_get_security_groups_acl_by_id(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_a50d1bd34d5f593aadf8eb02083c67b0_v3_1_0').validate(obj.response) return True def get_security_groups_acl_by_id(api): endpoint_result = api.security_groups_acls.get_security_groups_acl_by_id( id='string' ) return endpoint_result @pytest.mark.security_groups_acls def test_get_security_groups_acl_by_id(api, validator): try: assert is_valid_get_security_groups_acl_by_id( validator, get_security_groups_acl_by_id(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def get_security_groups_acl_by_id_default(api): endpoint_result = api.security_groups_acls.get_security_groups_acl_by_id( id='string' ) return endpoint_result @pytest.mark.security_groups_acls def test_get_security_groups_acl_by_id_default(api, validator): try: assert is_valid_get_security_groups_acl_by_id( validator, get_security_groups_acl_by_id_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_update_security_groups_acl_by_id(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_afc81cd1e25c50319f75606b97c23b3d_v3_1_0').validate(obj.response) return True def update_security_groups_acl_by_id(api): endpoint_result = api.security_groups_acls.update_security_groups_acl_by_id( aclcontent='string', active_validation=False, description='string', generation_id='string', id='string', ip_version='string', is_read_only=True, modelled_content={}, name='string', payload=None ) return endpoint_result @pytest.mark.security_groups_acls def 
test_update_security_groups_acl_by_id(api, validator): try: assert is_valid_update_security_groups_acl_by_id( validator, update_security_groups_acl_by_id(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def update_security_groups_acl_by_id_default(api): endpoint_result = api.security_groups_acls.update_security_groups_acl_by_id( active_validation=False, id='string', aclcontent=None, description=None, generation_id=None, ip_version=None, is_read_only=None, modelled_content=None, name=None, payload=None ) return endpoint_result @pytest.mark.security_groups_acls def test_update_security_groups_acl_by_id_default(api, validator): try: assert is_valid_update_security_groups_acl_by_id( validator, update_security_groups_acl_by_id_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_delete_security_groups_acl_by_id(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_b0a2bea8bfec52b68663ef3f7ac6d7a7_v3_1_0').validate(obj.response) return True def delete_security_groups_acl_by_id(api): endpoint_result = api.security_groups_acls.delete_security_groups_acl_by_id( id='string' ) return endpoint_result @pytest.mark.security_groups_acls def test_delete_security_groups_acl_by_id(api, validator): try: assert is_valid_delete_security_groups_acl_by_id( validator, delete_security_groups_acl_by_id(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def delete_security_groups_acl_by_id_default(api): endpoint_result = api.security_groups_acls.delete_security_groups_acl_by_id( id='string' ) return endpoint_result @pytest.mark.security_groups_acls def test_delete_security_groups_acl_by_id_default(api, validator): try: assert is_valid_delete_security_groups_acl_by_id( validator, delete_security_groups_acl_by_id_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_get_security_groups_acl(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_999b22d6ad9f595ab7e3eee5cf44de8a_v3_1_0').validate(obj.response) return True def get_security_groups_acl(api): endpoint_result = api.security_groups_acls.get_security_groups_acl( filter='value1,value2', filter_type='string', page=0, size=0, sortasc='string', sortdsc='string' ) return endpoint_result @pytest.mark.security_groups_acls def test_get_security_groups_acl(api, validator): try: assert is_valid_get_security_groups_acl( validator, get_security_groups_acl(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def get_security_groups_acl_default(api): endpoint_result = api.security_groups_acls.get_security_groups_acl( filter=None, filter_type=None, page=None, size=None, sortasc=None, sortdsc=None ) return endpoint_result @pytest.mark.security_groups_acls def test_get_security_groups_acl_default(api, validator): try: assert is_valid_get_security_groups_acl( 
validator, get_security_groups_acl_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_create_security_groups_acl(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_9ab61f24bdaf508590f7686e1130913f_v3_1_0').validate(obj.response) return True def create_security_groups_acl(api): endpoint_result = api.security_groups_acls.create_security_groups_acl( aclcontent='string', active_validation=False, description='string', generation_id='string', ip_version='string', is_read_only=True, modelled_content={}, name='string', payload=None ) return endpoint_result @pytest.mark.security_groups_acls def test_create_security_groups_acl(api, validator): try: assert is_valid_create_security_groups_acl( validator, create_security_groups_acl(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def create_security_groups_acl_default(api): endpoint_result = api.security_groups_acls.create_security_groups_acl( active_validation=False, aclcontent=None, description=None, generation_id=None, ip_version=None, is_read_only=None, modelled_content=None, name=None, payload=None ) return endpoint_result @pytest.mark.security_groups_acls def test_create_security_groups_acl_default(api, validator): try: assert is_valid_create_security_groups_acl( validator, create_security_groups_acl_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_get_version(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_6704e67a1131578aa794d8377da9a1de_v3_1_0').validate(obj.response) return True def get_version(api): endpoint_result = api.security_groups_acls.get_version( ) return endpoint_result @pytest.mark.security_groups_acls def test_get_version(api, validator): try: assert is_valid_get_version( validator, get_version(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def get_version_default(api): endpoint_result = api.security_groups_acls.get_version( ) return endpoint_result @pytest.mark.security_groups_acls def test_get_version_default(api, validator): try: assert is_valid_get_version( validator, get_version_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_bulk_request_for_security_groups_acl(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_7da250e23ac05e6a8dcf32a81effcee9_v3_1_0').validate(obj.response) return True def bulk_request_for_security_groups_acl(api): endpoint_result = api.security_groups_acls.bulk_request_for_security_groups_acl( active_validation=False, operation_type='string', payload=None, resource_media_type='string' ) return endpoint_result @pytest.mark.security_groups_acls def test_bulk_request_for_security_groups_acl(api, validator): try: assert 
is_valid_bulk_request_for_security_groups_acl( validator, bulk_request_for_security_groups_acl(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def bulk_request_for_security_groups_acl_default(api): endpoint_result = api.security_groups_acls.bulk_request_for_security_groups_acl( active_validation=False, operation_type=None, payload=None, resource_media_type=None ) return endpoint_result @pytest.mark.security_groups_acls def test_bulk_request_for_security_groups_acl_default(api, validator): try: assert is_valid_bulk_request_for_security_groups_acl( validator, bulk_request_for_security_groups_acl_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_monitor_bulk_status_security_groups_acl(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_07af5ee576605a5a915d888924c1e804_v3_1_0').validate(obj.response) return True def monitor_bulk_status_security_groups_acl(api): endpoint_result = api.security_groups_acls.monitor_bulk_status_security_groups_acl( bulkid='string' ) return endpoint_result @pytest.mark.security_groups_acls def test_monitor_bulk_status_security_groups_acl(api, validator): try: assert is_valid_monitor_bulk_status_security_groups_acl( validator, monitor_bulk_status_security_groups_acl(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def monitor_bulk_status_security_groups_acl_default(api): endpoint_result = api.security_groups_acls.monitor_bulk_status_security_groups_acl( bulkid='string' ) return endpoint_result @pytest.mark.security_groups_acls def test_monitor_bulk_status_security_groups_acl_default(api, validator): try: assert is_valid_monitor_bulk_status_security_groups_acl( validator, monitor_bulk_status_security_groups_acl_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e
31.509356
109
0.720375
0
0
0
0
6,552
0.432304
0
0
2,163
0.142716
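Each fixture in the record above validates the API response against a JSON schema looked up by id (e.g. 'jsd_..._v3_1_0') through the project's json_schema_validate fixture, which is backed by fastjsonschema. The stand-alone sketch below shows that underlying validate-or-raise pattern with an invented schema and payloads, not the project's real schemas.

# Direct fastjsonschema usage as a stand-in for the json_schema_validate fixture.
import fastjsonschema
from fastjsonschema.exceptions import JsonSchemaException

validate = fastjsonschema.compile({
    "type": "object",
    "properties": {"SearchResult": {"type": "object"}},
    "required": ["SearchResult"],
})

for payload in ({"SearchResult": {"total": 1}}, {"unexpected": True}):
    try:
        validate(payload)
        print("valid:", payload)
    except JsonSchemaException as exc:
        print("invalid:", exc)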
d5f5d714834d96889f873a0d7ec900fdf1926bca
21,522
py
Python
geomstats/geometry/riemannian_metric.py
stefanheyder/geomstats
c4e6d959db7b1bcc99b00b535b8aa5d832b62e28
[ "MIT" ]
null
null
null
geomstats/geometry/riemannian_metric.py
stefanheyder/geomstats
c4e6d959db7b1bcc99b00b535b8aa5d832b62e28
[ "MIT" ]
null
null
null
geomstats/geometry/riemannian_metric.py
stefanheyder/geomstats
c4e6d959db7b1bcc99b00b535b8aa5d832b62e28
[ "MIT" ]
null
null
null
"""Riemannian and pseudo-Riemannian metrics.""" import math import warnings import autograd import geomstats.backend as gs from geomstats.geometry.connection import Connection EPSILON = 1e-4 N_CENTERS = 10 TOLERANCE = 1e-5 N_REPETITIONS = 20 N_MAX_ITERATIONS = 50000 N_STEPS = 10 def loss(y_pred, y_true, metric): """Compute loss function between prediction and ground truth. Loss function given by a Riemannian metric, expressed as the squared geodesic distance between the prediction and the ground truth. Parameters ---------- y_pred y_true metric Returns ------- loss """ loss = metric.squared_dist(y_pred, y_true) return loss def grad(y_pred, y_true, metric): """Closed-form for the gradient of the loss function.""" tangent_vec = metric.log(base_point=y_pred, point=y_true) grad_vec = - 2. * tangent_vec inner_prod_mat = metric.inner_product_matrix(base_point=y_pred) grad = gs.einsum('ni,nij->ni', grad_vec, gs.transpose(inner_prod_mat, axes=(0, 2, 1))) return grad class RiemannianMetric(Connection): """Class for Riemannian and pseudo-Riemannian metrics.""" def __init__(self, dimension, signature=None): assert isinstance(dimension, int) or dimension == math.inf assert dimension > 0 super().__init__(dimension=dimension) self.signature = signature def inner_product_matrix(self, base_point=None): """Inner product matrix at the tangent space at a base point. Parameters ---------- base_point : array-like, shape=[n_samples, dimension], optional """ raise NotImplementedError( 'The computation of the inner product matrix' ' is not implemented.') def inner_product_inverse_matrix(self, base_point=None): """Inner product matrix at the tangent space at a base point. Parameters ---------- base_point : array-like, shape=[n_samples, dimension], optional """ metric_matrix = self.inner_product_matrix(base_point) cometric_matrix = gs.linalg.inv(metric_matrix) return cometric_matrix def inner_product_derivative_matrix(self, base_point=None): """Compute derivative of the inner prod matrix at base point. Parameters ---------- base_point : array-like, shape=[n_samples, dimension], optional """ metric_derivative = autograd.jacobian(self.inner_product_matrix) return metric_derivative(base_point) def christoffels(self, base_point): """Compute Christoffel symbols associated with the connection. Parameters ---------- base_point: array-like, shape=[n_samples, dimension] Returns ------- christoffels: array-like, shape=[n_samples, dimension, dimension, dimension] """ cometric_mat_at_point = self.inner_product_inverse_matrix(base_point) metric_derivative_at_point = self.inner_product_derivative_matrix( base_point) term_1 = gs.einsum('nim,nmkl->nikl', cometric_mat_at_point, metric_derivative_at_point) term_2 = gs.einsum('nim,nmlk->nilk', cometric_mat_at_point, metric_derivative_at_point) term_3 = - gs.einsum('nim,nklm->nikl', cometric_mat_at_point, metric_derivative_at_point) christoffels = 0.5 * (term_1 + term_2 + term_3) return christoffels def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None): """Inner product between two tangent vectors at a base point. 
Parameters ---------- tangent_vec_a: array-like, shape=[n_samples, dimension] or shape=[1, dimension] tangent_vec_b: array-like, shape=[n_samples, dimension] or shape=[1, dimension] base_point: array-like, shape=[n_samples, dimension] or shape=[1, dimension] Returns ------- inner_product : array-like, shape=[n_samples,] """ tangent_vec_a = gs.to_ndarray(tangent_vec_a, to_ndim=2) tangent_vec_b = gs.to_ndarray(tangent_vec_b, to_ndim=2) n_tangent_vec_a = gs.shape(tangent_vec_a)[0] n_tangent_vec_b = gs.shape(tangent_vec_b)[0] inner_prod_mat = self.inner_product_matrix(base_point) inner_prod_mat = gs.to_ndarray(inner_prod_mat, to_ndim=3) n_mats = gs.shape(inner_prod_mat)[0] if n_tangent_vec_a != n_mats: if n_tangent_vec_a == 1: tangent_vec_a = gs.squeeze(tangent_vec_a, axis=0) einsum_str_a = 'j,njk->nk' elif n_mats == 1: inner_prod_mat = gs.squeeze(inner_prod_mat, axis=0) einsum_str_a = 'nj,jk->nk' else: raise ValueError('Shape mismatch for einsum.') else: einsum_str_a = 'nj,njk->nk' aux = gs.einsum(einsum_str_a, tangent_vec_a, inner_prod_mat) n_auxs, _ = gs.shape(aux) if n_tangent_vec_b != n_auxs: if n_auxs == 1: aux = gs.squeeze(aux, axis=0) einsum_str_b = 'k,nk->n' elif n_tangent_vec_b == 1: tangent_vec_b = gs.squeeze(tangent_vec_b, axis=0) einsum_str_b = 'nk,k->n' else: raise ValueError('Shape mismatch for einsum.') else: einsum_str_b = 'nk,nk->n' inner_prod = gs.einsum(einsum_str_b, aux, tangent_vec_b) inner_prod = gs.to_ndarray(inner_prod, to_ndim=2, axis=1) assert gs.ndim(inner_prod) == 2, inner_prod.shape return inner_prod def squared_norm(self, vector, base_point=None): """Compute the square of the norm of a vector. Squared norm of a vector associated to the inner product at the tangent space at a base point. Parameters ---------- vector : array-like, shape=[n_samples, dimension] base_point : array-like, shape=[n_samples, dimension] Returns ------- sq_norm : array-like, shape=[n_samples,] """ sq_norm = self.inner_product(vector, vector, base_point) return sq_norm def norm(self, vector, base_point=None): """Compute norm of a vector. Norm of a vector associated to the inner product at the tangent space at a base point. Note: This only works for positive-definite Riemannian metrics and inner products. Parameters ---------- vector : array-like, shape=[n_samples, dimension] base_point : array-like, shape=[n_samples, dimension] Returns ------- norm : array-like, shape=[n_samples,] """ sq_norm = self.squared_norm(vector, base_point) norm = gs.sqrt(sq_norm) return norm def geodesic(self, initial_point, end_point=None, initial_tangent_vec=None, point_type='vector'): """Return the geodesic as function of t. Geodesic curve defined by either: - an initial point and an initial tangent vector, or - an initial point and an end point. The geodesic is returned as a function parameterized by t. 
Parameters ---------- initial_point : array-like, shape=[n_samples, dimension] end_point : array-like, shape=[n_samples, dimension], optional initial_tangent_vec : array-like, shape=[n_samples, dimension], optional point_type : str, optional Returns ------- path : callable """ point_ndim = 1 if point_type == 'matrix': point_ndim = 2 initial_point = gs.to_ndarray(initial_point, to_ndim=point_ndim + 1) if end_point is None and initial_tangent_vec is None: raise ValueError('Specify an end point or an initial tangent ' 'vector to define the geodesic.') if end_point is not None: end_point = gs.to_ndarray(end_point, to_ndim=point_ndim + 1) shooting_tangent_vec = self.log(point=end_point, base_point=initial_point) if initial_tangent_vec is not None: assert gs.allclose(shooting_tangent_vec, initial_tangent_vec) initial_tangent_vec = shooting_tangent_vec initial_tangent_vec = gs.array(initial_tangent_vec) initial_tangent_vec = gs.to_ndarray(initial_tangent_vec, to_ndim=point_ndim + 1) def path(t): """Generate a function parameterizing the geodesic. Parameters ---------- t : parameter value of the geodesic Returns ------- point_at_time_t : callable """ t = gs.cast(t, gs.float32) t = gs.to_ndarray(t, to_ndim=1) t = gs.to_ndarray(t, to_ndim=2, axis=1) new_initial_point = gs.to_ndarray( initial_point, to_ndim=point_ndim + 1) new_initial_tangent_vec = gs.to_ndarray( initial_tangent_vec, to_ndim=point_ndim + 1) if point_type == 'vector': tangent_vecs = gs.einsum('il,nk->ik', t, new_initial_tangent_vec) elif point_type == 'matrix': tangent_vecs = gs.einsum('il,nkm->ikm', t, new_initial_tangent_vec) point_at_time_t = self.exp(tangent_vec=tangent_vecs, base_point=new_initial_point) return point_at_time_t return path def squared_dist(self, point_a, point_b): """Squared geodesic distance between two points. Parameters ---------- point_a : array-like, shape=[n_samples, dimension] point_b : array-like, shape=[n_samples, dimension] Returns ------- sq_dist : array-like, shape=[n_samples,] """ log = self.log(point=point_b, base_point=point_a) sq_dist = self.squared_norm(vector=log, base_point=point_a) return sq_dist def dist(self, point_a, point_b): """Geodesic distance between two points. Note: It only works for positive definite Riemannian metrics. Parameters ---------- point_a : array-like, shape=[n_samples, dimension] point_b : array-like, shape=[n_samples, dimension] Returns ------- dist : array-like, shape=[n_samples,] """ sq_dist = self.squared_dist(point_a, point_b) dist = gs.sqrt(sq_dist) return dist def variance(self, points, weights=None, base_point=None, point_type='vector'): """Variance of (weighted) points wrt a base point. Parameters ---------- points: array-like, shape=[n_samples, dimension] weights: array-like, shape=[n_samples, 1], optional """ if point_type == 'vector': points = gs.to_ndarray(points, to_ndim=2) if point_type == 'matrix': points = gs.to_ndarray(points, to_ndim=3) n_points = gs.shape(points)[0] if weights is None: weights = gs.ones((n_points, 1)) weights = gs.array(weights) weights = gs.to_ndarray(weights, to_ndim=2, axis=1) sum_weights = gs.sum(weights) if base_point is None: base_point = self.mean(points, weights) variance = 0. 
sq_dists = self.squared_dist(base_point, points) variance += gs.einsum('nk,nj->j', weights, sq_dists) variance = gs.array(variance) variance /= sum_weights variance = gs.to_ndarray(variance, to_ndim=1) variance = gs.to_ndarray(variance, to_ndim=2, axis=1) return variance def mean(self, points, weights=None, n_max_iterations=32, epsilon=EPSILON, point_type='vector', mean_method='default', verbose=False): """Frechet mean of (weighted) points. Parameters ---------- points : array-like, shape=[n_samples, dimension] weights : array-like, shape=[n_samples, 1], optional verbose : bool, optional Returns ------- mean : array-like the Frechet mean of points, a point on the manifold """ if mean_method == 'default': # TODO(nina): Profile this code to study performance, # i.e. what to do with sq_dists_between_iterates. def while_loop_cond(iteration, mean, variance, sq_dist): result = ~gs.logical_or( gs.isclose(variance, 0.), gs.less_equal(sq_dist, epsilon * variance)) return result[0, 0] or iteration == 0 def while_loop_body(iteration, mean, variance, sq_dist): logs = self.log(point=points, base_point=mean) tangent_mean = gs.einsum('nk,nj->j', weights, logs) tangent_mean /= sum_weights mean_next = self.exp( tangent_vec=tangent_mean, base_point=mean) sq_dist = self.squared_dist(mean_next, mean) sq_dists_between_iterates.append(sq_dist) variance = self.variance(points=points, weights=weights, base_point=mean_next) mean = mean_next iteration += 1 return [iteration, mean, variance, sq_dist] if point_type == 'vector': points = gs.to_ndarray(points, to_ndim=2) if point_type == 'matrix': points = gs.to_ndarray(points, to_ndim=3) n_points = gs.shape(points)[0] if weights is None: weights = gs.ones((n_points, 1)) weights = gs.array(weights) weights = gs.to_ndarray(weights, to_ndim=2, axis=1) sum_weights = gs.sum(weights) mean = points[0] if point_type == 'vector': mean = gs.to_ndarray(mean, to_ndim=2) if point_type == 'matrix': mean = gs.to_ndarray(mean, to_ndim=3) if n_points == 1: return mean sq_dists_between_iterates = [] iteration = 0 sq_dist = gs.array([[0.]]) variance = gs.array([[0.]]) last_iteration, mean, variance, sq_dist = gs.while_loop( lambda i, m, v, sq: while_loop_cond(i, m, v, sq), lambda i, m, v, sq: while_loop_body(i, m, v, sq), loop_vars=[iteration, mean, variance, sq_dist], maximum_iterations=n_max_iterations) if last_iteration == n_max_iterations: print('Maximum number of iterations {} reached.' 'The mean may be inaccurate'.format(n_max_iterations)) if verbose: print('n_iter: {}, final variance: {}, final dist: {}'.format( last_iteration, variance, sq_dist)) mean = gs.to_ndarray(mean, to_ndim=2) return mean if mean_method == 'frechet-poincare-ball': lr = 1e-3 tau = 5e-3 if len(points) == 1: return points iteration = 0 convergence = math.inf barycenter = points.mean(0, keepdims=True) * 0 while convergence > tau and n_max_iterations > iteration: iteration += 1 expand_barycenter = gs.repeat(barycenter, points.shape[0], 0) grad_tangent = 2 * self.log(points, expand_barycenter) cc_barycenter = self.exp(lr * grad_tangent.sum(0, keepdims=True), barycenter) convergence = self.dist(cc_barycenter, barycenter).max().item() barycenter = cc_barycenter if iteration == n_max_iterations: warnings.warn( 'Maximum number of iterations {} reached. 
The ' 'mean may be inaccurate'.format(n_max_iterations)) return barycenter def adaptive_gradientdescent_mean(self, points, weights=None, n_max_iterations=40, epsilon=1e-12, init_points=[], verbose=False): """Compute Frechet mean of (weighted) points using adaptive time-steps. Frechet mean of (weighted) points using adaptive time-steps The loss function optimized is ||M_1(x)||_x (where M_1(x) is the tangent mean at x) rather than the mean-square-distance (MSD) because this saves computation time. Parameters ---------- points: array-like, shape=[n_samples, dimension] weights: array-like, shape=[n_samples, 1], optional init_points: array-like, shape=[n_init, dimension] epsilon: tolerance for stopping the gradient descent verbose: verbose mode printing the surrogate value epsilon: tolerance for stopping the gradient descent """ # TODO(Xavier): This function assumes that all points are lists # of vectors and not of matrices n_points = gs.shape(points)[0] if n_points == 1: return gs.to_ndarray(points[0], to_ndim=2) if weights is None: weights = gs.ones((n_points, 1)) weights = gs.array(weights) weights = gs.to_ndarray(weights, to_ndim=2, axis=1) sum_weights = gs.sum(weights) n_init = len(init_points) if n_init == 0: current_mean = points[0] else: current_mean = init_points[0] tau = 1.0 iteration = 0 logs = self.log(point=points, base_point=current_mean) current_tangent_mean = gs.einsum('nk,nj->j', weights, logs) current_tangent_mean /= sum_weights norm_current_tangent_mean = gs.linalg.norm(current_tangent_mean) while (norm_current_tangent_mean > epsilon and iteration < n_max_iterations): iteration = iteration + 1 shooting_vector = gs.to_ndarray( tau * current_tangent_mean, to_ndim=2) next_mean = self.exp( tangent_vec=shooting_vector, base_point=current_mean) logs = self.log(point=points, base_point=next_mean) next_tangent_mean = gs.einsum('nk,nj->j', weights, logs) next_tangent_mean /= sum_weights norm_next_tangent_mean = gs.linalg.norm(next_tangent_mean) if verbose: print( "Iter {0}: tau= {1}, " "norm_current_tangent_mean = {2}".format( iter, tau, norm_current_tangent_mean)) if norm_next_tangent_mean < norm_current_tangent_mean: current_mean = next_mean current_tangent_mean = next_tangent_mean norm_current_tangent_mean = norm_next_tangent_mean tau = max(1.0, 1.0511111 * tau) else: tau = tau * 0.8 if iteration == n_max_iterations: warnings.warn( 'Maximum number of iterations {} reached.' 'The mean may be inaccurate'.format(n_max_iterations)) return gs.to_ndarray(current_mean, to_ndim=2) def diameter(self, points): """Give the distance between two farthest points. Distance between the two points that are farthest away from each other in points. Parameters ---------- points Returns ------- diameter """ diameter = 0.0 n_points = points.shape[0] for i in range(n_points - 1): dist_to_neighbors = self.dist(points[i, :], points[i + 1:, :]) dist_to_farthest_neighbor = gs.amax(dist_to_neighbors) diameter = gs.maximum(diameter, dist_to_farthest_neighbor) return diameter def closest_neighbor_index(self, point, neighbors): """Closest neighbor of point among neighbors. Parameters ---------- point neighbors Returns ------- closest_neighbor_index """ dist = self.dist(point, neighbors) closest_neighbor_index = gs.argmin(dist) return closest_neighbor_index
33.315789
79
0.557987
20,404
0.948053
0
0
0
0
0
0
7,155
0.332451
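A minimal usage sketch for the RiemannianMetric class in the record above, assuming geomstats with its numpy backend at the version shown; the FlatMetric subclass and its values are invented for illustration and are not part of geomstats.

import geomstats.backend as gs
from geomstats.geometry.riemannian_metric import RiemannianMetric

class FlatMetric(RiemannianMetric):
    """Toy flat metric: the inner-product matrix is the identity."""
    def inner_product_matrix(self, base_point=None):
        # One sample of the identity matrix, shape (1, dim, dim).
        return gs.to_ndarray(gs.eye(self.dimension), to_ndim=3)

metric = FlatMetric(dimension=3)
u = gs.array([[1., 0., 0.]])
v = gs.array([[0., 2., 0.]])
# Orthogonal vectors, so the inner product should come back as [[0.]].
print(metric.inner_product(u, v))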
d5f67147c5059c64bf2090a7f0dd93d9aec0092b
9,842
py
Python
app/main/pages/instrument/hrs/red/order/plots.py
hettlage/salt-data-quality-site
da9ff4a51e8affa47e0bc1c0383c7fdeaac2155e
[ "MIT" ]
null
null
null
app/main/pages/instrument/hrs/red/order/plots.py
hettlage/salt-data-quality-site
da9ff4a51e8affa47e0bc1c0383c7fdeaac2155e
[ "MIT" ]
null
null
null
app/main/pages/instrument/hrs/red/order/plots.py
hettlage/salt-data-quality-site
da9ff4a51e8affa47e0bc1c0383c7fdeaac2155e
[ "MIT" ]
null
null
null
import pandas as pd

from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import Plasma256
from bokeh.plotting import figure, ColumnDataSource

from app import db
from app.decorators import data_quality

# creates your plot
date_formatter = DatetimeTickFormatter(microseconds=['%f'],
                                       milliseconds=['%S.%2Ns'],
                                       seconds=[':%Ss'],
                                       minsec=[':%Mm:%Ss'],
                                       minutes=['%H:%M:%S'],
                                       hourmin=['%H:%M:'],
                                       hours=["%H:%M"],
                                       days=["%d %b"],
                                       months=["%d %b %Y"],
                                       years=["%b %Y"])


def get_position_source(start_date, end_date, obsmode):
    logic = " and HrsMode_Id = {obsmode} " \
            " and FileName like 'RORDER%%' " \
        .format(obsmode=obsmode)
    sql = "select Date, y_upper, HrsOrder, CONVERT(Date,char) AS Time " \
          " from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
          " where Date > '{start_date}' and Date <'{end_date}' {logic}" \
        .format(start_date=start_date, end_date=end_date, logic=logic)
    df = pd.read_sql(sql, db.engine)

    colors = []
    if len(df) > 0:
        ord_min = df['HrsOrder'].min()
        ord_max = df['HrsOrder'].max()
        colors = [Plasma256[int((y - ord_min) * (len(Plasma256) - 1)
                                / float(ord_max - ord_min))]
                  for y in df["HrsOrder"]]

    df['colors'] = colors

    source = ColumnDataSource(df)
    return source


@data_quality(name='hrs_order', caption='HRS Order')
def hrs_order_plot(start_date, end_date):
    """Return a <div> element with the Order plot.

    The plot shows the HRS order for obsmode High, low and medium over time

    Params:
    -------
    start_date: date
        Earliest date to include in the plot.
    end_date: date
        Earliest date not to include in the plot.

    Return:
    -------
    str:
        A <div> element with the Order plot.
    """

    def get_source(obsmode):
        logic = " and HrsMode_Id = {obsmode} " \
                " and FileName like 'RORDER%%' " \
                " group by Date " \
            .format(obsmode=obsmode)
        sql = "select Date, (Max(HrsOrder) - Min(HrsOrder)) as ord, CONVERT(Date, char) AS Time " \
              " from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
              " where Date > '{start_date}' and Date <'{end_date}' {logic}" \
            .format(start_date=start_date, end_date=end_date, logic=logic)

        df = pd.read_sql(sql, db.engine)
        source = ColumnDataSource(df)
        return source

    low_source = get_source(1)  # HrsMode_Id = 1 low
    med_source = get_source(2)  # HrsMode_Id = 2 med
    high_source = get_source(3)  # HrsMode_Id = 3 high

    tool_list = "pan,reset,save,wheel_zoom, box_zoom"
    _hover = HoverTool(
        tooltips="""
            <div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">Date: </span>
                    <span style="font-size: 15px;"> @Time</span>
                </div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">HrsOrder(Max - Min): </span>
                    <span style="font-size: 15px;"> @ord</span>
                </div>
            </div>
            """
    )

    p = figure(title="HRS Order",
               x_axis_label='Date',
               y_axis_label='Max(HrsOrder) - Min(HrsOrder)',
               x_axis_type='datetime',
               tools=[tool_list, _hover])
    p.scatter(source=low_source, x='Date', y='ord', color='red', fill_alpha=0.2, legend='Low', size=10)
    p.scatter(source=med_source, x='Date', y='ord', color='orange', fill_alpha=0.2, legend='Medium', size=10)
    p.scatter(source=high_source, x='Date', y='ord', color='green', fill_alpha=0.2, legend='High', size=10)

    p.legend.location = "top_right"
    p.legend.click_policy = "hide"
    p.legend.background_fill_alpha = 0.3
    p.legend.inactive_fill_alpha = 0.8

    p.xaxis[0].formatter = date_formatter

    return p


@data_quality(name='hrs_order_position_high', caption=' ')
def hrs_order_position_plot(start_date, end_date):
    """
    Return a <div> element with the Order Position plot.

    The plot shows the HRS order for obsmode High resolution over time

    Params:
    -------
    start_date: date
        Earliest date to include in the plot.
    end_date: date
        Earliest date not to include in the plot.

    Return:
    -------
    str:
        A <div> element with the Order Position plot.
    """
    high_source = get_position_source(start_date, end_date, 3)  # HrsMode_Id = 3 high

    tool_list = "pan,reset,save,wheel_zoom, box_zoom"
    _hover = HoverTool(
        tooltips="""
            <div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">Date: </span>
                    <span style="font-size: 15px;"> @Time</span>
                </div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
                    <span style="font-size: 15px;"> @y_upper</span>
                </div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
                    <span style="font-size: 15px;"> @HrsOrder</span>
                </div>
            </div>
            """
    )

    p = figure(title="HRS Order Position High Resolution",
               x_axis_label='Date',
               y_axis_label='y_upper',
               x_axis_type='datetime',
               tools=[tool_list, _hover])
    p.scatter(source=high_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)

    p.xaxis[0].formatter = date_formatter

    return p


@data_quality(name='hrs_order_position_medium', caption=' ')
def hrs_order_position_plot(start_date, end_date):
    """
    Return a <div> element with the Order Position plot.

    The plot shows the HRS order for obsmode High resolution over time

    Params:
    -------
    start_date: date
        Earliest date to include in the plot.
    end_date: date
        Earliest date not to include in the plot.

    Return:
    -------
    str:
        A <div> element with the Order Position plot.
    """
    high_source = get_position_source(start_date, end_date, 2)  # HrsMode_Id = 3 high

    tool_list = "pan,reset,save,wheel_zoom, box_zoom"
    _hover = HoverTool(
        tooltips="""
            <div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">Date: </span>
                    <span style="font-size: 15px;"> @Time</span>
                </div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
                    <span style="font-size: 15px;"> @y_upper</span>
                </div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
                    <span style="font-size: 15px;"> @HrsOrder</span>
                </div>
            </div>
            """
    )

    p = figure(title="HRS Order Position Medium Resolution",
               x_axis_label='Date',
               y_axis_label='y_upper',
               x_axis_type='datetime',
               tools=[tool_list, _hover])
    p.scatter(source=high_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)

    p.xaxis[0].formatter = date_formatter

    return p


@data_quality(name='hrs_order_position_low', caption=' ')
def hrs_order_position_plot(start_date, end_date):
    """
    Return a <div> element with the Order Position plot.

    The plot shows the HRS order for obsmode High resolution over time

    Params:
    -------
    start_date: date
        Earliest date to include in the plot.
    end_date: date
        Earliest date not to include in the plot.

    Return:
    -------
    str:
        A <div> element with the Order Position plot.
    """
    high_source = get_position_source(start_date, end_date, 3)  # HrsMode_Id = 3 high

    tool_list = "pan,reset,save,wheel_zoom, box_zoom"
    _hover = HoverTool(
        tooltips="""
            <div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">Date: </span>
                    <span style="font-size: 15px;"> @Time</span>
                </div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
                    <span style="font-size: 15px;"> @y_upper</span>
                </div>
                <div>
                    <span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
                    <span style="font-size: 15px;"> @HrsOrder</span>
                </div>
            </div>
            """
    )

    p = figure(title="HRS Order Position Low Resolution",
               x_axis_label='Date',
               y_axis_label='y_upper',
               x_axis_type='datetime',
               tools=[tool_list, _hover])
    p.scatter(source=high_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)

    p.xaxis[0].formatter = date_formatter

    return p
35.530686
109
0.517984
0
0
0
0
8,103
0.823308
0
0
5,782
0.587482
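A standalone sketch of the Bokeh pattern the module above relies on (a ColumnDataSource plus a HoverTool on a datetime axis), using dummy data in place of the SALT science database; the column names mirror the record, everything else is an assumption.

import pandas as pd
from bokeh.models import HoverTool
from bokeh.plotting import figure, ColumnDataSource

# Dummy frame standing in for the DQ_HrsOrder query result.
df = pd.DataFrame({'Date': pd.to_datetime(['2017-01-01', '2017-01-02']),
                   'ord': [3, 5],
                   'Time': ['2017-01-01', '2017-01-02']})
source = ColumnDataSource(df)
hover = HoverTool(tooltips=[('Date', '@Time'), ('HrsOrder(Max - Min)', '@ord')])
p = figure(title='HRS Order (dummy data)', x_axis_type='datetime',
           tools=['pan,reset,save', hover])
p.scatter(source=source, x='Date', y='ord', fill_alpha=0.2, size=10)
# In the app itself, the @data_quality decorator presumably renders the returned figure.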
d5f72b6bb8de932265e3494ed6520e23b33d2b72
705
py
Python
p6e8.py
yannickbf-prog/python
da4bd2c8668966359b829a8ac2a896afeca2b150
[ "MIT" ]
null
null
null
p6e8.py
yannickbf-prog/python
da4bd2c8668966359b829a8ac2a896afeca2b150
[ "MIT" ]
null
null
null
p6e8.py
yannickbf-prog/python
da4bd2c8668966359b829a8ac2a896afeca2b150
[ "MIT" ]
null
null
null
#Yannick p6e8 Write a program that first asks for a number and then keeps asking for numbers until the sum of the numbers entered matches the initial number. The program finishes by printing the list of numbers.

limite = int(input("Escribe limite:"))
valores = int(input("Escribe un valor:"))

listavalores = []
listavalores.append(valores)

while limite > sum(listavalores):
    valores = int(input("Escribe otro valor"))
    listavalores.append(valores)

print(f"El limite a superar es {limite}. La lista creada es ", end="")

for i in range(len(listavalores)):
    print(listavalores[i], end=" ")

print(f"ya que la suma de estos numeros es {sum(listavalores)}")
30.652174
219
0.704965
0
0
0
0
0
0
0
0
397
0.559155
d5f73b66aea43800edd9e2977d37ade872174872
1,574
py
Python
.venv/lib/python3.8/site-packages/cleo/application.py
RivtLib/replit01
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
[ "MIT" ]
1
2020-08-07T16:09:57.000Z
2020-08-07T16:09:57.000Z
.venv/lib/python3.8/site-packages/cleo/application.py
RivtLib/replit01
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
[ "MIT" ]
null
null
null
.venv/lib/python3.8/site-packages/cleo/application.py
RivtLib/replit01
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
[ "MIT" ]
null
null
null
from typing import Optional
from typing import Tuple

from clikit.console_application import ConsoleApplication

from .commands import BaseCommand
from .commands.completions_command import CompletionsCommand
from .config import ApplicationConfig


class Application(ConsoleApplication, object):
    """
    An Application is the container for a collection of commands.

    This class is optimized for a standard CLI environment.

    Usage:
    >>> app = Application('myapp', '1.0 (stable)')
    >>> app.add(HelpCommand())
    >>> app.run()
    """

    def __init__(
        self, name=None, version=None, complete=True, config=None
    ):  # type: (str, str, bool, Optional[ApplicationConfig]) -> None
        if config is None:
            config = ApplicationConfig(name, version)

        super(Application, self).__init__(config)

        if complete:
            self.add(CompletionsCommand())

    def add_commands(self, *commands):  # type: (Tuple[BaseCommand]) -> None
        for command in commands:
            self.add(command)

    def add(self, command):  # type: (BaseCommand) -> Application
        """
        Adds a command object.
        """
        self.add_command(command.config)
        command.set_application(self)

        return self

    def find(self, name):  # type: (str) -> BaseCommand
        names = name.split(" ")
        command = self.get_command(names[0])

        for name in names[1:]:
            command = command.get_sub_command(name)

        return command.config.handler
29.148148
77
0.623888
1,314
0.834816
0
0
0
0
0
0
475
0.301779
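A hypothetical end-to-end sketch of the Application wrapper in the record above, assuming the clikit-based cleo 0.7.x series this file belongs to; GreetCommand is invented for illustration and is not part of cleo.

from cleo import Application, Command

class GreetCommand(Command):
    """
    Greets someone

    greet
        {name? : Who should be greeted?}
    """

    def handle(self):
        # Fall back to "world" when no name argument is given.
        self.line("Hello {}".format(self.argument("name") or "world"))

app = Application("myapp", "1.0")
app.add(GreetCommand())

if __name__ == "__main__":
    app.run()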