blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 5-283 | content_id stringlengths 40-40 | detected_licenses sequencelengths 0-41 | license_type stringclasses 2 values | repo_name stringlengths 7-96 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 58 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 12.7k-662M ⌀ | star_events_count int64 0-35.5k | fork_events_count int64 0-20.6k | gha_license_id stringclasses 11 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 43 values | src_encoding stringclasses 9 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7-5.88M | extension stringclasses 30 values | content stringlengths 7-5.88M | authors sequencelengths 1-1 | author stringlengths 0-73 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2cda01192caf74ae2c14d6719c08706742a49cd8 | 7f7e36cd9570970007a0f974837f9115e5d4d587 | /libdl/nn_losses/mctc.py | 6fb9d3994ea386bbc5f573156158ee109eceae17 | [] | no_license | christofw/multipitch_mctc | 591275e4074ded69aa7f166e7d1982417013c21d | d95e201044d8974ae9d3a5157a6e013a13288071 | refs/heads/main | 2023-08-16T20:37:57.999239 | 2021-10-12T11:56:09 | 2021-10-12T11:56:09 | 388,519,835 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,978 | py | import numpy as np, os, scipy
import torch
import torch.nn as nn
from itertools import groupby
class sctc_loss_threecomp(nn.CTCLoss):
""" Separable Connectionist Temporal Classification (SCTC) Loss
with three components per category, e.g. (blank, 0, 1)
Args:
reduction='none' No reduction / averaging applied to loss within this class.
Has to be done afterwards explicitly.
For details see: https://pytorch.org/docs/stable/_modules/torch/nn/modules/loss.html#CTCLoss
and
C. Wigington, B.L. Price, S. Cohen: Multi-label Connectionist Temporal Classification. ICDAR 2019: 979-986
"""
def __init__(self, reduction='none'):
super(sctc_loss_threecomp, self).__init__(reduction=reduction)
assert reduction=='none', 'This loss is not tested with other reductions. Please apply reductions afterwards explicitly'
def forward(self, log_probs, targets, input_lengths, target_lengths):
ctc_loss = nn.CTCLoss(reduction=self.reduction)
num_categories = targets.size(0) # there is no category blank in SCTC
all_losses = []
for i in range(num_categories):
# Prepare targets
targ_cat = torch.tensor([t[0]+1 for t in groupby(targets[i,:])])
target_torch = targ_cat.type(torch.cuda.LongTensor).unsqueeze(0)
# Overwrite target sequence length
target_lengths = torch.tensor(target_torch.size(1), dtype=torch.long)
# Prepare inputs
input_torch = log_probs[:, i, :].squeeze(1).type(torch.cuda.FloatTensor).T.unsqueeze(1)
# Overwrite input sequence length
input_lengths = torch.tensor(input_torch.size(0), dtype=torch.long)
# Compute individual loss for category
sctc_loss_cat = ctc_loss(input_torch, target_torch, input_lengths, target_lengths)
all_losses.append(sctc_loss_cat)
# Sum to obtain overall loss (instead of multiply since we deal with log probs!)
sctc_loss = sum(all_losses)
return sctc_loss
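# Minimal usage sketch (shapes inferred from forward() above, not documented upstream;
# assumes a batch of one and CUDA tensors, since the implementation hard-codes torch.cuda
# types): log_probs has shape (3, num_categories, T) with per-component log-probabilities,
# targets has shape (num_categories, T).
#   criterion = sctc_loss_threecomp(reduction='none')
#   loss = criterion(log_probs, targets, input_lengths, target_lengths)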
class sctc_loss_twocomp(nn.CTCLoss):
""" Separable Connectionist Temporal Classification (SCTC) Loss
with two components per category, e.g. (blank, 1)
Args:
reduction='none' No reduction / averaging applied to loss within this class.
Has to be done afterwards explicitly.
For details see: https://pytorch.org/docs/stable/_modules/torch/nn/modules/loss.html#CTCLoss
and
C. Wigington, B.L. Price, S. Cohen: Multi-label Connectionist Temporal Classification. ICDAR 2019: 979-986
"""
def __init__(self, reduction='none'):
super(sctc_loss_twocomp, self).__init__(reduction=reduction)
        assert reduction=='none', 'reduction ' + reduction + ': This loss is not tested with other reductions. Please apply reductions afterwards explicitly'
def forward(self, log_probs, targets, input_lengths, target_lengths):
ctc_loss = nn.CTCLoss(reduction=self.reduction)
num_categories = targets.size(0) # there is no category blank in SCTC
all_losses = []
for i in range(num_categories):
# Prepare targets
targ_cat = torch.tensor([t[0] for t in groupby(targets[i,:])])
targ_cat_nonzero = targ_cat[targ_cat!=0.]
target_torch = targ_cat_nonzero.type(torch.cuda.LongTensor).unsqueeze(0)
# Overwrite target sequence length
target_lengths = torch.tensor(target_torch.size(1), dtype=torch.long)
# Prepare inputs
input_torch = log_probs[:, i, :].squeeze(1).type(torch.cuda.FloatTensor).T.unsqueeze(1)
# Overwrite input sequence length
input_lengths = torch.tensor(input_torch.size(0), dtype=torch.long)
# Compute individual loss for category
sctc_loss_cat = ctc_loss(input_torch, target_torch, input_lengths, target_lengths)#/input_lengths
all_losses.append(sctc_loss_cat)
# Sum to obtain overall loss (instead of multiply since we deal with log probs!)
sctc_loss = sum(all_losses)
return sctc_loss
class mctc_ne_loss_twocomp(nn.CTCLoss):
""" Multi-label Connectionist Temporal Classification (MCTC) Loss in "No Epsilon" (NE) encoding,
i.e., without an overall blank category
with two components per category, e.g. (blank, 1)
Args:
reduction='none' No reduction / averaging applied to loss within this class.
Has to be done afterwards explicitly.
For details see: https://pytorch.org/docs/stable/_modules/torch/nn/modules/loss.html#CTCLoss
and
C. Wigington, B.L. Price, S. Cohen: Multi-label Connectionist Temporal Classification. ICDAR 2019: 979-986
"""
def __init__(self, reduction='none'):
super(mctc_ne_loss_twocomp, self).__init__(reduction=reduction)
assert reduction=='none', 'This loss is not tested with other reductions. Please apply reductions afterwards explicitly'
def forward(self, log_probs, targets, input_lengths, target_lengths):
ctc_loss = nn.CTCLoss(reduction=self.reduction)
# Prepare targets (add zero column to guarantee that blank is included!)
char_unique, char_target = torch.unique(torch.cat((targets, torch.zeros((targets.size(0), 1)).type(torch.cuda.LongTensor)), dim=1), dim=1, return_inverse=True) # char_unique is the BatchCharacterList
char_targ_condensed = torch.tensor([t[0] for t in groupby(char_target[:-1])])
target_torch = char_targ_condensed.type(torch.cuda.LongTensor).unsqueeze(0) # no shift since no blank_MCTC
# Overwrite target sequence length
target_lengths = torch.tensor(target_torch.size(1), dtype=torch.long)
# Prepare inputs
input_logsoftmax = log_probs.unsqueeze(2)
char_probs = torch.matmul(1-char_unique.transpose(0, -1), torch.squeeze(input_logsoftmax[0, :, :, :])) \
+ torch.matmul(char_unique.transpose(0, -1), torch.squeeze(input_logsoftmax[1, :, :, :]))
input_torch = char_probs.transpose(0, -1).type(torch.cuda.FloatTensor).unsqueeze(1)
# Overwrite input sequence length
input_lengths = torch.tensor(input_torch.size(0), dtype=torch.long)
# Compute loss from characters
mctc_loss = ctc_loss(input_torch, target_torch, input_lengths, target_lengths)
return mctc_loss
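# Note on the encoding above: torch.unique(..., return_inverse=True) over the target columns
# builds the BatchCharacterList (the distinct multi-label "characters") and maps every frame
# onto one of them; because the inputs are log-probabilities, the matmul with the 0/1
# character matrix sums the per-category component log-probabilities, which is the log-space
# equivalent of multiplying the component probabilities.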
class mctc_ne_loss_threecomp(nn.CTCLoss):
""" Multi-label Connectionist Temporal Classification (MCTC) Loss in "No Epsilon" (NE) encoding,
i.e., without an overall blank category
with three components per category, e.g. (blank, 0, 1)
Args:
reduction='none' No reduction / averaging applied to loss within this class.
Has to be done afterwards explicitly.
For details see: https://pytorch.org/docs/stable/_modules/torch/nn/modules/loss.html#CTCLoss
and
C. Wigington, B.L. Price, S. Cohen: Multi-label Connectionist Temporal Classification. ICDAR 2019: 979-986
"""
def __init__(self, reduction='none'):
super(mctc_ne_loss_threecomp, self).__init__(reduction=reduction)
assert reduction=='none', 'This loss is not tested with other reductions. Please apply reductions afterwards explicitly'
def forward(self, log_probs, targets, input_lengths, target_lengths):
ctc_loss = nn.CTCLoss(reduction=self.reduction)
# Prepare targets
targets_ext = torch.cat((-1*torch.ones((targets.size(0), 1)).type(torch.cuda.LongTensor), targets), dim=1)
char_unique, char_target = torch.unique(targets_ext, dim=1, return_inverse=True) # char_unique is the BatchCharacterList
char_targ_condensed = torch.tensor([t[0] for t in groupby(char_target)][1:])
target_torch = char_targ_condensed.type(torch.cuda.LongTensor).unsqueeze(0) # no shift since no blank_MCTC
# Overwrite target sequence length
target_lengths = torch.tensor(target_torch.size(1), dtype=torch.long)
# Prepare inputs
input_logsoftmax = log_probs.unsqueeze(2)
char_probs = torch.matmul((char_unique==-1).type(torch.cuda.FloatTensor).transpose(0, -1), torch.squeeze(input_logsoftmax[0, :, :, :])) \
+ torch.matmul((char_unique==0).type(torch.cuda.FloatTensor).transpose(0, -1), torch.squeeze(input_logsoftmax[1, :, :, :])) \
+ torch.matmul((char_unique==1).type(torch.cuda.FloatTensor).transpose(0, -1), torch.squeeze(input_logsoftmax[2, :, :, :]))
input_torch = char_probs.transpose(0, -1).type(torch.cuda.FloatTensor).unsqueeze(1)
# Overwrite input sequence length
input_lengths = torch.tensor(input_torch.size(0), dtype=torch.long)
# Compute loss from characters
mctc_loss = ctc_loss(input_torch, target_torch, input_lengths, target_lengths)
return mctc_loss
class mctc_we_loss(nn.CTCLoss):
""" Multi-label Connectionist Temporal Classification (MCTC) Loss in "With Epsilon" (WE) encoding,
i.e., there is an overall blank category, for which the probabilities of other components are ignored (epsilon)
thus, other categories have components (blank, 1, [epsilon])
Args:
reduction='none' No reduction / averaging applied to loss within this class.
Has to be done afterwards explicitly.
For details see: https://pytorch.org/docs/stable/_modules/torch/nn/modules/loss.html#CTCLoss
and
C. Wigington, B.L. Price, S. Cohen: Multi-label Connectionist Temporal Classification. ICDAR 2019: 979-986
"""
def __init__(self, reduction='none'):
super(mctc_we_loss, self).__init__(reduction=reduction)
assert reduction=='none', 'This loss is not tested with other reductions. Please apply reductions afterwards explicitly'
def forward(self, log_probs, targets, input_lengths, target_lengths):
ctc_loss = nn.CTCLoss(reduction=self.reduction)
# Prepare targets
char_unique, char_target = torch.unique(targets, dim=1, return_inverse=True) # char_unique is the BatchCharacterList
char_target = torch.remainder(char_target+1, char_unique.size(1)) # shift blank character to first position
char_unique = torch.roll(char_unique, 1, -1)
char_targ_condensed = torch.tensor([t[0] for t in groupby(char_target)][1:])
target_torch = char_targ_condensed.type(torch.cuda.LongTensor).unsqueeze(0) # no shift since blank_MCTC already exists on pos. 0
# Overwrite target sequence length
target_lengths = torch.tensor(target_torch.size(1), dtype=torch.long)
# Prepare inputs
input_logsoftmax = log_probs.unsqueeze(2)
char_probs_nonblank = torch.matmul(1-char_unique[:, 1:].transpose(0, -1), torch.squeeze(input_logsoftmax[0, :, :, :])) \
+ torch.matmul(char_unique[:, 1:].transpose(0, -1), torch.squeeze(input_logsoftmax[1, :, :, :]))
# recalculate first row (category blank) due to eps values (ignore other categories for computing blank probability)
char_probs_blank = input_logsoftmax[1, :1, :, :].squeeze(2).squeeze(1)
char_probs = torch.cat((char_probs_blank, char_probs_nonblank), dim=0)
input_torch = char_probs.transpose(0, -1).type(torch.cuda.FloatTensor).unsqueeze(1)
# Overwrite input sequence length
input_lengths = torch.tensor(input_torch.size(0), dtype=torch.long)
# Compute loss from characters
mctc_loss = ctc_loss(input_torch, target_torch, input_lengths, target_lengths)
return mctc_loss
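# Minimal usage sketch for the WE variant (illustrative only; like the other losses it
# assumes CUDA tensors and that any reduction is applied explicitly by the caller):
#   criterion = mctc_we_loss(reduction='none')
#   loss = criterion(log_probs, targets, input_lengths, target_lengths).mean()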
| [
"[email protected]"
] | |
7cc0ff5d841a8a1c80894553ae24881e03372ee8 | eb588aedbda16984770b1d35a0d4d14f6204dfcf | /muswarmlogger/events.py | a44af3d89ef475047f49865501a7195cc022fc11 | [] | no_license | fr0gs/mu-swarm-logger-service | b670318647f4c5823cc0f833ae4b2980a290ee10 | 49d794cda4e2c3dcf8dcdc106f8fece7e9f4078f | refs/heads/master | 2021-01-19T13:07:06.128646 | 2017-04-12T13:00:27 | 2017-04-12T13:00:53 | 88,065,283 | 0 | 0 | null | 2017-04-12T15:06:01 | 2017-04-12T15:06:00 | null | UTF-8 | Python | false | false | 3,529 | py | from aiodockerpy import APIClient
import importlib
import logging
import os, sys
from typing import Any, Callable, Dict, List
from muswarmlogger.sparql import SPARQLClient
logger = logging.getLogger(__name__)
on_startup_subroutines = []
event_handlers = []
module_mtimes = {}
class Event:
def __init__(self, client: APIClient, data: dict):
self.client = client
self.data = data
@property
def type(self):
return self.data['Type']
@property
def action(self):
return self.data['Action']
@property
def id(self):
return self.data['Actor']['ID']
@property
def attributes(self):
return self.data['Actor']['Attributes']
@property
def time(self):
return self.data['time']
@property
def time_nano(self):
return self.data['timeNano']
class ContainerEvent(Event):
_container_task = None
@property
def container(self):
if self._container_task is None:
self._container_task = self.client.loop.create_task(
self.client.inspect_container(self.id))
return self._container_task
@property
def name(self):
return self.attributes['name']
@property
def status(self):
return self.data['status']
def new_event(client: APIClient, data: Dict[str, Any]) -> None:
event_type = data['Type']
if event_type == "container":
return ContainerEvent(client, data)
else:
logger.debug("Unrecognized event (%s): %s", event_type, data)
return Event(client, data)
def on_startup(subroutine: Callable[[APIClient, SPARQLClient], None]) -> None:
on_startup_subroutines.append(subroutine)
def register_event(subroutine: Callable[[Event, SPARQLClient], None]) -> None:
module_name = subroutine.__module__
module = sys.modules[module_name]
stat_info = os.stat(module.__file__)
if module_name not in module_mtimes:
module_mtimes[module_name] = stat_info.st_mtime
event_handlers.append(subroutine)
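# Handlers are dispatched by the type annotation of their `event` parameter (see
# _filter_handlers below). Illustrative registration from an extension module; the names in
# this sketch are made up and not part of this package:
#   async def log_container_start(event: ContainerEvent, sparql: SPARQLClient):
#       if event.status == "start":
#           ...
#   register_event(log_container_start)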
async def run_on_startup_subroutines(docker: APIClient, sparql: SPARQLClient) -> None:
for subroutine in on_startup_subroutines:
await subroutine(docker, sparql)
def list_handlers(event: Event, reload: bool = False) -> None:
handlers = _filter_handlers(event)
if reload:
changes = _detect_changes(handlers)
if changes:
logger.debug("Reloading modules: %s", ", ".join(changes.keys()))
_reload_modules(changes)
handlers = _filter_handlers(event)
return handlers
def _detect_changes(handlers: List[Callable]) -> Dict[str, float]:
changes = {}
for subroutine in handlers:
module_name = subroutine.__module__
module = sys.modules[module_name]
stat_info = os.stat(module.__file__)
if module_mtimes[module_name] != stat_info.st_mtime:
changes[module_name] = stat_info.st_mtime
return changes
def _reload_modules(changes: Dict[str, float]) -> None:
for module_name, st_mtime in changes.items():
event_handlers[:] = [
x
for x in event_handlers
if x.__module__ != module_name
]
importlib.reload(sys.modules[module_name])
module_mtimes[module_name] = st_mtime
def _filter_handlers(event: Event):
event_type = type(event)
return [
event_handler
for event_handler in event_handlers
if event_handler.__annotations__['event'] in (event_type, Event)
]
| [
"[email protected]"
] | |
cacefbab7b3e73ec5ec0bc3b622944a513516fa5 | 369baa3e61ec16e94bfd6755cc9f17fe12178aa2 | /stock_analysis/spider_holder.py | 7c473c49fb0c691b68e0bd119bb47f61f069e284 | [] | no_license | amelie5/interesting_project | 4268d85fbc7c7c9f0a5f37ea23c858be89d8931e | f63ef4c71129252eae08b92f2686622976426646 | refs/heads/master | 2021-01-11T18:24:26.805498 | 2017-09-25T09:22:02 | 2017-09-25T09:22:02 | 79,536,194 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,153 | py | # coding=utf-8
import pandas as pd
import requests
from pyquery import PyQuery as pq
import re
import json
def get_holder(code):
df = pd.DataFrame()
url = 'http://stock.finance.qq.com/corp1/stk_holder_count.php?zqdm=' + code
html = requests.get(url).text
p = pq(html).find('table.list>tr')
cnt = 0
for d in p:
cnt += 1
if cnt == 1:
continue
else:
date = pq(d).find('td').eq(0).text()
if date == '2013-12-31':
break
else:
holders = pq(d).find('td').eq(1).text()
holders = float(holders.replace(",", ''))
df = df.append({'code': code, 'holders': holders, 'date': date}, ignore_index=True)
return df
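# Illustrative call (the ticker below is an arbitrary example, not part of this module):
#   holders_df = get_holder('600000')
#   print(holders_df.head())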
def get_holder_dongfang():
df = pd.DataFrame()
for page in range(1,16):
print('page: {}'.format(page))
url = 'http://data.eastmoney.com/DataCenter_V3/gdhs/GetList.ashx?pagesize=200&page='+str(page)
html = requests.get(url).text
json_list = json.loads(html)
data = json_list['data']
for one in data:
code=one['SecurityCode']
date=one['EndDate']
date = re.findall(r'(.*)T', date)[0]
holders=one['HolderNum']
df = df.append({'code': code, 'holders': holders, 'date': date}, ignore_index=True)
return df
def get_top10_2017(code):
df = pd.DataFrame()
url = 'http://stock.finance.qq.com/corp1/stk_ciholder.php?zqdm=' + code + '&type=2017'
html = requests.get(url).text
p = pq(html).find('table.list>tr')
cnt = 0
for d in p:
if cnt % 13 == 0:
date = pq(d).find('th').text()
if date == "流通股东名单":
break
#date = re.compile(r'报告期: (.*) 公告日期').findall(date)[0]
date='2017-03-31'
if (cnt % 13 == 0) | ((cnt - 1) % 13 == 0) | ((cnt - 12) % 13 == 0):
cnt = cnt + 1
continue
else:
name = pq(d).find('td').eq(1).text()
if name == '':
break
else:
amount = pq(d).find('td').eq(2).text()
amount = amount.replace(",", '')
amount = float(amount)
type = pq(d).find('td').eq(3).text()
percent = pq(d).find('td').eq(4).text()
percent = float(percent.replace("%", ''))
change = pq(d).find('td').eq(5).text()
df = df.append({'code': code, 'amount': amount, 'company': name, 'type': type, 'percent': percent,
'change': change, 'date': date}, ignore_index=True)
cnt += 1
return df
def get_top10(code):
df = pd.DataFrame()
url = 'http://stock.finance.qq.com/corp1/stk_ciholder.php?zqdm=' + code + '&type=2015'
html = requests.get(url).text
p = pq(html).find('table.list')
cnt = 0
for d in p:
date = pq(d).find('th').text()
if date == "流通股东名单":
break
date = re.compile(r'报告期: (.*) 公告日期').findall(date)[0]
p_p = pq(d).find('tr')
cnt = 0
for d_d in p_p:
if cnt <2:
cnt = cnt + 1
continue
else:
name = pq(d_d).find('td').eq(1).text()
if name == '':
break
else:
amount = pq(d_d).find('td').eq(2).text()
amount = amount.replace(",", '')
amount = float(amount)
type = pq(d_d).find('td').eq(3).text()
percent = pq(d_d).find('td').eq(4).text()
percent = float(percent.replace("%", ''))
change = pq(d_d).find('td').eq(5).text()
df = df.append({'code': code, 'amount': amount, 'company': name, 'type': type, 'percent': percent,
'change': change, 'date': date}, ignore_index=True)
cnt += 1
return df
if __name__ == '__main__':
get_holder_dongfang()
| [
"[email protected]"
] | |
e355d624e961a40e8b92cff5458a5e10991f08c4 | 6db927df05667bbe793d95b63e29f5f6cc4d6b7e | /app/users/serializers.py | 2f49dde052f319461964784ed654e7a046278146 | [] | no_license | ankhanguit/DjangoRest | 94509702af2ec3702acfa7430e57e00340f069fe | 15a9cc51679d62dd094ff010a8b10578774a520c | refs/heads/master | 2020-04-05T07:43:17.461774 | 2018-11-08T10:01:34 | 2018-11-08T10:01:34 | 156,686,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
birth_date = serializers.ReadOnlyField()
gender = serializers.ReadOnlyField()
class Meta:
model = models.User
fields = (
'username',
'email',
'birth_date',
'gender',
)
| [
"[email protected]"
] | |
d46568a7d208963ca57ef8ab6042e825a9fd0a26 | 05ce9a6e1575f9fca2c55e2a317fbaee98819f2b | /icon/make_icon | 0d560f7ec388c4c1a9b5dd581134b84866297cb9 | [
"Apache-2.0"
] | permissive | lkesteloot/r_view | f05c64ce7b13d405cdf52603ed511812cf12f236 | 7fba4abb53731cd2a99459b33a7e4af9af10e59b | refs/heads/master | 2022-01-03T06:02:21.512079 | 2021-12-28T01:48:49 | 2021-12-28T01:48:49 | 124,017,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | #!/usr/bin/python
# Make our icon procedurally.
import sys
from PIL import Image
RAINBOW = [
(200, 20, 20),
(255, 127, 0),
(240, 240, 0),
(20, 200, 20),
(20, 20, 255),
(75, 20, 130),
(148, 0, 211),
]
def make_icon(pathname):
width = 1024
height = 1024
padding = 20
margin = 80
tile_size = (width - 2*margin - padding) / len(RAINBOW)
pixels = []
for y in range(height):
for x in range(width):
if x < margin or y < margin or x >= width - margin or y >= height - margin:
# Outside margins.
color = (0, 0, 0, 0)
else:
internal_x = x - margin
internal_y = y - margin
tile_x = internal_x/tile_size
tile_y = internal_y/tile_size
tile_offset_x = internal_x - tile_x*tile_size
tile_offset_y = internal_y - tile_y*tile_size
# See if we're in the padding.
if tile_x >= len(RAINBOW) or tile_offset_x < padding or \
tile_y >= len(RAINBOW) or tile_offset_y < padding:
# Internal padding.
color = (255, 255, 255, 255)
else:
# Colored tiles.
index = (tile_x + tile_y) % len(RAINBOW)
color = RAINBOW[index] + (255,)
pixels.append(color)
image = Image.new("RGBA", (width, height))
image.putdata(pixels)
image.save(pathname)
def main():
if len(sys.argv) != 2:
sys.stderr.write("Usage: %s out.png\n" % (sys.argv[0],))
sys.exit(1)
pathname = sys.argv[1]
make_icon(pathname)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | ||
5f1c1c4a88042e548781f0e89c112de4fa691097 | c399d83163bfdc40dde80cf771a2606d95fc83ed | /src/c2/while-else.py | 05a68579c5bfc6c5e971061edd9bd9bca3e9f228 | [] | no_license | saimjcf/python_study | 3b5c141b683a8b7b05ee0586d2542d263cb6d652 | b4a137d17dea4be404ce6f2160b69cae8457feec | refs/heads/master | 2020-04-01T03:34:38.193225 | 2018-10-13T02:57:56 | 2018-10-13T02:57:56 | 152,827,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | i = 8
while i < 5:
print("while(in)=", i)
i = i + 1
else:
print("while(else)", i)
| [
"saimj@DESKTOP-LLVDTKO"
] | saimj@DESKTOP-LLVDTKO |
8b57f43da0c346fd63b1a5ec64aa649b9ffcb8f4 | 66963a3a64123dd343f9520750eea8d469a8eb4f | /scripts/export_targets.py | 88f983b29bcc376b0c55cba0262e2d44952e4d73 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Montura/angle | 2c1d9dea780634044db4c2dc6825863e79c97540 | e2c225d48b2eca6a2d56ff8e56635a1c2e1a2db5 | refs/heads/master | 2020-12-19T16:00:30.915478 | 2020-02-05T08:55:23 | 2020-02-05T08:55:23 | 235,775,988 | 0 | 0 | NOASSERTION | 2020-01-23T10:54:54 | 2020-01-23T10:54:54 | null | UTF-8 | Python | false | false | 9,353 | py | #! /usr/bin/env python3
assert __name__ == '__main__'
'''
To update ANGLE in Gecko, use Windows with git-bash, and set up depot_tools, python2, and
python3. Because depot_tools expects `python` to be `python2` (shame!), python2 must come
before python3 in your path.
Upstream: https://chromium.googlesource.com/angle/angle
Our repo: https://github.com/mozilla/angle
It has branches like 'firefox-60' which is the branch we use for pulling into
Gecko with this script.
This script leaves a record of the merge-base and cherry-picks that we pull into
Gecko. (gfx/angle/cherries.log)
ANGLE<->Chrome version mappings are here: https://omahaproxy.appspot.com/
An easy choice is to grab Chrome's Beta's ANGLE branch.
## Usage
Prepare your env:
~~~
export PATH="$PATH:/path/to/depot_tools"
~~~
If this is a new repo, don't forget:
~~~
# In the angle repo:
./scripts/bootstrap.py
gclient sync
~~~
Update: (in the angle repo)
~~~
# In the angle repo:
/path/to/gecko/gfx/angle/update-angle.py origin/chromium/XXXX
git push moz # Push the firefox-XX branch to github.com/mozilla/angle
~~~
'''
import json
import os
import pathlib
import re
import shutil
import subprocess
import sys
from typing import * # mypy annotations
REPO_DIR = pathlib.Path.cwd()
GN_ENV = dict(os.environ)
# We need to set DEPOT_TOOLS_WIN_TOOLCHAIN to 0 for non-Googlers, but otherwise
# leave it unset since vs_toolchain.py assumes that the user is a Googler with
# the Visual Studio files in depot_tools if DEPOT_TOOLS_WIN_TOOLCHAIN is not
# explicitly set to 0.
vs_found = False
for directory in os.environ['PATH'].split(os.pathsep):
vs_dir = os.path.join(directory, 'win_toolchain', 'vs_files')
if os.path.exists(vs_dir):
vs_found = True
break
if not vs_found:
GN_ENV['DEPOT_TOOLS_WIN_TOOLCHAIN'] = '0'
if len(sys.argv) < 3:
sys.exit('Usage: export_targets.py OUT_DIR ROOTS...')
(OUT_DIR, *ROOTS) = sys.argv[1:]
for x in ROOTS:
assert x.startswith('//:')
# ------------------------------------------------------------------------------
def run_checked(*args, **kwargs):
print(' ', args, file=sys.stderr)
sys.stderr.flush()
return subprocess.run(args, check=True, **kwargs)
def sortedi(x):
return sorted(x, key=str.lower)
def dag_traverse(root_keys: Sequence[str], pre_recurse_func: Callable[[str], list]):
visited_keys: Set[str] = set()
def recurse(key):
if key in visited_keys:
return
visited_keys.add(key)
t = pre_recurse_func(key)
try:
(next_keys, post_recurse_func) = t
except ValueError:
(next_keys,) = t
post_recurse_func = None
for x in next_keys:
recurse(x)
if post_recurse_func:
post_recurse_func(key)
return
for x in root_keys:
recurse(x)
return
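# dag_traverse() walks the graph depth-first from root_keys, visiting each key once.
# pre_recurse_func(key) returns either (child_keys,) or (child_keys, post_recurse_func);
# the optional post_recurse_func is called after all of the key's children are handled.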
# ------------------------------------------------------------------------------
print('Importing graph', file=sys.stderr)
try:
p = run_checked('gn', 'desc', '--format=json', str(OUT_DIR), '*', stdout=subprocess.PIPE,
env=GN_ENV, shell=(True if sys.platform == 'win32' else False))
except subprocess.CalledProcessError:
sys.stderr.buffer.write(b'"gn desc" failed. Is depot_tools in your PATH?\n')
exit(1)
# -
print('\nProcessing graph', file=sys.stderr)
descs = json.loads(p.stdout.decode())
# Ready to traverse
# ------------------------------------------------------------------------------
LIBRARY_TYPES = ('shared_library', 'static_library')
def flattened_target(target_name: str, descs: dict, stop_at_lib: bool =True) -> dict:
flattened = dict(descs[target_name])
EXPECTED_TYPES = LIBRARY_TYPES + ('source_set', 'group', 'action')
def pre(k):
dep = descs[k]
dep_type = dep['type']
deps = dep['deps']
if stop_at_lib and dep_type in LIBRARY_TYPES:
return ((),)
if dep_type == 'copy':
assert not deps, (target_name, dep['deps'])
else:
assert dep_type in EXPECTED_TYPES, (k, dep_type)
for (k,v) in dep.items():
if type(v) in (list, tuple, set):
flattened[k] = sortedi(set(flattened.get(k, []) + v))
else:
#flattened.setdefault(k, v)
pass
return (deps,)
dag_traverse(descs[target_name]['deps'], pre)
return flattened
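# flattened_target() merges every list-valued property (sources, defines, include_dirs,
# outputs, ...) of the target's transitive source_set/group/action dependencies into one
# dict, stopping at shared/static libraries when stop_at_lib is True; scalar properties
# keep the root target's values.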
# ------------------------------------------------------------------------------
# Check that includes are valid. (gn's version of this check doesn't seem to work!)
INCLUDE_REGEX = re.compile(b'(?:^|\\n) *# *include +([<"])([^>"]+)[>"]')
assert INCLUDE_REGEX.match(b'#include "foo"')
assert INCLUDE_REGEX.match(b'\n#include "foo"')
# Most of these are ignored because this script does not currently handle
# #includes in #ifdefs properly, so they will erroneously be marked as being
# included, but not part of the source list.
IGNORED_INCLUDES = {
b'compiler/translator/TranslatorESSL.h',
b'compiler/translator/TranslatorGLSL.h',
b'compiler/translator/TranslatorHLSL.h',
b'compiler/translator/TranslatorMetal.h',
b'compiler/translator/TranslatorVulkan.h',
b'libANGLE/renderer/d3d/DeviceD3D.h',
b'libANGLE/renderer/d3d/DisplayD3D.h',
b'libANGLE/renderer/d3d/RenderTargetD3D.h',
b'libANGLE/renderer/d3d/d3d11/winrt/NativeWindow11WinRT.h',
b'libANGLE/renderer/gl/glx/DisplayGLX.h',
b'libANGLE/renderer/gl/cgl/DisplayCGL.h',
b'libANGLE/renderer/gl/eagl/DisplayEAGL.h',
b'libANGLE/renderer/gl/egl/ozone/DisplayOzone.h',
b'libANGLE/renderer/gl/egl/android/DisplayAndroid.h',
b'libANGLE/renderer/gl/wgl/DisplayWGL.h',
b'libANGLE/renderer/metal/DisplayMtl_api.h',
b'libANGLE/renderer/null/DisplayNULL.h',
b'libANGLE/renderer/vulkan/android/DisplayVkAndroid.h',
b'libANGLE/renderer/vulkan/fuchsia/DisplayVkFuchsia.h',
b'libANGLE/renderer/vulkan/ggp/DisplayVkGGP.h',
b'libANGLE/renderer/vulkan/mac/DisplayVkMac.h',
b'libANGLE/renderer/vulkan/win32/DisplayVkWin32.h',
b'libANGLE/renderer/vulkan/xcb/DisplayVkXcb.h',
b'kernel/image.h',
}
IGNORED_INCLUDE_PREFIXES = {
b'android',
b'Carbon',
b'CoreFoundation',
b'CoreServices',
b'IOSurface',
b'mach',
b'mach-o',
b'OpenGL',
b'pci',
b'sys',
b'wrl',
b'X11',
}
IGNORED_DIRECTORIES = {
'//third_party/SwiftShader',
'//third_party/vulkan-headers',
'//third_party/vulkan-loader',
'//third_party/vulkan-tools',
'//third_party/vulkan-validation-layers',
}
def has_all_includes(target_name: str, descs: dict) -> bool:
for ignored_directory in IGNORED_DIRECTORIES:
if target_name.startswith(ignored_directory):
return True
flat = flattened_target(target_name, descs, stop_at_lib=False)
acceptable_sources = flat.get('sources', []) + flat.get('outputs', [])
acceptable_sources = {x.rsplit('/', 1)[-1].encode() for x in acceptable_sources}
ret = True
desc = descs[target_name]
for cur_file in desc.get('sources', []):
assert cur_file.startswith('/'), cur_file
if not cur_file.startswith('//'):
continue
cur_file = pathlib.Path(cur_file[2:])
text = cur_file.read_bytes()
for m in INCLUDE_REGEX.finditer(text):
if m.group(1) == b'<':
continue
include = m.group(2)
if include in IGNORED_INCLUDES:
continue
try:
(prefix, _) = include.split(b'/', 1)
if prefix in IGNORED_INCLUDE_PREFIXES:
continue
except ValueError:
pass
include_file = include.rsplit(b'/', 1)[-1]
if include_file not in acceptable_sources:
#print(' acceptable_sources:')
#for x in sorted(acceptable_sources):
# print(' ', x)
print('Warning in {}: {}: Invalid include: {}'.format(target_name, cur_file, include), file=sys.stderr)
ret = False
#print('Looks valid:', m.group())
continue
return ret
# -
# Gather real targets:
def gather_libraries(roots: Sequence[str], descs: dict) -> Set[str]:
libraries = set()
def fn(target_name):
cur = descs[target_name]
print(' ' + cur['type'], target_name, file=sys.stderr)
assert has_all_includes(target_name, descs), target_name
if cur['type'] in ('shared_library', 'static_library'):
libraries.add(target_name)
return (cur['deps'], )
dag_traverse(roots, fn)
return libraries
# -
libraries = gather_libraries(ROOTS, descs)
print(f'\n{len(libraries)} libraries:', file=sys.stderr)
for k in libraries:
print(f' {k}', file=sys.stderr)
print('\nstdout begins:', file=sys.stderr)
sys.stderr.flush()
# ------------------------------------------------------------------------------
# Output
out = {k: flattened_target(k, descs) for k in libraries}
for (k,desc) in out.items():
dep_libs: Set[str] = set()
for dep_name in set(desc['deps']):
dep = descs[dep_name]
if dep['type'] in LIBRARY_TYPES:
dep_libs.add(dep_name[3:])
desc['deps'] = sortedi(dep_libs)
json.dump(out, sys.stdout, indent=' ')
exit(0)
| [
"[email protected]"
] | |
395d25db1cef864e086bca62372ed2fd8cbf961a | 115f22f558eb96ca87141a52b953bbc3f2dfc2b1 | /2019/check_solution_file.py | 43e91de931863b182bd1c82dbc9beef8af77c649 | [] | no_license | Ohav/HashCode | 0349ca3b5df8af4e7f845cb2200c8814cec4e40a | 5d5d8d4af1f6c9c09f13eaa595ac657d885d1d5a | refs/heads/master | 2020-05-26T15:07:04.831720 | 2019-05-23T17:28:36 | 2019-05-23T17:28:36 | 188,278,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | # Dan eldad Is the best gever
# Sanity check:
# 1. Lines of presentation match the first line
# 2. each photo apears only once
# 3. each row has 1 or two numbers
# 4. there are rows with two numbers
photos = set()
with open ("final.sol", 'r') as file:
number = int(file.readline())
counter = 0
found_verticals = False
for line in file.readlines():
cur_line = line.split()
line_len = len(cur_line)
if line_len > 2:
print ("found line with {0} numbers (line number: {1})".format(line_len, counter))
if line_len == 2:
found_verticals = True
for photo in cur_line:
if photo in photos:
print("found duplicated picture {0}".format(photo))
else:
photos.add(photo)
counter += 1
if counter != number:
print("number of photos do not match {0} != {1}".format(number, counter)) | [
"[email protected]"
] | |
cc4a3c0bedae1de0d655f0cdf260de4421e3be97 | 3edaacb98d681523d502ff46ab4c88d3caf2fdc2 | /node_modules/bcrypt/build/config.gypi | aaf50852446adca3c50d2382fc109069d6955e22 | [
"MIT"
] | permissive | Quito848/AuthTest | 1400cadc0ef8267dc57484c62a1800b69ec535f5 | d950a1569b2ca37e609ba3ab44d7254191603047 | refs/heads/master | 2020-04-21T18:21:03.214448 | 2019-02-08T16:43:09 | 2019-02-08T16:43:09 | 169,766,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,159 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": [],
"msbuild_toolset": "v141",
"msvs_windows_target_platform_version": "10.0.16299.0"
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt62l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "62",
"nasm_version": "2.13",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 67,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.67",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\Petr\\.node-gyp\\11.0.0",
"standalone_static_library": 1,
"msbuild_path": "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe",
"fallback_to_build": "true",
"module": "C:\\Users\\Petr\\WebstormProjects\\AuthTest\\node_modules\\bcrypt\\lib\\binding\\bcrypt_lib.node",
"module_name": "bcrypt_lib",
"module_path": "C:\\Users\\Petr\\WebstormProjects\\AuthTest\\node_modules\\bcrypt\\lib\\binding",
"napi_version": "3",
"node_abi_napi": "napi",
"napi_build_version": "0",
"node_napi_label": "node-v67",
"access": "",
"allow_same_version": "",
"also": "",
"always_auth": "",
"audit": "true",
"audit_level": "low",
"auth_type": "legacy",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\Petr\\AppData\\Roaming\\npm-cache",
"cache_lock_retries": "10",
"cache_lock_stale": "60000",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cert": "",
"cidr": "",
"color": "true",
"commit_hooks": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"dry_run": "",
"editor": "notepad.exe",
"engine_strict": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_maxtimeout": "60000",
"fetch_retry_mintimeout": "10000",
"force": "",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\Petr\\AppData\\Roaming\\npm\\etc\\npmrc",
"globalignorefile": "C:\\Users\\Petr\\AppData\\Roaming\\npm\\etc\\npmignore",
"global_style": "",
"group": "",
"ham_it_up": "",
"heading": "npm",
"https_proxy": "",
"if_present": "",
"ignore_prepublish": "",
"ignore_scripts": "",
"init_author_email": "",
"init_author_name": "",
"init_author_url": "",
"init_license": "ISC",
"init_module": "C:\\Users\\Petr\\.npm-init.js",
"init_version": "1.0.0",
"json": "",
"key": "",
"legacy_bundling": "",
"link": "",
"local_address": "",
"logs_max": "10",
"long": "",
"maxsockets": "50",
"message": "%s",
"metrics_registry": "https://registry.npmjs.org/",
"node_gyp": "C:\\Program Files\\nodejs\\node_modules\\npm\\node_modules\\node-gyp\\bin\\node-gyp.js",
"node_options": "",
"node_version": "11.0.0",
"noproxy": "",
"offline": "",
"onload_script": "",
"only": "",
"optional": "true",
"otp": "",
"package_lock": "true",
"package_lock_only": "",
"parseable": "",
"prefer_offline": "",
"prefer_online": "",
"prefix": "C:\\Users\\Petr\\AppData\\Roaming\\npm",
"preid": "",
"production": "",
"progress": "true",
"read_only": "",
"rebuild_bundle": "true",
"registry": "https://registry.npmjs.org/",
"rollback": "true",
"save": "true",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"save_prod": "",
"scope": "",
"scripts_prepend_node_path": "warn-only",
"script_shell": "",
"searchexclude": "",
"searchlimit": "20",
"searchopts": "",
"searchstaleness": "900",
"send_metrics": "",
"shell": "C:\\WINDOWS\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_commit": "",
"sign_git_tag": "",
"sso_poll_frequency": "500",
"sso_type": "oauth",
"strict_ssl": "true",
"tag": "latest",
"tag_version_prefix": "v",
"timing": "",
"tmp": "C:\\Users\\Petr\\AppData\\Local\\Temp",
"umask": "0000",
"unicode": "",
"unsafe_perm": "true",
"update_notifier": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\Petr\\.npmrc",
"user_agent": "npm/6.4.1 node/v11.0.0 win32 x64",
"version": "",
"versions": "",
"viewer": "browser"
}
}
| [
"[email protected]"
] | |
f08c96f223b44901d252179aaba6e2dfd26f5949 | 6be2483b27a8f13cd999691eab8e8d58ca731db3 | /main.py | 5c50bc4192b580bc844eb21f82e7325e859b4cfb | [] | no_license | EthanLee0210/uno-project | f22273618a39456af071faa351e2cebb4cdedaf7 | 6a1b1e6686d53e564fe907430ba020c991a7c92d | refs/heads/main | 2023-01-20T21:10:27.692975 | 2020-11-25T21:17:07 | 2020-11-25T21:17:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,019 | py | import deck_functions as df
import hand_functions as hf
import player_functions as pf
class Token:
def __init__(self):
self.count = 0
self.last_card = ''
self.wild_color = ''
self.is_reversed = False
self.is_first_reversed = False
self.is_skipped = False
self.is_draw_two = False
self.is_draw_four = False
self.skip_after_draw = False
def __str__(self):
        return 'Player ' + str(self.count + 1) + ': ' + players[self.count].name + ' is up!'
def increment(self):
if not self.is_reversed:
if self.count == len(players) - 1:
self.count = 0
else:
self.count += 1
else:
if self.count == 0:
self.count = len(players) - 1
else:
self.count -= 1
def reverse(self):
if self.is_reversed:
self.is_reversed = False
else:
self.is_reversed = True
def read_last(self):
print('Last card played was a {}.'.format(hf.decode(self.last_card)))
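# The Token tracks whose turn it is (count), the card on top of the discard pile
# (last_card), the color chosen for a wild card, and any pending effects (skip, reverse,
# draw two/four) that the next player must resolve at the start of their turn.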
# SETUP
players = [pf.Player(1, 'Dan'), pf.Player(2, 'Ethan'), pf.Player(3, 'Holton')] # placeholder players
# players = pf.setup_players() # actual player constructor
token = Token()
token.last_card = df.choose_card()
while token.last_card[0] == 'w' or token.last_card[1] == 'd' or token.last_card[1] == 's' or token.last_card[1] == 'r':
token.last_card = df.choose_card() # So that we don't start the game with a Wild/Skip/Draw/Reverse
# GAMEPLAY LOOP players[token.count] is how we refer to the player whose turn it is.
while True:
if token.is_skipped:
print(players[token.count].name + ', you\'re skipped this turn. Boo hoo, better luck next time!\n')
token.is_skipped = False # And then it skips all the rest of that player's actions.
else:
if token.is_first_reversed: # Give a special greeting to the person right after the reverse.
print('Back to you, {}!'.format(players[token.count].name))
token.is_first_reversed = False # Make it so it doesn't do that again.
else:
print('Player {}: {} you\'re up!'.format(token.count + 1, players[token.count].name))
if token.is_draw_two:
players[token.count].draw_two()
token.is_draw_two = False
elif token.is_draw_four:
players[token.count].draw_four()
token.is_draw_four = False
else:
players[token.count].holding()
token.read_last()
# ONE PLAYER'S TURN LOOP
while True:
print('What would you like to do?')
print(' 1. Draw a card\n 2. Play a card\n--> ', end='')
prompt = input()
if prompt == '1': # Draw a card
players[token.count].draw_card(read=True)
players[token.count].holding(now=True)
token.read_last()
elif prompt == '2': # Play a card
choice = players[token.count].play_card()
if choice == -1:
continue
chosen_card = players[token.count].hand[choice]
if not hf.is_legal(chosen_card, token.last_card):
hf.admonish(chosen_card, token.last_card)
else:
print('You played: ' + hf.decode(chosen_card) + '.')
token.last_card = chosen_card
token.wild_color = '' # Clear the restriction on what color can be played.
if chosen_card[0] == 'w': # Record any actions that affect the following player. vv
print('What color do you choose?')
print('r = Red | y = Yellow | g = Green | b = Blue')
token.wild_color = input(' --> ')
token.last_card = token.wild_color + chosen_card[1]
if chosen_card[1] == 's':
token.is_skipped = True
elif chosen_card[1] == 'r':
token.reverse()
token.is_first_reversed = True
elif chosen_card[1] == 'd':
token.is_draw_two = True
elif chosen_card[1] == 'f':
token.is_draw_four = True
players[token.count].hand.remove(chosen_card)
players[token.count].holding(now=True)
ending = input('Type "end" to end your turn. --> ')
if ending.lower() == 'uno':
print(players[token.count].name + ' declares UNO! Watch out!')
print()
break
elif prompt == 'd2':
players[token.count].draw_two()
elif prompt == 'done': # End turn
break
elif prompt == 'exit':
break
if prompt == 'exit':
break
token.increment()
| [
"[email protected]"
] | |
507d7886ffe1824961877b0e13125edc6b4f9b48 | 2d539ed2a5e374a4427a6dcb023c43cb1a42b708 | /rest_api_app/migrations/0018_auto_20190123_1236.py | d37eb709563efe5817d339a00c6edb2688310e3b | [] | no_license | otninast/rest-api-server | 592eab1d89aaa7f64deca9bbb12d6df3bc8e2eac | 5281c6c6d083bfce9940656bad34b0c01a256e21 | refs/heads/master | 2020-04-15T09:25:50.223542 | 2019-02-15T08:54:40 | 2019-02-15T08:54:40 | 164,549,411 | 0 | 0 | null | 2019-02-15T08:54:41 | 2019-01-08T03:18:41 | Python | UTF-8 | Python | false | false | 506 | py | # Generated by Django 2.0.2 on 2019-01-23 03:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rest_api_app', '0017_auto_20190106_2153'),
]
operations = [
migrations.AlterField(
model_name='trainingprogram',
name='self_assessment_score',
field=models.FloatField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], default=3, verbose_name='Your Training Score'),
),
]
| [
"[email protected]"
] | |
62910aef3f58da6894729e13c5a5ec4cc0a914dc | c90dce8358dc43ee2b50e3b8e099a38a38635d0c | /ivi/lecroy/lecroyWR44XIA.py | e7c4c1c2875e9a53a5031e627880b330c92eeb22 | [
"MIT"
] | permissive | CedricB31/python-ivi | 88209d6ff34dc2f8291eead0ccd03a7437aea9b7 | 5910db9d0e2f4704aa80fa29de86eb98a82bec8b | refs/heads/master | 2020-12-26T09:22:00.284269 | 2016-06-02T15:04:33 | 2016-06-02T15:04:33 | 53,782,758 | 1 | 0 | null | 2016-03-13T11:58:50 | 2016-03-13T11:58:50 | null | UTF-8 | Python | false | false | 1,643 | py | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .lecroyWRXIA import *
class lecroyWR44XIA(lecroyWRXIA):
"Lecroy WaveRunner 44Xi-A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'WaveRunner 44Xi-A')
super(lecroy104XiA, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 400e6
self._init_channels()
| [
"[email protected]"
] | |
c1c75b9ead4406e5b43862d69389055a570d21bd | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res_bw/scripts/common/bwpydevd.py | 9e36553c556081fe6c27de8516ff07ec00fc2c01 | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,544 | py | # 2015.11.18 12:00:11 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/bwpydevd.py
import os
import sys
import ResMgr
import BigWorld
import inspect
import threading
import bwdebug
REPLACE_PATHS = []
HAS_BW_CONFIG = False
if os.name == 'posix':
try:
import BWConfig
HAS_BW_CONFIG = True
except ImportError:
HAS_BW_CONFIG = False
else:
class BWConfig:
scriptConfig = None
@staticmethod
def readString(key, default = ''):
return BWConfig.scriptConfig.readString(key, default)
@staticmethod
def readBool(key, default = False):
return BWConfig.scriptConfig.readBool(key, default)
@staticmethod
def readInt(key, default = 0):
return BWConfig.scriptConfig.readInt(key, default)
@staticmethod
def getSections(key):
sections = []
for sectName, sect in BWConfig.scriptConfig.items():
if sectName == key:
sections.append(sect)
return sections
def BWConfigWrapper(fn):
def wrapped(*args, **kwargs):
global HAS_BW_CONFIG
if os.name == 'posix':
return fn(*args, **kwargs)
else:
BWConfig.scriptConfig = ResMgr.openSection('scripts_config.xml')
if BWConfig.scriptConfig is not None:
HAS_BW_CONFIG = True
fn(*args, **kwargs)
BWConfig.scriptConfig = None
return
return wrapped
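# BWConfigWrapper lets the decorated function read config on both platforms: on posix
# builds the real BWConfig module is imported above, while on Windows the wrapper opens
# scripts_config.xml through ResMgr, exposes it via the BWConfig shim, and clears it again
# once the wrapped call returns.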
@BWConfigWrapper
def startDebug(isStartUp = False):
if not HAS_BW_CONFIG:
return
if isStartUp and not BWConfig.readBool('pydevd/autoConnect/%s' % BigWorld.component, False):
return
for pydevdSect in BWConfig.getSections('pydevd'):
for sectName, sect in pydevdSect.items():
if sectName == 'replacePath':
REPLACE_PATHS.append((sect.readString('to'), sect.readString('from')))
ide = BWConfig.readString('pydevd/ide', 'pycharm')
host = BWConfig.readString('pydevd/host', 'localhost')
port = BWConfig.readInt('pydevd/port', 5678)
suspend = BWConfig.readBool('pydevd/suspend', False)
traceOnlyCurrentThread = BWConfig.readBool('pydevd/traceOnlyCurrentThread', False)
startPyDevD(ide, host, port, suspend, traceOnlyCurrentThread)
bwPyDevDStarted = False
def startPyDevD(ide, host = '127.0.0.1', port = 5678, suspend = False, traceOnlyCurrentThread = False):
global bwPyDevDStarted
if not bwPyDevDStarted:
bwPyDevDStarted = True
pydevDir = ResMgr.resolveToAbsolutePath('scripts/common/pydev/%s/pydev' % ide)
if not os.path.isdir(pydevDir):
bwdebug.ERROR_MSG('Failed to start pydevd: Unable to find pydevd directory for IDE %s' % ide)
sys.path.append(pydevDir)
try:
import pydevd
bwdebug.INFO_MSG('PyDevD connecting to %s:%d' % (host, port))
pydevd.settrace(host=host, port=port, suspend=suspend, stdoutToServer=True, stderrToServer=True, trace_only_current_thread=traceOnlyCurrentThread)
threading.currentThread().__pydevd_id__ = BigWorld.component
except Exception as e:
from traceback import print_exc
print_exc()
bwdebug.ERROR_MSG('Failed to load pydevd: %s' % repr(e))
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\bwpydevd.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 12:00:11 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
cc011a55ad7c61646a22ad860015a5d83d61ad5c | efa9be53cf1094300ed66fe09d19d3886de7e663 | /tests/regressions/python/966_named_arguments.py | 52f1d4c4230498c82665bde68db22392a1904b35 | [
"BSL-1.0"
] | permissive | STEllAR-GROUP/phylanx | 25a6d1ecf8493fd77583d9b54ea4f6b7f2fcac63 | 443ba956e47c2c5ae1e3b6d0f51bb31930fa83e9 | refs/heads/master | 2022-04-30T13:57:26.767667 | 2022-04-06T13:57:01 | 2022-04-06T13:57:01 | 101,409,977 | 87 | 55 | BSL-1.0 | 2022-04-06T13:57:02 | 2017-08-25T14:07:21 | C++ | UTF-8 | Python | false | false | 469 | py | # Copyright (c) 2019 Bita Hasheminezhad
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# #966: Named arguments don't work
from phylanx import Phylanx
import numpy as np
# make flake happy
def eye(N, M, k, dtype):
pass
@Phylanx
def i(N, M=None, k=0, dtype=None):
return eye(N, M=M, k=k, dtype=dtype)
assert((i(3, k=2) == np.eye(3, k=2)).all())
| [
"[email protected]"
] | |
5a78bdc3ef8695d3d3ed81ddb9d28e9f5bae4391 | 587846478c16d1c19c3de6a5f0a20b3d01e98471 | /main/results/experiments_welink.py | f2b4ff6ff3376f0ebf8097dd6b5bd5d202062a21 | [
"MIT"
] | permissive | wissembrdj/welink | 198afac250a20d1042024f695355ac397eda3969 | ebc0cd4742578ad22014bd8067796e8cc1869f02 | refs/heads/master | 2022-11-23T11:48:09.220691 | 2020-07-25T18:56:35 | 2020-07-25T18:56:35 | 275,353,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,970 | py | import json
import csv
from result import Result
import requests
import time
import re
import io
from extract_entities import entities
writer = csv.writer(open("welink_results_qald7.csv", 'a', newline=''))
url = 'http://127.0.0.1:8000/api/'
headers = {'Content-type': 'application/json'}
with open('qald-7.json', encoding='UTF-8') as data_file:
data = json.loads(data_file.read())
nb=0
for distro in data['questions']:
# print(distro['query']['sparql'])
entities_dataset=entities(distro['query']['sparql'])
print(entities_dataset)
entity_mentions=0
correctly_linked=0
n=1
system_result=0
result=[]
tmp=time.time()
for d in distro['question']:
if d["language"]=='en':
question_en=d["string"]
print(question_en)
query = {'query': str(question_en)}
data_json = json.dumps(query)
response = requests.post(url, data=data_json, headers=headers)
execution_time=time.time()-tmp
print(execution_time)
if response:
response_json=response.json()
if 'mentions' in response_json:
detected_entity= len(response_json['mentions'])
system_result=detected_entity
if 'results' in response_json:
# system_result=len(response_json['results'])
entity_mentions=len(entities_dataset)
for em in entities_dataset:
for i in range(len(response_json["mentions"])):
j=response_json["results"][str(i)][0][1]
if j==em:
if j not in result:
# system_result=system_result+n
correctly_linked=correctly_linked+1
result.append(j)
n=n+1
#print(correctly_linked, system_result, entity_mentions)
res= Result(correctly_linked, system_result, entity_mentions)
fmeasure=0
if system_result!=0:
entity_precision=res.precision()
else:
entity_precision=0
if entity_mentions!=0:
entity_recall=res.recall()
else:
entity_recall=0
if entity_recall!=0 and entity_precision!=0:
fmeasure= (2*entity_precision*entity_recall)/(entity_precision + entity_recall)
for i in result:
print("id question: ", distro['id'], "result n: ", system_result, detected_entity, result)
print("Precision:", entity_precision," Recall:", entity_recall )
print("____________________________________")
myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, fmeasure, "0", "0", execution_time] ]
myFile = open('welink_results_qald7.csv', 'a', encoding='utf-8')
with myFile:
writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
writer.writerows(myData)
else:
#No string match
nsm=0
system_result=0
entity_precision=0
entity_recall=0
nsm=nsm+1
myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, "0", "0",nsm, execution_time] ]
print("____________________________________No string match")
myFile = open('welink_results_qald7.csv', 'a', encoding='utf-8')
with myFile:
writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
writer.writerows(myData)
else:
#No detected named entity:
if entities_dataset:
nbem=0
system_result=0
entity_precision=0
entity_recall=0
correctly_linked=0
detected_entity=0
if 'entity mapping' in distro:
for em in distro["entity mapping"]:
nbem=nbem+1
myData=[[distro['id'],question_en,nbem,detected_entity,system_result,correctly_linked, entity_precision,entity_recall,"0", "1", "0", execution_time] ]
print("____________________________________No detected named entity")
else:
nbem=0
system_result=1
entity_precision=1
entity_recall=1
correctly_linked=1
detected_entity=0
fmeasure=1
if 'entity mapping' in distro:
for em in distro["entity mapping"]:
nbem=nbem+1
myData=[[distro['id'],question_en,nbem,detected_entity,system_result,correctly_linked, entity_precision,entity_recall,fmeasure, "3", "3", execution_time] ]
print("____________________________________No mention + No results")
myFile = open('welink_results_qald7.csv', 'a', encoding='utf-8')
with myFile:
writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
writer.writerows(myData)
else:
#Unknown error from the web service
execution_time=time.time()-tmp
system_result=0
entity_precision=0
entity_recall=0
fmeasure= 0
entity_mentions=0
detected_entity=0
correctly_linked=0
print("____________________________________Unknown error from the web service")
myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, fmeasure, "2", "2", execution_time] ]
myFile = open('welink_results_qald7.csv', 'a', encoding='utf-8')
with myFile:
writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
writer.writerows(myData)
#resultats= Results(best_candidate)
#resultats_classified=resultats.message()
#print(resultats_classified)
print("process completed")
# import json
# import csv
# from result import Result
# import requests
# import time
# import re
# import io
#
# def extract_entities(query):
# pattern="http://dbpedia.org/resource/[^>]+"
# return re.findall(pattern,query)
# def extract_entities_QALD7(query):
# firstModified=[]
# #print (query)
# if query=="OUT OF SCOPE":
# return firstModified
# whereString = query[query.index('{')+1:query.rfind('}')-1]
# if "no_query" in whereString:
# return firstModified
# whereString=whereString.replace("\n","")
# whereString=whereString.replace("\t"," ")
# query=whereString
# pattern="res:[^\s]+"
# first=re.findall(pattern,query)
#
# for entity in first:
# firstModified.append(entity.replace("res:","http://dbpedia.org/resource/"))
#
# pattern="http://dbpedia.org/resource/[^>]+"
# second=re.findall(pattern,query)
# #print(firstModified+second)
# return firstModified+second
#
# writer = csv.writer(open("final_results_qald8_tt.csv", 'a', newline=''))
# url = 'http://127.0.0.1:8000/api/'
# headers = {'Content-type': 'application/json'}
# with open('qald-8-train-multilingual.json', encoding='UTF-8') as data_file:
# data = json.loads(data_file.read())
# nb=0
# for distro in data['questions']:
# print(distro['query']['sparql'])
# entities_dataset=extract_entities_QALD7(distro['query']['sparql'])
# print(entities_dataset)
# entity_mentions=0
# correctly_linked=0
# n=1
# system_result=0
# result=[]
# tmp=time.time()
# if distro['question'][nb]['language']=='en':
# question_en=distro['question'][nb]['string']
# query = {'query': str(question_en)}
# data_json = json.dumps(query)
# response = requests.post(url, data=data_json, headers=headers)
# if response:
# execution_time=time.time()-tmp
# response_json=response.json()
# if 'mentions' in response_json:
# detected_entity= len(response_json['mentions'])
# if response_json['results']:
# # system_result=len(response_json['results'])
# if entities_dataset:
# for em in entities_dataset:
# entity_mentions=entity_mentions+1
# for b in response_json['results']:
# n=1
# for j in response_json['results'][str(b)]:
# if j[1]==em:
# if j[1] not in result:
# system_result=system_result+n
# correctly_linked=correctly_linked+1
# result.append(j[1])
# n=n+1
# else:
# system_result=1
# correctly_linked=1
# entity_mentions=1
# #print(correctly_linked, system_result, entity_mentions)
# res= Result(correctly_linked, system_result, entity_mentions)
# fmeasure=0
# if system_result!=0:
# entity_precision=res.precision()
# else:
# entity_precision=0
# if entity_mentions!=0:
# entity_recall=res.recall()
# else:
# entity_recall=0
# if entity_recall!=0 and entity_precision!=0:
# fmeasure= (2*entity_precision*entity_recall)/(entity_precision + entity_recall)
#
# for i in result:
# print("id question: ", distro['id'], "result n: ", system_result, detected_entity, result)
# print("Precision:", entity_precision," Recall:", entity_recall )
# print("____________________________________")
# myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, fmeasure, "0", "0", execution_time] ]
# myFile = open('final_results_qald8_tt.csv', 'a', encoding='utf-8')
# with myFile:
# writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
# writer.writerows(myData)
#
# else:
# #No string match
# system_result=0
# entity_precision=0
# entity_recall=0
# nsm=nsm+1
# myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, "0", "0",nsm, execution_time] ]
# print("____________________________________No string match")
# myFile = open('final_results_qald8_tt.csv', 'a', encoding='utf-8')
# with myFile:
# writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
# writer.writerows(myData)
# else:
# #No detected named entity:
# nbem=0
# system_result=0
# entity_precision=0
# entity_recall=0
# correctly_linked=0
# detected_entity=0
# if 'entity mapping' in distro:
# for em in distro["entity mapping"]:
# nbem=nbem+1
# myData=[[distro['id'],question_en,nbem,detected_entity,system_result,correctly_linked, entity_precision,entity_recall,"0", "1", "0", execution_time] ]
# print("____________________________________No detected named entity")
# myFile = open('final_results_qald8_tt.csv', 'a', encoding='utf-8')
# with myFile:
# writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
# writer.writerows(myData)
# else:
# #Unknown error from the web service
# execution_time=time.time()-tmp
# system_result=0
# entity_precision=0
# entity_recall=0
# fmeasure= 0
# entity_mentions=0
# detected_entity=0
# correctly_linked=0
# print("____________________________________Unknown error from the web service")
# myData=[[distro['id'],question_en,entity_mentions,detected_entity,system_result,correctly_linked, entity_precision,entity_recall, fmeasure, "2", "2", execution_time] ]
# myFile = open('final_results_qald8_tt.csv', 'a', encoding='utf-8')
# with myFile:
# writer = csv.writer(myFile, delimiter =";", lineterminator='\r')
# writer.writerows(myData)
#
#
# #resultats= Results(best_candidate)
# #resultats_classified=resultats.message()
# #print(resultats_classified)
# print("process completed")
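# Hedged sketch (not part of the original script): the Result helper imported in the
# commented-out variant above ("from result import Result") is not included in this file.
# Judging from how it is called (Result(correctly_linked, system_result, entity_mentions),
# then .precision() and .recall()), it is assumed to compute standard precision and recall:
class _ResultSketch:
    def __init__(self, correctly_linked, system_result, entity_mentions):
        self.correctly_linked = correctly_linked
        self.system_result = system_result
        self.entity_mentions = entity_mentions

    def precision(self):
        # correct links over all links the system produced
        return self.correctly_linked / self.system_result

    def recall(self):
        # correct links over all gold entity mentions
        return self.correctly_linked / self.entity_mentions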
| [
"[email protected]"
] | |
47c70190efecc803a2d8be5e7ea849460a62ed28 | 86618c9cc9c9de33bc679c763ddb4cb65e737650 | /MyAnswers/0006/word_in_diary.py | 6c4b66c0f8e35f84f851380c6f340ac3a3863c47 | [] | no_license | LeonHardt427/ShowMeTheCode | ea07779d8c603aad3c89d56dc98a3ec1db48b6d2 | e1ef324b447cc38d3c3b3ac2d19df5dcb113136b | refs/heads/master | 2021-04-29T09:01:34.654493 | 2017-01-20T04:00:42 | 2017-01-20T04:00:42 | 77,668,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | # usr/bin/python
# -*- coding: utf-8 -*-
'''
**Problem 0006:** You have a directory holding a month of diary entries, all plain-text (txt) files. To avoid word-segmentation issues, assume the content is in English; report the words you consider most important in each diary entry.
'''
from collections import Counter
import re
import sys
def get_word(txt):
common_word = ['the', 'in', 'of', 'and', 'to', 'has', 'that', 'this', 's', 'is', 'are', 'a', 'with', 'as', 'an']
file_text = open(txt, 'r', encoding='UTF-8')
content = file_text.read().lower()
pattern = '[a-z0-9\']+'
word = re.findall(pattern, content)
word_list = Counter(word)
for wor in word_list:
if wor in common_word:
word_list[wor] = 0
file_text.close()
return word_list.most_common()[:3]
d = get_word('/Users/leonhart/Documents/Git/TestTxt/TestTxt.txt')
print(d)
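# Hedged sketch (not part of the original answer): the problem statement asks for a whole
# directory of diary files, while get_word() above handles a single file. A minimal driver
# over a diary directory (the path argument is an assumption) could look like this:
import os

def get_words_for_directory(diary_dir):
    # apply get_word() to every .txt diary in the directory, keyed by file name
    return {name: get_word(os.path.join(diary_dir, name))
            for name in sorted(os.listdir(diary_dir))
            if name.endswith('.txt')}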
| [
"[email protected]"
] | |
d5da552639e12a3caecae37b50fdc8d2f9aac299 | 5891324384c1eb96c19bde7a6d21df91890bacce | /Programacao-Internet/primeira-atividade/atividade02.py | 250393fc7488f85a66c91c62c004da3e4c181aed | [] | no_license | Guilherme2020/ADS | f41324f48e6e2bc6ddd7e4310328e09f79b8a7a9 | 6d3ac6effca7633d6bc309ecfa9c5e8349f8e680 | refs/heads/master | 2021-01-17T21:06:21.874766 | 2018-01-19T21:49:40 | 2018-01-19T21:49:40 | 60,442,107 | 0 | 0 | null | 2016-06-08T13:57:25 | 2016-06-05T03:51:00 | Python | UTF-8 | Python | false | false | 273 | py | import requests
import shutil
url = 'https://meusanimais.com.br/wp-content/uploads/2015/05/gato.jpg'
r = requests.get(url, stream=True)
if r.status_code == 200:
with open('gato.jpg','wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
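# Hedged alternative sketch (not part of the original exercise): the same download can be
# written with iter_content, which streams the body in chunks instead of copying r.raw.
def download_image(url, filename, chunk_size=8192):
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(filename, 'wb') as out_file:
            for chunk in response.iter_content(chunk_size=chunk_size):
                out_file.write(chunk)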
| [
"[email protected]"
] | |
c0901823b1d16ad5c74b7e70f97e4d7f11330163 | e664f2b2b23f2194bc6f2e8782c56c54fe22527d | /venv/bin/epylint | 45a56bbc0831569e555be8a88096192e8a4ab145 | [
"MIT"
] | permissive | daktari01/sama_automated_incentive_app | fb944c244dec244b45fd4e592e95b91f2bec96c0 | 2b49d504191a10067aa1de4637180c6bc7924054 | refs/heads/master | 2021-09-04T09:18:08.566742 | 2017-12-26T03:47:31 | 2017-12-26T03:47:31 | 113,555,536 | 0 | 0 | MIT | 2017-12-26T03:47:31 | 2017-12-08T09:14:58 | null | UTF-8 | Python | false | false | 282 | #!/home/daktari/Andela/flask_pro/sama_automated_incentive_app/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_epylint())
| [
"[email protected]"
] | ||
f9b1707515b78946ab468234f39d1d4903d4e9b8 | a165e553b8c06224f00713c0b11220a0f675ab3d | /streamlit/app.py | c7dc078d15802db6e4097cde52fe054bfb57b7ed | [] | no_license | Uttam-Grade-McK/Income-Prediction | 92c9bd2db2321c76b5ea7dd572cad960d34a5b9e | 667755668d929aad41e5ff96d9604c1ae69d38ab | refs/heads/main | 2023-08-06T03:15:55.640628 | 2021-09-14T17:34:31 | 2021-09-14T17:34:31 | 405,459,667 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,150 | py | # from flask import Flask, request
import pandas as pd
import numpy as np
import pickle
import sklearn
import streamlit as st
from PIL import Image
pickle_in = open('Income_Classifier.pkl', 'rb')
classifier = pickle.load(pickle_in)
def Predict_Income(age, fnlwgt, education_num, marital_status, relationship, race, sex, capital_gain, capital_loss, hours_per_week, country, employment_type):
"""Let's Predict the Income
This is using docstrings for specifications.
---
parameters:
- name: age
in: query
type: number
required: true
- name: fnlwgt
in: query
type: number
required: true
- name: education-num
in: query
type: number
required: true
- name: marital-status
in: query
type: number
required: true
- name: relationship
in: query
type: number
required: true
- name: race
in: query
type: number
required: true
- name: sex
in: query
type: number
required: true
- name: capital-gain
in: query
type: number
required: true
- name: capital-loss
in: query
type: number
required: true
- name: hours-per-week
in: query
type: number
required: true
- name: country
in: query
type: number
required: true
- name: employment_type
in: query
type: number
required: true
responses:
200:
description: The output values
"""
'''age = request.args.get('age')
fnlwgt = request.args.get('fnlwgt')
education_num = request.args.get('education-num')
marital_status = request.args.get('marital-status')
relationship = request.args.get('relationship')
race = request.args.get('race')
sex = request.args.get('sex')
capital_gain = request.args.get('capital-gain')
capital_loss = request.args.get('capital-loss')
hours_per_week = request.args.get('hours-per-week')
country = request.args.get('country')
employment_type = request.args.get('employment_type')'''
prediction = classifier.predict([[age, fnlwgt, education_num,
marital_status, relationship,
race, sex, capital_gain, capital_loss,
hours_per_week, country,
employment_type]])
print("prediction:", prediction)
return 'This is the Predicted Value:-->' , prediction
def main():
st.title('Adult Income Prediction')
html_temp = """
<div style="background-color:tomato;padding:10px">
<h2 style="color:white;text-align:center;">Streamlit Adult Income Prediction ML App</h2>
</div>
"""
st.markdown(html_temp, unsafe_allow_html=True)
age = st.text_input('age', 'Type Here')
fnlwgt = st.text_input('fnlwgt', 'Type Here')
education_num = st.text_input('education-num', 'Type Here')
marital_status = st.text_input('marital-status', 'Type Here')
relationship = st.text_input('relationship', 'Type Here')
race = st.text_input('race', 'Type Here')
sex = st.text_input('sex', 'Type Here')
capital_gain = st.text_input('capital-gain', 'Type Here')
capital_loss = st.text_input('capital-loss', 'Type Here')
hours_per_week = st.text_input('hours-per-week', 'Type Here')
country = st.text_input('country', 'Type Here')
employment_type = st.text_input('employment-type', 'Type Here')
result = ""
if st.button("Predict"):
result = Predict_Income(age, fnlwgt, education_num, marital_status, relationship, race, sex, capital_gain, capital_loss, hours_per_week, country, employment_type)
st.success("The Output is {}".format(result))
if st.button('About'):
st.text('Let,s Learn')
st.text('Built with Streamlit!')
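# Hedged usage notes (not part of the original app): the app is launched from the command
# line with "streamlit run app.py". Note that st.text_input returns strings, while the
# pickled classifier most likely expects numeric features; a defensive wrapper could cast
# the fields first, for example:
def _predict_from_numeric_strings(*fields):
    numeric_fields = [float(field) for field in fields]   # assumes every field is numeric
    return classifier.predict([numeric_fields])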
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
6f43df9fc8783a67f35973c65b05af46f76f2117 | 1305bcd0525b66800f07f7cdc12154daee54c2d8 | /src/sentry/integrations/github/client.py | ea76c399077c1258f8b4565c97de7446ca29e841 | [
"BSD-2-Clause"
] | permissive | xiaohuihuifgt/sentry | 9c5a61fed749d837e93e555b46d11777d848b301 | ea4098d57463abf52c69c5225f9581071cd03f5b | refs/heads/master | 2020-03-20T03:45:06.180800 | 2018-06-13T00:27:51 | 2018-06-13T00:27:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,039 | py | from __future__ import absolute_import
import datetime
from sentry.integrations.github.utils import get_jwt
from sentry.integrations.client import ApiClient
class GitHubClientMixin(ApiClient):
allow_redirects = True
base_url = 'https://api.github.com'
def get_last_commits(self, repo, end_sha):
# return api request that fetches last ~30 commits
# see https://developer.github.com/v3/repos/commits/#list-commits-on-a-repository
# using end_sha as parameter
return self.get(
'/repos/{}/commits'.format(
repo,
),
params={'sha': end_sha},
)
def compare_commits(self, repo, start_sha, end_sha):
# see https://developer.github.com/v3/repos/commits/#compare-two-commits
# where start sha is oldest and end is most recent
return self.get('/repos/{}/compare/{}...{}'.format(
repo,
start_sha,
end_sha,
))
def get_pr_commits(self, repo, num):
# see https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request
# Max: 250 Commits
return self.get('/repos/{}/pulls/{}/commits'.format(
repo,
num
))
def get_commits(self, repo):
return self.get('/repos/{}/commits'.format(repo))
def get_repo(self, repo):
return self.get('/repos/{}'.format(repo))
class GitHubAppsClient(GitHubClientMixin):
def __init__(self, external_id):
self.external_id = external_id
self.token = None
self.expires_at = None
super(GitHubAppsClient, self).__init__()
def get_token(self):
if not self.token or self.expires_at < datetime.datetime.utcnow():
res = self.create_token()
self.token = res['token']
self.expires_at = datetime.datetime.strptime(
res['expires_at'],
'%Y-%m-%dT%H:%M:%SZ',
)
return self.token
def request(self, method, path, headers=None, data=None, params=None):
if headers is None:
headers = {
'Authorization': 'token %s' % self.get_token(),
# TODO(jess): remove this whenever it's out of preview
'Accept': 'application/vnd.github.machine-man-preview+json',
}
return self._request(method, path, headers=headers, data=data, params=params)
def create_token(self):
return self.post(
'/installations/{}/access_tokens'.format(
self.external_id,
),
headers={
'Authorization': 'Bearer %s' % get_jwt(),
# TODO(jess): remove this whenever it's out of preview
'Accept': 'application/vnd.github.machine-man-preview+json',
},
)
def get_repositories(self):
repositories = self.get(
'/installation/repositories',
params={'per_page': 100},
)
return repositories['repositories']
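# Hedged usage sketch (not part of the original module): the installation id and repository
# below are placeholders, and real calls need valid GitHub App credentials behind get_jwt().
def _example_usage():
    client = GitHubAppsClient(external_id='12345')                  # installation id (placeholder)
    repos = client.get_repositories()                               # repositories visible to the app
    diff = client.compare_commits('owner/repo', 'abc1234', 'def5678')
    return repos, diff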
| [
"[email protected]"
] | |
3b201b951bcaa7fdc3906079154cb31fe14e8094 | 11b5bd6806447a9b2ef4d8d54a9dc64c8be5d6e5 | /configs/base/base.py | 49a0c3cdc03b691b6d9591180e6166b27cc4f10f | [] | no_license | milySW/NNResearchAPI | 0789478791a91002d79dd909fe5f9654deeb4b44 | 00bbea4909d1272f80455edb692b45c6c6d56831 | refs/heads/master | 2023-04-17T19:56:19.667177 | 2021-05-03T23:48:26 | 2021-05-03T23:48:26 | 291,540,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | from typing import List, Dict, Any
class BaseConfig:
@staticmethod
def condition(key: str) -> bool:
return not key.startswith("__")
@classmethod
def to_dict(cls) -> Dict[str, Any]:
class_dict = cls.__dict__.items()
return {key: value for key, value in class_dict if cls.condition(key)}
@classmethod
def value_list(cls) -> List[Any]:
return list(cls.to_dict().values())
@classmethod
def key_list(cls) -> List[Any]:
return list(cls.to_dict().keys())
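# Hedged usage sketch (not part of the original module): a subclass only declares class
# attributes, which to_dict(), key_list() and value_list() then expose.
class _ExampleConfig(BaseConfig):
    learning_rate = 1e-3
    batch_size = 32

# _ExampleConfig.to_dict()  -> {"learning_rate": 0.001, "batch_size": 32}
# _ExampleConfig.key_list() -> ["learning_rate", "batch_size"]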
| [
"[email protected]"
] | |
5dd9545b161a3c85837453baef13b476e033796b | fad161c7bb4709487f6561f5fdf40afd106e2c1b | /hik_app/server/appserver.py | fa128745c2149449d5ca0d7d6fdb6723d46414a0 | [] | no_license | happyAnger6/hik_app | 94ae55fe31b3ba14a566771970a10fa9cf30d953 | daf3be00faba271ee6ccf6db1431c2e332f931ab | refs/heads/master | 2020-06-15T09:36:37.608041 | 2019-07-10T12:17:21 | 2019-07-10T12:17:21 | 195,262,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from hik_app.utils.epoll import EpollServer
class AppServer(EpollServer):
def __init__(self):
pass
| [
"[email protected]"
] | |
6efe35387bd54aac15d8cb6ef36b0847abc37b95 | adc7e9488f4248c61c5e0e1b20d8476e6c5b63c8 | /introduction-to-algorithms/2/3/merge-sort.py | c410aeea175fdcab04cd5e614406b0e4b9db8386 | [] | no_license | SergeyKulagin/algoritms | 22bcc81486a862b23dcb8cd57e093be0889a7812 | b6bb19f95fa9f5170c31a0cf4b2befa1e2d6c420 | refs/heads/master | 2022-03-17T13:47:59.125689 | 2022-03-07T16:45:32 | 2022-03-07T16:45:32 | 175,367,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | import sys
def merge(a, start, middle, end):
i = 0
j = 0
left = a[start:middle]
left.append(sys.maxsize)
right = a[middle:end]
right.append(sys.maxsize)
for k in range(start, end):
if left[i] <= right[j]:
a[k] = left[i]
i = i + 1
else:
a[k] =right[j]
j = j + 1
return a
def merge_nosentinel(a, start, middle, end):
temp = a[start:end]
i = start
j = middle
k = 0
while i < middle and j < end:
if a[i] < a[j]:
temp[k] = a[i]
i = i + 1
else:
temp[k] = a[j]
j = j + 1
k = k +1
while i < middle:
temp[k] = a[i]
i = i + 1
k = k + 1
while j < end:
temp[k] = a[j]
j = j + 1
k = k + 1
for m in range(0, len(temp)):
a[start + m] = temp[m]
return a
def mergesort(a):
mergesortit(a, 0, len(a))
return a
def mergesortit(a, start, end):
if start >= end - 1:
return
    middle = (start + end) // 2  # integer midpoint; avoids float division and banker's rounding
mergesortit(a, start, middle)
mergesortit(a, middle, end)
merge_nosentinel(a, start, middle, end)
l = [1, 3, 5, 7, 2, 4, 8, 10, 15]
print(mergesort(l))
print(mergesort([7,6,3,1]))
| [
"[email protected]"
] | |
edfacbf2d8cbd3f422b2844c79dabc4fe60e2d5c | 1d1dbb5cba37a7dfbc9226fd988bb9a756b6542d | /lisc/urls/urls.py | d050c550243df0ec8a99ac66ca08f534d8df6d1b | [
"Apache-2.0"
] | permissive | aashish24/lisc | 27b1e2bf793d7a1866c980eaedbf1bcc8c9d7e87 | f19a3896d63ff701cb1cf22902d96fb0a67a9a6b | refs/heads/master | 2020-07-13T02:36:19.144698 | 2019-08-25T22:38:20 | 2019-08-25T22:38:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,132 | py | """Base URL object for LISC.
Segments : section added to the URL, separated by '/'.
Settings : settings added to the URL, as key value pairs, following a '?' and added with '&'.
"""
from lisc.urls.utils import make_segments, make_settings
###################################################################################################
###################################################################################################
class URLs():
"""URLs for an API interface.
Attributes
----------
base : str
Base URL for the API.
utils : dict
What utilities are available for the API.
urls : dict
The URLs for each utility.
settings : dict
The available settings for the API.
authenticated : boolean
Whether acting as an authenticated user for the API.
"""
def __init__(self, base, utils={}, authenticated=None):
"""Initialize a URLs object.
Parameters
----------
base : str
Base URL for the API.
utils : dict
Utilities for the utility, a dictionary with names and URL extensions.
authenticated : bool
Whether acting as an authenticated user for the API.
"""
self.base = base
utils['base'] = self.base
self.utils = utils
self.urls = {key : None for key in self.utils.keys()}
self.settings = {}
self.authenticated = authenticated
def check_url(self, util):
"""Check the built URL for a specified utility.
Parameters
----------
util : str
Which utility to check the URL for.
"""
self._check_util(util)
print(self.urls[util])
def fill_settings(self, **kwargs):
"""Put all provided settings values into a dictionary object.
Parameters
----------
**kwargs
Keyword arguments for all settings, with their values.
Notes
-----
Potential parameters to this function include all the possible settings for the given API.
Any possible setting that is provided a value as an input to this function is
saved out to the dictionary of collected and available settings.
"""
self.settings = {ke: va for ke, va in kwargs.items() if va is not None}
def authenticate(self, url):
"""Method to authenticate a URL for a given API.
Parameters
----------
url : str
URL to add authentification to.
Returns
-------
str
Authenticated URL.
Notes
-----
This is a placeholder method, on the base URLs object, and should be
overloaded by any API object that has authentification.
When overloading this method, it should implement whatever is needed
to authenticate a URL request for the specified API.
"""
return url
def build_url(self, util, segments=[], settings=[]):
"""Build the URL for a specified utility, with provided settings.
Parameters
----------
util : str
Which utility to build the URL for.
segments : list of str
Segments to add to the URL.
settings : dict or list of str
Settings to use to build the URL.
If list, the settings values are taken from the objects settings attribute.
"""
self._check_util(util)
if isinstance(settings, list):
if not all(el in self.settings.keys() for el in settings):
raise ValueError('Not all requested settings available - can not proceed.')
settings = {ke : va for ke, va in self.settings.items() if ke in settings}
url = self.base + make_segments([self.utils[util]] + segments) + make_settings(settings)
if self.authenticated:
url = self.authenticate(url)
self.urls[util] = url
def get_url(self, util, segments=[], settings={}):
"""Get a requested URL, with any additional segments or settings.
Parameters
----------
util : str
Which utility to get the URL for.
segments : list of str, optional
Any additional segments to add to the URL.
settings : dict, optional
Any additional settings to add to the URL.
Returns
-------
full_url : str
The requested URL, with any extra segments and settings added.
"""
if not util in self.utils.keys():
self.build_url(util)
url = self.urls[util]
settings_join = '?' if not '?' in url else '&'
full_url = url + make_segments(segments) + make_settings(settings, settings_join)
return full_url
def _check_util(self, util):
"""Check that a requested utility is valid.
Parameters
----------
util : str
Name of the utility to check for.
"""
if util not in self.utils.keys():
raise ValueError('Specified utility not understood.')
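# Hedged usage sketch (not part of the original module): the base URL, utility name and
# settings below are illustrative assumptions, not values from any real API definition.
if __name__ == "__main__":
    demo = URLs("https://api.example.org", utils={"search": "search"})
    demo.fill_settings(db="pubmed", retmax=5)
    demo.build_url("search", settings=["db", "retmax"])
    demo.check_url("search")
    print(demo.get_url("search", settings={"term": "brain"}))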
| [
"[email protected]"
] | |
788904b590885b4196447c3e6fa4e61cc2d5f1e8 | 889f243db8de853729ac68f5c6c56e5ea3f54df2 | /dominion_grpc_proto/dominion_pb2_grpc.py | 2ff7d0cf0456327270ae0beba6e1b40349210282 | [
"MIT"
] | permissive | the-gigi/dominion-grpc-proto | 1b38160da2d72633f1ff301d95ba6a5966ca3642 | bc3f7a93c7985476864aa49359dbe2e14afad271 | refs/heads/master | 2022-12-04T16:15:55.398905 | 2020-08-23T19:42:06 | 2020-08-23T19:42:06 | 282,386,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,285 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import dominion_grpc_proto.dominion_pb2 as dominion__pb2
class DominionServerStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Join = channel.unary_stream(
'/DominionServer/Join',
request_serializer=dominion__pb2.PlayerInfo.SerializeToString,
response_deserializer=dominion__pb2.Message.FromString,
)
self.Start = channel.unary_unary(
'/DominionServer/Start',
request_serializer=dominion__pb2.PlayerInfo.SerializeToString,
response_deserializer=dominion__pb2.Response.FromString,
)
self.PlayCard = channel.unary_unary(
'/DominionServer/PlayCard',
request_serializer=dominion__pb2.Card.SerializeToString,
response_deserializer=dominion__pb2.Response.FromString,
)
self.Buy = channel.unary_unary(
'/DominionServer/Buy',
request_serializer=dominion__pb2.Card.SerializeToString,
response_deserializer=dominion__pb2.Response.FromString,
)
self.Done = channel.unary_unary(
'/DominionServer/Done',
request_serializer=dominion__pb2.PlayerInfo.SerializeToString,
response_deserializer=dominion__pb2.Response.FromString,
)
self.Respond = channel.unary_unary(
'/DominionServer/Respond',
request_serializer=dominion__pb2.ActionResponse.SerializeToString,
response_deserializer=dominion__pb2.Response.FromString,
)
class DominionServerServicer(object):
"""Missing associated documentation comment in .proto file."""
def Join(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Start(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PlayCard(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Buy(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Done(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Respond(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DominionServerServicer_to_server(servicer, server):
rpc_method_handlers = {
'Join': grpc.unary_stream_rpc_method_handler(
servicer.Join,
request_deserializer=dominion__pb2.PlayerInfo.FromString,
response_serializer=dominion__pb2.Message.SerializeToString,
),
'Start': grpc.unary_unary_rpc_method_handler(
servicer.Start,
request_deserializer=dominion__pb2.PlayerInfo.FromString,
response_serializer=dominion__pb2.Response.SerializeToString,
),
'PlayCard': grpc.unary_unary_rpc_method_handler(
servicer.PlayCard,
request_deserializer=dominion__pb2.Card.FromString,
response_serializer=dominion__pb2.Response.SerializeToString,
),
'Buy': grpc.unary_unary_rpc_method_handler(
servicer.Buy,
request_deserializer=dominion__pb2.Card.FromString,
response_serializer=dominion__pb2.Response.SerializeToString,
),
'Done': grpc.unary_unary_rpc_method_handler(
servicer.Done,
request_deserializer=dominion__pb2.PlayerInfo.FromString,
response_serializer=dominion__pb2.Response.SerializeToString,
),
'Respond': grpc.unary_unary_rpc_method_handler(
servicer.Respond,
request_deserializer=dominion__pb2.ActionResponse.FromString,
response_serializer=dominion__pb2.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'DominionServer', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class DominionServer(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Join(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/DominionServer/Join',
dominion__pb2.PlayerInfo.SerializeToString,
dominion__pb2.Message.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Start(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/DominionServer/Start',
dominion__pb2.PlayerInfo.SerializeToString,
dominion__pb2.Response.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PlayCard(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/DominionServer/PlayCard',
dominion__pb2.Card.SerializeToString,
dominion__pb2.Response.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Buy(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/DominionServer/Buy',
dominion__pb2.Card.SerializeToString,
dominion__pb2.Response.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Done(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/DominionServer/Done',
dominion__pb2.PlayerInfo.SerializeToString,
dominion__pb2.Response.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Respond(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/DominionServer/Respond',
dominion__pb2.ActionResponse.SerializeToString,
dominion__pb2.Response.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
| [
"[email protected]"
] | |
6b3f120d2ee38f1d5955c0b3bc04faa4d16890fa | f28bd0867cbe009b016aafbab79a73b593baabd6 | /BMW/dealership/services/wayawayservices.py | 7b3ddebde1db87ff0862f567ca3024b6f0d050e6 | [] | no_license | kylegrone/greenlightautomotive | 2ff778ebfff7eb66d3cfdae6eedbac19398fe90f | 65fa72813d1acb80fda966f1cffdb1530b0cfa6b | refs/heads/master | 2021-07-17T08:49:10.865191 | 2017-10-25T02:58:04 | 2017-10-25T02:58:04 | 107,046,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | '''
Created on Dec 21, 2015
@author: aroofi
'''
from dealership.models import *
class WayAwayService():
def get_all_wayaway(self,dealer=None):
wayaways = WayAway.objects.all()
wayaway_array = []
for wayaway in wayaways:
wayaway_tmp = {"dealer":None,"default":wayaway}
if dealer:
try:
wayaway_tmp["dealer"] = WayAwayDealer.objects.get(dealer=dealer,wayaway=wayaway)
except Exception,e:
print e
wayaway_array.append(wayaway_tmp)
return wayaway_array
def get_apt_wayaway(self, appt):
try:
app = Appointment.objects.get(id=appt)
return app.way_away_id
except Exception,e:
print e
return None
def update_wayaway(self,appt , wayaway,driver_liscens_number=None,insurance_company_name=None,insurance_card_number=None,state_id=None,reserve=0):
if appt and wayaway:
try:
app = Appointment.objects.get(id=appt)
app.way_away_id = wayaway
app.driver_liscens_number = driver_liscens_number
app.insurance_company_name = insurance_company_name
app.insurance_card_number = insurance_card_number
app.state_wayaway_id = state_id
app.reserve_wayaway = reserve
app.save()
except Appointment.DoesNotExist:
return False
return True
def get_all_states(self):
states = States.objects.all()
return states | [
"[email protected]"
] | |
7b06f3fb221180a1c79e4dc9c8b2c27818a05c6d | 4e7b28fa812f59fed3686cc2f687d212967ce0af | /validator.py | b1e9d896da424569e40980c37c8691f7d3f8c4fd | [] | no_license | isabella232/faascc | ec13142cc9c9a752ff05273b268fd9dfeea2e064 | 23a520ef80d18c04eade79b2b1f758275add2689 | refs/heads/master | 2022-10-08T06:35:36.124049 | 2020-05-11T13:52:47 | 2020-05-11T13:52:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | # Validates a (simplified) file for relative completeness
import yaml
cc = yaml.load(open("faascc.simplified.yaml").read())
def validate(cc):
for entry in cc[0]:
for ccrel in cc[1:]:
            if entry not in ccrel:
print("Entry", entry, "missing in", ccrel["name"])
validate(cc)
| [
"spio@tougener"
] | spio@tougener |
f97969be2a7cbf02e096c647e4f3b74269ae0ff5 | 56c96c128b797635dd20410859ccfe374ef2b4d5 | /dojos/anagrama/test_dojo.py | a92947ef85c88540b7e09580c4160b45ee8e56d8 | [
"MIT"
] | permissive | gupy-io/mentoria-python | 465ba45cb22edd4b339549fe2e8a14dc3d0729d3 | c8f56cfe4e0a61dd1f4609b8e645f62b380724f0 | refs/heads/master | 2022-11-11T16:45:34.443173 | 2020-03-05T17:10:37 | 2020-03-05T17:10:37 | 209,610,190 | 7 | 0 | MIT | 2022-11-01T15:50:53 | 2019-09-19T17:20:37 | Jupyter Notebook | UTF-8 | Python | false | false | 1,111 | py | from exercicio.dojo import anagramas
# Aline
# Breno
# Daniel
# Denis
# Diogo
# Heros
# Marina
# Matheus
# Pedro
def teste_uma_letra():
li = anagramas("b")
assert li == ["b"]
def teste_uma_letra_2():
li = anagramas("a")
assert li == ["a"]
def test_duas_letras():
li = anagramas("ae")
assert li == ["ae", "ea"]
def test_duas_letras_2():
li = anagramas("cb")
assert li == ["bc", "cb"]
def test_duas_letras_3():
li = anagramas("aa")
assert li == ["aa"]
def teste_tres_letras():
li = anagramas("bff")
assert li == ["bff", "fbf", "ffb"]
def test_tres_letras_2():
li = anagramas("aaa")
assert li == ["aaa"]
def test_tres_letras_3():
li = anagramas("aab")
assert li == ["aab", "aba", "baa"]
def test_tres_letras_4():
li = anagramas("aab")
assert li == ["aab", "aba", "baa"]
def test_biro():
li = anagramas("biro")
assert li == sorted(["biro", "bior", "brio" ,"broi" ,"boir" ,"bori" ,"ibro", "ibor", "irbo", "irob" ,"iobr", "iorb" ,"rbio", "rboi" ,"ribo" ,"riob" ,"roib" ,"robi" ,"obir", "obri" ,"oibr" ,"oirb" ,"orbi", "orib"]) | [
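# The exercicio.dojo module under test is not included here. Below is a hedged reference
# sketch consistent with the assertions above (sorted, de-duplicated permutations of the
# input string); it is an assumption, not the dojo's actual implementation.
from itertools import permutations

def _anagramas_reference(word):
    return sorted({"".join(p) for p in permutations(word)})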
"[email protected]"
] | |
66af5d4f12a2fe96992a1902d9c5be9ae0386389 | ad48c3fd77deb941a6e58b5a7b0bab15ad36d3e8 | /SECTION_04/chronometry.py | ccad8b41ce20c49bdbc7d44cc8c0b4360194d739 | [] | no_license | tmtmaj/PYTHON | 74541641c21a9989f9ace8aa0773756d1a079e80 | ffc76d025491e482958c442b501c39dd7af7f8da | refs/heads/main | 2023-04-01T20:07:45.648270 | 2021-04-06T05:18:52 | 2021-04-06T05:18:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import time
def foo(a, b, c):
print(a, b, c)
time.sleep(2)
def goo():
print('goo')
time.sleep(3)
def chronometry(f, *args, **kwargs ):
start = time.time()
f(*args, **kwargs)
end = time.time()
print(f'elapsed : {end-start}')
#chronometry(goo)
chronometry(foo, 1, 2, 3)
chronometry(foo, 1, 2, c = 3)
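# Hedged follow-up sketch (not part of the original file): the same timing logic is often
# packaged as a decorator, so any function can opt in without an explicit chronometry() call.
import functools

def timed(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        print(f'elapsed : {time.time() - start}')
        return result
    return wrapper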
| [
"[email protected]"
] | |
bfd5ffced44944f1c508de289c0108341965ef13 | 061bddccaaa87c0624315814ad0afe5709817d28 | /Portrayal_allSNPs.py | 2b22afbb446bc293d7db96a889e611f129c54b86 | [] | no_license | csoeder/snip-suite | 05141619c6b348acb420f158f830ffb9783ae0d3 | 65ab970744bfc02b8d8a2e5e7ce5f68faa11ccf6 | refs/heads/master | 2020-06-04T21:14:25.095672 | 2015-02-12T19:40:21 | 2015-02-12T19:40:21 | 25,361,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,286 | py | """
Given two lists of SNPs from SNP_call:
1 partition them into shared SNPs, SNPs from list 1 only, and SNPs from list 2 only
2
"""
# THIS VERSION OPTIMIZED FOR RICH M.'S SUGARFLIES
import matplotlib
matplotlib.use('agg')
import csv
import sys
import numpy as np
import matplotlib.pyplot as plt
from subprocess import call, check_output
import pickle
parent1_SNPS_file = sys.argv[1]
title1 = sys.argv[2]
parent2_SNPS_file = sys.argv[3]
title2 = sys.argv[4]
hybrid_SNPS_file = sys.argv[5]
titleHyb = sys.argv[6]
hybrid_Align=sys.argv[7] #sorted BAM file of hybrid alignment
chromosome = sys.argv[8]
###yesyes
#chroms=["2L","2R"]#,"3L","3R","4","X", "YHet", "2RHet"]
chroms=[chromosome]# Yes, it's a terrible way to do things but it's too late to restructure the whole thing from scratch
### My code is a dog's code
### It could never make a lady weep
### It could never make a homeless man turn his life around and achieve more then any man has ever achieved before
### Not like piano music
### http://achewood.com/index.php?date=11082002
box_size=10**3
missing_SNP_threshold = 10 #hybrid must have at least this coverage to declare that it is missing a parental SNP
#Super awesome
def pool_snps(parent1, parent2):
""" given two .SNPS files, this loads them into python dict objects, then outputs
which SNPs are disjoint and which are shared """
def parent_pull(source, sink):# Pull parental SNPs
with open(source, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter='\t')
for row in spamreader:
try:
sink[row[0]][int(row[1])] = row[3]### Collect all the SNPs from parent 1;
### load them into the parent1 dict in the form {CHR:{POSITION:ALT_ALLELE}}
except KeyError: # if the chromosome isn't in chroms
pass
return sink
	# note: dict.fromkeys(chroms, {}) would make every chromosome share one dict object,
	# so build an independent dict per chromosome instead
	p1 = parent_pull(parent1, {chro: {} for chro in chroms})
	p2 = parent_pull(parent2, {chro: {} for chro in chroms})
	shared = {chro: {} for chro in chroms} #shared SNPs go here
	disjoint1 = {chro: {} for chro in chroms} #those only in parent1
	disjoint2 = {chro: {} for chro in chroms} #those only in parent2
for chro in chroms: #For each chromosome...
disj1 = list(set(p1[chro].keys()).difference(set(p2[chro].keys()))) #sites unique to p1
disj2 = list(set(p2[chro].keys()).difference(set(p1[chro].keys()))) #sites unique to p2
inter = list(set(p1[chro].keys()).intersection(set(p2[chro].keys())))#shared sites
		for i in inter[:]: #iterate over a copy, since differing sites are removed from inter below
if p1[chro][i] != p2[chro][i]: #if the phenotype at the site is actually different...
inter.pop(inter.index(i)) #remove it from the shared SNPS
disj1.append(i) #insert each site
disj2.append(i) # in the appropriate list
for j in disj1: #
disjoint1[chro][j]=p1[chro][j] #Now take each site and load it into the master dicts for output
for j in disj2:
disjoint2[chro][j]=p2[chro][j]
for j in inter:
shared[chro][j]=p1[chro][j]
return shared, disjoint1, disjoint2
def cov_grep(snp_list, bam_file):
phial=open('%s_sites.bed'%titleHyb, 'w')
for chro in chroms:
for site in snp_list[chro]:
phial.write('%s\t%s\t%s\n'%tuple([chro, site, site+1]))
phial.close()
call('bedtools coverage -abam %s -b %s_sites.bed > %s.cov'%tuple([hybrid_Align, titleHyb, titleHyb]), shell=True)
	coverage = {chro: {} for chro in chroms}
with open('%s.cov'%titleHyb, 'rb') as csvfile:
spamreader=csv.reader(csvfile, delimiter='\t')
for row in spamreader:
coverage[row[0]][int(row[1])]=int(row[3])
return coverage
def snp_grep(parent1, parent2, hybrid, hyb_cov):
"""
Given the two dicts of disjoint parental SNPs, load the hybrid .SNPS file, and
look at each SNP site in the two dicts. Classify each as present or absent
in the hybrid genome
"""
	hybrid_snps = {chro: {} for chro in chroms}
	parent_snps = {chro: [] for chro in chroms}  # one independent list per chromosome
for chro in chroms:
parent_snps[chro].extend(parent1[chro])
parent_snps[chro].extend(parent2[chro])
with open(hybrid, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter='\t')
for row in spamreader:
try:
hybrid_snps[row[0]][int(row[1])] = row[3]### Collect all the SNPs from hybrid
### load them into the parent1 dict in the form POSITION:ALT_ALLELE
except KeyError:
pass
print "begin coverage grep"
coverage = cov_grep(parent_snps, hyb_cov)
print "done grepping "
	parent1_present = {chro: [] for chro in chroms} # SNPs from p1 which are present
	parent2_present = {chro: [] for chro in chroms} # SNPs from p2 which are present
	#parents 1 and 2 absent pending coverage grepping
	parent1_absent = {chro: [] for chro in chroms} # SNPs from p1 which are absent
	parent2_absent = {chro: [] for chro in chroms} # SNPs from p2 which are absent
	gnu_vars = {chro: [] for chro in chroms}
for chro in chroms:
for site in hybrid_snps[chro].keys():
if site in set(parent1[chro].keys()).intersection(set(parent2[chro].keys())):
if hybrid_snps[chro][site] in parent1[chro][site]:
parent1_present[chro].append(int(site))
elif hybrid_snps[chro][site] in parent2[chro][site]:
parent2_present[chro].append(int(site))
else:#Record this as an additional SNP
gnu_vars[chro].append(int(site))
elif site in parent1[chro].keys():
if hybrid_snps[chro][site] in parent1[chro][site]:
parent1_present[chro].append(int(site))
elif site in parent2[chro].keys():
if hybrid_snps[chro][site] in parent2[chro][site]:
parent2_present[chro].append(int(site))
else:
gnu_vars[chro].append(int(site))
for site in parent1[chro].keys():
if site not in parent1_present[chro] and site not in parent2_present[chro] and site not in gnu_vars[chro]:#make sure the site isn't recorded ANYwhere....
if coverage[chro][site] >= missing_SNP_threshold:
parent1_absent[chro].append(int(site))
for site in parent2[chro].keys():
if site not in parent1_present[chro] and site not in parent2_present[chro] and site not in gnu_vars[chro]:
if coverage[chro][site] >= missing_SNP_threshold:
parent2_absent[chro].append(int(site))
return parent1_present, parent2_present, gnu_vars, parent1_absent, parent2_absent#, new_snps, hypervars
def archimedes(points):
points = np.array(points)
points.sort()
print len(points)
start, last = 0, points[-1]
coord, density = [], []
while start < last:
coord.append(int(start+0.5*box_size))
clip1 = points[points<start+box_size]
clip2 = clip1[clip1>start]
dens = float(len(clip2))/box_size
density.append(dens)
#print dens
start += box_size
return coord, density
def write_to_varwig(coords, density, phial, colour, name):
vial = open(phial, 'w')
	vial.write('browser position chr%s\n' % chromosome)  # 'chrom' was undefined; use the module-level chromosome
vial.write('browser hide all\n')
vial.write('track type=wiggle_0 name="%s" description="varStep format" visibility=full autoScale=off viewLimits=0:%s color=%s graphType=points priority=20\n'%tuple([name, max(density), colour]))
	vial.write('variableStep chrom=chr%s\n' % chromosome)
for pair in zip(coords, density):
vial.write('%s\t%s\n'%tuple(pair))
vial.close()
def write_to_bed(points, phial, colour, name):
vial = open(phial, 'w')
#vial.write('browser hide all')
#vial.write('track name="%s" description="%s" visibility=1 itemRgb="On"\n'%tuple([name,name]))
for chro in points.keys():
for site in points[chro]:
vial.write('%s\t%s\t%s\therpderp\t0\t+\t%s\t%s\t%s\n'%tuple([chro, site, site+1, site, site+1, colour]))
vial.close()
shared_SNPs, disjoint1_SNPs, disjoint2_SNPs = pool_snps(parent1_SNPS_file, parent2_SNPS_file)
present1, present2, n00bs, absent1, absent2= snp_grep(disjoint1_SNPs, disjoint2_SNPs, hybrid_SNPS_file, hybrid_Align)
pickle.dump( [shared_SNPs, disjoint1_SNPs, disjoint2_SNPs, present1, present2, n00bs, absent1, absent2], open('%s.pickle'%titleHyb, 'wb') )
###http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
print "\t\tREPORT:\t\t\t"
print "Between %s and %s, %s SNP variants were logged."%tuple([title1, title2, len( [item for sublist in shared_SNPs.values() for item in sublist] )+len( [item for sublist in disjoint1_SNPs.values() for item in sublist] )+len([item for sublist in disjoint2_SNPs.values() for item in sublist])])
print "%s SNPs were identified unique to %s"%tuple([len([item for sublist in disjoint1_SNPs.values() for item in sublist]), title1])
for chro in chroms:
print "\t%s on %s\n"%tuple([len(disjoint1_SNPs[chro]), chro])
print "%s SNPs were identified unique to %s"%tuple([len([item for sublist in disjoint2_SNPs.values() for item in sublist]), title2])
for chro in chroms:
print "\t%s on %s\n"%tuple([len(disjoint2_SNPs[chro]), chro])
print "%s SNPs were found shared between %s and %s"%tuple([len([item for sublist in shared_SNPs.values() for item in sublist]), title1, title2])
for chro in chroms:
print "\t%s on %s\n"%tuple([len(shared_SNPs[chro]), chro])
print "%s contained %s SNPs unique to %s"%tuple([titleHyb, len([item for sublist in present1.values() for item in sublist]), title1])
for chro in chroms:
print "\t%s on %s\n"%tuple([len(present1[chro]), chro])
print "%s was missing %s SNPs unique to %s"%tuple([titleHyb, len([item for sublist in absent1.values() for item in sublist]), title1])
for chro in chroms:
print "\t%s on %s\n"%tuple([len(absent1[chro]), chro])
print "%s contained %s SNP unique to %s"%tuple([titleHyb, len([item for sublist in present2.values() for item in sublist]), title2])
for chro in chroms:
print "\t%s on %s\n"%tuple([len(present2[chro]), chro])
print "%s was missing %s SNPs unique to %s"%tuple([titleHyb, len([item for sublist in absent2.values() for item in sublist]), title2])
for chro in chroms:
print "\t%s on %s\n"%tuple([len(absent2[chro]), chro])
print "%s contained %s SNPs unseen in either %s or %s\n"%tuple([titleHyb, len([item for sublist in n00bs.values() for item in sublist]), title1, title2])
#print "\t\t including %s sites representing a third polymorphism"%tuple([len(hypervars)])
print "\t\t~~~END REPORT~~~\t\t"
write_to_bed(present1, '%s_present_in_%s_%s.bed'%tuple([title1, titleHyb, chromosome]), '255,0,0', '%s SNPs in %s'%tuple([title1, titleHyb]))
write_to_bed(present2, '%s_present_in_%s_%s.bed'%tuple([title2, titleHyb, chromosome]), '0,0,255', '%s SNPs in %s'%tuple([title2, titleHyb]))
write_to_bed(absent1, '%s_absent_in_%s_%s.bed'%tuple([title1, titleHyb, chromosome]), '0,255,0', '%s SNPs missing from %s'%tuple([title1, titleHyb]))
write_to_bed(absent2, '%s_absent_in_%s_%s.bed'%tuple([title2, titleHyb, chromosome]), '255,153,51', '%s SNPs missing from %s'%tuple([title2, titleHyb]))
#coord, dens = archimedes(disjoint1_SNPs.keys())
#write_to_varwig(coord, dens, '%s_disjoint.wig'%title1, '255,0,0', '%s disjoint SNP density'%title1)
#coord, dens = archimedes(disjoint2_SNPs.keys())
#write_to_varwig(coord, dens, '%s_disjoint.wig'%title2, '0,0,255', '%s disjoint SNP density'%title1)
#coord, dens = archimedes(present1)
#write_to_varwig(coord, dens, '%s_SNPs_in_%s.wig'%tuple([title1, titleHyb]), '255,0,0', '%s-specific SNP density in %s'%tuple([title1, titleHyb]))
#coord, dens = archimedes(present2)
#write_to_varwig(coord, dens, '%s_SNPs_in_%s.wig'%tuple([title2, titleHyb]), '0,0,255', '%s-specific SNP density in %s'%tuple([title2, titleHyb]))
#
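# Hedged usage note (not part of the original script): based on the sys.argv parsing at the
# top of the file, a typical invocation is assumed to look like
#   python Portrayal_allSNPs.py parent1.snps P1_NAME parent2.snps P2_NAME hybrid.snps HYB_NAME hybrid.sorted.bam 2L
# i.e. two parental .SNPS files with display names, the hybrid .SNPS file and name, the
# hybrid's sorted BAM alignment, and a single chromosome to analyse.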
| [
"[email protected]"
] | |
d16531933e2778cfe27da41deb050b19f3d3155d | fa52d1de7b18af22f6601b773e9f66a32ebd9999 | /middleOfLinkedList_Easy/problem.py | ad744e4ef6e65e58c7c6c7b34f459a91061fe8cb | [
"MIT"
] | permissive | razzlestorm/code-challenges | 9ba5b222cb97e106900d52d027fe2490d2e47e1c | 765dc668623eb5b1e1d52155a09ac17c58b1f756 | refs/heads/main | 2023-01-24T01:04:04.850044 | 2020-12-06T00:32:09 | 2020-12-06T00:32:09 | 307,545,536 | 0 | 0 | MIT | 2020-11-11T06:42:57 | 2020-10-27T00:51:59 | Python | UTF-8 | Python | false | false | 735 | py | '''
Given a non-empty, singly linked list with head node head, return a middle node of linked list.
If there are two middle nodes, return the second middle node.
Example 1:
Input: [1,2,3,4,5]
Output: Node 3 from this list (Serialization: [3,4,5])
The returned node has value 3. (The judge's serialization of this node is [3,4,5]).
Note that we returned a ListNode object ans, such that:
ans.val = 3, ans.next.val = 4, ans.next.next.val = 5, and ans.next.next.next = NULL.
Example 2:
Input: [1,2,3,4,5,6]
Output: Node 4 from this list (Serialization: [4,5,6])
Since the list has two middle nodes with values 3 and 4, we return the second one.
Note:
The number of nodes in the given list will be between 1 and 100.
'''
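# Hedged solution sketch (not part of the original problem file): a standard two-pointer
# approach. The ListNode class below mirrors the usual definition for this problem and is
# an assumption, since the judge normally supplies its own node type.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def middle_node(head):
    # "fast" advances two nodes per step while "slow" advances one, so when fast hits the
    # end, slow sits on the middle node (the second middle node for even-length lists).
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
    return slow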
| [
"[email protected]"
] | |
24f46786de1e0d92144a98136c143df569995bcb | e0934ca26ac6c3f8816952ceafb3c84ace34d6aa | /resources/lib/common/misc_utils.py | 67b7ffde68d1eb7d3e5bc4e964e7bd7d679ae6df | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | masQelec/plugin.video.netflix | 62e4f6416c0f9bf547a8a5980de9747f677236b6 | 90ebf3343ebeaf6b790fdb1048d78fe5bf127dde | refs/heads/master | 2023-02-09T17:07:36.200308 | 2021-01-04T01:36:55 | 2021-01-04T01:36:55 | 288,575,867 | 0 | 0 | MIT | 2021-01-04T01:36:56 | 2020-08-18T22:17:06 | Python | UTF-8 | Python | false | false | 6,159 | py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Miscellaneous utility functions
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from urllib.parse import quote, urlencode
from resources.lib.globals import G
def find(value_to_find, attribute, search_space):
"""Find a video with matching id in a dict or list"""
for video in search_space:
if video[attribute] == value_to_find:
return video
raise KeyError('Metadata for {} does not exist'.format(value_to_find))
def find_episode_metadata(episode_videoid, metadata):
"""Find metadata for a specific episode within a show metadata dict"""
season = find(int(episode_videoid.seasonid), 'id', metadata['seasons'])
episode = find(int(episode_videoid.episodeid), 'id', season.get('episodes', {}))
return episode, season
def get_class_methods(class_item=None):
"""
Returns the class methods of agiven class object
:param class_item: Class item to introspect
:type class_item: object
:returns: list -- Class methods
"""
from types import FunctionType
_type = FunctionType
return [x for x, y in class_item.__dict__.items()
if isinstance(y, _type)]
def build_url(pathitems=None, videoid=None, params=None, mode=None):
"""Build a plugin URL from pathitems and query parameters. Add videoid to the path if it's present."""
if not (pathitems or videoid):
raise ValueError('Either pathitems or videoid must be set.')
path = '{netloc}/{path}/{qs}'.format(
netloc=G.BASE_URL,
path=_encode_path(mode, pathitems, videoid),
qs=_encode_params(params))
return path
def _expand_mode(mode):
return [mode] if mode else []
def _expand_videoid(videoid):
return videoid.to_path() if videoid else []
def _encode_path(mode, pathitems, videoid):
return quote(
'/'.join(_expand_mode(mode) +
(pathitems or []) +
_expand_videoid(videoid)).encode('utf-8'))
def _encode_params(params):
return ('?' + urlencode(params)) if params else ''
def is_numeric(string):
"""Return true if string represents an integer, else false"""
try:
int(string)
except ValueError:
return False
return True
def strp(value, form):
"""
Helper function to safely create datetime objects from strings
:return: datetime - parsed datetime object
"""
# pylint: disable=broad-except
from datetime import datetime
def_value = datetime.utcfromtimestamp(0)
try:
return datetime.strptime(value, form)
except TypeError:
# Python bug https://bugs.python.org/issue27400
try:
from time import strptime
return datetime(*(strptime(value, form)[0:6]))
except ValueError:
return def_value
except Exception:
return def_value
def strf_timestamp(timestamp, form):
"""
Helper function to safely create string date time from a timestamp value
:return: string - date time in the specified form
"""
from datetime import datetime
try:
return datetime.utcfromtimestamp(timestamp).strftime(form)
except Exception: # pylint: disable=broad-except
return ''
# def compress_data(data):
# """GZIP and b64 encode data"""
# out = StringIO()
# with gzip.GzipFile(fileobj=out, mode='w') as outh:
# outh.write(data)
# return base64.standard_b64encode(out.getvalue())
def merge_dicts(dict_to_merge, merged_dict):
"""Recursively merge the contents of dict_to_merge into merged_dict.
Values that are already present in merged_dict will be overwritten if they are also present in dict_to_merge"""
for key, value in dict_to_merge.items():
if isinstance(merged_dict.get(key), dict):
merge_dicts(value, merged_dict[key])
else:
merged_dict[key] = value
return merged_dict
def compare_dict_keys(dict_a, dict_b, compare_keys):
"""Compare two dictionaries with the specified keys"""
return all(dict_a[k] == dict_b[k] for k in dict_a if k in compare_keys)
def chunked_list(seq, chunk_len):
for start in range(0, len(seq), chunk_len):
yield seq[start:start + chunk_len]
def any_value_except(mapping, excluded_keys):
"""Return a random value from a dict that is not associated with excluded_key.
Raises StopIteration if there are no other keys than excluded_key"""
return next(mapping[key] for key in mapping if key not in excluded_keys)
def enclose_quotes(content):
return '"' + content + '"'
def is_minimum_version(version, min_version):
"""Return True if version is equal or greater to min_version"""
return list(map(int, version.split('.'))) >= list(map(int, min_version.split('.')))
def is_less_version(version, max_version):
"""Return True if version is less to max_version"""
return list(map(int, version.split('.'))) < list(map(int, max_version.split('.')))
def make_list(arg):
"""Return a list with arg as its member or arg if arg is already a list. Returns an empty list if arg is None"""
return (arg
if isinstance(arg, list)
else ([arg]
if arg is not None
else []))
def convert_seconds_to_hms_str(time):
h = int(time // 3600)
time %= 3600
m = int(time // 60)
s = int(time % 60)
return '{:02d}:{:02d}:{:02d}'.format(h, m, s)
def remove_html_tags(raw_html):
import re
pattern = re.compile('<.*?>')
return re.sub(pattern, '', raw_html)
def censure(value, length=3):
"""Censor part of the string with asterisks"""
if not value:
return value
return value[:-length] + '*' * length
def run_threaded(non_blocking, target_func, *args, **kwargs):
"""Call a function in a thread, when specified"""
if not non_blocking:
return target_func(*args, **kwargs)
from threading import Thread
Thread(target=target_func, args=args, kwargs=kwargs).start()
return None
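# Hedged self-check sketch (not part of the original module) illustrating a few helpers above:
def _helper_examples():
    assert list(chunked_list([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
    assert merge_dicts({'a': {'b': 1}}, {'a': {'c': 2}}) == {'a': {'b': 1, 'c': 2}}
    assert convert_seconds_to_hms_str(3725) == '01:02:05'
    assert censure('secret-token') == 'secret-to***'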
| [
"[email protected]"
] | |
b5ca52130ba54fb17a66f5fe77a40648a3218e22 | 9c2421c24ed985e64166e884a3f2c3b3c17afd1f | /validator/validator.py | 8a04f82b64505034b27282425b6cc9fc40b88dd2 | [
"Apache-2.0"
] | permissive | rdgoite/ingest-validator | 7f76edd5c12e804cf53fdd1a300b887cfb2b3e94 | 1b8aa8acd42835127c5680a09b318d4e6fdfe944 | refs/heads/master | 2021-08-28T14:18:45.593474 | 2017-11-28T13:22:04 | 2017-11-28T13:22:04 | 113,034,596 | 0 | 0 | null | 2017-12-04T11:32:40 | 2017-12-04T11:32:40 | null | UTF-8 | Python | false | false | 3,147 | py | import jsonschema
import requests
from functools import reduce
class ErrorReport:
"""
A user friendly error message, along with corresponding ValidationError
"""
def __init__(self, message="", validation_error=None):
self.message = message
self.validation_error = validation_error
def to_dict(self):
error_report_dict = dict()
error_report_dict["user_friendly_message"] = self.message
error_report_dict["validation_error"] = dict()
error_report_dict["validation_error"]["absolute_path"] = list(self.validation_error.absolute_path)
error_report_dict["validation_error"]["path"] = list(self.validation_error.path)
error_report_dict["validation_error"]["message"] = self.validation_error.message
error_report_dict["validation_error"]["instance"] = self.validation_error.instance
error_report_dict["validation_error"]["schema_path"] = list(self.validation_error.schema_path)
error_report_dict["validation_error"]["absolute_schema_path"] = list(self.validation_error.absolute_schema_path)
error_report_dict["validation_error"]["validator"] = self.validation_error.validator
error_report_dict["validation_error"]["validator_value"] = self.validation_error.validator_value
return error_report_dict
class ValidationReport:
def __init__(self):
self.validation_state = ""
self.error_reports = list() # list of ErrorReport
def errors_to_dict(self):
return [error.to_dict() for error in self.error_reports]
@staticmethod
def validation_report_ok():
report = ValidationReport()
report.validation_state = "VALID"
return report
VALIDATION_REPORT_OK = ValidationReport.validation_report_ok()
def validate(metadata, schema):
"""
given a json document(metadata) and a json-schema(schema), validates the
schema and returns a ValidationReport
"""
validator = jsonschema.Draft4Validator(schema=schema)
if validator.is_valid(instance=metadata):
return VALIDATION_REPORT_OK
else:
validation_report = ValidationReport()
validation_report.validation_state = "INVALID"
for error in validator.iter_errors(instance=metadata):
validation_report.error_reports.append(ErrorReport(generate_error_message(error), error))
return validation_report
def generate_error_message(error):
"""
Given an error object, generates an error message
:param error: a jsonschema ValidationError
:return: error message string generated from the error
"""
path_to_error_in_document = reduce((lambda key1, key2: key1 + "." + key2), error.absolute_path) if len(error.absolute_path) > 0 else "root of document"
return "Error: " + error.message + " at " + path_to_error_in_document
def extract_schema_url_from_document(metadata_document):
try:
return metadata_document["core"]["schema_url"]
except KeyError as e:
        # raising a plain string is a TypeError in Python 3; re-raise a proper exception instead
        raise KeyError("Could not find schema_url in metadata document") from e
def get_schema_from_url(schema_url):
return requests.get(schema_url).json()
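# Hedged usage sketch (not part of the original module): wires the helpers together for a
# metadata document that carries its own schema URL under core.schema_url.
def _validate_document(metadata_document):
    schema = get_schema_from_url(extract_schema_url_from_document(metadata_document))
    report = validate(metadata_document, schema)
    return report.validation_state, report.errors_to_dict()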
| [
"[email protected]"
] | |
91c01439a08981a8ae26c9299542571550307480 | 1929443c8e4ec6ccd79777f18d161546867e17ef | /methods/transformers/tests/test_modeling_common.py | 6360ea64bad78476a86baedf0e7a2a3e715cb70d | [
"Apache-2.0",
"MIT"
] | permissive | INK-USC/RiddleSense | 6f4b00546d7f4d5ada12db50929c1f0d7713d541 | a3d57eaf084da9cf6b77692c608e2cd2870fbd97 | refs/heads/main | 2023-08-14T19:01:01.478946 | 2021-07-05T04:06:01 | 2021-07-05T04:06:01 | 376,487,870 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 43,481 | py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import os.path
import random
import tempfile
import unittest
from typing import List, Tuple
from transformers import is_torch_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.testing_utils import require_torch, require_torch_multigpu, slow, torch_device
if is_torch_available():
import numpy as np
import torch
from transformers import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
AdaptiveEmbedding,
BertConfig,
BertModel,
PretrainedConfig,
PreTrainedModel,
)
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key:
setattr(configs_no_init, key, 1e-10)
return configs_no_init
@require_torch
class ModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_torchscript = True
test_pruning = True
test_resize_embeddings = True
test_head_masking = True
test_missing_keys = True
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict = {
k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
if isinstance(v, torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
elif model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class in [
*MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
*MODEL_FOR_CAUSAL_LM_MAPPING.values(),
*MODEL_FOR_MASKED_LM_MAPPING.values(),
*MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
]:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
return inputs_dict
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
model.to(torch_device)
with torch.no_grad():
after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# Make sure we don't have nans
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_save_load_keys_to_never_save(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
keys_to_never_save = getattr(model, "keys_to_never_save", None)
if keys_to_never_save is None:
continue
# check the keys are in the original state_dict
for k in keys_to_never_save:
self.assertIn(k, model.state_dict())
# check that certain keys didn't get saved with the model
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
output_model_file = os.path.join(tmpdirname, WEIGHTS_NAME)
state_dict_saved = torch.load(output_model_file)
for k in keys_to_never_save:
self.assertNotIn(k, state_dict_saved)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg="Parameter {} of model {} seems not properly initialized".format(name, model_class),
)
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
out_1 = first.cpu().numpy()
out_2 = second.cpu().numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
"encoder_outputs",
]
self.assertListEqual(arg_names[:5], expected_arg_names)
else:
expected_arg_names = ["input_ids"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class), return_dict=True)
attentions = outputs["attentions"] if "attentions" in outputs.keys() else outputs[-1]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = (
self.model_tester.base_model_out_len if hasattr(self.model_tester, "base_model_out_len") else 4
)
decoder_attention_idx = (
self.model_tester.decoder_attention_idx
if hasattr(self.model_tester, "decoder_attention_idx")
else 1
)
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
decoder_attention_idx += 1
# Question Answering model returns start_logits and end_logits
if model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
decoder_attention_idx += 1
self.assertEqual(out_len, correct_outlen)
decoder_attentions = outputs[decoder_attention_idx]
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs["attentions"] if "attentions" in outputs else outputs[-1]
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_torchscript(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
self._create_and_check_torchscript(config, inputs_dict)
def test_torchscript_output_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_attentions = True
self._create_and_check_torchscript(config, inputs_dict)
def test_torchscript_output_hidden_state(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
self._create_and_check_torchscript(config, inputs_dict)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
try:
if model.config.is_encoder_decoder:
model.config.use_cache = False # TODO: this should be deleted after bug #7474 is solved
input_ids = inputs["input_ids"]
attention_mask = inputs["attention_mask"]
decoder_input_ids = inputs["decoder_input_ids"]
decoder_attention_mask = inputs["decoder_attention_mask"]
traced_model = torch.jit.trace(
model, (input_ids, attention_mask, decoder_input_ids, decoder_attention_mask)
)
else:
input_ids = inputs["input_ids"]
traced_model = torch.jit.trace(model, input_ids)
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_headmasking(self):
if not self.test_head_masking:
return
global_rng.seed(42)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
global_rng.seed()
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
# Prepare head_mask
# Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
head_mask = torch.ones(
self.model_tester.num_hidden_layers,
self.model_tester.num_attention_heads,
device=torch_device,
)
head_mask[0, 0] = 0
head_mask[-1, :-1] = 0
head_mask.requires_grad_(requires_grad=True)
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
inputs["head_mask"] = head_mask
outputs = model(**inputs)
# Test that we can get a gradient back for importance score computation
output = sum(t.sum() for t in outputs[0])
output = output.sum()
output.backward()
multihead_outputs = head_mask.grad
attentions = outputs[-1]
# Remove Nan
for t in attentions:
self.assertLess(
torch.sum(torch.isnan(t)), t.numel() / 4
) # Check we don't have more than 25% nans (arbitrary)
attentions = [
t.masked_fill(torch.isnan(t), 0.0) for t in attentions
] # remove them (the test is less complete)
self.assertIsNotNone(multihead_outputs)
self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)
self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)
def test_head_pruning(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(
config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config=config)
model.to(torch_device)
model.eval()
heads_to_prune = {
0: list(range(1, self.model_tester.num_attention_heads)),
-1: [0],
}
model.prune_heads(heads_to_prune)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
def test_head_pruning_save_load_from_pretrained(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(
config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config=config)
model.to(torch_device)
model.eval()
heads_to_prune = {
0: list(range(1, self.model_tester.num_attention_heads)),
-1: [0],
}
model.prune_heads(heads_to_prune)
with tempfile.TemporaryDirectory() as temp_dir_name:
model.save_pretrained(temp_dir_name)
model = model_class.from_pretrained(temp_dir_name)
model.to(torch_device)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
def test_head_pruning_save_load_from_config_init(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(
config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
heads_to_prune = {
0: list(range(1, self.model_tester.num_attention_heads)),
-1: [0],
}
config.pruned_heads = heads_to_prune
model = model_class(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
def test_head_pruning_integration(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(
config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
heads_to_prune = {0: [0], 1: [1, 2]}
config.pruned_heads = heads_to_prune
model = model_class(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)
with tempfile.TemporaryDirectory() as temp_dir_name:
model.save_pretrained(temp_dir_name)
model = model_class.from_pretrained(temp_dir_name)
model.to(torch_device)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)
heads_to_prune = {0: [0], 2: [1, 2]}
model.prune_heads(heads_to_prune)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2)
self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)
self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]})
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class), return_dict=True)
hidden_states = outputs["hidden_states"] if "hidden_states" in outputs else outputs[-1]
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
seq_length = seq_length * self.model_tester.chunk_length
else:
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_feed_forward_chunking(self):
(
original_config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
torch.manual_seed(0)
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model.eval()
hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
torch.manual_seed(0)
config.chunk_size_feed_forward = 1
model = model_class(config)
model.to(torch_device)
model.eval()
hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))
def test_resize_tokens_embeddings(self):
(
original_config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.vocab_size
# Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Embedding, AdaptiveEmbedding))
model.set_input_embeddings(torch.nn.Embedding(10, 10))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, torch.nn.Linear))
def test_correct_missing_keys(self):
if not self.test_missing_keys:
return
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
base_model_prefix = model.base_model_prefix
if hasattr(model, base_model_prefix):
with tempfile.TemporaryDirectory() as temp_dir_name:
model.base_model.save_pretrained(temp_dir_name)
model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True)
with self.subTest(msg="Missing keys for {}".format(model.__class__.__name__)):
self.assertGreater(len(loading_info["missing_keys"]), 0)
def test_tie_model_weights(self):
if not self.test_torchscript:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_same_values(layer_1, layer_2):
equal = True
for p1, p2 in zip(layer_1.weight, layer_2.weight):
if p1.data.ne(p2.data).sum() > 0:
equal = False
return equal
for model_class in self.all_model_classes:
config.torchscript = True
model_not_tied = model_class(config)
if model_not_tied.get_output_embeddings() is None:
continue
config_tied = copy.deepcopy(config)
config_tied.torchscript = False
model_tied = model_class(config_tied)
params_tied = list(model_tied.parameters())
# Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(check_same_values(embeddings, decoding))
# # Check that after modification, they remain the same.
# embeddings.weight.data.div_(2)
# # Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
# self.assertTrue(check_same_values(embeddings, decoding))
# # Check that after modification, they remain the same.
# decoding.weight.data.div_(4)
# # Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
# self.assertTrue(check_same_values(embeddings, decoding))
# Check that after resize they remain tied.
model_tied.resize_token_embeddings(config.vocab_size + 10)
params_tied_2 = list(model_tied.parameters())
self.assertEqual(len(params_tied_2), len(params_tied))
# decoding.weight.data.mul_(20)
# # Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)
# self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=f"Tuple and dict output are not equal. Difference: {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_multigpu
def test_multigpu_data_parallel_forward(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# some params shouldn't be scattered by nn.DataParallel
# so just remove them if they are present.
blacklist_non_batched_params = ["head_mask"]
for k in blacklist_non_batched_params:
inputs_dict.pop(k, None)
# move input tensors to cuda:0
for k, v in inputs_dict.items():
if torch.is_tensor(v):
inputs_dict[k] = v.to(0)
for model_class in self.all_model_classes:
model = model_class(config=config)
model.to(0)
model.eval()
# Wrap model in nn.DataParallel
model = torch.nn.DataParallel(model)
with torch.no_grad():
_ = model(**self._prepare_for_class(inputs_dict, model_class))
global_rng = random.Random()
def ids_tensor(shape, vocab_size, rng=None, name=None):
# Creates a random torch.long tensor of the given shape with values in [0, vocab_size)
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()
def random_attention_mask(shape, rng=None, name=None):
attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None)
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
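# --- Hedged usage sketch (added for illustration; not part of the original tests) ---
# How the factory helpers above are typically combined to build dummy model inputs.
# The batch size, sequence length, vocab size and hidden size below are illustrative
# assumptions only, not values used elsewhere in this file.
#
#   dummy_input_ids = ids_tensor((2, 7), vocab_size=99)            # random token ids on torch_device
#   dummy_attention_mask = random_attention_mask((2, 7))           # 0/1 mask; last position forced to 1
#   dummy_inputs_embeds = floats_tensor((2, 7, 32), scale=0.02)    # random float embeddings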
@require_torch
class ModelUtilsTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = BertConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, PretrainedConfig)
model = BertModel.from_pretrained(model_name)
model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, PreTrainedModel)
for value in loading_info.values():
self.assertEqual(len(value), 0)
config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
# Not sure this is the intended behavior. TODO fix Lysandre & Thom
config.name_or_path = model_name
model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
self.assertEqual(model.config.output_hidden_states, True)
self.assertEqual(model.config, config)
| [
"[email protected]"
] | |
2bcc73892647048fa886b11407dd34d93f207c30 | 6b57ceb08961c61e19ecb1e5343c6a85cb4e8bda | /markdown/MarkdownRenderTk.py | 7f00c9718443592497ac7ccdf2a4119c3fd81be8 | [] | no_license | seggiepants/minesweeper | 212a7caa5d64c27bf37ce081adc15b53f21787b2 | 5ba11e89d3058b694c7fb49dad77acc06b02c125 | refs/heads/master | 2021-06-28T02:17:25.796316 | 2021-06-19T07:29:35 | 2021-06-19T07:29:35 | 232,244,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,776 | py | import os
import tkinter as tk
from tkinter import PhotoImage, scrolledtext
from tkinter.font import Font
import urllib.request
import base64
class MarkdownRenderTk():
def __init__(self, target):
self.text = target
fontName = self.find_font(['Times New Roman', 'FreeSerif', 'Helvetica', 'Liberation Serif', 'Arial'])
self.fontText = Font(family=fontName, size=12)
self.fontH1 = Font(family=fontName, size=36, weight='bold')
self.fontH2 = Font(family=fontName, size=21, weight='bold')
self.fontH3 = Font(family=fontName, size=18, weight='bold')
self.fontH4 = Font(family=fontName, size=16, weight='bold')
self.fontH5 = Font(family=fontName, size=14, weight='bold')
self.fontH6 = Font(family=fontName, size=12, weight='bold')
self.fontStrike = Font(family=fontName, overstrike=1)
self.fontBold = Font(family=fontName, weight='bold')
self.fontItalic = Font(family=fontName, slant='italic')
fontNameMono = self.find_font(['Tlwg Typewriter', 'Courier', 'Dejavu Sans Mono', 'Liberation Mono', 'FreeSans'])
self.fontMonospace = Font(family=fontNameMono, size=12)
self.fontA = Font(family=fontName, underline=1)
self.text.font = self.fontText
self.crlf_tags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
def find_font(self, font_list):
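# Return the first entry of font_list that is installed (case-insensitive match
# against tkinter's font families); fall back to the first available family.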
families = [str.lower(font) for font in tk.font.families()]
for target in font_list:
if str.lower(target) in families:
return target
return families[0]
def render(self, tokens, img_path, images, callback):
self.text['state'] = 'normal'
self.text.delete('1.0', tk.END)
tags = []
images.clear()
countA = 0
indent = {}
lastToken = ''  # initialise so the first 'ul'/'ol' token cannot reference an undefined name
for token in tokens:
tokenType = token[0]
tokenText = token[1]
if tokenType == 'text':
self.text.insert(tk.INSERT, tokenText, tuple(tags))
elif tokenType == 'img':
alttext = ''
url = ''
title = ''
elif tokenType == 'a':
title = ''
url = ''
elif tokenType == 'alttext':
alttext = tokenText
elif tokenType == 'url':
url = tokenText
elif tokenType == 'title':
title = tokenText
elif tokenType == '/img':
if url[0:4] == 'http':
u = urllib.request.urlopen(url)
raw_data = u.read()
u.close()
img = tk.PhotoImage(data=base64.encodebytes(raw_data))
else:
img = tk.PhotoImage(file=os.path.normpath(os.path.join(img_path, url)))
images.append(img) # save a reference
self.text.image_create(tk.INSERT, image=img)
elif tokenType == 'hr':
self.text.insert(tk.INSERT, "▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬" ,('hr'))
elif tokenType == 'br':
self.text.insert(tk.INSERT, "\n", tags)
elif tokenType == 'p':
self.text.insert(tk.INSERT, "\n\n", tags)
elif tokenType == '/a':
countA += 1
tagName = 'a' + str(countA)
self.text.tag_config(tagName, font=self.fontA)
self.text.tag_bind(tagName, "<Enter>", lambda event : event.widget.configure(cursor="hand1"))
self.text.tag_bind(tagName, "<Leave>", lambda event : event.widget.configure(cursor=""))
self.text.tag_bind(tagName, "<Button-1>", lambda e, url=url, title=title: callback(url, title))
if len(title) == 0:
title = url
self.text.insert(tk.INSERT, title, tuple(tags + [tagName]))
elif tokenType == 'ul' or tokenType == 'ol':
if lastToken != '/ul' and lastToken != '/ol':
indent = {}
level = tokenText # really a number
if tokenType == 'ol':
if level not in indent:
indent[level] = 1
else:
indent[level] = indent[level] + 1
counter = indent[level]
tags.append(tokenType)
self.text.insert(tk.INSERT, "\n", tuple(tags))
self.text.insert(tk.INSERT, " " * level, tuple(tags))
if tokenType == 'ul':
self.text.insert(tk.INSERT, "● ", tuple(tags))
else: # ol
self.text.insert(tk.INSERT, str(counter) + ". ", tuple(tags))
else:
if tokenType[0] == '/':
tags.remove(tokenType[1:])
if tokenType[1:] in self.crlf_tags:
self.text.insert(tk.INSERT, '\n',)
else:
tags.append(tokenType)
lastToken = tokenType
self.text.tag_config('h1', font=self.fontH1)
self.text.tag_config('h2', font=self.fontH2)
self.text.tag_config('h3', font=self.fontH3)
self.text.tag_config('h4', font=self.fontH4)
self.text.tag_config('h5', font=self.fontH5)
self.text.tag_config('h6', font=self.fontH6)
self.text.tag_config('strike', font=self.fontStrike)
self.text.tag_config('bold', font=self.fontBold)
self.text.tag_config('italic', font=self.fontItalic)
self.text.tag_config('monospace', font=self.fontMonospace)
self.text['state'] = 'disabled'
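# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The (token_type, token_text) tuple format and the callback signature are inferred
# from render() above; the tokenizer that produces the token list lives elsewhere in
# this project, so the tokens below are hypothetical.
#
#   root = tk.Tk()
#   target = scrolledtext.ScrolledText(root)
#   target.pack(fill='both', expand=True)
#   renderer = MarkdownRenderTk(target)
#   tokens = [('h1', ''), ('text', 'Hello, markdown'), ('/h1', ''), ('p', ''), ('text', 'Body text')]
#   renderer.render(tokens, img_path='.', images=[], callback=lambda url, title: print(url, title))
#   root.mainloop()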
| [
"[email protected]"
] | |
bc10bd50610a4cbc01aae785e97c2cd5261cf129 | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/sort/60b0d171-eaf9-4106-9947-74a8abaa6712__sort.py | 1ecfa4d31d8f9aacb3f83b00e8f6dba5fb40c716 | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | def quickSort (arr):
""" Quicksort a list
:type arr: list
:param arr: List to sort
:returns: list -- Sorted list
"""
if not arr:
return []
pivots = []
lesser = []
greater = []
for x in arr:
if x==arr[0]:
pivots.append(x)
elif x>arr[0]:
greater.append(x)
else:
lesser.append(x)
return quickSort(lesser) + pivots + quickSort(greater)
test_array = [1,4,5,7,8,9,90,3,2,3,4]
sorted_array = quickSort (test_array)
print "unsorted:",test_array,"Sorted:",sorted_array
| [
"[email protected]"
] | |
b6348cb89e3618cb1488cd7678db294d1f9814b5 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinciDev_v38r1p1/Phys/StrippingArchive/python/StrippingArchive/Stripping20r3/StrippingLowMult.py | 64d0de0b8e6e74cccb721b4715d426825cefe155 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,651 | py |
# Stripping Lines for Low Multiplicity Processes.
# Electroweak Group (Convenor: Tara Shears)
# Adaptation by Will Barter of lines originally designed by Dermot Moran, rewritten to use line builders.
# Accepts events that passed the relevant HLT line.
from Gaudi.Configuration import *
from GaudiConfUtils.ConfigurableGenerators import FilterDesktop, CombineParticles
from PhysSelPython.Wrappers import Selection, DataOnDemand
from StrippingConf.StrippingLine import StrippingLine
from StrippingUtils.Utils import LineBuilder
from StandardParticles import StdAllNoPIDsKaons, StdAllNoPIDsPions, StdAllNoPIDsProtons
from GaudiKernel.SystemOfUnits import MeV, mm
confdict_LowMult = {
'LowMultPrescale' : 1.0
, 'LowMultWSPrescale' : 1.0 ##changed from 0.1
, 'LowMultHHIncPrescale' : 1.0 ##changed from 0.1
, 'LowMultLMRPrescale' : 1.0 ##changed from 0.2
, 'LowMultPrescale_ps' : 1.0 ##changed from 0.005
, 'LowMultNoFilterPrescale' : 1.0 ##changed from 0.1
, 'LowMultPostscale' : 1.0
# Final-state particles
, 'H_PTmin' : 100.0 * MeV
, 'H_Pmin' : 5000.0 * MeV
, 'H_TrkChi2max' : 3.0
, 'K_PIDKmin' : 0.0
, 'P_PIDPmin' : 0.0
, 'KKInc_K_Pmin' : 10000.0 * MeV
, 'KKInc_K_PIDKmin' : 5.0
# D0 -> KPi
, 'D2KPi_APTmin' : 0.0 * MeV
, 'D2KPi_ADAMASSmax' : 80.0 * MeV
, 'D2KPi_ADOCAmax' : 0.5 * mm
, 'D2KPi_APmin' : 10000.0 * MeV
, 'D2KPi_VtxChi2DoFmax' : 15.0
# D+- -> KPiPi
, 'D2KPiPi_APTmin' : 0.0 * MeV
, 'D2KPiPi_ADAMASSmax' : 80.0 * MeV
, 'D2KPiPi_ADOCAmax' : 0.5 * mm
, 'D2KPiPi_APmin' : 10000.0 * MeV
, 'D2KPiPi_VtxChi2DoFmax' : 15.0
# D0 -> K3Pi
, 'D2K3Pi_APTmin' : 0.0 * MeV
, 'D2K3Pi_ADAMASSmax' : 80.0 * MeV
, 'D2K3Pi_ADOCAmax' : 0.7 * mm
, 'D2K3Pi_APmin' : 10000.0 * MeV
, 'D2K3Pi_VtxChi2DoFmax' : 15.0
# 'ChiC' -> HH (H = K, Pi)
, 'ChiC2HH_APTmin' : 0.0 * MeV
, 'ChiC2HH_APTmax' : 5000.0 * MeV
, 'ChiC2HH_AMmin' : 2850.0 * MeV
, 'ChiC2HH_AMmax' : 4500.0 * MeV
, 'ChiC2HH_ADOCAmax' : 0.5 * mm
, 'ChiC2HH_APmin' : 10000.0 * MeV
, 'ChiC2HH_VtxChi2DoFmax' : 15.0
# 'ChiC' -> PP
, 'ChiC2PP_APTmin' : 0.0 * MeV
, 'ChiC2PP_APTmax' : 5000.0 * MeV
, 'ChiC2PP_AMmin' : 2850.0 * MeV
, 'ChiC2PP_AMmax' : 3650.0 * MeV
, 'ChiC2PP_ADOCAmax' : 0.5 * mm
, 'ChiC2PP_APmin' : 10000.0 * MeV
, 'ChiC2PP_VtxChi2DoFmax' : 15.0
# 'ChiC' -> HHHH (H = K, Pi)
, 'ChiC2HHHH_APTmin' : 0.0 * MeV
, 'ChiC2HHHH_APTmax' : 5000.0 * MeV
, 'ChiC2HHHH_AMmin' : 2850.0 * MeV
, 'ChiC2HHHH_AMmax' : 4500.0 * MeV
, 'ChiC2HHHH_ADOCAmax' : 0.7 * mm
, 'ChiC2HHHH_APmin' : 10000.0 * MeV
, 'ChiC2HHHH_VtxChi2DoFmax' : 15.0
# Low-mass resonance -> HH (H = K, Pi)
, 'LMR2HH_APTmin' : 500.0 * MeV
, 'LMR2HH_APTmax' : 1500.0 * MeV
, 'LMR2HH_AMmin' : 450.0 * MeV
, 'LMR2HH_AMmax' : 1500.0 * MeV
, 'LMR2HH_ADOCAmax' : 0.1 * mm
, 'LMR2HH_APmin' : 15000.0 * MeV
, 'LMR2HH_VtxChi2DoFmax' : 3.0
# Phi resonance -> KK
, 'PHI2KK_APTmin' : 0.0 * MeV
, 'PHI2KK_APTmax' : 1500.0 * MeV
, 'PHI2KK_AMmin' : 990.0 * MeV
, 'PHI2KK_AMmax' : 1050.0 * MeV
, 'PHI2KK_ADOCAmax' : 0.1 * mm
, 'PHI2KK_APmin' : 4000.0 * MeV
, 'PHI2KK_VtxChi2DoFmax' : 3.0
}
default_name = "LowMult"
class LowMultConf(LineBuilder) :
__configuration_keys__ = ('LowMultPrescale'
, 'LowMultWSPrescale'
, 'LowMultHHIncPrescale'
, 'LowMultLMRPrescale'
, 'LowMultPrescale_ps'
, 'LowMultNoFilterPrescale'
, 'LowMultPostscale'
# Final-state particles
, 'H_PTmin'
, 'H_Pmin'
, 'H_TrkChi2max'
, 'K_PIDKmin'
, 'P_PIDPmin'
, 'KKInc_K_Pmin'
, 'KKInc_K_PIDKmin'
# D0 -> KPi
, 'D2KPi_APTmin'
, 'D2KPi_ADAMASSmax'
, 'D2KPi_ADOCAmax'
, 'D2KPi_APmin'
, 'D2KPi_VtxChi2DoFmax'
# D+- -> KPiPi
, 'D2KPiPi_APTmin'
, 'D2KPiPi_ADAMASSmax'
, 'D2KPiPi_ADOCAmax'
, 'D2KPiPi_APmin'
, 'D2KPiPi_VtxChi2DoFmax'
# D0 -> K3Pi
, 'D2K3Pi_APTmin'
, 'D2K3Pi_ADAMASSmax'
, 'D2K3Pi_ADOCAmax'
, 'D2K3Pi_APmin'
, 'D2K3Pi_VtxChi2DoFmax'
# 'ChiC' -> HH (H = K, Pi)
, 'ChiC2HH_APTmin'
, 'ChiC2HH_APTmax'
, 'ChiC2HH_AMmin'
, 'ChiC2HH_AMmax'
, 'ChiC2HH_ADOCAmax'
, 'ChiC2HH_APmin'
, 'ChiC2HH_VtxChi2DoFmax'
# 'ChiC' -> PP
, 'ChiC2PP_APTmin'
, 'ChiC2PP_APTmax'
, 'ChiC2PP_AMmin'
, 'ChiC2PP_AMmax'
, 'ChiC2PP_ADOCAmax'
, 'ChiC2PP_APmin'
, 'ChiC2PP_VtxChi2DoFmax'
# 'ChiC' -> HHHH
, 'ChiC2HHHH_APTmin'
, 'ChiC2HHHH_APTmax'
, 'ChiC2HHHH_AMmin'
, 'ChiC2HHHH_AMmax'
, 'ChiC2HHHH_ADOCAmax'
, 'ChiC2HHHH_APmin'
, 'ChiC2HHHH_VtxChi2DoFmax'
# Low-mass resonance -> HH (H = K, Pi)
, 'LMR2HH_APTmin'
, 'LMR2HH_APTmax'
, 'LMR2HH_AMmin'
, 'LMR2HH_AMmax'
, 'LMR2HH_ADOCAmax'
, 'LMR2HH_APmin'
, 'LMR2HH_VtxChi2DoFmax'
# Phi -> KK (H = K, Pi)
, 'PHI2KK_APTmin'
, 'PHI2KK_APTmax'
, 'PHI2KK_AMmin'
, 'PHI2KK_AMmax'
, 'PHI2KK_ADOCAmax'
, 'PHI2KK_APmin'
, 'PHI2KK_VtxChi2DoFmax'
)
def __init__(self, name, config) :
LineBuilder.__init__(self, name, config)
self._myname = name
#MUON
ExclusiveMuonGEC = {'Code' : "(recSummaryTrack(LHCb.RecSummary.nLongTracks, TrLONG) > 0) & " \
"(recSummaryTrack(LHCb.RecSummary.nBackTracks, TrBACKWARD) < 1) & " \
"(recSummary(LHCb.RecSummary.nTracks, 'Rec/Track/Best') < 6)",
'Preambulo' : ["from LoKiTracks.decorators import *"]}
self.LowMultMuon_line = StrippingLine(self._myname+"MuonLine",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = ExclusiveMuonGEC,
HLT = "HLT_PASS('Hlt2LowMultMuonDecision')"
)
self.registerLine(self.LowMultMuon_line)
self.LowMultMuon_lineps = StrippingLine(self._myname+"MuonLinePS",
prescale = config['LowMultPrescale_ps'],
postscale = config['LowMultPostscale'],
checkPV = False,
HLT = "HLT_PASS('Hlt2LowMultMuonDecision')"
)
self.registerLine(self.LowMultMuon_lineps)
ExclusiveDiMuonGEC = {'Code' : "(recSummaryTrack(LHCb.RecSummary.nLongTracks, TrLONG) > 0) & " \
"(recSummaryTrack(LHCb.RecSummary.nBackTracks, TrBACKWARD) < 1)",
'Preambulo' : ["from LoKiTracks.decorators import *"]}
self.LowMultPP2PPMuMu_line = StrippingLine(self._myname+"PP2PPMuMuLine",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = ExclusiveDiMuonGEC,
HLT = "HLT_PASS('Hlt2diPhotonDiMuonDecision')"
)
self.registerLine(self.LowMultPP2PPMuMu_line)
self.LowMultPP2PPMuMu_lineps = StrippingLine(self._myname+"PP2PPMuMuLinePS",
prescale = config['LowMultPrescale_ps'],
postscale = config['LowMultPostscale'],
checkPV = False,
HLT = "HLT_PASS('Hlt2diPhotonDiMuonDecision')"
)
self.registerLine(self.LowMultPP2PPMuMu_lineps)
#ELECTRON
ExclusiveElectronGEC = {'Code' : "(recSummaryTrack(LHCb.RecSummary.nBackTracks, TrBACKWARD) < 1) & " \
"(recSummary(LHCb.RecSummary.nTracks, 'Rec/Track/Best') < 6)",
'Preambulo' : ["from LoKiTracks.decorators import *"]}
self.LowMultElectron_line = StrippingLine(self._myname+"ElectronLine",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = ExclusiveElectronGEC,
HLT = "HLT_PASS('Hlt2LowMultElectronDecision')"
)
self.registerLine(self.LowMultElectron_line)
self.LowMultElectron_lineps = StrippingLine(self._myname+"ElectronLinePS",
prescale = config['LowMultPrescale_ps'],
postscale = config['LowMultPostscale'],
checkPV = False,
HLT = "HLT_PASS('Hlt2LowMultElectronDecision')"
)
self.registerLine(self.LowMultElectron_lineps)
self.LowMultElectron_nofilter_line = StrippingLine(self._myname+"ElectronLineNoFilter",
prescale = config['LowMultNoFilterPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
HLT = "HLT_PASS('Hlt2LowMultElectron_nofilterDecision')"
)
self.registerLine(self.LowMultElectron_nofilter_line)
#HADRON
ExclusiveHadronGEC = {'Code' : "(recSummaryTrack(LHCb.RecSummary.nLongTracks, TrLONG) > 1) & " \
"(recSummaryTrack(LHCb.RecSummary.nBackTracks, TrBACKWARD) < 1) & " \
"(CONTAINS('Rec/Track/Best') < 6) ",
'Preambulo' : ["from LoKiTracks.decorators import *"]}
self.LowMultHadron_line = StrippingLine(self._myname+"HadronLine",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = ExclusiveHadronGEC,
HLT = "HLT_PASS('Hlt2LowMultHadronDecision')"
)
self.registerLine(self.LowMultHadron_line)
self.LowMultHadron_lineps = StrippingLine(self._myname+"HadronLinePS",
prescale = config['LowMultPrescale_ps'],
postscale = config['LowMultPostscale'],
checkPV = False,
HLT = "HLT_PASS('Hlt2LowMultHadronDecision')"
)
self.registerLine(self.LowMultHadron_lineps)
self.LowMultHadron_nofilter_line = StrippingLine(self._myname+"HadronLineNoFilter",
prescale = config['LowMultNoFilterPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
HLT = "HLT_PASS('Hlt2LowMultHadron_nofilterDecision')"
)
self.registerLine(self.LowMultHadron_nofilter_line)
#PHOTON
self.LowMultPhoton_line = StrippingLine(self._myname+"PhotonLine",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
HLT = "HLT_PASS('Hlt2LowMultPhotonDecision')"
)
self.registerLine(self.LowMultPhoton_line)
#########
## CEP ##
#########
#
#=== HLT and GEC filters ===#
#
CEPHLTReq = "HLT_PASS_RE('Hlt2LowMult(D.*|C.*|Hadron)Decision')"
CEPFilterTracks = {'Code' :
"(recSummaryTrack(LHCb.RecSummary.nLongTracks, TrLONG) > 1) & " \
"(recSummaryTrack(LHCb.RecSummary.nBackTracks, TrBACKWARD) < 1) & " \
"(CONTAINS ('Rec/Track/Best') < 12)",
'Preambulo' : ["from LoKiTracks.decorators import *"]}
CEPFilterTracksChiC2HH = {'Code' :
"(recSummaryTrack(LHCb.RecSummary.nLongTracks, TrLONG) > 1) & " \
"(recSummaryTrack(LHCb.RecSummary.nBackTracks, TrBACKWARD) < 1) & " \
"(CONTAINS ('Rec/Track/Best') < 6)",
'Preambulo' : ["from LoKiTracks.decorators import *"]}
CEPFilterTracksChiC2HHHH = {'Code' :
"(recSummaryTrack(LHCb.RecSummary.nLongTracks, TrLONG) > 1) & " \
"(recSummaryTrack(LHCb.RecSummary.nBackTracks, TrBACKWARD) < 1) & " \
"(CONTAINS ('Rec/Track/Best') < 8)",
'Preambulo' : ["from LoKiTracks.decorators import *"]}
#
#=== Combinatorics ===#
#
self.selKaons = makeKaons ('KaonsFor' + name,
H_PTmin = config['H_PTmin'],
H_Pmin = config['H_Pmin'],
H_TrkChi2max = config['H_TrkChi2max'],
K_PIDKmin = config['K_PIDKmin']
)
self.selKaonsForKK = makeKaonsForKK ('KaonsForKKFor' + name,
H_PTmin = config['H_PTmin'],
KKInc_K_Pmin = config['KKInc_K_Pmin'],
H_TrkChi2max = config['H_TrkChi2max'],
KKInc_K_PIDKmin = config['KKInc_K_PIDKmin']
)
self.selPions = makePions ('PionsFor' + name,
H_PTmin = config['H_PTmin'],
H_Pmin = config['H_Pmin'],
H_TrkChi2max = config['H_TrkChi2max']
)
self.selProtons = makeProtons ('ProtonsFor' + name,
H_PTmin = config['H_PTmin'],
H_Pmin = config['H_Pmin'],
H_TrkChi2max = config['H_TrkChi2max'],
P_PIDPmin = config['P_PIDPmin']
)
self.selD2KPi = makeD2KPi("selD2KPi",
decayDesc = [ "[D0 -> K- pi+]cc" ],
kaons = self.selKaons,
pions = self.selPions,
D2KPi_APTmin = config['D2KPi_APTmin'],
D2KPi_ADAMASSmax = config['D2KPi_ADAMASSmax'],
D2KPi_ADOCAmax = config['D2KPi_ADOCAmax'],
D2KPi_APmin = config['D2KPi_APmin'],
D2KPi_VtxChi2DoFmax = config['D2KPi_VtxChi2DoFmax']
)
self.selD2KPiPi = makeD2KPiPi("selD2KPiPi",
decayDesc = [ "[D+ -> K+ pi+ pi-]cc",
"[D+ -> K- pi+ pi+]cc" ],
kaons = self.selKaons,
pions = self.selPions,
D2KPiPi_APTmin = config['D2KPiPi_APTmin'],
D2KPiPi_ADAMASSmax = config['D2KPiPi_ADAMASSmax'],
D2KPiPi_ADOCAmax = config['D2KPiPi_ADOCAmax'],
D2KPiPi_APmin = config['D2KPiPi_APmin'],
D2KPiPi_VtxChi2DoFmax = config['D2KPiPi_VtxChi2DoFmax']
)
self.selD2K3Pi = makeD2K3Pi("selD2K3Pi",
decayDesc = [ "[D0 -> K- pi+ pi- pi+]cc" ],
kaons = self.selKaons,
pions = self.selPions,
D2K3Pi_APTmin = config['D2K3Pi_APTmin'],
D2K3Pi_ADAMASSmax = config['D2K3Pi_ADAMASSmax'],
D2K3Pi_ADOCAmax = config['D2K3Pi_ADOCAmax'],
D2K3Pi_APmin = config['D2K3Pi_APmin'],
D2K3Pi_VtxChi2DoFmax = config['D2K3Pi_VtxChi2DoFmax']
)
self.selChiC2HH = makeChiC2HH("selChiC2HH",
decayDesc = [ "chi_c1(1P) -> K+ K-",
"chi_c1(1P) -> pi+ pi-" ],
kaons = self.selKaons,
pions = self.selPions,
ChiC2HH_APTmin = config['ChiC2HH_APTmin'],
ChiC2HH_APTmax = config['ChiC2HH_APTmax'],
ChiC2HH_AMmin = config['ChiC2HH_AMmin'],
ChiC2HH_AMmax = config['ChiC2HH_AMmax'],
ChiC2HH_ADOCAmax = config['ChiC2HH_ADOCAmax'],
ChiC2HH_APmin = config['ChiC2HH_APmin'],
ChiC2HH_VtxChi2DoFmax = config['ChiC2HH_VtxChi2DoFmax']
)
self.selChiC2PP = makeChiC2PP("selChiC2PP",
decayDesc = [ "chi_c1(1P) -> p+ p~-" ],
protons = self.selProtons,
ChiC2PP_APTmin = config['ChiC2PP_APTmin'],
ChiC2PP_APTmax = config['ChiC2PP_APTmax'],
ChiC2PP_AMmin = config['ChiC2PP_AMmin'],
ChiC2PP_AMmax = config['ChiC2PP_AMmax'],
ChiC2PP_ADOCAmax = config['ChiC2PP_ADOCAmax'],
ChiC2PP_APmin = config['ChiC2PP_APmin'],
ChiC2PP_VtxChi2DoFmax = config['ChiC2PP_VtxChi2DoFmax']
)
self.selChiC2HHHH = makeChiC2HHHH("selChiC2HHHH",
decayDesc = [ "[chi_c1(1P) -> K+ K+ pi- pi-]cc",
"chi_c1(1P) -> K+ K- pi+ pi-",
"chi_c1(1P) -> K+ K+ K- K-",
"chi_c1(1P) -> pi+ pi+ pi- pi-" ],
kaons = self.selKaons,
pions = self.selPions,
ChiC2HHHH_APTmin = config['ChiC2HHHH_APTmin'],
ChiC2HHHH_APTmax = config['ChiC2HHHH_APTmax'],
ChiC2HHHH_AMmin = config['ChiC2HHHH_AMmin'],
ChiC2HHHH_AMmax = config['ChiC2HHHH_AMmax'],
ChiC2HHHH_ADOCAmax = config['ChiC2HHHH_ADOCAmax'],
ChiC2HHHH_APmin = config['ChiC2HHHH_APmin'],
ChiC2HHHH_VtxChi2DoFmax = config['ChiC2HHHH_VtxChi2DoFmax']
)
self.selDD = makeDD("selDD",
decayDesc = [ "[psi(3770) -> D0 D0]cc",
"psi(3770) -> D0 D~0",
"psi(3770) -> D+ D-",
"[psi(3770) -> D0 D+]cc",
"[psi(3770) -> D+ D+]cc"
],
inD2KPi = self.selD2KPi,
inD2KPiPi = self.selD2KPiPi,
inD2K3Pi = self.selD2K3Pi
)
self.selKK = makeKK("selKK",
decayDesc = [ "D0 -> K+ K-",
"[D0 -> K+ K+]cc"
],
kaonsForKK = self.selKaonsForKK
)
self.selLMR2HH = makeLMR2HH("selLMR2HH",
decayDesc = [ "phi(1020) -> K+ K-",
"[phi(1020) -> K+ pi-]cc",
"phi(1020) -> pi+ pi-" ],
kaons = self.selKaons,
pions = self.selPions,
LMR2HH_APTmin = config['LMR2HH_APTmin'],
LMR2HH_APTmax = config['LMR2HH_APTmax'],
LMR2HH_AMmin = config['LMR2HH_AMmin'],
LMR2HH_AMmax = config['LMR2HH_AMmax'],
LMR2HH_ADOCAmax = config['LMR2HH_ADOCAmax'],
LMR2HH_APmin = config['LMR2HH_APmin'],
LMR2HH_VtxChi2DoFmax = config['LMR2HH_VtxChi2DoFmax']
)
self.selPHI2KK = makePHI2KK("selPHI2KK",
decayDesc = [ "phi(1020) -> K+ K-" ],
kaons = self.selKaons,
PHI2KK_APTmin = config['PHI2KK_APTmin'],
PHI2KK_APTmax = config['PHI2KK_APTmax'],
PHI2KK_AMmin = config['PHI2KK_AMmin'],
PHI2KK_AMmax = config['PHI2KK_AMmax'],
PHI2KK_ADOCAmax = config['PHI2KK_ADOCAmax'],
PHI2KK_APmin = config['PHI2KK_APmin'],
PHI2KK_VtxChi2DoFmax = config['PHI2KK_VtxChi2DoFmax']
)
#
#=== Wrong-sign lines ===#
#
self.selD2KPiWS = makeD2KPi("selD2KPiWS",
decayDesc = [ "[D0 -> K+ pi+]cc" ],
kaons = self.selKaons,
pions = self.selPions,
D2KPi_APTmin = config['D2KPi_APTmin'],
D2KPi_ADAMASSmax = config['D2KPi_ADAMASSmax'],
D2KPi_ADOCAmax = config['D2KPi_ADOCAmax'],
D2KPi_APmin = config['D2KPi_APmin'],
D2KPi_VtxChi2DoFmax = config['D2KPi_VtxChi2DoFmax']
)
self.selD2KPiPiWS = makeD2KPiPi("selD2KPiPiWS",
decayDesc = [ "[D+ -> K+ pi+ pi+]cc" ],
kaons = self.selKaons,
pions = self.selPions,
D2KPiPi_APTmin = config['D2KPiPi_APTmin'],
D2KPiPi_ADAMASSmax = config['D2KPiPi_ADAMASSmax'],
D2KPiPi_ADOCAmax = config['D2KPiPi_ADOCAmax'],
D2KPiPi_APmin = config['D2KPiPi_APmin'],
D2KPiPi_VtxChi2DoFmax = config['D2KPiPi_VtxChi2DoFmax']
)
self.selD2K3PiWS = makeD2K3Pi("selD2K3PiWS",
decayDesc = [ "[D0 -> K+ pi+ pi+ pi+]cc",
"[D0 -> K+ pi+ pi+ pi-]cc",
"[D0 -> K+ pi- pi- pi-]cc" ],
kaons = self.selKaons,
pions = self.selPions,
D2K3Pi_APTmin = config['D2K3Pi_APTmin'],
D2K3Pi_ADAMASSmax = config['D2K3Pi_ADAMASSmax'],
D2K3Pi_ADOCAmax = config['D2K3Pi_ADOCAmax'],
D2K3Pi_APmin = config['D2K3Pi_APmin'],
D2K3Pi_VtxChi2DoFmax = config['D2K3Pi_VtxChi2DoFmax']
)
self.selChiC2HHWS = makeChiC2HH("selChiC2HHWS",
decayDesc = [ "[chi_c1(1P) -> K+ K+]cc",
"[chi_c1(1P) -> pi+ pi+]cc" ],
kaons = self.selKaons,
pions = self.selPions,
ChiC2HH_APTmin = config['ChiC2HH_APTmin'],
ChiC2HH_APTmax = config['ChiC2HH_APTmax'],
ChiC2HH_AMmin = config['ChiC2HH_AMmin'],
ChiC2HH_AMmax = config['ChiC2HH_AMmax'],
ChiC2HH_ADOCAmax = config['ChiC2HH_ADOCAmax'],
ChiC2HH_APmin = config['ChiC2HH_APmin'],
ChiC2HH_VtxChi2DoFmax = config['ChiC2HH_VtxChi2DoFmax']
)
self.selChiC2PPWS = makeChiC2PP("selChiC2PPWS",
decayDesc = [ "[chi_c1(1P) -> p+ p+]cc" ],
protons = self.selProtons,
ChiC2PP_APTmin = config['ChiC2PP_APTmin'],
ChiC2PP_APTmax = config['ChiC2PP_APTmax'],
ChiC2PP_AMmin = config['ChiC2PP_AMmin'],
ChiC2PP_AMmax = config['ChiC2PP_AMmax'],
ChiC2PP_ADOCAmax = config['ChiC2PP_ADOCAmax'],
ChiC2PP_APmin = config['ChiC2PP_APmin'],
ChiC2PP_VtxChi2DoFmax = config['ChiC2PP_VtxChi2DoFmax']
)
self.selChiC2HHHHWS = makeChiC2HHHH("selChiC2HHHHWS",
decayDesc = [ "[chi_c1(1P) -> K+ K+ pi+ pi+]cc",
"[chi_c1(1P) -> K+ K+ pi+ pi-]cc",
"[chi_c1(1P) -> K+ K- pi+ pi+]cc",
"[chi_c1(1P) -> K+ K+ K+ K+]cc",
"[chi_c1(1P) -> K+ K+ K+ K-]cc",
"[chi_c1(1P) -> pi+ pi+ pi+ pi+]cc",
"[chi_c1(1P) -> pi+ pi+ pi+ pi-]cc" ],
kaons = self.selKaons,
pions = self.selPions,
ChiC2HHHH_APTmin = config['ChiC2HHHH_APTmin'],
ChiC2HHHH_APTmax = config['ChiC2HHHH_APTmax'],
ChiC2HHHH_AMmin = config['ChiC2HHHH_AMmin'],
ChiC2HHHH_AMmax = config['ChiC2HHHH_AMmax'],
ChiC2HHHH_ADOCAmax = config['ChiC2HHHH_ADOCAmax'],
ChiC2HHHH_APmin = config['ChiC2HHHH_APmin'],
ChiC2HHHH_VtxChi2DoFmax = config['ChiC2HHHH_VtxChi2DoFmax']
)
#
#=== Declare lines ===#
#
self.LowMultCEP_D2KPi_line = StrippingLine(self._myname + "CEP_D2KPi_line",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracks,
HLT = CEPHLTReq,
algos = [ self.selD2KPi ]
)
self.registerLine(self.LowMultCEP_D2KPi_line)
self.LowMultCEP_D2KPiPi_line = StrippingLine(self._myname + "CEP_D2KPiPi_line",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracks,
HLT = CEPHLTReq,
algos = [ self.selD2KPiPi ]
)
self.registerLine(self.LowMultCEP_D2KPiPi_line)
self.LowMultCEP_D2K3Pi_line = StrippingLine(self._myname + "CEP_D2K3Pi_line",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracks,
HLT = CEPHLTReq,
algos = [ self.selD2K3Pi ]
)
self.registerLine(self.LowMultCEP_D2K3Pi_line)
self.LowMultCEP_ChiC2HH_line = StrippingLine(self._myname + "CEP_ChiC2HH_line",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracksChiC2HH,
HLT = CEPHLTReq,
algos = [ self.selChiC2HH ]
)
self.registerLine(self.LowMultCEP_ChiC2HH_line)
self.LowMultCEP_ChiC2PP_line = StrippingLine(self._myname + "CEP_ChiC2PP_line",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracksChiC2HH,
HLT = CEPHLTReq,
algos = [ self.selChiC2PP ]
)
self.registerLine(self.LowMultCEP_ChiC2PP_line)
self.LowMultCEP_ChiC2HHHH_line = StrippingLine(self._myname + "CEP_ChiC2HHHH_line",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracksChiC2HHHH,
HLT = CEPHLTReq,
algos = [ self.selChiC2HHHH ]
)
self.registerLine(self.LowMultCEP_ChiC2HHHH_line)
self.LowMultCEP_DD_line = StrippingLine(self._myname + "CEP_DD_line",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracks,
HLT = CEPHLTReq,
algos = [ self.selDD ]
)
self.registerLine(self.LowMultCEP_DD_line)
self.LowMultCEP_KK_line = StrippingLine(self._myname + "CEP_KK_line",
prescale = config['LowMultHHIncPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracks,
HLT = CEPHLTReq,
algos = [ self.selKK ]
)
self.registerLine(self.LowMultCEP_KK_line)
self.LowMultCEP_LMR2HH_line = StrippingLine(self._myname + "CEP_LMR2HH_line",
prescale = config['LowMultLMRPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracksChiC2HHHH,
HLT = CEPHLTReq,
algos = [ self.selLMR2HH ]
)
self.registerLine(self.LowMultCEP_LMR2HH_line)
self.LowMultCEP_PHI2KK_line = StrippingLine(self._myname + "CEP_PHI2KK_line",
prescale = config['LowMultPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracksChiC2HHHH,
HLT = CEPHLTReq,
algos = [ self.selPHI2KK ]
)
self.registerLine(self.LowMultCEP_PHI2KK_line)
self.LowMultCEP_D2KPiWS_line = StrippingLine(self._myname + "CEP_D2KPiWS_line",
prescale = config['LowMultWSPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracks,
HLT = CEPHLTReq,
algos = [ self.selD2KPiWS ]
)
self.registerLine(self.LowMultCEP_D2KPiWS_line)
self.LowMultCEP_D2KPiPiWS_line = StrippingLine(self._myname + "CEP_D2KPiPiWS_line",
prescale = config['LowMultWSPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracks,
HLT = CEPHLTReq,
algos = [ self.selD2KPiPiWS ]
)
self.registerLine(self.LowMultCEP_D2KPiPiWS_line)
self.LowMultCEP_D2K3PiWS_line = StrippingLine(self._myname + "CEP_D2K3PiWS_line",
prescale = config['LowMultWSPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracks,
HLT = CEPHLTReq,
algos = [ self.selD2K3PiWS ]
)
self.registerLine(self.LowMultCEP_D2K3PiWS_line)
self.LowMultCEP_ChiC2HHWS_line = StrippingLine(self._myname + "CEP_ChiC2HHWS_line",
prescale = config['LowMultWSPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracksChiC2HH,
HLT = CEPHLTReq,
algos = [ self.selChiC2HHWS ]
)
self.registerLine(self.LowMultCEP_ChiC2HHWS_line)
self.LowMultCEP_ChiC2PPWS_line = StrippingLine(self._myname + "CEP_ChiC2PPWS_line",
prescale = config['LowMultWSPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracksChiC2HH,
HLT = CEPHLTReq,
algos = [ self.selChiC2PPWS ]
)
self.registerLine(self.LowMultCEP_ChiC2PPWS_line)
self.LowMultCEP_ChiC2HHHHWS_line = StrippingLine(self._myname + "CEP_ChiC2HHHHWS_line",
prescale = config['LowMultWSPrescale'],
postscale = config['LowMultPostscale'],
checkPV = False,
FILTER = CEPFilterTracksChiC2HHHH,
HLT = CEPHLTReq,
algos = [ self.selChiC2HHHHWS ]
)
self.registerLine(self.LowMultCEP_ChiC2HHHHWS_line)
#
#=== Final-state particles ===#
#
def makeKaons(name,
H_PTmin,
H_Pmin,
H_TrkChi2max,
K_PIDKmin
) :
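        # Filter the shared StdAllNoPIDsKaons container on PT, momentum, track-fit
        # chi2/ndf and the kaon PID variable (PIDK).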
_code = "(PT > %(H_PTmin)s) & (P > %(H_Pmin)s) & (TRCHI2DOF < %(H_TrkChi2max)s) & (PIDK > %(K_PIDKmin)s)" % locals()
_kaonsFilter = FilterDesktop(Code = _code)
_stdKaons = DataOnDemand(Location = "Phys/StdAllNoPIDsKaons/Particles")
return Selection(name,
Algorithm = _kaonsFilter,
RequiredSelections = [_stdKaons]
)
def makeKaonsForKK(name,
H_PTmin,
KKInc_K_Pmin,
H_TrkChi2max,
KKInc_K_PIDKmin
) :
_code = "(PT > %(H_PTmin)s) & (P > %(KKInc_K_Pmin)s) & (TRCHI2DOF < %(H_TrkChi2max)s) & (PIDK > %(KKInc_K_PIDKmin)s)" % locals()
_kaonsFilter = FilterDesktop(Code = _code)
_stdKaons = DataOnDemand(Location = "Phys/StdAllNoPIDsKaons/Particles")
return Selection(name,
Algorithm = _kaonsFilter,
RequiredSelections = [_stdKaons]
)
def makePions(name,
H_PTmin,
H_Pmin,
H_TrkChi2max
) :
_code = "(PT > %(H_PTmin)s) & (P > %(H_Pmin)s) & (TRCHI2DOF < %(H_TrkChi2max)s)" % locals()
_pionsFilter = FilterDesktop(Code = _code)
_stdPions = DataOnDemand(Location = "Phys/StdAllNoPIDsPions/Particles")
return Selection (name,
Algorithm = _pionsFilter,
RequiredSelections = [_stdPions]
)
def makeProtons(name,
H_PTmin,
H_Pmin,
H_TrkChi2max,
P_PIDPmin
) :
_code = "(PT > %(H_PTmin)s) & (P > %(H_Pmin)s) & (TRCHI2DOF < %(H_TrkChi2max)s) & (PIDp > %(P_PIDPmin)s)" % locals()
_protonsFilter = FilterDesktop(Code = _code)
_stdProtons = DataOnDemand(Location = "Phys/StdAllNoPIDsProtons/Particles")
return Selection(name,
Algorithm = _protonsFilter,
RequiredSelections = [_stdProtons]
)
#
#=== D0 -> KPi ===#
#
def makeD2KPi(name,
decayDesc,
kaons,
pions,
D2KPi_APTmin,
D2KPi_ADAMASSmax,
D2KPi_ADOCAmax,
D2KPi_APmin,
D2KPi_VtxChi2DoFmax
) :
D2KPi_Comb_cut = "(APT > %(D2KPi_APTmin)s) & (ADAMASS('D0') < %(D2KPi_ADAMASSmax)s) & (ADOCAMAX('LoKi::DistanceCalculator') < %(D2KPi_ADOCAmax)s) & " \
"(AP > %(D2KPi_APmin)s)" % locals()
D2KPi_Mother_cut = "(VFASPF(VCHI2PDOF) < %(D2KPi_VtxChi2DoFmax)s)" % locals()
CombineD2KPi = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = D2KPi_Comb_cut
, MotherCut = D2KPi_Mother_cut)
return Selection(name,
Algorithm = CombineD2KPi,
RequiredSelections = [kaons, pions])
#
#=== D+ -> KPiPi ===#
#
def makeD2KPiPi(name,
decayDesc,
kaons,
pions,
D2KPiPi_APTmin,
D2KPiPi_ADAMASSmax,
D2KPiPi_ADOCAmax,
D2KPiPi_APmin,
D2KPiPi_VtxChi2DoFmax
) :
D2KPiPi_Comb_cut = "(APT > %(D2KPiPi_APTmin)s) & (ADAMASS('D+') < %(D2KPiPi_ADAMASSmax)s) & (ADOCAMAX('LoKi::DistanceCalculator') < %(D2KPiPi_ADOCAmax)s) & " \
"(AP > %(D2KPiPi_APmin)s)" % locals()
D2KPiPi_Mother_cut = "(VFASPF(VCHI2PDOF) < %(D2KPiPi_VtxChi2DoFmax)s)" % locals()
CombineD2KPiPi = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = D2KPiPi_Comb_cut
, MotherCut = D2KPiPi_Mother_cut
)
return Selection(name,
Algorithm = CombineD2KPiPi,
RequiredSelections = [kaons, pions])
#
#=== D0 -> K3Pi ===#
#
def makeD2K3Pi(name,
decayDesc,
kaons,
pions,
D2K3Pi_APTmin,
D2K3Pi_ADAMASSmax,
D2K3Pi_ADOCAmax,
D2K3Pi_APmin,
D2K3Pi_VtxChi2DoFmax
) :
D2K3Pi_Comb_cut = "(APT > %(D2K3Pi_APTmin)s) & (ADAMASS('D0') < %(D2K3Pi_ADAMASSmax)s) & (ADOCAMAX('LoKi::DistanceCalculator') < %(D2K3Pi_ADOCAmax)s) & " \
"(AP > %(D2K3Pi_APmin)s)" % locals()
D2K3Pi_Mother_cut = "(VFASPF(VCHI2PDOF) < %(D2K3Pi_VtxChi2DoFmax)s)" % locals()
CombineD2K3Pi = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = D2K3Pi_Comb_cut
, MotherCut = D2K3Pi_Mother_cut
)
return Selection(name,
Algorithm = CombineD2K3Pi,
RequiredSelections = [kaons, pions])
#
#=== 'chi_c' -> HH ===#
#
def makeChiC2HH(name,
decayDesc,
kaons,
pions,
ChiC2HH_APTmin,
ChiC2HH_APTmax,
ChiC2HH_AMmin,
ChiC2HH_AMmax,
ChiC2HH_ADOCAmax,
ChiC2HH_APmin,
ChiC2HH_VtxChi2DoFmax
) :
ChiC2HH_Comb_cut = "(APT > %(ChiC2HH_APTmin)s) & (APT < %(ChiC2HH_APTmax)s) & (AM > %(ChiC2HH_AMmin)s) & (AM < %(ChiC2HH_AMmax)s) & " \
"(ADOCAMAX('LoKi::DistanceCalculator') < %(ChiC2HH_ADOCAmax)s) & (AP > %(ChiC2HH_APmin)s)" % locals()
ChiC2HH_Mother_cut = "(VFASPF(VCHI2PDOF) < %(ChiC2HH_VtxChi2DoFmax)s)" % locals()
CombineChiC2HH = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = ChiC2HH_Comb_cut
, MotherCut = ChiC2HH_Mother_cut
)
return Selection(name,
Algorithm = CombineChiC2HH,
RequiredSelections = [kaons, pions])
#
#=== 'chi_c' -> PP ===#
#
def makeChiC2PP(name,
decayDesc,
protons,
ChiC2PP_APTmin,
ChiC2PP_APTmax,
ChiC2PP_AMmin,
ChiC2PP_AMmax,
ChiC2PP_ADOCAmax,
ChiC2PP_APmin,
ChiC2PP_VtxChi2DoFmax
) :
ChiC2PP_Comb_cut = "(APT > %(ChiC2PP_APTmin)s) & (APT < %(ChiC2PP_APTmax)s) & (AM > %(ChiC2PP_AMmin)s) & (AM < %(ChiC2PP_AMmax)s) & " \
"(ADOCAMAX('LoKi::DistanceCalculator') < %(ChiC2PP_ADOCAmax)s) & (AP > %(ChiC2PP_APmin)s)" % locals()
ChiC2PP_Mother_cut = "(VFASPF(VCHI2PDOF) < %(ChiC2PP_VtxChi2DoFmax)s)" % locals()
CombineChiC2PP = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = ChiC2PP_Comb_cut
, MotherCut = ChiC2PP_Mother_cut
)
return Selection(name,
Algorithm = CombineChiC2PP,
RequiredSelections = [protons])
#
#=== 'chi_c' -> 4H ===#
#
def makeChiC2HHHH(name,
decayDesc,
kaons,
pions,
ChiC2HHHH_APTmin,
ChiC2HHHH_APTmax,
ChiC2HHHH_AMmin,
ChiC2HHHH_AMmax,
ChiC2HHHH_ADOCAmax,
ChiC2HHHH_APmin,
ChiC2HHHH_VtxChi2DoFmax
) :
ChiC2HHHH_Comb_cut = "(APT > %(ChiC2HHHH_APTmin)s) & (APT < %(ChiC2HHHH_APTmax)s) & (AM > %(ChiC2HHHH_AMmin)s) & (AM < %(ChiC2HHHH_AMmax)s) & " \
"(ADOCAMAX('LoKi::DistanceCalculator') < %(ChiC2HHHH_ADOCAmax)s) & (AP > %(ChiC2HHHH_APmin)s)" % locals()
ChiC2HHHH_Mother_cut = "(VFASPF(VCHI2PDOF) < %(ChiC2HHHH_VtxChi2DoFmax)s)" % locals()
CombineChiC2HHHH = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = ChiC2HHHH_Comb_cut
, MotherCut = ChiC2HHHH_Mother_cut
)
return Selection(name,
Algorithm = CombineChiC2HHHH,
RequiredSelections = [kaons, pions])
#
#=== DD combination ===#
#
def makeDD(name,
decayDesc,
inD2KPi,
inD2KPiPi,
inD2K3Pi
) :
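        # 'AALL' / 'ALL' are pass-all cuts: every D-D combination built from the input
        # selections is kept, with no extra kinematic or vertex-quality requirement here.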
CombineDD = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = "AALL"
, MotherCut = "ALL"
)
return Selection(name,
Algorithm = CombineDD,
RequiredSelections = [inD2KPi, inD2KPiPi, inD2K3Pi])
#
#=== KK inclusive ===#
#
def makeKK(name,
decayDesc,
kaonsForKK
) :
CombineKK = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = "AALL"
, MotherCut = "ALL"
)
KKConf = CombineKK.configurable("Combine_" + name + "_KK")
KKConf.ParticleCombiners.update({'':'MomentumCombiner'})
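        # MomentumCombiner assembles the KK candidate as a plain four-momentum sum,
        # i.e. no vertex fit is attempted for this inclusive di-kaon selection.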
return Selection(name,
Algorithm = CombineKK,
RequiredSelections = [kaonsForKK])
#
#=== Low-mass resonance -> HH ===#
#
def makeLMR2HH(name,
decayDesc,
kaons,
pions,
LMR2HH_APTmin,
LMR2HH_APTmax,
LMR2HH_AMmin,
LMR2HH_AMmax,
LMR2HH_ADOCAmax,
LMR2HH_APmin,
LMR2HH_VtxChi2DoFmax
) :
LMR2HH_Comb_cut = "(APT > %(LMR2HH_APTmin)s) & (APT < %(LMR2HH_APTmax)s) & (AM > %(LMR2HH_AMmin)s) & (AM < %(LMR2HH_AMmax)s) & " \
"(ADOCAMAX('LoKi::DistanceCalculator') < %(LMR2HH_ADOCAmax)s) & (AP > %(LMR2HH_APmin)s)" % locals()
LMR2HH_Mother_cut = "(VFASPF(VCHI2PDOF) < %(LMR2HH_VtxChi2DoFmax)s)" % locals()
CombineLMR2HH = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = LMR2HH_Comb_cut
, MotherCut = LMR2HH_Mother_cut
)
return Selection(name,
Algorithm = CombineLMR2HH,
RequiredSelections = [kaons, pions])
def makePHI2KK(name,
decayDesc,
kaons,
PHI2KK_APTmin,
PHI2KK_APTmax,
PHI2KK_AMmin,
PHI2KK_AMmax,
PHI2KK_ADOCAmax,
PHI2KK_APmin,
PHI2KK_VtxChi2DoFmax
) :
PHI2KK_Comb_cut = "(APT > %(PHI2KK_APTmin)s) & (APT < %(PHI2KK_APTmax)s) & (AM > %(PHI2KK_AMmin)s) & (AM < %(PHI2KK_AMmax)s) & " \
"(ADOCAMAX('LoKi::DistanceCalculator') < %(PHI2KK_ADOCAmax)s) & (AP > %(PHI2KK_APmin)s)" % locals()
PHI2KK_Mother_cut = "(VFASPF(VCHI2PDOF) < %(PHI2KK_VtxChi2DoFmax)s)" % locals()
CombinePHI2KK = CombineParticles( DecayDescriptors = decayDesc
, CombinationCut = PHI2KK_Comb_cut
, MotherCut = PHI2KK_Mother_cut
)
return Selection(name,
Algorithm = CombinePHI2KK,
RequiredSelections = [kaons])
| [
"[email protected]"
] | |
54590a54f5fc9df79887398bae3eac38483a904f | cafee376f09de58b47793ad6c8000882ff73c2a2 | /week1/banner.py | be7edc96cce370ef73b173efcd30e56665751da1 | [] | no_license | edgabriel92/digital_crafts | 28cef7e281b22cf2a40b44173e5800e702f9809c | 6c9367148eb9b968d52fc098e182319444201611 | refs/heads/master | 2021-04-06T08:29:58.916357 | 2018-03-10T16:55:52 | 2018-03-10T16:55:52 | 124,678,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | userText = raw_input("Text? ")
space = len(userText)
for row in range(space):
if row == 0:
print '*' * space
    elif row == space - 1:  # bottom row of the banner
print '*' * space | [
"[email protected]"
] | |
5b7ffde830bffb098822f58bf2f897b6477cabb5 | 97149b75bd21fb9f82aed657ab5180f765927746 | /ietf/group/migrations/0051_populate_groupfeatures_agenda_filter_type.py | fa5025902b5166df6adb3778d40d3843575268fc | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hassanakbar4/ietfdb-filter-repo | c74e7cbfdd5acb8f55ca0fcff6757166582d68db | 67513183b7035014b494bfdd982f9f3990ee0647 | refs/heads/main | 2023-08-25T09:47:28.900431 | 2021-11-09T22:16:24 | 2021-11-09T22:16:24 | 426,613,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Copyright The IETF Trust 2021 All Rights Reserved
from django.db import migrations
def forward(apps, schema_editor):
GroupFeatures = apps.get_model('group', 'GroupFeatures')
# map AgendaFilterTypeName slug to group types - unlisted get 'none'
filter_types = dict(
# list previously hard coded in agenda view, plus 'review'
normal={'wg', 'ag', 'rg', 'rag', 'iab', 'program', 'review'},
heading={'area', 'ietf', 'irtf'},
special={'team', 'adhoc'},
)
for ft, group_types in filter_types.items():
for gf in GroupFeatures.objects.filter(type__slug__in=group_types):
gf.agenda_filter_type_id = ft
gf.save()
def reverse(apps, schema_editor):
pass # nothing to do, model will be deleted anyway
class Migration(migrations.Migration):
dependencies = [
('group', '0050_groupfeatures_agenda_filter_type'),
]
operations = [
migrations.RunPython(forward, reverse),
]
| [
"[email protected]"
] | |
393c3143d0a37450a0cd8316de7af61a85e37382 | 7aa2eb8f887e5bd0b4bdfff2509b244a8d3bb6b2 | /ex30.py | 10505bebb749c28afacc5cac2b5d2f94f44a23d8 | [] | no_license | theyogiwhocodes/lp3thw | 6ded2a0d989c49283aec662c93b2045cbc385c91 | 40fb57dec60671f6d79cbee2f7f7aac90fc96313 | refs/heads/master | 2021-09-04T03:09:56.138135 | 2018-01-15T03:46:36 | 2018-01-15T03:46:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | ## Else and If
people = 30
cars = 40
trucks = 15
if cars > people:
print("We should take the cars.")
elif cars < people:
print("We should not take the cars.")
else:
print("We can't decide.")
if trucks > cars:
print("That's too many trucks.")
elif trucks < cars:
print("Maybe we could take the trucks.")
else:
print("We still can't decide.")
if people > trucks:
print("Alright, let's just take the trucks.")
else:
print("Fine, let's stay home then.") | [
"[email protected]"
] | |
9c8e206c740f98c0ccb55c2acca02c89360f9722 | 654f15aa9ade9ec62c1243fd6619fc7088350bce | /hbmqtt/scripts/sub_script.py | 6a178229dd282ee1461a6dcb82cc0d0236a3f8f5 | [
"MIT"
] | permissive | nfsnfs/amqtt | 29b06b45801536971e7c79bda32bb2a16f12e435 | 759f0ed64fce5d1443988c6af909aca2326a4979 | refs/heads/master | 2023-03-30T20:44:41.709936 | 2021-04-01T09:50:14 | 2021-04-01T10:53:46 | 350,219,108 | 0 | 0 | MIT | 2021-04-02T13:29:50 | 2021-03-22T05:32:20 | null | UTF-8 | Python | false | false | 145 | py | import warnings
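# Backwards-compatibility shim: re-export amqtt's subscriber script under the legacy
# "hbmqtt" import path while warning callers that the old name is deprecated.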
from amqtt.scripts.sub_script import *
warnings.warn("importing hbmqtt is deprecated. Please import amqtt", DeprecationWarning)
| [
"[email protected]"
] | |
485a12f1235acc5c931d5afd6af59d48eb7a96c3 | 17436750e71236feba9a5fdc70bfabdfa6fc73d6 | /Scripts/django-admin.py | 08ae5f164a1101cec4e8ddc5badef934512c9e89 | [] | no_license | ivoovi/djtest | d40fa12ca30f8b1f5b1ab63943e3e9ab0d4dc98d | 38100378b027d11f666202372f60f7b0197894c2 | refs/heads/master | 2020-04-06T04:01:26.809488 | 2015-07-28T16:31:04 | 2015-07-28T16:33:38 | 39,845,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | #!C:\Users\ivo.angelovski\django\Scripts\python.exe
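# Console-script wrapper from the environment's Scripts/ directory (note the absolute
# interpreter path in the shebang); it simply dispatches to Django's management CLI.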
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
396ccbf2e7079c27966af0da3a7a69286a5f5919 | ea691446e415c276debdee3de826015e6e2e7149 | /Convergence_project_Pixy/pixy_rpi-master/pixy_rpi-master/echo.py | 64740b8a9d225052e066794fe88d12a39de42a06 | [] | no_license | hanseunghan/Project | f1c5bc10bc6b19b2dd263a06e25874e9500b75cb | 39fe342691d39aab4cdb95b0a195e1bf6be511dc | refs/heads/master | 2022-02-02T14:25:41.788291 | 2022-01-14T01:13:28 | 2022-01-14T01:13:28 | 104,845,734 | 0 | 1 | null | 2018-02-09T05:28:06 | 2017-09-26T06:37:26 | C | UTF-8 | Python | false | false | 336 | py | import pixy
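# Minimal Pixy camera demo: open the SPI link, then poll getBlocks() in a loop and
# print every object block the camera reports.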
import pixy_spi
p = pixy.Pixy(pixy_spi.LinkSPI())
while(True):
num_blocks = p.getBlocks()
if num_blocks > 0:
print "Detected: %d" % num_blocks
for ii, block in enumerate(p.blocks):
print " block %d" % ii,
pixy.print_block(block)
else:
print "No blocks detected"
| [
"[email protected]"
] | |
2199eb93227afc8efb0144072999e74f59f30930 | 1d925c3989e80d4cc9d635bf928318646621f722 | /PythonClassData/Python_IMP/Link Codes/FileHandling/FilehandlingTasks.py | aed1d7bf98389f3b3da982037012df6abbf86b56 | [] | no_license | mukeshbhoria/PythonClassData | 0af81db1c9635d4a08db92bd36d1bbf1db251212 | e2378b7ddb0acc5123e0c7227ce617fb63c701ff | refs/heads/master | 2022-04-22T05:14:15.285934 | 2020-04-15T07:07:57 | 2020-04-15T07:07:57 | 255,844,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | '''
File Handling Tasks
'''
'''
**************************************** Create Backup File
'''
file_name = input("Enter the file name: ")
Directory = input("Enter the directory of a file: ")
infile = open(Directory+'/'+file_name, 'r')
content = infile.read()
base_file_name = file_name.split('.')[0]
outfile = open(Directory+'/'+base_file_name+'.bckp', 'w')
outfile.write(content)
outfile.flush()
outfile.close()
'''
**************************************** Delete Empty files in directory
'''
import os
basedir = input("Enter the directory")
for dir,subdir,files in os.walk(basedir):
#print dir
#print subdir
#print files
os.chdir(dir)
for i in files:
#print files[i] + str(os.basedir.getsize(files[i]))
#print i, os.path.getsize(i)
if os.path.getsize(i) == 0:
print("file to delete is ", i)
#os.unlink(i)
'''
************************ Accept a directory name from the user and remove it if it was modified more than 30 days ago and its size is under 100 KB (use fun m10)
'''
import os, datetime, time
directory = input("Enter the directory name")
D_path = input("Enter the directory path")
dir_size = 0
for dir, subdir, files in os.walk(D_path + '/' + directory):
os.chdir(dir)
for i in files:
dir_size += os.path.getsize(i)
dir_time_in_float = os.path.getmtime(D_path + '/' + directory)
Curren_Time_in_float = time.time()
Current_time_in_all = datetime.datetime.fromtimestamp(Curren_Time_in_float)
timeDiff = Curren_Time_in_float - dir_time_in_float
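# 3600 s/h * 24 h/day * 30 days = seconds in 30 days; 100000 bytes is the ~100 KB size cap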
if timeDiff > (3600 * 24 * 30) and dir_size < 100000:
print(" file or directory is more than 30 days older, Need to remove")
# os.unlink(D_path+'/'+directory)
else:
print("file is recently mdified on : ", datetime.datetime.fromtimestamp \
(dir_time_in_float))
'''
*********************************** Find python files in entered directory
'''
import os
directory = input("Enter the directory name")
D_path= input("Enter the directory path")
cnt = 0
for dir,subdir,files in os.walk(D_path+'/'+directory):
os.chdir(dir)
for i in files:
        if i.endswith('.py'):  # robust for names with no '.' or with several dots
cnt += 1
print("Number of python files are {}".format(cnt)) | [
"[email protected]"
] | |
f54a7460258b64fddbd6b317be7429f7fed6510c | edf8b4fdc80ff333eb6cb60aff8cf41968892516 | /ex02/8_7.py | 5bb93df4673e873da3479e83c793585be5c2c55d | [] | no_license | khimacademy/c101 | aad84d70e240885b41a4c948bffc462aa5a9b174 | 2100231a5e838884bc285bf33b12feb41ffad2b5 | refs/heads/master | 2020-03-16T22:48:45.057623 | 2018-05-11T15:07:47 | 2018-05-11T15:07:47 | 133,053,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | import random
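# Random "Indian-style" name generator (Korean word lists): joins one birth-year
# adjective, one birth-month noun and one birth-date phrase.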
birth_year = ['시끄러운 ', '푸른 ', '적색 ', '조용한 ', '웅크린 ', '백색 ', '지혜로운 ', '용감한 ', '날카로운 ', '욕심많은 ']
birth_month = ['늑대', '태양', '양', '매', '황소', '불꽃', '나무', '달빛', '말', '돼지', '하늘', '바람']
birth_date = ['와(과) 함께 춤을', '의 기상', '은(는) 그림자 속에', '', '', '', '의 환생', '의 죽음', ' 아래에서', '을(를) 보라', '이(가) 노래하다', ' 그림자', '의 일격', '에게 쫓기는 남자', '의 행진', '의 왕', '의 유령', '을(를) 죽인자', '은(는) 맨날 잠잔다', '처럼', '의 고향', '의 전사', '은(는) 나의 친구', '의 노래', '의 정령', '의 파수꾼', '의 악마', '와(과) 같은 사나이', '을(를) 쓰러트린자', '', '은(는) 말이없다']
random_name = random.choice(birth_year) + random.choice(birth_month) + random.choice(birth_date)
print('당신의 인디언식 이름은', random_name, '입니다.')
| [
"[email protected]"
] | |
b65fffee66504b5b0e39054ba8119d5315174f22 | 7abbcd16dcf2e639e53665d50ec113e1374b79eb | /ROIIM/settings.py | 4694ca27b2c475b4f9f05f448a344b3f718e71a2 | [] | no_license | srajsonu/ROIIM-Assignment-Paysafe | ab6f160641adb69cef2f78bde594322f286ff089 | 1d9586e29f1871e4e9577ff2befd594c8a9cbbe4 | refs/heads/main | 2023-01-04T23:42:53.403941 | 2020-10-31T17:17:57 | 2020-10-31T17:17:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,308 | py | """
Django settings for ROIIM project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'al_&n_c(==dma7bl7or4v_@fqz3%bcn1$e@1t^n_877v0ifp5r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'users.apps.UsersConfig',
'checkout.apps.CheckoutConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ROIIM.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ROIIM.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# #for heroku deployment
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# #Activate Django-heroku
# django_on_heroku.settings(locals())
| [
"[email protected]"
] | |
73d4a33c3b4ba57b5f94c9863889924014b14f4d | 1326d8961d9e29edf46af79e7eec0defc80095f8 | /Code/78. Subsets.py | 80f14f75363aefd0c87c6f46276bae210bf48cc1 | [] | no_license | yuansun86/leetcode | 58e01131bdbd8314583d7319866570d49cf12eb6 | a2f626eef66a3b0e3a11f385ff59b961b3b85edc | refs/heads/master | 2023-07-12T20:11:54.481347 | 2021-08-17T06:44:15 | 2021-08-17T06:44:15 | 286,399,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | class Solution:
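    # Backtracking DFS: record a copy of the current subset at every node, then try
    # extending it with each later element.
    from typing import List  # for the List annotations; typically pre-imported on the LeetCode judge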
def subsets(self, nums: List[int]) -> List[List[int]]:
def dfs(nums, current, index, result):
result.append(current.copy())
for i in range(index + 1, len(nums)):
                current.append(nums[i])      # choose nums[i]
                dfs(nums, current, i, result)
                current.pop()                # un-choose (backtrack)
cur = []
result = []
dfs(nums, cur, -1, result)
return result | [
"[email protected]"
] | |
939e29581caa80c464f394037ba3b65483285f56 | 10c6c706a81e45222f936c4c10f97e6654bb488c | /locallibrary/urls.py | 6e054668512a8a666b6984195a5ddb93f39849f3 | [] | no_license | souravramos/locallibrary | e72ec06a171db276f30158a6aff63305b57d0404 | eddb897a0df7fef72cdcd4427c6d5b362716f849 | refs/heads/master | 2022-12-09T09:30:37.021694 | 2020-09-09T10:10:09 | 2020-09-09T10:10:09 | 294,070,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | """locallibrary URL Configuration
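# Move the last of the space-separated integers to the front and print them all.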
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import RedirectView
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('catalog.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"[email protected]"
] | |
a9761cdec486e1cf7f1b3070dae451027449aae9 | 27d9474e974b0ac965bc30a1e2529c20c74dd74a | /src/abc161_a.py | fe49a03cebf509d2edaae0c53f4d7394fcb1e7ed | [] | no_license | ShirasuSalaD/CompetitiveProgramming | ed16ef864e9730693ee2fc2da07f1f51f70c6e7d | 60e0e9a46aec686d8c32986d39a790c47884d06c | refs/heads/master | 2022-07-25T12:50:23.851627 | 2020-05-17T15:53:50 | 2020-05-17T15:53:50 | 254,588,325 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | s = list(map(int, input().split()))
c = s[-1]
s.pop(-1)
s.insert(0,c)
print(*s)
| [
"[email protected]"
] | |
7ecad64dbd2a4a3eed561b90cdf6a5522b1482c2 | a10b828fc190d635a0b881289e24b02317a86f49 | /setup.py | 5ce3541905cb1f2d4b30467835a8708018455348 | [
"MIT"
] | permissive | fjhheras/hyperevolve | c99bb5da1515feff312c0a4433d1b54aebcd15d9 | 3aaf20660fc83f0c854a977d2089e5d9c6ffda16 | refs/heads/master | 2020-03-17T01:33:06.172988 | 2018-05-12T16:00:52 | 2018-05-12T16:00:52 | 133,157,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from distutils.core import setup
setup(
name='HyperEvolve',
version='0.1dev',
packages=['hyperevolve',],
license='MIT',
long_description=open('README.md').read(),
)
| [
"[email protected]"
] | |
5c8789023e51ce9b762629769c26a4978ba5edf1 | ecb45574b9fe9bc3404eb0e1143bbbe96341dd4a | /first.py | 1022d5cb82727f60603941d0465fb44839f4a749 | [] | no_license | Harry2522/webUIAuto | ebe946c2cfa721c7edc2b4817a8eae2e17e4d9f2 | f8e7e073f9dec25e40bc44263f3ee85b61f480be | refs/heads/master | 2022-11-21T05:36:31.834738 | 2020-07-23T06:00:16 | 2020-07-23T06:00:16 | 279,778,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName:first.py
# @Time :2020/7/15 14:09
# @Author :Harry
'''
Controlling the browser with webdriver
'''
from selenium import webdriver
import time
driver = webdriver.Chrome()
driver.maximize_window()  # maximize the window
# driver.set_window_size(600,400)  # set the window size
driver.get("http://www.baidu.com")  # open the browser and visit Baidu
time.sleep(2)
driver.refresh()  # refresh the page
time.sleep(2)
driver.get("http://www.taobao.com")
time.sleep(2)
driver.get("http://www.jd.com")
time.sleep(2)
driver.back()  # go back
time.sleep(2)
driver.forward()  # go forward
time.sleep(3)  # wait 3 seconds
# driver.close()  # close the browser
driver.quit()  # quit the browser
| [
"[email protected]"
] | |
00bcc1cd0d93ac41e1e3d81388d2d53362e54606 | 75aefcdfa5ea4796f93ec7fdfb972bdd1b3aeea2 | /fhirclient/models/claim_tests.py | d67c0e4ef7c00d751a15b05de0d61289948aa908 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | bchaballout/client-py | 89a90dd2c991bbbd3710d2783afdc7bb3db5bca6 | f1c2807fdc4a534cbd9e8b17ccb54f93de848bcb | refs/heads/master | 2021-01-21T20:23:12.412748 | 2015-08-10T23:36:08 | 2015-08-10T23:36:08 | 39,397,021 | 0 | 0 | null | 2015-07-20T17:05:34 | 2015-07-20T17:05:34 | null | UTF-8 | Python | false | false | 33,861 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 0.5.0.5149 on 2015-07-06.
# 2015, SMART Health IT.
import os
import io
import unittest
import json
from . import claim
from .fhirdate import FHIRDate
class ClaimTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Claim", js["resourceType"])
return claim.Claim(js)
def testClaim1(self):
inst = self.instantiate_from("claim-example-institutional.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim1(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim1(inst2)
def implClaim1(self, inst):
self.assertTrue(inst.coverage[0].focal)
self.assertEqual(inst.coverage[0].relationship.code, "self")
self.assertEqual(inst.coverage[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosis.code, "654456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "960150")
self.assertEqual(inst.identifier[0].system, "http://happyhospital.com/claim")
self.assertEqual(inst.identifier[0].value, "9612345")
self.assertEqual(inst.item[0].net.code, "USD")
self.assertEqual(inst.item[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].net.value, 125.0)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].service.code, "exam")
self.assertEqual(inst.item[0].service.system, "http://hl7.org/fhir/ex-serviceproduct")
self.assertEqual(inst.item[0].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].type.code, "service")
self.assertEqual(inst.item[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].unitPrice.value, 125.0)
self.assertEqual(inst.payee.type.code, "provider")
self.assertEqual(inst.priority.code, "normal")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "institutional")
self.assertEqual(inst.use, "complete")
def testClaim2(self):
inst = self.instantiate_from("claim-example-oral-average.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim2(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim2(inst2)
def implClaim2(self, inst):
self.assertTrue(inst.coverage[0].focal)
self.assertEqual(inst.coverage[0].relationship.code, "self")
self.assertEqual(inst.coverage[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosis.code, "123456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "100151")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/claim")
self.assertEqual(inst.identifier[0].value, "12346")
self.assertEqual(inst.item[0].net.code, "USD")
self.assertEqual(inst.item[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].net.value, 135.57)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].service.code, "1200")
self.assertEqual(inst.item[0].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].type.code, "service")
self.assertEqual(inst.item[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].unitPrice.value, 135.57)
self.assertEqual(inst.item[1].bodySite.code, "21")
self.assertEqual(inst.item[1].bodySite.system, "http://fdi.org/fhir/oraltoothcodes")
self.assertEqual(inst.item[1].net.code, "USD")
self.assertEqual(inst.item[1].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[1].net.value, 105.0)
self.assertEqual(inst.item[1].sequence, 2)
self.assertEqual(inst.item[1].service.code, "21211")
self.assertEqual(inst.item[1].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[1].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[1].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[1].subSite[0].code, "L")
self.assertEqual(inst.item[1].subSite[0].system, "http://fdi.org/fhir/oralsurfacecodes")
self.assertEqual(inst.item[1].type.code, "service")
self.assertEqual(inst.item[1].unitPrice.code, "USD")
self.assertEqual(inst.item[1].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[1].unitPrice.value, 105.0)
self.assertEqual(inst.item[2].bodySite.code, "36")
self.assertEqual(inst.item[2].bodySite.system, "http://fdi.org/fhir/oraltoothcodes")
self.assertEqual(inst.item[2].detail[0].net.code, "USD")
self.assertEqual(inst.item[2].detail[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].detail[0].net.value, 750.0)
self.assertEqual(inst.item[2].detail[0].sequence, 1)
self.assertEqual(inst.item[2].detail[0].service.code, "27211")
self.assertEqual(inst.item[2].detail[0].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[2].detail[0].type.code, "service")
self.assertEqual(inst.item[2].detail[0].unitPrice.code, "USD")
self.assertEqual(inst.item[2].detail[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].detail[0].unitPrice.value, 750.0)
self.assertEqual(inst.item[2].detail[1].net.code, "USD")
self.assertEqual(inst.item[2].detail[1].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].detail[1].net.value, 350.0)
self.assertEqual(inst.item[2].detail[1].sequence, 2)
self.assertEqual(inst.item[2].detail[1].service.code, "lab")
self.assertEqual(inst.item[2].detail[1].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[2].detail[1].type.code, "service")
self.assertEqual(inst.item[2].detail[1].unitPrice.code, "USD")
self.assertEqual(inst.item[2].detail[1].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].detail[1].unitPrice.value, 350.0)
self.assertEqual(inst.item[2].net.code, "USD")
self.assertEqual(inst.item[2].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].net.value, 1100.0)
self.assertEqual(inst.item[2].sequence, 3)
self.assertEqual(inst.item[2].service.code, "27211")
self.assertEqual(inst.item[2].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[2].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[2].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[2].type.code, "group")
self.assertEqual(inst.item[2].unitPrice.code, "USD")
self.assertEqual(inst.item[2].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].unitPrice.value, 1100.0)
self.assertEqual(inst.payee.type.code, "provider")
self.assertEqual(inst.priority.code, "normal")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the Oral Health Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "oral")
self.assertEqual(inst.use, "complete")
def testClaim3(self):
inst = self.instantiate_from("claim-example-oral-contained.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim3(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim3(inst2)
def implClaim3(self, inst):
self.assertTrue(inst.coverage[0].focal)
self.assertEqual(inst.coverage[0].relationship.code, "self")
self.assertEqual(inst.coverage[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosis.code, "123456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "100152")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/claim")
self.assertEqual(inst.identifier[0].value, "12347")
self.assertEqual(inst.item[0].net.code, "USD")
self.assertEqual(inst.item[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].net.value, 135.57)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].service.code, "1200")
self.assertEqual(inst.item[0].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].type.code, "service")
self.assertEqual(inst.item[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].unitPrice.value, 135.57)
self.assertEqual(inst.payee.type.code, "provider")
self.assertEqual(inst.priority.code, "normal")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the Oral Health Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "oral")
self.assertEqual(inst.use, "complete")
def testClaim4(self):
inst = self.instantiate_from("claim-example-oral-orthoplan.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim4(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim4(inst2)
def implClaim4(self, inst):
self.assertTrue(inst.coverage[0].focal)
self.assertEqual(inst.coverage[0].relationship.code, "self")
self.assertEqual(inst.coverage[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2015-03-16").date)
self.assertEqual(inst.created.as_json(), "2015-03-16")
self.assertEqual(inst.diagnosis[0].diagnosis.code, "123457")
self.assertEqual(inst.diagnosis[0].diagnosis.system, "http://hl7.org/fhir/icd-10")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "100153")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/claim")
self.assertEqual(inst.identifier[0].value, "12355")
self.assertEqual(inst.item[0].detail[0].net.code, "USD")
self.assertEqual(inst.item[0].detail[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[0].net.value, 1000.0)
self.assertEqual(inst.item[0].detail[0].sequence, 1)
self.assertEqual(inst.item[0].detail[0].service.code, "ORTHOEXAM")
self.assertEqual(inst.item[0].detail[0].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[0].type.code, "service")
self.assertEqual(inst.item[0].detail[0].type.system, "http://hl7.org/fhir/actinvoicegroupcode")
self.assertEqual(inst.item[0].detail[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].detail[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[0].unitPrice.value, 1000.0)
self.assertEqual(inst.item[0].detail[1].net.code, "USD")
self.assertEqual(inst.item[0].detail[1].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[1].net.value, 1500.0)
self.assertEqual(inst.item[0].detail[1].sequence, 2)
self.assertEqual(inst.item[0].detail[1].service.code, "ORTHODIAG")
self.assertEqual(inst.item[0].detail[1].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[1].type.code, "service")
self.assertEqual(inst.item[0].detail[1].type.system, "http://hl7.org/fhir/actinvoicegroupcode")
self.assertEqual(inst.item[0].detail[1].unitPrice.code, "USD")
self.assertEqual(inst.item[0].detail[1].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[1].unitPrice.value, 1500.0)
self.assertEqual(inst.item[0].detail[2].net.code, "USD")
self.assertEqual(inst.item[0].detail[2].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[2].net.value, 500.0)
self.assertEqual(inst.item[0].detail[2].sequence, 3)
self.assertEqual(inst.item[0].detail[2].service.code, "ORTHOINITIAL")
self.assertEqual(inst.item[0].detail[2].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[2].type.code, "service")
self.assertEqual(inst.item[0].detail[2].type.system, "http://hl7.org/fhir/actinvoicegroupcode")
self.assertEqual(inst.item[0].detail[2].unitPrice.code, "USD")
self.assertEqual(inst.item[0].detail[2].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[2].unitPrice.value, 500.0)
self.assertEqual(inst.item[0].detail[3].quantity.value, 24)
self.assertEqual(inst.item[0].detail[3].sequence, 4)
self.assertEqual(inst.item[0].detail[3].service.code, "ORTHOMONTHS")
self.assertEqual(inst.item[0].detail[3].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[3].type.code, "service")
self.assertEqual(inst.item[0].detail[3].type.system, "http://hl7.org/fhir/actinvoicegroupcode")
self.assertEqual(inst.item[0].detail[4].net.code, "USD")
self.assertEqual(inst.item[0].detail[4].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[4].net.value, 250.0)
self.assertEqual(inst.item[0].detail[4].quantity.value, 24)
self.assertEqual(inst.item[0].detail[4].sequence, 5)
self.assertEqual(inst.item[0].detail[4].service.code, "ORTHOPERIODIC")
self.assertEqual(inst.item[0].detail[4].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].detail[4].type.code, "service")
self.assertEqual(inst.item[0].detail[4].type.system, "http://hl7.org/fhir/actinvoicegroupcode")
self.assertEqual(inst.item[0].detail[4].unitPrice.code, "USD")
self.assertEqual(inst.item[0].detail[4].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[4].unitPrice.value, 250.0)
self.assertEqual(inst.item[0].diagnosisLinkId[0], 1)
self.assertEqual(inst.item[0].net.code, "USD")
self.assertEqual(inst.item[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].net.value, 9000.0)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].service.code, "ORTHPLAN")
self.assertEqual(inst.item[0].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[0].serviceDate.date, FHIRDate("2015-05-16").date)
self.assertEqual(inst.item[0].serviceDate.as_json(), "2015-05-16")
self.assertEqual(inst.item[0].type.code, "group")
self.assertEqual(inst.item[0].type.system, "http://hl7.org/fhir/actinvoicegroupcode")
self.assertEqual(inst.item[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].unitPrice.value, 9000.0)
self.assertEqual(inst.item[1].bodySite.code, "21")
self.assertEqual(inst.item[1].bodySite.system, "http://fdi.org/fhir/oraltoothcodes")
self.assertEqual(inst.item[1].net.code, "USD")
self.assertEqual(inst.item[1].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[1].net.value, 105.0)
self.assertEqual(inst.item[1].sequence, 2)
self.assertEqual(inst.item[1].service.code, "21211")
self.assertEqual(inst.item[1].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[1].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[1].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[1].subSite[0].code, "L")
self.assertEqual(inst.item[1].subSite[0].system, "http://fdi.org/fhir/oralsurfacecodes")
self.assertEqual(inst.item[1].type.code, "service")
self.assertEqual(inst.item[1].unitPrice.code, "USD")
self.assertEqual(inst.item[1].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[1].unitPrice.value, 105.0)
self.assertEqual(inst.item[2].bodySite.code, "36")
self.assertEqual(inst.item[2].bodySite.system, "http://fdi.org/fhir/oraltoothcodes")
self.assertEqual(inst.item[2].detail[0].net.code, "USD")
self.assertEqual(inst.item[2].detail[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].detail[0].net.value, 750.0)
self.assertEqual(inst.item[2].detail[0].sequence, 1)
self.assertEqual(inst.item[2].detail[0].service.code, "27211")
self.assertEqual(inst.item[2].detail[0].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[2].detail[0].type.code, "service")
self.assertEqual(inst.item[2].detail[0].unitPrice.code, "USD")
self.assertEqual(inst.item[2].detail[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].detail[0].unitPrice.value, 750.0)
self.assertEqual(inst.item[2].detail[1].net.code, "USD")
self.assertEqual(inst.item[2].detail[1].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].detail[1].net.value, 350.0)
self.assertEqual(inst.item[2].detail[1].sequence, 2)
self.assertEqual(inst.item[2].detail[1].service.code, "lab")
self.assertEqual(inst.item[2].detail[1].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[2].detail[1].type.code, "service")
self.assertEqual(inst.item[2].detail[1].unitPrice.code, "USD")
self.assertEqual(inst.item[2].detail[1].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].detail[1].unitPrice.value, 350.0)
self.assertEqual(inst.item[2].net.code, "USD")
self.assertEqual(inst.item[2].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].net.value, 1100.0)
self.assertEqual(inst.item[2].sequence, 3)
self.assertEqual(inst.item[2].service.code, "27211")
self.assertEqual(inst.item[2].service.system, "http://hl7.org/fhir/oralservicecodes")
self.assertEqual(inst.item[2].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[2].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[2].type.code, "group")
self.assertEqual(inst.item[2].unitPrice.code, "USD")
self.assertEqual(inst.item[2].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[2].unitPrice.value, 1100.0)
self.assertEqual(inst.payee.type.code, "provider")
self.assertEqual(inst.priority.code, "normal")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the Oral Health Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "oral")
self.assertEqual(inst.use, "proposed")
def testClaim5(self):
inst = self.instantiate_from("claim-example-pharmacy.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim5(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim5(inst2)
def implClaim5(self, inst):
self.assertTrue(inst.coverage[0].focal)
self.assertEqual(inst.coverage[0].relationship.code, "self")
self.assertEqual(inst.coverage[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosis.code, "654456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "760150")
self.assertEqual(inst.identifier[0].system, "http://happypharma.com/claim")
self.assertEqual(inst.identifier[0].value, "7612345")
self.assertEqual(inst.item[0].net.code, "USD")
self.assertEqual(inst.item[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].net.value, 60.0)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].service.code, "smokecess")
self.assertEqual(inst.item[0].service.system, "http://hl7.org/fhir/ex-pharmaservice")
self.assertEqual(inst.item[0].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].type.code, "service")
self.assertEqual(inst.item[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].unitPrice.value, 60.0)
self.assertEqual(inst.payee.type.code, "provider")
self.assertEqual(inst.priority.code, "stat")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the Pharmacy Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "pharmacy")
self.assertEqual(inst.use, "complete")
def testClaim6(self):
inst = self.instantiate_from("claim-example-professional.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim6(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim6(inst2)
def implClaim6(self, inst):
self.assertTrue(inst.coverage[0].focal)
self.assertEqual(inst.coverage[0].relationship.code, "self")
self.assertEqual(inst.coverage[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosis.code, "654456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "860150")
self.assertEqual(inst.identifier[0].system, "http://happypdocs.com/claim")
self.assertEqual(inst.identifier[0].value, "8612345")
self.assertEqual(inst.item[0].net.code, "USD")
self.assertEqual(inst.item[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].net.value, 75.0)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].service.code, "exam")
self.assertEqual(inst.item[0].service.system, "http://hl7.org/fhir/ex-serviceproduct")
self.assertEqual(inst.item[0].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].type.code, "service")
self.assertEqual(inst.item[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].unitPrice.value, 75.0)
self.assertEqual(inst.payee.type.code, "provider")
self.assertEqual(inst.priority.code, "normal")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "professional")
self.assertEqual(inst.use, "complete")
def testClaim7(self):
inst = self.instantiate_from("claim-example-vision-glasses.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim7(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim7(inst2)
def implClaim7(self, inst):
self.assertTrue(inst.coverage[0].focal)
self.assertEqual(inst.coverage[0].relationship.code, "self")
self.assertEqual(inst.coverage[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosis.code, "654321")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "660151")
self.assertEqual(inst.identifier[0].system, "http://happysight.com/claim")
self.assertEqual(inst.identifier[0].value, "6612346")
self.assertEqual(inst.item[0].detail[0].net.code, "USD")
self.assertEqual(inst.item[0].detail[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[0].net.value, 100.0)
self.assertEqual(inst.item[0].detail[0].sequence, 1)
self.assertEqual(inst.item[0].detail[0].service.code, "frame")
self.assertEqual(inst.item[0].detail[0].service.system, "http://hl7.org/fhir/ex-visionservice")
self.assertEqual(inst.item[0].detail[0].type.code, "product")
self.assertEqual(inst.item[0].detail[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].detail[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[0].unitPrice.value, 100.0)
self.assertEqual(inst.item[0].detail[1].net.code, "USD")
self.assertEqual(inst.item[0].detail[1].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[1].net.value, 100.0)
self.assertEqual(inst.item[0].detail[1].quantity.value, 2)
self.assertEqual(inst.item[0].detail[1].sequence, 2)
self.assertEqual(inst.item[0].detail[1].service.code, "lens")
self.assertEqual(inst.item[0].detail[1].service.system, "http://hl7.org/fhir/ex-visionservice")
self.assertEqual(inst.item[0].detail[1].type.code, "product")
self.assertEqual(inst.item[0].detail[1].unitPrice.code, "USD")
self.assertEqual(inst.item[0].detail[1].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[1].unitPrice.value, 50.0)
self.assertEqual(inst.item[0].detail[2].factor, 0.07)
self.assertEqual(inst.item[0].detail[2].net.code, "USD")
self.assertEqual(inst.item[0].detail[2].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[2].net.value, 14.0)
self.assertEqual(inst.item[0].detail[2].sequence, 3)
self.assertEqual(inst.item[0].detail[2].service.code, "fst")
self.assertEqual(inst.item[0].detail[2].service.system, "http://hl7.org/fhir/ex-visionservice")
self.assertEqual(inst.item[0].detail[2].type.code, "tax")
self.assertEqual(inst.item[0].detail[2].unitPrice.code, "USD")
self.assertEqual(inst.item[0].detail[2].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].detail[2].unitPrice.value, 200.0)
self.assertEqual(inst.item[0].net.code, "USD")
self.assertEqual(inst.item[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].net.value, 214.0)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].service.code, "glasses")
self.assertEqual(inst.item[0].service.system, "http://hl7.org/fhir/ex-visionservice")
self.assertEqual(inst.item[0].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].type.code, "group")
self.assertEqual(inst.item[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].unitPrice.value, 214.0)
self.assertEqual(inst.payee.type.code, "provider")
self.assertEqual(inst.priority.code, "normal")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the Vision Claim for Glasses</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "vision")
self.assertEqual(inst.use, "complete")
def testClaim8(self):
inst = self.instantiate_from("claim-example-vision.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim8(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim8(inst2)
def implClaim8(self, inst):
self.assertTrue(inst.coverage[0].focal)
self.assertEqual(inst.coverage[0].relationship.code, "self")
self.assertEqual(inst.coverage[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosis.code, "654321")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "660150")
self.assertEqual(inst.identifier[0].system, "http://happysight.com/claim")
self.assertEqual(inst.identifier[0].value, "6612345")
self.assertEqual(inst.item[0].net.code, "USD")
self.assertEqual(inst.item[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].net.value, 80.0)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].service.code, "exam")
self.assertEqual(inst.item[0].service.system, "http://hl7.org/fhir/ex-visionservice")
self.assertEqual(inst.item[0].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].type.code, "service")
self.assertEqual(inst.item[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].unitPrice.value, 80.0)
self.assertEqual(inst.payee.type.code, "provider")
self.assertEqual(inst.priority.code, "normal")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the Vision Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "vision")
self.assertEqual(inst.use, "complete")
def testClaim9(self):
inst = self.instantiate_from("claim-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Claim instance")
self.implClaim9(inst)
js = inst.as_json()
self.assertEqual("Claim", js["resourceType"])
inst2 = claim.Claim(js)
self.implClaim9(inst2)
def implClaim9(self, inst):
self.assertTrue(inst.coverage[0].focal)
self.assertEqual(inst.coverage[0].relationship.code, "self")
self.assertEqual(inst.coverage[0].sequence, 1)
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.diagnosis[0].diagnosis.code, "123456")
self.assertEqual(inst.diagnosis[0].sequence, 1)
self.assertEqual(inst.id, "100150")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/claim")
self.assertEqual(inst.identifier[0].value, "12345")
self.assertEqual(inst.item[0].net.code, "USD")
self.assertEqual(inst.item[0].net.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].net.value, 135.57)
self.assertEqual(inst.item[0].sequence, 1)
self.assertEqual(inst.item[0].service.code, "1200")
self.assertEqual(inst.item[0].serviceDate.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.item[0].serviceDate.as_json(), "2014-08-16")
self.assertEqual(inst.item[0].type.code, "service")
self.assertEqual(inst.item[0].unitPrice.code, "USD")
self.assertEqual(inst.item[0].unitPrice.system, "urn:std:iso:4217")
self.assertEqual(inst.item[0].unitPrice.value, 135.57)
self.assertEqual(inst.payee.type.code, "provider")
self.assertEqual(inst.priority.code, "normal")
self.assertEqual(inst.text.div, "<div>A human-readable rendering of the Oral Health Claim</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "oral")
self.assertEqual(inst.use, "complete")
| [
"[email protected]"
] | |
6df1b6d43dfd9b7d2c8c66f79844bb9990260bb6 | 863a420520418f4b9a1fa88219252772df6ead25 | /box_generator.py | 601b5633468a5765d865452365e1dc4013d7338e | [] | no_license | orborde/optimizers-curse | 25693b4fdf8d34137f2140c2e0779be0dfefca29 | 93d16f2e4ea4b1a893d3dedd4fdf1827c383f55e | refs/heads/master | 2021-04-30T16:37:46.046514 | 2017-01-26T03:14:36 | 2017-01-26T03:14:36 | 80,079,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # Boxes have labels from 0 to 20. We want the label to accurately
# report the expected value of the box.
#
# Turns out, this is actually kinda tricky.
LABELS = range(0, 20+1)
ERROR_MAX = 5
# For each label, generate boxes of the same error size in pairs (one
# high, and one low) to keep the EV for that label symmetric.
BOXES = []
for l in LABELS:
# Generate a zero-error box.
BOXES.append( (l, l) )
# Generate error-offset pairs.
for e in xrange(1, ERROR_MAX + 1):
# You can't put negative dollars in the box, though.
if e > l:
continue
BOXES.append( (l, l - e) )
BOXES.append( (l, l + e) )
def mean(arr):
return sum(arr) / float(len(arr))
if __name__ == '__main__':
# Make sure that the EV actually matches the label for all boxes.
import collections
outcomes = collections.defaultdict(list)
for label, actual in BOXES:
outcomes[label].append(actual)
for label in sorted(outcomes.keys()):
values = outcomes[label]
assert label == mean(values)
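    # Added illustrative check (not in the original script): for label 2 with
    # ERROR_MAX = 5, the generated outcomes are 0, 1, 2, 3 and 4 -- the symmetric
    # pairs keep the mean at exactly 2 even though errors above the label are skipped.
    print("label 2 outcomes:", sorted(outcomes[2]))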
| [
"[email protected]"
] | |
dd452cb832b730c1ce93788b8274c1a6b799f5f4 | 9b2b14bc68af07d8640660aedc559e852e41deaf | /django_admin_shell/settings.py | 9d8ffafcfb45dd22d337218a34f06794d38f9c54 | [
"MIT"
] | permissive | luoshuihudie/django-admin-shell | 872e61bd46dfa9619223dbaf6c38cf068c214ce4 | 62c09e259c6e8e07b2a69f08fccebebd06605d07 | refs/heads/master | 2022-09-29T20:06:12.262415 | 2020-06-08T06:26:47 | 2020-06-08T06:26:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | from django.conf import settings
def from_settings_or_default(name, default):
"""Get attribute from settings by name or return default value"""
return getattr(settings, name, default)
ADMIN_SHELL_ENABLE = from_settings_or_default('ADMIN_SHELL_ENABLE', True)
ADMIN_SHELL_ONLY_DEBUG_MODE = from_settings_or_default('ADMIN_SHELL_ONLY_DEBUG_MODE', True)
ADMIN_SHELL_ONLY_FOR_SUPERUSER = from_settings_or_default('ADMIN_SHELL_ONLY_FOR_SUPERUSER', True)
ADMIN_SHELL_OUTPUT_SIZE = from_settings_or_default('ADMIN_SHELL_OUTPUT_SIZE', 250)
ADMIN_SHELL_SESSION_KEY = from_settings_or_default('ADMIN_SHELL_SESSION_KEY', 'django_admin_shell_output')
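# Usage sketch (added; not part of the package itself): because every value above is
# read with getattr(settings, ...), a project can override any of them from its own
# settings.py, for example:
#   ADMIN_SHELL_ENABLE = False
#   ADMIN_SHELL_OUTPUT_SIZE = 500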
| [
"[email protected]"
] | |
479d22e2a90d378aefcde1622fcf3f9b20defb3b | 6f407956c4eb87d46c35776a61e1d788ac148f26 | /Scripts/__imageUtils/setup.py | 69986d27b814d91939cc0ac5d9d0a664260551bc | [] | no_license | decobeirne/collab-rob-fwork | 3b99402132d49568ffb745732d10530a80575fb7 | 527c9f09c8a49af28a33fe2dccffd0ffa9bbd547 | refs/heads/master | 2021-01-01T18:18:36.408989 | 2013-11-24T20:13:16 | 2013-11-24T20:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # Run "setup.py install" or "setup.py --help"
from distutils.core import setup, Extension
imageUtils_mod = Extension('imageUtils', sources = ['imageUtils.c'])
setup(name = "imageUtils",
version = "1.0",
description = "A module providing functionality to process image data for the purpose of training an object detection model.",
ext_modules = [imageUtils_mod])
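# Typical build/install commands for this C extension (illustrative, assuming a
# working C toolchain is available):
#   python setup.py build_ext --inplace
#   python setup.py install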
| [
"[email protected]"
] | |
e8158614834d793346fa04dcca631a7b03ad848b | 061be453f57c13c7d6d94909119874727efc45cf | /algorithm_puzzles/book1/23.py | 043cc9dc18afedc95c2b6822509592d99e56d0b0 | [] | no_license | woodchuckchoi/algorithms | e600407d038f6fa6bfbfc3fc7d43ec7ff6677dc6 | 9998219a0b1ba2c612814c13f3b0a035621634f6 | refs/heads/master | 2022-12-12T11:38:39.257936 | 2020-09-14T16:02:15 | 2020-09-14T16:02:15 | 295,451,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | memo = {}
def game(coin, depth):
key = (coin, depth)
if key in memo:
return memo[key]
if coin == 0:
return 0
if depth == 0:
return 1
win = game(coin + 1, depth - 1)
lose = game(coin - 1, depth - 1)
memo[key] = win + lose
return memo[key]
print(game(10, 24))
| [
"[email protected]"
] | |
50f303a45e9e71e6835653f8748e6342d5a399a7 | 76dacf3bfef69c72da228bb4242194dd042b8b3f | /jasmin.py | 7c69f1198c880e1bdaf169b4a09223a43f28a3a2 | [] | no_license | lillamy2012/scripts | fa64bcfbfafdd0e6ae4b7af482f599ea2d37625a | 621c7606e07d21348854665a79d451e01930520f | refs/heads/master | 2021-01-22T12:36:55.260771 | 2018-02-15T10:42:25 | 2018-02-15T10:42:25 | 102,352,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | import os
import sys
infile = open('../Araport11_jasmin.cumulated_incl200up.downstream.bed')
for line in infile:
    a = line.split()

if a[3] == "Upstream_200bp":
a[2]=int(a[1])+199
if a[4] == "-":
a[3]= "Downstream_200bp"
elif a[3] == "Downstream_200bp":
a[1]=int(a[2])-199
if a[4] == "-":
a[3]= "Upstream_200bp"
print('\t'.join(map(str,a)))
| [
"[email protected]"
] | |
655712c48b8e3269f7ded7e0a10596e8d7ca99d6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_333/ch158_2020_06_22_14_11_49_882526.py | 35e060ff4493472da35d6d453c952ee5f1ee6e52 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | with open('texto.txt', 'r') as arquivo:
conteudo = arquivo.read()
lista_letras = conteudo.split()
print(len(lista_letras)) | [
"[email protected]"
] | |
8cd8751626def413a2d50c0ebdd69cd028aa5d1b | c4b4a7c4a3d5df779d9926e5e143070b2244c234 | /TerraSible/scripts/run_init.py | d1c60eb11c83f28a03977b7c46fe10104c491744 | [] | no_license | ConductorLabsManha/Sprint01 | 03f5f45407e46205589a7c6dfe5172e46618030c | 63fde6990532138a5b2fb55fbc53253a6352342a | refs/heads/master | 2020-04-10T17:29:49.220166 | 2018-12-19T23:49:05 | 2018-12-19T23:49:05 | 161,176,027 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | import subprocess
import os
os.system("rm -rf ~/.aws")
os.system("cp --force -r ../utils/aws ~/")
os.system("mv ~/aws ~/.aws")
proc = subprocess.Popen(["aws ec2 describe-instances --query 'Reservations[*].Instances[*].PublicIpAddress' --output=text --profile ellan"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
out = str(out)
out = out.replace("\\n", ",")
out = out[:len(out)-2]
out = out.replace("b'", "")
ips = out.split(",")
print ("program output:", ips)
os.system("rm -rf ~/.ssh/ellan.pem")
os.system('cp --force ../utils/ssh/ellan.pem ~/.ssh')
os.system('chmod 400 ~/.ssh/ellan.pem')
os.system('rm -rf ~/.ansible-config')
os.system('cp --force -r ../utils/ansible-config ~/')
os.system('mv --force ~/ansible-config ~/.ansible-config')
os.system('rm -rf ~/.ansible.cfg')
os.system('cp --force -r ../utils/ansible-config/ansible.cfg ~/')
os.system('mv --force ~/ansible.cfg ~/.ansible.cfg')
fl = open('hosts', 'a')
fl.write('[gitlab]\n')
fl.write(ips[0]+'\n')
fl.write('[gitlab:vars]\n')
fl.write('ansible_ssh_user=centos\n')
fl.write('ansible_ssh_private_key_file = ~/.ssh/ellan.pem')
fl.write('\n\n')
fl.write('[rancher]\n')
fl.write(ips[1]+'\n')
fl.write('[rancher:vars]\n')
fl.write('ansible_ssh_user=centos\n')
fl.write('ansible_ssh_private_key_file = ~/.ssh/ellan.pem')
os.system('mv hosts ~/.ansible-config/hosts')
| [
"[email protected]"
] | |
3be150e918d31dbb4cc65fb5d2b4cba4f94120b7 | d2e69d4d3d1e11a87f5a377e4a423422fe0a7058 | /ProxyServerSpiders/ProxyServerSpiders/pipelines.py | 856fde62341fc0d507149b6748414b38ec27869e | [] | no_license | oJacker/_python | 6f30dd4a60c1593d27c00ac485163fc0ba77dd8c | 8086d0cd78e156abfff9819a56384149dd431c56 | refs/heads/master | 2021-05-06T03:13:29.167281 | 2018-02-01T09:41:42 | 2018-02-01T09:41:42 | 114,827,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ProxyserverspidersPipeline(object):
def process_item(self, item, spider):
return item
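# Enabling this pipeline happens in the project's settings.py; a minimal sketch
# (300 is just the usual Scrapy ordering value):
#
# ITEM_PIPELINES = {
#     'ProxyServerSpiders.pipelines.ProxyserverspidersPipeline': 300,
# }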
| [
"[email protected]"
] | |
8fac8218dde5732dcaccb6b65cf7d70e11ed6c6a | 03ec5158c12d7e4042936b084cbc7e2a266b3168 | /python/com/jpmanjarres/hackerrank/warmup/CompareTheTriplets.py | d79cb80570ff0efdb7f86f31056613951c49e435 | [] | no_license | paul-manjarres/coding-exercises | 063271dc89c4477df6f2cdd4b058af84de8f2892 | f9e1d53d1247dde6751baaf2e188495e00bb8129 | refs/heads/master | 2022-12-16T02:37:40.510609 | 2022-11-28T00:01:42 | 2022-11-28T00:01:42 | 46,098,833 | 0 | 0 | null | 2020-04-25T03:53:29 | 2015-11-13T03:56:11 | Java | UTF-8 | Python | false | false | 278 | py | a = list(map(int, input().strip().split(" ")))
b = list(map(int, input().strip().split(" ")))
a_points=0
b_points=0
for i in range(len(a)):
if a[i] > b[i]:
a_points=a_points+1
elif a[i] < b[i]:
b_points=b_points+1
print("%d %d" % (a_points, b_points) ) | [
"[email protected]"
] | |
dc3f11f51eb8421fb823f581064858c0c963d173 | 4dcfaa112a5bcb888bcd2523ab76e57b6ab0d3b3 | /server/jotdown/api/migrations/0002_auto_20190309_1945.py | 5582d5742bfc8ca2400702987ea097965cf3d04c | [] | no_license | eastend-street/jotdown | ae466181c1358d10b70e62948faf7ae038c62c98 | b80175fb528c80d8a82062e7f073c646bff40923 | refs/heads/master | 2023-03-11T06:52:25.487151 | 2022-12-08T09:12:31 | 2022-12-08T09:12:31 | 161,026,871 | 9 | 1 | null | 2023-03-04T17:15:42 | 2018-12-09T10:12:11 | Python | UTF-8 | Python | false | false | 914 | py | # Generated by Django 2.1.7 on 2019-03-09 19:45
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='bookmark',
name='User',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='bookmarks', to='api.User'),
),
migrations.AddField(
model_name='bookmark',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='bookmark',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
]
| [
"[email protected]"
] | |
7c75bdd84cea9c5e7b3aac6e40ef9b847fcf6689 | 540f3ad8571aeeaf5b9e073aed03ec44928848b5 | /2sem/dz2/2.4.py | 120fdc9116aecb1f97f875079a2fd7bee5142e2a | [] | no_license | Hecomer/2s | decb85faab54ab308893b3e7b268a79b2620bbfe | 0914a60cd1ea9bbae128f761b1dd0fd96c8cb96d | refs/heads/master | 2023-04-09T20:29:10.288516 | 2021-04-26T18:06:24 | 2021-04-26T18:06:24 | 361,829,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | def swap(first, second):
first[:], second[:] = second[:], first[:]
first = [1, 2, 3]
second = [4, 5, 6]
first_content = first[:]
second_content = second[:]
swap(first, second)
print(first, second_content, first == second_content)
print(second, first_content, second == first_content) | [
"[email protected]"
] | |
c185b8cb8f190c9a29aa87446ed91c81c412b885 | d63da6a2c531bb675e898f09a25b551f97b13ac5 | /twitter_dashboard/tweet_cleaning.py | 965748ffbd5d9385a286209e434967e32456d901 | [] | no_license | AiswaryaSrinivas/DeepDiveData | df78c2cefe75ada7a069a3dff36ee074dea1378b | 1abd2cbfde593e80aa8664e548c3c9366b37ab70 | refs/heads/master | 2023-07-25T17:10:29.104231 | 2020-03-23T09:12:35 | 2020-03-23T09:12:35 | 230,627,612 | 0 | 0 | null | 2023-07-06T21:36:03 | 2019-12-28T15:26:55 | Python | UTF-8 | Python | false | false | 3,195 | py | import pandas as pd
import numpy as np
from nltk.sentiment.util import *
from nltk import tokenize
from textblob import TextBlob
import sys
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
STOPWORDS=stopwords.words("english")
STOPWORDS.append("with")
ps = PorterStemmer()
sys.path.append("TwitterEmotion/")
from TwitterEmotion.emotion_predictor import EmotionPredictor
import TwitterEmotion
## Function to remove URL from tweets
def removeURL(text,replace_text=""):
re_url="http(\S+)*|\S+\.com\S+|bit.ly\S+|\S+utm_source\S+|bit\.ly(\S+)*|ow\.ly(\S+)*" #\S+ matched all non-whitespace characters
return re.sub(re_url,replace_text,text)
## Remove any kind of user mention from tweet
def removeUserMentions(text,replace_text=""):
re_usermentions="^@\w{1,15}|@\w{1,15}"
return re.sub(re_usermentions,replace_text,text)
## Remove any HashTag present in the tweet
def removeHashTag(text,replace_text=""):
re_hashtag="#\S+"
return re.sub(re_hashtag,replace_text,text)
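## Illustrative example (added): chaining the three helpers on a tweet such as
## "@user watched Chhapaak https://t.co/abc #Chhapaak" leaves roughly "watched Chhapaak"
## (plus leftover whitespace), which is what the __main__ block below relies on.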
def getSenti(polarity):
if polarity>0:
return "positive"
if polarity<0:
return "negative"
return "neutral"
import re  # note: string and PorterStemmer are already imported above; re is the only import that was missing
def clean_text(text):
ps=PorterStemmer()
text = text.translate(str.maketrans({key: " {0} ".format(key) for key in string.punctuation}))
#remove extra white space
text_cleaned="".join([x for x in text if x not in string.punctuation])
text_cleaned=re.sub(' +', ' ', text_cleaned)
text_cleaned=re.sub(r'[^\x00-\x7F]+',' ', text_cleaned)
text_cleaned=text_cleaned.lower()
tokens=text_cleaned.split(" ")
tokens=[token for token in tokens if token not in STOPWORDS]
text_cleaned=" ".join([ps.stem(token) for token in tokens])
return text_cleaned
def getEmotionModel(method="ekman",setting="mc"):
return EmotionPredictor(classification=method, setting=setting)
def detectEmotion(tweet,model):
return model.predict_classes([tweet])['Emotion'].tolist()[0]
if __name__ == '__main__':
tweets=pd.read_csv("/Users/aiswarya/DataScienceArena/deep_dive_analytics/twitter_dashboard/TweetScraper-master/CHAPPAK_DATA/Tweets.csv",encoding="utf-8")
tweets['cleaned_tweet']=tweets['text'].apply(lambda x:removeUserMentions(x))
tweets['cleaned_tweet']=tweets['cleaned_tweet'].apply(lambda x:removeURL(x))
tweets['cleaned_tweet']=tweets['cleaned_tweet'].apply(lambda x:removeHashTag(x))
tweets['score']=tweets['cleaned_tweet'].apply(lambda x:TextBlob(x).sentiment)
tweets['polarity']=tweets['score'].apply(lambda x:x.polarity)
tweets['subjectivity']=tweets['score'].apply(lambda x:x.subjectivity)
tweets['sentiment']=tweets['polarity'].apply(lambda x:getSenti(x))
tweets['cleaned_tweet']=tweets['cleaned_tweet'].apply(lambda x:clean_text(x))
print("Detecting Moods")
model=getEmotionModel()
tweets['ekman_mood']=tweets['text'].apply(lambda x:detectEmotion(x,model))
model_2=getEmotionModel(method="plutchik")
tweets['plutchik_mood']=tweets['text'].apply(lambda x:detectEmotion(x,model_2))
tweets.to_csv("Chhapak_Tweets_Sentiment.csv",index=False,encoding="utf-8")
| [
"[email protected]"
] | |
a63ced3a2c446078eb60febe8ae557dbda9adcaa | 63c91b089638880151a8a281c109aeadc2b55961 | /evaluation_annotation/process_cf.py | 570d7e5f387906be9c3112c93c884bbaeba56058 | [
"Apache-2.0"
] | permissive | jhlau/deepspeare | 945c1ab4890421c6babbbaaf97bfd737542686ca | 08bd93ee5b023f6097261a51845929deb86296cd | refs/heads/master | 2022-10-17T06:18:05.099040 | 2022-09-30T00:56:33 | 2022-09-30T00:56:33 | 132,543,358 | 74 | 26 | null | null | null | null | UTF-8 | Python | false | false | 3,808 | py | """
Author: Jey Han Lau
Date: Oct 17
"""
import argparse
import sys
import unicodecsv as csv
import cPickle as pickle
from collections import defaultdict
import numpy as np
import operator
#parser arguments
desc = "Process CF results to compute accuracy"
parser = argparse.ArgumentParser(description=desc)
#arguments
parser.add_argument("-r", "--result-csv", required=True, help="csv file containing CF results")
parser.add_argument("-m", "--model-pickle", help="pickle file containing model and IDs")
args = parser.parse_args()
#parameters
debug = False
golden_col = 11
judgement_col = 1
id1_col = 5
id2_col = 6
worker_col = 17
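#illustrative invocation (added; file names are hypothetical):
#  python process_cf.py -r cf_results.csv -m model_ids.pkl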
###########
#functions#
###########
def get_model_names(row, model):
selected, unselected = "", ""
if row[judgement_col] == "Poem 1":
selected = row[id1_col]
unselected = row[id2_col]
else:
selected = row[id2_col]
unselected = row[id1_col]
selected_mname = (selected if model == None else model[int(selected)])
unselected_mname = (unselected if model == None else model[int(unselected)])
return selected_mname, unselected_mname
def get_score(selected_mname, unselected_mname):
if selected_mname != unselected_mname:
if selected_mname == "real":
return unselected_mname, 1.0
else:
return selected_mname, 0.0
else:
return None, None
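#added note: get_score returns (model_name, 1.0) when the worker preferred the real
#poem over that model and (model_name, 0.0) when the model's poem was chosen, so the
#mean over accs[model] in main() is the fraction of pairings the humans got right.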
######
#main#
######
def main():
#load ids and model names
model = None
if args.model_pickle:
model = pickle.load(open(args.model_pickle))
if debug:
print model
#first parse to find perfect score worker (perfect score worker might be cheating)
worker_accs = defaultdict(list)
for row in csv.reader(open(args.result_csv), encoding="utf-8"):
if row[golden_col] != "false":
continue
worker_id = (row[worker_col], row[worker_col+1])
selected_mname, unselected_mname = get_model_names(row, model)
key, score = get_score(selected_mname, unselected_mname)
if key != None:
worker_accs[worker_id].append(score)
#remove annotations from perfect workers
perfect_workers = set([])
worker_meanacc = {}
country_count = (defaultdict(int), defaultdict(int))
for k, v in sorted(worker_accs.items()):
worker_meanacc[k] = np.mean(v)
        if np.mean(v) == -1.0:  # note: scores are 0.0/1.0, so this never matches and the perfect-worker filter below is effectively disabled
perfect_workers.add(k)
country_count[1][k[1]] += 1
country_count[0][k[1]] += 1
#print "Number of perfect workers =", len(perfect_workers), "/", len(worker_accs)
if debug:
print "\nall country count =", sorted(country_count[0].items(), key=operator.itemgetter(1), reverse=True)
print "\nperfect country count =", sorted(country_count[1].items(), key=operator.itemgetter(1), reverse=True)
for k, v in sorted(worker_meanacc.items(), key=operator.itemgetter(1), reverse=True):
print k, v
#parse results csv
accs = defaultdict(list)
for row in csv.reader(open(args.result_csv), encoding="utf-8"):
if row[golden_col] != "false":
continue
worker_id = (row[worker_col], row[worker_col+1])
if worker_id in perfect_workers:
continue
selected_mname, unselected_mname = get_model_names(row, model)
key, score = get_score(selected_mname, unselected_mname)
if key != None:
accs[key].append(score)
if debug:
print "\n", row[0], ":", row[id1_col], "vs.", row[id2_col]
print "Selected ID =", selected_mname, score
#print mean accuracy
for k in accs.keys():
print "Mean Accuracy: Real vs.", k, "=", np.mean(accs[k]), "(", len(accs[k]), ")"
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c438f8a8958392194e5ec8f5b040dc6b9fb93094 | 2c3b090bc2d6c5d2ca2b86f132b950f250ce5c02 | /tutorials/evaluating_model/tutorial_four.py | 10e8f4cc15ccc2e7ed8ae4f1b4cb4b767121cb70 | [
"MIT"
] | permissive | InspectorDidi/CausalWorld | b8f7096f0cb623d5c8b7f6040c830f6ef07b4127 | 548e66c36fba01125cf6290992dfd833ae42709b | refs/heads/master | 2022-12-26T23:37:15.568356 | 2020-10-12T08:02:32 | 2020-10-12T08:02:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | """
This tutorial shows you how to use a controller and evaluate it afterwards using
an evaluation pipeline comprised of different evaluation protocols.
"""
from causal_world.evaluation.evaluation import EvaluationPipeline
import causal_world.evaluation.protocols as protocols
import causal_world.evaluation.visualization.visualiser as vis
log_relative_path = './reacher_controller_evaluation'
def control_policy(env):
def _control_policy(obs):
return \
env.get_robot().get_joint_positions_from_tip_positions(
obs[-9:], obs[1:10])
return _control_policy
def evaluate_controller():
# pass the different protocols you'd like to evaluate in the following
task_params = dict()
task_params['task_generator_id'] = 'reaching'
world_params = dict()
world_params['normalize_observations'] = False
world_params['normalize_actions'] = False
evaluator = EvaluationPipeline(evaluation_protocols=[
protocols.ProtocolGenerator(name=
'goal_poses_space_a',
first_level_regex=
'goal_.*',
second_level_regex=
'cylindrical_position',
variable_space='space_a'),
protocols.ProtocolGenerator(name=
'goal_poses_space_b',
first_level_regex=
'goal_.*',
second_level_regex=
'cylindrical_position',
variable_space='space_b')
], task_params=task_params, world_params=world_params,
visualize_evaluation=True)
controller_fn = control_policy(evaluator.evaluation_env)
    # For demonstration purposes we evaluate the policy on 2 per
    # cent of the default number of episodes per protocol
scores = evaluator.evaluate_policy(controller_fn, fraction=0.02)
evaluator.save_scores(log_relative_path)
experiments = {'reacher_model': scores}
vis.generate_visual_analysis(log_relative_path, experiments=experiments)
print(scores)
if __name__ == '__main__':
evaluate_controller()
| [
"[email protected]"
] | |
9b035474444b9df6e18203574899dd3bc88ed562 | 917f2a06cf57e2ca5f0d8f8dab3c363400f1396d | /utils/extract_name_id.py | 5926ffdc26c646fea3de6eaed89ca5fb9bf00f3e | [] | no_license | hammer-wang/file_processing | b6719d6b62a9396a7da994db087d9f85ab554435 | 223f85524a1eba48921b9d85e5349066100d34ad | refs/heads/master | 2021-01-14T04:32:53.194223 | 2020-09-24T15:53:26 | 2020-09-24T15:53:26 | 242,601,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | """
This script renames the .ipynb files to the format [uniquename]_[umid].ipynb and puts them into a new dir.
Author: Haozhu Wang ([email protected])
Date: 2020-02-21
Dependency:
-- tqdm
-- jupyter nbconvert 5.6.0 (this should come with jupyter lab)
Example:
python extract_name_id.py --source ~/Documents/EECS504_files/psets/pset1/submissions --target ~/Documents/EECS504_files/psets/pset1/submissions_renamed
python extract_name_id.py --source ~/Documents/EECS504_files/psets/pset5/submissions --target ~/Documents/EECS504_files/psets/pset5/submissions_renamed
python extract_name_id.py --source ~/Documents/EECS504_files/psets/pset6/submissions --target ~/Documents/EECS504_files/psets/pset6/submissions_renamed
"""
# python extract_name_id.py --source ~/Documents/EECS504_files/psets/pset6/submissions --target ~/Documents/EECS504_files/psets/pset6/submissions_renamed
# python extract_name_id.py --source ~/Documents/EECS504_files/psets/pset7/submissions --target ~/Documents/EECS504_files/psets/pset7/submissions_renamed
# python extract_name_id.py --source ~/Documents/EECS504_files/psets/pset8/submissions --target ~/Documents/EECS504_files/psets/pset8/submissions_renamed
# python extract_name_id.py --source ~/Documents/EECS504_files/psets/pset9/submissions --target ~/Documents/EECS504_files/psets/pset9/submissions_renamed
# python extract_name_id.py --source ~/Documents/EECS504_files/psets/pset10/submissions --target ~/Documents/EECS504_files/psets/pset10/submissions_renamed
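# Illustrative mapping (added; the filename is hypothetical): for a submission named
# "doejane_123_456_jdoe_98765432.ipynb", the '_'.join(file.split('_')[3:5]) branch
# below yields "jdoe_98765432.ipynb", i.e. the desired [uniquename]_[umid] form.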
import os
import tqdm
import subprocess
import re
if __name__ == "__main__":
import argparse
argparser = argparse.ArgumentParser()
    argparser.add_argument('--source', type=str, help='where to extract the ipynbs', default=None)
    argparser.add_argument('--target', type=str, help='where to store the renamed ipynbs', default=None)
argparser.add_argument('--smoke_test', action='store_true')
args = argparser.parse_args()
source = args.source
target = args.target
files = os.listdir(source)
if ".DS_Store" in files:
files.remove(".DS_Store")
print('Total number of ipynb files {}'.format(len(files)))
if not os.path.exists(target):
os.mkdir(target)
if args.smoke_test:
files = files[:3]
for file in tqdm.tqdm(files):
if "LATE" in file:
file_ = '_'.join(file.split('_')[4:6])
elif "EECS" in file:
file_ = file
elif len(file.split('_')) <= 4:
file_ = file
else:
file_ = '_'.join(file.split('_')[3:5])
# print edge case
if ".ipynb" == file_:
print(file)
if '.ipynb' not in file_:
file_ += '.ipynb'
# remove version
if '-' in file_:
file_ = re.sub(r"-\d", "", file_)
cmd = 'cp {} {}'.format(os.path.join(source, file), os.path.join(target, file_)).split(' ')
# print(cmd)
command_run = subprocess.call(cmd)
if command_run:
print(cmd) | [
"[email protected]"
] | |
da71f5d45b34ab62983eb0904dc4d8d27ccc828d | c8f820edc0c480082ac57306b9a33ad8570203ba | /keras/0723/keras06_RMSE.py | 5cfd8d2cfd1f36f04aa731f2f1569cdd8711b82d | [] | no_license | JIN-YEONG/keras_example | 4f9224158babb2224053377e511695a2fea64ad1 | 97b279dbf2ba7a16ce93abca55711fa9b2cce252 | refs/heads/master | 2022-02-21T22:01:37.110003 | 2019-09-15T23:59:20 | 2019-09-15T23:59:20 | 198,376,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py |
# 1. 데이터
import numpy as np
x_train = np.array([1,2,3,4,5,6,7,8,9,10]) # 10행 1열의 데이터
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_test = np.array([11,12,13,14,15,16,17,18,19,20])
y_test = np.array([11,12,13,14,15,16,17,18,19,20])
# x3 = np.array([101,102,103,104,105,106]) # 6행 1열의 데이터
# x4 = np.array(range(30,50)) # 30~49값
# 딥러닝의 데이터는 열이 우선된다(행은 무시)
# input.shape(a,b) => 데이터의 행,열의 표현
# => a -> 행, b-> 열
# 2. 모델 구성
from keras.models import Sequential
from keras.layers import Dense
model = Sequential() # 순서대로 내려가는 모델
# 노드가 5개, 3개, 4개인 레이어 3개를 가진 모델
# model.add(Dense(5, input_dim=1, activation='relu')) # input_dim = 입력 데이터의 컬넘의 개수
# 데이터의 행과 상관없이 열의 개수만 맞아도 데이터를 넣을 수 있다.
model.add(Dense(21, input_shape=(1,), activation='relu')) # input_shape = 데이터의 shape를 기준으로 입력
model.add(Dense(7))
model.add(Dense(5))
model.add(Dense(1))
# model.summary()
# 3. 훈련
model.compile(loss='mse', optimizer='adam', metrics=['accuracy']) # mse = mean squared error 평균 제곱 에러
# model.fit(x,y,epochs=100, batch_size = 3)
model.fit(x_train,y_train,epochs=1000)
# 4. 평가 예측
lose,acc = model.evaluate(x_test,y_test,batch_size=1)
print('acc: ',acc) # acc는 회귀모델에서만 사용할 수 있다.
y_predict = model.predict(x_test) # 모델의 예측값
print(y_predict)
# RMSE 구하기
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict): # 평균 제곱근 오차
return np.sqrt(mean_squared_error(y_test, y_predict)) # root(mean((y_test - y_predict)^2))
# 루트를 씨우는 이유 -> 값을 작게 만들기 위해
print('RMSE: ', RMSE(y_test, y_predict)) # 작을 수록 좋다. | [
"[email protected]"
] | |
7af2abeb802513450eeacf1a37de43fd1172416f | acd981a3b0ed2865c75f3ebd6c3a0854556386f9 | /Pytroch/Example/ganNet.py | 02ed6bebb100e2cefab9d941d5c0c5fce71ce14a | [] | no_license | haolingyi/Python | 8719b88246e58f11cbb6277475d12c4def8e24d1 | 5229bf4ae4462e2a4fa7999a35cfbdad20bab8b2 | refs/heads/master | 2020-04-17T20:47:32.872615 | 2019-01-22T03:36:11 | 2019-01-22T03:36:11 | 161,257,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
BATCH_SIZE = 64
LR_G = 0.0001
LR_D = 0.0001
N_IDEAS = 5
ART_COMPONENTS = 15
PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS)
for _ in range(BATCH_SIZE)])
def artist_works():
a = np.random.uniform(1, 2, size=BATCH_SIZE)[:, np.newaxis]
paintings = a * np.power(PAINT_POINTS, 2) + (a-1)
paintings = torch.from_numpy(paintings).float()
return paintings
G = nn.Sequential(
nn.Linear(N_IDEAS, 128),
nn.ReLU(),
nn.Linear(128, ART_COMPONENTS),
)
D = nn.Sequential(
nn.Linear(ART_COMPONENTS, 128),
nn.Linear(128, 1),
nn.Sigmoid(),
)
opt_D = torch.optim.Adam(D.parameters(), lr=LR_D)
opt_G = torch.optim.Adam(G.parameters(), lr=LR_G)
plt.ion()
for step in range(10000):
artist_paintings = artist_works()
G_ideas = torch.randn(BATCH_SIZE, N_IDEAS)
G_paintings = G(G_ideas)
prob_artist0 = D(artist_paintings)
prob_artist1 = D(G_paintings)
D_loss = - torch.mean(torch.log(prob_artist0) +
torch.log(1. - prob_artist1))
G_loss = torch.mean(torch.log(1. - prob_artist1))
opt_D.zero_grad()
D_loss.backward(retain_graph=True)
opt_D.step()
opt_G.zero_grad()
G_loss.backward()
opt_G.step()
if step % 50 == 0: # plotting
plt.cla()
plt.plot(PAINT_POINTS[0], G_paintings.data.numpy()[
0], c='#4AD631', lw=3, label='Generated painting',)
plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0],
2) + 1, c='#74BCFF', lw=3, label='upper bound')
plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0],
2) + 0, c='#FF9359', lw=3, label='lower bound')
plt.text(-.5, 2.3, 'D accuracy=%.2f (0.5 for D to converge)' %
prob_artist0.data.numpy().mean(), fontdict={'size': 13})
plt.text(-.5, 2, 'D score= %.2f (-1.38 for G to converge)' % -
D_loss.data.numpy(), fontdict={'size': 13})
plt.ylim((0, 3))
plt.legend(loc='upper right', fontsize=10)
plt.draw()
plt.pause(0.01)
plt.ioff()
plt.show()
| [
"1031909300@qq.com"
] | 1031909300@qq.com |
025feeecda138da3b5497a8bf20f9c7c167b5c9a | 972a694174dfbd25dbf27d8aa98d272625c2400f | /bin/apred | 615f8a79cb96cb34f9e655aafc3447ad2c83724f | [
"BSD-3-Clause"
] | permissive | sdss/apogee | d3344021ac66f91e4259dd1fcf160ee7657740ec | e134409dc14b20f69e68a0d4d34b2c1b5056a901 | refs/heads/master | 2021-08-06T18:44:13.668542 | 2021-06-11T13:00:51 | 2021-06-11T13:00:51 | 127,458,050 | 5 | 5 | BSD-3-Clause | 2018-09-20T22:18:47 | 2018-03-30T18:02:58 | IDL | UTF-8 | Python | false | false | 1,292 | #!/usr/bin/env python
# encoding: utf-8
#
# @Author: Jon Holtzman
# @Date: March 2018
# @Filename: apred
# @License: BSD 3-Clause
# @Copyright: Jon Holtzman
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import os
import sys
import subprocess
import pdb
if __name__ == '__main__' :
parser = argparse.ArgumentParser(
prog=os.path.basename(sys.argv[0]),
description='Runs apogee IDL reduction')
parser.add_argument('planfile', type=str, help='Plan file')
parser.add_argument("--done")
parser.add_argument("--host")
parser.add_argument('--clobber', help='Overwrite files?',action="store_true")
parser.add_argument("--flag",type=str,default='11111')
args=parser.parse_args()
if args.clobber: clobber='1'
else : clobber='0'
subprocess.call(["idl","-e","apred,'"+args.planfile+"','"+args.flag+"','"+clobber+"'"])
    if args.done is not None :
        # mark the plan as done; the original retry referenced an undefined name
        # ("done") and always failed silently, so it now uses args.done as well
        subprocess.call(['setdone',args.done])
        try:
            subprocess.call(['setdone',args.done])
        except: pass
        print('host', args.host)
        if args.host is not None :
            try: os.remove(args.done+'.'+args.host)
            except: pass
| [
"[email protected]"
] | ||
22f6f5181a3fae5dcc7e8de760d03839b9c2c842 | 472ddd7186ce8495ab37aaa4cd6ec29ed38f5462 | /Tests/test_BioSQL_sqlite3.py | c282caf8dd0d7c434447a1dcb3dcd3f980f04bc2 | [
"BSD-3-Clause",
"LicenseRef-scancode-biopython"
] | permissive | EsamTolba/biopython | 66ef643d6d4602900971cc0c5cc1e70d77f17f57 | 120616cf0d28cb8e581898afd6604e5a2065a137 | refs/heads/master | 2020-06-25T00:52:45.827047 | 2019-07-23T06:54:14 | 2019-07-25T09:25:12 | 199,145,343 | 1 | 0 | NOASSERTION | 2019-07-27T09:39:19 | 2019-07-27T09:39:18 | null | UTF-8 | Python | false | false | 2,723 | py | #!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Run BioSQL tests using SQLite."""
import os
import unittest
from Bio import SeqIO
from BioSQL import BioSeqDatabase
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
# Import these explicitly to avoid flake8 F405 below:
from common_BioSQL import load_biosql_ini, check_config, compare_records, temp_db_filename
# Constants for the database driver
DBDRIVER = 'sqlite3'
DBTYPE = 'sqlite'
DBHOST = None
DBUSER = 'root'
DBPASSWD = None
TESTDB = temp_db_filename()
# This will abort if driver not installed etc:
check_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
if False:
# This is how I generated test file Tests/BioSQL/cor6_6.db
# which is test cross-checked with the latest bindings to
# catch any regressions in how we map GenBank entries to
# the database.
assert not os.path.isfile("BioSQL/cor6_6.db")
server = BioSeqDatabase.open_database(driver=DBDRIVER,
db="BioSQL/cor6_6.db")
DBSCHEMA = "biosqldb-" + DBTYPE + ".sql"
SQL_FILE = os.path.join(os.getcwd(), "BioSQL", DBSCHEMA)
assert os.path.isfile(SQL_FILE), SQL_FILE
server.load_database_sql(SQL_FILE)
server.commit()
db = server.new_database("OLD")
count = db.load(SeqIO.parse("GenBank/cor6_6.gb", "gb"))
assert count == 6
server.commit()
assert len(db) == 6
server.close()
class BackwardsCompatibilityTest(unittest.TestCase):
def test_backwards_compatibility(self):
"""Check can re-use an old BioSQL SQLite3 database."""
original_records = list(SeqIO.parse("GenBank/cor6_6.gb", "gb"))
# now open a connection to load the database
server = BioSeqDatabase.open_database(driver=DBDRIVER,
db="BioSQL/cor6_6.db")
db = server["OLD"]
self.assertEqual(len(db), len(original_records))
# Now read them back...
biosql_records = [db.lookup(name=rec.name)
for rec in original_records]
# And check they agree
# Note the old parser used to create BioSQL/cor6_6.db
# did not record the molecule_type, so remove it here:
for r in original_records:
del r.annotations["molecule_type"]
self.assertTrue(compare_records(original_records, biosql_records))
server.close()
if __name__ == "__main__":
# Run the test cases
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| [
"[email protected]"
] | |
28af29cc61c2977f5a3a64e7e807be08ae6a4b16 | 5410700e83210d003f1ffbdb75499062008df0d6 | /leetcode/distributeCandies-2.py | 149c757de3160a89b886ce61026bdb4a8fd7c015 | [] | no_license | lilyandcy/python3 | 81182c35ab8b61fb86f67f7796e057936adf3ab7 | 11ef4ace7aa1f875491163d036935dd76d8b89e0 | refs/heads/master | 2021-06-14T18:41:42.089534 | 2019-10-22T00:24:30 | 2019-10-22T00:24:30 | 144,527,289 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | class Solution:
def distributeCandies(self, candies: int, num_people: int) -> List[int]:
ans = [0] * num_people
r = 1
while candies > 0:
for i in range(num_people):
if candies < 0:
break
if candies > i+1+(r-1)*num_people:
ans[i] += (i+1) + (r-1)*num_people
candies -= (i+1) + (r-1)*num_people
else:
ans[i] += candies
candies = 0
r += 1
return ans | [
"[email protected]"
] | |
71ac1c1d19491024685edd2fcd786657244b6e4d | b023d1fe4e5941737af34d59d5d4b73a9a8798f7 | /UpdatedDataBase/CRUD.py | 69066695976c7e3522f6a7a9a3eece55fa7ea687 | [] | no_license | dbomb995/dbomb995.github.io | 40e6be3c8191e1047a0e9f0fd874bbfb9bfae84c | 1fa0cf95d3d99254f8135b32411dc150e0b4cdae | refs/heads/master | 2022-12-01T23:24:26.478278 | 2020-08-17T03:25:21 | 2020-08-17T03:25:21 | 275,789,774 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,899 | py | import json
import pymongo
from bson import json_util
from pymongo import MongoClient
#Opens connection to market.stocks MongoDB
connection = MongoClient()
db = connection['market']
collection = db.stocks
#Displays options for the user to select from when the menu() function is called
def option():
print("")
print("*" * 20)
print("1: Create \n2: Read \n3: Update \n4: Delete \n5: Exit ")
print("*" * 20)
val = input("Select an option: ")
return val
#converts JSON formatted strings to type JSON
def stringToJson(str1):
JSON1 = json.loads(str1)
return JSON1
#takes JSON formatted strings as input
def JSONinput():
str1 = input("Please enter JSON formatted text: ")
return str1
def create(input1):
#if create() function is called from the menu() function then the function asks the user for input and creates a file based on user input
if(input1 == ""):
#takes json formatted string and creates a document
try:
print("You have selected to Create a new entry in the database ", end = '')
result = collection.insert_one(stringToJson(str(JSONinput())))
return result
except:
print("an error occured during creation please try again")
return ""
#if create() function is passed a non-null argument creats file based on input
else:
result = collection.insert_one(input1)
return result
def read(input1):
#takes json formatted string and queries a document
#If read function is called with an empty string passed then asks the user for input to search for
if(input1 == ""):
try:
print("You have selected to query an entry in the database ", end = '')
result = collection.find(stringToJson(str(JSONinput())))
for x in result:
print (x)
return result
except:
print("an error occured during the read process please try again")
return ""
#if read() is called with a non-empty string searches for a file based on the passed parameters
else:
result = collection.find(stringToJson(str(input1)))
value= {}
for x in result:
value.update(x)
#print(x)
continue
return value
def update(input1, input2):
#takes json formatted string and updates a document
#if input1 and input2 are both empty asks the user for input
if(input1 == "" and input2==""):
try:
print("You have selected to update an entry in the database ")
print("Entry to be updated")
oldData = stringToJson(str(JSONinput()))
print("NEW DATA")
newData = stringToJson(str(JSONinput()))
result = collection.update(oldData, newData)
return result
except:
print("an error occured during the update process please try again")
return ""
#if input1 and input2 both =1 then function asks for a ticker symbol to search for and update
elif(input1 == "1" and input2 == "1"):
print("You have selected to update an entry in the database ")
ticker = input("enter ticker symbol to update: ")
tickerSymbolString = "{\"Ticker\" : " + "\"" + ticker + "\"} "
tickerJson = stringToJson(tickerSymbolString)
volume = input("enter volume amount: ")
volumeString = "{\"Volume\" : " + "\"" + volume + "\"} "
volumeJson = stringToJson(volumeString)
result = collection.update(tickerJson, volumeJson)
return result
#if input1 and input2 are both non-empty & != 1 then function will update based on JSON formatted strings
else:
result = collection.update(input1, input2)
return result
def delete(input1):
#takes json formatted string and deletes a document
    #if input is "1", ask the user for a JSON document describing the entry to delete
    if(input1 == "1"):
        try:
            print("You have selected to delete an entry in the database ", end = '')
            result = collection.remove(stringToJson(str(JSONinput())))
            return result
        except:
            print("an error occured during the deletion process please try again")
    #if input is empty, ask the user for a ticker symbol to search for and delete
    elif(input1 == ""):
        ticker = input("Please enter the ticker symbol of the entry you would like to delete: ")
        #build the query from the ticker symbol (previously passed through print(), which returns None)
        tickerJson = stringToJson("{\"Ticker\" : \"" + ticker + "\"}" )
result = collection.remove(tickerJson)
return result
#if input is non-empty and !=1 takes JSON formatted input and deletes an object
else:
result = collection.remove(stringToJson(str(input1)))
return result
def displayALL():
#Displays all entries in a database (Not Listed in menu)
result = collection.find()
for x in result:
print (x)
def menu():
#asks the user to choose an option
loop = "x"
while(loop == "x"):
val = option()
#calls create()
if(val == "1"):
print(create(""))
#calls read()
elif(val == "2"):
print(read(""))
#calls update()
elif(val =="3"):
print(update("1", "1"))
#calls delete()
elif(val == "4"):
print(delete("1"))
#exits loop
elif(val == "5"):
loop = "y"
#calls displayALL()
elif(val == "6"):
displayALL()
#catches values not previously listed
else:
print("Please Enter appropriate value")
#def main():
# menu()
#main()
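#Example (added sketch): the program is normally started by uncommenting the lines
#above; a typical query string entered at the Read prompt would look like
#  {"Ticker" : "AAPL"}
#which stringToJson() converts into the dict passed to collection.find().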
| [
"[email protected]"
] | |
4a6d1fd4f63b7607b2710377fbe8bc317dc6917f | 4382c60f18aba351a2e7cdab7ce2793c2d27717c | /Algorithm 191021/venv/Scripts/pip3-script.py | 212857ca75ce92bc01364279892e2ab932ceb2a8 | [] | no_license | vxda7/pycharm | e550b1db4cabe1a0fa03e140f33b028ef08bd4cb | ce29f682a923875b62a8c7c0102790eef11ab156 | refs/heads/master | 2020-07-03T11:27:27.807096 | 2019-11-15T08:50:32 | 2019-11-15T08:50:32 | 201,891,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | #!"C:\Users\student\pycharm\Algorithm 191021\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
aedb84bd3fcf3e2a5cc04325bb7819bf98adcc0b | 7b4b31a8b736db18995474ac109cfbee868cca7a | /interface/move_friend.py | 36831720c7d96876fa9a2b686da598a106cfbe27 | [] | no_license | morainwang/yqt | d727cf61c481943b61eef67af44ce91651d675e5 | 390e5f8b50df046f4c02be5461f3fc0be55cef34 | refs/heads/master | 2021-01-10T06:47:14.763655 | 2016-02-04T06:02:15 | 2016-02-04T06:02:15 | 51,055,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | #!/usr/bin/env python
# coding=utf-8
'''
Created on July 13, 2015
@author: wang
'''
import socket
import time
import msgserver_login
msgserver = msgserver_login.Login(587)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((msgserver[0], 8888))
def keepalive():
while (True):
reqs = 'POST /im/keepalive.do HTTP/1.1\r\n'
reqs += 'Content-Length: 0\r\n\r\n'
sock.send(reqs)
time.sleep(30)
body = '{\r\n'
body += '"uid":587,\r\n'
body += '"termtype":1\r\n'
body += '}'
reqs = 'POST /im/login.do?token={:s} HTTP/1.1\r\n'.format(msgserver[1])
reqs += 'Content-Length: {:d}\r\n\r\n{:s}'.format(len(body), body)
sock.send(reqs)
res = sock.recv(10000)
if (res):
print res
body = '{"uid":587,"termtype":1,"fuid":586,"oftid":1,"nftid":2}'
msg = 'POST /im/movefriend.do?token={:s} HTTP/1.1\r\nContent-Length: {:d}\r\n\r\n{:s}'.format(msgserver[1], len(body), body)
sock.send(msg)
res = sock.recv(10000)
print res
while (True):
res = sock.recv(10000)
if (res):
print res
| [
"[email protected]"
] | |
4ee1b7c42d9547199a0d99865d9381741c6123b1 | 2bfbe221abb5c3fa00379de05cb743353ce0188e | /{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/{{cookiecutter.app_slug}}/tests.py | c6b1d486e5929c9927a34056b6a066cded757bbb | [
"MIT"
] | permissive | OpenUpSA/cookiecutter-django-dokku | d4893c155cb2ced6be2cab838da81db14de01b54 | cfd2ae5ed55f726cebaba5c79a1bafc8001bdc03 | refs/heads/master | 2023-04-01T17:01:04.781614 | 2022-12-08T15:30:31 | 2022-12-08T15:30:31 | 233,432,629 | 4 | 6 | MIT | 2023-03-21T22:36:38 | 2020-01-12T17:43:07 | Python | UTF-8 | Python | false | false | 599 | py | from django.test import Client, TestCase
import html5lib
class IndexTestCase(TestCase):
def test_index(self):
c = Client()
response = c.get("/{{ cookiecutter.app_slug }}")
self.assertContains(
response, "index for {{ cookiecutter.app_slug }} in {{ cookiecutter.project_slug }}",
)
assertValidHTML(response.content)
def assertValidHTML(string):
"""
Raises exception if the string is not valid HTML, e.g. has unmatched tags
that need to be matched.
"""
parser = html5lib.HTMLParser(strict=True)
parser.parse(string)
| [
"[email protected]"
] | |
8c7d1a3a84798547b9863732dd3bde921ea2269c | 52c448f927aa4ef75226874dfbb0bc07b1702c4e | /training-scripts/zipporah/split_data.py | da0d0169a01837c0e3384e89515ccd262cea59da | [] | no_license | hiropppe/abc | 38c5e958d42cd2c57e889edd89cef72903ad18f8 | 25c05a97a86c2f9c6d40cb3a4a9418009ed38c53 | refs/heads/master | 2021-09-10T22:11:58.117828 | 2021-09-10T08:00:19 | 2021-09-10T08:00:19 | 220,932,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | import numpy as np
import sys
data = [l.strip() for l in open(sys.argv[1])]
np.random.seed(123)
indices = np.random.permutation(len(data))
bad = float(sys.argv[2])
dev = float(sys.argv[3])
bad_size = int(len(data) * bad)
dev_size = int(len(data) * dev)
bad_indices = indices[:bad_size]
dev_indices = indices[bad_size: bad_size + dev_size]
good_indices = indices[bad_size + dev_size:]
with open(sys.argv[4], "w") as good:
for i in good_indices:
print(data[i], file=good)
with open(sys.argv[5], "w") as bad:
for i in bad_indices:
print(data[i], file=bad)
with open(sys.argv[6], "w") as dev:
for i in dev_indices:
print(data[i], file=dev)
| [
"[email protected]"
] | |
f2d30d55e098e543d0f035e1dec0b2bf27b106d8 | cb625ff39b40e99f617e8654292023ef84d909c9 | /tags/views.py | 47fd51f86e89571cf92441582b039a4e050b045c | [] | no_license | adeepivt/learning-drf | c702e0cde5d24ad954fbb83b34097a8372146527 | 3dba082f867149bb9c521ef4ae1dcdef1efc71c4 | refs/heads/master | 2023-08-31T10:04:21.741233 | 2021-09-24T04:49:22 | 2021-09-24T04:49:22 | 401,654,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | from django.shortcuts import render, HttpResponse
from .models import Bookmark, Tag
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from .serializers import BookmarkSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework import status
class BookmarkViewSet(ModelViewSet):
queryset = Bookmark.objects.all()
serializer_class = BookmarkSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [JWTAuthentication]
def create(self, request, *args, **kwargs):
data = request.data
tag = data.get('tag')
        if tag:
            # fetch the existing tag, or create it (lower-cased) if it does not exist yet
            try:
                title = Tag.objects.get(title=tag)
            except Tag.DoesNotExist:
                title = Tag.objects.create(title=tag.lower())
            tag = title
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
| [
"[email protected]"
] | |
a415186085d91d6da918529b721765c1974f6a51 | e880191135eb932c0dcde3b02358f808c7087507 | /Geo_GDAL/Demo/blend2013.py | ef4521cfc6e2c0e4c891819ecf81c22543578974 | [] | no_license | THRILLERLEMON/ThrillerPython | e5a62148d1e80f1374313edf2e1604a12989300a | 35cbdef0c4a606c53d4dd8f38d532e4ab5c1b74d | refs/heads/master | 2021-06-28T07:22:56.781004 | 2021-01-06T07:46:17 | 2021-01-06T07:46:17 | 200,649,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | import glob
import os
import sys
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
import numpy
files = glob.glob(r'C:\Users\thril\Desktop\1bi25\**\lrdl.shp')
ogr.RegisterAll()
driver = ogr.GetDriverByName('ESRI Shapefile')
ds1st = driver.Open(files[0]).GetLayer('lrdl')
outds = driver.CreateDataSource(r'C:\Users\thril\Desktop\merge')
print('begin creat layer')
mergelyr = outds.CreateLayer('merged', geom_type=ogr.wkbMultiLineString)
print('in loop')
for f in files:
ds = driver.Open(f)
dslayer = ds.GetLayer('lrdl')
print(dslayer.GetGeomType())
mergelyr.Union(dslayer, mergelyr)
print('ok')
| [
"[email protected]"
] | |
c33207c35a3baea0264021dfb406a6a82389c6e0 | fd87dff3c3e82cbc3a3af316ac91ca3dac88a80a | /python_example2.py | 9e64d833e90c10c1e4c9e7b36d84b8a550625721 | [] | no_license | jwkrijnen/Code-Portfolio | 4a76180144cdbd422841a92da51368bdfbd86610 | 910f57146e3fdd924b7a104449ed4f2fa1580b69 | refs/heads/master | 2021-01-01T04:06:17.712243 | 2016-05-13T13:00:57 | 2016-05-13T13:00:57 | 58,719,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,712 | py | import os.path
import socket
import select
import time
import threading
import sys
from random import uniform
from python_example3 import RipPacket
from copy import deepcopy
UDP_IP = "127.0.0.1"
out_socket = None
triggered_update_blocking = False
triggered_update_queued = False
class Router():
#TODO Docstring
def __init__(self, ID, input_ports, outputs, timers):
self.ID = ID
self.input_ports = input_ports #List of input ports
self.outputs = outputs #Dictionary of neighbour information
self.routing_table = {ID: {'Metric': 0,
'NextHop': ID,
'RouteChangeFlag': False,
'Timer': 0}}
self.scheduled_timer_period = timers[0]
self.timeout_period = timers[1]
self.garbage_collection_period = timers[2]
self.garbage_bin = {}
self.timers = {}
#TODO: Functions to process routing table
'''Init Functions'''
#----------------------------------------------------------------------#
def readFile(file):
lines = []
with open(file) as open_file:
for line in open_file:
lines.append(line)
return lines
def processConfigInfo(config_file):
rawinput = readFile(config_file)
processed_input = []
for line in rawinput:
processed_input.append(line.replace(",","").rstrip().split())
if processed_input[0][0] != 'router-id':
raise Exception, "Could not find router IDs"
if processed_input[1][0] != 'input-ports':
raise Exception, "Could not find input ports"
if processed_input[2][0] != 'outputs':
raise Exception, "Could not find outputs"
router_id = int(processed_input[0][1])
if router_id > 64000:
raise Exception, "Router ID value is too large"
if router_id < 1:
raise Exception, "Router ID value is too small"
router_input_ports = []
for iport in processed_input[1][1:]:
if int(iport) > 64000:
raise Exception, "A router input port number is too large"
if int(iport) < 1024:
raise Exception, "A router input port number is too small"
router_input_ports.append(int(iport))
router_outputs = {}
for out in processed_input[2][1:]:
neighbour_info = {}
neighbour_list = out.split("-")
if int(neighbour_list[0]) > 64000:
raise Exception, "A router output port number is too large"
if int(neighbour_list[0]) < 1024:
raise Exception, "A router output port number is too small"
neighbour_info['Outport'] = int(neighbour_list[0])
if int(neighbour_list[1]) < 0:
raise Exception, "A neighbour has a negative metric"
neighbour_info['Metric'] = int(neighbour_list[1])
if int(neighbour_list[2]) > 64000:
raise Exception, "A neighbour router ID value is too large"
if int(neighbour_list[2]) < 1:
raise Exception, "A neighbour router ID value is too small"
router_outputs[int(neighbour_list[2])] = neighbour_info
router_timers = []
for timer in processed_input[3][1:]:
router_timers.append(int(timer))
if int(router_timers[0]) * 6 != int(router_timers[1]):
raise Exception, "First or second timer value is incorrect"
if int(router_timers[0]) * 4 != int(router_timers[2]):
raise Exception, "First or third timer value is incorrect"
this_router = Router(router_id, router_input_ports, router_outputs, router_timers)
print(this_router.outputs)
return this_router
def initialiseRouter():
'''Get config file name from stdin'''
file_read = False
while not file_read:
config_file = raw_input('Please enter config file name: ')
try:
open(config_file)
except IOError:
print("File doesn't exist, try again")
continue
else:
router = processConfigInfo(config_file)
file_read = True
return router
def initialiseSockets(input_ports):
'''Create sockets given from config file'''
global UDP_IP
sockets = []
for port in input_ports:
addr = (UDP_IP, port)
input_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
input_socket.bind(addr)
sockets.append(input_socket)
return sockets
'''Packet Functions'''
#----------------------------------------------------------------------#
def processRipPacket(socket, router):
data,addr = socket.recvfrom(8000)
my_bytes = bytearray(data)
packet = RipPacket()
packet.fromBytes(my_bytes)
router = updateRoutingTable(packet, router)
for destinationID in router.routing_table:
resetTimeoutTimer(router, destinationID, packet)
print("--------------------ROUTING TABLE UPDATED--------------------")
print("Source: " + str(packet.routerID))
for destination, information in router.routing_table.iteritems():
# print("Destination: " + str(destination))
print("Information: " + str(information))
return router
'''Update Functions'''
#----------------------------------------------------------------------#
def resetTimeoutTimer(router, destinationID, packet):
if router.routing_table[destinationID]['NextHop'] == packet.routerID:
router.routing_table[destinationID]['Timer'] = 0
def scheduledUpdate(router):
global out_socket
print "Update going out now"
timer_value = uniform(0.8, 1.2) * router.scheduled_timer_period
timer = threading.Timer(timer_value, scheduledUpdate, [router])
timer.daemon = True
timer.start()
for neighbour_id, neighbour_info in router.outputs.iteritems():
temp_changes = {}
for dest_id, dest_info in router.routing_table.iteritems():
if dest_info['NextHop'] == neighbour_id:
temp_changes[dest_id] = router.routing_table[dest_id]['Metric']
router.routing_table[dest_id]['Metric'] = 16
ripPacket = RipPacket().toBytes(router.ID, router.routing_table)
out_socket.sendto(ripPacket, (UDP_IP, neighbour_info['Outport']))
for dest_id, metric in temp_changes.iteritems():
router.routing_table[dest_id]['Metric'] = metric
def triggeredUpdate(router):
global out_socket
global triggered_update_blocking
update_info = {}
triggered_update_queued = False
print "Triggered update going out"
for dest_id, dest_info in router.routing_table.iteritems():
if dest_info['RouteChangeFlag'] == True:
update_info[dest_id] = dest_info
router.routing_table[dest_id]['RouteChangeFlag'] = False
for neighbour_id, neighbour_info in router.outputs.iteritems():
routing_info = dict((k, v) for k, v in update_info.iteritems())
temp_changes = {}
for dest_id, dest_info in update_info.iteritems():
if dest_info['NextHop'] == neighbour_id:
temp_changes[dest_id] = update_info[dest_id]['Metric']
update_info[dest_id]['Metric'] = 16
ripPacket = RipPacket().toBytes(router.ID, update_info)
out_socket.sendto(ripPacket, (UDP_IP, neighbour_info['Outport']))
for dest_id, metric in temp_changes.iteritems():
update_info[dest_id]['Metric'] = metric
timer_value = uniform(1, 5)
timer = threading.Timer(timer_value, triggeredUpdateBlockingEnd, [router])
timer.daemon = True
timer.start()
triggered_update_blocked = True
def triggeredUpdateBlockingEnd(router):
global triggered_update_blocking
global triggered_update_queued
triggered_update_blocking = False
if triggered_update_queued:
triggeredUpdate(router)
#ENTRY POINT
def processTriggeredUpdate(router):
global triggered_update_blocking
global triggered_update_queued
if triggered_update_blocking:
triggered_update_queued = True
else:
triggeredUpdate(router)
'''Distance Vector Algorithm Functions'''
#----------------------------------------------------------------------#
def reuseGarbage(router, destinationID):
router.garbage_bin[destinationID].cancel()
router.garbage_bin.pop(destinationID, None)
def collectGarbage(router, destinationID):
print "-------------------Collecting the Trash----------------------------"
print destinationID
router.garbage_bin.pop(destinationID, None)
router.routing_table.pop(destinationID, None)
def addGarbage(router, destinationID):
timer = threading.Timer(router.garbage_collection_period, collectGarbage, [router, destinationID])
timer.daemon = True
timer.start()
router.garbage_bin[destinationID] = timer
def expire(destinationID, router):
router.routing_table[destinationID]['Timer'] += 1
#Start garbage collection timer
if router.routing_table[destinationID]['Timer'] >= router.timeout_period:
if destinationID not in router.garbage_bin:
addGarbage(router, destinationID)
router.routing_table[destinationID]['Metric'] = 16
router.routing_table[destinationID]['RouteChangeFlag'] = True
processTriggeredUpdate(router)
else:
timer = threading.Timer(1, expire, [destinationID, router])
timer.daemon = True
timer.start()
router.timers[destinationID] = timer
def shouldRouteChange(current_destinationID_metric, received_destinationID_metric, nexthop, originatingID):
if (int(received_destinationID_metric) < int(current_destinationID_metric)):
return True
if (int(received_destinationID_metric) > int(current_destinationID_metric) and nexthop == originatingID):
return True
def updateRoutingTable(rip_packet, router):
originatingID = rip_packet.routerID
for destinationID, cost in rip_packet.rtePayloads.iteritems():
current_destinationID_metric = 16 #From the specification, if metric is > 16, use 16
nexthop = None
destinationID = int(destinationID)
if (destinationID in router.routing_table):
nexthop = router.routing_table[destinationID]['NextHop']
current_destinationID_metric = int(router.routing_table[destinationID]['Metric'])
received_destinationID_metric = min((int(router.outputs[originatingID]['Metric']) + int(cost)), 16)
if (shouldRouteChange(current_destinationID_metric, received_destinationID_metric, nexthop, originatingID)):
new_route = {'Metric': received_destinationID_metric,
'NextHop': originatingID,
'RouteChangeFlag': True,
'Timer': 0}
if destinationID in router.garbage_bin and received_destinationID_metric != 16:
reuseGarbage(router, destinationID)
router.routing_table[destinationID] = new_route
if destinationID in router.timers:
router.timers[destinationID].cancel()
expire(destinationID, router)
elif received_destinationID_metric == 16 and destinationID not in router.garbage_bin:
addGarbage(router, destinationID)
processTriggeredUpdate(router)
router.routing_table[destinationID] = new_route
if destinationID in router.timers:
router.timers[destinationID].cancel()
elif received_destinationID_metric != 16:
router.routing_table[destinationID] = new_route
if destinationID in router.timers:
router.timers[destinationID].cancel()
expire(destinationID, router)
return router
def main():
#Initialise
global out_socket
router = initialiseRouter()
in_sockets = initialiseSockets(router.input_ports)
out_socket = in_sockets[0]
scheduledUpdate(router)
while True:
try:
readable, _, _ = select.select(in_sockets, [], [])
for s in readable:
if s in in_sockets:
router = processRipPacket(s, router)
except select.error, v:
continue
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
91917fac81bb1808c922dffe9aeabecc53f78eb3 | c45c303164c8723483f328195f947eed93077752 | /View/sale/sale_base.py | 690afe26c9041fa4eb8c7c12c16b87e34a11c5e4 | [] | no_license | jinshang123/Py-store | 3a22d8042346509c9d644bfbd964f943ddb4c101 | 4ad71d60880a3b397d692c7555e17e946f6e4e53 | refs/heads/master | 2021-09-04T02:16:44.238087 | 2018-01-14T14:50:54 | 2018-01-14T14:50:54 | 114,749,342 | 0 | 0 | null | 2018-01-14T14:07:37 | 2017-12-19T10:05:02 | Python | UTF-8 | Python | false | false | 2,923 | py | import traceback
from PyQt5 import QtWidgets
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QFileDialog
from View.sale.excel_process import ExcelProcess
from View.sale.ui.ui_sale_detail import Ui_SaleDetail as SaleDetail
from View.utils import table_utils
class SaleBase(QtWidgets.QWidget, SaleDetail):
def __init__(self):
super(SaleBase, self).__init__()
self.setupUi(self)
my_icon = QIcon('img/logo.png')
self.setWindowIcon(my_icon)
self._signal_slot_init()
self.table_title = (
'订单号', '消费时间', '消费门店', '车牌号', '车主姓名', '联系电话', '车型', '操作人员', '消费项目', '数量', '单价', '小计', '总价', '单位', '备注')
self._init_table()
def _init_table(self):
table_utils.set_table_content(self.sales_details_result_table, [], self.table_title)
def _signal_slot_init(self):
self.details_import_button.clicked.connect(self._sale_detail_import)
self.details_export_button.clicked.connect(self._sale_detail_export)
def _sale_detail_import(self):
file_dialog = QFileDialog()
file_name, file_type = QtWidgets.QFileDialog.getOpenFileName(file_dialog, "选取文件", "C:/",
"Text Files (*.xlsx;*.xls)") # 设置文件扩展名过滤,注意用分号间隔
if file_name:
try:
excel_handler = ExcelProcess()
excel_handler.import_sale_detail(file_name, self)
QtWidgets.QMessageBox.information(self.details_import_button, "提示", "导入成功")
except Exception as e:
print(e)
print('traceback.print_exc():{}'.format(traceback.print_exc()))
print('traceback.format_exc():\n{}'.format(traceback.format_exc()))
QtWidgets.QMessageBox.information(self.details_import_button, "提示", "文件错误")
def _sale_detail_export(self):
start_time = self.start_date.text()
end_time = self.end_date.text()
excel_handler = ExcelProcess()
file_name = excel_handler.export_sale_detail(start_time, end_time)
if file_name:
QtWidgets.QMessageBox.information(self.details_export_button, "提示", "文件名为:{}".format(file_name))
else:
QtWidgets.QMessageBox.information(self.details_export_button, "提示", "暂无消费记录")
def _result_process(self, result_str):
        if result_str == 'restart':
            QtWidgets.QMessageBox.information(self.details_query_button, "提示", "与服务器链接中断,请重新运行软件")
        elif not result_str:
            QtWidgets.QMessageBox.information(self.details_query_button, "提示", "暂无消费记录")
        else:
            pass
| [
"[email protected]"
] | |
f839bce97bc08a27d763a4accd8677b3d4ea5af2 | 7a5b0a67cd5006abe4f2682fd5c0bde3c7347b45 | /download2.py | 062adc4f46d99f305b95eb77617b2929fdaa1c5c | [] | no_license | krishnamittal96/hackillinois | 6981540f8507f9d689c593c094bcb1dc4327be62 | 6ba09d665616d20f47299dd3b5279fcbde5834be | refs/heads/master | 2016-09-08T00:37:01.033357 | 2015-03-02T00:35:57 | 2015-03-02T00:35:57 | 31,477,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | import gdata.youtube
import gdata.youtube.service
import youtube_dl
import os
from subprocess import call
import sys
yt_service = gdata.youtube.service.YouTubeService()
def SearchAndPrint(search_terms):
yt_service = gdata.youtube.service.YouTubeService()
query = gdata.youtube.service.YouTubeVideoQuery()
query.vq = search_terms
query.orderby = 'relevance'
query.racy = 'include'
feed = yt_service.YouTubeQuery(query)
temp=open("songdata.txt","w")
temp.write(str(feed))
temp.close()
songname=""
i=1
while i<len(sys.argv):
if i == 1:
songname = sys.argv[i]
else:
songname=songname+ ' ' +sys.argv[i]
i=i+1
SearchAndPrint(songname)
infile=open("songdata.txt","r")
while infile:
line=infile.readline()
if line.find("watch")>=0:
n=line.find("watch")
code=""
count=1;
while(line[n]!='&'):
if count>=9 :
code=code+line[n]
n=n+1
count=count+1
break
infile.close()
os.remove("songdata.txt")
command="youtube-dl -f 141 -g http://www.youtube.com/watch?v="+code
temp=open("chrome.txt","w")
call(command.split(), shell=False,stdout=temp)
#temp=open("filename.txt","w")
#os.rename(code+".mp3",songname+".mp3")
#temp.write(songname+".mp3")
#temp.close()
| [
"[email protected]"
] | |
e0c2cdc1977ef458e236976eb45e9dc4724592ef | be0e1a61c2f79a5323ad613d3f9fff88538ec97f | /tests/python/test_stokes_rt.py | b0c990ed3648f1db6d8d415055ce0151e80376de | [
"MIT"
] | permissive | apusok/FD-PDE | 1686f1b57a3e1eb1376bfa02cab6ee4b1cbc9562 | a2472e1da2fa2e4faf4b77b4e4383d0ecd145cb7 | refs/heads/main | 2023-04-10T02:39:39.904772 | 2022-07-22T15:08:23 | 2022-07-22T15:08:23 | 516,734,168 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | # ---------------------------------------
# Run Rayleigh-Taylor instability test with particles
# ---------------------------------------
# Import modules
import os
import sys, getopt
print('# --------------------------------------- #')
print('# Rayleigh-Taylor Instability+Particles (STOKES) ')
print('# --------------------------------------- #')
# Input file
fname = 'out_stokes_rt'
try:
os.mkdir(fname)
except OSError:
pass
# Get cpu number
ncpu = 1
options, remainder = getopt.getopt(sys.argv[1:],'n:')
for opt, arg in options:
if opt in ('-n'):
ncpu = int(arg)
solver = ' -pc_type lu -pc_factor_mat_solver_type umfpack -snes_monitor -snes_converged_reason -ksp_monitor -ksp_converged_reason'
str1 = 'mpiexec -n '+str(ncpu)+' ../test_stokes_rt.app'+solver+' -snes_type ksponly -snes_fd_color -output_dir '+fname+ \
' -nt 101 -nx 21 -nz 21 > log_'+fname+'.out'
print(str1)
os.system(str1)
# DMSwarmViewXDMF() doesn't work with directory prefix
os.system('mv -f *.pbin *.xmf '+fname) | [
"[email protected]"
] | |
f170e5ebabb16f16c2787f9d5457e3cafb7b4bc5 | 65406cada87de2ea97d80214ad254eb8cdef7f78 | /flexiqueue.py | 4ce7885a9c142c32547c661aaa7fd3b05f9b0f08 | [] | no_license | varunkashyapks/Network | 440f7d6e68e1160e09c9dab47e31b333e9266a24 | 1092909dac9b9ebd1c3ce02e0091f26d0f51fed5 | refs/heads/master | 2021-01-20T08:43:20.966665 | 2017-05-03T20:17:42 | 2017-05-03T20:17:42 | 89,952,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | class flexiqueue:
capacity = 10
def __init__(self):
self.data = [None]*flexiqueue.capacity
self.front = 0
self.size = 0
    def is_empty(self):
return self.size == 0
def count(self):
return self.size
def first_element(self):
        if not self.is_empty():
return self.data[self.front]
def resize(self,cap):
old_list = self.data
self.data = [None]*cap
walk = self.front
for k in range(self.size):
            self.data[k] = old_list[walk]
walk = (walk+1)%len(old_list)
self.front = 0
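    # resize() realigns the circular buffer: the old contents are copied out
    # starting at the old front index and laid down from slot 0 of the new,
    # larger list, so self.front can be reset to 0 while FIFO order is kept.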
def enqueue(self,element):
if self.size == len(self.data):
self.resize(2*len(self.data))
rear = (self.front+self.size)%len(self.data)
self.data[rear] = element
self.size += 1
def dequeue(self):
        if self.is_empty():
            raise IndexError('dequeue from an empty queue')
answer = self.data[self.front]
self.data[self.front] = None
self.front = (self.front+1)%len(self.data)
self.size -= 1
return answer
def show_elements(self):
return self.data
Q = flexiqueue()
Q.enqueue(10)
Q.enqueue(12)
Q.enqueue(3)
Q.enqueue(156)
print Q.show_elements()
Q.dequeue()
print Q.show_elements() | [
"[email protected]"
] | |
cfa73a0feeceb3dba9e7da7f826e1a0333e089ee | 6774fb78abc8589f63f067cb0866e335b1e9b2a6 | /BioAsq6B/QaSimSent/predictor.py | 51ce33fc6236f324e6b53e954611e832430e7baf | [] | no_license | romanegloo/18-bioasq6b | 87f57b34771ae98672929d5b6b4aff38182daccc | 31e701961ba19c481556f0fb5f81b8d591d6254c | refs/heads/master | 2022-01-12T10:40:55.627692 | 2019-05-18T13:15:19 | 2019-05-18T13:15:19 | 114,382,674 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,607 | py | #!/usr/bin/env python3
"""QA_Proximity predictor; Classifies if the given text is relevant to the
question."""
from typing import Tuple, List
import logging
import spacy
from spacy.tokenizer import Tokenizer
import torch
import torch.functional as F
import re
import pickle
import prettytable
from .model import QaSimSent
from .. import PATHS
logger = logging.getLogger()
class Predictor(object):
"""Interface for computing QASim scores"""
def __init__(self, model_path=None, nlp=None, load_wd=False):
"""Set default properties and load a pretrained model"""
self.nlp = nlp if nlp is not None else spacy.load('en')
self.tokenizer = Tokenizer(self.nlp.vocab)
if model_path is None:
self.model, _ = QaSimSent.load(PATHS['qasim_model'])
else:
self.model, _ = QaSimSent.load(model_path, load_wd=load_wd)
self.conf = self.model.conf
self.add_words = set()
# Model setup according to the trained model configuration
if 'idf' in self.conf['features']:
logger.info('Loading idf file...')
self.idf = pickle.load(open(PATHS['idf_file'], 'rb'))
self.q_ex = self.q_f = self.q_type = self.q_mask = None
def get_qasim_scores(self, q, qtype, a):
"""Called by an interactive script"""
print("[Questions: {}]".format(q))
# Encode the question
self.set_q(self.sanitize(q), qtype)
# Batchify the candidate answers
batches, doc = self.batchify(self.sanitize(a))
if batches is None:
return [], []
predictions = self.model.predict(batches)
table = prettytable.PrettyTable(['Score', 'Sentence'])
table.align['Score'] = 'r'
table.align['Sentence'] = 'l'
table.max_width['Sentence'] = 80
for i, sent in enumerate(doc.sents):
table.add_row([predictions[i].item(), sent.text])
print(table.get_string())
return predictions, doc
def predict_prob_b(self, body, docid):
"""Called by QA_reranker; Run the model on a document body (ignore the
title, assuming that the title does not answer any question)"""
"""Feed in to the model and get scores in batch"""
results = list() # List of results by sentences
# A question must be given
if any(e is None
for e in [self.q_ex, self.q_f, self.q_type, self.q_mask]):
return [], []
# Apply the model on the body document
batch_b, doc = self.batchify(self.sanitize(body))
if batch_b is None:
return [], []
pred_b = self.model.predict(batch_b)
res = torch.sigmoid(pred_b)
# From body
assert len(res) == len(list(doc.sents))
for i, sent in enumerate(doc.sents):
entry = {
'document': "http://www.ncbi.nlm.nih.gov/pubmed/" + docid,
'text': sent.text,
'offsetInBeginSection': sent.start_char,
'offsetInEndSection': sent.end_char,
'beginSection': 'abstract',
'endSection': 'abstract',
'score': res[i].item()
}
results.append(entry)
return results
def set_q(self, q, qtype):
self.q_ex, self.q_f, self.q_type, self.q_mask = self._encode_q(q, qtype)
def _encode_q(self, q, qtype):
tokens = self.nlp(q)
text_lower = [t.text.lower() for t in tokens]
q_ = [self.model.word_dict[t] if t in self.model.word_dict else 0
for t in text_lower]
q = torch.LongTensor(q_)
q_f = torch.zeros(len(tokens), self.conf['num-features'])
if 'pos' in self.model.conf['features']:
# Feature POS
for i, t in enumerate(tokens):
if 'pos=' + t.pos_ in self.model.feature_dict:
q_f[i][self.model.feature_dict['pos='+t.pos_]] = 1
if 'ner' in self.model.conf['features']:
# Feature NER
for i, t in enumerate(tokens):
if 'ner=' + t.ent_type_ in self.model.feature_dict:
q_f[i][self.model.feature_dict['ner='+t.ent_type_]] = 1
if 'idf' in self.model.conf['features']:
if 'idf' in self.conf['features']:
for i, t in enumerate(text_lower):
try:
q_f[i][-1] = self.idf[t]
except KeyError:
q_f[i][-1] = 0 # ignore the tokens that are not indexed
question_types = ['yesno', 'factoid', 'list', 'summary']
q_type = torch.zeros(len(question_types), dtype=torch.float)
try:
q_type[question_types.index(qtype)] = 1
except ValueError:
q_type[3] = 1
q_mask = torch.zeros(len(tokens), dtype=torch.uint8)
return q, q_f, q_type, q_mask
def batchify(self, context):
if len(context) == 0:
return None, []
try:
doc = self.nlp(context)
except: # SpaCy tokenizer has some issues with certain characters
return None, []
batch_len = len(list(doc.sents))
max_doc_length = max([len(s) for s in doc.sents] + [0])
ft_size = self.conf['num-features']
c = torch.zeros(batch_len, max_doc_length, dtype=torch.long)
c_mask = torch.ones(batch_len, max_doc_length, dtype=torch.uint8)
c_f = None
if ft_size > 0:
c_f = torch.zeros(batch_len, max_doc_length, ft_size)
for i, sent in enumerate(doc.sents):
c_, c_f_, c_mask_ = \
self._encode_ex(sent.text, doc[sent.start:sent.end])
clen = c_.size(1)
try:
c[i, :clen].copy_(c_.view_as(c[i, :clen]))
except:
logger.error(sent)
raise
c_mask[i, :clen].fill_(0)
if ft_size > 0:
c_f[i, :clen].copy_(c_f_)
# Repeat the question tensors
q_ex = self.q_ex.unsqueeze(0).repeat(batch_len, 1) # batch x qlen
q_f = self.q_f.unsqueeze(0).repeat(batch_len, 1, 1) # batch x qlen x nf
q_type = self.q_type.repeat(batch_len, 1) # batch x 4
q_mask = self.q_mask.unsqueeze(0).repeat(batch_len, 1) # batch x qlen
inputs = (c, c_f, c_mask, q_ex, q_f, q_type, q_mask)
return inputs, doc
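    # batchify() turns one abstract into a single batch: each sentence becomes
    # one row (token ids, feature matrix, padding mask), and the question
    # tensors encoded by set_q()/_encode_q() are repeated once per sentence so
    # every question/sentence pair is scored in one forward pass.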
def _encode_ex(self, sent, tokens=None):
if len(self.conf['features']) == 0:
"""Run tokenizer only"""
if tokens is None:
tokens = self.tokenizer(sent)
ex = dict()
ex['context'] = [t.text.lower() for t in tokens]
c_text = [self.model.word_dict[w] for w in ex['context']]
x1 = torch.LongTensor(c_text).unsqueeze(0)
x1_f = None
x1_mask = torch.ByteTensor(1, len(ex['context'])).fill_(0)
return x1, x1_f, x1_mask
tokens = self.nlp(sent)
ex = dict()
ex['context'] = [t.text.lower() for t in tokens]
ex['pos'] = [t.pos_ for t in tokens]
ex['ner'] = [t.ent_type_ for t in tokens]
ft_len = self.conf['num-features']
ex_len = len(ex['context'])
# Index words
c_text = []
for w in ex['context']:
if w in self.model.word_dict:
c_text.append(self.model.word_dict[w])
else:
c_text.append(1)
self.add_words.add(w)
x1 = torch.LongTensor(c_text).unsqueeze(0)
x1_f = torch.zeros(ex_len, ft_len)
x1_mask = torch.ByteTensor(1, ex_len).fill_(0)
# Feature POS
for i, w in enumerate(ex['pos']):
if 'pos='+w in self.model.feature_dict:
x1_f[i][self.model.feature_dict['pos='+w]] = 1.0
# Feature NER
for i, w in enumerate(ex['ner']):
if 'ner='+w in self.model.feature_dict:
x1_f[i][self.model.feature_dict['ner='+w]] = 1.0
if 'idf' in self.conf['features']:
for i, w in enumerate(ex['context']):
try:
x1_f[i][-1] = self.idf[w.lower()]
except KeyError:
x1_f[i][-1] = 0 # ignore the tokens that are not indexed
return x1, x1_f, x1_mask
def sanitize(self, text):
if text is None:
return ''
# clean up the text before using a Tokenizer
text = re.sub('[\n?\']', '', text)
text = re.sub('[()<>/]', ' ', text)
text = re.sub('\s+', ' ', text)
return text
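# Minimal usage sketch (not part of the original module); the question and
# abstract strings below are made-up placeholders:
#
#   predictor = Predictor()          # loads PATHS['qasim_model'] by default
#   scores, doc = predictor.get_qasim_scores(
#       "Which gene is mutated in cystic fibrosis?", "factoid",
#       "Cystic fibrosis is caused by mutations in the CFTR gene. ...")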
| [
"[email protected]"
] | |
2b49a203f5f3fab8478211693e180d4aa7513f1c | 091155389673325cfe8b0da3dc64c113f1ded707 | /tests/configs/test_config.py | 1a90b597484431762d0251da6cde3e2baac90477 | [
"Apache-2.0"
] | permissive | Megvii-BaseDetection/cvpods | 7b7c808257b757d7f94d520ea03b370105fb05eb | 2deea5dc659371318c8a570c644201d913a83027 | refs/heads/master | 2023-03-22T00:26:06.248877 | 2023-03-10T10:05:26 | 2023-03-10T10:05:26 | 318,124,806 | 659 | 91 | Apache-2.0 | 2023-03-10T10:05:28 | 2020-12-03T08:26:57 | Python | UTF-8 | Python | false | false | 7,550 | py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# pylint: disable=W0613
import os
import tempfile
import unittest
import torch
# from cvpods.configs import configurable, downgrade_config, get_cfg, upgrade_config
from cvpods.layers import ShapeSpec
_V0_CFG = """
MODEL:
RPN_HEAD:
NAME: "TEST"
VERSION: 0
"""
_V1_CFG = """
MODEL:
WEIGHT: "/path/to/weight"
"""
# flake8: noqa
# TODO: fix
@unittest.skip("Tests don't compatible cvpods.configs")
class TestConfigVersioning(unittest.TestCase):
def test_upgrade_downgrade_consistency(self):
cfg = get_cfg()
# check that custom is preserved
cfg.USER_CUSTOM = 1
down = downgrade_config(cfg, to_version=0)
up = upgrade_config(down)
self.assertTrue(up == cfg)
def _merge_cfg_str(self, cfg, merge_str):
f = tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False)
try:
f.write(merge_str)
f.close()
cfg.merge_from_file(f.name)
finally:
os.remove(f.name)
return cfg
def test_auto_upgrade(self):
cfg = get_cfg()
latest_ver = cfg.VERSION
cfg.USER_CUSTOM = 1
self._merge_cfg_str(cfg, _V0_CFG)
self.assertEqual(cfg.MODEL.RPN.HEAD_NAME, "TEST")
self.assertEqual(cfg.VERSION, latest_ver)
def test_guess_v1(self):
cfg = get_cfg()
latest_ver = cfg.VERSION
self._merge_cfg_str(cfg, _V1_CFG)
self.assertEqual(cfg.VERSION, latest_ver)
def configurable(func):
pass
class _TestClassA(torch.nn.Module):
@configurable
def __init__(self, arg1, arg2, arg3=3):
super().__init__()
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
assert arg1 == 1
assert arg2 == 2
assert arg3 == 3
@classmethod
def from_config(cls, cfg):
args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
return args
class _TestClassB(_TestClassA):
@configurable
def __init__(self, input_shape, arg1, arg2, arg3=3):
"""
Doc of _TestClassB
"""
assert input_shape == "shape"
super().__init__(arg1, arg2, arg3)
@classmethod
def from_config(cls, cfg, input_shape): # test extra positional arg in from_config
args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
args["input_shape"] = input_shape
return args
class _LegacySubClass(_TestClassB):
# an old subclass written in cfg style
def __init__(self, cfg, input_shape, arg4=4):
super().__init__(cfg, input_shape)
assert self.arg1 == 1
assert self.arg2 == 2
assert self.arg3 == 3
class _NewSubClassNewInit(_TestClassB):
# test new subclass with a new __init__
@configurable
def __init__(self, input_shape, arg4=4, **kwargs):
super().__init__(input_shape, **kwargs)
assert self.arg1 == 1
assert self.arg2 == 2
assert self.arg3 == 3
class _LegacySubClassNotCfg(_TestClassB):
# an old subclass written in cfg style, but argument is not called "cfg"
def __init__(self, config, input_shape):
super().__init__(config, input_shape)
assert self.arg1 == 1
assert self.arg2 == 2
assert self.arg3 == 3
class _TestClassC(_TestClassB):
@classmethod
def from_config(cls, cfg, input_shape, **kwargs): # test extra kwarg overwrite
args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
args["input_shape"] = input_shape
args.update(kwargs)
return args
class _TestClassD(_TestClassA):
@configurable
def __init__(self, input_shape: ShapeSpec, arg1: int, arg2, arg3=3):
assert input_shape == "shape"
super().__init__(arg1, arg2, arg3)
# _TestClassA.from_config does not have input_shape args.
# Test whether input_shape will be forwarded to __init__
# flake8: noqa
# TODO: fix
@unittest.skip("Tests don't compatible cvpods.configs")
class TestConfigurable(unittest.TestCase):
def testInitWithArgs(self):
_ = _TestClassA(arg1=1, arg2=2, arg3=3)
_ = _TestClassB("shape", arg1=1, arg2=2)
_ = _TestClassC("shape", arg1=1, arg2=2)
_ = _TestClassD("shape", arg1=1, arg2=2, arg3=3)
def testPatchedAttr(self):
self.assertTrue("Doc" in _TestClassB.__init__.__doc__)
self.assertEqual(_TestClassD.__init__.__annotations__["arg1"], int)
def testInitWithCfg(self):
cfg = get_cfg()
cfg.ARG1 = 1
cfg.ARG2 = 2
cfg.ARG3 = 3
_ = _TestClassA(cfg)
_ = _TestClassB(cfg, input_shape="shape")
_ = _TestClassC(cfg, input_shape="shape")
_ = _TestClassD(cfg, input_shape="shape")
_ = _LegacySubClass(cfg, input_shape="shape")
_ = _NewSubClassNewInit(cfg, input_shape="shape")
_ = _LegacySubClassNotCfg(cfg, input_shape="shape")
with self.assertRaises(TypeError):
# disallow forwarding positional args to __init__ since it's prone to errors
_ = _TestClassD(cfg, "shape")
# call with kwargs instead
_ = _TestClassA(cfg=cfg)
_ = _TestClassB(cfg=cfg, input_shape="shape")
_ = _TestClassC(cfg=cfg, input_shape="shape")
_ = _TestClassD(cfg=cfg, input_shape="shape")
_ = _LegacySubClass(cfg=cfg, input_shape="shape")
_ = _NewSubClassNewInit(cfg=cfg, input_shape="shape")
_ = _LegacySubClassNotCfg(config=cfg, input_shape="shape")
def testInitWithCfgOverwrite(self):
cfg = get_cfg()
cfg.ARG1 = 1
cfg.ARG2 = 999 # wrong config
with self.assertRaises(AssertionError):
_ = _TestClassA(cfg, arg3=3)
# overwrite arg2 with correct config later:
_ = _TestClassA(cfg, arg2=2, arg3=3)
_ = _TestClassB(cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassC(cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassD(cfg, input_shape="shape", arg2=2, arg3=3)
# call with kwargs cfg=cfg instead
_ = _TestClassA(cfg=cfg, arg2=2, arg3=3)
_ = _TestClassB(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassC(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassD(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
def testInitWithCfgWrongArgs(self):
cfg = get_cfg()
cfg.ARG1 = 1
cfg.ARG2 = 2
with self.assertRaises(TypeError):
_ = _TestClassB(cfg, "shape", not_exist=1)
with self.assertRaises(TypeError):
_ = _TestClassC(cfg, "shape", not_exist=1)
with self.assertRaises(TypeError):
_ = _TestClassD(cfg, "shape", not_exist=1)
def testBadClass(self):
class _BadClass1:
@configurable
def __init__(self, a=1, b=2):
pass
class _BadClass2:
@configurable
def __init__(self, a=1, b=2):
pass
def from_config(self, cfg): # noqa
pass
class _BadClass3:
@configurable
def __init__(self, a=1, b=2):
pass
# bad name: must be cfg
@classmethod
def from_config(cls, config): # noqa
pass
with self.assertRaises(AttributeError):
_ = _BadClass1(a=1)
with self.assertRaises(TypeError):
_ = _BadClass2(a=1)
with self.assertRaises(TypeError):
_ = _BadClass3(get_cfg())
| [
"[email protected]"
] | |
41dcee6f352b6aeb4d60cf0a7e7cefc0c8539933 | 050ea6fa9d32fd293b37ff88e27260c4b441ef60 | /core/employee_lookup.py | 836f1838a6ead2a99c724e537ed37fd9709c1299 | [
"MIT"
] | permissive | hpasalar/Hackerwasi | eaa945ae8c74440bf60e865a86dcb4e4c80cbbf6 | 72f8463a8384cac08ff87f9a59816a08381b6916 | refs/heads/master | 2022-12-06T00:40:27.077837 | 2020-08-16T05:34:47 | 2020-08-16T05:34:47 | 289,902,665 | 3 | 0 | MIT | 2020-08-24T10:55:19 | 2020-08-24T10:55:18 | null | UTF-8 | Python | false | false | 829 | py | # -*- coding: utf-8 -*-
from core.LinkedIn import searchLinkedIn
from colorama import init, Fore, Back, Style
from terminaltables import SingleTable
warning = "["+Fore.RED+"!"+Fore.RESET+"]"
question = "["+Fore.YELLOW+"?"+Fore.RESET+"]"
found = "["+Fore.GREEN+"+"+Fore.RESET+"]"
wait = "["+Fore.MAGENTA+"*"+Fore.RESET+"]"
init()
def employee_lookup():
entreprise = input(" Entreprise: ")
city = input(" Ville: ")
print("\n"+wait+" Recherche des employés de '%s'...\n" % (entreprise))
linkedin = searchLinkedIn()
linkedin.search(entreprise, city)
found = linkedin.found
if found:
employee = linkedin.employees
TABLE_DATA = [
("Num", "Name"),
]
x = 1
for employe in employee:
TABLE_DATA.append((x, employe))
x += 1
table = SingleTable(TABLE_DATA, title=" LinkedIn ")
print(table.table) | [
"[email protected]"
] | |
9215430ed85c25b3937c5251aa1bad39a44402d8 | 5d751981646de202f81f61b9c9a7443453a64839 | /02 数据库/02 redis/03 python使用redis.py | 01947292507a86c2a522958b2d11e92bbbf5bdd1 | [] | no_license | jia80H/python-advance-learning | 4464d9499e32d0dfab4181f7c9aa41c2313382bf | b03bbd65d842507e85b0a8296794a051ea89b884 | refs/heads/main | 2023-03-14T07:54:33.471717 | 2021-03-07T00:23:44 | 2021-03-07T00:23:44 | 337,283,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | import redis
client = redis.Redis()
client.set('username', 'admin')
client.hset('student', 'name', 'hao')
client.hset('student', 'age', 18)
d = client.keys('*')
e = client.get('username')
f = client.hgetall('student')
print(d, e, f, sep='\n')
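# Note: redis-py returns keys and values as bytes by default; constructing the
# client with redis.Redis(decode_responses=True) would yield str objects instead.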
| [
"[email protected]"
] | |
3834848d95757726447869fd51bb62c81af6ec51 | b9120c5bc5aa5492de4631ace309a8c9333b704f | /Sesion_07/Ejem_Escritura_Archivo.py | 8442cf373b3d9d6fc64036f06651c8d00cfc1245 | [] | no_license | JuniorLlancari/Quantum-python | 02af8333d608f3ca6a3e11c3f8a52169efcc70a3 | ffd9abecfa78639d2e7c2deb641acc9baf8313b3 | refs/heads/master | 2023-03-31T00:44:25.350271 | 2021-04-01T16:34:56 | 2021-04-01T16:34:56 | 335,133,876 | 1 | 1 | null | 2021-04-01T16:34:57 | 2021-02-02T01:31:41 | Python | UTF-8 | Python | false | false | 107 | py | #CREANDO UN FICHERO
# with open('numero.txt','w') as f:
# for i in range(15):
# f.write(str(i)) | [
"[email protected]"
] | |
d0fb5a2dd10cecf5a018af51d1c99441fbf430df | 43d9e1c55f6f06453bf1836ab288b4a71d862228 | /ProyectoWeb/blog/admin.py | 3d67e3d4eb2a023adbe2e75adc10fbd9bd15242f | [] | no_license | billyordores/ProyectoWeb | ceb602689bb90a06aba29914456bf5fa15e474d5 | 64bfaa275f03bf4688c9ded3fdc58821da999ab6 | refs/heads/main | 2023-06-24T11:03:45.948784 | 2021-07-27T11:28:17 | 2021-07-27T11:28:17 | 388,111,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from django.contrib import admin
from .models import Categoria, Post
# Register your models here.
class CategoriaAdmin(admin.ModelAdmin):
readonly_fields=('created','updated')
class PostAdmin(admin.ModelAdmin):
readonly_fields=('created', 'updated')
admin.site.register(Categoria, CategoriaAdmin)
admin.site.register(Post, PostAdmin) | [
"[email protected]"
] | |
147b3f49376d8ffc70e3fd5db7ced898b8fad8f0 | dd7be0b5437a4e0afc19f8a71aace9ec65a43135 | /IMDBScrapy.py | d62c21a3a8e1d00b2a84db2d1dc97e3a1c2af26d | [] | no_license | Scipius02/IMDBVisualiser | dec9efee08c9c1d4dddc4bd103c0c0bb68f436c9 | 61265305664c2bcf407804bf9136ac7fe137a941 | refs/heads/master | 2021-05-18T10:54:00.404359 | 2020-03-30T06:14:34 | 2020-03-30T06:14:34 | 251,217,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,463 | py | from bs4 import BeautifulSoup
import requests
import re
import json
# get number of seasons of a tv show
def numberofseasons():
URL = 'https://www.imdb.com'
newURL = URL + '/title/tt0092455/'
page = requests.get(newURL)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find(id='title-episode-widget')
    seasonlist = results.find_all('a', href=True)
    # keep only the links whose href actually points at a season listing
    seasonlist = [a for a in seasonlist if "season=" in a['href']]
    numSeasons = len(seasonlist)
#print(numSeasons)
return numSeasons
numberofseasons()
def seasonratings(seasonid):
URL = 'https://www.imdb.com'
newURL = URL + '/title/tt0092455/episodes?season=' + str(seasonid)
page = requests.get(newURL)
soup = BeautifulSoup(page.content, 'html.parser')
episodelist = []
for episode in soup.find_all('strong'):
if "/title/" in str(episode): #this possibly could've been replaced by soup.get('href')
episode = re.findall(r'"([^"]*)"', str(episode))
episodelist.append(episode[0])
URLnew = ''
epinfoJSON = []
for episode in episodelist:
URLnew = URL + episode
#print(URLnew)
page = requests.get(URLnew)
soup = BeautifulSoup(page.content, 'html.parser')
epinfoJSON.append(json.loads(soup.find('script', type="application/ld+json").text))
for i, k in enumerate(epinfoJSON):
if "season" not in k.keys():
k["season"] = seasonid
if "episode" not in k.keys():
k["episode"] = (i+1)
k.pop("@context", None)
k.pop("@type", None)
k.pop("image", None)
k.pop("contentRating", None)
k.pop("actor", None)
k.pop("director", None)
k.pop("creator", None)
k.pop("description", None)
k.pop("datePublished", None)
k.pop("keywords", None)
k.pop("review", None)
k.pop("timeRequired", None)
k.pop("trailer", None)
#print(epinfoJSON)
return epinfoJSON
#seasonratings(1)
def showratingaggregate():
seriesaggregateratingslist = []
for i in range(1,numberofseasons()+1):
seriesaggregateratingslist.append(seasonratings(i))
#print(seriesaggregateratingslist)
return seriesaggregateratingslist
"""u = 0
for dict in seriesaggregateratingslist:
for doubledict in dict:
u += 1
print(u)"""
#print(type(showratingaggregate()))
| [
"[email protected]"
] | |
4606fa88ae7df1801ce384b62f6b18e650bd9d27 | 5ad585dcfd36707bc6bab8033b3844bdbc535f6b | /pdfread/__main__.py | 39880da1905946142240140e91773fe5d9eea053 | [] | no_license | wlizama/python-training | 09bbc9b2648edfcc7e177d49c9fa7d89309fa30e | 1ff70727d6f528d5f60d5af666c6d5afdd218736 | refs/heads/master | 2022-11-27T07:49:02.988278 | 2020-05-23T17:09:48 | 2020-05-23T17:09:48 | 122,639,831 | 0 | 1 | null | 2022-11-22T04:32:08 | 2018-02-23T15:40:14 | Python | UTF-8 | Python | false | false | 1,212 | py | import PyPDF2
def findFile(fullNameFile):
fileToFind = None
try:
fileToFind = open(fullNameFile, "rb")
except FileNotFoundError:
pass
return fileToFind
def execute():
_strRUCEmisor = "20123456789"
_strTipoDocEmisor = "07"
_strSerieDocEmisor = "F005"
_filePDFNumDesde = 1
_filePDFNumHasta = 999
for num in list(range(_filePDFNumDesde, _filePDFNumHasta + 1)):
strFileNameToFind = "%s-%s" %(_strSerieDocEmisor, str(num))
filePDF = findFile("W:\\DIR\\TO\\SEARCH\\%s-%s-%s.PDF"
%(_strRUCEmisor, _strTipoDocEmisor, strFileNameToFind))
if filePDF != None:
filePDFReader = PyPDF2.PdfFileReader(filePDF)
pageObj = filePDFReader.getPage(0)
fullTextPDF = pageObj.extractText()
strIni = "RUC: 20987654321"
strTextLimit = "SEÑORES: XXX"
strFound = fullTextPDF[len(strIni)-1:fullTextPDF.find(strTextLimit)]
strFound = strFound.replace(" ", "")
if strFileNameToFind != strFound:
print("strFileNameToFind: %s, strFound: %s" %(strFileNameToFind, strFound))
if __name__ == "__main__":
execute() | [
"[email protected]"
] | |
85b30b860deb61f064afb3ef616df3dde8e9ad89 | a86ca34e23afaf67fdf858df9e47847606b23e0c | /lib/temboo/Library/Tumblr/CreateChatPost.py | 347314b1976b8c41d3939a68880737b693241ed9 | [] | no_license | miriammelnick/dont-get-mugged | 6026ad93c910baaecbc3f5477629b0322e116fa8 | 1613ee636c027ccc49c3f84a5f186e27de7f0f9d | refs/heads/master | 2021-01-13T02:18:39.599323 | 2012-08-12T23:25:47 | 2012-08-12T23:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,701 | py |
###############################################################################
#
# CreateChatPost
# Creates a new chat post for a specified Tumblr blog.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class CreateChatPost(Choreography):
"""
Create a new instance of the CreateChatPost Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/Tumblr/CreateChatPost')
def new_input_set(self):
return CreateChatPostInputSet()
def _make_result_set(self, result, path):
return CreateChatPostResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CreateChatPostChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the CreateChatPost
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class CreateChatPostInputSet(InputSet):
"""
Set the value of the Conversation input for this choreography. ((required, string) The text of the conversation/chat, with dialogue labels (no HTML))
"""
def set_Conversation(self, value):
InputSet._set_input(self, 'Conversation', value)
"""
Set the value of the BaseHostname input for this choreography. ((required, string) The standard or custom blog hostname (i.e. temboo.tumblr.com))
"""
def set_BaseHostname(self, value):
InputSet._set_input(self, 'BaseHostname', value)
"""
Set the value of the Date input for this choreography. ((optional, date) The GMT date and time of the post. Can be an epoch timestamp in milliseconds or formatted like: Dec 8th, 2011 4:03pm. Defaults to NOW().)
"""
def set_Date(self, value):
InputSet._set_input(self, 'Date', value)
"""
Set the value of the Markdown input for this choreography. ((optional, boolean) Indicates whether the post uses markdown syntax. Defaults to false. Set to 1 to indicate true.)
"""
def set_Markdown(self, value):
InputSet._set_input(self, 'Markdown', value)
"""
Set the value of the OauthConsumerKey input for this choreography. ((required, string) The Oauth Consumer Key provided by Tumblr after registering your application)
"""
def set_OauthConsumerKey(self, value):
InputSet._set_input(self, 'OauthConsumerKey', value)
"""
Set the value of the OauthConsumerSecret input for this choreography. ((required, string) The Oauth Consumer Secret provided by Tumblr after registering your application)
"""
def set_OauthConsumerSecret(self, value):
InputSet._set_input(self, 'OauthConsumerSecret', value)
"""
Set the value of the OauthTokenSecret input for this choreography. ((required, string) The Oauth Token Secret retrieved during the Oauth process)
"""
def set_OauthTokenSecret(self, value):
InputSet._set_input(self, 'OauthTokenSecret', value)
"""
Set the value of the OauthToken input for this choreography. ((required, string) The Oauth Token retrieved during the Oauth process)
"""
def set_OauthToken(self, value):
InputSet._set_input(self, 'OauthToken', value)
"""
Set the value of the Slug input for this choreography. ((optional, string) Adds a short text summary to the end of the post URL)
"""
def set_Slug(self, value):
InputSet._set_input(self, 'Slug', value)
"""
Set the value of the State input for this choreography. ((optional, string) The state of the post. Specify one of the following: published, draft, queue. Defaults to published.)
"""
def set_State(self, value):
InputSet._set_input(self, 'State', value)
"""
Set the value of the Tags input for this choreography. ((optional, string) Comma-separated tags for this post)
"""
def set_Tags(self, value):
InputSet._set_input(self, 'Tags', value)
"""
Set the value of the Title input for this choreography. ((optional, string) The title of the chat)
"""
def set_Title(self, value):
InputSet._set_input(self, 'Title', value)
"""
Set the value of the Tweet input for this choreography. ((optional, string) Manages the autotweet (if enabled) for this post. Defaults to off for no tweet. Enter text to override the default tweet.)
"""
def set_Tweet(self, value):
InputSet._set_input(self, 'Tweet', value)
"""
A ResultSet with methods tailored to the values returned by the CreateChatPost choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class CreateChatPostResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. ((xml) The response from Tumblr in XML format)
"""
def get_Response(self):
return self._output.get('Response', None)
class CreateChatPostChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CreateChatPostResultSet(response, path)
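# Rough usage sketch (not part of the generated SDK file); the account name,
# app key, and all input values below are placeholders, and the method names
# follow the standard Temboo SDK examples:
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = CreateChatPost(session)
#   inputs = choreo.new_input_set()
#   inputs.set_BaseHostname('example.tumblr.com')
#   inputs.set_Conversation('Me: hello\nYou: hi there')
#   # ... plus the four OAuth inputs required by the choreography ...
#   results = choreo.execute_with_results(inputs)
#   print results.get_Response()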
| [
"miriam@famulus"
] | miriam@famulus |
fcee5c12f32936a9f042d97e4578c6bd4fc6b86a | 800985e7651360f37a3ec76b8934cf6b04c6080f | /app.py | 1c113df3fa9955ad6356d20718dab127be90975e | [] | no_license | Dikshitha0812/Api-alphabet-recognition | da5002e2b609980d2c7922d9b62753ad188e3bce | 67096b58f43932a304330464ca4e687524b486f3 | refs/heads/main | 2023-07-07T22:29:52.421535 | 2021-08-25T12:57:48 | 2021-08-25T12:57:48 | 399,819,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from flask import Flask, jsonify, request
from classifier import get_alphabet
App = Flask(__name__)
# defining the portal
@app.route("/predict-alphabet", methods=["POST"])
# writing function for predicting
def predict_data():
image = cv2.imdecode(np.fromstring(request.files.get("alphabet").read(), np.uint8), cv2.IMREAD_UNCHANGED)
image = request.files.get("alphabet")
alphabet = get_alphabet(image)
return jsonify({
"alphabet_predicted": alphabet
}), 200
# running the function
if __name__ == "__main__":
    app.run(debug=True)
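# Example request once the server is running (sample.png is a placeholder for
# any local image file; 5000 is Flask's default development port):
#   curl -X POST -F "alphabet=@sample.png" http://127.0.0.1:5000/predict-alphabet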
"[email protected]"
] | |
74f8bfe1b1693dba259cbdf2ebce62709b7f0027 | 462ffb47a6474a54ec1724c8385be93653b70b54 | /project/web/views.py | 2b0d2a717c18b77b020deac69e736cd1bcd6536e | [] | no_license | NikitinaKatya/tusur.refectory | 589699e85d6f5c2911b96f7243020af1c49de1c5 | cb720e346542e904d2298736c1d9dbe28da93d35 | refs/heads/master | 2020-08-09T12:18:26.498569 | 2019-10-06T12:22:18 | 2019-10-06T12:22:18 | 214,086,027 | 1 | 0 | null | 2019-10-10T04:24:38 | 2019-10-10T04:24:38 | null | UTF-8 | Python | false | false | 298 | py | from django.shortcuts import render
# from django.http import HttpResponse
def index(request):
return(render(request, 'main.html'))
def index_menu(request):
return(render(request, 'menu.html'))
def index_shop(request):
return(render(request, 'shop.html'))
# Create your views here.
| [
"[email protected]"
] | |
79d6f5512808ca89c83adc1044a871dfba8f6416 | 40a18752fe454bbf029f3f39b7e84cf4403d4977 | /Class Files/test_divisors.py | 2af9817eb7cfdeb98cd0f21a85efb6681f0ccd9a | [] | no_license | jcanning/Class_craftingQualityCode | 51e63b3c08371c1816db48ee3af0ce6813d10712 | 745b9cc4fa0a5b49dd6d64bf9f6243c24c76ea2e | refs/heads/master | 2021-01-01T05:35:45.441972 | 2013-05-01T21:53:09 | 2013-05-01T21:53:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | import unittest
import divisors
class TestDivisors(unittest.TestCase):
"""Example unittest test mehtods for get_divisors."""
def test_divisors_example_1(self):
"""Test get_divisors with 8 and [1 ,2, 3]."""
actual = divisors.get_divisors(8, [1, 2, 3])
expected = [1, 2]
self.assertEqual(actual, expected)
def test_divisors_example_2(self):
"""Test get_divisors with 4 and [-2, 0, 2]."""
actual = divisors.get_divisors(4, [-2, 0, 2])
expected = [-2, 2]
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main(exit=False)
| [
"JohnAllan@.(none)"
] | JohnAllan@.(none) |
9c28cbaa9c1ae9facf8736de25b788afaa065c74 | 764832026510a666640e44d7245d623dca6fe615 | /rtxs/test/test03.py | 90c26979e45f0cf446263e519d845c73c86927e6 | [] | no_license | MrWJB/rtxs | 58f2ec1f4918de19b046b2e994f2bd4ad68ac669 | 2f584566de9a5ddccbfaaeef7756ced4ecb2055a | refs/heads/master | 2020-05-18T06:32:09.946064 | 2019-04-30T09:55:57 | 2019-04-30T09:55:57 | 184,236,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | # -*- coding: utf-8 -*-
def count():
def f(j):
def g():
return j * j
return g
fs = []
for i in range(1, 4):
        fs.append(f(i)) # f(i) is executed immediately, so the current value of i is passed into f()
return fs
f1, f2, f3 = count()
print(f1())
print(f2())
print(f3())
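# Because f(i) is called inside the loop, each closure captures the value of i
# at that moment, so the three calls above print 1, 4 and 9.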
# Decorators
# def now():
# print('2005-3-25')
# f=now
# f()
# print(now.__name__)
# f=now
# print(f.__name__)
# Adding functionality dynamically while the code is running is what a decorator (wrapper) does
def log(func):
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
import functools
def log(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log
def now():
print('2005-3-25')
print(now())
revl = int('12345', base=8)
print(revl)
def int2(x, base=2):
return int(x, base)
print(int2('1000000'))
# Using a partial function
int2 = functools.partial(int, base=2)
print(int2('100101001'))
| [
"[email protected]"
] | |
9704ad9c7cc99b07e77c6e219ae310c687900ba3 | 048f12857291f0d4c4a5d05d3a13e15c13532997 | /dfs/유기농_배추.py | 54730f9bb8a12be784b59741ac63d9ab74f27bc4 | [] | no_license | jsg921019/algorithm_study | 1841d5b0d7b22a77e00d1a4bbca785acffeb0e4c | 16073349a4e969a8f6af6de76054d7179772738c | refs/heads/main | 2023-06-07T23:48:16.469076 | 2023-05-31T19:21:44 | 2023-05-31T19:21:44 | 366,484,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | # https://www.acmicpc.net/problem/1012
# solution 1 : iteration
import sys
sys.stdin = open('private/input.txt', 'r')
input = sys.stdin.readline
def sol(m, n, k):
n_worm = 0
farm = [[0]*n for _ in range(m)]
cabbage = [[*map(int, input().split())] for _ in range(k)]
for i, j in cabbage:
farm[i][j] = 1
for c in cabbage:
i, j = c
if farm[i][j] == 0:
continue
n_worm += 1
        farm[i][j] = 0
stack = [c]
while stack:
i, j = stack.pop()
for i_, j_ in [(i+1, j), (i-1,j), (i,j+1), (i,j-1)]:
if 0 <= i_ < m and 0 <= j_ < n and farm[i_][j_] == 1:
farm[i_][j_] = 0
stack.append([i_,j_])
return n_worm
for _ in range(int(input())):
print(sol(*map(int, input().split())))
# solution 2: recursion
import sys
sys.setrecursionlimit(3000)
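# The limit is raised because the grid can hold up to 50 x 50 = 2500 cells
# (per the problem constraints), and one connected patch can chain that many
# recursive calls, exceeding CPython's default limit of 1000.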
sys.stdin = open('private/input.txt', 'r')
input = sys.stdin.readline
def sol(m, n, k):
def recur(i, j):
farm[i][j] = 0
for i_, j_ in [(i+1, j), (i-1,j), (i,j+1), (i,j-1)]:
if 0 <= i_ < m and 0 <= j_ < n and farm[i_][j_] == 1:
recur(i_, j_)
n_worm = 0
farm = [[0]*n for _ in range(m)]
cabbage = [[*map(int, input().split())] for _ in range(k)]
for i, j in cabbage:
farm[i][j] = 1
for i, j in cabbage:
if farm[i][j] == 1:
n_worm += 1
recur(i,j)
return n_worm
for _ in range(int(input())):
print(sol(*map(int, input().split()))) | [
"[email protected]"
] | |
35a839d7db607c87500bdb9d971083a1259a382c | 7172aab86832e0d8697f9d746a0a6add90488e49 | /json_pathfinder.py | 15ce35a8cd7697bb5b01388a2c005e8e11f8a500 | [] | no_license | xKTSE/dailyprogrammer | a4f71b3481cf891393a57e673df988be733806ee | c5933dad3e93b50f1e284e86741273214ebaf64e | refs/heads/master | 2021-01-25T05:34:55.202585 | 2015-09-20T20:10:51 | 2015-09-20T20:10:51 | 41,971,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,871 | py | # https://www.reddit.com/r/dailyprogrammer/comments/3j3pvm/20150831_challenge_230_easy_json_treasure_hunt/
import json
import types
NumberTypes = (types.IntType, types.LongType, types.FloatType, types.ComplexType)
json_str = \
'''
{"dlpgcack": false, "indwqahe": null, "caki": {"vvczskh": null, "tczqyzn":
false, "qymizftua": "jfx", "cyd": {"qembsejm": [null, "dailyprogrammer", null],
"qtcgujuki": 79, "ptlwe": "lrvogzcpw", "jivdwnqi": null, "nzjlfax": "xaiuf",
"cqajfbn": true}, "kbttv": "dapsvkdnxm", "gcfv": 43.25503357696589}, "cfqnknrm":
null, "dtqx": "psuyc", "zkhreog": [null, {"txrhgu": false, "qkhe": false,
"oqlzgmtmx": "xndcy", "khuwjmktox": 48, "yoe": true, "xode": "hzxfgvw",
"cgsciipn": 20.075297532268902}, "hducqtvon", false, [null, 76.8463226047357,
"qctvnvo", null], [null, {"nlp": false, "xebvtnvwbb": null, "uhfikxc": null,
"eekejwjbe": false, "jmrkaqky": null, "oeyystp": false}, [null, 10, "nyzfhaps",
71, null], 40, null, 13.737832677566875], [true, 80, 20, {"weynlgnfro":
40.25989193717965, "ggsirrt": 17, "ztvbcpsba": 12, "mljfh": false, "lihndukg":
"bzebyljg", "pllpche": null}, null, [true, false, 52.532666161803895, "mkmqrhg",
"kgdqstfn", null, "szse"], null, {"qkhfufrgac": "vpmiicarn", "hguztz":
"ocbmzpzon", "wprnlua": null}], {"drnj": [null, false], "jkjzvjuiw": false,
"oupsmgjd": false, "kcwjy": null}]}
'''
def finder(tree, path):
if tree == None:
return
if isinstance(tree, basestring):
if tree == 'dailyprogrammer':
# print path
for p in path:
print p
print 'dailyprogrammer'
else:
return
elif isinstance(tree, list):
for items in tree:
finder(items, path)
elif isinstance(tree, NumberTypes):
return
else:
for key in tree:
finder(tree[key], path + [key])
if __name__ == '__main__':
js = json.loads(json_str)
for key in js:
finder(js[key], [key]) | [
"[email protected]"
] | |
45c74a8e2a9bf988eb230ab9dcf388081ff90d79 | a9e5027d61e493bbf134ccd76cdc7f71fb878c80 | /DawProyecto/web/urls.py | 942c3d28960934fd8a2299af57b7de1eb6894b05 | [] | no_license | rdsuarezb/Designer | 463e5777cfa94d193e03fa43d437d0b31bccaa60 | 62870a18920b4f0ca33615051413beb68e97d789 | refs/heads/master | 2021-01-10T05:03:39.167950 | 2016-01-26T03:37:47 | 2016-01-26T03:37:47 | 50,333,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | from django.conf.urls import include, url
urlpatterns = [
# Examples:
# url(r'^$', 'DawProyecto.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$',"web.views.CargarPrincipal"),
url(r'^Perfil/$',"web.views.CargarPerfil"),
url(r'^Documentos/$',"web.views.CargarDocumentos"),
url(r'^AreaDeTrabajo/$',"web.views.CargarAreaDeTrabajo"),
url(r'^login$',"web.views.login"),
url(r'^logout$',"web.views.logout"),
]
| [
"[email protected]"
] |