| column | dtype |
| --- | --- |
| blob_id | stringlengths 40-40 |
| directory_id | stringlengths 40-40 |
| path | stringlengths 3-616 |
| content_id | stringlengths 40-40 |
| detected_licenses | sequencelengths 0-112 |
| license_type | stringclasses 2 values |
| repo_name | stringlengths 5-115 |
| snapshot_id | stringlengths 40-40 |
| revision_id | stringlengths 40-40 |
| branch_name | stringclasses 777 values |
| visit_date | timestamp[us] 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 4.92k to 681M, nullable (⌀) |
| star_events_count | int64 0 to 209k |
| fork_events_count | int64 0 to 110k |
| gha_license_id | stringclasses 22 values |
| gha_event_created_at | timestamp[us] 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | stringclasses 149 values |
| src_encoding | stringclasses 26 values |
| language | stringclasses 1 value |
| is_vendor | bool 2 classes |
| is_generated | bool 2 classes |
| length_bytes | int64 3 to 10.2M |
| extension | stringclasses 188 values |
| content | stringlengths 3-10.2M |
| authors | sequencelengths 1-1 |
| author_id | stringlengths 1-132 |
82908ae8ac24e79217bf0b66161e59606ee3b4f4 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /dataexchange_read_2/revision_get.py | aa8e9858a1a6eee389251c26211af7da94e53e10 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import execute_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dataexchange/get-revision.html
if __name__ == '__main__':
"""
create-revision : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dataexchange/create-revision.html
delete-revision : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dataexchange/delete-revision.html
update-revision : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dataexchange/update-revision.html
"""
parameter_display_string = """
# data-set-id : The unique identifier for a data set.
# revision-id : The unique identifier for a revision.
"""
execute_two_parameter("dataexchange", "get-revision", "data-set-id", "revision-id", parameter_display_string) | [
"[email protected]"
] | |
ed2dee8a9a297a14b1e6a0827a7ecca5e8a197c7 | f3553f36a248d5e2a30713af68dd714df90953d7 | /kuaishou/1.py | 51178e065731b5fe3e9606a854b3219244ac41fe | [] | no_license | Mrzhouqifei/offfer | 8a699653850cf6cc91ed5a622ad166fd61b8e294 | 4c73e7a591e79348471e00272dcb8e1b5cc6d7cb | refs/heads/master | 2023-04-09T05:58:49.858037 | 2020-12-30T06:13:52 | 2020-12-30T06:13:52 | 298,285,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | s = str(input().split())
# match: completed "()" pairs, left: unmatched '(', right: unmatched ')'
match, left, right = 0, 0, 0
stack = []
for x in s:
if x == '(' or x == ')':
stack.append(x)
while len(stack) > 0:
t = stack.pop()
if t == '(':
if right > 0:
match += 1
right -= 1
else:
left += 1
elif t == ')':
right += 1
print(match, end=' ')
print(left, end=' ')
print(right)
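# Added worked example (illustration, not part of the original solution):
# for the input "(()" the stack unwinds to 1 matched pair, 1 unmatched '(' and
# 0 unmatched ')', so the program prints "1 1 0".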
| [
"[email protected]"
] | |
c2b218be5ab2b6c61f063656e3d0cc3fad868684 | 0fd49b4779351c68bbe51ee978939f39c8e57d7c | /400-1000/412-Fizz Buzz.py | d10f822657cd03fff2f4df88968aae90c1ba0e31 | [] | no_license | jia0713/leetcode | 8f632b96d0bc900cf4357ab1b8affd6068964dec | 13565941f16c74d32124020285ce887a4cb31b27 | refs/heads/master | 2023-06-17T20:41:47.185832 | 2021-07-16T09:42:39 | 2021-07-16T09:42:39 | 247,866,418 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
res = [0] * (n + 1)
for i in range(n + 1):
res[i] = str(i)
for i in range(n // 3 + 1):
res[3 * i] = "Fizz"
for i in range(n // 5 + 1):
if res[5 * i] == "Fizz":
res[5 * i] = "FizzBuzz"
else:
res[5 * i] = "Buzz"
        res.pop(0)  # drop the placeholder at index 0 so the answer covers 1..n
return res
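# Added usage sketch (not part of the original LeetCode submission):
if __name__ == "__main__":
    print(Solution().fizzBuzz(5))  # ['1', '2', 'Fizz', '4', 'Buzz']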
| [
"[email protected]"
] | |
19a3e8991f3df85a01b43303066f119c19e1c908 | 87d93aa41de884cbaf8d3a4d7131a4ffd090c0bc | /mysite/mysite/settings.py | 10ae920d99b6fdd8f27a43baeb49bf1f6bcc401d | [] | no_license | Ryoung27/Django-Pract | bb99606a91da65788fb779a4216302398c6d0c8a | a29942ba4ec20ba259f06dc6696db47bd7f3eb3c | refs/heads/master | 2020-03-23T11:29:27.735366 | 2018-07-19T01:05:34 | 2018-07-19T01:05:34 | 141,506,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7d_nt#1q!)(3w!+1ufgrglz4g$*c6zb5ath3hq1ytyt^nh4of2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
af2e4c746c1d9621db5b7db5b430222178b55234 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/ExpanseV2/Scripts/ExpansePrintSuggestions/ExpansePrintSuggestions.py | b20540e9e643c26375a3bd4f86a41fbb04bfdb83 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 20,407 | py | import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
"""ExpansePrintSuggestions
"""
from typing import Dict, Any
EXPANSE_LOGO = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAeIAAADFCAMAAAC/6QGrAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAJPUExURUdwTO"
"nr7vn6+vv8/P39/fDx8fH38/7+/v7+/v////r6+/n7+v7+/v/+/v7+/v39/v3+/vf4+f39/fz9/P39/f7///z8/Pv499jd4f39/ff4+f39/klXbNbb3////6"
"y1vv///+rs7v7+/srP1fv8/OHl5+Hk5vv7+/P09u7x88fO1Pv+/c/U2fz8/Pz8/Ons7ba9xfDy9IGOnPj5+vv7/J+ps73Eyt7i5fj5+ebo6s7V24+bpvPw8O"
"zu8L7Gzs7U2ra/yKm0vtDV18XL0HiGlG9+jba8xG5+jj5TZvDy842YpMLJ0LrBx+Xp7bG5wK+4wp+qtd7j5zNIXr3EypWhrtLX24SRoP3u7lBidqartWx7i+"
"Dj53+LlImWoezv8X6MmZGeq9XY3JagrFxsfoiVo4aToKOrs+/w8fvT0frS0jZKYUxgcamyuv3w7ytAV4+bphAnQjtPZS1CWkdZb5ikrSA1UPdwbB40Tx0zTi"
"E2URsxTRowTCI4UhgvSh41UBYtSRMqRhAoRD9TaSU7VDdLYv///y5DWw4lQio/WNTa4FRmeQohPzJHXkpdcUZZbniGlXKBkWZ2h1prfU9hdTxQZkJVazlNZf"
"doZPdsaLe/yMrR2PL092Fxg/z+/+fr78/W3K+5w/b5/H2KmW18jOHm652ptIiVooyZpcDI0KOuuPhuauzv8pGdqZeirwMcOdvg5aeyvISRnvZ5df/w7/eAfv"
"/MyviLh//W1P/j4vZzb/menPiVkvZkX/67uPyzsf7DwQAQMPZva/atqv+qp/uqqRXzhNQAAAB1dFJOUwA0DRbMAwEGSx6LCY+mnnt1SJcQZVVfQFPt6oIF+G"
"n99PovT/f6YNb36/0n+7C5i/h2/jnF9vvu4Vbh96ih6u7g5ZJf+fRQ3BXbXfei8oTx4uKVbeGs2u6ZDZV2FfrWo+hq9u3nlJq39S/U1NTe1PD599Tk7yu+2j"
"kAAB1BSURBVHja7d3ndxTHtijwPhLdIyQjIRSJAoRkEMZkTLDBmOCHsQ1+OPs5huPj9+59595331aFjjM9SZOjcs6yEtngdA73nD/sfpDAYPX09My0wl2r6p"
"PXMt1T+OddXdVdtTfHrWZzrLPvXgInCIJwzNMzPT3aMjUe6w1EXG6v3+8PhVzhQDw2cXOgf3q6Z0gQBEEQbPxLlKznWFt+YoEXjtV919/SNRYNuJ1zPk2WRY"
"wwQoAQQhhjUVS1uTkxlRie6hro79sj8DzPiP+7EAu8IBzwjDZEExGv6lQpIgQMGyEIU9WHQ+HEka7pvs02KTPi5SXmeX6Dp/LNcbcuIYzAWkNY0XXXbI89yo"
"x4OYl5vtzT0x0lQfUZQgUhACCPo3nhHxBSnv1D6px3Yr5nS8HKjHj5iHl+U8+JI+6gE/8enwrCGCNZlrztkUg4GQgEAoFkOJJ2exVVRBiB8nuoE+wLRmYme7"
"YUhsyIl4eYdzj40oGoi2r0CS9CSFE1f3Lk05uHTrRsb2ratWvv3r17p3c1Nc2/0HLi0PFb8TA4KULod2bRScK35k86HDwjXlPEvKPUMzQYRvT3+AVJp3L64t"
"Vde/b07c5w2c49e3p2NVwM/ZskPTVgU0oCA56TDgcjXjvEfKmnfzYUlB9HLxA95E0P76qzOge/eink9urSk2hW5yJdQ8/nicyIbSfmHQdHx0M+cWEyhRFG7s"
"TFhj2Vud2k6fhIwIsWZ+GEqD7XWH9+yIzYXmLewVeMRr3aYgRjJOuJ4y/sLMnnx/e2fBWh8HippWqp8f51Dka8ysR86aYbs7qGAAAURZEU95k9NQV0oGjvJV"
"1/PMumamjqs7IyByNePWK+bGPfmDeIF5a5yB9KHtpZU2AXqnZ+lQ5JsDBvE4OuSc+6Mka8WsT8Rk9XWFMJACCM9cjFl2o5rvAPCsU7D424JLSArMm9A8eKyh"
"jxqhDzh+fjSEMEABQRBab2VtvVjeKd28+k6MJDmWr6cP+xMka88sR80Zc3vSpeWCUpkYadO2ztyfqdX4UWF1FUdQ2+mEMgM2JbiPmi6v64UwMAQKC7zmzeYX"
"tf1u+95NYxAiDgpMPTL5Yx4pUk5nd4pkJBRQJAVEp/umUjJyxDb/a/FXMTEQAkMZie/K6qjBGvGLGjuj8uaxIAYNF78a2Ny9Wd/ZvPfi1RDEBAk470WAxkRl"
"w4saO5O+1DBEDBSmB7xXJ2SNh/1YUxApBELdF2oIgRrwQxX3XhJg4SAgpWUkf2VSxzl3a/FQthDCDhoLfru6oiRrzsxHztUFSWJQBM/b0tFcvfp92bjyclqg"
"Ahmj7RZyGQGXFhxHxzW0CTCQAW289s2boivdq975JXRgCgiSOj2Y0ZcUHEjlOTIR8GUDAKXy1doV4J3J+utWOkABGd4YENRYx4+YgdxfVdXo0AYOof2V66gh"
"07ei6gUwUI1tIt2YwZcQHElZ4xvwYLg/TJ0hXt2dF9w14RAcGaa/LlIka8TMR84yzWCAAVI4eeW5aXHSatbvNf20UMBDm9g+bGjDhvYkfjmFMkABQFWp5bhc"
"4dPZumGIA49QZTY0acJ7Fj/eUxmSoAVI9vXw1hTqg7F0BUAdD8g68UMWK7iYWjngmqAiDZP7xr0+r0Tqh7KY5ERJDTa2bMiPMj5htnZI0Akv23Tm5atf5V7o"
"shWSHIZxbHjDgvYr7xpiZLgET/V5tWs4OVm6NERERy+rsyGjPifIj5+kEkAyDZO/vd6vawcktUEhGA5p7cVMSIbSPm6ye9mgKK7P30Svkqd7FkX1SSQaJapH"
"tTFSO2iZivb/E6JVBU/5lXVr+PJfuiICuEOsNtxsaMOGdivmTerRFQRH3mxTXQR6FkSwyJQKiW6Dc0ZsS5EjtKhhJOBUDUh/eWc2vC+KU4wiCpNNq3lREXTi"
"zs90RVDEBRvKl8bfSSLzmXEDEQTZ86WMWICybm66eQDIDFwPbyNdPPU2cjKgbkdHc/V8WICyTmS9oklQAWI1fL11BHT51PyYggX3h06eOYEedE7CjpCfuAKN"
"TdUL6menrhC6+IJFUb6atgxIUQC+s9MR8QoP7ZY2usqzfGJQySRsaWPI4ZcS7EjspBggCwEt+1YY11tfZcgmIgmqvtuWJGnD/x+p6UBoDFyAvla66vNddcKp"
"JEZ3xoIyPOm1i4HA0qBInuaysbw7t3796f/U/Vf+GVkeRU/jhUM2LrxI7KLhUDUH1m3Ur6fl/p8Xg++P7777M48zfGCVWIzz3w7FDNiK0TF0+7NQCEEyv3IN"
"79fd2de7/+9NNPP/326707H5gjV58OUCRRZ+/QDkacF7GjcVhUQMGpQxmEi3f/ybjtLsrr14XddZ4f/3H30S8dHR0dv/zy8P4/H/yrGbJw4d2QiIjmv/nMUM"
"2ILRMXz/tFBFS5dSXDnz7saek2ai1DFXkZ13l+/On2Lx2PWhfaD7/8cvufd/7VLJDfjmIK4Ez2Pz1UM2KrxHxjXEWAxGRThiAWSk7Hve529x9buzcxmo9xne"
"c/b//S0fpUe9jRcf9vZsbVZyMyIqoy7tnBiHMnLu6mIoDsP5TxQSw0Xg/N+Zy+PzRn0Nk7nbtx3Z2fHz0D3Nra2vpDx8PfzIybv/DLhATTA6XFjDhn4ssRFQ"
"Ar0edN/nzjdbdMpCVNpNG+iqqcfpmvu3O/o7N1aets/dXM+O0YwoT6RjzVjDhHYkfloBMRRF0vmM6mvxzTZYN08U59xpNbAj3e8/OTh/CzrcPUuPZ8O0V60D"
"u5tZgR50ZcNRTCCmD91vPmr5iuRBXRIE2809vwSS5DtVD3T8MYbm1tbe28fe/5TMaC8PawhAE5433VjDgnYkf9BMUAOLU9y5K45J24TJeGMXKGJg/nYCw8eN"
"iRQbj1Uef9O5k/guw43y4C0UJdT8KYEVsiLh5yYQSYTJzMdknN6V4ZLzUGX7ptY5n1IL6fKYZbWx91Pvx13e6Ml749DBiQOuKpZcS5EH80hTEossvCe62Ssw"
"EDY8BaoseqsVD54OEPGYlbf+j82SyMX03JSPKlnjyNGbEVYkdjUkWA0XErH5jqX42oS2u5EFmLDVk0dnh+fvgoM/Gjjtv3TPZ+vjaBREl0Dj8OY0Zshfijbh"
"kDouHPLF1V/25INZhWq+pw30ZrS6c7d02EW1s7O347ttskjNMikoKRtsUwZsQWiPn6pIyASmNbrF32yZhfXhrHkoZmPEctjdM/mo3Tra2dHWYjNffaOKEgar"
"cWw5gRWyAuG5UxAHZtt/iFqfbKEclg6SQ5vV2HLQzVvOdvnZ1mxI867/5oMlLvOJ+iSJ8Lj9YwYovE/KleigDp0ZNWr6t5JwbU6BWIa97C45jv+63TNIpbzR"
"/G296OSYjI4tjBYkZsjbhq2o8AcOiE9d08Nad7DZbHgIKB6ezGfN9//mBO3Nn6N7OzNl9+E6IgBXv7ahixNeJtMzoCwMl91i/ka04HVIPlMQ3GhzY6CifuNC"
"Xmvg1QhWjuE1uLGbEVYr4+gRBgcjynLXk1r4YNlseS7Iz1bSzK9izOSvzQnPjG67oIonjLU8OIrRAXjfopAA1dyenSbfXXXaLBtFql457dWYhf/NWc+FGH6X"
"SL47iPXTKAL9FfwoitEG+bpRSweGtLbtcKF973Gk2rVb2hOttQfc98Rv1Dx/0H5sRvxjCWfO7JrcWMODsxfypAEVCU+ym118Yloy+LPvfAYXNj/sHtLOvinz"
"4wz5m7412/TER13FPDiLMTFw1RCogG9uV8de07MWQQxxBMz5tPuRyen80fxY9+PZDlt78NUKzPJXoYsQXij2ZlCpjObsn5aqH2nREDY0Kdgf6NZsVq+QO/tZ"
"q9wey4+yDLkSrhtWGFSr5QdyUjzk68LU0BkHdycx7X155NYIOlE9biWabVd8zeYHZ2ZhunOa58yk8RCs6WFjPibMT8BUUEgGRTXvm1ms+HDd5WA9WGPevNw9"
"hkvtWZbT7NcUL1qxGE9L/HPTWMOBvxR4MKBoXETuZ3h+braaPlsUrHPjGtK/7y/Y7MT+J/fpC9QMGf4xKSfKmBSkacdaAOIADFP7U5z1s0v9tu9PVY0ycPmx"
"gL3IPbHRk/M31goUzQexN+DEgbKy2uZMSmxMKFFAZA3u1550E0Xh6DFmrbaJLC2nHg3qPODGviv1gpBFX9bogCCUY9JXWM2JSYb/NiAJTal/9N3rslGXyRAF"
"+y38yY2/C32x0/GEy1rAlzwscRDMSZ6K9kxFkG6hkdgUKie/O/SfU7cWpgrAQTPabT6g337nZ0/DGEH/30F4vF3N4ckRCoqe6jjNicuHYEECDxRCFJxatPJ4"
"y+LGJn3FNsdt2GB/94+Evno6eAO+7++oFFYaH5dT8FWW8orWbEZsTPewKAAMv7CrpN9dkkRUbL44lPzBe3L//48+2Ojs6F1tF59x9/Kbdc7Ev4MCQSiic85c"
"UM0oy4zaUA4FRhxFzt+bTBVyeQaddhLhvyP366f/f27dt37//8aw7AHMd9nqYKUmPTmxixKfGkHwOisS2F3Uf48gu30dLJ6W3JNu6Wb3r5wY/37t378c7e8t"
"zK9b0ZR1gKJkYZsTlxA6aA1ME9hd7py29CRq+5NPdANmOBKy8v31ReXp5rPcaXxxGV5iJLkjSx9izxBGBAPhtKuFx43W+4PHaNLle549r3JVHyuSdLGbEp8Y"
"iCAOsvFX6rbe/FJINzMODszb6ZK8+nw4dukah6AyM2Jb6SxAho5C0b7rXjXMJgWg1Yi3qWh5j7PEwBa4tbbVnLQPyZVwRF7N1nx82qz4ZVo2m1PHN4eaL4zw"
"mEpLlxRmxOjEVA8vAWW+6241raYOMtUHlweR7Hb8YR1v8+/BojNmn8aYUCUmftIeaqj6cMl8d697IYvxfFVP97jBGbRvG/SBSQdnOzTfdr/saNFYMzi+n+im"
"V4Hr8yjKk01/vnIgZpEsXv6xgQHrSLmLtxyWswVCtawLMMvX9lXKJSMPDt/2KQJiud13UMyH/CNmLuja91o6WTL7YMU66XZ/yYOMMfM2JTYoIBhVpsI9627Y"
"3/IAbGJDhmv/GGsRACLcKITUX+D0GAUgObbbzlqwlsMOVC8mCF7cQ33YhoqQ8ZsSmxhABcbTttvOeOQxGjVyDUP2C38YbBdgSy+8P/wSBNiP83QYDS2239j1"
"/9VQopBm+rU/0VdhOnFBBD/5MRmxMrgMMv2XvXU4ZTLikYmN5q6+8cmEwpQL2M2JT4/ylgP/Eb/6EbjNQgBXvtNa75lwhSsJ8RmxL/X7CdeNvlr/1G6yYAIo"
"8MbRVs/KnPkwgx4mzExHbiCxe9xsIAQMdtfQXCiC1GMbKVuPmvblHJRCxLUzYO1QIjXo1ncfP/b5czCi9koLX1WcyIrcyoFRuJm4+7jBbFTxm7J20zPjDpYj"
"NqS+tisG9dXHstiRQwa0RMzdtlzNbFVojtfbtVfa4XYcjS1MB0qV3E7QhU9nYrO7F976jfiyk0mzAoau+QPcYbGtg76uzEr0vIti9Nwhtf6yJkb1ge6bPl98"
"rHQph9acpKrCPbvhdfuOSlYKUh+dODdvwg+15sodm566P5m3YjYaP5NZYa7Biq2a4PC83GvVvN1w1ProHPaOyWQ102GLO9W1ai+DTYtAOz9nxENJpMB2dC1G"
"AZpbbbYMx2YFqJ4s+wCEgsfB917dmksXD0k6mQ0b8Q0wMFG7N91JaIvSIotODTENXneqnhgaaAp6x23OjDkyIm+gs8K8dOQ1givpLECHDBZ5reiyrYsI7AaE"
"URd2FEMjrrJI5MF/irn4cpYJWdaTInfn4EIcBSgS+p33jdT412yOvdFRzHcW8EiNFmLnSpsKF68WTiIDuZaE48gQo/X3z5dcPMW6I4eHjxnci/G23KpHi8IO"
"Nqdr7YEnEDpYC1grIEXPim3eALMZHFmcOPt3h0pYyMsT5ViDHLEmCNuPBcH83XIwYLYkK16O8pmWoHDXeCYHdXAcMHy/VhjbjgjD21ryZlw+VS79BTxeebJ4"
"w2ZSI5PZC/8edpqiCNZezJRuxJFJZ3q/Z0QDV6b+lMj1Y8neb0smGaCKSGR/M2/jAkEkpZ3q0sxOtqYwVlz6s1LskFTvfAM3nlBeENo3MwBGu9eRovZs+TBl"
"n2vCzECzkwlTxzYNa+EwNsWDxxcknlgKawUYY9JI5M52f85gjBRGtnOTCzES9kssXteY3UwmsTulHpRBl3LanxItQMuA2nXNKRvOZ6wscRyjLZWiEWmgvIR1"
"3/vlc2mkw7Jz5ZmhRAqGkw+p6MRP1MPmFc/W6IgsTyUVsYqAvIKn/q3ZRqKBz37DeKu/pZ3eirk+ydysP4vQk/BsyyylsgXqwNMZJ7bYhT1yMqJkvD0peYrj"
"AuRt44gwyXTqlDuRuz2hCWiRcqvCi5V3ipybAg9oVHM+RuEYTLMaNpNZLD3bkaswovOQzU29JiPnWaaozzjIMW6jbJzvNG2uhtNlYDTTkal0/5KbA6TdaI86"
"u2VnM6LhrWmpfMa80PGJ6HoXI8N+PFamtuVm3NCnFeNRMzlEuUNDz7iWkV8pIWr2x0ZBHHcnsF8m2AImmul9VMtEKcT+XTbZ/M6EZfl0Qt6jlqfmXloGE9GK"
"qP9+RgzCqf5kLMbRvLuX5x4/sh1UAYO3t7Mkymn1o6TSiG02rv1PM5vNqKYcrqF1smzr0Kef11l/GCODxaUZb16su9RjNxrLqvWf96/LFLBMKqkFsl5usTCA"
"Emx62O1I3GC2ISTLVZEOa4nqRRUmOspg9ZNb7xuk6B0lueGkZshVjYNiMhAJS0OOGqN14QE1+o67CVXetCSX/KYJgHLCatfvD6NkAVorpPbC1mxFaIuappPw"
"JAoUOWwrjktOGGWknVZzx11n62ZMBwWo1xr7VNZF9+E6Ig+Xr7ajhGbImYP9UrI0CSpQK3JRmWS6o43JdtqvWkVQ4anmEUIfaWBWPh7ZiEiEwXt9cy4uzEXN"
"m8RkHBaQvrppLPokg2EnbGn97Ik/VxPmb0agzJ0vCu7MY7zqco0ufC/TUcI7ZIzNcnZQRUGsu6bqq5ckQyECY4mOipyOX42OURwxfcsj6xJ6vxa+MSBVG75a"
"llxFaJuY+6ZQxIDH+W9Sk45Td6AYl84dGtOR0Q5IcCqmHC29DUsWxB/GpaRFIw0ra1mBFbJnY0JlUEFGdbNzVebzeqqUac7u6K3I6A8pX9ac0whYAr2/L4tX"
"EsSqJzeDGIGbElYu6jmxiDIrt2bTD743Xnw6rhVi3/zcM5H/KtHPBqxMg4bL483vFqSkaSM9WyGMSM2Bpx8ZALI8Bk3GxSXXc2aSisiUc8Jbn/eOVNMPwiIQ"
"fOlZpNp4cJBqSOPA5iRmyN2FE/QTEATW3PHMZ1pxOGL7VkLTa0tSqPH/fcUoy+ZVAxPp/ZuPp8u7iQha+YEedCzFUNhbACWL+V8VtA3TsxTMmSJolab8/WfH"
"Ix8JWemIyW3pFQKboro/HbwxIG5Iz3VXOMOCdiR+WgExPArpYNmV55jGhBp0EL5jqZfmqoHuoN+oxuKUczvQKpPd9OkR4MTT4JYkZskZjjLkdUAKxEM4RxfV"
"e63WXQ3MnurXnnU6mcTqYMbxruOpAhiGMIE+qMeao5RpwrcdUAFQFk/yHDMC7ZPDrQZtQGeg4WkDGncqjN+K6jfYbbBZu/8MuEBNMDTx0qZsQWiQW+Ma4iQG"
"KyycBYKD76pwztaFUhPch8W6Ot2NVnIzIiKhr37OAYcc5RzBXP+0UEFB25smb7+3YUUwBnoP/pY+OM2DKxo/GIqACi7Yc2rNHuXng3RBWi+m8efHrkYMSWib"
"niabcGgFBi19o0rj6dpEiiWnxoB8eI8yEWHJVdKgUQ9Zl1a7Gzwo1xEBXiax94NrsHI7YexZxwedinECS6r63FML7whVdEklMZO/jsBI8R50DMre9JaQBYjL"
"xQvub6WnPNpSJJ1uJ9fyhpzohzIXZUDgICwEp8zT2Oa88lKAaiuZYkYWLEuRAL6z0xJxCg/tlja6yrN8YlCpImTR384zqcEedCzDlKesI+IIrovrm2hurGL7"
"wiSKo20rekGg0jzomY40vadJUAFtNX15LxqfMpGQHyhUc3VTHiwog5vv4mkgGwGNi+doxPnY3ICJDT3f3c0teljDhHYm6/J6phAKrEm9aIsVByLkAxEM2/9E"
"HMiPMgdpQMJZwKgChF964N45KX4goFScXDfUaF2hhxrsQcXzKa0gAUUZ95cU3E8JaYQkEStUT/pipGbAcxx1d2e50SKKp+5pU1EMP7oiArEnWG2wyFGXEexB"
"xfP+nVFFBk76cnN62+MJFBwlpkwFiYEedDzPH1XUgEANE7s2d1n8eVW6KSiADU9slNGXaXMOJ8iDm+8aYmEkCyf/bAqgpvjhIREeL0d72Saf8QI86LmOMbZ1"
"VNIkj2H1nFsbpyXwzJCkE+/2BGYUacJzF31DOBNQCQ/cO7Vsu47qU4khFBzpCJMCPOl9ix/vLCGWCqx7c/tzrC5wIKVQA0sxhmxHkTc5yjccwpEgCKAi2rYX"
"z0bBpjAKLpDS+b7eNlxHkTc47GWawRAEojh0pXPIQ3/9UtIiDI6R00FWbEBRBzlZ4xvwYAWGz/9OTKIh/dN+wVAQjWXJPmwoy4EGJHcX2XXyMAmPpHtq+k8d"
"FzAZ0qQLCWbtmQ5bQFIy6AmBMcpybdPgSgYBS+unLGf7rmxkgBIjqTA9mEGXFBxBznaJ4POGUCgEX3mS1bV6RXu/dd8ssIADQ8Mnog64kpRlwYMcfXDkVlWQ"
"LAor/30OblR969+XhSoggIUfWJvuzCjLhQYo6vutCAgoSAgpX28emK5RZ+aySEMYCEg6Gu76osnHpkxIUSc7yjeSDtQwRAQSiwfXmN9191KQgBSKIz0XbA0r"
"FWRlwwMcc5qvvjsiYBABK9F9/auGy+m89+LVEEQEDTbw29WMYx4hUi5vgdnqlQUJEAEJVct7YsD/L+t2JuIgIAEX2Rye+qrAkzYluIOb6ouj/uVAEAEOiuM5"
"t32N6X9XsvuXWEAAg46XCPxRBmxHYRc4Kj6MuG0EJOQ4SUSMNOe5HX7zwTkhACAKCqq+vFIsvCjNgmYo7jHIfn44qGAQAUEQWO7622qxvFO7efSYkYLVQv9x"
"/pP1aWw9WM2DZijt/o6UpqKgEAhKkeufhSrT3Ah0ZS0mKpPU2ODxwrykWYEdtIzPFlG/umQkFMAAAh5A8lr+2sKbALVTu/coUeA4tB16RnXVlud2DENhJzAl"
"+66casfyETpqKApLjP7CkAuWhn0yVdB2UhTSJVQ1OflZU5OEa8esQcxzv4itGoV1tMUIoUUUocf2FnST4/vvfqmTBSHhfKVFXXeP86R+73YcT2EnOcwDsO9o"
"+HfI+REUbuwMWGPZW5/a/SdHwk6ceLwISozvRY//OOPIQZse3EHMfxpZ7pqXafvJDWVkFA9JDXdXG0zur/JSdGQm6vtLhIAgB1LtI1lB8wI14WYo53OPjSyT"
"Cm+Pdkw5KO1X+/eGJXz56+3Rku27lnz56mhkv+f5Okp2oFUCwFBjwn8wRmxMtDzHEcx/OlbVEX1p5UakEIIUV26uGRT6cOnWh5Yb6paddCa2qaf6HlxKHjt+"
"IRUaMIod+LD4iaFB6fP+lw8Hl3hBEvFzG3jd/k6T9xxB10oieJyBEgrCAkq5K3PRIJJ5OBQDKZDEfSbj9WRQz4aV5CfcHwzGTPFp4v5C/BiJeNmON4ni/39H"
"QP68FnC+QpCAEAIQoAAAEgCwvpZzPIK+pcaGa+ZwtfGDAjXlbiBeUNnso3J9p1CWGD8j6GDVFFl1yzPX2bC/ZlxMtOzHGcwAvCAU9/QzQRCalOlSJCjGUJQZ"
"hqPuQOJ4502eTLiFeCeIGZP1Z3YLq7ayyaaNfmgk5VFDHGCCGEEMZYlGXVOTeHXb23proG+vv2CDb5MuKVIuY4TuAEQRCOeXqmp/u7b05E44Gwyx3yer0hdz"
"qcGInONLT1T/f0DAmCIAiCjX+JtU78X7lDYMJuyFZZAAAAAElFTkSuQmCC"
)
""" STANDALONE FUNCTION """
""" COMMAND FUNCTION """
def expanse_print_suggestions(args: Dict[str, Any]) -> CommandResults:
ip = args.get("ip")
port = args.get("port")
fqdn = args.get("fqdn")
expanse_users = argToList(args.get("expanse_users", []))
expanse_devices = argToList(args.get("expanse_devices", []))
expanse_ips = argToList(args.get("expanse_ips", []))
expanse_issue_tags = argToList(args.get("expanse_issue_tags", []))
expanse_asset_tags = argToList(args.get("expanse_asset_tags", []))
expanse_business_units = argToList(args.get("expanse_business_units", []))
shadow_it = argToList(args.get("shadow_it", []))
provider = args.get("provider")
region = args.get("region")
service = args.get("service")
prisma_cloud_assets = argToList(args.get("prisma_cloud_assets", []))
    md = "\n\n"
md += f"# Expanse Attribution for service {ip}:{port}\n\n"
md += "## Executive Summary\n\n"
md += (
"The Expanse Attribution Playbook has performed **enrichment** across several systems to help you determine the owner of"
" this asset. Data has been searched in **Cortex Data Lake**, **Panorama**, **Prisma Cloud** and **Splunk**.\n\n The"
" findings are reported in the following sections.\n"
)
md += "## Service Details\n\n"
md += "Logs and asset information were searched for the following service:\n"
md += tableToMarkdown(
name="Service Information",
t=[{"IP": ip, "port": port, "FQDN": fqdn}]
)
if provider:
md += f"The asset has been attributed to the following provider: **{provider}**\n"
if region:
md += f"The IP address belongs to the following Public Cloud region: **{region}**\n"
if service:
md += f"The IP address belongs to the following Public Cloud service: **{service}**\n"
md += "\n\n"
if shadow_it and isinstance(shadow_it, list):
md += "## Shadow IT\n\n"
md += (
"Based on the information above, the Playbook tries to determine whether this service is sanctioned or can be Shadow"
" IT. The following conditions are checked:\n"
)
shadow = False
for n, c in enumerate(shadow_it):
if isinstance(c, dict) and c.get('value') is True:
shadow_it[n]["result"] = "✅"
shadow = True
else:
shadow_it[n]["result"] = "❌"
md += tableToMarkdown("Shadow IT Conditions", shadow_it, headers=["condition", "result"], headerTransform=pascalToSpace)
md += "\n\n"
if shadow:
md += "### Enrichment determined that this service **IS NOT** Shadow IT.\n"
else:
md += "### Enrichment determined that this service **MIGHT BE** Shadow IT.\n"
md += "\n\n"
md += "## Attribution\n\n"
md += (
"This section reports attribution information based on Expanse detected business units and tags for this issue and its"
" related assets.\n\n"
)
if expanse_business_units:
md += f'Business Units: **{", ".join(expanse_business_units)}**.'
else:
md += "No Business Units reported by Expanse for this Issue."
md += "\n\n"
if expanse_issue_tags:
md += f'Issue Tags: **{", ".join(expanse_issue_tags)}**.'
else:
md += "No relevant tags reported by Expanse for this Issue."
md += "\n\n"
if expanse_asset_tags:
md += f'Related Asset Tags: **{", ".join(expanse_asset_tags)}**.'
else:
md += "No relevant tags reported by Expanse for assets related to this Issue."
md += "\n\n"
md += "## Detected users connecting to this service\n\n"
md += (
"The enrichment correlates log information from Firewalls with UserID enabled. If users from within your corporate"
" network or **Prisma Access** are connecting to this service, they will appear in the following table.\n\n"
)
if expanse_users and isinstance(expanse_users, list):
for n, u in enumerate(expanse_users):
if not isinstance(u, dict):
continue
if (groups := u.get("groups", [])) and isinstance(groups, list):
for m, g in enumerate(groups):
                    # keep only the CN component of the distinguished name, e.g. "CN=Sales,OU=x" -> "Sales"
                    f = re.search("CN=([^,]*),*", g)
if f:
expanse_users[n]["groups"][m] = str(f.groups(0)[0])
if (manager := u.get("manager", [])) and isinstance(manager, str):
f = re.search("CN=([^,]*),*", manager)
if f:
expanse_users[n]["manager"] = str(f.groups(0)[0])
md += tableToMarkdown(
name="Detected users connecting to this service",
t=expanse_users,
headers=["username", "domain", "mail", "groups", "manager", "sightings"],
headerTransform=pascalToSpace,
)
else:
md += "*No user evidence found in logs.*\n"
md += "\n\n"
md += "## Top IPs communicating to this service\n\n"
md += (
"The enrichment correlates log information from Firewalls that terminate connections on this service. If any firewall"
" that is sending logs to Panorama, Cortex Data lake or Splunk is seeing traffic to this service from any network,"
" the information will be reported. The top talkers that are connecting to this service are displayed in the following"
" table.\n\n"
)
if expanse_ips and isinstance(expanse_ips, list) and all(isinstance(x, dict) for x in expanse_ips):
md += tableToMarkdown(
name="Top IPs communicating to this service",
t=expanse_ips,
headers=["ip", "internal", "sightings"],
headerTransform=pascalToSpace
)
else:
md += "*No IP evidence found in logs.*\n"
md += "\n\n"
md += "## PAN-OS Firewalls with sightings\n\n"
md += (
"The enrichment correlates log information from Firewalls that terminate connections on this service. If any firewall"
" that is sending logs to Panorama, Cortex Data lake or Splunk is seeing traffic to this service, they will be reported"
" in the following table.\n\n"
)
if expanse_devices and isinstance(expanse_devices, list) and all(isinstance(x, dict) for x in expanse_devices):
md += tableToMarkdown(
name="PAN-OS Firewalls",
t=expanse_devices,
headers=["serial", "vsys", "device-group", "exposing_service", "expanse-tag", "sightings"],
headerTransform=pascalToSpace
)
md += (
"(*) ***exposing_service*** *means that Firewall logs were found where the destination IP:port corresponds to this"
" service, and the source is a non-private IP. Such Firewalls are likely to be protecting the service.*"
)
else:
md += "*No PAN-OS devices found in logs.*\n"
md += "\n\n"
md += "## Prisma Cloud Inventory\n\n"
md += (
"The enrichment correlates asset information from Prisma Cloud inventory, searching for assets that own the IP address or"
" the FQDN. If found, the cloud asset details are reported in the following table.\n\n"
)
if prisma_cloud_assets and isinstance(prisma_cloud_assets, list) and all(isinstance(x, dict) for x in prisma_cloud_assets):
md += tableToMarkdown(
name="Asset information from Prisma Cloud inventory",
t=prisma_cloud_assets,
headers=[
"cloudType",
"service",
"regionId",
"accountName",
"accountId",
"resourceType",
"resourceName",
"ip",
"fqdn",
"rrn",
"id",
],
headerTransform=pascalToSpace,
)
else:
md += "*The asset was not found in Prisma Cloud inventory.*\n"
md += "\n\n"
return CommandResults(
readable_output=md,
outputs=None,
)
""" MAIN FUNCTION """
def main():
try:
return_results(expanse_print_suggestions(demisto.args()))
except Exception as ex:
return_error(f"Failed to execute ExpansePrintSuggestions. Error: {str(ex)}")
""" ENTRY POINT """
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
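# Added usage sketch (illustration, not part of the pack): the command function can be
# exercised directly with a minimal, hypothetical argument dict mirroring demisto.args().
# example_args = {"ip": "203.0.113.10", "port": "443", "fqdn": "app.example.com"}
# print(expanse_print_suggestions(example_args).readable_output)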
| [
"[email protected]"
] | |
e719ea9ed023608f7635c6fd8bf85b0b352cde9c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03797/s857928689.py | 5a492d61ed9095cbcb8fca6a600a3c13717f356e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | N, M = map(int, input().split())
cnt = 0
if N <= M // 2:
    cnt = N
    N, M = 0, M - 2 * N
    cnt += M // 4
print(cnt)
else:
cnt = M//2
print(cnt) | [
"[email protected]"
] | |
3f800f7039c5fc6489b128bf37624959ce17273a | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/eve/client/script/ui/shared/planet/pinContainers/LaunchpadContainer.py | 614755276a054e01a0a618a178a4f59d06d3a490 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 2,664 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\shared\planet\pinContainers\LaunchpadContainer.py
import carbonui.const as uiconst
from eve.client.script.ui.control.eveLabel import Label
import uiprimitives
import util
import localization
from .BasePinContainer import BasePinContainer
from .StorageFacilityContainer import StorageFacilityContainer
from .. import planetCommon
class LaunchpadContainer(StorageFacilityContainer):
__guid__ = 'planet.ui.LaunchpadContainer'
default_name = 'LaunchpadContainer'
def ApplyAttributes(self, attributes):
BasePinContainer.ApplyAttributes(self, attributes)
def _GetActionButtons(self):
btns = [util.KeyVal(id=planetCommon.PANEL_LAUNCH, panelCallback=self.PanelLaunch), util.KeyVal(id=planetCommon.PANEL_STORAGE, panelCallback=self.PanelShowStorage)]
btns.extend(BasePinContainer._GetActionButtons(self))
return btns
def PanelLaunch(self):
bp = sm.GetService('michelle').GetBallpark()
text = None
if bp is not None and not self.pin.IsInEditMode():
customsOfficeIDs = sm.GetService('planetInfo').GetOrbitalsForPlanet(sm.GetService('planetUI').planetID, const.groupPlanetaryCustomsOffices)
if len(customsOfficeIDs) > 0:
try:
customsOfficeID = None
for ID in customsOfficeIDs:
customsOfficeID = ID
break
sm.GetService('planetUI').OpenPlanetCustomsOfficeImportWindow(customsOfficeID, self.pin.id)
self.CloseByUser()
return
except UserError as e:
if e.msg == 'ShipCloaked':
text = localization.GetByLabel('UI/PI/Common/CannotAccessLaunchpadWhileCloaked')
else:
message = cfg.GetMessage(e.msg)
text = message.text
if text is None:
if self.pin.IsInEditMode():
text = localization.GetByLabel('UI/PI/Common/CustomsOfficeNotBuilt')
else:
solarSystemID = sm.GetService('planetUI').GetCurrentPlanet().solarSystemID
if solarSystemID == session.locationid:
text = localization.GetByLabel('UI/PI/Common/CannotAccessLaunchpadNotThere')
else:
text = localization.GetByLabel('UI/PI/Common/CannotAccessLaunchpadLocation')
return Label(parent=self.actionCont, text=text, align=uiconst.TOTOP)
| [
"[email protected]"
] | |
4e16ccc77fd56253143c198ecaa008a328bcd0b8 | f0fa96d39a66c3ddaae4266442a13ec3feb7a462 | /dynaminc_programing/perfectSquare.py | 7b3a7b89619c22b047ff08c46b1d7e59fa335c19 | [] | no_license | ashishgupta2014/problem_solving_practices | 14d587e98d9996a95efe822335ca4baccb39b1a1 | bc4f4b07e1e33273010e34428e0c31d2d6656c14 | refs/heads/master | 2023-04-26T03:47:40.766508 | 2021-06-07T04:55:52 | 2021-06-07T04:55:52 | 298,063,915 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | import math
def perfectSquare(n):
if (n < 3): return n
square_nums = [i ** 2 for i in range(0, int(math.sqrt(n)) + 1)]
dp = [float('inf')] * (n + 1)
dp[0] = 0
for i in range(1, n + 1):
for square in square_nums:
if (i < square): break
            dp[i] = min(dp[i], dp[i - square] + 1) # +1 counts the square we are subtracting.
return dp[-1]
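# Added worked example (illustration, not part of the original file):
# dp[12] = dp[12 - 4] + 1 = dp[8] + 1 = 3, i.e. 12 = 4 + 4 + 4,
# so the print below outputs 3; perfectSquare(13) would give 2 (13 = 4 + 9).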
print(perfectSquare(12)) | [
"[email protected]"
] | |
5c9e8206af3d623bc4bcb23dcb9e1c079e59e878 | bf7959048edc0005e04431a0864c719adc5ea9ea | /python版本/6038-MinimizeResult.py | e33698a2a56562956fcdb3f35ab04e87657c7df2 | [] | no_license | Yohager/Leetcode | 7c24f490cfa5fd8e3cdb09e5a2305a134a064a93 | 585af82ff2c2d534053f6886714406019ed0c7d1 | refs/heads/master | 2022-12-07T23:51:16.347174 | 2022-11-28T02:30:53 | 2022-11-28T02:30:53 | 178,201,848 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | class Solution:
def minimizeResult(self, e: str) -> str:
n = len(e)
arr = e.split('+')
l1,l2 = len(arr[0]), len(arr[1])
init = eval(e)
res = float('inf')
p1,p2 = -1,-1
for i in range(l1):
for j in range(n,n-l2,-1):
if i == 0 and j == n:
cur = e[:i] + '(' + e[i:j] + ')' + e[j:]
elif i == 0 and j != n:
cur = e[:i] + '(' + e[i:j] + ')*' + e[j:]
elif j == n and i != 0:
cur = e[:i] + '*(' + e[i:j] + ')' + e[j:]
else:
cur = e[:i] + '*(' + e[i:j] + ')*' + e[j:]
# val = eval(cur)
if eval(cur) < res:
p1 = i
p2 = j
res = eval(cur)
# print(res,p1,p2)
if init < res:
return '(' + e + ')'
else:
return e[:p1] + '(' + e[p1:p2] + ')'+e[p2:] | [
"[email protected]"
] | |
de40442e18ca727417a8eb58201487d77ae1f7eb | 23107f38f7c28da5e2e5e51f6eda3ba6b5b9a2ff | /kitchen_project/settings.py | b1c6eb6dddc924b8e595337e856f15b714f1cb08 | [] | no_license | sarigu/kitchen_app | fe818aca3fb0605c185fe9ab0b496ea4e0bca0c7 | f2eacf907eb75afd4cecd1cdce19900230b8fb33 | refs/heads/master | 2023-02-22T19:34:41.094263 | 2021-01-25T09:24:01 | 2021-01-25T09:24:01 | 315,796,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,204 | py | """
Django settings for kitchen_project project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g^w2u@*@88^s-*v%u&2z^th@ug*!_md54943ppa7swu09+fz!3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_rq',
'login_app',
'kitchen_app',
'channels',
'chat',
'api',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'loginrequired_middleware.middleware.LoginRequiredMiddleware'
]
ROOT_URLCONF = 'kitchen_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kitchen_project.wsgi.application'
ASGI_APPLICATION = "kitchen_project.asgi.application"
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
EXCLUDED_URLS = (
'api/images/',
'api/id/<int:id>/',
'admin/',
'admin/login/',
'accounts/login/',
'accounts/logout/',
'accounts/sign_up/',
'accounts/request_password_reset/',
'accounts/set_new_password/',
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
RQ_QUEUES = {
'default': {
'HOST': 'localhost',
'PORT': '6379',
'DB': 0,
'DEFAULT_TIMEOUT': 360,
}
}
# EMAIL SETTINGS
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp-relay.sendinblue.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'MqFtvLHkhNJXw2c6' | [
"[email protected]"
] | |
8e1eaca2c534ab590ef058f10c521bcab1b4c678 | 6443a587e16658a58b884a2e5c6dbbab1be50674 | /Leetcode/Unique Binary Search Trees.py | c83974a73a1bea84808319b93ca6f42ec0b06328 | [] | no_license | xiaochenchen-PITT/CC150_Python | a6cbe213946851639a827068961934920b6c3e57 | e96394265d8a41a1b4558d5d2b34aa34af99662f | refs/heads/master | 2020-12-24T17:18:14.606804 | 2014-11-08T21:48:20 | 2014-11-08T21:48:20 | 25,654,100 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | # Unique Binary Search Trees
class Solution:
# @return an integer
    def numTrees(self, n):
# DP
mp = {0: 1, 1: 1} # key: n, value: number of different structures
if n in mp:
return mp[n]
for i in range(2, n+1): # i nodes
            res = 0
            for j in range(0, i):  # j nodes form the left subtree, i-1-j the right. j in [0,i-1]
                res += mp[j] * mp[i-1-j]
mp[i] = res
return mp[n]
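        # Added worked example (illustration, not part of the original submission):
        # n = 3 -> mp[3] = mp[0]*mp[2] + mp[1]*mp[1] + mp[2]*mp[0] = 2 + 1 + 2 = 5,
        # the 3rd Catalan number, which is the expected answer for n = 3.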
# recursive method
# if n == 0 or n == 1:
# return 1
# res = 0
# for i in xrange(0, n):
# # assign i nodes on the left and (n-1-i) on the right
# # because left side is independent of right side, so multiply them
# res += self.numTrees(i) * self.numTrees(n - 1 -i)
# return res | [
"[email protected]"
] | |
50ab018c1be8d2a4d8012cffc93a214ded31a1c8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03339/s461187106.py | 117afdfcdef0aa4d725b5db1581f92a01b0ab81a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | n = int(input())
s = input()
ec = 0
wc = 0
el = []
wl = []
for f,r in zip(s,reversed(s)):
el.append(ec)
wl.append(wc)
if f == 'W':
wc += 1
if r == 'E':
ec += 1
ans = n
for w, e in zip(wl, reversed(el)):  # w: 'W's before this position, e: 'E's after it
    ans = min(ans, w + e)
print(ans) | [
"[email protected]"
] | |
f50078ae17f73108cf7c97cdbdfeb1d015d3e593 | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon15894.py | d280ba45833b4d034ce0c931b621ca9446a9dc27 | [] | no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | n=int(input())
print(n*4) | [
"[email protected]"
] | |
0ec54ad0cd05fe5719729c86e746014af74b1ece | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/g60.py | c80e4b4a8a6fdd45b52d8b567f0eeb839fab0048 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'g60':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
60d9374afa434145e400c9430c0c5b40ef4a1df4 | 0b529ba1efe44c47b540dd22a7fd9cc6a73f907f | /src/1300-1400/_1344_angle-between-hands-of-a-clock.py | f352f9345e2663c99a674b740103ff27b7269469 | [] | no_license | alexparunov/leetcode_solutions | b9445a02182bc61f490257328a1960c2a627d7bc | bc19dbcc903782f91846d5b9d73a7ffb9b2f002d | refs/heads/master | 2022-11-28T21:10:24.875260 | 2020-08-15T12:42:40 | 2020-08-15T12:42:40 | 261,517,109 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | """
https://leetcode.com/problems/angle-between-hands-of-a-clock/
"""
class Solution:
def angleClock(self, hour: int, minutes: int) -> float:
angle_in_hour_minute = 360 / (12 * 60)
angle_in_minute = 360 // 60
angle_of_hour = (hour * angle_in_hour_minute * 60 + minutes * angle_in_hour_minute) % 360
angle_of_minutes = minutes * angle_in_minute
diff_angle = abs(angle_of_hour - angle_of_minutes)
return min(diff_angle, 360 - diff_angle)
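# Added sanity checks (illustration, not part of the original solution):
# Solution().angleClock(12, 30) -> 165.0 and Solution().angleClock(3, 15) -> 7.5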
| [
"[email protected]"
] | |
bd8669f2afe46f47983bf9b249cef07baa413cf6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03000/s283498849.py | 4b5184d2bd4bb37f346be633557e0c253010dab9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | def main():
N, X = (int(i) for i in input().split())
L = [int(i) for i in input().split()]
from itertools import accumulate
S = list(accumulate([0] + L))
ans = 0
for s in S:
if X < s:
break
ans += 1
print(ans)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
8850f51e67fcb72971dd6eebd251f1d46269618b | 9be786872889eb8fac6a64e499554c1b364dbc05 | /1_pythonStudy/06_while/day3_while1.py | d0fa54be09451d8ec53be74cf65a8237c6498e29 | [] | no_license | jh5537/TIL | 85b55385873a82eebe57d549782f83ce0e3e6462 | 09b26e673801cdd902878e5f76a8bb30eab6cda6 | refs/heads/master | 2023-06-06T05:23:04.651392 | 2021-06-29T12:27:43 | 2021-06-29T12:27:43 | 373,744,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | # Understanding the while statement: the loop body repeats only while the condition is satisfied (True)
# Print the integers from 1 to 10
n = 1
while n<=10:
print(n)
n = n + 1
| [
"[email protected]"
] | |
7e75de29e7392a2689f4241b3e42ee1e2d5a54a7 | 0c8214d0d7827a42225b629b7ebcb5d2b57904b0 | /examples/matplotlib/E001_Basics/main.py | 85b515aeab4272d66c0f3674054cc913aa4f050a | [] | no_license | mertturkmenoglu/python-examples | 831b54314410762c73fe2b9e77aee76fe32e24da | 394072e1ca3e62b882d0d793394c135e9eb7a56e | refs/heads/master | 2020-05-04T15:42:03.816771 | 2020-01-06T19:37:05 | 2020-01-06T19:37:05 | 179,252,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | import matplotlib.pyplot as mpl
def fib(n: int) -> list:
a = 1
b = 1
result = [a, b]
    for i in range(3, n + 1):  # generate terms 3..n so the list contains n values
c = a + b
result.append(c)
a = b
b = c
return result
if __name__ == '__main__':
y_values = fib(10)
x_values = [i + 1 for i in range(len(y_values))]
mpl.plot(x_values, y_values)
mpl.xlabel('No')
mpl.ylabel('Values')
mpl.show()
| [
"[email protected]"
] | |
a4f3ed80aaf08dd5a18b2c21b6803d9b7bd49b9b | ddd4edc45481e6a7c7141b93e47b974634506d2d | /tradgram/chatrooms/serializers.py | b371a510b3309043cb8b9ef1ab0734ad2bea6c3c | [
"MIT"
] | permissive | didils/tradgram | 407de9d05d01bc840c5c165155d370f092d82f0d | 4868ca082ab78a1b5b96f25ee9f958567bd1bb1e | refs/heads/master | 2021-11-19T02:47:02.224088 | 2019-04-05T08:19:14 | 2019-04-05T08:19:14 | 148,162,588 | 0 | 0 | MIT | 2021-09-08T00:57:43 | 2018-09-10T13:49:57 | Python | UTF-8 | Python | false | false | 350 | py | from rest_framework import serializers
from . import models
from tradgram.users import models as user_models
class ChatRoomSerializer(serializers.ModelSerializer):
class Meta:
model = models.ChatRoom
fields = (
'user1',
'user2',
'last_message',
'new_message'
) | [
"[email protected]"
] | |
3fc82e87b1bddde9014a48c4e580873adf678bc4 | a367a015dbc36287ca933955ded1ee58b5a2a61a | /swagger_client/models/disease_group.py | 776059fb3fa87b5485cc3d698aca7fb81e4dba90 | [] | no_license | kerniee/inno_intership_1_test_task | 70211e153450011c427df595a02e3574dfe7ed9f | fc0619ef54b00806a3b59f3c07c1c1684682d65b | refs/heads/master | 2023-05-23T02:24:40.083723 | 2021-06-21T16:15:04 | 2021-06-21T16:15:04 | 365,855,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,717 | py | # coding: utf-8
"""
Teleagronom
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DiseaseGroup(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'name': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name'
}
def __init__(self, id=None, name=None): # noqa: E501
"""DiseaseGroup - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self.discriminator = None
self.id = id
self.name = name
@property
def id(self):
"""Gets the id of this DiseaseGroup. # noqa: E501
:return: The id of this DiseaseGroup. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this DiseaseGroup.
:param id: The id of this DiseaseGroup. # noqa: E501
:type: int
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def name(self):
"""Gets the name of this DiseaseGroup. # noqa: E501
:return: The name of this DiseaseGroup. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this DiseaseGroup.
:param name: The name of this DiseaseGroup. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DiseaseGroup, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DiseaseGroup):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
c565643c4f2c79d599b5eb9b424e914fcb11f621 | 88906fbe13de27413a51da917ebe46b473bec1b9 | /Part-II/Project-2-Data-Visualisation/Chapter 15 - Generating Data/random_walk_2.py | a41d63019aa7797ec13e4ca91ffd50709a3776ab | [] | no_license | lonewolfcub/Python-Crash-Course | 0b127e40f5029d84ad036263fd9153f6c88c2420 | 322388dfb81f3335eeffabcdfb8f9c5a1db737a4 | refs/heads/master | 2021-01-01T16:45:50.617189 | 2017-10-27T14:23:58 | 2017-10-27T14:23:58 | 97,911,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | from random import choice
class RandomWalk():
"""A class to generate random walks."""
def __init__(self, num_points=5000):
"""Initialize attributes of a walk."""
self.num_points = num_points
# All walks start at (0, 0)
self.x_values = [0]
self.y_values = [0]
def fill_walk(self):
"""Calculate all the points in the walk."""
while len(self.x_values) < self.num_points:
# Decide how far to go in each direction
x_direction = choice([1, -1])
x_distance = choice([0, 1, 2, 3, 4])
x_step = x_direction * x_distance
y_direction = choice([1, -1])
y_distance = choice([0, 1, 2, 3, 4])
y_step = y_direction * y_distance
# Reject the moves that go nowhere
if x_step == 0 and y_step == 0:
continue
# Calculate the next x and y values.
next_x = self.x_values[-1] + x_step
next_y = self.y_values[-1] + y_step
self.x_values.append(next_x)
self.y_values.append(next_y)
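# Added usage sketch (not part of the original module): build a short walk and
# look at the points it visited.
if __name__ == '__main__':
    rw = RandomWalk(num_points=10)
    rw.fill_walk()
    print(list(zip(rw.x_values, rw.y_values)))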
| [
"[email protected]"
] | |
f9200b25f79758ec7d91ceee76d4b01687175579 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/twentyPercent/rank_3uzv_J.py | 675fb8afa38dcf61c4544fa34ba93dc97dac281a | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '3uzv.csv'
identifier = 'J'
coefFrac = 0.2
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/twentyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/twentyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize the input DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"[email protected]"
] | |
e78096450e3762e13172fbb51ef0a06a34d1680c | 957e5aef8b48cf21804d51447ed93a026aab35ff | /script/chk_dup.py | 1fdcc86fc2f8ac8af76c1ce69c5e116ae660a27d | [
"Apache-2.0"
] | permissive | dannysauer/oidctest | 045a438ee934b5c9e27aae9876765e08dac16a37 | e7593e02af7caa71f92220ad0f5b67bb40e30f97 | refs/heads/master | 2021-07-08T07:36:30.362597 | 2020-05-14T07:21:25 | 2020-05-14T07:21:25 | 152,679,266 | 0 | 0 | NOASSERTION | 2018-10-12T01:54:49 | 2018-10-12T01:54:49 | null | UTF-8 | Python | false | false | 292 | py | #!/usr/bin/env python3
import json
ap = json.loads(open('assigned_ports.json').read())
inv = {}
for iss, port in ap.items():
try:
inv[port].append(iss)
except KeyError:
inv[port] = [iss]
for port, iss in inv.items():
if len(iss) != 1:
print(port, iss) | [
"[email protected]"
] | |
c8e318873904d5e634587d89ee920d2feffa58ee | 6cc37dfc44880f57823bb9523ea5f8206d5e3f22 | /python_OOP/labs_and_homeworks/07_solid_exercise/05_emails.py | adeddfd64439568ad2e5a90b04ba83bc9cc780b0 | [] | no_license | dimitar-daskalov/SoftUni-Courses | 70d265936fd86712a7bfe0586ec6ebd1c7384f77 | 2054bc58ffb5f41ed86f5d7c98729b101c3b1368 | refs/heads/main | 2023-05-31T06:44:35.498399 | 2021-07-11T10:16:08 | 2021-07-11T10:16:08 | 322,896,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,714 | py | # SRP (Single Responsibility Principle)
from abc import ABC, abstractmethod
class IEmail(ABC):
@abstractmethod
def set_sender(self, sender):
pass
@abstractmethod
def set_receiver(self, receiver):
pass
@abstractmethod
def set_content(self, content):
pass
class Email(IEmail):
def __init__(self, protocol):
self.protocol = protocol
self.__sender = None
self.__receiver = None
self.__content = None
def set_sender(self, sender):
if self.protocol == 'IM':
self.__sender = ''.join(["I'm ", sender])
else:
self.__sender = sender
def set_receiver(self, receiver):
if self.protocol == 'IM':
self.__receiver = ''.join(["I'm ", receiver])
else:
self.__receiver = receiver
def set_content(self, content):
self.__content = content.format_text()
def __repr__(self):
template = "Sender: {sender}\nReceiver: {receiver}\nContent:\n{content}"
return template.format(sender=self.__sender, receiver=self.__receiver, content=self.__content)
class IContent(ABC):
@abstractmethod
def format_text(self):
pass
class MyContent(IContent):
def __init__(self, text):
self.text = text
def format_text(self):
return '\n'.join(['<myML>', self.text, '</myML>'])
class HTMLContent(IContent):
def __init__(self, text):
self.text = text
def format_text(self):
return '\n'.join(['<div>', self.text, '</div>'])
email = Email('IM')
email.set_sender('qmal')
email.set_receiver('james')
content = MyContent('Hello, there!')
email.set_content(content)
print(email)
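# Added illustration (not part of the original exercise): any IContent implementation
# can be plugged into Email, e.g. the HTMLContent class defined above.
# 'HTTP' is just an arbitrary non-'IM' protocol string used for this sketch.
html_mail = Email('HTTP')
html_mail.set_sender('qmal')
html_mail.set_receiver('james')
html_mail.set_content(HTMLContent('Hello, there!'))
print(html_mail)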
| [
"[email protected]"
] | |
03fc2be0614708dcfbee8c1d6b82759f19bcf7fc | 59f4e4f57c4590b9fe969274960c49e7218ed275 | /.venv/bin/ptw | 6cb2d4a3f8ba91373047a8d4474bb0d5b0042e9d | [] | no_license | MohamadSheikhAlshabab/math-series | be82710d0cb0e8784543ee097c569964dfb8a376 | 6fe5772e2b67beadebbf6d27676bbe5aa91bd367 | refs/heads/master | 2022-12-06T12:56:18.678827 | 2020-08-17T16:49:47 | 2020-08-17T16:49:47 | 288,155,962 | 0 | 0 | null | 2020-08-20T22:58:04 | 2020-08-17T10:56:20 | Python | UTF-8 | Python | false | false | 420 | #!/home/mohamad/401/math-series/.venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'pytest-watch==4.2.0','console_scripts','ptw'
__requires__ = 'pytest-watch==4.2.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pytest-watch==4.2.0', 'console_scripts', 'ptw')()
)
| [
"[email protected]"
] | ||
815a53ab6d3b0f60494ac49b3988449512470445 | 38da8edb2102ad29eda8784cbb845cac0b96bbca | /151_lambda_expression.py | f4ae995bf65597a88920a1d5cd79443c18b826fd | [] | no_license | Prateek2201/Python_codes | 1a655a3e6820e7ecb1fb8a8abd266a8ae0508cb5 | 436a36544edac80cbe420c7b9ddb718df46b68da | refs/heads/main | 2023-08-01T03:10:51.864186 | 2021-09-17T18:08:40 | 2021-09-17T18:08:40 | 407,635,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | ##def is_even(a):
## return a%2==0
##print(is_even(5))
##
##is_even2=lambda a:a%2==0
##print(is_even2(6))
##def last_char(s):
## return s[-1]
##print(last_char('Prateek'))
##
##last_char2=lambda s: s[-1]
##print(last_char2('Prateek'))
def f(s):
if len(s)>5:
return True
return False
print(f('Prateek'))
func=lambda s:True if len(s)>5 else False
print(func('Prateek'))
func2=lambda s: len(s)>5
print(func2('harsh'))
| [
"[email protected]"
] | |
aae99ee3d026cd50f0a7c13cedd8cb9ba1957bd9 | 5bad0a225a8b077f5600695e9943dfae43d3f2ed | /mrna/cox/SKCM/cox_regression.py | 10e9a42e54f6beae3beabff1483de7711a676ef5 | [
"MIT"
] | permissive | carrie138/onco_lnc | d13ddb31b7a3aabd0274fb9b771370500678a5c5 | e8d20e43026ffe4651bd25783db36cabc2c1519f | refs/heads/master | 2021-01-12T19:19:36.064852 | 2016-07-15T11:58:28 | 2016-07-15T11:58:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,310 | py | ## A script for finding every cox coefficient and pvalue for every mRNA in SKCM Tier 3 data downloaded Jan. 5th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_skcm.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
elif re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
else:
pass
else:
if re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_patient_skcm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, 06 a metastatic one; both were allowed for SKCM
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01' or i[1].split('-')[3][:-1]=='06':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','mrna',j))
f.readline()
temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
## In the case that the patient only contained 1 primary tumor mRNA file.
if len(temp)==1:
genes.append(temp[0])
## If the patient contained more than 1 primary tumor mRNA file, or metastatic, or both,
## this list comprehension will average the files for any number of files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
temp=[]
for j in genes:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(genes)/4.0 and median>1:
for index, kk in enumerate(temp):
final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB) file, which could be useful for further analyses; this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','SKCM','final_genes.txt'),'w')
for i in final_genes:
f.write(str(i))
f.write('\n')
f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
kaplan=[]
genes.append(final_genes[0][i][0])
for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['gene']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
## Parse the string of the result with python for the gene coefficient and pvalue
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='gene':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mrna','cox','SKCM','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
| [
"[email protected]"
] | |
57b36a522a4a39bda75590c6ed08055b2fd1ba63 | f3d8e1351e52526959e2d44d72fd716924f1751d | /problems/56_merge_intervals.py | 5b845275a27f0c956cd1a1031bf770ef73b34f38 | [] | no_license | xueyuanl/leetcode-py | c27a4faff5b9040d57cf864d3a11f1683d8182e3 | 03d3e34522c8c819388634ab4b63077da864a4e1 | refs/heads/master | 2021-07-14T23:40:32.913822 | 2021-07-14T13:43:19 | 2021-07-14T13:43:19 | 206,973,737 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | class Solution(object):
def merge(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: List[List[int]]
"""
if len(intervals) < 1:
return []
res = []
sorted_intervals = sorted(intervals)
new_pair = sorted_intervals[0]
res.append(new_pair)
for pair in sorted_intervals:
if pair[0] <= new_pair[1]:
new_pair[1] = max(pair[1], new_pair[1])
else:
new_pair = pair
res.append(new_pair)
return res
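# Illustrative usage (not part of the original file): the classic example for
# this problem, where [1,3] and [2,6] overlap and merge into [1,6].
if __name__ == "__main__":
    demo = Solution().merge([[1, 3], [2, 6], [8, 10], [15, 18]])
    print(demo)  # expected: [[1, 6], [8, 10], [15, 18]]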
| [
"[email protected]"
] | |
d2c204a4d44b2ff1d4ff5c3b10a7ccc2a91de537 | 1c904e7b4ab661c9f90536c9bfcde970540271d8 | /setup.py | 918cc1176dfec9a809df9ea34f452fb6de684980 | [] | no_license | koslab/pydatamall.webui | a7803a652441acb74adc75d2d09d9dced7cc9520 | b483e8ca1aeef73a2c2c430cabf74e8fd0d0daf2 | refs/heads/master | 2021-01-10T07:56:48.159898 | 2015-11-20T15:55:09 | 2015-11-20T15:55:09 | 45,684,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | from setuptools import setup, find_packages
import os
version = '1.0'
long_description = (
open('README.txt').read()
+ '\n' +
'Contributors\n'
'============\n'
+ '\n' +
open('CONTRIBUTORS.txt').read()
+ '\n' +
open('CHANGES.txt').read()
+ '\n')
setup(name='pydatamall.webui',
version=version,
description="",
long_description=long_description,
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
],
keywords='',
author='',
author_email='',
url='http://github.com/koslab/pydatamall.webui/',
license='agplv3',
packages=find_packages('src'),
package_dir = {'': 'src'},
namespace_packages=['pydatamall'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'pyramid',
'pyramid_layout',
'pyramid_bowerstatic',
'pyramid_chameleon',
'python-social-auth',
'requests'
# -*- Extra requirements: -*-
],
entry_points={
'console_scripts': [
'webui=pydatamall.webui.runner:main'
]
}
)
| [
"[email protected]"
] | |
a3a7c37768dbc87654254b4054e569995dd12bf2 | facbdbdadacd23f6c83d266116dc14744741070f | /Core_Python/Day-7/9.py | 3b6d63c59296372612328ade2b105e64d20f3ed5 | [] | no_license | Yogesh-Singh-Gadwal/YSG_Python | 51b6b53fe34567bf066b6e487c00da766b47ac6b | f0d6841e1f92d1d2b27d8ecdd332d40b49a5ca69 | refs/heads/master | 2023-06-06T04:40:12.004713 | 2021-07-06T19:59:26 | 2021-07-06T19:59:26 | 292,482,586 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | # Python
if False:
    print('Both values are the same.')
else:
print('Condition is false')
print('Rest Data')
| [
"[email protected]"
] | |
6e87c83ff642eaea9ea8bc5eccfac1ca58e50696 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_142/863.py | a5243b5969ac2f11082e3e3b90863e0c03738b35 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,195 | py | class GameInstance:
def __init__(self, init_str):
self.strs = init_str
self.total_action = 0
self.tab_strs = []
self.tabularize()
def tabularize(self):
#set_trace()
N = len(self.strs)
for i in range(0, N):
self.tab_strs.append([[self.strs[i][0],1]])
for j in range(1, len(self.strs[i])):
if self.tab_strs[i][-1][0] == self.strs[i][j]:
self.tab_strs[i][-1][1] +=1
else:
self.tab_strs[i].append([self.strs[i][j],1])
def del_rep(self, si):
clean_ptr = 0
clean_str = self.strs[si][0]
#set_trace()
del_start = False
for i in xrange(1, len(self.strs[si])):
if clean_str[-1] == self.strs[si][i]:
#i+= 1
if not del_start:
self.total_action += 1
del_start = True
else:
del_start = False
clean_str += self.strs[si][i]
#i += 1
return clean_str
def solve(self):
#the point is that as long as there is no repetition we can't do anything.
#if there is a character in one of the string that is not in the other one
#then we are done impossible.
#also the order of repetition doesn't matter
#so we move the pointer for all of them if we can repair we repair if not game
#over
N = len(self.strs)
ref_len = len(self.tab_strs[0])
# mod_str = self.del_rep(0)
# poss = True
# for i in range(1,N):
# if (mod_str != self.del_rep(i)):
# return "Fegla Won"
for i in range(1,N):
if ref_len != len(self.tab_strs[i]):
return "Fegla Won"
for j in range(0, ref_len):
if (self.tab_strs[0][j][0] != self.tab_strs[i][j][0]):
return "Fegla Won"
#set_trace()
# all_mins = [self.tab_strs[0][i][1] for i in range(0, ref_len)]
# for i in range(1, N):
# for j in range(0, ref_len):
# if all_mins[j] > self.tab_strs[i][j][1]:
# all_mins[j] = self.tab_strs[i][j][1]
for j in range(0, ref_len):
sum_cl = 0
for i in range(0, N):
sum_cl += self.tab_strs[i][j][1]
average = float(sum_cl)/float(N)
av = [0,0]
no_action = [0,0]
av[0] = int(average)
av[1] = int(average)+1
for side in range(0,2):
for i in range(0, N):
no_action[side] += abs(av[side] - self.tab_strs[i][j][1])
if no_action[0] < no_action[1]:
self.total_action += no_action[0]
else:
self.total_action += no_action[1]
return str(self.total_action)
N = input()
for i in range(1,N+1):
T = input()
cur_case = []
from pdb import set_trace
for j in range(0,T):
cur_case.append(raw_input())
#set_trace()
cur_game = GameInstance(cur_case)
print "Case #%i: %s"%(i,cur_game.solve())
| [
"[email protected]"
] | |
e5f2b67f813053e0c4f7d0204c27f0484fd58db9 | 89e3f694021f261b95e494d2b479367bacde8251 | /followthemoney/cli/ocds.py | 536ec92f0f70666a97530d0dcc850e5d8f6e74e3 | [
"MIT"
] | permissive | dchaplinsky/followthemoney | 6f9c05f430f8bfb04f7841378fd2ee5cf9b33235 | a2a150f558acb5a1c985b9dc891c98c0fdf2f17e | refs/heads/master | 2020-09-10T08:16:14.617602 | 2019-11-14T09:15:52 | 2019-11-14T09:15:52 | 221,699,199 | 1 | 0 | MIT | 2019-11-14T13:03:41 | 2019-11-14T13:03:41 | null | UTF-8 | Python | false | false | 5,146 | py | import json
import click
import logging
from pprint import pprint # noqa
from followthemoney import model
from followthemoney.cli.cli import cli
from followthemoney.cli.util import write_object
log = logging.getLogger(__name__)
IDENTIFIERS = {
'TRADE_REGISTER': 'registrationNumber',
'TAX_ID': 'vatCode',
'ORGANIZATION_ID': 'classification',
'STATISTICAL': 'classification',
}
@cli.command('import-ocds', help="Import open contracting data")
@click.option('-i', '--infile', type=click.File('r'), default='-') # noqa
@click.option('-o', '--outfile', type=click.File('w'), default='-') # noqa
def import_ocds(infile, outfile):
try:
while True:
line = infile.readline()
if not line:
return
record = json.loads(line)
for entity in convert_record(record):
if entity.id is not None:
write_object(outfile, entity)
except BrokenPipeError:
raise click.Abort()
def clean_date(date):
if date is not None and 'T' in date:
date, _ = date.split('T', 1)
return date
def make_address(*parts):
return ' '.join((p for p in parts if p is not None))
def convert_party(party):
entity = model.make_entity('LegalEntity')
entity.make_id(party.pop('id', None))
entity.add('name', party.pop('name', None))
address = party.pop('address', {})
entity.add('country', address.pop('countryName', None))
address_text = make_address(address.pop('streetAddress', None),
address.pop('postalCode', None),
address.pop('region', None))
entity.add('address', address_text)
if len(address):
log.info("Unknown address part: %r", address.keys())
contact = party.pop('contactPoint', {})
entity.add('website', contact.pop('url', None))
entity.add('phone', contact.pop('telephone', None))
entity.add('email', contact.pop('email', None))
for identifier in party.pop('additionalIdentifiers', []):
scheme = identifier.pop('scheme', None)
prop = IDENTIFIERS.get(scheme, None)
if prop is None:
log.info("Unknown identifier scheme: %s", scheme)
continue
entity.add(prop, identifier.pop('id', None))
# pprint(party)
return entity
def convert_release(release):
for party in release.pop('parties', []):
yield convert_party(party)
buyer = release.pop('buyer', {})
authority = model.make_entity('LegalEntity')
authority.make_id(buyer.pop('id', None))
authority.add('name', buyer.pop('name', None))
yield authority
tender = release.pop('tender', {})
contract = model.make_entity('Contract')
contract.make_id(release.pop('id', None))
contract.add('authority', authority)
contract.add('name', tender.pop('title', None))
if not contract.has('name'):
contract.add('name', tender.get('id', None))
contract.add('description', tender.pop('description', None))
contract.add('procedureNumber', tender.pop('id', None))
contract.add('type', tender.pop('mainProcurementCategory', None))
value = tender.pop('value', {})
contract.add('amount', value.pop('amount', None))
contract.add('currency', value.pop('currency', None))
# pprint(tender)
yield contract
# contract.add('modifiedAt', published_date)
lots = tender.pop('lots', [])
for award in release.pop('awards', []):
ca = model.make_entity('ContractAward')
ca.make_id(contract.id, award.pop('id', None))
ca.add('contract', contract)
ca.add('date', clean_date(award.pop('date', None)))
value = award.pop('value', {})
ca.add('amount', value.pop('amount', None))
ca.add('currency', value.pop('currency', None))
reason = tender.get('procurementMethodDetails', None)
ca.add('decisionReason', reason)
for document in award.pop('documents', []):
ca.add('sourceUrl', document.get('url'))
for item in award.pop('items', []):
classification = item.pop('classification', {})
ca.add('cpvCode', classification.get('url'))
related_lots = award.pop('relatedLots', [])
for lot in lots:
if lot.get('id') in related_lots:
ca.add('role', lot.get('title'))
ca.add('summary', lot.get('description'))
for supplier in award.pop('suppliers', []):
entity = model.make_entity('LegalEntity')
entity.make_id(supplier.pop('id', None))
entity.add('name', supplier.pop('name', None))
ca.add('supplier', entity)
yield entity
# pprint(award)
yield ca
def convert_record(record):
published_date = clean_date(record.pop('publishedDate', None))
publisher = record.pop('publisher', {}).get('name')
for release in record.get('releases', []):
for entity in convert_release(release):
entity.add('publisher', publisher, quiet=True)
entity.add('modifiedAt', published_date, quiet=True)
yield entity
| [
"[email protected]"
] | |
43d460ce6a3a415277321f9a4f8658f6d7c4dbec | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_parody.py | 947ea401561ad67bf848e6ab6ddc4814d3613dd2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py |
#class header
class _PARODY():
def __init__(self,):
self.name = "PARODY"
self.definitions = [u'to copy the style of someone or something in a humorous way: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
c515faf8793eb07a829146f36ac33429993b55ef | 8ff6c3e513e17be6c51b484bed81d03150bdd175 | /2013-04-analytic/part2/ex52b.py | be5ca1d980f5e1a94ea1ffb0ae488dd765182979 | [] | no_license | ricbit/Oldies | f1a2ac520b64e43d11c250cc372d526e9febeedd | 2d884c61ac777605f7260cd4d36a13ed5a2c6a58 | refs/heads/master | 2023-04-27T20:35:19.485763 | 2023-04-26T04:45:44 | 2023-04-26T04:45:44 | 2,050,140 | 40 | 8 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | import itertools, sys
def surjection(seq):
hist = {}
for i in seq:
hist[i] = 1 + hist.get(i, 0)
m = max(hist.iterkeys())
for i in xrange(1, 1 + m):
if hist.get(i, 0) < 3:
return False
return True
def triple_surjections(n):
for seq in itertools.product(xrange(1, 1 + n / 3), repeat=n):
if surjection(seq):
yield seq
def tabular(seq):
size = 7
print "\\begin{tabular}{ %s }" % " ".join(["r"]*size)
for i in xrange((len(seq)+size-1)/size):
    print "%s \\\\" % "&".join("".join(map(str, row))
      for row in seq[i*size:i*size+size])
print "\\end{tabular}"
tabular(list(triple_surjections(int(sys.argv[1]))))
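# Usage note (illustrative): running "python ex52b.py 6" prints a LaTeX tabular
# listing every length-6 sequence over {1, 2} in which each value up to the
# maximum appears at least three times (the "triple surjections" above).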
| [
"[email protected]"
] | |
cf53263187c3025a04b1d121a9c4f9bfaa1f2106 | 3d69b7fe8fa95fcd6dbab25885f2e3e42bc891d6 | /src/nlp/classification/tf1/xlnet/prepro_utils.py | fc945d6d64a46f483a18389895831414c5f33e17 | [
"Apache-2.0"
] | permissive | wu-uw/OpenCompetition | ac652d066f667dc2b3061947af5ea0425643a1b5 | 9aa9d7a50ada1deb653d295dd8a7fe46321b9094 | refs/heads/master | 2021-01-03T04:59:28.987099 | 2020-03-02T07:49:11 | 2020-03-02T07:49:11 | 239,932,371 | 0 | 0 | Apache-2.0 | 2020-03-02T07:49:12 | 2020-02-12T05:12:02 | Python | UTF-8 | Python | false | false | 5,013 | py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unicodedata
import six
from functools import partial
SPIECE_UNDERLINE = '▁'
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def print_(*args):
new_args = []
for arg in args:
if isinstance(arg, list):
s = [printable_text(i) for i in arg]
s = ' '.join(s)
new_args.append(s)
else:
new_args.append(printable_text(arg))
print(*new_args)
def preprocess_text(
inputs,
lower=False,
remove_space=True,
keep_accents=False):
if remove_space:
outputs = ' '.join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace("``", '"').replace("''", '"')
if six.PY2 and isinstance(outputs, str):
outputs = outputs.decode('utf-8')
if not keep_accents:
outputs = unicodedata.normalize('NFKD', outputs)
outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
if lower:
outputs = outputs.lower()
return outputs
def encode_pieces(sp_model, text, return_unicode=True, sample=False):
# return_unicode is used only for py2
# note(zhiliny): in some systems, sentencepiece only accepts str for py2
if six.PY2 and isinstance(text, unicode):
text = text.encode('utf-8')
if not sample:
pieces = sp_model.EncodeAsPieces(text)
else:
pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
cur_pieces = sp_model.EncodeAsPieces(
piece[:-1].replace(SPIECE_UNDERLINE, ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
# note(zhiliny): convert back to unicode for py2
if six.PY2 and return_unicode:
ret_pieces = []
for piece in new_pieces:
if isinstance(piece, str):
piece = piece.decode('utf-8')
ret_pieces.append(piece)
new_pieces = ret_pieces
return new_pieces
def encode_ids(sp_model, text, sample=False):
pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample)
ids = [sp_model.PieceToId(piece) for piece in pieces]
return ids
if __name__ == '__main__':
import sentencepiece as spm
sp = spm.SentencePieceProcessor()
sp.load('sp10m.uncased.v3.model')
print_(u'I was born in 2000, and this is falsé.')
print_(u'ORIGINAL', sp.EncodeAsPieces(
u'I was born in 2000, and this is falsé.'))
print_(
u'OURS',
encode_pieces(
sp,
u'I was born in 2000, and this is falsé.'))
print(encode_ids(sp, u'I was born in 2000, and this is falsé.'))
print_('')
prepro_func = partial(preprocess_text, lower=True)
print_(prepro_func('I was born in 2000, and this is falsé.'))
print_('ORIGINAL', sp.EncodeAsPieces(
prepro_func('I was born in 2000, and this is falsé.')))
print_('OURS', encode_pieces(sp, prepro_func(
'I was born in 2000, and this is falsé.')))
print(encode_ids(sp, prepro_func('I was born in 2000, and this is falsé.')))
print_('')
print_('I was born in 2000, and this is falsé.')
print_('ORIGINAL', sp.EncodeAsPieces(
'I was born in 2000, and this is falsé.'))
print_('OURS', encode_pieces(sp, 'I was born in 2000, and this is falsé.'))
print(encode_ids(sp, 'I was born in 2000, and this is falsé.'))
print_('')
print_('I was born in 92000, and this is falsé.')
print_('ORIGINAL', sp.EncodeAsPieces(
'I was born in 92000, and this is falsé.'))
print_('OURS', encode_pieces(sp, 'I was born in 92000, and this is falsé.'))
print(encode_ids(sp, 'I was born in 92000, and this is falsé.'))
| [
"[email protected]"
] | |
44cdec8d130987c667d3ddd3a464bad33f309eeb | 5dd47abf7061201d9378e73e51f08fbb314ba2fd | /envdsys/envcontacts/migrations/0050_auto_20210219_2128.py | b9ec01c328ffb1adf3231e767f6654bbfec32bcf | [
"Unlicense"
] | permissive | NOAA-PMEL/envDataSystem | 4d264ae5209015e4faee648f37608d68a4461d0a | 4db4a3569d2329658799a3eef06ce36dd5c0597d | refs/heads/master | 2023-02-23T22:33:14.334737 | 2021-07-22T01:09:16 | 2021-07-22T01:09:16 | 191,809,007 | 1 | 0 | Unlicense | 2023-02-08T00:45:54 | 2019-06-13T17:50:03 | Python | UTF-8 | Python | false | false | 1,175 | py | # Generated by Django 3.1.7 on 2021-02-19 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envcontacts', '0049_auto_20210219_2127'),
]
operations = [
migrations.AlterField(
model_name='person',
name='email1_type',
field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('O', 'Other')], default='W', max_length=1),
),
migrations.AlterField(
model_name='person',
name='email2_type',
field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('O', 'Other')], default='W', max_length=1),
),
migrations.AlterField(
model_name='person',
name='phone1_type',
field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('M', 'Mobile'), ('O', 'Other')], default='M', max_length=1),
),
migrations.AlterField(
model_name='person',
name='phone2_type',
field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('M', 'Mobile'), ('O', 'Other')], default='M', max_length=1),
),
]
| [
"[email protected]"
] | |
398cdcff7d1ab5344ac51ed8db7f7047b69180be | 20ace38b89c0ebaa0738753fcd11b0fdd4ed21cd | /CMSSW_8_0_24/src/HeavyIonsAnalysis/JetAnalysis/python/jets/akSoftDrop2PFJetSequence_PbPb_mb_cff.py | b80e6b111102b987d3b344d8044b1d74a6fdc23b | [] | no_license | ssanders50/pPb_2016_v0 | 3c32c2920067a2f8a0a7a7fadba6225babf9a905 | 9fc4ae61cf4343c88ce6666f55c0738f963754a3 | refs/heads/master | 2020-12-12T16:30:41.253014 | 2020-02-14T21:51:17 | 2020-02-14T21:51:17 | 234,162,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,103 | py |
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
akSoftDrop2PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("akSoftDrop2PFJets"),
matched = cms.InputTag("ak2HiCleanedGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.2
)
akSoftDrop2PFmatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("akSoftDrop2HiGenJets"),
matched = cms.InputTag("ak2HiCleanedGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.2
)
akSoftDrop2PFparton = patJetPartonMatch.clone(src = cms.InputTag("akSoftDrop2PFJets")
)
akSoftDrop2PFcorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akSoftDrop2PFJets"),
payload = "AK2PF_offline"
)
akSoftDrop2PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akSoftDrop2CaloJets'))
#akSoftDrop2PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak2HiCleanedGenJets'))
akSoftDrop2PFbTagger = bTaggers("akSoftDrop2PF",0.2)
#create objects locally since they dont load properly otherwise
#akSoftDrop2PFmatch = akSoftDrop2PFbTagger.match
akSoftDrop2PFparton = patJetPartonMatch.clone(src = cms.InputTag("akSoftDrop2PFJets"), matched = cms.InputTag("selectedPartons"))
akSoftDrop2PFPatJetFlavourAssociationLegacy = akSoftDrop2PFbTagger.PatJetFlavourAssociationLegacy
akSoftDrop2PFPatJetPartons = akSoftDrop2PFbTagger.PatJetPartons
akSoftDrop2PFJetTracksAssociatorAtVertex = akSoftDrop2PFbTagger.JetTracksAssociatorAtVertex
akSoftDrop2PFJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akSoftDrop2PFSimpleSecondaryVertexHighEffBJetTags = akSoftDrop2PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akSoftDrop2PFSimpleSecondaryVertexHighPurBJetTags = akSoftDrop2PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akSoftDrop2PFCombinedSecondaryVertexBJetTags = akSoftDrop2PFbTagger.CombinedSecondaryVertexBJetTags
akSoftDrop2PFCombinedSecondaryVertexV2BJetTags = akSoftDrop2PFbTagger.CombinedSecondaryVertexV2BJetTags
akSoftDrop2PFJetBProbabilityBJetTags = akSoftDrop2PFbTagger.JetBProbabilityBJetTags
akSoftDrop2PFSoftPFMuonByPtBJetTags = akSoftDrop2PFbTagger.SoftPFMuonByPtBJetTags
akSoftDrop2PFSoftPFMuonByIP3dBJetTags = akSoftDrop2PFbTagger.SoftPFMuonByIP3dBJetTags
akSoftDrop2PFTrackCountingHighEffBJetTags = akSoftDrop2PFbTagger.TrackCountingHighEffBJetTags
akSoftDrop2PFTrackCountingHighPurBJetTags = akSoftDrop2PFbTagger.TrackCountingHighPurBJetTags
akSoftDrop2PFPatJetPartonAssociationLegacy = akSoftDrop2PFbTagger.PatJetPartonAssociationLegacy
akSoftDrop2PFImpactParameterTagInfos = akSoftDrop2PFbTagger.ImpactParameterTagInfos
akSoftDrop2PFImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akSoftDrop2PFJetProbabilityBJetTags = akSoftDrop2PFbTagger.JetProbabilityBJetTags
akSoftDrop2PFSecondaryVertexTagInfos = akSoftDrop2PFbTagger.SecondaryVertexTagInfos
akSoftDrop2PFSimpleSecondaryVertexHighEffBJetTags = akSoftDrop2PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akSoftDrop2PFSimpleSecondaryVertexHighPurBJetTags = akSoftDrop2PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akSoftDrop2PFCombinedSecondaryVertexBJetTags = akSoftDrop2PFbTagger.CombinedSecondaryVertexBJetTags
akSoftDrop2PFCombinedSecondaryVertexV2BJetTags = akSoftDrop2PFbTagger.CombinedSecondaryVertexV2BJetTags
akSoftDrop2PFSecondaryVertexNegativeTagInfos = akSoftDrop2PFbTagger.SecondaryVertexNegativeTagInfos
akSoftDrop2PFNegativeSimpleSecondaryVertexHighEffBJetTags = akSoftDrop2PFbTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akSoftDrop2PFNegativeSimpleSecondaryVertexHighPurBJetTags = akSoftDrop2PFbTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akSoftDrop2PFNegativeCombinedSecondaryVertexBJetTags = akSoftDrop2PFbTagger.NegativeCombinedSecondaryVertexBJetTags
akSoftDrop2PFPositiveCombinedSecondaryVertexBJetTags = akSoftDrop2PFbTagger.PositiveCombinedSecondaryVertexBJetTags
akSoftDrop2PFNegativeCombinedSecondaryVertexV2BJetTags = akSoftDrop2PFbTagger.NegativeCombinedSecondaryVertexV2BJetTags
akSoftDrop2PFPositiveCombinedSecondaryVertexV2BJetTags = akSoftDrop2PFbTagger.PositiveCombinedSecondaryVertexV2BJetTags
akSoftDrop2PFSoftPFMuonsTagInfos = akSoftDrop2PFbTagger.SoftPFMuonsTagInfos
akSoftDrop2PFSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akSoftDrop2PFSoftPFMuonBJetTags = akSoftDrop2PFbTagger.SoftPFMuonBJetTags
akSoftDrop2PFSoftPFMuonByIP3dBJetTags = akSoftDrop2PFbTagger.SoftPFMuonByIP3dBJetTags
akSoftDrop2PFSoftPFMuonByPtBJetTags = akSoftDrop2PFbTagger.SoftPFMuonByPtBJetTags
akSoftDrop2PFNegativeSoftPFMuonByPtBJetTags = akSoftDrop2PFbTagger.NegativeSoftPFMuonByPtBJetTags
akSoftDrop2PFPositiveSoftPFMuonByPtBJetTags = akSoftDrop2PFbTagger.PositiveSoftPFMuonByPtBJetTags
akSoftDrop2PFPatJetFlavourIdLegacy = cms.Sequence(akSoftDrop2PFPatJetPartonAssociationLegacy*akSoftDrop2PFPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akSoftDrop2PFPatJetFlavourAssociation = akSoftDrop2PFbTagger.PatJetFlavourAssociation
#akSoftDrop2PFPatJetFlavourId = cms.Sequence(akSoftDrop2PFPatJetPartons*akSoftDrop2PFPatJetFlavourAssociation)
akSoftDrop2PFJetBtaggingIP = cms.Sequence(akSoftDrop2PFImpactParameterTagInfos *
(akSoftDrop2PFTrackCountingHighEffBJetTags +
akSoftDrop2PFTrackCountingHighPurBJetTags +
akSoftDrop2PFJetProbabilityBJetTags +
akSoftDrop2PFJetBProbabilityBJetTags
)
)
akSoftDrop2PFJetBtaggingSV = cms.Sequence(akSoftDrop2PFImpactParameterTagInfos
*
akSoftDrop2PFSecondaryVertexTagInfos
* (akSoftDrop2PFSimpleSecondaryVertexHighEffBJetTags+
akSoftDrop2PFSimpleSecondaryVertexHighPurBJetTags+
akSoftDrop2PFCombinedSecondaryVertexBJetTags+
akSoftDrop2PFCombinedSecondaryVertexV2BJetTags
)
)
akSoftDrop2PFJetBtaggingNegSV = cms.Sequence(akSoftDrop2PFImpactParameterTagInfos
*
akSoftDrop2PFSecondaryVertexNegativeTagInfos
* (akSoftDrop2PFNegativeSimpleSecondaryVertexHighEffBJetTags+
akSoftDrop2PFNegativeSimpleSecondaryVertexHighPurBJetTags+
akSoftDrop2PFNegativeCombinedSecondaryVertexBJetTags+
akSoftDrop2PFPositiveCombinedSecondaryVertexBJetTags+
akSoftDrop2PFNegativeCombinedSecondaryVertexV2BJetTags+
akSoftDrop2PFPositiveCombinedSecondaryVertexV2BJetTags
)
)
akSoftDrop2PFJetBtaggingMu = cms.Sequence(akSoftDrop2PFSoftPFMuonsTagInfos * (akSoftDrop2PFSoftPFMuonBJetTags
+
akSoftDrop2PFSoftPFMuonByIP3dBJetTags
+
akSoftDrop2PFSoftPFMuonByPtBJetTags
+
akSoftDrop2PFNegativeSoftPFMuonByPtBJetTags
+
akSoftDrop2PFPositiveSoftPFMuonByPtBJetTags
)
)
akSoftDrop2PFJetBtagging = cms.Sequence(akSoftDrop2PFJetBtaggingIP
*akSoftDrop2PFJetBtaggingSV
*akSoftDrop2PFJetBtaggingNegSV
# *akSoftDrop2PFJetBtaggingMu
)
akSoftDrop2PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akSoftDrop2PFJets"),
genJetMatch = cms.InputTag("akSoftDrop2PFmatch"),
genPartonMatch = cms.InputTag("akSoftDrop2PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akSoftDrop2PFcorr")),
JetPartonMapSource = cms.InputTag("akSoftDrop2PFPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akSoftDrop2PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akSoftDrop2PFJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akSoftDrop2PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akSoftDrop2PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akSoftDrop2PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("akSoftDrop2PFCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akSoftDrop2PFJetBProbabilityBJetTags"),
cms.InputTag("akSoftDrop2PFJetProbabilityBJetTags"),
#cms.InputTag("akSoftDrop2PFSoftPFMuonByPtBJetTags"),
#cms.InputTag("akSoftDrop2PFSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akSoftDrop2PFTrackCountingHighEffBJetTags"),
cms.InputTag("akSoftDrop2PFTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akSoftDrop2PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akSoftDrop2PFNjettiness = Njettiness.clone(
src = cms.InputTag("akSoftDrop2PFJets"),
R0 = cms.double( 0.2)
)
akSoftDrop2PFpatJetsWithBtagging.userData.userFloats.src += ['akSoftDrop2PFNjettiness:tau1','akSoftDrop2PFNjettiness:tau2','akSoftDrop2PFNjettiness:tau3']
akSoftDrop2PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akSoftDrop2PFpatJetsWithBtagging"),
genjetTag = 'ak2HiGenJets',
rParam = 0.2,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
trackTag = cms.InputTag("hiGeneralTracks"),
fillGenJets = True,
isMC = True,
doSubEvent = True,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akSoftDrop2PF"),
jetName = cms.untracked.string("akSoftDrop2PF"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(True),
doSubJets = cms.untracked.bool(True),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("akSoftDrop2GenJets"),
doGenTaus = True
)
akSoftDrop2PFJetSequence_mc = cms.Sequence(
#akSoftDrop2PFclean
#*
akSoftDrop2PFmatch
#*
#akSoftDrop2PFmatchGroomed
*
akSoftDrop2PFparton
*
akSoftDrop2PFcorr
*
#akSoftDrop2PFJetID
#*
akSoftDrop2PFPatJetFlavourIdLegacy
#*
#akSoftDrop2PFPatJetFlavourId # Use legacy algo till PU implemented
*
akSoftDrop2PFJetTracksAssociatorAtVertex
*
akSoftDrop2PFJetBtagging
*
akSoftDrop2PFNjettiness #No constituents for calo jets in pp. Must be removed for pp calo jets but I'm not sure how to do this transparently (Marta)
*
akSoftDrop2PFpatJetsWithBtagging
*
akSoftDrop2PFJetAnalyzer
)
akSoftDrop2PFJetSequence_data = cms.Sequence(akSoftDrop2PFcorr
*
#akSoftDrop2PFJetID
#*
akSoftDrop2PFJetTracksAssociatorAtVertex
*
akSoftDrop2PFJetBtagging
*
akSoftDrop2PFNjettiness
*
akSoftDrop2PFpatJetsWithBtagging
*
akSoftDrop2PFJetAnalyzer
)
akSoftDrop2PFJetSequence_jec = cms.Sequence(akSoftDrop2PFJetSequence_mc)
akSoftDrop2PFJetSequence_mb = cms.Sequence(akSoftDrop2PFJetSequence_mc)
akSoftDrop2PFJetSequence = cms.Sequence(akSoftDrop2PFJetSequence_mb)
| [
"[email protected]"
] | |
02ba996948b22fbb2fda69fed6c6a4eb1ca4e2c6 | b528b880b1ae104cc03118b2ca1421b8bfb9bd00 | /Django/djangoEnv/bin/easy_install-2.7 | 70ede9d56547f116a1620382639c99b94f875a82 | [] | no_license | hkneal/DojoAssignments | c83288555913aa6a1071845353ab91cc159e0bdd | 4f9c6999853a16cab6ab7e9d7b99463e6b418016 | refs/heads/master | 2021-01-18T16:48:28.139859 | 2018-05-17T04:43:50 | 2018-05-17T04:43:50 | 86,770,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | 7 | #!/Users/HKN/DojoAssignments/Django/djangoEnv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
4b50d3a9c44f387818b24514e492f94d5951050f | e0527bce5c53a196752d3a16adf50cb60754de5f | /03-Workshop/Workshop-Questions/C_fun_with_flags.py | 8627af965e3f1c5c0e3e439a2dc9c83893f634a1 | [] | no_license | ARWA-ALraddadi/python-tutorial-for-beginners | ddeb657f419fbc176bea273bc9fb6b88d1894191 | 21cedfc47871ca4d25c2382464c60ab0a2121205 | refs/heads/master | 2023-06-30T20:24:30.688800 | 2021-08-08T08:22:29 | 2021-08-08T08:22:29 | 193,094,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | #--------------------------------------------------------------------
#
# Fun With Flags
#
# In the lecture demonstration program "stars and stripes" we saw
# how function definitions allowed us to reuse code that drew a
# star and a rectangle (stripe) multiple times to create a copy of
# the United States flag.
#
# As a further example of the way functions allow us to reuse code,
# in this exercise we will import the flag_elements module into
# this program and create a different flag. In the PDF document
# accompanying this file you will find several flags which can be
# constructed easily using the "star" and "stripe" functions already
# defined. Choose one of these and try to draw it.
#
# First we import the two functions we need (make sure a copy of file
# flag_elements.py is in the same folder as this one)
from flag_elements import star, stripe
# Import the turtle graphics functions
from turtle import *
# Set up the drawing environment
setup(600, 400)
##### PUT YOUR CODE FOR DRAWING THE FLAG HERE
pass
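# A hedged sketch of one way to start, kept as comments: the exact signatures
# of star() and stripe() live in flag_elements.py, so the position, size and
# colour arguments below are only assumptions.
#
#     stripe(-300, 200, 600, 400, "white")   # background field
#     stripe(-300, 200, 600, 130, "red")     # top band
#     star(0, -50, 60, "blue")               # single central star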
# Exit gracefully
hideturtle()
done()
| [
"[email protected]"
] | |
fe416a0e81300a32016388151c240e79727ff3ad | e7ec251afc62616525c573c1b1b9e6416454aaaa | /bcbio/pipeline/__init__.py | 298c4775b143c01b611e1483575f90280a9da72a | [
"MIT"
] | permissive | YTLogos/bcbio-nextgen | 157e023341b9085b6c3f36d68c2b68ae31e063f2 | f964a25ab74a31551273b7e50518f3451c90f473 | refs/heads/master | 2022-12-28T15:11:28.127131 | 2017-09-20T18:58:45 | 2017-09-20T18:59:57 | 104,303,076 | 1 | 1 | MIT | 2022-12-12T12:18:27 | 2017-09-21T04:52:21 | Python | UTF-8 | Python | false | false | 598 | py | """High level code for driving a next-gen analysis pipeline.
This structures processing steps into the following modules:
- lane.py: Analyze a single fastq file.
- fastq.py: Utilities to retrieve fastq files.
- alignment.py: Align to a reference genome.
- sample.py: Analyze a sample, which may consist of multiple lanes or
barcoded samples on a lane.
- merge.py: Merge multiple sample files in one processing run.
- variation.py: Calculate SNP/indel variations for a sample.
- qcsummary.py: Quality control, alignment metrics and summary information.
"""
| [
"[email protected]"
] | |
f7db4248308429362c6ea3a4382920078bbd0636 | 465097858def678018ff76865bb09d34735d8eb9 | /mysite/blog/forms.py | 0d517f8db2dec175dc7af7cd362d511e1f0ffa33 | [] | no_license | bunnycast/django_girls | f9c3f3eb30955db64d2e643109bd2aa483b0f4b7 | fc24a8301dd55d98b790c8fb19bd9e68129a7c63 | refs/heads/master | 2022-11-13T09:12:30.860813 | 2020-07-02T02:28:51 | 2020-07-02T02:28:51 | 275,992,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from django import forms
from blog.models import Post, Comment
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'text',)
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('author', 'text',)
| [
"[email protected]"
] | |
5e9db43277324b837743fd0c041324c531ce89b3 | 6dc4d2b5abe4317f154dd0e81f45fda3501e7e52 | /Syntax/comments_in_python.py | a837f69ee613728945e70ed6adf880d271a7d648 | [] | no_license | Sanket1228/pythonBasics | 89376a7bbd3292d0f19fbbc8b3baae576abf9d75 | 94f68fa888cb1d8f61c2466ad8b395c769fe6f37 | refs/heads/master | 2023-06-26T22:54:03.774872 | 2021-07-22T12:31:10 | 2021-07-22T12:31:10 | 280,104,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | #This is comment
print("Hello, World ! ")
| [
"[email protected]"
] | |
77d9d1e0b97cfe7890c04957f93b007a82d99098 | d318975fdf4daeccecbf90c24aba5009d51637eb | /server/env/bin/symilar | 2bc1d53bf60868c4ab248a62a2e1b4c6610295af | [] | no_license | Jrius4/data-shuffle | 759702914b052c737b75f8cf5f84170f4e0cae40 | 4a0e7ac500d91903fcf4806d878ad01083068119 | refs/heads/master | 2023-01-24T10:38:59.467067 | 2019-10-13T20:01:33 | 2019-10-13T20:01:33 | 214,883,377 | 0 | 0 | null | 2023-01-04T12:23:25 | 2019-10-13T19:47:20 | Python | UTF-8 | Python | false | false | 274 | #!/home/jrius/Kaxiuz/investment/datastore/v1-UI/server/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
| [
"[email protected]"
] | ||
b305b26622db5f2f5eb4e89f70911e77ea7254d5 | f6f632bee57875e76e1a2aa713fdbe9f25e18d66 | /python/CodingInterviews_2/30_bao-han-minhan-shu-de-zhan-lcof.py | e9a86f1b3ff003b07e9bfbf7235100b290513e37 | [] | no_license | Wang-Yann/LeetCodeMe | b50ee60beeeb3661869bb948bef4fbe21fc6d904 | 44765a7d89423b7ec2c159f70b1a6f6e446523c2 | refs/heads/master | 2023-08-07T05:31:23.428240 | 2021-09-30T15:33:53 | 2021-09-30T15:33:53 | 253,497,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-05-06 23:17:56
# @Last Modified : 2020-05-06 23:17:56
# @Mail : [email protected]
# @Version : alpha-1.0
# Define a stack data structure and implement a min function inside it that
# returns the stack's minimum element. In this stack, calls to min, push and
# pop must all run in O(1) time.
#
# Example:
#
# MinStack minStack = new MinStack();
# minStack.push(-2);
# minStack.push(0);
# minStack.push(-3);
# minStack.min();   --> returns -3.
# minStack.pop();
# minStack.top();   --> returns 0.
# minStack.min();   --> returns -2.
#
# Constraints:
#
# The total number of calls to each function does not exceed 20,000.
#
# Note: this problem is the same as problem 155 on the main site:
# https://leetcode-cn.com/problems/min-stack/
# Related Topics  Stack  Design
# 👍 28 👎 0
import traceback
import pytest
import math, fractions, operator
from typing import List
import collections, bisect, heapq
import functools, itertools
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.stack=[]
def push(self, x: int) -> None:
if self.stack:
current_min=min(x,self.stack[-1][0])
self.stack.append((current_min,x))
else:
self.stack.append((x,x))
def pop(self) -> None:
return self.stack.pop()[1]
def top(self) -> int:
return self.stack[-1][1]
def min(self) -> int:
return self.stack[-1][0]
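# Illustrative self-check added here (not part of the original solution); it
# simply replays the example from the problem statement above.
if __name__ == "__main__":
    min_stack = MinStack()
    min_stack.push(-2)
    min_stack.push(0)
    min_stack.push(-3)
    assert min_stack.min() == -3
    min_stack.pop()
    assert min_stack.top() == 0
    assert min_stack.min() == -2
    print("MinStack example OK")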
| [
"[email protected]"
] | |
36173a6b0f8010fa465e6f58b4576b374a85c962 | 24b1fa231f4e89f1a588c09ebee6fe4da6915c53 | /Tutorials/Canvas/Fundamental-Theorem-Algebra.py | 39eb49279e27df600cc9cb59f442cec0a5a30844 | [] | no_license | cyrt63/demos | a429214154cf0e51b58710f67670e1d902bfcac6 | a4b54b862dba4ad33a707511896324829f4cc7b1 | refs/heads/master | 2020-04-08T13:51:40.823058 | 2015-04-21T14:01:41 | 2015-04-21T14:01:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | from browser import *
from workbench import *
from math import *
from units import *
from easel import *
from eight import *
popup = window.open("","","width=800,height=600")
popup.document.body.style.backgroundColor = "202020"
popup.document.body.style.overflow = "hidden"
popup.document.title = "Visualizing Geometric Algebra with WebGL"
canvas2D = popup.document.createElement("canvas")
canvas2D.style.position = "absolute"
canvas2D.style.top = "0px"
canvas2D.style.left = "0px"
workbench2D = Workbench2D(canvas2D, popup)
space2D = Stage(canvas2D)
space2D.autoClear = True
font = "20px Helvetica"
output = Text(popup.document.title + ". Hit Esc key to exit.", font, "white")
output.x = 100
output.y = 60
space2D.addChild(output)
stats = window.Stats()
stats.setMode(0)
stats.domElement.style.position = 'absolute'
stats.domElement.style.left = '0px'
stats.domElement.style.top = '0px'
popup.document.body.appendChild(stats.domElement)
def setUp():
workbench2D.setUp()
def tick(t):
stats.begin()
space2D.render()
stats.end()
def terminate(t):
return False
def tearDown(e):
popup.close()
if e:
print "Error during animation: %s" % (e)
else:
print "Goodbye!"
workbench2D.tearDown()
runner = windowAnimationRunner(tick, terminate, setUp, tearDown, popup)
runner.start()
| [
"[email protected]"
] | |
9a9b603dacd11b6877b6f71b4b1dbcf95b157098 | cd0f3fa5c3b202599812ac8b49e374fe2b2f2e8b | /ExerciciosFixacao/Cap08/C08EXFIX01.py | 2da857eb09708d1fde3beae9840f79314ba2abba | [] | no_license | J-AugustoManzano/livro_Python | 46c14dc4bc5fb361d850fcd361477a952de172c2 | e42b79ef78c6b1ab936fe9a13d32ddc94deeb2a8 | refs/heads/main | 2023-06-25T03:10:30.297226 | 2023-06-08T23:34:54 | 2023-06-08T23:34:54 | 354,116,051 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | a = []
for i in range(10):
    a.append(input("Enter name {0:2}: ".format(i + 1)))
print()
for i in range(10):
    print("Name {0:2}: {1}.".format(i + 1, a[i]))
enter = input("\nPress <Enter> to finish... ")
| [
"[email protected]"
] | |
e32226900cf40f40d2d4e42c722d43e09866fa5f | b65f31d9d273c3d4bb826ff83a805368570bcd4d | /Lesson 13 - Email Search/mailgui.py | c5fb8aa0db53048004cfdfd786ea43e8f8f717fb | [] | no_license | kobaltkween/python2 | 3fde6cc9ca1413b900c87656d8ceb99cb3f34f42 | f7e529abd303b65f0b794c8a9ed87dbf085541a8 | refs/heads/master | 2020-12-31T05:09:39.297693 | 2016-04-13T23:27:10 | 2016-04-13T23:27:10 | 56,192,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,280 | py | from tkinter import *
from maildb import msgs
import datetime
import mysql.connector as mysqlc
from database import loginInfo
def getDate(s):
"""
Assumes a date form of yyyy-mm-dd, returns a corresponding datetime.date.
"""
syear = s[:4]
smonth = s[5:7]
sday = s[8:]
return datetime.date(int(syear), int(smonth), int(sday))
class Application(Frame):
def __init__(self, master = None):
"""
Establish the window structure, leaving some widgets accessible
as app instance variables. Connect button clicks to searchMail
method and subject double-clicks to displayMail method.
"""
Frame.__init__(self, master)
self.master.rowconfigure(0, weight = 1)
self.master.columnconfigure(0, weight = 1)
self.grid(sticky = W + E + N + S)
l0 = Label(self, text = "Email Database Search", font = ("Helvetica", 16))
l0.grid(row = 0, column = 1, columnspan = 2)
l1 = Label(self, text = "Not Before (yyyy-mm-dd):")
l1.grid(row = 1, column = 1, sticky = E + N + S)
self.mindate = Entry(self)
self.mindate.grid(row = 1, column = 2, sticky = W + N + S)
l2 = Label(self, text="Not After (yyyy-mm-dd):")
l2.grid(row = 2, column = 1, sticky = E + N + S)
self.maxdate = Entry(self)
self.maxdate.grid(row = 2, column = 2, sticky = W + N + S)
l3 = Label(self, text= "Sender's E-mail Contains:")
l3.grid(row = 3, column = 1, sticky = E + N + S)
self.addsearch = Entry(self)
self.addsearch.grid(row = 3, column = 2, sticky = W + N + S)
l4 = Label (self, text = "Sender's Name Contains:")
l4.grid(row = 4, column = 1, sticky = E + N + S)
self.namesearch = Entry(self)
self.namesearch.grid(row = 4, column = 2, sticky = W + N + S)
button = Button(self, text = "Search", command = self.searchMail)
button.grid(row = 5, column = 2)
self.msgsubs = Listbox(self, height = 10, width = 100)
self.msgsubs.grid(row = 8, column = 1, columnspan = 2)
self.msgsubs.bind("<Double-Button-1>", self.displayMail)
self.message = Text(self, width = 100)
self.message.grid(row = 9, column = 1, columnspan = 2)
def searchMail(self):
"""
Take the database search parameters provided by the user
(trying to make sense of the dates) and select the appropriate
messages from the database, displaying the subject lines of the
messages in a scrolling selection list.
"""
mindate = self.mindate.get()
if not mindate:
mindate = None
else:
mindate = getDate(mindate)
maxdate = self.maxdate.get()
if not maxdate:
maxdate = None
else:
maxdate = getDate(maxdate)
addsearch = self.addsearch.get()
if not addsearch:
addsearch = None
namesearch = self.namesearch.get()
if not namesearch:
namesearch = None
conn = mysqlc.Connect(**loginInfo)
curs = conn.cursor()
table = "testMessage"
self.msglist = msgs(conn, curs, table, mindate = mindate, maxdate = maxdate, addsearch = addsearch, namesearch = namesearch)
self.msgsubs.delete(0, END)
for pk, msg in self.msglist:
self.msgsubs.insert(END, msg['subject'])
def displayMail(self, event):
"""
        Display the message corresponding to the subject line the
user just clicked on.
"""
indexes = self.msgsubs.curselection()
if len(indexes) != 1:
return
self.message.delete(1.0, END)
pk, msg = self.msglist[int(indexes[0])]
for headerName in "Subject", "Date", "From":
hdr = msg[headerName]
if hdr:
self.message.insert(INSERT, "{0}: {1}\n".format(headerName, hdr))
self.message.insert(END, "\n")
if msg.is_multipart():
self.message.insert(END, "MULTIPART MESSAGE - SORRY!")
self.message.insert(END, msg.get_payload())
if __name__ == "__main__":
root = Tk()
app = Application(master = root)
app.searchMail()
app.mainloop()
| [
"[email protected]"
] | |
86fd6f568e1499f023fb669731c15d2a3fb6510b | 9cd10b1bb27bd31259b278a6339d1101144f5e7b | /data/__init__.py | 414c8749c575ec64d1c53890ce8dcb8a0c6853d9 | [] | no_license | wangfin/Earthquake-Electromagnetic-Anomaly-Detection | ed2d8a12da1ebec456dfc460592cead5abd23352 | de0ad9b44979fbc6b4cecccc592f663b17a7ee04 | refs/heads/master | 2023-01-28T06:39:16.252684 | 2020-11-24T07:40:41 | 2020-11-24T07:40:53 | 290,426,971 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | #!/usr/bin/env python
# @Time : 2020/8/26 15:53
# @Author : wb
# @File : __init__.py
"[email protected]"
] | |
1c137c520a51b109eb7c9e5c70390f86272bb782 | 39759112ee3a84aa78b15be8cc4888ff6a6b1bc0 | /webcast/admin.py | e804dbbc9ed63d84a585a3bea44e642bd77059d5 | [] | no_license | ecolemo/showbox | bd8b5c8eb30fc3704a7aaf559c0fa0820014a8f7 | 6cb0f3d6394897ebb34f0602787793c8a49f0953 | refs/heads/master | 2021-01-22T14:45:38.704992 | 2011-12-03T05:46:57 | 2011-12-03T05:46:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | from django.contrib import admin
from showbox.webcast.models import *
admin.site.register(Feed)
admin.site.register(Channel)
| [
"[email protected]"
] | |
c980620389a0801db5b4c61284ea6fb1efc63e96 | e11a1d6d38227bdfaef88eb06386d719b5c7ade9 | /tests/test_mail_parser.py | 0305664222fbc9f8940e85e22500786222a35147 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | spankders/mail-parser | 3d955d3bec118806cc7a7a5d492ed9152ec2fcc7 | 29196a76851dfa426b59f8141510cb8808ed5ec1 | refs/heads/master | 2020-04-28T06:33:08.552262 | 2019-02-05T22:15:18 | 2019-02-05T22:15:18 | 175,062,966 | 1 | 0 | Apache-2.0 | 2019-03-11T18:43:09 | 2019-03-11T18:43:09 | null | UTF-8 | Python | false | false | 21,647 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016 Fedele Mantuano (https://twitter.com/fedelemantuano)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import logging
import os
import six
import sys
import unittest
base_path = os.path.realpath(os.path.dirname(__file__))
root = os.path.join(base_path, '..')
sys.path.append(root)
logging.getLogger().addHandler(logging.NullHandler())
import mailparser
from mailparser.utils import (
convert_mail_date,
fingerprints,
get_header,
get_mail_keys,
get_to_domains,
msgconvert,
ported_open,
ported_string,
receiveds_parsing,
parse_received,
)
from mailparser.exceptions import MailParserEnvironmentError
mail_test_1 = os.path.join(base_path, 'mails', 'mail_test_1')
mail_test_2 = os.path.join(base_path, 'mails', 'mail_test_2')
mail_test_3 = os.path.join(base_path, 'mails', 'mail_test_3')
mail_test_4 = os.path.join(base_path, 'mails', 'mail_test_4')
mail_test_5 = os.path.join(base_path, 'mails', 'mail_test_5')
mail_test_6 = os.path.join(base_path, 'mails', 'mail_test_6')
mail_test_7 = os.path.join(base_path, 'mails', 'mail_test_7')
mail_test_8 = os.path.join(base_path, 'mails', 'mail_test_8')
mail_test_9 = os.path.join(base_path, 'mails', 'mail_test_9')
mail_test_10 = os.path.join(base_path, 'mails', 'mail_test_10')
mail_test_11 = os.path.join(base_path, 'mails', 'mail_test_11')
mail_test_12 = os.path.join(base_path, 'mails', 'mail_test_12')
mail_test_13 = os.path.join(base_path, 'mails', 'mail_test_13')
mail_malformed_1 = os.path.join(base_path, 'mails', 'mail_malformed_1')
mail_malformed_2 = os.path.join(base_path, 'mails', 'mail_malformed_2')
mail_malformed_3 = os.path.join(base_path, 'mails', 'mail_malformed_3')
mail_outlook_1 = os.path.join(base_path, 'mails', 'mail_outlook_1')
class TestMailParser(unittest.TestCase):
def setUp(self):
self.all_mails = (
mail_test_1,
mail_test_2,
mail_test_3,
mail_test_4,
mail_test_5,
mail_test_6,
mail_test_7,
mail_test_8,
mail_test_9,
mail_test_10,
mail_test_11,
mail_test_12,
mail_test_13,
mail_malformed_1,
mail_malformed_2,
mail_malformed_3)
def test_html_field(self):
mail = mailparser.parse_from_file(mail_malformed_1)
self.assertIsInstance(mail.text_html, list)
self.assertIsInstance(mail.text_html_json, six.text_type)
self.assertEqual(len(mail.text_html), 1)
def test_get_mail_keys(self):
mail = mailparser.parse_from_file(mail_test_11)
all_parts = get_mail_keys(mail.message)
mains_parts = get_mail_keys(mail.message, False)
self.assertNotEqual(all_parts, mains_parts)
self.assertIn("message-id", mains_parts)
self.assertIn("x-filterd-recvd-size", all_parts)
self.assertNotIn("x-filterd-recvd-size", mains_parts)
def test_mail_partial(self):
mail = mailparser.parse_from_file(mail_test_10)
self.assertNotEqual(mail.mail, mail.mail_partial)
self.assertIn("message-id", mail.mail_partial)
self.assertIn("x-ibm-av-version", mail.mail)
self.assertNotIn("x-ibm-av-version", mail.mail_partial)
result = mail.mail_partial_json
self.assertIsInstance(result, six.text_type)
def test_not_parsed_received(self):
mail = mailparser.parse_from_file(mail_test_9)
for i in mail.received:
self.assertNotIn("raw", i)
self.assertIn("hop", i)
def test_issue_received(self):
mail = mailparser.parse_from_file(mail_test_8)
for i in mail.received:
self.assertIn("date_utc", i)
self.assertIsNotNone(i["date_utc"])
def test_get_header(self):
mail = mailparser.parse_from_file(mail_test_1)
h1 = get_header(mail.message, "from")
self.assertIsInstance(h1, six.text_type)
def test_receiveds_parsing(self):
for i in self.all_mails:
mail = mailparser.parse_from_file(i)
receiveds = mail.received_raw
result = receiveds_parsing(receiveds)
self.assertIsInstance(result, list)
for j in result:
self.assertIsInstance(j, dict)
self.assertIn("hop", j)
self.assertIn("delay", j)
def test_ipaddress(self):
mail = mailparser.parse_from_file(mail_test_2)
trust = "smtp.customers.net"
ip = "217.76.210.112"
result = mail.get_server_ipaddress(trust)
self.assertEqual(result, ip)
trust = ""
result = mail.get_server_ipaddress(trust)
self.assertEqual(result, None)
trust = " "
result = mail.get_server_ipaddress(trust)
self.assertEqual(result, None)
def test_ipaddress_unicodeerror(self):
mail = mailparser.parse_from_file(mail_test_12)
trust = "localhost"
result = mail.get_server_ipaddress(trust)
self.assertEqual(result, "96.202.181.20")
def test_fingerprints_body(self):
mail = mailparser.parse_from_file(mail_test_1)
md5, sha1, sha256, sha512 = fingerprints(
mail.body.encode("utf-8"))
self.assertEqual(md5, "1bbdb7dcf511113bbc0c1b214aeac392")
self.assertEqual(sha1, "ce9e62b50fa4e2168278880b14460b905b24eb4b")
self.assertEqual(sha256, ("1e9b96e3f1bc74702f9703391e8ba0715b849"
"7127a7ff857013ab33385898574"))
self.assertEqual(sha512, ("ad858f7b5ec5549e55650fd13df7683e403489"
"77522995851fb6b625ac54744cf3a4bf652784"
"dba971ef99afeec4e6caf2fdd10be72eabb730"
"c312ffbe1c4de3"))
def test_fingerprints_unicodeencodeerror(self):
mail = mailparser.parse_from_file(mail_test_7)
for i in mail.attachments:
fingerprints(i["payload"])
def test_malformed_mail(self):
mail = mailparser.parse_from_file(mail_malformed_3)
defects_categories = mail.defects_categories
self.assertIn("StartBoundaryNotFoundDefect", defects_categories)
self.assertIn("MultipartInvariantViolationDefect", defects_categories)
self.assertIn("reply-to", mail.mail)
self.assertNotIn("reply_to", mail.mail)
reply_to = [(u'VICTORIA Souvenirs', u'[email protected]')]
self.assertEqual(mail.reply_to, reply_to)
self.assertEqual(mail.fake_header, six.text_type())
# This email has header X-MSMail-Priority
msmail_priority = mail.X_MSMail_Priority
self.assertEqual(msmail_priority, "High")
def test_type_error(self):
mail = mailparser.parse_from_file(mail_test_5)
self.assertEqual(len(mail.attachments), 5)
for i in mail.attachments:
self.assertIsInstance(i["filename"], six.text_type)
def test_filename_decode(self):
mail = mailparser.parse_from_file(mail_test_11)
for i in mail.attachments:
self.assertIsInstance(i["filename"], six.text_type)
def test_valid_mail(self):
m = mailparser.parse_from_string("fake mail")
self.assertFalse(m.message)
def test_receiveds(self):
mail = mailparser.parse_from_file(mail_test_1)
self.assertEqual(len(mail.received), 6)
self.assertIsInstance(mail.received, list)
for i in mail.received:
self.assertIsInstance(i, dict)
self.assertIsInstance(mail.received_raw, list)
for i in mail.received_raw:
self.assertIsInstance(i, six.text_type)
self.assertIsInstance(mail.received_json, six.text_type)
def test_parsing_know_values(self):
mail = mailparser.parse_from_file(mail_test_2)
trust = "smtp.customers.net"
self.assertEqual(False, mail.has_defects)
raw = "217.76.210.112"
result = mail.get_server_ipaddress(trust)
self.assertEqual(raw, result)
raw = "<[email protected]>"
result = mail.message_id
self.assertEqual(raw, result)
raw = "[email protected]"
result = mail.to
self.assertEqual(len(result), 2)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], tuple)
self.assertIsInstance(mail.to_json, six.text_type)
self.assertIsInstance(mail.to_raw, six.text_type)
self.assertEqual(raw, result[0][1])
raw = "[email protected]"
result = mail.from_
self.assertEqual(raw, result[0][1])
raw = "Bollettino Meteorologico del 29/11/2015"
result = mail.subject
self.assertEqual(raw, result)
result = mail.has_defects
self.assertEqual(False, result)
result = len(mail.attachments)
self.assertEqual(3, result)
# raw = "Sun, 29 Nov 2015 09:45:18 +0100"
self.assertIsInstance(mail.date_raw, six.text_type)
self.assertIsInstance(mail.date_json, six.text_type)
raw_utc = datetime.datetime(2015, 11, 29, 8, 45, 18, 0).isoformat()
result = mail.date.isoformat()
self.assertEqual(raw_utc, result)
def test_types(self):
mail = mailparser.parse_from_file(mail_test_2)
trust = "smtp.customers.net"
self.assertEqual(False, mail.has_defects)
result = mail.mail
self.assertIsInstance(result, dict)
self.assertNotIn("defects", result)
self.assertIn("has_defects", result)
result = mail.get_server_ipaddress(trust)
self.assertIsInstance(result, six.text_type)
result = mail.mail_json
self.assertIsInstance(result, six.text_type)
result = mail.headers_json
self.assertIsInstance(result, six.text_type)
result = mail.headers
self.assertIsInstance(result, dict)
result = mail.body
self.assertIsInstance(result, six.text_type)
result = mail.date
self.assertIsInstance(result, datetime.datetime)
result = mail.from_
self.assertIsInstance(result, list)
result = mail.to
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertIsInstance(result[0], tuple)
self.assertEqual(len(result[0]), 2)
result = mail.subject
self.assertIsInstance(result, six.text_type)
result = mail.message_id
self.assertIsInstance(result, six.text_type)
result = mail.attachments
self.assertIsInstance(result, list)
result = mail.date
self.assertIsInstance(result, datetime.datetime)
result = mail.defects
self.assertIsInstance(result, list)
def test_defects(self):
mail = mailparser.parse_from_file(mail_malformed_1)
self.assertEqual(True, mail.has_defects)
self.assertEqual(1, len(mail.defects))
self.assertEqual(1, len(mail.defects_categories))
self.assertIn("defects", mail.mail)
self.assertIn("StartBoundaryNotFoundDefect",
mail.defects_categories)
self.assertIsInstance(mail.mail_json, six.text_type)
result = len(mail.attachments)
self.assertEqual(1, result)
mail = mailparser.parse_from_file(mail_test_1)
if six.PY2:
self.assertEqual(False, mail.has_defects)
self.assertNotIn("defects", mail.mail)
elif six.PY3:
self.assertEqual(True, mail.has_defects)
self.assertEqual(1, len(mail.defects))
self.assertEqual(1, len(mail.defects_categories))
self.assertIn("defects", mail.mail)
self.assertIn(
"CloseBoundaryNotFoundDefect", mail.defects_categories)
def test_defects_bug(self):
mail = mailparser.parse_from_file(mail_malformed_2)
self.assertEqual(True, mail.has_defects)
self.assertEqual(1, len(mail.defects))
self.assertEqual(1, len(mail.defects_categories))
self.assertIn("defects", mail.mail)
self.assertIn("StartBoundaryNotFoundDefect",
mail.defects_categories)
self.assertIsInstance(mail.parsed_mail_json, six.text_type)
result = len(mail.attachments)
self.assertEqual(0, result)
def test_add_content_type(self):
mail = mailparser.parse_from_file(mail_test_3)
self.assertEqual(False, mail.has_defects)
result = mail.mail
self.assertEqual(len(result["attachments"]), 1)
self.assertIsInstance(
result["attachments"][0]["mail_content_type"], six.text_type)
self.assertFalse(result["attachments"][0]["binary"])
self.assertIsInstance(
result["attachments"][0]["payload"], six.text_type)
self.assertEqual(
result["attachments"][0]["content_transfer_encoding"],
"quoted-printable")
self.assertEqual(
result["attachments"][0]["charset"],
"iso-8859-1")
def test_from_bytes(self):
if six.PY2:
with self.assertRaises(MailParserEnvironmentError):
mailparser.MailParser.from_bytes(b"")
def test_classmethods(self):
# MailParser.from_file
m = mailparser.MailParser.from_file(mail_test_3)
m.parse()
result = m.mail
self.assertEqual(len(result["attachments"]), 1)
# MailParser.from_string
m = mailparser.MailParser.from_string(m.message_as_string)
m.parse()
result = m.mail
self.assertEqual(len(result["attachments"]), 1)
def test_bug_UnicodeDecodeError(self):
m = mailparser.parse_from_file(mail_test_6)
self.assertIsInstance(m.mail, dict)
self.assertIsInstance(m.mail_json, six.text_type)
def test_parse_from_file_msg(self):
"""
Tested mail from VirusTotal: md5 b89bf096c9e3717f2d218b3307c69bd0
        The email used for this unit test was found randomly on VirusTotal and
        is already publicly available, so it cannot be considered
        a privacy violation
"""
m = mailparser.parse_from_file_msg(mail_outlook_1)
email = m.mail
self.assertIn("attachments", email)
self.assertEqual(len(email["attachments"]), 5)
self.assertIn("from", email)
self.assertEqual(email["from"][0][1], "[email protected]")
self.assertIn("subject", email)
def test_msgconvert(self):
"""
Tested mail from VirusTotal: md5 b89bf096c9e3717f2d218b3307c69bd0
        The email used for this unit test was found randomly on VirusTotal and
        is already publicly available, so it cannot be considered
        a privacy violation
"""
f, _ = msgconvert(mail_outlook_1)
self.assertTrue(os.path.exists(f))
m = mailparser.parse_from_file(f)
self.assertEqual(m.from_[0][1], "[email protected]")
def test_from_file_obj(self):
with ported_open(mail_test_2) as fp:
mail = mailparser.parse_from_file_obj(fp)
trust = "smtp.customers.net"
self.assertEqual(False, mail.has_defects)
result = mail.mail
self.assertIsInstance(result, dict)
self.assertNotIn("defects", result)
self.assertNotIn("anomalies", result)
self.assertIn("has_defects", result)
result = mail.get_server_ipaddress(trust)
self.assertIsInstance(result, six.text_type)
result = mail.mail_json
self.assertIsInstance(result, six.text_type)
result = mail.headers
self.assertIsInstance(result, dict)
result = mail.headers_json
self.assertIsInstance(result, six.text_type)
result = mail.body
self.assertIsInstance(result, six.text_type)
result = mail.date
self.assertIsInstance(result, datetime.datetime)
result = mail.from_
self.assertIsInstance(result, list)
result = mail.to
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertIsInstance(result[0], tuple)
self.assertEqual(len(result[0]), 2)
result = mail.subject
self.assertIsInstance(result, six.text_type)
result = mail.message_id
self.assertIsInstance(result, six.text_type)
result = mail.attachments
self.assertIsInstance(result, list)
result = mail.date
self.assertIsInstance(result, datetime.datetime)
result = mail.defects
self.assertIsInstance(result, list)
result = mail.timezone
self.assertEqual(result, "+1")
def test_get_to_domains(self):
m = mailparser.parse_from_file(mail_test_6)
domains_1 = get_to_domains(m.to, m.reply_to)
self.assertIsInstance(domains_1, list)
self.assertIn("test.it", domains_1)
domains_2 = m.to_domains
self.assertIsInstance(domains_2, list)
self.assertIn("test.it", domains_2)
self.assertEqual(domains_1, domains_2)
self.assertIsInstance(m.to_domains_json, six.text_type)
def test_convert_mail_date(self):
s = "Mon, 20 Mar 2017 05:12:54 +0600"
d, t = convert_mail_date(s)
self.assertEqual(t, "+6")
self.assertEqual(str(d), "2017-03-19 23:12:54")
s = "Mon, 20 Mar 2017 05:12:54 -0600"
d, t = convert_mail_date(s)
self.assertEqual(t, "-6")
def test_ported_string(self):
raw_data = ""
s = ported_string(raw_data)
self.assertEqual(s, six.text_type())
raw_data = "test "
s = ported_string(raw_data)
self.assertEqual(s, "test")
raw_data = u"test "
s = ported_string(raw_data)
self.assertEqual(s, "test")
def test_standard_outlook(self):
""" Verify a basic outlook received header works. """
received = """
from DM3NAM03FT035
by CY4PR0601CA0051.outlook.office365.com
with Microsoft SMTP Server version=TLS1_2, cipher=TLS
id 15.20.1185.23
via Frontend Transport; Mon, 1 Oct 2018 09:49:21 +0000
""".strip()
expected = {
'from': 'DM3NAM03FT035',
'by': 'CY4PR0601CA0051.outlook.office365.com',
'with': 'Microsoft SMTP Server version=TLS1_2, cipher=TLS',
'id': '15.20.1185.23',
'via': 'Frontend Transport',
'date': 'Mon, 1 Oct 2018 09:49:21 +0000'
}
values_by_clause = parse_received(received)
self.assertEqual(expected, values_by_clause)
def test_standard_google__with_cipher(self):
""" Verify that we don't match 'with cipher' a la google. """
received = """
from mail_yw1_f65.google.com
by subdomain.domain.com Postfix with ESMTPS
id abc123 for <[email protected]>;
Tue, 25 Sep 2018 13:09:36 +0000 (UTC)"""
expected = {
'from': 'mail_yw1_f65.google.com',
'by': 'subdomain.domain.com Postfix',
'with': 'ESMTPS',
'id': 'abc123',
'for': '<[email protected]>',
'date': 'Tue, 25 Sep 2018 13:09:36 +0000 (UTC)'
}
values_by_clause = parse_received(received)
self.assertEqual(expected, values_by_clause)
@unittest.skipIf(sys.version_info[0] < 3, "Must be using Python 3")
def test_parse_from_bytes(self):
with open(mail_test_2, "rb") as f:
mail_bytes = f.read()
mail = mailparser.parse_from_bytes(mail_bytes)
trust = "smtp.customers.net"
self.assertEqual(False, mail.has_defects)
raw = "217.76.210.112"
result = mail.get_server_ipaddress(trust)
self.assertEqual(raw, result)
raw = "<[email protected]>"
result = mail.message_id
self.assertEqual(raw, result)
raw = "[email protected]"
result = mail.to
self.assertEqual(len(result), 2)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], tuple)
self.assertIsInstance(mail.to_json, six.text_type)
self.assertIsInstance(mail.to_raw, six.text_type)
self.assertEqual(raw, result[0][1])
raw = "[email protected]"
result = mail.from_
self.assertEqual(raw, result[0][1])
raw = "Bollettino Meteorologico del 29/11/2015"
result = mail.subject
self.assertEqual(raw, result)
result = mail.has_defects
self.assertEqual(False, result)
result = len(mail.attachments)
self.assertEqual(3, result)
# raw = "Sun, 29 Nov 2015 09:45:18 +0100"
self.assertIsInstance(mail.date_raw, six.text_type)
self.assertIsInstance(mail.date_json, six.text_type)
raw_utc = datetime.datetime(2015, 11, 29, 8, 45, 18, 0).isoformat()
result = mail.date.isoformat()
self.assertEqual(raw_utc, result)
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
"[email protected]"
] | |
46afdbc213039bded37448eb93dc6e30299d328f | ac7c02f29a837fdd67d2bdc77bba182080e98ed8 | /codekata/simpleinterest.py | b38138ffa7828f55aa6627b446c6d995a7baf9e8 | [] | no_license | YaminiNarayanan-359/guvi | 7630c309a86365e4367fda1ddab4e966e7d1ac5b | a52b6353100b4e9b83a003e6a327fbfb174daac4 | refs/heads/master | 2020-06-03T00:08:00.389609 | 2019-07-16T06:59:53 | 2019-07-16T06:59:53 | 191,355,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | j,k,l=list(map(int,input().split()))
print(int((j*k*l)/100))
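# Added note (illustrative): the two lines above read principal, time and rate from one
# space-separated input line and apply the simple-interest formula P * T * R / 100;
# for example, an assumed input of "1000 2 5" would print 100.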
| [
"[email protected]"
] | |
f3ae9c1a7bf1d55613b290744e12c443dcac932d | 693568f813603806fbde976a1c69a97b06195708 | /mods/tests/test_install.py | 05669735deeb25b553bc8a1df5f2d8a56faf3514 | [
"MIT"
] | permissive | danlkv/pywebviz | c664a584c5a16d66c49aa233b69ef3b29ccaa081 | 5892ef90f28dbd43c33fefbfa5a199d15322a120 | refs/heads/master | 2023-02-11T06:06:13.451408 | 2021-01-13T07:22:08 | 2021-01-13T07:23:17 | 172,800,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | from importlib import reload
import libvis.modules.installed as modules
import libvis_mods
from pathlib import Path
mocks = Path(__file__).parent / 'mocks'
def test_install_files():
global modules
pyfile, webfle = mocks/'module.py', mocks/'blah.coffee'
try:
libvis_mods.install('Test', pyfile, webfle)
modules = reload(modules)
_ = modules.Test()
finally:
libvis_mods.uninstall('Test')
def test_install_dirs():
global modules
try:
pyfile, webfle = mocks/'BirModule'/'back', mocks/'BirModule'/'front'
libvis_mods.install('BirModule', pyfile, webfle)
modules = reload(modules)
m = modules.BirModule(count=5)
finally:
libvis_mods.uninstall('BirModule')
if __name__ == '__main__':
test_install_dirs()
test_install_files()
| [
"[email protected]"
] | |
fb0b3cea6186400de9e2106c276c471deea1a9c1 | e67fd8a02af7c913d5469b86b1fcc02a3497d863 | /organizing_hub/migrations/0004_auto_20181003_2101.py | 412f0ae3cf448c84b4866ed408a26659932c1147 | [
"MIT"
] | permissive | Our-Revolution/site | 37268727ab4761ca5d3e222b9b11c809327e01c2 | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | refs/heads/master | 2023-01-20T18:10:57.479047 | 2019-08-02T17:26:52 | 2019-08-02T17:26:52 | 71,601,229 | 4 | 3 | MIT | 2023-01-12T08:22:58 | 2016-10-21T22:19:53 | Python | UTF-8 | Python | false | false | 744 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-03 21:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organizing_hub', '0003_organizinghubloginalert_alert_level'),
]
operations = [
migrations.AlterField(
model_name='organizinghubloginalert',
name='alert_level',
field=models.IntegerField(choices=[(1, 'Success'), (2, 'Info'), (3, 'Warning'), (4, 'Danger')], default=3, help_text='\n Set the alert style corresponding to Bootstrap 3 alert levels.\n\n See: https://getbootstrap.com/docs/3.3/components/#alerts-dismissible\n '),
),
]
| [
"[email protected]"
] | |
f283b2717969e97a9084442cb738ded2f130471c | 5896669c7ccf3efe979a4780516fc810844bfbba | /conf.py | 790504a29ba2e1d53b75d3f3ec6fffc60661f7ed | [
"MIT"
] | permissive | Hiestaa/miniboard-factorio-manager | ea1ff7e6084ef88869db635cb866517601f5b055 | 9ff5f1f063f17c0eaa47f43ac05bce0e74d90d45 | refs/heads/master | 2021-01-01T03:47:25.674434 | 2016-04-30T14:45:03 | 2016-04-30T14:45:03 | 57,064,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,373 | py | # -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
import netifaces
def getIpWindows(adapteridx):
try:
import wmi
except:
        logging.error("You need win32com (win32 extensions for Python)")
raise
adapters = wmi.WMI().Win32_NetworkAdapter()
wlan_int_id = adapters[adapteridx].Index
adaptername = adapters[adapteridx].NetConnectionID
ip = ''
for nic in wmi.WMI().Win32_NetworkAdapterConfiguration(IPEnabled=1):
if nic.Index == wlan_int_id:
ip = nic.IPAddress[0]
logging.info("[Windows] Showing IP for adapter %d (%s): %s",
adapteridx, adaptername, ip)
return ip
def filtre(addrInfo):
for typ, addrList in addrInfo.iteritems():
if len(addrList) == 0:
continue
for addrDetails in addrList:
if len(addrDetails.get('addr', '').split('.')) != 4:
continue
if not addrDetails.get('addr').startswith('192.168') and\
addrDetails.get('addr') != '127.0.0.1' and not \
addrDetails.get('addr').startswith('0'):
return addrDetails.get('addr')
def getIp(adapteridx):
adapters = netifaces.interfaces()
addrInfo = [netifaces.ifaddresses(a) for a in adapters]
addrInfo = [filtre(info) for info in addrInfo]
addrInfo = [info for info in addrInfo if info is not None]
return addrInfo[adapteridx % len(addrInfo)]
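# Usage sketch (added comment, not part of the original config): getIp(0) keeps one
# candidate IPv4 address per adapter via filtre() -- skipping 192.168.*, 127.0.0.1 and
# 0.* addresses -- and cycles through the surviving candidates with adapteridx modulo
# the number of matches.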
Conf = {
'state': 'DEBUG',
'log': {
'fileLevel': logging.WARNING
},
'database': {
'name': 'db/miniboard-factorio.db'
},
'server': {
'port': 15000,
'ip': '',
'assets': {
'minifiedCleanups': [
'http/assets/custom/css/',
'http/assets/custom/js/'
],
'minifyOnDebug': False
},
},
'factorio': {
'allowedPorts': sorted(
[34197, 34190, 34191, 34192, 34193]),
'savesFolder': (
'/Users/romain/Library/Application Support/factorio/saves'),
'binary': '/Applications/factorio.app',
'configFolder': (
'/Users/romain/Library/Application Support/factorio/config'),
'autosaveInterval': 15 # in minutes
}
}
| [
"[email protected]"
] | |
54fd9901b39b49d2d42047b88b691cb6d03284de | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_125/ch40_2020_04_06_19_40_46_900950.py | 3b9243a2901e11cca6dba9dde4a7da4dd0698904 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | def soma_valores(n):
num=len(n)
i=0
soma=0
    while i < num:
        soma = soma + n[i]
i+=1
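    # Added example of the intended behaviour: with the corrected loop above,
    # soma_valores([1, 2, 3]) returns 6.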
return soma | [
"[email protected]"
] | |
08ad9df9dd16c3d904a326e08dbe5b1848f362ff | c157097e9883757f588c6da74d419b964a1c75cc | /python_fundamentals/08-user-input/command-line-parameters-01.py | 25984b506bf0dc30f4e2eb2bba2086dcb995dfb2 | [] | no_license | sudhansom/python_sda | 8d888216740c559ab66b700d3bea54c05caa0333 | 25d563854ef9d31ab910f84c973e48e3259de585 | refs/heads/master | 2022-04-26T15:26:15.263236 | 2020-04-25T07:32:10 | 2020-04-25T07:32:10 | 257,564,556 | 0 | 0 | null | 2020-04-29T16:41:37 | 2020-04-21T10:49:59 | Python | UTF-8 | Python | false | false | 191 | py | import sys
my_dict = {}
country_list = sys.argv[1:]
for i in range(0, len(country_list), 2):
my_dict[country_list[i]] = country_list[i+1]
print(f"\nDictionary details : \n\n {my_dict}")
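# Hypothetical invocation (example only, not part of the original script):
#   python command-line-parameters-01.py Norway Oslo Sweden Stockholm
# pairs every other argument as key/value, so my_dict becomes
#   {'Norway': 'Oslo', 'Sweden': 'Stockholm'}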
| [
"[email protected]"
] | |
79669a5b1eccf60216afd0fadf1e13d7389fd0d1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_168/109.py | 50abe5d4745d2991cd4d5af4fe4809c0886ebe1c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | #!/usr/bin/python
import sys
import numpy as np # http://www.numpy.org/
import scipy # http://scipy.org/
import networkx as nx # https://networkx.github.io/
import sympy # http://www.sympy.org
import itertools
import operator
import string
import fractions
#import visual # vpython.org
#import Levenshtein # https://pypi.python.org/pypi/python-Levenshtein/0.12.0
import cmath
sys.setrecursionlimit(5000)
T = int(sys.stdin.readline())
charmap = {'.':0, '^': 1, 'v':-1,'>':2,'<':2}
dirR = {'.':0, '^': 1, 'v':-1,'>':0,'<':0}
dirC = {'.':0, '^': 0, 'v':0,'>':1,'<':-1}
def test(field):
bools = field!='.'
d1 = np.sum(bools,axis=1)
d2 = np.sum(bools,axis=0)
for i in range(R):
for j in range(C):
if field[i,j]=='.': continue
if d1[i]==1 and d2[j]==1: return "IMPOSSIBLE"
count = 0
for i in range(R):
for j in range(C):
if field[i,j]=='.': continue
if field[i,j]=='<':
count+=1
break
for j in range(C):
if field[i,C-j-1]=='.': continue
if field[i,C-j-1]=='>':
count+=1
break
for j in range(C):
for i in range(R):
if field[i,j]=='.': continue
if field[i,j]=='^':
count+=1
break
for i in range(R):
if field[R-i-1,j]=='.': continue
if field[R-i-1,j]=='v':
count+=1
break
return str(count)
for case in range(0, T):
R,C = map(int,sys.stdin.readline().strip().split())
field = np.chararray( (R,C))
for i in range(R):
line=sys.stdin.readline().strip()
for c in range(len(line)):
field[i,c] = line[c]
solution = test(field)
print "Case #%i: %s" % (case + 1, solution)
| [
"[email protected]"
] | |
77f456482ecebbe990adfabf0b25a4c0dd0fd7e7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02724/s623079670.py | d862e55f1b0e4e1a22bc93ff1477b6ed38532fd8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | x=int(input())
y=x//500
z=x-500*y
a=z//5
print(1000*y+a*5)
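# Added walkthrough (illustrative): the script prints 1000 * (x // 500) + 5 * ((x % 500) // 5);
# for an assumed input of 1024 it computes y=2, z=24, a=4 and prints 2020.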
| [
"[email protected]"
] | |
7b993e2d2391a2c6f2fdf7c9b7dcc0ae0b47bb85 | 509823ea14f04d5791486b56a592d7e7499d7d51 | /parte05/ex5.05_remover_duplicados_lista.py | e2c0ee58c5eaf7c39ca0c9d8479cad0f6d096521 | [] | no_license | Fhernd/Python-CursoV2 | 7613144cbed0410501b68bedd289a4d7fbefe291 | 1ce30162d4335945227f7cbb875f99bc5f682b98 | refs/heads/master | 2023-08-08T05:09:44.167755 | 2023-08-05T19:59:38 | 2023-08-05T19:59:38 | 239,033,656 | 64 | 38 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | # Ejercicio 5.5: Remover los valores duplicados en una lista.
numeros = [1, 2, 3, 1, 1, 1, 4, 5, 6, 3, 3, 2, 5]
print('Current contents of the list `numeros`:', numeros)
print('Current length of the list `numeros`:', len(numeros))
print()
# Solution #1:
print('Solution #1:')
numeros_sin_repetir = []
for n in numeros:
    if n not in numeros_sin_repetir:
        numeros_sin_repetir.append(n)
print('Current contents of the list `numeros_sin_repetir`:', numeros_sin_repetir)
print('Current length of the list `numeros_sin_repetir`:', len(numeros_sin_repetir))
print()
# Solution #2:
print('Solution #2')
conjunto_numeros = list(set(numeros))
print('Current contents of `conjunto_numeros`:', conjunto_numeros)
print('Current length of `conjunto_numeros`:', len(conjunto_numeros))
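# Added note: list(set(numeros)) does not preserve the original ordering of the elements.
# A common order-preserving alternative (sketch, not part of the original exercise) is:
#     conjunto_ordenado = list(dict.fromkeys(numeros))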
| [
"[email protected]"
] | |
ff21461f29ea8d9161ba90e7c5ee44d3fba4e68d | f5ee595836adfb75047d2798928ca020533bd597 | /nanobrok/ext/ssl.py | 43a2b503b138db55be7ab8dc961127139043fb33 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | santaklouse/Nanobrok | efcc836484a799f614c21d50a75e0f5d1088f8bb | 680b112f76e248f64c021337769bef163527bce0 | refs/heads/master | 2023-08-13T03:52:25.137896 | 2021-09-18T18:11:13 | 2021-09-18T18:11:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | # TODO disabled features
# from flask_talisman import Talisman
# def init_app(app):
# # need to run with disable debug
# talisman = Talisman(app)
| [
"[email protected]"
] | |
964bb062f8e25a61f0000a0172d3c72f53622e37 | 5a6555a37ea574a6a02eb4a612171fec86724edf | /Django/mongodb/mongodb/settings.py | 2a7966a6a2d2cdbcf4d6cfcd94989fd091cc3df8 | [] | no_license | heiyouyou/Python | 9b014b3d3619824eb739c7d87fa5304fa2cf1546 | 74b0b0d1e4d678b74ada61b03a026b64f2a084d9 | refs/heads/master | 2021-05-06T13:48:22.616248 | 2018-10-24T10:33:19 | 2018-10-24T10:33:19 | 113,288,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,097 | py | """
Django settings for mongodb project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w#ql--a^7913cugh5njqjd3txa#_qhrok%xxq%)jugj6@^%%47'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mongodb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mongodb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
dd43ada4c7651ae8a3fc87fb17b8fa37eba4718b | 241c51904c2501a85e71da62d4a9a79f8656dbb4 | /transformers/models/prophetnet/modeling_prophetnet.py | 11197182f85b4b58f126b486ffbda39e24f29f4b | [
"Apache-2.0"
] | permissive | zhouhaoyi/TripletAttention | c3d7a37b00d80286e802324859156841f33841d0 | 84bb8d7a7a45dfd37c82849c9ae6ed8a41bb0718 | refs/heads/main | 2023-08-25T22:35:59.828152 | 2021-10-27T04:27:13 | 2021-10-27T04:27:13 | 374,306,104 | 15 | 3 | Apache-2.0 | 2021-09-19T08:33:38 | 2021-06-06T08:22:35 | null | UTF-8 | Python | false | false | 103,663 | py | # coding=utf-8
# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch ProphetNet model, ported from ProphetNet repo(fairsequery_states version). """
import copy
import math
import warnings
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import LayerNorm
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_prophetnet import ProphetNetConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "ProphetNetConfig"
_TOKENIZER_FOR_DOC = "ProphetNetTokenizer"
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/prophetnet-large-uncased",
# See all ProphetNet models at https://huggingface.co/models?filter=prophetnet
]
PROPHETNET_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
Original ProphetNet code can be found at <https://github.com/microsoft/ProphetNet> . Checkpoints were converted
from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the
file ``convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py``.
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
behavior.
Parameters:
config (:class:`~transformers.ProphetNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
PROPHETNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.ProphetNetTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
ProphetNet uses the :obj:`eos_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and
modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more
information on the default strategy.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
`optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
PROPHETNET_STANDALONE_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.ProphetNetTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
def softmax(hidden_state, dim, onnx_trace=False):
if onnx_trace:
return F.softmax(hidden_state.float(), dim=dim)
else:
return F.softmax(hidden_state, dim=dim, dtype=torch.float32)
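# Added note: both branches above up-cast the logits to float32 before the softmax, the usual
# trick to keep the normalization numerically stable when the model runs in fp16.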
def ngram_attention_bias(sequence_length, ngram, device, dtype):
"""
This function computes the bias for the predict stream
"""
bias = torch.ones((ngram, sequence_length, 2 * sequence_length), device=device, dtype=dtype) * float("-inf")
# create bias
for stream_idx in range(ngram):
for i in range(sequence_length):
bias[stream_idx, i, sequence_length + i] = 0
bias[stream_idx, i, : max(i - stream_idx, 0) + 1] = 0
return bias
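# Illustrative sketch (added, not in the original): for sequence_length=2 and ngram=1 the
# returned bias has shape (1, 2, 4); row i leaves position sequence_length + i (the i-th
# predict-stream slot) and the main-stream history up to position i at 0.0 and masks every
# other position with -inf.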
def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False):
"""
This function computes individual parts of the relative position buckets. For more detail, see paper.
"""
inv_relative_positions = -relative_positions
rel_positions_bucket = 0
if is_bidirectional:
num_buckets = num_buckets // 2
rel_positions_bucket = (
rel_positions_bucket
+ torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets
)
inv_relative_positions = torch.abs(inv_relative_positions)
else:
inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions))
max_exact = num_buckets // 2
is_small = torch.lt(inv_relative_positions, max_exact)
val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log(
max_distance / max_exact
) * (num_buckets - max_exact)
val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int()
rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large)
return rel_positions_bucket
def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids):
"""
This function computes both main and predict relative position buckets. For more detail, see paper.
"""
# main stream
main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1)
main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1)
# predicting stream
predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1)
predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1)
predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1)
# get both position buckets
main_relative_position_buckets = compute_relative_buckets(
num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False
)
predict_relative_position_buckets = compute_relative_buckets(
num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False
)
return main_relative_position_buckets, predict_relative_position_buckets
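# Shape sketch (added for clarity): for position_ids of shape (batch_size, sequence_length),
# the returned main-stream buckets have shape (batch_size, sequence_length, sequence_length)
# and the predict-stream buckets (batch_size, sequence_length, 2 * sequence_length), since
# the predict stream attends over the shifted positions as well.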
@dataclass
class ProphetNetSeq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, config.vocab_size)`):
Prediction scores of the main stream language modeling head (scores for each vocabulary token before
SoftMax).
logits_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
SoftMax).
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
decoder_ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
decoder_ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
            compute the weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, encoder_sequence_length)`. Attentions weights of the encoder, after the attention
softmax, used to compute the weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
logits_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@property
def decoder_cross_attentions(self):
warnings.warn(
"`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions` instead.",
FutureWarning,
)
return self.cross_attentions
@dataclass
class ProphetNetSeq2SeqModelOutput(ModelOutput):
"""
    Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`):
Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
last_hidden_state_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,ngram * decoder_sequence_length, config.vocab_size)`):
Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
decoder_ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
decoder_ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
            weighted average in the self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
            compute the weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, encoder_sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
last_hidden_state: torch.FloatTensor
last_hidden_state_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@property
def decoder_cross_attentions(self):
warnings.warn(
"`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions` instead.",
FutureWarning,
)
return self.cross_attentions
@dataclass
class ProphetNetDecoderModelOutput(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`):
Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
last_hidden_state_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
            weighted average in the self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
            compute the weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor
last_hidden_state_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class ProphetNetDecoderLMOutput(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, config.vocab_size)`):
Prediction scores of the main stream language modeling head (scores for each vocabulary token before
SoftMax).
logits_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
SoftMax).
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
            weighted average in the self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
            compute the weighted average in the cross-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
logits_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
class ProphetNetPreTrainedModel(PreTrainedModel):
config_class = ProphetNetConfig
base_model_prefix = "prophetnet"
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. In ProphetNet it is usually set to the pad_token_id. See ProphetNet docs for more information"
# shift inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
return shifted_input_ids
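        # Worked example of the shift above (illustrative token ids, assuming decoder_start_token_id == pad_token_id == 0):
        # labels [[312, -100, 845]] -> right-shifted [[0, 312, -100]] -> after masked_fill_ [[0, 312, 0]],
        # i.e. the decoder input starts with the start token and any remaining -100 label markers become the pad id.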
class ProhpetNetPositionalEmbeddings(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting
based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to
the forward function.
"""
def __init__(self, config: ProphetNetConfig):
super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None):
assert (position_ids is None) or (
self.padding_idx is None
), "If position_ids is pre-computed then padding_idx should not be set."
if position_ids is None:
if past_key_values is not None:
# position_ids is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
prev_num_input_ids = past_key_values[0]["self"]["prev_key_states"].shape[2]
num_input_ids = inputs_shape[1] + prev_num_input_ids
position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * (
int(self.padding_idx + num_input_ids)
)
else:
if attention_mask is None:
attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device)
# retrieve position_ids from input_ids / attention_mask
position_ids = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() + self.padding_idx
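                # e.g. with padding_idx = 1 and attention_mask = [[1, 1, 1, 0]]:
                # cumsum -> [[1, 2, 3, 3]], * mask -> [[1, 2, 3, 0]], + padding_idx -> [[2, 3, 4, 1]],
                # so real tokens get increasing positions and padded slots fall back to the padding index.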
return super().forward(position_ids), position_ids
def _forward(self, position_ids):
return super().forward(position_ids)
class ProphetNetSelfAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
config: ProphetNetConfig,
num_attn_heads: int,
):
super().__init__()
hidden_size = config.hidden_size
self.attention_dropout = config.attention_dropout
self.dropout = config.dropout
self.num_attn_heads = num_attn_heads
self.head_dim = hidden_size // num_attn_heads
assert (
self.head_dim * num_attn_heads == hidden_size
), "`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and `config.num_decoder_attention_heads`"
self.key_proj = nn.Linear(hidden_size, hidden_size)
self.value_proj = nn.Linear(hidden_size, hidden_size)
self.query_proj = nn.Linear(hidden_size, hidden_size)
self.out_proj = nn.Linear(hidden_size, hidden_size)
def _reshape(self, tensor, first_dim, batch_size):
return tensor.reshape(first_dim, batch_size * self.num_attn_heads, self.head_dim).transpose(0, 1)
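    # _reshape maps a (seq_len, batch, hidden) projection to (batch * num_attn_heads, seq_len, head_dim);
    # e.g. a (5, 2, 1024) tensor with 16 heads becomes (32, 5, 64), the layout torch.bmm expects below.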
def forward(
self,
hidden_states,
key_value_states: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
layer_state: Optional[Dict[str, Optional[Tensor]]] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
sequence_length, batch_size, hidden_size = hidden_states.size()
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
cache_key = "cross_attention" if is_cross_attention else "self"
assert list(hidden_states.size()) == [
sequence_length,
batch_size,
hidden_size,
], f"Size of hidden states should be {sequence_length, batch_size, hidden_size}, but is {hidden_states.size()}"
# previous time steps are cached - no need to recompute key and value if they are static
if layer_state is not None:
saved_state = layer_state.get(cache_key, None)
query_states = self.query_proj(hidden_states) / (self.head_dim ** 0.5)
query_states = self._reshape(query_states, sequence_length, batch_size)
if not is_cross_attention:
# self-attention
key_states = self.key_proj(hidden_states)
key_states = self._reshape(key_states, -1, batch_size)
value_states = self.value_proj(hidden_states)
value_states = self._reshape(value_states, -1, batch_size)
elif saved_state is None:
# cross-attention without layer state
key_states = self.key_proj(key_value_states)
key_states = self._reshape(key_states, -1, batch_size)
value_states = self.value_proj(key_value_states)
value_states = self._reshape(value_states, -1, batch_size)
else:
key_states = saved_state["prev_key_states"].view(batch_size * self.num_attn_heads, -1, self.head_dim)
value_states = saved_state["prev_value_states"].view(batch_size * self.num_attn_heads, -1, self.head_dim)
# Update cache
if is_cross_attention:
layer_state[cache_key] = {
"prev_key_states": key_states.view(batch_size, self.num_attn_heads, -1, self.head_dim),
"prev_value_states": value_states.view(batch_size, self.num_attn_heads, -1, self.head_dim),
}
key_sequence_length = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
assert attn_weights.size() == (
batch_size * self.num_attn_heads,
sequence_length,
key_sequence_length,
), f"`attn_weights` should be of size {batch_size * self.num_attn_heads, sequence_length, key_sequence_length}, but is of size {attn_weights.shape}"
# This is part of a workaround to get around fork/join parallelism not supporting Optional types.
if attention_mask is not None and attention_mask.dim() == 0:
attention_mask = None
assert attention_mask is None or attention_mask.size() == (
self.num_attn_heads * batch_size,
1,
key_sequence_length,
), f"`attention_mask` should be `None` or of shape attention_mask.size() == {batch_size * self.num_attn_heads, 1, key_sequence_length}, but is {attention_mask.shape}"
if attention_mask is not None: # don't attend to padding symbols
attn_weights = attn_weights + attention_mask
# need two reshapes to keep gradient at attention weights
attn_weights_reshaped = attn_weights.view(
batch_size, self.num_attn_heads, sequence_length, key_sequence_length
)
attn_weights = attn_weights_reshaped.view(
batch_size * self.num_attn_heads, sequence_length, key_sequence_length
)
attn_weights = F.softmax(attn_weights, dim=-1)
attn_probs = F.dropout(
attn_weights,
p=self.attention_dropout,
training=self.training,
)
attn_output = torch.bmm(attn_probs, value_states)
assert attn_output.size() == (
batch_size * self.num_attn_heads,
sequence_length,
self.head_dim,
        ), f"`attn_output` should be of shape {batch_size * self.num_attn_heads, sequence_length, self.head_dim}, but is of shape {attn_output.size()}"
attn_output = attn_output.transpose(0, 1).contiguous().view(sequence_length, batch_size, hidden_size)
attn_output = self.out_proj(attn_output)
attn_output = F.dropout(attn_output, p=self.dropout, training=self.training)
return attn_output, attn_weights_reshaped
class ProhpetNetFeedForward(nn.Module):
"""
This is the residual two feed-forward layer block based on the original Transformer implementation.
"""
def __init__(self, config: ProphetNetConfig, ffn_dim: int):
super().__init__()
self.activation_fn = ACT2FN[config.activation_function]
self.intermediate = nn.Linear(config.hidden_size, ffn_dim)
self.output = nn.Linear(ffn_dim, config.hidden_size)
self.activation_dropout = config.activation_dropout
self.dropout = config.dropout
def forward(self, hidden_states):
hidden_states = self.intermediate(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.output(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
class ProphetNetNgramProphetNetSelfAttention(nn.Module):
def __init__(self, config: ProphetNetConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.num_buckets = config.num_buckets
self.relative_max_distance = config.relative_max_distance
self.num_attn_heads = config.num_attention_heads
self.dropout = config.dropout
self.attention_dropout = config.attention_dropout
self.head_dim = config.hidden_size // self.num_attn_heads
self.ngram = config.ngram
assert (
self.head_dim * self.num_attn_heads == config.hidden_size
), "config.hidden_size must be divisible by num_attn_heads"
# key, value, query projection
self.key_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.value_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.query_proj = nn.Linear(config.hidden_size, config.hidden_size)
# out projection
self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
# rel position embeddings
self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads)
# for onnx runtime
self.onnx_trace = False
def _reshape(self, tensor, first_dim, batch_size):
return tensor.reshape(first_dim, batch_size * self.num_attn_heads, self.head_dim).transpose(0, 1)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
hidden_states,
layer_state=None,
attention_mask=None,
extended_predict_attention_mask=None,
main_relative_position_buckets=None,
predict_relative_position_buckets=None,
position_ids=None,
):
sequence_length, batch_size, hidden_size = hidden_states.size()
assert list(hidden_states.size()) == [
sequence_length,
batch_size,
hidden_size,
], f"`hidden_states` should be of shape {sequence_length, batch_size, hidden_size}, but is of shape {hidden_states.shape}"
# key and value of previous time steps are cached
saved_state = layer_state.get("self", None)
# project
query_states = self.query_proj(hidden_states)
key_states = self.key_proj(hidden_states)
value_states = self.value_proj(hidden_states)
# normalize
query_states = query_states / (self.head_dim ** 0.5)
# reshape
query_states = self._reshape(query_states, sequence_length, batch_size)
key_states = self._reshape(key_states, -1, batch_size)
value_states = self._reshape(value_states, -1, batch_size)
# chunk into main stream and predict stream
hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=0)
query_states_list = query_states.chunk(1 + self.ngram, dim=1)
key_states_list = key_states.chunk(1 + self.ngram, dim=1)
value_states_list = value_states.chunk(1 + self.ngram, dim=1)
main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:]
main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:]
main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:]
main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:]
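        # The input stacks the main stream and the `ngram` predict streams along the time axis: with ngram = 2 and a
        # main length T, hidden_states is (3*T, B, C) and chunk(1 + ngram, dim=0) recovers [main, predict_1, predict_2];
        # the query/key/value tensors follow the same split along their sequence dimension (dim=1).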
# saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim)
if saved_state is not None:
prev_main_key_states = saved_state["prev_key_states"].view(
batch_size * self.num_attn_heads, -1, self.head_dim
)
main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=1)
prev_main_value_states = saved_state["prev_value_states"].view(
batch_size * self.num_attn_heads, -1, self.head_dim
)
main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=1)
# Update cache
layer_state["self"] = {
"prev_key_states": main_key_states.view(batch_size, self.num_attn_heads, -1, self.head_dim),
"prev_value_states": main_value_states.view(batch_size, self.num_attn_heads, -1, self.head_dim),
}
# get seq_length of main stream only
main_sequence_length = sequence_length // (1 + self.ngram)
# MAIN-STREAM
# main attn weights
main_attn_weights = torch.bmm(main_query_states, main_key_states.transpose(1, 2))
# retrieve relative position embeddings for each layer -> see paper for more details
main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(
main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets
)
main_attn_weights = main_attn_weights + main_relative_pos_embeddings
if attention_mask is not None:
main_attn_weights = main_attn_weights + attention_mask
main_attn_probs = softmax(
main_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(main_attn_weights)
main_attn_probs = F.dropout(main_attn_probs, p=self.attention_dropout, training=self.training)
# project to attn_output
main_attn_output = torch.bmm(main_attn_probs, main_value_states)
main_attn_output = (
main_attn_output.transpose(0, 1).contiguous().view(1, main_sequence_length, batch_size, hidden_size)
)
main_attn_output = self.out_proj(main_attn_output)
# PREDICT-STREAM
# [ngram, B*head, T, c]
predict_query_states = torch.cat(predict_query_states_list, 0).view(
self.ngram, -1, main_sequence_length, self.head_dim
)
# [ngram, B*head, 2*T, c]
predict_key_states = torch.cat(
[torch.cat([main_key_states, key], 1).unsqueeze(0) for key in predict_key_states_list], 0
)
# [ngram, T, B, C]
predict_hidden_states = torch.cat(hidden_states_predict_list, 0).view(
self.ngram, main_sequence_length, batch_size, hidden_size
)
# [ngram, B*head, 2*T, c]
predict_value_states = torch.cat(
[torch.cat([main_value_states, v_p], 1).unsqueeze(0) for v_p in predict_value_states_list], 0
)
# [ngram, B*head, T, 2*T]
predict_attn_weights = torch.einsum("nbtc,nbsc->nbts", (predict_query_states, predict_key_states))
# [ngram, B*head, T, S]
# retrieve relative position embeddings for each layer -> see paper for more details
predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(
predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets
)
# [ngram, B*head, T, 2*T]
predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings
if extended_predict_attention_mask is not None:
predict_attn_weights = predict_attn_weights + extended_predict_attention_mask
predict_attn_probs = softmax(
predict_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(predict_attn_weights)
predict_attn_probs = F.dropout(predict_attn_probs, p=self.attention_dropout, training=self.training)
# project to attention output
# [ngram, B*head, T, c]
predict_attn_output = torch.einsum("nbts,nbsc->nbtc", (predict_attn_probs, predict_value_states))
# [ngram, T, B, C]
predict_attn_output = (
predict_attn_output.transpose(1, 2)
.contiguous()
.view(self.ngram, main_sequence_length, batch_size, hidden_size)
)
predict_attn_output = self.out_proj(predict_attn_output)
# concat to single attn output
# [1+ngram*T, B, C]
attn_output = torch.cat([main_attn_output, predict_attn_output], 0).view(-1, batch_size, hidden_size)
# reshape into better form for `config.output_attentions`
main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, main_sequence_length, -1)
predict_attn_probs = predict_attn_probs.view(
self.ngram, batch_size, self.num_attn_heads, main_sequence_length, -1
).transpose(0, 1)
attn_output = F.dropout(attn_output, p=self.dropout, training=self.training)
return attn_output, main_attn_probs, predict_attn_probs
def get_main_relative_pos_embeddings(
self, hidden_states, attn_weights, position_ids, main_relative_position_buckets
):
        # input hidden_states [T,B,C], input attn_weights [B*head,T,S], input position_ids [B,T] or [1,1]
if main_relative_position_buckets is None:
batch_size, sequence_length = hidden_states.shape[:2]
relative_positions = (
torch.arange(1, attn_weights.shape[-1] + 1)
.unsqueeze(0)
.unsqueeze(0)
.repeat(batch_size, sequence_length, 1)
.to(position_ids.device)
)
relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(
batch_size, sequence_length, 1
) # [B, T, s]
main_relative_position_buckets = compute_relative_buckets(
self.num_buckets, self.relative_max_distance, relative_positions, False
)
hidden_states = hidden_states.transpose(0, 1) # [B,T,C]
rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) # [B,T,Buckets*head]
rel_pos_embeddings = rel_pos_embeddings.view(
rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)
).permute(
0, 3, 1, 2
) # [B,T,Buckets,head]
rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:2] + (-1,)) # [B*head,T,Buckets]
main_relative_position_buckets = (
main_relative_position_buckets.repeat(1, self.num_attn_heads, 1)
.view(-1, main_relative_position_buckets.shape[-1])
.long()
) # [B*head*T, T]
rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1)) # [B*head*T,Buckets]
main_relative_pos_embeddings = torch.gather(
rel_pos_embeddings, dim=1, index=main_relative_position_buckets
).view(attn_weights.shape[:2] + (-1,))
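        # The gather picks, for every (batch*head, query, key) triple, the embedding of that pair's relative-position
        # bucket, so the result has the same (B*head, T, S) layout as main_attn_weights and is simply added to them
        # as a learned relative-position bias.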
return main_relative_pos_embeddings
def get_predict_relative_pos_embeddings(
self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets
):
# input hidden_states [ngram, T,B,C], input attn_weights [ngram, B*head,T,S], input position_ids [B,T] or [1,1], input predict_relative_position_buckets [B,T, 2*T] or None
sequence_length, batch_size = hidden_states.shape[1:3]
if predict_relative_position_buckets is None:
key_sequence_length = attn_weights.shape[-1]
assert (
position_ids[0][0] == key_sequence_length - 1
), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)"
relative_positions = (
torch.arange(0, key_sequence_length)
.unsqueeze(0)
.unsqueeze(0)
.repeat(batch_size, sequence_length, 1)
.to(position_ids.device)
)
relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
predict_relative_position_buckets = compute_relative_buckets(
self.num_buckets, self.relative_max_distance, relative_positions, False
)
hidden_states = hidden_states.transpose(1, 2) # [ngram, B, T, C]
rel_pos_embeddings = self.relative_pos_embeddings(hidden_states).view(
hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)
) # [ngram, B, T, bucket, head]
rel_pos_embeddings = rel_pos_embeddings.permute(0, 1, 4, 2, 3).reshape(
self.ngram * batch_size * self.num_attn_heads, sequence_length, -1
) # [ngram*B*head, T, bucket]
predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0).repeat(
self.ngram, 1, self.num_attn_heads, 1
) # [ngram, B, head*T, S]
rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))
predict_relative_position_buckets = predict_relative_position_buckets.view(
-1, predict_relative_position_buckets.size(-1)
).long() # [ngram*B*head*T, S]
predict_relative_pos_embeddings = torch.gather(
rel_pos_embeddings, dim=1, index=predict_relative_position_buckets
).view(
self.ngram, batch_size * self.num_attn_heads, sequence_length, -1
) # [ngram, B*head, T, S]
return predict_relative_pos_embeddings
class ProphetNetEncoderLayer(nn.Module):
"""
Encoder block for Prophetnet
"""
def __init__(self, config: ProphetNetConfig):
super().__init__()
# 1st residual block
self.self_attn = ProphetNetSelfAttention(config, config.num_encoder_attention_heads)
self.self_attn_layer_norm = LayerNorm(config.hidden_size)
# 2nd residual block
self.feed_forward = ProhpetNetFeedForward(config, config.encoder_ffn_dim)
self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
def forward(self, hidden_states, attention_mask):
# 1st residual block
attention_output, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
)
hidden_states = self.self_attn_layer_norm(attention_output + hidden_states)
# 2nd residual block
feed_forward_output = self.feed_forward(hidden_states)
hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
return hidden_states, attn_weights
class ProphetNetDecoderLayer(nn.Module):
"""
Decoder block for Prophetnet
"""
def __init__(self, config: ProphetNetConfig):
super().__init__()
# 1st residual block
self.self_attn = ProphetNetNgramProphetNetSelfAttention(config)
self.self_attn_layer_norm = LayerNorm(config.hidden_size)
# 2nd residual block
if config.add_cross_attention:
self.cross_attn = ProphetNetSelfAttention(config, config.num_decoder_attention_heads)
self.cross_attn_layer_norm = LayerNorm(config.hidden_size)
# 3rd residual block
self.feed_forward = ProhpetNetFeedForward(config, config.decoder_ffn_dim)
self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
def forward(
self,
hidden_states,
encoder_hidden_states=None,
encoder_attn_mask=None,
layer_state=None,
attention_mask=None,
extended_predict_attention_mask=None,
main_relative_position_buckets=None,
predict_relative_position_buckets=None,
position_ids=None,
):
layer_state = layer_state if layer_state is not None else {}
# 1st residual block
ngram_attention_output, self_attn_weights, self_attn_weights_ngram = self.self_attn(
hidden_states=hidden_states,
layer_state=layer_state,
attention_mask=attention_mask,
extended_predict_attention_mask=extended_predict_attention_mask,
main_relative_position_buckets=main_relative_position_buckets,
predict_relative_position_buckets=predict_relative_position_buckets,
position_ids=position_ids,
)
hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output)
cross_attn_weights = None
if encoder_hidden_states is not None:
# 2nd residual block
attention_output, cross_attn_weights = self.cross_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attn_mask,
layer_state=layer_state, # mutates layer state
)
hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states)
# 3rd residual block
feed_forward_output = self.feed_forward(hidden_states)
hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
return (
hidden_states,
self_attn_weights,
self_attn_weights_ngram,
cross_attn_weights,
layer_state,
) # just self_attn weights for now, following t5, layer_state = cache for decoding
@add_start_docstrings(
"The standalone encoder part of the ProphetNetModel.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetEncoder(ProphetNetPreTrainedModel):
r"""
word_embeddings (:obj:`torch.nn.Embeddings` of shape :obj:`(config.vocab_size, config.hidden_size)`, `optional`):
The word embedding parameters. This can be used to initialize :class:`~transformers.ProphetNetEncoder` with
        pre-defined word embeddings instead of randomly initialized word embeddings.
"""
def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None):
super().__init__(config)
self.word_embeddings = (
word_embeddings
if word_embeddings is not None
else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
)
self.position_embeddings = ProhpetNetPositionalEmbeddings(config)
self.embeddings_layer_norm = LayerNorm(config.hidden_size)
self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
self.init_weights()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
@add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetEncoder
>>> import torch
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetEncoder.from_pretrained('patrickvonplaten/prophetnet-large-uncased-standalone')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None and inputs_embeds is None:
raise ValueError("Either input_ids or inputs_embeds has to be passed.")
elif input_ids is not None and inputs_embeds is not None:
raise ValueError("Make sure to only pass input_ids or inputs_embeds.")
elif input_ids is not None and inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
# prepare attention mask
if attention_mask is not None:
extended_attention_mask = (
1.0 - attention_mask[:, None, :].repeat(self.config.num_attention_heads, 1, 1)
) * -10000.0
extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype)
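            # e.g. attention_mask [[1, 1, 0]] becomes additive scores [[0., 0., -10000.]] (broadcast over queries),
            # repeated once per head so it can be added directly to the (B*head, T, S) attention logits.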
else:
extended_attention_mask = None
position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device)
hidden_states = inputs_embeds + position_embeddings
hidden_states = self.embeddings_layer_norm(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.config.dropout, training=self.training)
hidden_states = hidden_states.transpose(0, 1) # B x T x C -> T x B x C
encoder_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
hidden_states = hidden_states.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states + (hidden_states,)
hidden_states = hidden_states.transpose(0, 1)
hidden_states, attn_probs = encoder_layer(hidden_states, attention_mask=extended_attention_mask)
if output_attentions:
all_attentions = all_attentions + (attn_probs,)
hidden_states = hidden_states.transpose(0, 1)
if output_hidden_states:
encoder_hidden_states = encoder_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions
)
@add_start_docstrings(
"The standalone decoder part of the ProphetNetModel.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetDecoder(ProphetNetPreTrainedModel):
r"""
word_embeddings (:obj:`torch.nn.Embeddings` of shape :obj:`(config.vocab_size, config.hidden_size)`, `optional`):
        The word embedding parameters. This can be used to initialize :class:`~transformers.ProphetNetDecoder` with
        pre-defined word embeddings instead of randomly initialized word embeddings.
"""
def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None):
super().__init__(config)
self.ngram = config.ngram
self.num_buckets = config.num_buckets
self.relative_max_distance = config.relative_max_distance
self.dropout = config.dropout
self.max_target_positions = config.max_position_embeddings
self.word_embeddings = (
word_embeddings
if word_embeddings is not None
else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
)
self.position_embeddings = ProhpetNetPositionalEmbeddings(config)
self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None)
self.layers = nn.ModuleList([ProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)])
self.embeddings_layer_norm = LayerNorm(config.hidden_size)
self.init_weights()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
@add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetDecoder
>>> import torch
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetDecoder.from_pretrained('patrickvonplaten/prophetnet-large-uncased-standalone', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None and inputs_embeds is None:
raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.")
elif input_ids is not None and inputs_embeds is not None:
raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.")
elif input_ids is not None and inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
batch_size, sequence_length = inputs_embeds.shape[:2]
main_stream_pos_embed, position_ids = self.position_embeddings(
(batch_size, sequence_length),
device=inputs_embeds.device,
past_key_values=past_key_values,
)
if past_key_values is not None:
main_relative_position_buckets, predict_relative_position_buckets = None, None
else:
(
main_relative_position_buckets,
predict_relative_position_buckets,
) = self.compute_buffered_relative_buckets(position_ids)
predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1)
# add position embeddings
hidden_states = inputs_embeds + main_stream_pos_embed
hidden_states = hidden_states.transpose(0, 1)
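        # (B, T, C) -> (T, B, C); decoder-internal tensors stay time-major until the final split/transpose below.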
ngram_embeddings = self.ngram_embeddings.weight
# prepare attention mask
if past_key_values is not None:
assert (
hidden_states.size(0) == 1
), "At the moment `use_cache` is only supported for `decoder_input_ids` of length 1"
ngram_hidden_states = [
(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).transpose(0, 1).repeat(1, batch_size, 1)
for ngram in range(self.ngram)
]
extended_attention_mask = None
extended_predict_attention_mask = None
else:
ngram_hidden_states = [
(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).transpose(0, 1)
for ngram in range(self.ngram)
]
extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask)
extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask)
# prepare encoder attention mask
if encoder_attention_mask is not None:
extended_encoder_attention_mask = (
1.0 - encoder_attention_mask[:, None, :].repeat(self.config.num_attention_heads, 1, 1)
) * -10000.0
extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype)
else:
extended_encoder_attention_mask = None
hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 0)
if self.embeddings_layer_norm:
hidden_states = self.embeddings_layer_norm(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
if encoder_hidden_states is not None:
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
# init attentions, hidden_states and cache with empty tuples
all_main_stream_hidden_states = () if output_hidden_states else None
all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None
all_main_stream_attns = () if output_attentions else None
all_ngram_stream_attns = () if output_attentions else None
all_cross_attns = () if output_attentions and self.config.add_cross_attention else None
present_key_values = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
# grad cannot be kept because tensor is sliced
all_main_stream_hidden_states += (hidden_states[:sequence_length].transpose(0, 1),)
if self.config.ngram > 0:
all_ngram_stream_hidden_states += (hidden_states[sequence_length:].transpose(0, 1),)
layer_state = past_key_values[idx] if past_key_values is not None else None
(
hidden_states,
layer_self_attn,
layer_self_predict_attn_output,
layer_cross_attn,
layer_past,
) = decoder_layer(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
encoder_attn_mask=extended_encoder_attention_mask,
layer_state=layer_state,
attention_mask=extended_attention_mask,
extended_predict_attention_mask=extended_predict_attention_mask,
main_relative_position_buckets=main_relative_position_buckets,
predict_relative_position_buckets=predict_relative_position_buckets,
position_ids=position_ids,
)
if use_cache:
present_key_values += (layer_past,)
if output_attentions:
all_main_stream_attns += (layer_self_attn,)
all_ngram_stream_attns += (layer_self_predict_attn_output,)
if self.config.add_cross_attention:
all_cross_attns += (layer_cross_attn,)
if output_hidden_states:
all_main_stream_hidden_states += (hidden_states[:sequence_length].transpose(0, 1),)
if self.config.ngram > 0:
all_ngram_stream_hidden_states += (hidden_states[sequence_length:].transpose(0, 1),)
# split last_hidden_state for return
last_hidden_state = hidden_states[:sequence_length].transpose(0, 1)
last_hidden_state_ngram = hidden_states[sequence_length:].transpose(0, 1) if self.config.ngram > 0 else None
encoder_hidden_states = encoder_hidden_states.transpose(0, 1) if encoder_hidden_states is not None else None
if not return_dict:
return tuple(
v
for v in [
last_hidden_state,
last_hidden_state_ngram,
present_key_values,
all_main_stream_hidden_states,
all_ngram_stream_hidden_states,
all_main_stream_attns,
all_ngram_stream_attns,
all_cross_attns,
]
if v is not None
)
return ProphetNetDecoderModelOutput(
last_hidden_state=last_hidden_state,
last_hidden_state_ngram=last_hidden_state_ngram,
past_key_values=present_key_values,
hidden_states=all_main_stream_hidden_states,
hidden_states_ngram=all_ngram_stream_hidden_states,
attentions=all_main_stream_attns,
ngram_attentions=all_ngram_stream_attns,
cross_attentions=all_cross_attns,
)
def compute_buffered_relative_buckets(self, position_ids):
batch_size, sequence_length = position_ids.shape
position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1)
main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(
self.num_buckets, self.relative_max_distance, position_ids
)
# buffer relative buckets
main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1)
predict_relative_buckets = torch.cat(
[
predict_relative_buckets[:, :sequence_length, :sequence_length],
predict_relative_buckets[
:, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length
],
],
2,
).repeat(batch_size, 1, 1)
return main_relative_buckets, predict_relative_buckets
def prepare_attention_mask(self, hidden_states, attention_mask):
seq_length, batch_size = hidden_states.shape[:2]
# get causal mask
causal_mask = hidden_states.new(seq_length, seq_length).float().fill_(-float("inf"))
causal_mask = torch.triu(causal_mask, 1)
extended_causal_mask = causal_mask[:seq_length, :seq_length][None, :, :].expand(
(batch_size,) + causal_mask.shape
)
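        # e.g. for seq_length = 3 the causal part is
        #   [[0., -inf, -inf],
        #    [0.,   0., -inf],
        #    [0.,   0.,   0.]],
        # i.e. position i may only attend to positions <= i; padding positions additionally receive -10000 below.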
# add usual attention mask
if attention_mask is not None:
extended_attention_mask = (1.0 - attention_mask[:, None, :]) * -10000.0
extended_attention_mask = extended_causal_mask + extended_attention_mask
else:
extended_attention_mask = extended_causal_mask
return extended_attention_mask.repeat(self.config.num_decoder_attention_heads, 1, 1).to(hidden_states.dtype)
def prepare_predict_attention_mask(self, hidden_states, attention_mask):
seq_length, batch_size = hidden_states.shape[:2]
# get causal mask
predict_causal_mask = ngram_attention_bias(
self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype
)
predict_causal_mask = torch.cat(
[
predict_causal_mask[:, :seq_length, :seq_length],
predict_causal_mask[
:, :seq_length, self.max_target_positions : self.max_target_positions + seq_length
],
],
dim=-1,
)
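        # The predict-stream mask keeps two key blocks per query position: the first seq_length columns address the
        # main stream and the columns sliced from offset max_target_positions address the predict stream itself,
        # which is why two column blocks are concatenated here.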
extended_predict_causal_mask = predict_causal_mask[:, None, :, :].expand(
predict_causal_mask.shape[:1] + (batch_size,) + predict_causal_mask.shape[1:]
)
# add usual attention mask
if attention_mask is not None:
extended_attention_mask = (1.0 - attention_mask[None, :, None, :]) * -10000.0
extended_attention_mask = extended_attention_mask.expand((self.ngram, batch_size, seq_length, seq_length))
# predicted stream attention_mask should always be 0
extended_attention_mask = torch.cat(
[extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1
)
extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask
else:
extended_predict_attention_mask = extended_predict_causal_mask
return extended_predict_attention_mask.repeat(1, self.config.num_decoder_attention_heads, 1, 1).to(
hidden_states.dtype
)
@add_start_docstrings(
"The bare ProphetNet Model outputting raw hidden-states without any specific head on top.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetModel(ProphetNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
encoder_config = copy.deepcopy(config)
encoder_config.is_encoder_decoder = False
encoder_config.use_cache = False
self.encoder = ProphetNetEncoder(encoder_config, self.word_embeddings)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings)
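        # Encoder and decoder share the single `word_embeddings` table created above; `set_input_embeddings` below
        # keeps all three references (model, encoder, decoder) pointing at the same weight matrix.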
self.init_weights()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
self.encoder.word_embeddings = self.word_embeddings
self.decoder.word_embeddings = self.word_embeddings
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ProphetNetSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs: Optional[Tuple] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetModel
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetModel.from_pretrained('microsoft/prophetnet-large-uncased')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state # main stream hidden states
>>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states
"""
        use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return ProphetNetSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram,
decoder_attentions=decoder_outputs.attentions,
decoder_ngram_attentions=decoder_outputs.ngram_attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel):
def __init__(self, config: ProphetNetConfig):
super().__init__(config)
self.prophetnet = ProphetNetModel(config)
self.padding_idx = config.pad_token_id
self.disable_ngram_loss = config.disable_ngram_loss
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_input_embeddings(self):
return self.prophetnet.word_embeddings
@add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in :obj:`[-100, 0, ...,
config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for
labels in ``[0, ..., config.vocab_size]``
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetForConditionalGeneration
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetForConditionalGeneration.from_pretrained('microsoft/prophetnet-large-uncased')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> logits_next_token = outputs.logits # logits to predict next token as usual
>>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
outputs = self.prophetnet(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
batch_size, sequence_length = (
decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2]
)
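        # `outputs[1]` is the predict-stream output (`last_hidden_state_ngram`, shape (B, ngram * T, hidden)) when
        # config.ngram > 0; stream 0 yields the ordinary next-token logits and the remaining streams the
        # further-ahead predictions that feed the n-gram loss.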
predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
predict_logits = self.lm_head(predicting_streams)
logits = predict_logits[:, 0]
logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
# To use .view in loss computation, make sure that logits is contiguous.
if not logits.is_contiguous():
logits = logits.contiguous()
loss = None
if labels is not None:
loss = self._compute_loss(predict_logits, labels)
if not return_dict:
all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
else:
return ProphetNetSeq2SeqLMOutput(
loss=loss,
logits=logits,
logits_ngram=logits_ngram,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states,
decoder_attentions=outputs.decoder_attentions,
decoder_ngram_attentions=outputs.decoder_ngram_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def _compute_loss(self, logits, labels, ignore_index=-100):
expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
for i in range(self.config.ngram):
if i > 0 and self.disable_ngram_loss:
break
expend_targets[i, :, :] = labels
lprobs = F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
)
loss = F.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
if self.config.eps > 0.0:
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
smooth_loss = smooth_loss[non_masked_tokens]
smooth_loss = smooth_loss.mean()
eps_i = self.config.eps / lprobs.size(-1)
loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
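            # i.e. loss = (1 - eps) * mean NLL of the target tokens + (eps / vocab_size) * mean summed NLL over the
            # vocabulary, the standard label-smoothing decomposition, applied jointly to the main and predict streams.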
return loss
def prepare_inputs_for_generation(
self, decoder_input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
assert encoder_outputs is not None, "`encoder_outputs` have to be passed for generation."
if past:
decoder_input_ids = decoder_input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
# this function reorders the cache for beam search
def _reorder_cache(cache_dict, beam_idx):
for k, key_value_states in cache_dict.items():
if key_value_states is not None:
cache_dict[k] = key_value_states.index_select(0, beam_idx)
return cache_dict
reordered_past = []
for layer_past in past:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
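            # (each layer's cache is a dict like {"self": {...}, "cross_attention": {...}} whose key/value tensors have
            # shape (batch, heads, seq, head_dim); index_select(0, beam_idx) reorders that batch dim to follow the
            # surviving beams)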
layer_past_new = {
attn_key: _reorder_cache(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
}
reordered_past.append(layer_past_new)
return reordered_past
def get_encoder(self):
return self.prophetnet.encoder
def get_decoder(self):
return self.prophetnet.decoder
@add_start_docstrings(
"The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal language modeling.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetForCausalLM(ProphetNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
# set config for CLM
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
self.prophetnet = ProphetNetDecoderWrapper(config)
self.padding_idx = config.pad_token_id
self.disable_ngram_loss = config.disable_ngram_loss
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.init_weights()
def get_input_embeddings(self):
return self.prophetnet.decoder.word_embeddings
def set_input_embeddings(self, value):
self.prophetnet.decoder.word_embeddings = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.prophetnet.decoder = decoder
def get_decoder(self):
return self.prophetnet.decoder
@add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetForCausalLM
>>> import torch
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetForCausalLM.from_pretrained('microsoft/prophetnet-large-uncased')
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> # Model can also be used with EncoderDecoder framework
>>> from transformers import BertTokenizer, EncoderDecoderModel, ProphetNetTokenizer
>>> import torch
>>> tokenizer_enc = BertTokenizer.from_pretrained('bert-large-uncased')
>>> tokenizer_dec = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-large-uncased", "microsoft/prophetnet-large-uncased")
>>> ARTICLE = (
... "the us state department said wednesday it had received no "
... "formal word from bolivia that it was expelling the us ambassador there "
... "but said the charges made against him are `` baseless ."
... )
>>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids
>>> labels = tokenizer_dec("us rejects charges against its ambassador in bolivia", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:])
>>> loss = outputs.loss
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.prophetnet.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2]
predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
predict_logits = self.lm_head(predicting_streams)
logits = predict_logits[:, 0]
logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
loss = None
if labels is not None:
loss = self._compute_loss(predict_logits, labels)
if not return_dict:
all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
else:
return ProphetNetDecoderLMOutput(
loss=loss,
logits=logits,
logits_ngram=logits_ngram,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
hidden_states_ngram=outputs.hidden_states_ngram,
attentions=outputs.attentions,
ngram_attentions=outputs.ngram_attentions,
cross_attentions=outputs.cross_attentions,
)
def _compute_loss(self, logits, labels, ignore_index=-100):
expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
for i in range(self.config.ngram):
if i > 0 and self.disable_ngram_loss:
break
expend_targets[i, :, :] = labels
lprobs = F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
)
loss = F.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
if self.config.eps > 0.0:
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
smooth_loss = smooth_loss[non_masked_tokens]
smooth_loss = smooth_loss.mean()
eps_i = self.config.eps / lprobs.size(-1)
loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
return loss
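    # Worked form of the smoothing above (a hand-written note, not executed): with eps = config.eps
    # and vocabulary size V, the value returned by `_compute_loss` is
    #     loss = (1 - eps) * NLL(expanded n-gram targets) + (eps / V) * mean(sum_over_vocab(-log p)),
    # i.e. standard label smoothing applied jointly to the main stream and every predicted n-gram stream.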
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
# this function reorders the cache for beam search
def _reorder_cache(cache_dict, beam_idx):
for k, key_value_states in cache_dict.items():
if key_value_states is not None:
cache_dict[k] = key_value_states.index_select(0, beam_idx)
return cache_dict
reordered_past = []
for layer_past in past:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
layer_past_new = {
attn_key: _reorder_cache(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
}
reordered_past.append(layer_past_new)
return reordered_past
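# Illustrative sketch (not part of the upstream module): what the cache re-ordering above does during
# beam search. Every cached tensor keeps beams along dimension 0, so selecting with `beam_idx` keeps
# the cached key/value states aligned with the surviving beams. Shapes below are made-up placeholders.
#
#     cache = {"prev_key_states": torch.randn(4, 16, 7, 64)}   # (beams, heads, seq_len, head_dim)
#     beam_idx = torch.tensor([1, 0, 2, 2])                    # beam 3 dropped, beam 2 duplicated
#     cache = {k: v.index_select(0, beam_idx) for k, v in cache.items()}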
class ProphetNetDecoderWrapper(ProphetNetPreTrainedModel):
"""
This is a wrapper class, so that :class:`~transformers.ProphetNetForCausalLM` can correctly be loaded from
pretrained prophetnet classes.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = ProphetNetDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
| [
"[email protected]"
] | |
d421bd86eebc600f231707b7649f32908802167c | 8b2b61d1c6a9d58f79f65e4e91c281a5fb53ade2 | /magic/magic.py | a781e8d46efd90383017430f97931c80d8ccaae7 | [] | no_license | PiotrDabkowski/NeuralMagic | ffcdcc7f24fbc825eabe48ddfacddf84127a41be | cad50c9ba77b17b67d8b15c1fbed02487373ea21 | refs/heads/master | 2021-01-22T18:38:39.446605 | 2017-03-15T17:28:02 | 2017-03-15T17:28:02 | 85,101,058 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,775 | py | import tensorflow as tf
import numpy as np
from tensorflow.contrib import layers as tf_layers
from dense_net import DenseBlock, TrainsitionLayer, bottleneck_block
import random_hole
import tiny_imagenet
# Based on DenseNets but with an extra trick with dilated convolutions to increase receptive fields
# Basically, it intelligently converts one texture to another, and you can train it for almost anything: image segmentation, super resolution,
# neural style, image inpainting, neural doodle, etc. The only drawback is that for every task you will need to train the model from scratch...
# I will try to make things more generic by adding an additional input to the Magic network - so that the transformation is a
# function of an image and a goal.
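# Rough usage sketch (illustrative only; it simply mirrors the definitions further down in this file,
# `images` is assumed to be an NHWC float32 tensor and pixel_mean/pixel_std are placeholders for
# per-channel image statistics):
#
#     specs = Specs(StandardDownsample, StandardUpsample)
#     magic = Magic(specs, trainable=True, weights_collections=['magic_weights'])
#     features = magic(images)
#     rgb = to_image_channels(features, 3,
#                             output_channel_ranges_from_mean_std(pixel_mean, pixel_std))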
class DownsampleBlockSpecs:
def __init__(self, layers, growth_factors, dilate_after=None, bottleneck=0.25, keep_dim_fraction=0.5):
        ''' layers specifies how many layers the given dense block should have
            growth_factors specifies the growth factor for each layer (if an int, it is constant for all layers)
            dilate_after is a list of layer indices after which the dilation is multiplied by 2; if None, no dilation is used'''
self.layers = layers
self.growth_factors = growth_factors if type(growth_factors)!=int else self.layers*[growth_factors]
self.dilations = [1]*layers
if dilate_after is not None:
for k in dilate_after:
while k<len(self.dilations):
self.dilations[k] *= 2
k += 1
self.bottleneck = bottleneck
self.keep_dim_fraction = keep_dim_fraction
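# Example of the dilation bookkeeping above (hand-checked, not executed here):
# DownsampleBlockSpecs(7, 16, dilate_after=[3, 5]) yields self.dilations == [1, 1, 1, 2, 2, 4, 4],
# because every layer index >= 3 is doubled once and every index >= 5 is doubled again.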
class UpsableBlockSpecs:
def __init__(self, kernel_size, channels, passthrough, passthrough_relative_size, follow_up_residual_block, activation=tf.nn.elu):
self.kernel_size = kernel_size
self.channels = channels
self.passthrough = passthrough
if passthrough:
assert follow_up_residual_block, 'You must follow up with residual blocks if you use passthrough so that you get specified number of channels'
self.passthrough_relative_size = passthrough_relative_size
self.follow_up_residual_block = follow_up_residual_block
self.activation = activation
class Specs:
def __init__(self, downsample_blocks, upsample_blocks):
self.downsample_blocks = downsample_blocks
self.upsample_blocks = upsample_blocks
print 'According to specs the resolution of the output will be x%f' % 2**(len(upsample_blocks)-len(downsample_blocks)+1)
class Magic:
def __init__(self, specs, trainable=True, weights_collections=None):
assert isinstance(specs, Specs)
self.specs = specs
self.trainable = trainable
self.batch_norm_params = {'updates_collections': None, 'is_training': trainable, 'trainable': trainable, 'scale': False}
assert not isinstance(weights_collections, basestring), 'Must be a list of collections!'
self.variable_collections = None if weights_collections is None else {'weights': weights_collections}
self.weight_collections = weights_collections
self.d_res_maps = None
self.u_res_maps = None
self.own_scope_name = None
def __call__(self, images):
''' transforms images '''
resolution = 1.
out = images
self.d_res_maps = {}
self.u_res_maps = {}
with tf.variable_scope(None, default_name='MagicNet'):
with tf.variable_scope('downsampler'):
for dblock in self.specs.downsample_blocks:
out = DenseBlock(growth_rate=dblock.growth_factors, layers=dblock.layers,
bottleneck=dblock.bottleneck, trainable=self.trainable,
weights_collections=self.weight_collections, dilation_factors=dblock.dilations)(out)
out, res_map = TrainsitionLayer(keep_dim_fraction=dblock.keep_dim_fraction, trainable=self.trainable,
weights_collections=self.weight_collections)(out)
self.d_res_maps[resolution] = res_map
resolution /= 2.
resolution *= 2.
out = self.d_res_maps[resolution]
with tf.variable_scope('upsampler'):
for ublock in self.specs.upsample_blocks:
# first standard deconv
out = tf_layers.conv2d_transpose(out, ublock.channels, ublock.kernel_size, stride=2,
activation_fn=ublock.activation,
normalizer_fn=tf_layers.batch_norm,
normalizer_params=self.batch_norm_params,
variables_collections=self.variable_collections,
trainable=self.trainable)
resolution *= 2.
if ublock.passthrough:
assert ublock.follow_up_residual_block
take_from = self.d_res_maps[resolution]
# the question is: should we add the passthrough or concat as extra channels?
# if concat then use batch_norm + activation, otherwise not but has to have the same num of channels
# will use concat for now
if ublock.passthrough_relative_size != 1:
ext = tf_layers.conv2d(take_from, int(take_from.get_shape().as_list()[-1] * ublock.passthrough_relative_size), 1,
stride=1,
activation_fn=ublock.activation,
normalizer_fn=tf_layers.batch_norm,
normalizer_params=self.batch_norm_params,
variables_collections=self.variable_collections,
trainable=self.trainable)
else:
ext = take_from
out = tf.concat((out, ext), 3)
if ublock.follow_up_residual_block:
if not isinstance(ublock.follow_up_residual_block, int):
blocks = 1
else:
blocks = ublock.follow_up_residual_block
for _ in xrange(blocks):
out = bottleneck_block(out, ublock.channels, stride=1, training=self.trainable, weights_collections=self.weight_collections, scale=False, activation=ublock.activation)
self.u_res_maps[resolution] = out
scope = tf.get_variable_scope()
self.own_scope_name = scope.name
return out
def get_own_variables(self):
return tf.get_collection(tf.GraphKeys().GLOBAL_VARIABLES, scope=self.own_scope_name)
def get_own_weights(self):
assert self.weight_collections
return tf.get_collection(self.weight_collections[-1])
def get_num_params(self):
s = 0
for e in self.get_own_weights():
s += np.prod(e.get_shape().as_list())
return s
def get_own_l2_loss(self):
print 'Number of params in weights', self.get_num_params()
return sum(map(tf.nn.l2_loss, self.get_own_weights()), tf.constant(0.))
def output_channel_ranges_from_mean_std(mean, std):
new_mean = (255./2 - mean)/std
new_range = 255. / std
return np.concatenate((np.expand_dims(new_mean - new_range/2., 1), np.expand_dims(new_mean + new_range/2. , 1)), 1)
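# Worked example of the range computation above (hand arithmetic): for a channel with mean = 110 and
# std = 60, new_mean = (127.5 - 110) / 60 and new_range = 255 / 60, so the returned interval is
# [(0 - 110) / 60, (255 - 110) / 60] ~= [-1.833, 2.417] - exactly the span that raw pixel values
# 0..255 occupy after the (x - mean) / std normalisation.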
def to_image_channels(inp, num_channels, output_channel_ranges, trainable=True, nonlinearity=tf.nn.tanh, nonlinearity_range=(-1, 1)):
# for each channel you must supply a range (min_val, max_val) as array CHANS X 2
assert len(output_channel_ranges) == num_channels
print output_channel_ranges
with tf.variable_scope(None, default_name='ToImageChannels'):
out = tf_layers.conv2d(inp, num_channels, 1,
stride=1,
activation_fn=nonlinearity,
trainable=trainable)
nonlinearity_mean = sum(nonlinearity_range) / 2.
nonlinearity_spread = float(nonlinearity_range[1]) - nonlinearity_range[0]
output_channel_ranges = np.array(output_channel_ranges)
output_channel_means = np.mean(output_channel_ranges, 1)
output_channel_spreads = output_channel_ranges[:, 1] - output_channel_ranges[:, 0]
return (out - nonlinearity_mean) / nonlinearity_spread * output_channel_spreads + output_channel_means
def to_classification_layer(inp, num_classes, trainable=True, weights_collections=None):
variables_collections = None if weights_collections is None else {'weights': weights_collections}
with tf.variable_scope(None, default_name='ClassificationLayer'):
out = tf.reduce_mean(inp, (1,2))
out = tf_layers.fully_connected(out, num_classes,
activation_fn=None,
variables_collections=variables_collections,
trainable=trainable)
return out
StandardDownsample = [DownsampleBlockSpecs(7, 16, [3, 5]),
DownsampleBlockSpecs(11, 22, [5, 7]),
DownsampleBlockSpecs(14, 22, [7]),
DownsampleBlockSpecs(16, 22, None)]
DiscDownsample = [DownsampleBlockSpecs(6, 16, [3, 5]),
DownsampleBlockSpecs(8, 22, [4, 6]),
DownsampleBlockSpecs(10, 22, [6]),]
StandardUpsample = [UpsableBlockSpecs(2, 256, True, 1, 3),
UpsableBlockSpecs(2, 128, True, 1, 3),
UpsableBlockSpecs(2, 64, True, 1, 3)
]
def get_spatial_feature_weights(mask, masked_weight):
temp = masked_weight*(1.-mask) + mask
return temp / tf.reduce_mean(temp)
BS = 4
MASKED_FEATURES_WEIGHT = 11.
IMG_SIZE = 80
import imagenet
StandardMagic = Specs(StandardDownsample, StandardUpsample)
def get_vars(scope):
return tf.get_collection(tf.GraphKeys().GLOBAL_VARIABLES, scope=scope)
masks = tf.ones((BS, IMG_SIZE, IMG_SIZE, 1))* tf.expand_dims(random_hole.matrix_select(tf.ones((IMG_SIZE, IMG_SIZE, 1)), 8, 72, 8, 72), 0)
#masks = 1. - tf.expand_dims(random_hole.random_matrices_gen(BS, 20, 30, (IMG_SIZE, IMG_SIZE)), 3) # tf.ones((BS, 64, 64, 1), tf.float32) # 0s or 1s, randomly generated every run!
masks1 = masks
masks2 = tf.image.resize_bilinear(masks, (IMG_SIZE/2, IMG_SIZE/2))
masks4 = tf.image.resize_bilinear(masks, (IMG_SIZE/4, IMG_SIZE/4))
masks8 = tf.image.resize_bilinear(masks, (IMG_SIZE/8, IMG_SIZE/8))
images = tf.placeholder(tf.float32, (BS, IMG_SIZE, IMG_SIZE, 3))
labels = tf.placeholder(tf.int32, (BS,))
masked_images = masks1*images
with tf.variable_scope('EncDec'):
a = Magic(StandardMagic, weights_collections=['abc'], trainable=False)
fake_imgs = to_image_channels(a(masked_images), 3,
output_channel_ranges=output_channel_ranges_from_mean_std(tiny_imagenet.IMAGE_NET_PIXEL_MEAN,
tiny_imagenet.IMAGE_NET_PIXEL_STD))
raw_scores = to_classification_layer(a.d_res_maps[1/8.], 200, weights_collections=['abc'])
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=raw_scores, labels=labels))
with tf.variable_scope('Disc'):
disc = Magic(Specs(DiscDownsample, []), weights_collections=['yub'], trainable=False)
disc(tf.concat((images, fake_imgs), 0))
disc_probs = tf_layers.conv2d(disc.d_res_maps[1 / 4.], 1, 1, activation_fn=tf.nn.sigmoid)
real_features1, fake_features1 = tf.split(disc.d_res_maps[1.], 2)
real_features2, fake_features2 = tf.split(disc.d_res_maps[1./2], 2)
real_features4, fake_features4 = tf.split(disc.d_res_maps[1./4], 2)
distance_loss = (tf.reduce_mean(get_spatial_feature_weights(masks1, MASKED_FEATURES_WEIGHT)*((images - fake_imgs) ** 2)) #+
# tf.reduce_mean(get_spatial_feature_weights(masks2, MASKED_FEATURES_WEIGHT)*((real_features2 - fake_features2) ** 2)) +
# tf.reduce_mean(get_spatial_feature_weights(masks4, MASKED_FEATURES_WEIGHT)*((real_features4 - fake_features4) ** 2))
) / 2.
real_probs_map, fake_probs_map = tf.split(disc_probs, 2)
# we have to use masks4, it may seem high res but actually they cover about 32x32 image patches thanks to dilated convolutions
i_masks4 = 1. - masks4
i_masks4_areas = tf.reduce_sum(i_masks4, (1,2,3))
real_probs, fake_probs = tf.reduce_sum(real_probs_map*i_masks4, (1,2,3)) / i_masks4_areas, \
tf.reduce_sum(fake_probs_map*i_masks4, (1,2,3)) / i_masks4_areas
trick_loss = -tf.reduce_mean(tf.log(1.-fake_probs))
disc_loss = (-tf.reduce_mean(tf.log(fake_probs)) - tf.reduce_mean(tf.log(1-real_probs))) / 2.
full_disc_loss = disc_loss + 0.0005*disc.get_own_l2_loss()
print len(a.get_own_variables())
full_loss = 0.0005*a.get_own_l2_loss() + 0.5*distance_loss + 0.1*trick_loss
import time, cv2
LAST = time.time()
def tick(extra_vars, batch):
global LAST
if time.time() - LAST < 10:
return
LAST = time.time()
cv2.imwrite('xyz.jpg', tiny_imagenet.to_bgr_img(np.concatenate((extra_vars['fake_imgs'][0], extra_vars['masked_images'][0]), 0)))
print len(tf.global_variables())
print len(tf.trainable_variables())
print len(a.get_own_weights())
print a.get_num_params()
train_main = tf.train.MomentumOptimizer(0.01, 0.9, use_nesterov=True).minimize(full_loss, var_list=get_vars('EncDec'))
disc_train_every = 15
maybe_train_disc = tf.cond(tf.random_uniform((), 0., 1.) < 1./disc_train_every,
lambda : tf.train.MomentumOptimizer(0.05, 0.9, use_nesterov=True).minimize(full_disc_loss, var_list=get_vars('Disc')),
lambda : tf.no_op())
train_op = tf.group(train_main, maybe_train_disc)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
import tfutils
train_bm = tiny_imagenet.get_train_bm(BS)
val_bm = tiny_imagenet.get_train_bm(BS)
saver = tf.train.Saver(tf.global_variables())
nt = tfutils.NiceTrainer(sess, train_bm, [images, labels], train_op, bm_val=val_bm,
extra_variables={#'loss': loss,
#'probs': tf.nn.softmax(raw_scores),
'trick_loss': trick_loss,
'disc_loss': disc_loss,
'distance_loss': distance_loss,
'fake_imgs': fake_imgs,
'masked_images': masked_images,
},
printable_vars=['distance_loss', 'disc_loss', 'trick_loss'],
computed_variables={#'acc': tfutils.accuracy_calc_op(),
'tick': tick},
saver=saver,
save_every=5000000,
save_dir='chuj',
smooth_coef=0.9)
nt.restore(relaxed=True)
# reinit = tf.get_collection(tf.GraphKeys().GLOBAL_VARIABLES, scope='Disc')
# assert reinit
# sess.run(tf.variables_initializer(reinit))
while True:
nt.train()
nt.validate()
nt.save()
| [
"[email protected]"
] | |
c54fa934590f5cb47a549f57d815aa35745143c9 | 02338bb8111fc1aa88e830ac09a11664720eb2d4 | /tmp/azure_rm_dpscertificate_info.py | 186ac98ef17029cf91c7ff582d8755547b35601b | [] | no_license | Fred-sun/fred_yaml | a49977b0e8505c7447df23dd80c7fef1be70e6bc | 295ca4cd2b59b8d2758f06eb7fd79920327ea524 | refs/heads/master | 2023-04-28T05:51:56.599488 | 2023-04-25T13:52:10 | 2023-04-25T13:52:10 | 131,376,340 | 0 | 1 | null | 2020-07-06T14:22:46 | 2018-04-28T05:34:49 | TSQL | UTF-8 | Python | false | false | 9,585 | py | #!/usr/bin/python
#
# Copyright (c) 2020 GuopengLin, (@t-glin)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_dpscertificate_info
version_added: '2.9'
short_description: Get DpsCertificate info.
description:
- Get info of DpsCertificate.
options:
certificate_name:
description:
- Name of the certificate to retrieve.
type: str
resource_group_name:
description:
- Resource group identifier.
- Name of resource group.
required: true
type: str
provisioning_service_name:
description:
- Name of the provisioning service the certificate is associated with.
- Name of provisioning service to retrieve certificates for.
required: true
type: str
if_match:
description:
- ETag of the certificate.
type: str
extends_documentation_fragment:
- azure
author:
- GuopengLin (@t-glin)
'''
EXAMPLES = '''
- name: DPSGetCertificate
azure_rm_dpscertificate_info:
certificate_name: cert
provisioning_service_name: myFirstProvisioningService
resource_group_name: myResourceGroup
- name: DPSGetCertificates
azure_rm_dpscertificate_info:
provisioning_service_name: myFirstProvisioningService
resource_group_name: myResourceGroup
'''
RETURN = '''
dps_certificate:
description: >-
A list of dict results where the key is the name of the DpsCertificate and
the values are the facts for that DpsCertificate.
returned: always
type: complex
contains:
properties:
description:
- properties of a certificate
returned: always
type: dict
sample: null
contains:
subject:
description:
- The certificate's subject name.
returned: always
type: str
sample: null
expiry:
description:
- The certificate's expiration date and time.
returned: always
type: str
sample: null
thumbprint:
description:
- The certificate's thumbprint.
returned: always
type: str
sample: null
is_verified:
description:
- Determines whether certificate has been verified.
returned: always
type: bool
sample: null
created:
description:
- The certificate's creation date and time.
returned: always
type: str
sample: null
updated:
description:
- The certificate's last update date and time.
returned: always
type: str
sample: null
id:
description:
- The resource identifier.
returned: always
type: str
sample: null
name:
description:
- The name of the certificate.
returned: always
type: str
sample: null
etag:
description:
- The entity tag.
returned: always
type: str
sample: null
type:
description:
- The resource type.
returned: always
type: str
sample: null
value:
description:
- The array of Certificate objects.
returned: always
type: list
sample: null
contains:
properties:
description:
- properties of a certificate
returned: always
type: dict
sample: null
contains:
subject:
description:
- The certificate's subject name.
returned: always
type: str
sample: null
expiry:
description:
- The certificate's expiration date and time.
returned: always
type: str
sample: null
thumbprint:
description:
- The certificate's thumbprint.
returned: always
type: str
sample: null
is_verified:
description:
- Determines whether certificate has been verified.
returned: always
type: bool
sample: null
created:
description:
- The certificate's creation date and time.
returned: always
type: str
sample: null
updated:
description:
- The certificate's last update date and time.
returned: always
type: str
sample: null
id:
description:
- The resource identifier.
returned: always
type: str
sample: null
name:
description:
- The name of the certificate.
returned: always
type: str
sample: null
etag:
description:
- The entity tag.
returned: always
type: str
sample: null
type:
description:
- The resource type.
returned: always
type: str
sample: null
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.iot import iotDpsClient
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMDpsCertificateInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
certificate_name=dict(
type='str'
),
resource_group_name=dict(
type='str',
required=True
),
provisioning_service_name=dict(
type='str',
required=True
),
if_match=dict(
type='str'
)
)
self.certificate_name = None
self.resource_group_name = None
self.provisioning_service_name = None
self.if_match = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200]
self.query_parameters = {}
self.query_parameters['api-version'] = '2020-09-01-preview'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
self.mgmt_client = None
super(AzureRMDpsCertificateInfo, self).__init__(self.module_arg_spec, supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(iotDpsClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2020-09-01-preview')
if (self.certificate_name is not None and
self.resource_group_name is not None and
self.provisioning_service_name is not None):
self.results['dps_certificate'] = self.format_item(self.get())
elif (self.resource_group_name is not None and
self.provisioning_service_name is not None):
self.results['dps_certificate'] = self.format_item(self.list())
return self.results
def get(self):
response = None
try:
response = self.mgmt_client.dps_certificate.get(certificate_name=self.certificate_name,
resource_group_name=self.resource_group_name,
provisioning_service_name=self.provisioning_service_name,
if_match=self.if_match)
except CloudError as e:
            self.log('Could not get info for DpsCertificate.')
return response
def list(self):
response = None
try:
response = self.mgmt_client.dps_certificate.list(resource_group_name=self.resource_group_name,
provisioning_service_name=self.provisioning_service_name)
except CloudError as e:
            self.log('Could not get info for DpsCertificate.')
return response
def format_item(self, item):
if hasattr(item, 'as_dict'):
return [item.as_dict()]
else:
result = []
items = list(item)
for tmp in items:
result.append(tmp.as_dict())
return result
def main():
AzureRMDpsCertificateInfo()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a5f3a1d7bc6d6ea8f54a13b11fa07fa758a81d39 | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/DesignPointSelectStrategy_20210714191514.py | 60ba3be53c8d65f3e3bf954ef6f39baf671d731c | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,787 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPDP1P2 as ca_pd_12
from scipy.optimize import curve_fit
"""
The unit use is IS standard
"""
class Design_Point_Select_Strategy:
"""This is a design point select strategy from constrains analysis"""
def __init__(self, altitude, velocity, beta, method, p_turbofan_max, p_motorfun_max, n=12):
"""
:param altitude: m x 1 matrix
:param velocity: m x 1 matrix
:param beta: P_motor/P_total m x 1 matrix
:param p_turbofan_max: maximum propulsion power for turbofan (threshold value)
:param p_motorfun_max: maximum propulsion power for motorfun (threshold value)
:param n: number of motor
the first group of condition is for stall speed
the stall speed condition have to use motor, therefore with PD
:return:
power load: design point p/w and w/s
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.n_motor = n
self.p_turbofan_max = p_turbofan_max
self.p_motorfun_max = p_motorfun_max
# initialize the p_w, w_s, hp, n, m
self.n = 100
self.m = len(self.h)
self.hp = np.linspace(0, 1, self.n)
self.hp_threshold = self.p_motorfun_max / (self.p_motorfun_max + self.p_turbofan_max)
# method1 = Mattingly_Method, method2 = Gudmundsson_Method
if method == 1:
self.method1 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun
self.method2 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric
else:
self.method1 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun
self.method2 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric
        # stall-speed condition uses the electric (motor) branch per the docstring note above
        problem = self.method2(self.h[0], self.v[0], self.beta[0], 6000, self.hp_threshold)
self.w_s = problem.allFuncs[0](problem)
def p_w_compute(self):
p_w = np.zeros([self.m, self.n]) # m x n matrix
for i in range(1, 8):
for j in range(self.n):
problem1 = self.method1(self.h[i], self.v[i],
self.beta[i], self.w_s, self.hp[j])
problem2 = self.method2(self.h[i], self.v[i],
self.beta[i], self.w_s, self.hp[j])
if i >= 5:
p_w_1 = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
p_w_2 = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
else:
p_w_1 = problem1.allFuncs[i](problem1)
p_w_2 = problem2.allFuncs[i](problem2)
if p_w_1 > self.p_turbofan_max:
p_w_1 = 100000
elif p_w_2 > self.p_motorfun_max:
p_w_2 = 100000
                p_w[i, j] = p_w_1 + p_w_2
return p_w
def strategy(self):
p_w = Design_Point_Select_Strategy.p_w_compute(self)
        # find the min p_w over the different hp values for each flight condition:
        p_w_min = np.amin(p_w, axis=1)
        hp_p_w_min = np.array(np.where(p_w == p_w_min[:, np.newaxis]))
design_point = np.amax(p_w_min)
return p_w_min, hp_p_w_min
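# Rough usage sketch (illustrative only; the numbers are made-up placeholders, not real aircraft data,
# and the constraint-analysis modules imported above must be available on the path):
#
#     altitude = np.array([0, 0, 9000, 9000, 9000, 0, 3000, 7000])    # m
#     velocity = np.array([60, 80, 230, 230, 230, 100, 150, 200])     # m/s
#     beta = 0.3 * np.ones(8)                                         # P_motor / P_total
#     strategy = Design_Point_Select_Strategy(altitude, velocity, beta, method=1,
#                                             p_turbofan_max=1.2e7, p_motorfun_max=3.0e6)
#     p_w_min, hp_at_min = strategy.strategy()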
| [
"[email protected]"
] | |
5cdb06fe2b728a7c56950f0ef7ab873a08acf5b7 | df7b40e95718ac0f6071a0ba571b42efc81cf6de | /mmseg/models/backbones/fightingcv/conv/DepthwiseSeparableConvolution.py | 8dde19054b1cbfee8452cf320b61bf165bbdeceb | [
"Apache-2.0"
] | permissive | shinianzhihou/ChangeDetection | 87fa2c498248e6124aeefb8f0ee8154bda36deee | 354e71234bef38b6e142b6ba02f23db958582844 | refs/heads/master | 2023-01-23T20:42:31.017006 | 2023-01-09T11:37:24 | 2023-01-09T11:37:24 | 218,001,748 | 162 | 29 | Apache-2.0 | 2022-11-03T04:11:00 | 2019-10-28T08:41:54 | Python | UTF-8 | Python | false | false | 899 | py | import torch
from torch import nn
class DepthwiseSeparableConvolution(nn.Module):
def __init__(self,in_ch,out_ch,kernel_size=3,stride=1,padding=1):
super().__init__()
self.depthwise_conv=nn.Conv2d(
in_channels=in_ch,
out_channels=in_ch,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=in_ch
)
self.pointwise_conv=nn.Conv2d(
in_channels=in_ch,
out_channels=out_ch,
kernel_size=1,
stride=1,
padding=0,
groups=1
)
def forward(self, x):
out=self.depthwise_conv(x)
out=self.pointwise_conv(out)
return out
if __name__ == '__main__':
input=torch.randn(1,3,224,224)
dsconv=DepthwiseSeparableConvolution(3,64)
out=dsconv(input)
print(out.shape)
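    # Parameter-count check (hand arithmetic for the 3 -> 64 channel, 3x3 kernel case above, bias terms ignored):
    #     standard convolution:        3 * 64 * 3 * 3        = 1728 weights
    #     depthwise separable version: 3 * 3 * 3 + 3 * 64    = 27 + 192 = 219 weights (~7.9x fewer)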
| [
"[email protected]"
] | |
14e16f4f11ae53470f7f9898327d4ed7af13658a | ee6fc02e8392ff780a4f0d1a5789776e4d0b6a29 | /code/practice/abc/abc017/b.py | 00e2723566008b22396a8f37944c3663d2794fdc | [] | no_license | mollinaca/ac | e99bb5d5c07159b3ef98cd7067424fa2751c0256 | 2f40dd4333c2b39573b75b45b06ad52cf36d75c3 | refs/heads/master | 2020-12-22T11:02:13.269855 | 2020-09-18T01:02:29 | 2020-09-18T01:02:29 | 236,757,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
s = input()
s = s.replace('ch','').replace('o','').replace('k','').replace('u','')
print ("YES") if len(s) == 0 else print ("NO")
| [
"[email protected]"
] | |
f78fe98818ec4e3c7f3f8938e6c2b1cc0aacfeb5 | f030c1b724ad3a04dade2463374bd3c03e17b93c | /napari/layers/_tests/test_source.py | de9e0954979762eb743aedccddb1bc784505ff21 | [
"BSD-3-Clause"
] | permissive | sandutsar/napari | 3c8568979c320d57cdb80e2ea2a5db7ea035413b | 37d476bc0b00252177f17f25e7d1fd52ddc4bb69 | refs/heads/master | 2023-07-25T08:31:32.189843 | 2021-09-05T11:01:02 | 2021-09-05T11:01:02 | 390,003,115 | 0 | 0 | BSD-3-Clause | 2021-09-05T12:18:14 | 2021-07-27T13:56:23 | Python | UTF-8 | Python | false | false | 1,457 | py | from napari.layers import Points
from napari.layers._source import Source, current_source, layer_source
def test_layer_source():
"""Test basic layer source assignment mechanism"""
with layer_source(path='some_path', reader_plugin='builtins'):
points = Points()
assert points.source == Source(path='some_path', reader_plugin='builtins')
def test_source_context():
"""Test nested contexts, overrides, and resets."""
assert current_source() == Source()
# everything created within this context will have this sample source
with layer_source(sample=('samp', 'name')):
assert current_source() == Source(sample=('samp', 'name'))
# nested contexts override previous ones
with layer_source(path='a', reader_plugin='plug'):
assert current_source() == Source(
path='a', reader_plugin='plug', sample=('samp', 'name')
)
# note the new path now...
with layer_source(path='b'):
assert current_source() == Source(
path='b', reader_plugin='plug', sample=('samp', 'name')
)
# as we exit the contexts, they should undo their assignments
assert current_source() == Source(
path='a', reader_plugin='plug', sample=('samp', 'name')
)
assert current_source() == Source(sample=('samp', 'name'))
assert current_source() == Source()
| [
"[email protected]"
] | |
2c2b4b54559435087b2f62c0c283829e9b7231ac | f865fdd970f8e37ea2aa5157374af8c4d6ced987 | /test/test_vehicle.py | bee0fd446e3e911c97a237c630c531bd3edceb95 | [] | no_license | gkeep-openapi/python-sdk | 7e809448355bff535b3d64e013f001e9196c5e19 | 7c4f3785b47a110386ef10109619654522c95de5 | refs/heads/master | 2022-05-28T16:13:06.643958 | 2022-05-13T14:58:39 | 2022-05-13T14:58:39 | 235,536,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | # coding: utf-8
"""
Gkeep API
Gkeep API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.vehicle import Vehicle # noqa: E501
from swagger_client.rest import ApiException
class TestVehicle(unittest.TestCase):
"""Vehicle unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testVehicle(self):
"""Test Vehicle"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.vehicle.Vehicle() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"gkeep-ci-jenkins"
] | gkeep-ci-jenkins |
dd438d3b7d5636c160f3cf6c666427e41f69c4b1 | 73f04095a7905fa84e0ff255a07f730b4c4963d5 | /dmi/sst/mw_oe/preprocessor.py | f2971521644c9b4ab285d2fc7c8025b499385a03 | [] | no_license | bcdev/dmi-oe-sst | ff6d6d0795848ae6dbddb5a31ca5f32d7326c64a | 03c8f4558e4a3452c009fe7292777faa12188449 | refs/heads/master | 2018-09-03T21:52:25.946135 | 2018-06-04T08:18:40 | 2018-06-04T08:18:40 | 107,671,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,691 | py | import numpy as np
import xarray as xr
from numba import jit, prange
from xarray import Variable
from dmi.sst.mw_oe.pressure_processor import PressureProcessor
from dmi.sst.util.default_data import DefaultData
DEG_TO_RAD = np.pi / np.float64(180.0)
RAD_TO_DEG = np.float64(180.0) / np.pi
class Preprocessor:
    TO_SQUEEZE_NAMES = ["insitu.time", "insitu.lat", "insitu.lon", "insitu.sea_surface_temperature", "insitu.sst_depth", "insitu.sst_qc_flag", "insitu.sst_track_flag"]
TO_AVERAGE_NAMES = []
TO_CENTER_EXTRACT_NAMES = ["amsre.nwp.sea_surface_temperature", "amsre.nwp.skin_temperature", "amsre.nwp.log_surface_pressure", "amsre.nwp.cloud_liquid_water",
"amsre.nwp.total_column_water_vapour", "amsre.nwp.total_precip", "amsre.pixel_data_quality6V", "amsre.pixel_data_quality6H", "amsre.pixel_data_quality10V",
"amsre.pixel_data_quality10H", "amsre.pixel_data_quality18V", "amsre.pixel_data_quality18H", "amsre.pixel_data_quality23V", "amsre.pixel_data_quality23H",
"amsre.pixel_data_quality36V", "amsre.pixel_data_quality36H", "amsre.solar_zenith_angle", "amsre.scan_data_quality", "amsre.satellite_zenith_angle",
"amsre.satellite_azimuth_angle", "amsre.Geostationary_Reflection_Latitude", "amsre.Geostationary_Reflection_Longitude", "amsre.latitude", "amsre.longitude",
"amsre.brightness_temperature6V", "amsre.brightness_temperature6H", "amsre.brightness_temperature10V", "amsre.brightness_temperature10H", "amsre.brightness_temperature18V",
"amsre.brightness_temperature18H", "amsre.brightness_temperature23V", "amsre.brightness_temperature23H", "amsre.brightness_temperature36V", "amsre.brightness_temperature36H"]
TO_STDDEV_NAMES = ["amsre.brightness_temperature23V", "amsre.brightness_temperature23H", "amsre.brightness_temperature36V", "amsre.brightness_temperature36H"]
WIND_SPEED_VARIABLES = ["amsre.nwp.10m_east_wind_component", "amsre.nwp.10m_north_wind_component"]
NWP_SST_VARIABLES = ["amsre.nwp.sea_surface_temperature"]
FILENAME_VARIABLES = ["amsre.l2a_filename"]
AVERAGING_LENGTH = 5 # @todo 3 tb/tb this can be a parameter to the processor 2017-11-17
STDDEV_LENGTH = 21 # @todo 3 tb/tb this can be a parameter to the processor 2017-12-13
INV_GRAVITY_CONST = 1.0 / 9.80665 # s^2/m
SST_NWP_BIAS = -0.05
def run(self, dataset, flag_coding=None):
preprocessed_data = xr.Dataset()
for variable_name in dataset.variables:
print(" ... " + variable_name)
if variable_name in self.TO_SQUEEZE_NAMES:
self.squeeze_data(dataset, preprocessed_data, variable_name)
continue
if variable_name in self.TO_AVERAGE_NAMES:
if variable_name in self.TO_STDDEV_NAMES:
self.calc_std_dev(dataset, preprocessed_data, variable_name, flag_coding)
self.average_subset(dataset, preprocessed_data, variable_name, flag_coding)
continue
if variable_name in self.TO_CENTER_EXTRACT_NAMES:
self.extract_center_px(dataset, preprocessed_data, variable_name)
self.convert_temperature(preprocessed_data, variable_name)
self.apply_sst_nwp_bias(preprocessed_data, variable_name)
if variable_name in self.TO_STDDEV_NAMES:
self.calc_std_dev(dataset, preprocessed_data, variable_name, flag_coding)
continue
if variable_name in self.WIND_SPEED_VARIABLES:
self.process_wind_speed_and_relative_angle(dataset, preprocessed_data)
continue
if variable_name in self.FILENAME_VARIABLES:
self.extract_ascending_descending(dataset, preprocessed_data, flag_coding)
continue
self.calculate_TCLW(preprocessed_data)
return preprocessed_data
def convert_temperature(self, preprocessed_data, variable_name):
if variable_name in self.NWP_SST_VARIABLES:
sst_data = preprocessed_data[variable_name].data
sst_data = sst_data - 273.15
preprocessed_data[variable_name] = Variable(["matchup"], sst_data)
def apply_sst_nwp_bias(self, preprocessed_data, variable_name):
if variable_name in self.NWP_SST_VARIABLES:
sst_data = preprocessed_data[variable_name].data
sst_data = sst_data + self.SST_NWP_BIAS
preprocessed_data[variable_name] = Variable(["matchup"], sst_data)
def process_wind_speed_and_relative_angle(self, dataset, preprocessed_data):
self.extract_center_px(dataset, preprocessed_data, self.WIND_SPEED_VARIABLES[0])
self.extract_center_px(dataset, preprocessed_data, self.WIND_SPEED_VARIABLES[1])
east_wind_data = preprocessed_data.variables[self.WIND_SPEED_VARIABLES[0]].data
north_wind_data = preprocessed_data.variables[self.WIND_SPEED_VARIABLES[1]].data
abs_wind_speed_data = np.sqrt(np.square(east_wind_data) + np.square(north_wind_data))
preprocessed_data["amsre.nwp.abs_wind_speed"] = Variable(["matchup"], abs_wind_speed_data)
num_matchups = len(dataset.coords["matchup_count"])
self.extract_center_px(dataset, preprocessed_data, "amsre.satellite_azimuth_angle")
target_data = DefaultData.create_default_vector(num_matchups, np.float32, fill_value=np.NaN)
phi_sat = preprocessed_data.variables["amsre.satellite_azimuth_angle"].data
for i in range(0, num_matchups):
target_data[i] = self.calculate_relative_angle(phi_sat[i], north_wind_data[i], east_wind_data[i])
preprocessed_data["relative_angle"] = Variable(["matchup"], target_data)
def average_subset(self, dataset, preprocessed_data, variable_name, flag_coding=None):
# @todo 1 tb/tb this method needs performance boost 2018-02-20
num_matchups = len(dataset.coords["matchup_count"])
invalid_data_array = np.zeros(num_matchups, dtype=np.bool)
variable = dataset.variables[variable_name]
fill_value = variable.attrs["_FillValue"]
input_data = variable.values
target_data = DefaultData.create_default_vector(num_matchups, np.float32, fill_value)
width = variable.shape[2]
height = variable.shape[1]
center_x = int(np.floor(width / 2))
center_y = int(np.floor(height / 2))
offset = int(np.floor(self.AVERAGING_LENGTH / 2))
y_min = center_y - offset
y_max = center_y + offset + 1
x_min = center_x - offset
x_max = center_x + offset + 1
max_num_invalid = int(np.ceil(self.AVERAGING_LENGTH * self.AVERAGING_LENGTH * 0.1))
for i in range(0, num_matchups):
layer = input_data[i, y_min:y_max, x_min: x_max]
masked_layer = calculate_masked(layer, fill_value)
num_fills = count_masked(masked_layer)
if num_fills <= max_num_invalid:
target_data[i] = np.nanmean(masked_layer)
else:
target_data[i] = fill_value
invalid_data_array[i] = True
if flag_coding is not None:
flag_coding.add_avg_inv_thresh(invalid_data_array)
preprocessed_data[variable_name] = Variable(["matchup"], target_data)
# @todo 2 tb/tb refactor, this method duplicates most of the normal averaging method 2017-12-13
def calc_std_dev(self, dataset, preprocessed_data, variable_name, flag_coding=None):
num_matchups = len(dataset.coords["matchup_count"])
invalid_data_array = np.zeros(num_matchups, dtype=np.bool)
variable = dataset.variables[variable_name]
fill_value = variable.attrs["_FillValue"]
input_data = variable.values
target_data = DefaultData.create_default_vector(num_matchups, np.float32, fill_value)
width = variable.shape[2]
height = variable.shape[1]
center_x = int(np.floor(width / 2))
center_y = int(np.floor(height / 2))
offset = int(np.floor(self.STDDEV_LENGTH / 2))
y_min = center_y - offset
y_max = center_y + offset + 1
x_min = center_x - offset
x_max = center_x + offset + 1
max_num_invalid = int(np.ceil(self.STDDEV_LENGTH * self.STDDEV_LENGTH * 0.1))
for i in range(0, num_matchups):
layer = input_data[i, y_min:y_max, x_min: x_max]
masked_layer = calculate_masked(layer, fill_value)
num_fills = count_masked(masked_layer)
if num_fills <= max_num_invalid:
target_data[i] = np.nanstd(masked_layer)
else:
target_data[i] = fill_value
invalid_data_array[i] = True
if flag_coding is not None:
flag_coding.add_avg_inv_thresh(invalid_data_array)
preprocessed_data[variable_name + "_stddev"] = Variable(["matchup"], target_data)
def extract_center_px(self, dataset, preprocessed_data, variable_name):
variable = dataset.variables[variable_name]
if len(variable.shape) == 3:
width = variable.shape[2]
height = variable.shape[1]
center_x = int(np.floor(width / 2))
center_y = int(np.floor(height / 2))
preprocessed_data[variable_name] = variable[:, center_y, center_x].squeeze()
elif len(variable.shape) == 4:
width = variable.shape[3]
height = variable.shape[2]
center_x = int(np.floor(width / 2))
center_y = int(np.floor(height / 2))
preprocessed_data[variable_name] = variable[:, :, center_y, center_x].squeeze()
def squeeze_data(self, dataset, preprocessed_data, variable_name):
preprocessed_data[variable_name] = dataset.variables[variable_name].squeeze()
def calculate_TCLW(self, preprocessed_data):
surface_pressure = np.exp(preprocessed_data["amsre.nwp.log_surface_pressure"])
pressure_processor = PressureProcessor()
pressure_levels = pressure_processor.calculate_pressure_levels(surface_pressure)
clw = preprocessed_data["amsre.nwp.cloud_liquid_water"]
tclw_tmp = clw.data * pressure_levels.data
tclw_tmp = tclw_tmp * self.INV_GRAVITY_CONST
tclw = np.sum(tclw_tmp, axis=1)
preprocessed_data["amsre.nwp.total_column_liquid_water"] = Variable(["num_matchups"], tclw)
def extract_ascending_descending(self, dataset, preprocessed_data, flag_coding=None):
num_matchups = len(dataset.coords["matchup_count"])
ascending_data_array = np.zeros(num_matchups, dtype=np.bool)
invalid_data_array = np.zeros(num_matchups, dtype=np.bool)
filename_data = dataset.variables["amsre.l2a_filename"].data
for i in range(0, num_matchups):
file_name = str(filename_data[i])
if "_A." in file_name:
ascending_data_array[i] = True
elif "_D." in file_name:
ascending_data_array[i] = False
else:
invalid_data_array[i] = True
if flag_coding is not None:
flag_coding.add_inv_filename(invalid_data_array)
preprocessed_data["amsre.ascending"] = Variable(["matchup"], ascending_data_array)
def calculate_relative_angle(self, phi_sat, north_wind, east_wind):
if phi_sat < 0.0:
phi_sat = phi_sat + 360.0
north_wind_rad = north_wind * DEG_TO_RAD
east_wind_rad = east_wind * DEG_TO_RAD
phi_w = 90.0 - np.arctan2(north_wind_rad, east_wind_rad) * RAD_TO_DEG
if phi_w < 0.0:
phi_w = phi_w + 360.0
phi_rel = phi_sat - phi_w
if phi_rel < 0.0:
phi_rel = phi_rel + 360.0
return phi_rel
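    # Worked example (hand-checked against the formula above): with phi_sat = 90 deg and a pure easterly
    # wind (north_wind = 0, east_wind = 1), phi_w = 90 - atan2(0, e) * RAD_TO_DEG = 90 deg, so the
    # relative angle is 0 deg; a pure northerly wind (north_wind = 1, east_wind = 0) gives phi_w = 0 deg
    # and a relative angle of 90 deg.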
@jit('float32[:, :](float32[:, :], float32)', nopython=True, parallel=True)
def calculate_masked(layer, fill_value):
height = layer.shape[0]
width = layer.shape[1]
result = np.zeros(layer.shape, dtype=np.float32)
for y in prange(0, height):
for x in prange(0, width):
value = layer[y, x]
if abs(value - fill_value) < 1e-9:
result[y, x] = np.NaN
else:
result[y, x] = value
return result
@jit('int32(float32[:, :])', nopython=True, parallel=True)
def count_masked(layer):
height = layer.shape[0]
width = layer.shape[1]
result = 0
for y in prange(0, height):
for x in prange(0, width):
value = layer[y, x]
if np.isnan(value):
result += 1
return result
| [
"[email protected]"
] | |
480ee4addf549a8560df46c79e497d97793f9f92 | 6dc9f1753f0e2ccaef6fb385324ba0602a04042a | /CUHK_CPM/GPS_Project/RR_Robot/build/pi_six_axis/pi_description/catkin_generated/pkg.develspace.context.pc.py | 3d97e1ad3fa96f593b07648b7de288c2fbc559fa | [] | no_license | SunnyLyz/Deep_Learning | c413abe3ef6510b3492f0a73c9a287b4bf56ec2c | 9fa58688a7daffdded8037b9fa20c571a00f87e0 | refs/heads/master | 2021-06-21T12:12:39.450564 | 2017-07-18T12:20:45 | 2017-07-18T12:20:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pi_description"
PROJECT_SPACE_DIR = "/home/turinglife/GPS_Project/RR_Robot/devel"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
f617d8bc124c5f917b3c4b77f2bbec4e77496e8d | 55ae369a3ef1593ff31a76847deb2a0d33898895 | /mango/orderbookside.py | 68ec8576483ae87a4daf73e9dff659332c7bc063 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Investin-pro/mango-explorer | 63afb2ad4fb272f5640d18d3df367a6877b3a99a | 4760bd5f9d7067e24c12941d3d7d113b1a7173ef | refs/heads/master | 2023-07-31T23:23:00.590654 | 2021-10-01T17:13:18 | 2021-10-01T17:13:18 | 402,579,362 | 1 | 3 | MIT | 2021-10-02T16:31:43 | 2021-09-02T22:31:31 | Python | UTF-8 | Python | false | false | 6,432 | py | # # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:[email protected])
import enum
import typing
from decimal import Decimal
from solana.publickey import PublicKey
from .accountinfo import AccountInfo
from .addressableaccount import AddressableAccount
from .context import Context
from .layouts import layouts
from .metadata import Metadata
from .orders import Order, OrderType, Side
from .perpmarketdetails import PerpMarketDetails
from .version import Version
# # 🥭 OrderBookSideType enum
#
# Does the orderbook side represent bids or asks?
#
class OrderBookSideType(enum.Enum):
# We use strings here so that argparse can work with these as parameters.
BIDS = "BIDS"
ASKS = "ASKS"
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"{self}"
# # 🥭 PerpOrderBookSide class
#
# `PerpOrderBookSide` holds orders for one side of a market.
#
class PerpOrderBookSide(AddressableAccount):
def __init__(self, account_info: AccountInfo, version: Version,
meta_data: Metadata, perp_market_details: PerpMarketDetails, bump_index: Decimal,
free_list_len: Decimal, free_list_head: Decimal, root_node: Decimal,
leaf_count: Decimal, nodes: typing.Any):
super().__init__(account_info)
self.version: Version = version
self.meta_data: Metadata = meta_data
self.perp_market_details: PerpMarketDetails = perp_market_details
self.bump_index: Decimal = bump_index
self.free_list_len: Decimal = free_list_len
self.free_list_head: Decimal = free_list_head
self.root_node: Decimal = root_node
self.leaf_count: Decimal = leaf_count
self.nodes: typing.Any = nodes
@staticmethod
def from_layout(layout: typing.Any, account_info: AccountInfo, version: Version, perp_market_details: PerpMarketDetails) -> "PerpOrderBookSide":
meta_data = Metadata.from_layout(layout.meta_data)
bump_index: Decimal = layout.bump_index
free_list_len: Decimal = layout.free_list_len
free_list_head: Decimal = layout.free_list_head
root_node: Decimal = layout.root_node
leaf_count: Decimal = layout.leaf_count
nodes: typing.Any = layout.nodes
return PerpOrderBookSide(account_info, version, meta_data, perp_market_details, bump_index, free_list_len, free_list_head, root_node, leaf_count, nodes)
@staticmethod
def parse(context: Context, account_info: AccountInfo, perp_market_details: PerpMarketDetails) -> "PerpOrderBookSide":
data = account_info.data
if len(data) != layouts.ORDERBOOK_SIDE.sizeof():
raise Exception(
f"PerpOrderBookSide data length ({len(data)}) does not match expected size ({layouts.ORDERBOOK_SIDE.sizeof()})")
layout = layouts.ORDERBOOK_SIDE.parse(data)
return PerpOrderBookSide.from_layout(layout, account_info, Version.V1, perp_market_details)
@staticmethod
def load(context: Context, address: PublicKey, perp_market_details: PerpMarketDetails) -> "PerpOrderBookSide":
account_info = AccountInfo.load(context, address)
if account_info is None:
raise Exception(f"PerpOrderBookSide account not found at address '{address}'")
return PerpOrderBookSide.parse(context, account_info, perp_market_details)
def orders(self) -> typing.Sequence[Order]:
if self.leaf_count == 0:
return []
if self.meta_data.data_type == layouts.DATA_TYPE.Bids:
order_side = Side.BUY
else:
order_side = Side.SELL
stack = [self.root_node]
orders: typing.List[Order] = []
while len(stack) > 0:
index = int(stack.pop())
node = self.nodes[index]
if node.type_name == "leaf":
price = node.key["price"]
quantity = node.quantity
decimals_differential = self.perp_market_details.base_token.decimals - self.perp_market_details.quote_token.decimals
native_to_ui = Decimal(10) ** decimals_differential
quote_lot_size = self.perp_market_details.quote_lot_size
base_lot_size = self.perp_market_details.base_lot_size
actual_price = price * (quote_lot_size / base_lot_size) * native_to_ui
base_factor = Decimal(10) ** self.perp_market_details.base_token.decimals
actual_quantity = (quantity * self.perp_market_details.base_lot_size) / base_factor
orders += [Order(int(node.key["order_id"]),
node.client_order_id,
node.owner,
order_side,
actual_price,
actual_quantity,
OrderType.UNKNOWN)]
elif node.type_name == "inner":
if order_side == Side.BUY:
stack = [*stack, node.children[0], node.children[1]]
else:
stack = [*stack, node.children[1], node.children[0]]
return orders
def __str__(self) -> str:
nodes = "\n ".join([str(node).replace("\n", "\n ") for node in self.orders()])
return f"""« 𝙿𝚎𝚛𝚙𝙾𝚛𝚍𝚎𝚛𝙱𝚘𝚘𝚔𝚂𝚒𝚍𝚎 {self.version} [{self.address}]
{self.meta_data}
Perp Market: {self.perp_market_details}
Bump Index: {self.bump_index}
Free List: {self.free_list_head} (head) {self.free_list_len} (length)
Root Node: {self.root_node}
Leaf Count: {self.leaf_count}
{nodes}
»"""
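# Rough usage sketch (illustrative only - the context and address below are placeholders, not real values):
#
#     context = ...                                    # a mango Context for the target cluster
#     bids_address = PublicKey("...")                  # the perp market's bids account
#     bids = PerpOrderBookSide.load(context, bids_address, perp_market_details)
#     for order in bids.orders():
#         print(order)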
| [
"[email protected]"
] | |
298d061ade3f8aae2939f4898a724e5ec2c4bd4d | 432481b47d95ea2ce63f4e1ceb2e27e8a6f155a1 | /Project/Portfolio_construction/data.py | 4534e88433e71c81ef7b704e42acdf7eb5e05458 | [] | no_license | dxcv/Project-2 | 81fe3777fb7ee3db3df84d24b7321c8d40fcbb91 | 8105f996f97b657b5f1644a04f6f678005119b06 | refs/heads/master | 2020-09-08T09:58:56.829060 | 2019-11-08T17:51:36 | 2019-11-08T17:51:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,748 | py | """
Importation of the data.
"""
# Author: John Sibony <[email protected]>
from util import *
from password import *
import pandas as pd
from sqlalchemy import create_engine
def extraction_data(link_engine, query):
"""Extraction of the data using Vadim's database.
:param link_engine: Link to extact the data (see password.py file).
:param query: SQL query."""
engine = create_engine(link_engine)
data = pd.read_sql_query(query, engine)
return data
def import_data(index, contract, start_date='2006-01-01', freq=['EW1', 'EW2', 'EW3', 'EW4', 'EW']):
"""Extraction of specific data.
    :param index: Name of the data index ('SP' or 'VIX' or 'VVIX' or '10Ybond' for respectively SP500 or Vix or Volatility of Vix or 10Year TBond index).
:param contract: Type of Contract ('call' or 'put' or 'future' or 'spot').
    :param start_date: Beginning date of the extracted data. String in the format %YYYY-%mm-%dd.
:param freq: Only valid for SPX index. List of the frequency of the option maturity.
(items should be 'EW1' or 'EW2' or 'EW3' or 'EW4' or 'EW' or 'ES' for respectively every 1st Friday or 2nd Friday or 3rd Friday or 4th Friday or end of the month)"""
link_engine = get_link_engine()
if(index=='SP'):
if(len(freq)>1):
freq = str(tuple(freq))
else:
freq = freq[0]
freq = """('"""+str(freq)+"""')"""
if(contract=='call'):
query = '''select option_expiration, date, underlying, strike, delta, value, std_skew, dte, iv from data_option.cme_es_ivol_rp where date >= '''+"""'"""+start_date+"""'"""+''' and "root.symbol" in '''+freq+''' and sense = 'c' '''
data = extraction_data(link_engine, query)
data.sort_values(['date', 'option_expiration'], inplace=True)
data = data.set_index("date")
elif(contract=='put'):
query = '''select option_expiration, date, underlying, strike, delta, value, std_skew, dte, iv from data_option.cme_es_ivol_rp where date >= '''+"""'"""+start_date+"""'"""+''' and "root.symbol" in '''+str(freq)+''' and sense = 'p' '''
data = extraction_data(link_engine, query)
data.sort_values(['date', 'option_expiration'], inplace=True)
data = data.set_index("date")
elif(contract=='future'):
query = '''select date,expiry_date,close from data_future.cme_es where date >= '''+"""'"""+start_date+"""'"""
data = extraction_data(link_engine, query)
data.sort_values(['date', 'expiry_date'], inplace=True)
data = data.set_index("date")
elif(contract=='spot'):
query = '''select date,close from data_ohlc.cboe_spx where date >= '''+"""'"""+start_date+"""'"""
data = extraction_data(link_engine, query)
data.sort_values(['date'], inplace=True)
data = data.set_index("date")
elif(index=='VIX'):
if(contract=='call'):
query = '''select date,option_expiration,strike,underlying,value,iv,delta,std_skew,dte from data_option.cbot_vx_ivol_rp where date >= '''+"""'"""+start_date+"""'"""+''' and "root.symbol" = 'VIX' and sense = 'c' '''
data = extraction_data(link_engine, query)
data.sort_values(['date', 'option_expiration'], inplace=True)
data = data.set_index("date")
elif(contract=='put'):
query = '''select date,option_expiration,strike,underlying,value,iv,delta,std_skew,dte from data_option.cbot_vx_ivol_rp where date >= '''+"""'"""+start_date+"""'"""+''' and "root.symbol" = 'VIX' and sense = 'p' '''
data = extraction_data(link_engine, query)
data.sort_values(['date', 'option_expiration'], inplace=True)
data = data.set_index("date")
elif(contract=='future'):
query = '''select date,expiry_date,close from data_future.cbot_vx where date >= '''+"""'"""+start_date+"""'"""
data = extraction_data(link_engine, query)
data.sort_values(['date', 'expiry_date'], inplace=True)
data = data.set_index("date")
elif(contract=='spot'):
query = '''select date,close from data_ohlc.cbot_vix where date >= '''+"""'"""+start_date+"""'"""
data = extraction_data(link_engine, query)
data.sort_values(['date'], inplace=True)
data = data.set_index("date")
elif(index=='VVIX'):
if(contract=='spot'):
query = '''select date,close from data_ohlc.cboe_vvix where date >= '''+"""'"""+start_date+"""'"""
data = extraction_data(link_engine, query)
data.sort_values(['date'], inplace=True)
data = data.set_index("date")
elif(index=='10Ybond'):
if(contract=='future'):
query = '''select date,expiry_date,close from data_future.cme_ty where date >= '''+"""'"""+start_date+"""'"""
data = extraction_data(link_engine, query)
data.sort_values(['date', 'expiry_date'], inplace=True)
data = data.set_index("date")
data['underlying'] = 0
elif(contract=='spot'):
query = '''select * from data_future_cont.ty1 where date >= '''+"""'"""+start_date+"""'"""
data = extraction_data(link_engine, query)
data.sort_values(['date'], inplace=True)
data = data.set_index("date")
try:
return data
except:
        raise KeyError('Data not found. Check the arguments allowed in the function import_data in the file data.py')
if __name__ == '__main__':
import_data('SP', 'spot', '2006-01-01')
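    # Further illustrative calls (not executed here; they assume the database credentials in
    # password.py are configured and the database is reachable):
    #     import_data('VIX', 'future', '2010-01-01')
    #     import_data('SP', 'call', '2006-01-01', freq=['EW3'])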
| [
"[email protected]"
] | |
3f510935494dd7cead655b91bd5e53778d5689d1 | 2d9a706cb899dfc355fe49dc6a37a0dc257b22fd | /test/crab_HIMB2_pixel_eff_sysEta_v1.py | 9d910a1c9daa66e6f3f82a2a1547f4b45f121d17 | [] | no_license | BetterWang/QWCumuGap | b1f4d3169d2019d3d465ea985fed2094279b62b6 | 61beb88799fd3c18398061b64b849ad5a849871d | refs/heads/master | 2020-04-04T22:25:33.686266 | 2018-03-16T19:27:01 | 2018-03-16T19:27:01 | 82,000,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | from CRABAPI.RawCommand import crabCommand
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
from CRABClient.ClientExceptions import ClientException
from httplib import HTTPException
config = config()
config.General.requestName = 'HIMB2_CumuGap_Pixel_eff_cent_sysPos_v1'
config.General.workArea = 'CrabArea'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'qwcumu_PbPb15_Pix_eff_pos_v1.py'
config.JobType.inputFiles = ['EffCorrectionsPixel_TT_pt_0_10_v2.root']
config.JobType.maxJobRuntimeMin = 2500
config.Data.inputDataset = '/HIMinimumBias2/HIRun2015-25Aug2016-v1/AOD'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 20
config.Data.outLFNDirBase = '/store/group/phys_heavyions/qwang/PbPb2015_cumu/'
config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions15/HI/Cert_262548-263757_PromptReco_HICollisions15_JSON_v2.txt'
config.Data.publication = False
config.Data.useParent = False
config.Site.storageSite = 'T2_CH_CERN'
#config.Data.allowNonValidInputDataset = True
#try:
# crabCommand('submit', config = config)
#except HTTPException as hte:
# print "Failed submitting task: %s" % (hte.headers)
#except ClientException as cle:
# print "Failed submitting task: %s" % (cle)
config.General.requestName = 'HIMB2_CumuGap_Pixel_eff_cent_sysNeg_v2'
config.JobType.psetName = 'qwcumu_PbPb15_Pix_eff_neg_v1.py'
try:
crabCommand('submit', config = config)
except HTTPException as hte:
print "Failed submitting task: %s" % (hte.headers)
except ClientException as cle:
print "Failed submitting task: %s" % (cle)
| [
"[email protected]"
] | |
01720e33170d4697953e0ec099bcda60e4576d6c | 923f707341f7e6a4c86673c52ca796f40638619c | /809. Expressive Words.py | 3080ddd64ba075863b8c0ce379c733da3c6944d6 | [] | no_license | Huijuan2015/leetcode_Python_2019 | bb1e54801faa15ee3ef2a7bd7628b6a16033f7c7 | 36c584e8f92a0725bab7a567dfd10b918408627b | refs/heads/master | 2020-04-22T13:31:55.203162 | 2020-03-10T00:00:58 | 2020-03-10T00:00:58 | 170,412,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | class Solution(object):
def expressiveWords(self, S, words):
"""
:type S: str
:type words: List[str]
:rtype: int
"""
	        # Build the run-length encoding by hand: a plain map/dict is not enough here because the order of the character groups matters.
def RLE(s): # return string, list
prev = -1
key = ""
cnts = []
for i in range(len(s)):
if i== len(s)-1 or s[i] != s[i+1]:
key += s[i]
cnts.append(i-prev)
prev = i
return (key,cnts)
def isExtended(skey, scnt, wkey, wcnt):
if skey != wkey or len(skey) != len(wkey):
return False
for i in range(len(scnt)):
c1, c2 = scnt[i], wcnt[i]
if c2 > c1:
return False
if c1 < 3 and c1 != c2:
return False
return True
skey, scnt = RLE(S)
cnt = 0
for word in words:
wkey, wcnt = RLE(word)
if isExtended(skey, scnt, wkey, wcnt):
cnt += 1
# print word
return cnt
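	    # Worked example (sketch): for S = "heeellooo" and words = ["hello", "hi", "helo"],
	    # only "hello" can be stretched into S ("helo" fails because the 'l' run in S has
	    # length 2 < 3 and differs from the word's run), so the method returns 1:
	    #     Solution().expressiveWords("heeellooo", ["hello", "hi", "helo"])  # -> 1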
| [
"[email protected]"
] | |
fa07ea6fbca874d31aa899db0aad1b1f300167e5 | 545f817485cbf75e5b791ef39c7ff25f66a8de29 | /src/brasil/gov/portal/tests/test_externalcontent_content_type.py | 2c1a81689ccd3e718b59d796319d9a806340079c | [] | no_license | Assistevc/brasil.gov.portal | b5e85e749b19b3bc5080f1ed0b7ee727ad58bad0 | 54eb24e7e0ee81d74012a2af27bc8c9a8d56ef71 | refs/heads/master | 2021-01-15T19:05:01.335974 | 2014-12-17T13:46:55 | 2014-12-17T13:46:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,602 | py | # -*- coding: utf-8 -*-
from brasil.gov.portal.browser.content.external import ExternalContentView
from brasil.gov.portal.content.external import IExternalContent
from brasil.gov.portal.testing import INTEGRATION_TESTING
from plone import api
from plone.dexterity.interfaces import IDexterityFTI
from plone.dexterity.schema import SCHEMA_CACHE
from plone.namedfile.file import NamedBlobImage
from zope.component import createObject
from zope.component import queryUtility
import os
import unittest2 as unittest
class ExternalContentTestCase(unittest.TestCase):
layer = INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
with api.env.adopt_roles(['Manager', ]):
self.folder = api.content.create(
type='Folder',
container=self.portal,
id='test-folder'
)
# Invalidate schema cache
SCHEMA_CACHE.invalidate('ExternalContent')
self.content = api.content.create(
type='ExternalContent',
container=self.folder,
id='external'
)
self.setup_content_data()
def setup_content_data(self):
path = os.path.dirname(__file__)
image = open(os.path.join(path, 'files', 'image.jpg')).read()
self.image = NamedBlobImage(image, 'image/jpeg', u'image.jpg')
def test_adding(self):
self.assertTrue(IExternalContent.providedBy(self.content))
def test_fti(self):
fti = queryUtility(IDexterityFTI, name='ExternalContent')
self.assertNotEqual(None, fti)
def test_factory(self):
fti = queryUtility(IDexterityFTI, name='ExternalContent')
factory = fti.factory
new_object = createObject(factory)
self.assertTrue(IExternalContent.providedBy(new_object))
def test_image_tag(self):
content = self.content
	        # No image, no tag
self.assertEqual(content.tag(), '')
	        # Add the image
content.image = self.image
self.assertIn('tileImage', content.tag())
def test_image_thumb(self):
content = self.content
	        # No image, no thumbnail
self.assertEqual(content.image_thumb(), None)
	        # Add the image
content.image = self.image
self.assertTrue(content.image_thumb())
class ExternalContentViewTestCase(unittest.TestCase):
layer = INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
# Invalidate schema cache
SCHEMA_CACHE.invalidate('ExternalContent')
with api.env.adopt_roles(['Manager', ]):
self.folder = api.content.create(
type='Folder',
container=self.portal,
id='test-folder'
)
self.content = api.content.create(
type='ExternalContent',
container=self.folder,
id='external'
)
def test_view(self):
view = self.content.restrictedTraverse('@@view')
self.assertTrue(isinstance(view, ExternalContentView))
def test_view_manager(self):
with api.env.adopt_roles(['Manager', ]):
view = self.content.restrictedTraverse('@@view')
self.assertIn('The link address is', view())
def test_view_anonymous(self):
with api.env.adopt_roles(['Anonymous', ]):
view = self.content.restrictedTraverse('@@view')
	            # A redirect will occur; it is not followed in this test
self.assertIsNone(view())
| [
"[email protected]"
] | |
3a60668b274b8710c9d34d5244a5c0d11c03ec42 | 22712d4a3633c93c6173b826882b01174a4c6928 | /sign/migrations/0001_initial.py | 04cd63636e29f7b459fdb68d99865fb8594ccfe3 | [] | no_license | New2object/guest2 | e5dcbdcfb6fbbe386a5da51e7b7a18f97de8815d | 30edbe54261a074fdea10150b52cb59e3bc6d781 | refs/heads/master | 2022-12-23T22:27:44.275577 | 2018-03-23T14:03:45 | 2018-03-23T14:03:45 | 124,031,317 | 1 | 1 | null | 2022-12-10T19:20:25 | 2018-03-06T06:24:45 | Python | UTF-8 | Python | false | false | 1,662 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-30 09:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('limit', models.IntegerField()),
('status', models.BooleanField()),
('address', models.CharField(max_length=200)),
('start_time', models.DateTimeField(verbose_name='event_time')),
('create_time', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Guest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('realname', models.CharField(max_length=62)),
('phone', models.CharField(max_length=16)),
('email', models.EmailField(max_length=254)),
('sign', models.BooleanField()),
('create_time', models.DateTimeField(auto_now=True)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sign.Event')),
],
),
migrations.AlterUniqueTogether(
name='guest',
unique_together=set([('event', 'phone')]),
),
]
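	    # Note (not generated by Django, added as a usage hint): a migration like this
	    # is applied with the standard management command, e.g. `python manage.py migrate sign`.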
| [
"[email protected]"
] | |
a008914d98ae2a6baab427010b3bfc9a8e14ee65 | 1beac95667f9236084dfecdf2550fb6e8a28b0b8 | /backend/api/decapod_api/exceptions.py | c172c2796f82a2f416855d9af5c3ba696ff06535 | [
"Apache-2.0"
] | permissive | lihaijing/ceph-lcm | 52b9d2fae24ad8b54a386cda4c528d93288d603d | d7c07fbb87dc170d5b8a0a5c8a2cf857f71ae466 | refs/heads/master | 2021-01-12T08:17:03.919876 | 2016-12-12T07:58:58 | 2016-12-12T07:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,709 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains exceptions specific for API."""
import flask.json
from werkzeug import exceptions
from decapod_common import exceptions as app_exceptions
class DecapodJSONMixin(app_exceptions.DecapodError, exceptions.HTTPException):
"""Basic JSON mixin for the werkzeug exceptions.
	    Basic werkzeug exceptions return HTML. This mixin
forces them to return correct JSON.
{
"code": <numberical HTTP status code>,
"error": <error ID>,
"message": <description suitable to show to humans>
}
"""
error_name = None
def get_description(self, environ=None):
return self.description
def get_body(self, environ=None):
error = self.error_name or self.__class__.__name__
error = str(error)
error_message = {
"code": self.code,
"error": error,
"message": self.get_description(environ)
}
json_error = flask.json.dumps(error_message)
return json_error
def get_headers(self, environ=None):
return [("Content-Type", "application/json")]
class BadRequest(DecapodJSONMixin, exceptions.BadRequest):
pass
class Unauthorized(DecapodJSONMixin, exceptions.Unauthorized):
def get_headers(self, environ=None):
headers = super().get_headers(environ=environ)
headers.append(("WWW-Authenticate", "Token realm=\"Application\""))
return headers
class Forbidden(DecapodJSONMixin, exceptions.Forbidden):
pass
class NotFound(DecapodJSONMixin, exceptions.NotFound):
pass
class MethodNotAllowed(DecapodJSONMixin, exceptions.MethodNotAllowed):
def get_headers(self, environ=None):
headers = DecapodJSONMixin.get_headers(self, environ)
headers.extend(exceptions.MethodNotAllowed.get_headers(self, environ))
return headers
class NotAcceptable(DecapodJSONMixin, exceptions.NotAcceptable):
pass
class InternalServerError(DecapodJSONMixin, exceptions.InternalServerError):
pass
class CannotConvertResultToJSONError(InternalServerError):
pass
class UnknownReturnValueError(InternalServerError):
pass
class InvalidJSONError(BadRequest):
def __init__(self, errors):
super().__init__("\n".join(errors))
class ImpossibleToCreateSuchModel(BadRequest):
description = (
"It is impossible to create such model because it violates "
"data model contracts."
)
class CannotUpdateManagedFieldsError(BadRequest):
description = "It is forbidden to update automanaged fields."
class UnknownUserError(BadRequest):
description = "Unknown user with ID {0}"
def __init__(self, user_id):
super().__init__(self.description.format(user_id))
class CannotUpdateDeletedModel(BadRequest):
"""Exception which is raised if you are trying to update deleted model."""
class CannotDeleteRoleWithActiveUsers(BadRequest):
"""Exception raised on attempt to delete role with active users."""
class CannotUpdateModelWithSuchParameters(ImpossibleToCreateSuchModel):
"""Exception raised on attempt to save data which violaties uniquiness."""
class CannotDeleteClusterWithServers(BadRequest):
description = "Cluster still has servers"
class UnknownPlaybookError(BadRequest):
description = "Unknown playbook {0}"
def __init__(self, playbook_name):
super().__init__(self.description.format(playbook_name))
class ServerListIsRequiredForPlaybookError(BadRequest):
description = "Explicit server list is required for playbook {0}"
def __init__(self, playbook_name):
super().__init__(self.description.format(playbook_name))
class UnknownClusterError(BadRequest):
description = "There is not cluster with ID {0}"
def __init__(self, cluster_id):
super().__init__(self.description.format(cluster_id))
class UnknownPlaybookConfiguration(BadRequest):
description = (
"There is no playbook configuration with ID {0} and "
"version {1}"
)
def __init__(self, item_id, version):
super().__init__(self.description.format(item_id, version))
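	# Usage sketch (illustrative only; the real wiring lives elsewhere in the
	# decapod_api package): because each class above is also a werkzeug
	# HTTPException, a Flask view can simply raise it and the JSON body built in
	# get_body() is returned to the client.  `find_user` below is a hypothetical
	# helper, not part of this module:
	#
	#     def get_user(user_id):
	#         user = find_user(user_id)
	#         if not user:
	#             raise UnknownUserError(user_id)
	#         return user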
| [
"[email protected]"
] | |
bc3d6165b432e622690fb5a07f75a8f96308ebd3 | 4c8a32fee60c54777396f80e6698c95fb18ae5b5 | /env/Lib/site-packages/pip/_vendor/urllib3/util/retry.py | e508841d3fb192c2523eb7b6127fc5c7faaa0f8c | [] | no_license | LUINFO89/PlataformaGestiondeVuelos | f91cd351df6d5e40e341cbcae793d819faecaf75 | adc6d6dc888d551ab83726c2dbdd7e6db4398c3b | refs/heads/main | 2023-08-30T08:30:43.089497 | 2021-10-30T01:25:12 | 2021-10-30T01:25:12 | 422,015,313 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,998 | py | from __future__ import absolute_import
import email
import logging
import re
import time
import warnings
from collections import namedtuple
from itertools import takewhile
from ..exceptions import (
ConnectTimeoutError,
InvalidHeader,
MaxRetryError,
ProtocolError,
ProxyError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple(
"RequestHistory", ["method", "url", "error", "status", "redirect_location"]
)
# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
_Default = object()
class _RetryMeta(type):
@property
def DEFAULT_METHOD_WHITELIST(cls):
warnings.warn(
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_METHODS_ALLOWED' instead",
DeprecationWarning,
)
return cls.DEFAULT_ALLOWED_METHODS
@DEFAULT_METHOD_WHITELIST.setter
def DEFAULT_METHOD_WHITELIST(cls, value):
warnings.warn(
"Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
DeprecationWarning,
)
cls.DEFAULT_ALLOWED_METHODS = value
@property
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
warnings.warn(
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
DeprecationWarning,
)
return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
@DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
warnings.warn(
"Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
"will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
DeprecationWarning,
)
cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
@six.add_metaclass(_RetryMeta)
class Retry(object):
"""Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param int other:
How many times to retry on other errors.
Other errors are errors that are not connect, read, redirect or status errors.
These errors might be raised after the request was sent to the server, so the
request might have side-effects.
Set to ``0`` to fail on the first retry of this type.
If ``total`` is not set, it's a good idea to set this to 0 to account
for unexpected edge cases and avoid infinite retry loops.
:param iterable allowed_methods:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
Set to a ``False`` value to retry on any verb.
.. warning::
Previously this parameter was named ``method_whitelist``, that
usage is deprecated in v1.26.0 and will be removed in v2.0.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``allowed_methods``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param iterable remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
"""
#: Default methods to be used for ``allowed_methods``
DEFAULT_ALLOWED_METHODS = frozenset(
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
)
#: Default status codes to be used for ``status_forcelist``
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
#: Default headers to be used for ``remove_headers_on_redirect``
DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Authorization"])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(
self,
total=10,
connect=None,
read=None,
redirect=None,
status=None,
other=None,
allowed_methods=_Default,
status_forcelist=None,
backoff_factor=0,
raise_on_redirect=True,
raise_on_status=True,
history=None,
respect_retry_after_header=True,
remove_headers_on_redirect=_Default,
# TODO: Deprecated, remove in v2.0
method_whitelist=_Default,
):
if method_whitelist is not _Default:
if allowed_methods is not _Default:
raise ValueError(
"Using both 'allowed_methods' and "
"'method_whitelist' together is not allowed. "
"Instead only use 'allowed_methods'"
)
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
stacklevel=2,
)
allowed_methods = method_whitelist
if allowed_methods is _Default:
allowed_methods = self.DEFAULT_ALLOWED_METHODS
if remove_headers_on_redirect is _Default:
remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
self.total = total
self.connect = connect
self.read = read
self.status = status
self.other = other
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.allowed_methods = allowed_methods
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = history or tuple()
self.respect_retry_after_header = respect_retry_after_header
self.remove_headers_on_redirect = frozenset(
[h.lower() for h in remove_headers_on_redirect]
)
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect,
read=self.read,
redirect=self.redirect,
status=self.status,
other=self.other,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
remove_headers_on_redirect=self.remove_headers_on_redirect,
respect_retry_after_header=self.respect_retry_after_header,
)
# TODO: If already given in **kw we use what's given to us
# If not given we need to figure out what to pass. We decide
# based on whether our class has the 'method_whitelist' property
# and if so we pass the deprecated 'method_whitelist' otherwise
# we use 'allowed_methods'. Remove in v2.0
if "method_whitelist" not in kw and "allowed_methods" not in kw:
if "method_whitelist" in self.__dict__:
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
)
params["method_whitelist"] = self.allowed_methods
else:
params["allowed_methods"] = self.allowed_methods
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self):
"""Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(
list(
takewhile(lambda x: x.redirect_location is None, reversed(self.history))
)
)
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.BACKOFF_MAX, backoff_value)
def parse_retry_after(self, retry_after):
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate_tz(retry_after)
if retry_date_tuple is None:
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
if retry_date_tuple[9] is None: # Python 2
# Assume UTC if no timezone was specified
# On Python2.7, parsedate_tz returns None for a timezone offset
# instead of 0 if no timezone is given, where mktime_tz treats
# a None timezone offset as local time.
retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
retry_date = email.utils.mktime_tz(retry_date_tuple)
seconds = retry_date - time.time()
if seconds < 0:
seconds = 0
return seconds
def get_retry_after(self, response):
""" Get the value of Retry-After in seconds. """
retry_after = response.getheader("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self):
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def sleep(self, response=None):
"""Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if self.respect_retry_after_header and response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
def _is_connection_error(self, err):
"""Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
if isinstance(err, ProxyError):
err = err.original_error
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
"""Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
"""Checks if a given HTTP method should be retried upon, depending if
it is included in the allowed_methods
"""
# TODO: For now favor if the Retry implementation sets its own method_whitelist
# property outside of our constructor to avoid breaking custom implementations.
if "method_whitelist" in self.__dict__:
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
)
allowed_methods = self.method_whitelist
else:
allowed_methods = self.allowed_methods
if allowed_methods and method.upper() not in allowed_methods:
return False
return True
def is_retry(self, method, status_code, has_retry_after=False):
"""Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
be retried upon on the presence of the aforementioned header)
"""
if not self._is_method_retryable(method):
return False
if self.status_forcelist and status_code in self.status_forcelist:
return True
return (
self.total
and self.respect_retry_after_header
and has_retry_after
and (status_code in self.RETRY_AFTER_STATUS_CODES)
)
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (
self.total,
self.connect,
self.read,
self.redirect,
self.status,
self.other,
)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(
self,
method=None,
url=None,
response=None,
error=None,
_pool=None,
_stacktrace=None,
):
"""Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
connect = self.connect
read = self.read
redirect = self.redirect
status_count = self.status
other = self.other
cause = "unknown"
status = None
redirect_location = None
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
elif error and self._is_read_error(error):
# Read retry?
if read is False or not self._is_method_retryable(method):
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
elif error:
# Other retry?
if other is not None:
other -= 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = "too many redirects"
redirect_location = response.get_redirect_location()
status = response.status
else:
# Incrementing because of a server error like a 500 in
# status_forcelist and the given method is in the allowed_methods
cause = ResponseError.GENERIC_ERROR
if response and response.status:
if status_count is not None:
status_count -= 1
cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
status = response.status
history = self.history + (
RequestHistory(method, url, error, status, redirect_location),
)
new_retry = self.new(
total=total,
connect=connect,
read=read,
redirect=redirect,
status=status_count,
other=other,
history=history,
)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return (
"{cls.__name__}(total={self.total}, connect={self.connect}, "
"read={self.read}, redirect={self.redirect}, status={self.status})"
).format(cls=type(self), self=self)
def __getattr__(self, item):
if item == "method_whitelist":
# TODO: Remove this deprecated alias in v2.0
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
)
return self.allowed_methods
try:
return getattr(super(Retry, self), item)
except AttributeError:
return getattr(Retry, item)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
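	# Usage sketch (not part of the vendored module): a policy like the ones described
	# in the Retry docstring is normally handed to a PoolManager, for example:
	#
	#     from pip._vendor.urllib3 import PoolManager
	#     retries = Retry(total=5, backoff_factor=0.1,
	#                     status_forcelist=[500, 502, 503, 504],
	#                     allowed_methods=["GET", "HEAD"])
	#     http = PoolManager(retries=retries)
	#     response = http.request("GET", "http://example.com/")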
| [
"[email protected]"
] | |
2c4b39edafd28a4c21b76214cd3c205f4ee1d683 | d5c578256dc7d8f0bbd5c4b340e804c9d6676b90 | /combine_A_and_B.py | e966b73df86d87a284b567626d54ac819d9b0d81 | [] | no_license | Bala93/Life_science | 470728376a5ce37017bf9647d49b8fb2b93fcac6 | fbd0f16ddde13e356269fe14c679af8e4005eb74 | refs/heads/master | 2021-09-17T16:15:21.356685 | 2018-07-03T19:14:49 | 2018-07-03T19:14:49 | 129,958,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | import os
import numpy as np
import cv2
import argparse
parser = argparse.ArgumentParser('create image pairs')
parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges')
parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg')
parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
parser.add_argument('--num_imgs', dest='num_imgs', help='number of images',type=int, default=1000000)
parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)',action='store_true')
args = parser.parse_args()
for arg in vars(args):
print('[%s] = ' % arg, getattr(args, arg))
splits = os.listdir(args.fold_A)
for sp in splits:
img_fold_A = os.path.join(args.fold_A, sp)
img_fold_B = os.path.join(args.fold_B, sp)
img_list = os.listdir(img_fold_A)
if args.use_AB:
img_list = [img_path for img_path in img_list if '_A.' in img_path]
num_imgs = min(args.num_imgs, len(img_list))
print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
img_fold_AB = os.path.join(args.fold_AB, sp)
if not os.path.isdir(img_fold_AB):
os.makedirs(img_fold_AB)
print('split = %s, number of images = %d' % (sp, num_imgs))
for n in range(num_imgs):
name_A = img_list[n]
path_A = os.path.join(img_fold_A, name_A)
if args.use_AB:
name_B = name_A.replace('_A.', '_B.')
else:
name_B = name_A
path_B = os.path.join(img_fold_B, name_B)
if os.path.isfile(path_A) and os.path.isfile(path_B):
name_AB = name_A
if args.use_AB:
name_AB = name_AB.replace('_A.', '.') # remove _A
path_AB = os.path.join(img_fold_AB, name_AB)
im_A = cv2.imread(path_A)
im_B = cv2.imread(path_B)
# im_A = cv2.imread(path_A, cv2.CV_LOAD_IMAGE_COLOR)
# im_B = cv2.imread(path_B, cv2.CV_LOAD_IMAGE_COLOR)
im_AB = np.concatenate([im_A, im_B], 1)
cv2.imwrite(path_AB, im_AB)
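	# Example invocation (directory names are placeholders): assuming fold_A and
	# fold_B each contain the same splits with identically named images,
	#     python combine_A_and_B.py --fold_A ./A --fold_B ./B --fold_AB ./AB
	# writes the side-by-side A|B pairs into ./AB, one sub-folder per split.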
| [
"[email protected]"
] | |
f81184a6ca86a7b8f8791b4043f069df9155c3b3 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SSD/tests/test_models/test_forward.py | d3805e803d59dbea7a21a0dd83d3bae9bd213a7d | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 12,689 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""pytest tests/test_forward.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
import mmcv
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
train_cfg = mmcv.Config(copy.deepcopy(config.train_cfg))
test_cfg = mmcv.Config(copy.deepcopy(config.test_cfg))
return model, train_cfg, test_cfg
def test_rpn_forward():
model, train_cfg, test_cfg = _get_detector_cfg(
'rpn/rpn_r50_fpn_1x_coco.py')
model['pretrained'] = None
from mmdet.models import build_detector
detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 224, 224)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
gt_bboxes = mm_inputs['gt_bboxes']
losses = detector.forward(
imgs, img_metas, gt_bboxes=gt_bboxes, return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
@pytest.mark.parametrize(
'cfg_file',
[
'retinanet/retinanet_r50_fpn_1x_coco.py',
'guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py',
'ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
# 'free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
# 'atss/atss_r50_fpn_1x_coco.py', # not ready for topk
'reppoints/reppoints_moment_r50_fpn_1x_coco.py',
'yolo/yolov3_d53_mstrain-608_273e_coco.py'
])
def test_single_stage_forward_gpu(cfg_file):
if not torch.cuda.is_available():
import pytest
pytest.skip('test requires GPU and torch+cuda')
model, train_cfg, test_cfg = _get_detector_cfg(cfg_file)
model['pretrained'] = None
from mmdet.models import build_detector
detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (2, 3, 224, 224)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
detector = detector.cuda()
imgs = imgs.cuda()
# Test forward train
gt_bboxes = [b.cuda() for b in mm_inputs['gt_bboxes']]
gt_labels = [g.cuda() for g in mm_inputs['gt_labels']]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
def test_faster_rcnn_ohem_forward():
model, train_cfg, test_cfg = _get_detector_cfg(
'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
model['pretrained'] = None
from mmdet.models import build_detector
detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 256, 256)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# HTC is not ready yet
@pytest.mark.parametrize('cfg_file', [
'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
'grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py'
])
def test_two_stage_forward(cfg_file):
model, train_cfg, test_cfg = _get_detector_cfg(cfg_file)
model['pretrained'] = None
from mmdet.models import build_detector
detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 256, 256)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
@pytest.mark.parametrize(
'cfg_file', ['ghm/retinanet_ghm_r50_fpn_1x_coco.py', 'ssd/ssd300_coco.py'])
def test_single_stage_forward_cpu(cfg_file):
model, train_cfg, test_cfg = _get_detector_cfg(cfg_file)
model['pretrained'] = None
from mmdet.models import build_detector
detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 300, 300)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
num_items=None, num_classes=10): # yapf: disable
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_items (None | List[int]):
specifies the number of boxes in each batch item
num_classes (int):
number of different labels a box might have
"""
from mmdet.core import BitmapMasks
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
} for _ in range(N)]
gt_bboxes = []
gt_labels = []
gt_masks = []
for batch_idx in range(N):
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[batch_idx]
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
class_idxs = rng.randint(1, num_classes, size=num_boxes)
gt_bboxes.append(torch.FloatTensor(boxes))
gt_labels.append(torch.LongTensor(class_idxs))
mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
gt_masks.append(BitmapMasks(mask, H, W))
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_bboxes': gt_bboxes,
'gt_labels': gt_labels,
'gt_bboxes_ignore': None,
'gt_masks': gt_masks,
}
return mm_inputs
def test_yolact_forward():
model, train_cfg, test_cfg = _get_detector_cfg(
'yolact/yolact_r50_1x8_coco.py')
model['pretrained'] = None
from mmdet.models import build_detector
detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 550, 550)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
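	# Note (illustration, not part of the original test module): following the
	# module docstring, an individual case can be run with pytest, e.g.
	#     pytest tests/test_models/test_forward.py::test_yolact_forward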
| [
"[email protected]"
] | |
b73e66f56dd25716dad74184c383b3a7b077bf13 | aa9fc66c8b94f05d4651f243f6f21799f4c1fd80 | /jump-game-vi/jump-game-vi.py | 4eadd56dcea8503cbfdedc446f3ea6d98917e497 | [] | no_license | baranee-18/Data-Structures-and-Algorithms | 3cd739ba3c0710835d5995a6ccf2b44f612f8352 | 5074bac42b9323b8e7353d533355ece18dd5f5f1 | refs/heads/main | 2023-08-23T23:06:59.028649 | 2021-10-19T19:21:43 | 2021-10-19T19:21:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | class Solution:
def maxResult(self, nums: List[int], k: int) -> int:
n = len(nums)
queue = []
val = 0
for i in range(n):
maxV = 0
if queue:
maxV, indx = queue[0]
while indx+k < i:
maxV, indx = heapq.heappop(queue)
heapq.heappush(queue, [maxV,indx])
val = nums[i] + (-1) * maxV
heapq.heappush(queue, [-1 * val, i])
return val | [
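	    # Notes (sketch): the LeetCode runtime provides `List` and `heapq` implicitly;
	    # a standalone script would also need `from typing import List` and `import heapq`.
	    # Worked example from the problem statement:
	    #     Solution().maxResult([1, -1, -2, 4, -7, 3], 2)  # -> 7, via 1 -> -1 -> 4 -> 3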
"[email protected]"
] | |
f7f9d2048aac2ff8422cdb78315139cfe63f6cc3 | 87a9706379670da62739b3c1fbbdd75edb5107b8 | /alien_invasion/scoreboard.py | 11f4eb35474043303f80b3f5ed7dcbf980ac7b77 | [] | no_license | zxbzxb180/python_work | ba21ab74f842e0d560a8bb192bb8a874d356b9e1 | 6406024e011aa06d1bda78d97cfecc47f7f2058c | refs/heads/master | 2022-12-12T23:53:36.887963 | 2020-03-04T07:20:29 | 2020-03-04T07:20:29 | 194,494,744 | 0 | 0 | null | 2022-11-22T03:54:47 | 2019-06-30T08:48:44 | Python | GB18030 | Python | false | false | 985 | py | #coding=gbk
import pygame.font
class Scoreboard():
"""显示得分信息的类"""
def __init__(self,ai_settings,screen,stats):
"""初始化显示得分涉及的属性"""
self.screen = screen
self.screen_rect = screen.get_rect()
self.ai_settings = ai_settings
self.stats = stats
#显示得分信息时使用的字体设置
self.text_color = (30,30,30)
self.font = pygame.font.SysFont(None,48)
#准备初始得分图像
self.prep_score()
def prep_score(self):
"""将得分转换为一幅渲染的图像"""
score_str = str(self.stats.score)
self.score_image = self.font.render(score_str,True,self.text_color,self.ai_settings.bg_color)
#将得分放在屏幕右上角
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.score_rect.top = 20
def show_score(self):
"""在屏幕上显示得分"""
self.screen.blit(self.score_image,self.score_rect)
| [
"[email protected]"
] | |
c518c6954e9b0640ead738942f5c31574b6e8035 | 3c2d4ed20da3aa3e045b617c787df68c7d0ddd1d | /src/drugex/__main__.py | 50f052ac1a040383a0178b2488fc5a6b739f347c | [
"MIT"
] | permissive | cthoyt/DrugEx | 699ea37a86bfd0ed06e5c5112a68d5bd46ed05af | 9e4d31adb2c65d0afc852948f502c79dcf8308a3 | refs/heads/master | 2020-06-07T22:08:26.799943 | 2019-06-21T16:38:20 | 2019-06-21T16:38:20 | 193,103,470 | 0 | 0 | MIT | 2019-06-21T13:34:04 | 2019-06-21T13:34:03 | null | UTF-8 | Python | false | false | 328 | py | # -*- coding: utf-8 -*-
"""Entrypoint module, in case you use `python -m drugex`.
Why does this file exist, and why ``__main__``? For more info, read:
- https://www.python.org/dev/peps/pep-0338/
- https://docs.python.org/3/using/cmdline.html#cmdoption-m
"""
from drugex.cli import main
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
232c6641ae1d5833e25fbf1e833963f1e1d7e53d | 956cc6ff2b58a69292f7d1223461bc9c2b9ea6f1 | /monk/system_unit_tests/gluon/test_block_mobilenet_v2_inverted_linear_bottleneck.py | 805df2c924d747b85329f2e810c0f1bdc52a05e7 | [
"Apache-2.0"
] | permissive | Aanisha/monk_v1 | c24279b2b461df9b3de2984bae0e2583aba48143 | c9e89b2bc0c1dbb320aa6da5cba0aa1c1526ad72 | refs/heads/master | 2022-12-29T00:37:15.320129 | 2020-10-18T09:12:13 | 2020-10-18T09:12:13 | 286,278,278 | 0 | 0 | Apache-2.0 | 2020-08-09T16:51:02 | 2020-08-09T16:51:02 | null | UTF-8 | Python | false | false | 1,512 | py | import os
import sys
sys.path.append("../../../../monk_v1");
sys.path.append("../../../monk/");
import psutil
from gluon_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import mxnet as mx
import numpy as np
from gluon.losses.return_loss import load_loss
def test_block_mobilenet_v2_inverted_linear_bottleneck(system_dict):
forward = True;
test = "test_block_mobilenet_v2_inverted_linear_bottleneck";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.mobilenet_v2_inverted_linear_bottleneck_block(output_channels=64, bottleneck_width=4, stride=1));
gtf.Compile_Network(network, use_gpu=False);
x = np.random.rand(1, 64, 64, 64);
x = mx.nd.array(x);
y = gtf.system_dict["local"]["model"].forward(x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
| [
"[email protected]"
] | |
004aa18f2e1b9effc7eca12b7058f92597767819 | 9acbf0279c38d11e89f16831e9c43b49badabb00 | /IPTVPlayer/tsiplayer/addons/resources/hosters/uptostream.py | 314392de9337b64f4093974ba2f8058e0b501c6c | [] | no_license | dgbkn/e2iPlayer | 4f101b87bc5f67bf14690d012a62cbe8755ab82c | e5f413ea032eb9012569d9d149a368a3e73d9579 | refs/heads/master | 2023-05-15T05:01:18.204256 | 2021-06-06T18:03:42 | 2021-06-06T18:03:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,329 | py | # -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
#
import re
import json
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.config import GestionCookie
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.hosters.hoster import iHoster
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import dialog, VSlog, isMatrix
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.premiumHandler import cPremiumHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.parser import cParser
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.util import Unquote
class cHoster(iHoster):
def __init__(self):
self.__sDisplayName = 'UpToStream'
self.__sFileName = self.__sDisplayName
self.oPremiumHandler = None
def getDisplayName(self):
return self.__sDisplayName
def setDisplayName(self, sDisplayName):
self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR]'
def setFileName(self, sFileName):
self.__sFileName = sFileName
def getFileName(self):
return self.__sFileName
def getPluginIdentifier(self):
return 'uptostream'
def isDownloadable(self):
return True
def isJDownloaderable(self):
return True
def getPattern(self):
return ''
def __getIdFromUrl(self):
if self.__sUrl[-4:] in '.mp4.avi.mkv':
return self.__sUrl.split('/')[3]
return self.__sUrl.split('/')[-1]
def setUrl(self, sUrl):
self.__sUrl = str(sUrl)
self.__sUrl = self.__sUrl.replace('iframe/', '')
self.__sUrl = self.__sUrl.replace('http:', 'https:')
def checkSubtitle(self, sHtmlContent):
if sHtmlContent:
Files = []
lab = []
for aEntry in sHtmlContent:
if aEntry["label"] == "French":
url = aEntry["src"]
if not url.startswith('http'):
url = 'http:' + url
Files.append(url)
else:
continue
return Files
return False
def checkUrl(self, sUrl):
return True
def getUrl(self):
return self.__sUrl
def getMediaLink(self):
self.oPremiumHandler = cPremiumHandler('uptobox')
premium = self.oPremiumHandler.isPremiumModeAvailable()
api_call = False
SubTitle = ""
if premium:
self.oPremiumHandler.Authentificate()
else:
dialog().VSok('Ce hoster demande un login, meme gratuit.')
return False, False
cookies = GestionCookie().Readcookie("uptobox")
import requests, re
s = requests.Session()
s.headers.update({"Cookie": cookies})
r = s.get('https://uptobox.com/api/streaming?file_code=' + self.__sUrl.split('/')[3]).json()
r1 = s.get(r["data"]["user_url"]).text
tok = re.search('token.+?;.+?;(.+?)&', r1).group(1)
r1 = s.post("https://uptobox.com/api/user/pin/validate?token=" + tok,json={"pin":r["data"]["pin"]}).json()
s.headers.update({"Referer": "https://uptobox.com/pin?pin=" + r["data"]["pin"]})
r = s.get(r["data"]["check_url"]).json()["data"]
sPattern = "'(.+?)': {(.+?)}"
oParser = cParser()
aResult = oParser.parse(r["streamLinks"], sPattern)
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import dialog
url = []
qua = []
api_call = False
for aEntry in aResult[1]:
QUAL = aEntry[0]
d = re.findall("'u*(.+?)': u*'(.+?)'",aEntry[1])
for aEntry1 in d:
url.append(aEntry1[1])
qua.append(QUAL + ' (' + aEntry1[0] + ')')
	        # Display the quality selection table
api_call = dialog().VSselectqual(qua, url)
SubTitle = self.checkSubtitle(r["subs"])
if (api_call):
if SubTitle:
return True, api_call, SubTitle
else:
return True, api_call
return False, False
| [
"[email protected]"
] | |
b4b1cae9c7e54d74e89f8afd4bcbdbde27236d80 | 562d4bf000dbb66cd7109844c972bfc00ea7224c | /addons/advertising/controllers/controllers.py | 1868e3ae1ee47ecfdd4a0d92df979f22c6b5bda9 | [] | no_license | Mohamed33/odoo-efact-11-pos | e9da1d17b38ddfe5b2d0901b3dbadf7a76bd2059 | de38355aea74cdc643a347f7d52e1d287c208ff8 | refs/heads/master | 2023-03-10T15:24:44.052883 | 2021-03-06T13:25:58 | 2021-03-06T13:25:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | # -*- coding: utf-8 -*-
from odoo import http
# class Advertising(http.Controller):
# @http.route('/advertising/advertising/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/advertising/advertising/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('advertising.listing', {
# 'root': '/advertising/advertising',
# 'objects': http.request.env['advertising.advertising'].search([]),
# })
# @http.route('/advertising/advertising/objects/<model("advertising.advertising"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('advertising.object', {
# 'object': obj
# }) | [
"[email protected]"
] | |
ed83154aac965d7020394db30fc7d33772351c78 | 94f4bb0f6e43b2eb2f1bdb284a580b76121fa9af | /055.py | 3426c8ddb88f7c434c1f5ca842561dae076bd58f | [] | no_license | huosan0123/leetcode-py | f1ec8226bae732369d4e1989b99ab0ba4b4061c4 | 22794e5e80f534c41ff81eb40072acaa1346a75c | refs/heads/master | 2021-01-25T11:48:17.365118 | 2019-09-12T15:45:34 | 2019-09-12T15:45:34 | 93,934,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if not nums or len(nums)==1:
return True
pre = nums[0]
for i in range(1, len(nums)):
if pre == 0:
return False
else:
pre = max(pre-1, nums[i])
return True
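	    # Worked examples (sketch):
	    #     Solution().canJump([2, 3, 1, 1, 4])  # -> True
	    #     Solution().canJump([3, 2, 1, 0, 4])  # -> False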
| [
"[email protected]"
] | |
97e996c0fe83b4c2b8a1bafa79f0a29358a094de | 904a87f73eb0e3902a738e823f959cbad2f68a82 | /plotClass/plotting/plotGroups_mer.py | dc83b9a7dba4eeb9344b2b365d782974bbc46fe8 | [] | no_license | ashrafkasem/hepML1Lep | 2ccf167432e7d2a1550991137b7a247e044af1b1 | 9ed3b73887b36f26b9d4ca0243eedd3cac0c420e | refs/heads/master | 2021-07-11T22:31:18.498721 | 2020-08-31T21:30:10 | 2020-08-31T21:30:10 | 193,732,937 | 1 | 4 | null | 2020-05-18T06:55:53 | 2019-06-25T15:17:21 | Jupyter Notebook | UTF-8 | Python | false | false | 2,881 | py | import ROOT
All_files = {
'DiLepTT' :
{
'files': ['TTJets_DiLepton','TTJets_LO_HT'] ,
'select' : '&& DiLep_Flag == 1',
'scale' : '1000.0/sumOfWeights*genWeight*Xsec*1*btagSF*puRatio*lepSF*nISRttweight',
"fill": ROOT.TAttFill(ROOT.kRed, 1001),
"line": ROOT.TAttLine(ROOT.kRed, ROOT.kSolid, 1),
"marker": None,
"Label" : "t#bar{t} ll + jets",
"Stackable" : True
},
'SemiLepTT' :
{
'files': ['TTJets_SingleLeptonFrom','TTJets_LO_HT'] ,
'select' : '&& semiLep_Flag == 1',
'scale' : '1000.0/sumOfWeights*genWeight*Xsec*1*btagSF*puRatio*lepSF*nISRttweight',
"fill": ROOT.TAttFill(ROOT.kBlue-7, 1001),
"line": ROOT.TAttLine(ROOT.kBlue-7, ROOT.kSolid, 1),
"marker": None,
"Label" : "t#bar{t} l + jets",
"Stackable" : True
},
'Others' :
{
'files': ["TBar_tWch","TBar_tch_powheg","T_tWch","T_tWch_ext","T_tch_powheg","VVTo","WWTo","WZTo","ZZTo",'TTW','TTZ',"QCD_","WJetsToLNu_HT","DYJetsToLL"],
'select' : '',
'scale' : '1000.0/sumOfWeights*genWeight*Xsec*1*btagSF*puRatio*lepSF',
"fill": ROOT.TAttFill(ROOT.kOrange-3, 1001),
"line": ROOT.TAttLine(ROOT.kOrange-3, ROOT.kSolid, 1),
"marker": None,
"Label" : "Others",
"Stackable" : True
},
'Data' :
{
'files': ['SingleElectron','SingleMuon','MET_Run'] ,
'select' : '',
'scale' : '1',
"fill": None,
"line": None,
"marker": ROOT.TAttMarker(ROOT.kBlack, ROOT.kFullCircle, 0.7),
"Label" : "Data",
"Stackable" : False
}
}
dPhiCut = '&& ((LT < 350 && fabs(dPhi) > 1.0) || (350 < LT && LT < 600 && fabs(dPhi) > 0.75) || (600 < LT && fabs(dPhi) > 0.5))'
AntidPhiCut = '&& ((LT < 350 && fabs(dPhi) < 1.0) || (350 < LT && LT < 600 && fabs(dPhi) < 0.75) || (600 < LT && fabs(dPhi) < 0.5))'
ntopCut = '&& nTop_Total_Combined >= 2 '
AntintopCut = '&& nTop_Total_Combined < 1'
oldbins = {"LT12HT01": "(LT < 450) && (HT < 1000) " ,
"LT12HT23": "(LT < 450) && (HT > 1000) && (HT < 1500)" ,
"LT12HT4i": "(LT < 450) && (HT > 1500) " ,
"LT3HT01" : "(LT > 450) && (LT < 600) && (HT < 1000)" ,
"LT3HT23" : "(LT > 450) && (LT < 600) && (HT > 1000) && (HT < 1500)" ,
"LT3HT4i" : "(LT > 450) && (LT < 600) && (HT > 1500)" ,
"LT4HT01" : "(LT > 600) && (LT < 750) && (HT < 1000)" ,
"LT4HT23" : "(LT > 600) && (LT < 750) && (HT > 1000) && (HT < 1500)" ,
"LT4HT4i" : "(LT > 600) && (LT < 750) && (HT > 1500)" ,
"LT5iHT0i": "(LT > 750)" }
| [
"[email protected]"
] | |
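A minimal sketch of how a plotting script might consume the group dictionary above (assumed usage, not code from this repository): each group's `select` fragment is appended to a baseline cut and an LT/HT search-bin cut, while the fill/line/marker attributes stay available for styling. The baseline cut string is an illustrative placeholder.
def build_selections(base_cut="(nJets30Clean >= 5)"):
    # Combine the baseline cut, each LT/HT bin cut and the per-group selection fragment
    # (the fragments above either start with '&&' or are empty strings).
    selections = {}
    for group, cfg in All_files.items():
        for bin_name, bin_cut in oldbins.items():
            selections[(group, bin_name)] = base_cut + " && (" + bin_cut + ") " + cfg['select']
    return selections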
d51a4bd7dc7436067f703bec0084d907b03f9157 | a5ba631dddaf2912c309601f8fbdd3c5b494fe20 | /src/azure-cli-core/tests/test_logging.py | 14cddef8a729a9767cf3f2dedc6a9e237df9fe80 | [
"MIT"
] | permissive | saurabsa/azure-cli-old | 37471020cd2af9a53e949e739643299f71037565 | f77477a98c9aa9cb55daf5b0d2f410d1455a9225 | refs/heads/master | 2023-01-09T04:00:15.642883 | 2018-04-23T21:40:04 | 2018-04-23T21:40:04 | 130,759,501 | 0 | 0 | NOASSERTION | 2022-12-27T14:59:06 | 2018-04-23T21:33:34 | Python | UTF-8 | Python | false | false | 2,897 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import azure.cli.core.azlogging as azlogging
class TestLogging(unittest.TestCase):
# When running verbose level tests, we check that argv is empty
# as we expect _determine_verbose_level to remove consumed arguments.
def test_determine_verbose_level_default(self):
argv = []
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 0
self.assertEqual(actual_level, expected_level)
self.assertFalse(argv)
def test_determine_verbose_level_verbose(self):
argv = ['--verbose']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 1
self.assertEqual(actual_level, expected_level)
self.assertFalse(argv)
def test_determine_verbose_level_debug(self):
argv = ['--debug']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 2
self.assertEqual(actual_level, expected_level)
self.assertFalse(argv)
def test_determine_verbose_level_v_v_v_default(self):
argv = ['--verbose', '--debug']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 2
self.assertEqual(actual_level, expected_level)
# We still consumed the arguments
self.assertFalse(argv)
def test_determine_verbose_level_other_args_verbose(self):
argv = ['account', '--verbose']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 1
self.assertEqual(actual_level, expected_level)
# We consumed 1 argument
self.assertEqual(argv, ['account'])
def test_determine_verbose_level_other_args_debug(self):
argv = ['account', '--debug']
actual_level = azlogging._determine_verbose_level(argv) # pylint: disable=protected-access
expected_level = 2
self.assertEqual(actual_level, expected_level)
# We consumed 1 argument
self.assertEqual(argv, ['account'])
def test_get_az_logger(self):
az_logger = azlogging.get_az_logger()
self.assertEqual(az_logger.name, 'az')
def test_get_az_logger_module(self):
az_module_logger = azlogging.get_az_logger('azure.cli.module')
self.assertEqual(az_module_logger.name, 'az.azure.cli.module')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
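For context, a minimal stand-in for the helper these tests exercise (this is not the real azure-cli implementation, only a sketch reconstructed from the assertions above): it consumes --verbose/--debug from argv and returns verbosity level 0, 1 or 2.
def _determine_verbose_level(argv):
    # Sketch only: pop the verbosity flags from argv and map them to a level.
    level = 0
    if '--verbose' in argv:
        level = max(level, 1)
        argv.remove('--verbose')
    if '--debug' in argv:
        level = max(level, 2)
        argv.remove('--debug')
    return level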
a2b971c67d0456fbbf6fd22640af49583f80fce2 | e62d13d578ebbe3acc3713e3eb783c81c785f2a8 | /myems-api/core/version.py | f1ef5dfaf78eabc8ce8847750632ade6fd4f7c29 | [
"MIT"
] | permissive | tianlinzhong/myems | c25d7ece4f1853bb4415e2cedfdc8cb9cf8ff991 | 07dd1eb8060f4145be66c8d1a20b5e064a68281b | refs/heads/master | 2023-03-25T05:24:05.057248 | 2021-03-28T09:06:45 | 2021-03-28T09:06:45 | 340,333,276 | 2 | 0 | MIT | 2021-02-28T14:00:06 | 2021-02-19T10:22:32 | Python | UTF-8 | Python | false | false | 452 | py | import falcon
import simplejson as json
class VersionItem:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp):
result = {"version": 'MyEMS 1.1.0 (Community Edition)',
"release-date": '202103018',
"website": "https://myems.io"}
resp.body = json.dumps(result)
| [
"[email protected]"
] | |
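A hedged example of how a resource such as VersionItem is typically mounted in a Falcon application (the route path and wiring are assumptions; the resource above targets the older falcon.API / resp.body interface):
# Assumed application wiring for the VersionItem resource above (older Falcon API).
import falcon
app = falcon.API()
app.add_route('/version', VersionItem())
# A GET on /version then returns the JSON payload built in on_get().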
67a45c24fd1b92104a81304a32b145bd2a77baa6 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/2715.py | 53c87fa3b23e56f4d4d2ff037c02e48d28108491 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import math
f = open('/home/dexter/input1.in', 'r')
cases = int(f.readline())
for k in range (0, cases):
a = f.readline()
itms=a.split()
out=0
for i in range(int(itms[0]),int(itms[1])+1):
x=str(i)
y =x[::-1]
if(y==x):
x=int(x)
if((math.sqrt(x)-int(math.sqrt(x))) == 0):
x=str(int(math.sqrt(x)))
y =x[::-1]
if(y==x):
out+=1
print "Case #"+str(k+1)+": "+str(out)
| [
"[email protected]"
] | |
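The Python 2 loop above counts "fair and square" numbers: palindromes whose integer square root is also a palindrome. A Python 3 sketch of the same per-number test, using math.isqrt (3.8+) instead of the floating-point sqrt comparison (an assumed refactor, not part of the original submission):
import math

def is_fair_and_square(n):
    s = str(n)
    if s != s[::-1]:          # must be a palindrome...
        return False
    r = math.isqrt(n)         # ...and a perfect square whose root is a palindrome too
    return r * r == n and str(r) == str(r)[::-1]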
80c70e681d1be2636cc0167b75f54d09254d1b14 | 301c85e8f2391896b11c9f4cf9f440283865593e | /armstrong/spiders/spider.py | 1e2f822c32a2f498a48612c4df8490fd7bf8d844 | [] | no_license | hristo-grudev/armstrong | 513e5639c347c8a3ffc8df3cafd5860d2ab3fb81 | 8bbcad8a72f58456638c84369f72c985c93e3cc9 | refs/heads/main | 2023-03-29T11:33:06.484366 | 2021-04-07T07:51:26 | 2021-04-07T07:51:26 | 355,458,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | import scrapy
from scrapy.loader import ItemLoader
from ..items import ArmstrongItem
from itemloaders.processors import TakeFirst
class ArmstrongSpider(scrapy.Spider):
name = 'armstrong'
start_urls = ['https://www.armstrong.bank/connect/news-and-updates']
def parse(self, response):
post_links = response.xpath('//div[@class="news-item-text"]')
for post in post_links:
url = post.xpath('.//a[@data-link-type-id="page"]/@href').get()
date = post.xpath('.//div[@class="news-item-text-date"]//text()[normalize-space()]').get()
if url:
yield response.follow(url, self.parse_post, cb_kwargs={'date': date})
def parse_post(self, response, date):
title = response.xpath('//h1/text()').get()
description = response.xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "mb-6", " " ))]//text()[normalize-space()]').getall()
description = [p.strip() for p in description if '{' not in p]
description = ' '.join(description).strip()
item = ItemLoader(item=ArmstrongItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('description', description)
item.add_value('date', date)
return item.load_item()
| [
"[email protected]"
] | |
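The spider is normally launched with `scrapy crawl armstrong` from the project root so the relative import of ArmstrongItem resolves. A hedged sketch for running it programmatically instead (feed filename and settings are illustrative; the FEEDS setting assumes Scrapy 2.1 or newer):
# Assumed programmatic runner, executed inside the scrapy project package.
from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(settings={"FEEDS": {"armstrong.json": {"format": "json"}}})
process.crawl(ArmstrongSpider)
process.start()  # blocks until the crawl finishes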
bee9c55e7640a1f88b9e62f0512abcf2174573a4 | 0eb599c3bbfa6e5b31516913b88cc9db3a1311ce | /GCJ/GCJ2021_2_a_sub.py | 7f055f0b05f5c29efe942e6944b2235a3c45cf43 | [] | no_license | Linus-MK/AtCoder | 5b84dc88c2d2773d0f97ed18265d303290da7879 | a587e89a9e0c2ab4d36b09176bcc95e901e14326 | refs/heads/master | 2022-11-25T05:37:12.148722 | 2022-11-17T16:04:10 | 2022-11-17T16:04:10 | 169,840,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | import math
ans = 0
for i in range(2, 100+1):
ans += math.ceil(10**8 / i)
print(ans / (10**8))
# 4.18737795 < 6 OK!
| [
"[email protected]"
] | |
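The one-off script above is a budget check: summing ceil(10**8 / i) for i = 2..100 gives roughly 4.187·10**8, which the comment compares against a limit of 6 (in units of 10**8). A hedged cross-check with exact integer ceilings, to confirm the float-based version hides no rounding issue:
import math
float_total = sum(math.ceil(10**8 / i) for i in range(2, 101))
exact_total = sum(-(-10**8 // i) for i in range(2, 101))      # integer ceiling division
print(float_total == exact_total, exact_total < 6 * 10**8)    # expected: True True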
3d7b94751b9c6a8ebf732eec60f889bc243c3977 | 08ddce92744c78432b69409d197ad1393ca685aa | /weixin/Bot/test2.py | 3537329a673c998099f7e0f15bc0b992e3d3d01a | [] | no_license | baliguan163/PythonDemo | 71255eb21850134b4b6afb2eeed948cc34326e7a | c4fe1b6ea36bec2c531244ef95c809e17b64b727 | refs/heads/master | 2021-01-02T08:13:18.809740 | 2019-05-19T16:28:16 | 2019-05-19T16:28:16 | 98,963,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | #-*-coding:utf-8-*-
__author__ = 'Administrator'
from wxpy import *
# wxpy's friend-statistics feature is very handy: it makes it easy to get the
# geographic and gender distribution of your friends. The code below tallies the
# author's own friends and prints the 10 regions with the most of them.
bot = Bot(cache_path=True)
friends_stat = bot.friends().stats()
# print(friends_stat)
friend_loc = []  # each element is a two-item list holding [region, count]
for province, count in friends_stat["province"].items():
if province != "":
friend_loc.append([province, count])
# Sort by friend count in descending order
friend_loc.sort(key=lambda x: x[1], reverse=True)
print('-------------- Top 10 regions by friend count -------------')
# Print the 10 regions with the most friends
for item in friend_loc[:10]:
print(item[0], item[1])
print('------------------ Gender distribution -------------------')
# The gender distribution is tallied as follows
for sex, count in friends_stat["sex"].items():
    # 1 stands for MALE, 2 for FEMALE
if sex == 1:
print(" MALE %d" % count)
elif sex == 2:
print("FEMALE %d" % count)
# Locate the target group (searched by its Chinese group name)
company_group = bot.groups().search('优惠券')[0]
print(company_group)
boss = company_group.search('阿杜')[0]  # locate the boss inside the group
print(boss)
# Forward the boss's messages to the File Transfer Assistant
@bot.register(company_group)
def forward_boss_message(msg):
print(msg.member)
print(msg)
if msg.member == boss:
        print('Forwarding message: ' + msg['Text'])
# Block the main thread so the bot keeps running
embed()
| [
"[email protected]"
] | |
386a8bf05a7ce8388ed78b86e6713dc8bb4e3535 | aba442afba026d2130c4aeca863308ca26e7e472 | /tabular/src/autogluon/tabular/__init__.py | 132b85f2fd121822af13adfd0428350ff276ff5c | [
"Apache-2.0"
] | permissive | stjordanis/autogluon | c8fd03a9bf7624911b13e90239e9260dd8885ddf | 6af92e149491f6e5062495d87306b3625d12d992 | refs/heads/master | 2023-08-21T15:16:53.202431 | 2023-08-11T20:15:31 | 2023-08-11T20:15:31 | 228,360,888 | 0 | 0 | Apache-2.0 | 2019-12-16T10:25:32 | 2019-12-16T10:25:30 | null | UTF-8 | Python | false | false | 322 | py | from autogluon.common.features.feature_metadata import FeatureMetadata
from autogluon.common.utils.log_utils import _add_stream_handler
from autogluon.core.dataset import TabularDataset
try:
from .version import __version__
except ImportError:
pass
from .predictor import TabularPredictor
_add_stream_handler()
| [
"[email protected]"
] | |
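A hedged quickstart for the package whose top-level __init__ is shown above (file name and label column are placeholders; the calls shown match the TabularPredictor API exported here):
# Assumed usage of the names re-exported by the __init__ above.
from autogluon.tabular import TabularDataset, TabularPredictor

train = TabularDataset("train.csv")                       # any CSV with a label column
predictor = TabularPredictor(label="target").fit(train)   # trains and ensembles several models
print(predictor.leaderboard())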
c68cdb6fed4d1c16def268726b0762202f070da8 | f44b4e41d3b64fc64dc8f28cce1a42aac5715530 | /metrics/plastic_analysis.py | 66002d70c98efae0fbcf9a1785ee1d8a245228ca | [
"Apache-2.0"
] | permissive | jmribeiro/PLASTIC-Algorithms | d4ba4dbae9fea15a446e6557b9fe58f06b687464 | c59ad567a906f320220a09caff64c4a6273151f8 | refs/heads/main | 2022-12-31T00:16:10.108189 | 2020-10-20T22:06:11 | 2020-10-20T22:06:11 | 305,774,055 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,308 | py | from agents.plastic.PLASTICAgent import PLASTICAgent
from agents.plastic.model.LearningPLASTICModel import LearningPLASTICModel
from agents.plastic.model.LearntPLASTICModel import LearntPLASTICModel
from agents.teammates.GreedyAgent import GreedyAgent
from agents.teammates.TeammateAwareAgent import TeammateAwareAgent
import numpy as np
from yaaf.evaluation import Metric
import scipy.stats
class PLASTICTeammate(PLASTICAgent):
def __init__(self, type, num_teammates, world_size):
super(PLASTICTeammate, self).__init__("Plastic teammate", num_teammates, world_size)
if type == "greedy":
self._underlying_agent = GreedyAgent(0, world_size)
elif type == "teammate aware" or type == "mixed":
self._underlying_agent = TeammateAwareAgent(0, world_size)
else:
raise ValueError()
def select_action_according_to_model(self, pursuit_state, most_likely_model):
return self._underlying_agent.action(pursuit_state.features())
def setup_learning_prior(self):
return LearningPLASTICModel(self.num_teammates)
def _load_prior_team(self, directory, name):
return LearntPLASTICModel(directory, name, self.num_teammates)
class PLASTICAnalyzer(Metric):
def __init__(self):
super(PLASTICAnalyzer, self).__init__("PLASTIC Analyzer")
self._entropy = []
self._beliefs = []
self._team_names = None
def reset(self):
self._entropy = []
def __call__(self, timestep):
info = timestep.info
for key in info:
if "Plastic" in key or key == "Adhoc":
agent_info = info[key]
belief_distribution = agent_info["belief distribution"]
if self._team_names is None:
self._team_names = list(belief_distribution.keys())
beliefs = np.array([belief_distribution[team] for team in self._team_names])
entropy = scipy.stats.entropy(beliefs)
self._beliefs.append(beliefs)
self._entropy.append(entropy)
return self._entropy[-1]
def result(self):
return np.array(self._entropy)
def team_names(self):
return self._team_names
def beliefs(self):
return np.array(self._beliefs)
| [
"[email protected]"
] | |
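The analyzer above reduces the PLASTIC belief distribution to its entropy: high entropy means the agent is still unsure which prior team it is playing with, low entropy means it has locked on. A small standalone illustration of that metric (example numbers only, independent of the yaaf/agents packages):
import numpy as np
import scipy.stats

print(scipy.stats.entropy(np.array([0.25, 0.25, 0.25, 0.25])))   # uniform beliefs -> ln(4) ~ 1.386
print(scipy.stats.entropy(np.array([0.94, 0.02, 0.02, 0.02])))   # concentrated beliefs -> ~0.29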
0ae73847354ad0243e92bc20077f9c2eef00d8b6 | 6227637b2b3e13e2d17d7dd2c954e879bc6947a8 | /configs/bash/keyring.py | 5509f45c1543c39f7696c29c13788dfe38180959 | [] | no_license | Owensa/confs | 28c01e57984a9f8187740a19d95d9c51844c7a1d | f247448fbba3d873460a4f99228f372230f1b1bc | refs/heads/master | 2021-04-15T04:36:03.755459 | 2018-03-31T22:31:50 | 2018-03-31T22:31:50 | 126,733,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | #!/usr/bin/env/ python3
import os
#Get archive key and get rid of strange apt behavior
def fetch():
    os.system("wget -q -O - https://archive.kali.org/archive-key.asc | apt-key add - && apt-get update >> bootstrap_log.md")  # '-' makes apt-key read the key from stdin
| [
"[email protected]"
] | |
392c843a677b5ebc71e265798518ab247c504ee7 | 164b499e14412e7e5d0b1f917922873a7b5d072c | /studyNote/python-2/cmd_serve.py | 87aace6bc1f982e3ab23341bf7b8f5656e6c00df | [] | no_license | liangliang115715/pythonStudyNote | f55293b0ad2ded21dbb6938ac82f7bee77e724ef | c36ef8c032ee8d85570d0f2234a26370a3709402 | refs/heads/master | 2023-01-09T19:14:16.076798 | 2019-10-16T10:59:48 | 2019-10-16T10:59:48 | 215,523,473 | 0 | 0 | null | 2023-01-04T12:31:28 | 2019-10-16T10:42:51 | Python | UTF-8 | Python | false | false | 778 | py | #_author:
#date:
import socket
import subprocess
# Create the socket object
sk=socket.socket()
# Give the socket an IP address and port, then bind to it
adress=("127.0.0.1",8000)
sk.bind(adress)
# Listen on the configured port and wait for client requests
sk.listen(2)
while True:
print("waiting.....")
conn, addr = sk.accept()
print(addr)
while True:
try:
data=conn.recv(1024)
except Exception:
break
if not data:
break
        # Run the received command in a child process and capture its output in obj
        obj=subprocess.Popen(str(data,"utf8"),shell=True,stdout=subprocess.PIPE)
        # Read the execution result stored in obj
cmd_result=obj.stdout.read()
result_len=bytes(str(len(cmd_result)),"utf8")
conn.sendall(result_len)
conn.sendall(cmd_result) | [
"[email protected]"
] | |
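A hypothetical client for the command server above (an assumed counterpart; no client file is included here): it sends one shell command, reads the length header the server sends first, then keeps receiving until the whole result has arrived.
import socket

client = socket.socket()
client.connect(("127.0.0.1", 8000))
client.send(bytes("dir", "utf8"))          # any command the server's shell understands
header = client.recv(1024)                 # length header; assumes it arrives before the payload
length = int(str(header, "utf8"))
received = b""
while len(received) < length:
    received += client.recv(1024)
print(received.decode("utf8", errors="replace"))
client.close()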
fb5cb78ce44163af8a0147d51eb7e62cb50fa7e6 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/bonner.py | f884714292e78e4bd9ae20597022ed5d04d0f710 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 82 | py | ii = [('WadeJEB.py', 1), ('GodwWLN.py', 1), ('MereHHB3.py', 1), ('DibdTRL.py', 1)] | [
"[email protected]"
] |