Dataset schema (29 columns):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1–1 |
| author_id | string | length 1–132 |
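A minimal sketch of how a dataset with this schema could be inspected with the Hugging Face `datasets` library; the dataset id below is a placeholder for illustration, not the actual source:

```python
# Hypothetical usage sketch: "org/python-code-corpus" is a placeholder id.
from datasets import load_dataset

# Stream the rows so the 10.2M-byte content cells are not all loaded at once.
ds = load_dataset("org/python-code-corpus", split="train", streaming=True)
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```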
6ccaf0b7923ddbcf05dbd74de253ca863a8a52de | 57db61160494659af43ee255d1e6ab2af6617114 | /ultron-api/contact/admin.py | 92b8324a11994ac12b4367be09b970e401577cbe | [] | no_license | gloompi/ultron-studio | fc667d563467b386a8dec04a6079e7cdcfedc5a7 | ec2ae8051644df2433b931c7e0228e75eaf20990 | refs/heads/master | 2023-06-25T19:22:45.119315 | 2019-12-08T05:53:02 | 2019-12-08T05:53:02 | 226,545,035 | 0 | 0 | null | 2023-06-10T00:22:15 | 2019-12-07T16:44:16 | JavaScript | UTF-8 | Python | false | false | 205 | py | from django.contrib import admin
from .models import Contact
class ContactAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
# Register your models here.
admin.site.register(Contact, ContactAdmin) | [
"[email protected]"
] | |
aca820fb2f94f242539ff4b7b1b2ab02fbc5a555 | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/plugins/attack/db/sqlmap/tamper/charencode.py | 6d1a46727fed80594ad45d9e5cbf3e7aa2e118f8 | [] | no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import string
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.LOWEST
def dependencies():
pass
def tamper(payload, **kwargs):
"""
    URL-encodes every character in the given payload (characters that are
    already percent-encoded are left untouched)
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass very weak web application firewalls that do not
url-decode the request before processing it through their ruleset
* The web server will anyway pass the url-decoded version behind,
hence it should work against any DBMS
>>> tamper('SELECT FIELD FROM%20TABLE')
'%53%45%4C%45%43%54%20%46%49%45%4C%44%20%46%52%4F%4D%20%54%41%42%4C%45'
"""
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[
i + 2:i + 3] in string.hexdigits:
retVal += payload[i:i + 3]
i += 3
else:
retVal += '%%%.2X' % ord(payload[i])
i += 1
return retVal
| [
"[email protected]"
] | |
38b2275bab017121700f29468db3da539f3d450e | bab33c23fc02dc171395b34c5c88fcf83a95cb96 | /test/Transforms/test_Transforms.py | ec1905520dfce9a46bb05990c38fae7639a0f5b3 | [] | no_license | heliy/nornir-imageregistration | a623ad00c0c253bcc925306920824affaa414810 | 368bc245ef2c7be630f0cdc8c448adb62b797d5a | refs/heads/master | 2020-05-07T16:59:02.268951 | 2018-02-27T01:22:57 | 2018-02-27T01:22:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,922 | py | '''
Created on Mar 18, 2013
@author: u0490822
'''
import os
import unittest
from nornir_imageregistration.transforms import *
from nornir_imageregistration.transforms.rbftransform import \
RBFWithLinearCorrection
import numpy as np
### MirrorTransformPoints###
### A simple four control point mapping on two 20x20 grids centered on 0,0###
### Fixed Space WarpedSpace ###
# . . . . . . . . . . 2 . . . . . . . . . 3 . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . 0 . . . . . . . . . 1 1 . . . . . . . . . 0 . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . 3 . . . . . . . . . 2 . . . . . . . . . .
# Coordinates are CY, CX, MY, MX
MirrorTransformPoints = np.array([[0, 0, 0, 0],
[0, 10, 0, -10],
[10, 0, -10, 0],
[10, 10, -10, -10]])
IdentityTransformPoints = np.array([[0, 0, 0, 0],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 1, 1, 1]])
# Translate points by (1,2)
TranslateTransformPoints = np.array([[0, 0, 1, 2],
[1, 0, 2, 2],
[0, 1, 1, 3],
[1, 1, 2, 3]])
# Used to test IsOffsetAtZero
OffsetTransformPoints = np.array([[1, 1, 0, 0],
[2, 1, 1, 0],
[1, 2, 0, 1],
[2, 2, 1, 1]])
def TransformCheck(test, transform, warpedPoint, fixedPoint):
'''Ensures that a point can map to its expected transformed position and back again'''
fp = transform.Transform(warpedPoint)
test.assertTrue(np.array_equal(np.around(fp, 2), fixedPoint))
wp = transform.InverseTransform(fp)
test.assertTrue(np.array_equal(np.around(wp, 2), warpedPoint))
def NearestFixedCheck(test, transform, fixedPoints, testPoints):
'''Ensures that the nearest fixed point can be found for a test point'''
distance, index = transform.NearestFixedPoint(testPoints)
test.assertTrue(np.array_equal(np.around(transform.FixedPoints[index,:], 2), fixedPoints))
def NearestWarpedCheck(test, transform, warpedPoints, testPoints):
'''Ensures that the nearest warped point can be found for a test point'''
distance, index = transform.NearestWarpedPoint(testPoints)
test.assertTrue(np.array_equal(np.around(transform.WarpedPoints[index,:], 2), warpedPoints))
class Test(unittest.TestCase):
def testIdentity(self):
T = meshwithrbffallback.MeshWithRBFFallback(IdentityTransformPoints)
warpedPoint = np.array([[0, 0],
[0.25, 0.25],
[1, 1],
[-1, -1]])
TransformCheck(self, T, warpedPoint, warpedPoint)
def testTranslate(self):
T = meshwithrbffallback.MeshWithRBFFallback(TranslateTransformPoints)
warpedPoint = np.array([[1, 2],
[1.25, 2.25],
[2, 3],
[0, 1]])
controlPoint = np.array([[0, 0],
[0.25, 0.25],
[1, 1],
[-1, -1]])
TransformCheck(self, T, warpedPoint, controlPoint)
def testTriangulation(self):
# os.chdir('C:\\Buildscript\\Test\\Stos')
# MToCStos = IrTools.IO.stosfile.StosFile.Load('27-26.stos')
# CToVStos = IrTools.IO.stosfile.StosFile.Load('26-25.stos')
#
# # I'll need to make sure I remember to set the downsample factor when I warp the .mosaic files
# (CToV, cw, ch) = IrTools.Transforms.factory.TransformFactory.LoadTransform(CToVStos.Transform)
# (MToC, mw, mh) = IrTools.Transforms.factory.TransformFactory.LoadTransform(MToCStos.Transform)
#
# MToV = CToV.AddTransform(MToC)
#
# MToCStos.Transform = IrTools.Transforms.factory.TransformFactory.TransformToIRToolsGridString(MToC, mw, mh)
# MToCStos.Save("27-26_Test.stos")
#
# MToVStos = copy.deepcopy(MToCStos)
# MToVStos.ControlImageFullPath = CToVStos.ControlImageFullPath
# MToVStos.Transform = IrTools.Transforms.factory.TransformFactory.TransformToIRToolsGridString(MToV, mw, mh)
# MToVStos.ControlImageDim = CToVStos.ControlImageDim
# MToVStos.MappedImageDim = MToCStos.MappedImageDim
#
# MToVStos.Save("27-25.stos")
global MirrorTransformPoints
T = triangulation.Triangulation(MirrorTransformPoints)
self.assertEqual(len(T.FixedTriangles), 2)
self.assertEqual(len(T.WarpedTriangles), 2)
warpedPoint = np.array([[-5, -5]])
TransformCheck(self, T, warpedPoint, -warpedPoint)
NearestFixedCheck(self, T, MirrorTransformPoints[:,0:2], MirrorTransformPoints[:,0:2] - 1)
NearestWarpedCheck(self, T, MirrorTransformPoints[:,2:4], MirrorTransformPoints[:,2:4] - 1)
# Add a point to the mirror transform, make sure it still works
T.AddPoint([5.0, 5.0, -5.0, -5.0])
#Make sure the new point can be found correctly
NearestFixedCheck(self, T, T.FixedPoints, T.FixedPoints - 1)
NearestWarpedCheck(self, T, T.WarpedPoints, T.WarpedPoints - 1)
#Add a duplicate and see what happens
NumBefore = T.NumControlPoints
T.AddPoint([5.0, 5.0, -5.0, -5.0])
NumAfter = T.NumControlPoints
self.assertEqual(NumBefore, NumAfter)
# We should have a new triangulation if we added a point
self.assertTrue(len(T.FixedTriangles) > 2)
self.assertTrue(len(T.WarpedTriangles) > 2)
TransformCheck(self, T, warpedPoint, -warpedPoint)
# Try points not on the transform points
warpedPoints = np.array([[-2.0, -4.0],
[-4.0, -2.0],
[0.0, -9.0],
[-9.0, 0.0]])
TransformCheck(self, T, warpedPoints, -warpedPoints)
def testRBFTriangulation(self):
# os.chdir('C:\\Buildscript\\Test\\Stos')
# MToCStos = IrTools.IO.stosfile.StosFile.Load('27-26.stos')
# CToVStos = IrTools.IO.stosfile.StosFile.Load('26-25.stos')
#
# # I'll need to make sure I remember to set the downsample factor when I warp the .mosaic files
# (CToV, cw, ch) = IrTools.Transforms.factory.TransformFactory.LoadTransform(CToVStos.Transform)
# (MToC, mw, mh) = IrTools.Transforms.factory.TransformFactory.LoadTransform(MToCStos.Transform)
#
# MToV = CToV.AddTransform(MToC)
#
# MToCStos.Transform = IrTools.Transforms.factory.TransformFactory.TransformToIRToolsGridString(MToC, mw, mh)
# MToCStos.Save("27-26_Test.stos")
#
# MToVStos = copy.deepcopy(MToCStos)
# MToVStos.ControlImageFullPath = CToVStos.ControlImageFullPath
# MToVStos.Transform = IrTools.Transforms.factory.TransformFactory.TransformToIRToolsGridString(MToV, mw, mh)
# MToVStos.ControlImageDim = CToVStos.ControlImageDim
# MToVStos.MappedImageDim = MToCStos.MappedImageDim
#
# MToVStos.Save("27-25.stos")
global MirrorTransformPoints
T = RBFWithLinearCorrection(MirrorTransformPoints[:,2:4], MirrorTransformPoints[:,0:2])
self.assertEqual(len(T.FixedTriangles), 2)
self.assertEqual(len(T.WarpedTriangles), 2)
warpedPoint = np.array([[-5, -5]])
TransformCheck(self, T, warpedPoint, -warpedPoint)
NearestFixedCheck(self, T, T.FixedPoints, T.FixedPoints - 1)
NearestWarpedCheck(self, T, T.WarpedPoints, T.WarpedPoints - 1)
# Add a point to the mirror transform, make sure it still works
T.AddPoint([5.0, 5.0, -5.0, -5.0])
NearestFixedCheck(self, T, T.FixedPoints, T.FixedPoints - 1)
NearestWarpedCheck(self, T, T.WarpedPoints, T.WarpedPoints - 1)
#Add a duplicate and see what happens
NumBefore = T.NumControlPoints
T.AddPoint([5.0, 5.0, -5.0, -5.0])
NumAfter = T.NumControlPoints
self.assertEqual(NumBefore, NumAfter)
# We should have a new triangulation if we added a point
self.assertTrue(len(T.FixedTriangles) > 2)
self.assertTrue(len(T.WarpedTriangles) > 2)
TransformCheck(self, T, warpedPoint, -warpedPoint)
#Try removing a point
# Try points not on the transform points
warpedPoints = np.array([[-2.0, -4.0],
[-4.0, -2.0],
[0.0, -9.0],
[-9.0, 0.0]])
TransformCheck(self, T, warpedPoints, -warpedPoints)
T.AddPoints([[2.5,2.5,-2.5,-2.5],
[7.5,7.5,-7.5,-7.5]])
TransformCheck(self, T, warpedPoints, -warpedPoints)
def test_OriginAtZero(self):
global IdentityTransformPoints
global OffsetTransformPoints
IdentityTransform = triangulation.Triangulation(IdentityTransformPoints)
OffsetTransform = triangulation.Triangulation(OffsetTransformPoints)
self.assertTrue(utils.IsOriginAtZero([IdentityTransform]), "Origin of identity transform is at zero")
self.assertFalse(utils.IsOriginAtZero([OffsetTransform]), "Origin of Offset Transform is not at zero")
self.assertTrue(utils.IsOriginAtZero([IdentityTransform, OffsetTransform]), "Origin of identity transform and offset transform is at zero")
def test_bounds(self):
global IdentityTransformPoints
IdentityTransform = triangulation.Triangulation(IdentityTransformPoints)
# print "Fixed Verts"
# print T.FixedTriangles
# print "\nWarped Verts"
# print T.WarpedTriangles
#
# T.AddPoint([5, 5, -5, -5])
# print "\nPoint added"
# print "Fixed Verts"
# print T.FixedTriangles
# print "\nWarped Verts"
# print T.WarpedTriangles
#
# T.AddPoint([5, 5, 5, 5])
# print "\nDuplicate Point added"
# print "Fixed Verts"
# print T.FixedTriangles
# print "\nWarped Verts"
# print T.WarpedTriangles
#
# warpedPoint = [[-5, -5]]
# fp = T.ViewTransform(warpedPoint)
# print("__Transform " + str(warpedPoint) + " to " + str(fp))
# wp = T.InverseTransform(fp)
#
# T.UpdatePoint(3, [10, 15, -10, -15])
# print "\nPoint updated"
# print "Fixed Verts"
# print T.FixedTriangles
# print "\nWarped Verts"
# print T.WarpedTriangles
#
# warpedPoint = [[-9, -14]]
# fp = T.ViewTransform(warpedPoint)
# print("__Transform " + str(warpedPoint) + " to " + str(fp))
# wp = T.InverseTransform(fp)
#
# T.RemovePoint(1)
# print "\nPoint removed"
# print "Fixed Verts"
# print T.FixedTriangles
# print "\nWarped Verts"
# print T.WarpedTriangles
#
# print "\nFixedPointsInRect"
# print T.GetFixedPointsRect([-1, -1, 14, 4])
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| [
"[email protected]"
] | |
89953cc562f5821db41a06a6c2c67cef8e4197ab | 67cfe3567f0a961123c561538624be28044ec852 | /backend/girltalk_15424/urls.py | d6ab5c8e6f4bb8b2bb8d6c9afad9be43542c8a78 | [] | no_license | crowdbotics-apps/girltalk-15424 | b732f7f6fc04fedd1acd99a2acfd129af71cc010 | 770efb300bc8297faea15e7b6a94c7a755fa8cf7 | refs/heads/master | 2023-02-04T02:55:52.708635 | 2020-04-04T05:21:02 | 2020-04-04T05:21:02 | 252,916,119 | 0 | 0 | null | 2023-01-26T16:28:35 | 2020-04-04T05:20:13 | JavaScript | UTF-8 | Python | false | false | 1,914 | py | """girltalk_15424 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Girltalk"
admin.site.site_title = "Girltalk Admin Portal"
admin.site.index_title = "Girltalk Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="Girltalk API",
default_version="v1",
description="API documentation for Girltalk App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
d1ca6c351122bf427bc8a5012370cfc1a2fa3cc8 | 6570bfdb26a41d99620debec5541e789b3c613f3 | /Others/Mercari/binary.py | c7514cc5228a9257b4679b4b065a41dd9d90ea3f | [] | no_license | ameet-1997/Competitive_Coding | bc30f37ae034efe7bb63f71241792fc53c323a50 | a9824430cf0458516ddd88655c1eca1f42ff3f0a | refs/heads/master | 2021-05-10T14:07:15.209770 | 2018-01-22T19:22:13 | 2018-01-22T19:22:13 | 118,500,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | n=input()+1
while'11'in bin(n):n+=1
print n
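# The golfed lines above print the smallest integer greater than the input
# whose binary representation contains no two adjacent 1-bits. A non-golfed
# sketch of the same logic (Python 3 assumed; the original is Python 2):
def next_without_adjacent_ones(n):
    m = n + 1
    while '11' in bin(m):  # bin(m) is e.g. '0b1010'; '11' marks adjacent set bits
        m += 1
    return m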
| [
"[email protected]"
] | |
1dcceb03a81d2043094a05404cce1de425ad775d | 4ff90da76cb447db065896a06573258a27fc8878 | /itchat/components/login.py | 709b8307350c9a7bc7e422e7fe610d2d585d68f0 | [
"MIT"
] | permissive | wosenbo/ItChat | 9455c79a57f7b5d5f03be86d652b428eb248299f | 36a3a027bb8dc19f0ceb4b8a44e943a60b2286ef | refs/heads/master | 2021-01-12T09:18:38.605240 | 2017-04-07T01:18:53 | 2017-04-07T01:18:53 | 76,820,262 | 0 | 0 | null | 2016-12-19T02:13:56 | 2016-12-19T02:13:56 | null | UTF-8 | Python | false | false | 12,311 | py | import os, sys, time, re, io
import threading
import json, xml.dom.minidom
import copy, pickle, random
import traceback, logging
import requests
from .. import config, utils
from ..returnvalues import ReturnValue
from .contact import update_local_chatrooms
from .messages import produce_msg
logger = logging.getLogger('itchat')
def load_login(core):
core.login = login
core.get_QRuuid = get_QRuuid
core.get_QR = get_QR
core.check_login = check_login
core.web_init = web_init
core.show_mobile_login = show_mobile_login
core.start_receiving = start_receiving
core.get_msg = get_msg
core.logout = logout
def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
loginCallback=None, exitCallback=None):
if self.alive:
logger.debug('itchat has already logged in.')
return
while 1:
for getCount in range(10):
logger.info('Getting uuid of QR code.')
while not self.get_QRuuid(): time.sleep(1)
logger.info('Downloading QR code.')
qrStorage = self.get_QR(enableCmdQR=enableCmdQR,
picDir=picDir, qrCallback=qrCallback)
if qrStorage:
break
elif 9 == getCount:
logger.info('Failed to get QR code, please restart the program.')
sys.exit()
logger.info('Please scan the QR code to log in.')
isLoggedIn = False
while not isLoggedIn:
status = self.check_login()
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=self.uuid, status=status, qrcode=qrStorage.getvalue())
if status == '200':
isLoggedIn = True
elif status == '201':
if isLoggedIn is not None:
logger.info('Please press confirm on your phone.')
isLoggedIn = None
elif status != '408':
break
if isLoggedIn: break
logger.info('Log in time out, reloading QR code')
self.web_init()
self.show_mobile_login()
self.get_contact(True)
if hasattr(loginCallback, '__call__'):
r = loginCallback()
else:
utils.clear_screen()
if os.path.exists(picDir or config.DEFAULT_QR):
os.remove(picDir or config.DEFAULT_QR)
logger.info('Login successfully as %s' % self.storageClass.nickName)
self.start_receiving(exitCallback)
def get_QRuuid(self):
url = '%s/jslogin' % config.BASE_URL
params = {
'appid' : 'wx782c26e4c19acffb',
'fun' : 'new', }
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
self.uuid = data.group(2)
return self.uuid
def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None):
uuid = uuid or self.uuid
picDir = picDir or config.DEFAULT_QR
url = '%s/qrcode/%s' % (config.BASE_URL, uuid)
headers = { 'User-Agent' : config.USER_AGENT }
try:
r = self.s.get(url, stream=True, headers=headers)
except:
return False
qrStorage = io.BytesIO(r.content)
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue())
else:
with open(picDir, 'wb') as f: f.write(r.content)
if enableCmdQR:
utils.print_cmd_qr(picDir, enableCmdQR=enableCmdQR)
else:
utils.print_qr(picDir)
return qrStorage
def check_login(self, uuid=None):
uuid = uuid or self.uuid
url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL
localTime = int(time.time())
params = 'loginicon=true&uuid=%s&tip=0&r=%s&_=%s' % (
uuid, localTime / 1579, localTime)
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.code=(\d+)'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
process_login_info(self, r.text)
return '200'
elif data:
return data.group(1)
else:
return '400'
def process_login_info(core, loginContent):
    ''' Runs once login finishes (after the QR code has been scanned):
        * syncUrl and fileUploadingUrl are fetched
        * deviceid and msgid are generated
        * skey, wxsid, wxuin and pass_ticket are fetched
'''
regx = r'window.redirect_uri="(\S+)";'
core.loginInfo['url'] = re.search(regx, loginContent).group(1)
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False)
core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')]
for indexUrl, detailedUrl in (
("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")),
("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")),
("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")),
("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")),
("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))):
fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl]
if indexUrl in core.loginInfo['url']:
core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \
fileUrl, syncUrl
break
else:
core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url']
core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
core.loginInfo['msgid'] = int(time.time() * 1000)
core.loginInfo['BaseRequest'] = {}
for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes:
if node.nodeName == 'skey':
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data
elif node.nodeName == 'wxsid':
core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data
elif node.nodeName == 'wxuin':
core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data
def web_init(self):
url = '%s/webwxinit?r=%s' % (self.loginInfo['url'], int(time.time()))
data = { 'BaseRequest': self.loginInfo['BaseRequest'], }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
utils.emoji_formatter(dic['User'], 'NickName')
self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
self.loginInfo['User'] = utils.struct_friend_info(dic['User'])
self.loginInfo['SyncKey'] = dic['SyncKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncKey']['List']])
self.storageClass.userName = dic['User']['UserName']
self.storageClass.nickName = dic['User']['NickName']
self.memberList.append(dic['User'])
return dic
def show_mobile_login(self):
url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'Code' : 3,
'FromUserName' : self.storageClass.userName,
'ToUserName' : self.storageClass.userName,
'ClientMsgId' : int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
return ReturnValue(rawResponse=r)
def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
self.alive = True
def maintain_loop():
retryCount = 0
while self.alive:
try:
i = sync_check(self)
if i is None:
self.alive = False
elif i == '0':
continue
else:
msgList, contactList = self.get_msg()
if contactList:
chatroomList, otherList = [], []
for contact in contactList:
if '@@' in contact['UserName']:
chatroomList.append(contact)
else:
otherList.append(contact)
chatroomMsg = update_local_chatrooms(self, chatroomList)
self.msgList.put(chatroomMsg)
if msgList:
msgList = produce_msg(self, msgList)
for msg in msgList: self.msgList.put(msg)
retryCount = 0
except:
retryCount += 1
logger.debug(traceback.format_exc())
if self.receivingRetryCount < retryCount:
self.alive = False
else:
time.sleep(1)
self.logout()
if hasattr(exitCallback, '__call__'):
exitCallback()
else:
logger.info('LOG OUT!')
if getReceivingFnOnly:
return maintain_loop
else:
maintainThread = threading.Thread(target=maintain_loop)
maintainThread.setDaemon(True)
maintainThread.start()
def sync_check(self):
url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url'])
params = {
'r' : int(time.time() * 1000),
'skey' : self.loginInfo['skey'],
'sid' : self.loginInfo['wxsid'],
'uin' : self.loginInfo['wxuin'],
'deviceid' : self.loginInfo['deviceid'],
'synckey' : self.loginInfo['synckey'],
'_' : int(time.time() * 1000),}
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
if pm is None or pm.group(1) != '0':
logger.debug('Unexpected sync check result: %s' % r.text)
return None
return pm.group(2)
def get_msg(self):
url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['wxsid'],
self.loginInfo['skey'],self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'SyncKey' : self.loginInfo['SyncKey'],
'rr' : ~int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
if dic['BaseResponse']['Ret'] != 0: return None, None
self.loginInfo['SyncKey'] = dic['SyncCheckKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncCheckKey']['List']])
return dic['AddMsgList'], dic['ModContactList']
def logout(self):
if self.alive:
url = '%s/webwxlogout' % self.loginInfo['url']
params = {
'redirect' : 1,
'type' : 1,
'skey' : self.loginInfo['skey'], }
headers = { 'User-Agent' : config.USER_AGENT }
self.s.get(url, params=params, headers=headers)
self.alive = False
self.s.cookies.clear()
del self.chatroomList[:]
del self.memberList[:]
del self.mpList[:]
return ReturnValue({'BaseResponse': {
'ErrMsg': 'logout successfully.',
'Ret': 0, }})
| [
"[email protected]"
] | |
598e66cd794150397c8cf73002b440126b93541a | 951fc0da7384b961726999e5451a10e2783462c4 | /script.module.ATFTV/addon.py | 08dc093ce00ace1411bebb0134af1dcc39de1c05 | [] | no_license | vphuc81/MyRepository | eaf7b8531b2362f0e0de997a67b889bc114cd7c2 | 9bf8aca6de07fcd91bcec573f438f29e520eb87a | refs/heads/master | 2022-01-02T15:07:35.821826 | 2021-12-24T05:57:58 | 2021-12-24T05:57:58 | 37,680,232 | 6 | 10 | null | null | null | null | UTF-8 | Python | false | false | 7,622 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016,2017,2018 RACC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import sys
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
from xbmcgui import ListItem
from routing import Plugin
import os
import traceback
import requests
import requests_cache
from datetime import timedelta
from base64 import b64decode, urlsafe_b64encode
from pyDes import des, PAD_PKCS5
try:
from urllib.parse import quote_from_bytes as orig_quote
except ImportError:
from urllib import quote as orig_quote
addon = xbmcaddon.Addon()
plugin = Plugin()
plugin.name = addon.getAddonInfo("name")
user_agent = "Dalvik/2.1.0 (Linux; U; Android 5.1.1; AFTS Build/LVY48F)"
player_user_agent = "mediaPlayerhttp/2.1 (Linux;Android 5.1) ExoPlayerLib/2.6.1"
USER_DATA_DIR = xbmc.translatePath(addon.getAddonInfo("profile")).decode("utf-8") # !!
CACHE_TIME = int(addon.getSetting("cache_time"))
CACHE_FILE = os.path.join(USER_DATA_DIR, "cache")
expire_after = timedelta(hours=CACHE_TIME)
if not os.path.exists(USER_DATA_DIR):
os.makedirs(USER_DATA_DIR)
s = requests_cache.CachedSession(CACHE_FILE, allowable_methods="POST", expire_after=expire_after, old_data_on_error=True)
s.hooks = {"response": lambda r, *args, **kwargs: r.raise_for_status()}
s.headers.update({"User-Agent": "USER-AGENT-tvtap-APP-V2"})
token_url = "http://tvtap.net/tvtap1/index_new.php?case=get_channel_link_with_token_tvtap"
list_url = "http://tvtap.net/tvtap1/index_new.php?case=get_all_channels"
def quote(s, safe=""):
return orig_quote(s.encode("utf-8"), safe.encode("utf-8"))
@plugin.route("/")
def root():
categories = {
"01": "UK & USA Channels",
"02": "Movies",
"03": "Music",
"04": "News",
"05": "Sport",
"06": "Documentary",
"07": "Kids",
"08": "Food",
"09": "Religious",
}
list_items = []
for cat in categories.keys():
li = ListItem(categories[cat])
url = plugin.url_for(list_channels, cat_id=cat.lstrip("0"))
list_items.append((url, li, True))
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addDirectoryItems(plugin.handle, list_items)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route("/list_channels/<cat_id>")
def list_channels(cat_id=None):
list_items = []
r = s.post(list_url, headers={"app-token": "9120163167c05aed85f30bf88495bd89"}, data={"username": "603803577"}, timeout=15)
if "Could not connect" in r.content:
s.cache.clear()
ch = r.json()
for c in ch["msg"]["channels"]:
if c["cat_id"] == cat_id:
image = "http://tvtap.net/tvtap1/{0}|User-Agent={1}".format(quote(c.get("img"), "/"), quote(user_agent))
li = ListItem(c["channel_name"].rstrip("."))
li.setProperty("IsPlayable", "true")
li.setArt({"thumb": image, "icon": image})
li.setInfo(type="Video", infoLabels={"Title": c["channel_name"].rstrip("."), "mediatype": "video"})
try:
li.setContentLookup(False)
except AttributeError:
pass
url = plugin.url_for(play, ch_id=c["pk_id"])
list_items.append((url, li, False))
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addDirectoryItems(plugin.handle, list_items)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route("/play/<ch_id>/play.pvr")
def play(ch_id):
# 178.132.6.54 81.171.8.162
key = b"19087321"
r = s.post(list_url, headers={"app-token": "9120163167c05aed85f30bf88495bd89"}, data={"username": "603803577"}, timeout=15)
ch = r.json()
for c in ch["msg"]["channels"]:
if c["pk_id"] == ch_id:
selected_channel = c
break
title = selected_channel.get("channel_name")
image = "http://tvtap.net/tvtap1/{0}|User-Agent={1}".format(quote(c.get("img"), "/"), quote(user_agent))
with s.cache_disabled():
r = s.post(token_url, headers={"app-token": "9120163167c05aed85f30bf88495bd89"}, data={"channel_id": ch_id, "username": "603803577"}, timeout=15)
links = []
for stream in r.json()["msg"]["channel"][0].keys():
if "stream" in stream or "chrome_cast" in stream:
d = des(key)
link = d.decrypt(b64decode(r.json()["msg"]["channel"][0][stream]), padmode=PAD_PKCS5)
if link:
link = link.decode("utf-8")
if not link == "dummytext" and link not in links:
links.append(link)
if addon.getSetting("autoplay") == "true":
link = links[0]
else:
dialog = xbmcgui.Dialog()
ret = dialog.select("Choose Stream", links)
link = links[ret]
if link.startswith("http"):
media_url = "{0}|User-Agent={1}".format(link, quote(player_user_agent))
else:
media_url = link
if "playlist.m3u8" in media_url:
if addon.getSetting("inputstream") == "true":
li = ListItem(title, path=media_url)
li.setArt({"thumb": image, "icon": image})
li.setMimeType("application/vnd.apple.mpegurl")
li.setProperty("inputstreamaddon", "inputstream.adaptive")
li.setProperty("inputstream.adaptive.manifest_type", "hls")
li.setProperty("inputstream.adaptive.stream_headers", media_url.split("|")[-1])
elif addon.getSetting("livestreamer") == "true":
serverPath = os.path.join(xbmc.translatePath(addon.getAddonInfo("path")), "livestreamerXBMCLocalProxy.py")
runs = 0
while not runs > 10:
try:
requests.get("http://127.0.0.1:19001/version")
break
except Exception:
xbmc.executebuiltin("RunScript(" + serverPath + ")")
runs += 1
xbmc.sleep(600)
livestreamer_url = "http://127.0.0.1:19001/livestreamer/" + urlsafe_b64encode("hlsvariant://" + media_url)
li = ListItem(title, path=livestreamer_url)
li.setArt({"thumb": image, "icon": image})
li.setMimeType("video/x-mpegts")
else:
li = ListItem(title, path=media_url)
li.setArt({"thumb": image, "icon": image})
li.setMimeType("application/vnd.apple.mpegurl")
try:
li.setContentLookup(False)
except AttributeError:
pass
else:
li = ListItem(title, path=media_url)
li.setArt({"thumb": image, "icon": image})
xbmcplugin.setResolvedUrl(plugin.handle, True, li)
if __name__ == "__main__":
try:
plugin.run(sys.argv)
s.close()
except requests.exceptions.RequestException as e:
dialog = xbmcgui.Dialog()
dialog.notification(plugin.name, str(e), xbmcgui.NOTIFICATION_ERROR)
traceback.print_exc()
xbmcplugin.endOfDirectory(plugin.handle, False)
| [
"[email protected]"
] | |
3040be782248c917cdc83a55505739f977559922 | bf2d010229aece071359662f4fef44e48ba57951 | /dynamic_range_parallel_pipeline.py | 6432414ec8f72d79b72df4a68b82b80d29b6a4bc | [] | no_license | Osrip/CriticalEvolution | b97398f74e2fc5b54c9ab92765b08ce3bf97257e | f77cae8acc626cb4c6d64d5a44fdf00310309c2e | refs/heads/master | 2021-06-24T03:44:03.283017 | 2021-04-03T13:09:42 | 2021-04-03T13:09:42 | 215,332,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,763 | py | import os
from multiprocessing import Pool
import argparse
import train
import copy
from automatic_plot_helper import detect_all_isings
from automatic_plot_helper import load_isings_from_list
from automatic_plot_helper import load_settings
from automatic_plot_helper import all_sim_names_in_parallel_folder
import time
import ray
from switch_season_repeat_plotting import plot_pipeline
import pickle
from run_combi import RunCombi
import numpy as np
def dynamic_pipeline_all_sims(folder_names, pipeline_settings):
for folder_name in folder_names:
sim_names = all_sim_names_in_parallel_folder(folder_name)
if not pipeline_settings['parallelize_each_sim']:
for i, sim_name in enumerate(sim_names):
if pipeline_settings['only_plot_certain_num_of_simulations'] is None:
dynamic_pipeline_one_sim(sim_name, pipeline_settings)
elif pipeline_settings['only_plot_certain_num_of_simulations'] > i:
dynamic_pipeline_one_sim(sim_name, pipeline_settings)
else:
all_sim_names = np.array([])
for folder_name in folder_names:
sim_names = all_sim_names_in_parallel_folder(folder_name)
all_sim_names = np.append(all_sim_names, sim_names)
ray.init(num_cpus=pipeline_settings['cores'])
if pipeline_settings['specify_memory_usage']:
ray_funcs = [dynamic_pipeline_one_sim_remote_memory.remote(sim_name, pipeline_settings)for sim_name in all_sim_names]
else:
ray_funcs = [dynamic_pipeline_one_sim_remote.remote(sim_name, pipeline_settings)for sim_name in all_sim_names]
ray.get(ray_funcs)
ray.shutdown()
@ray.remote
def dynamic_pipeline_one_sim_remote(sim_name, pipeline_settings):
original_settings = load_settings(sim_name)
settings = create_settings_for_repeat(original_settings, sim_name, pipeline_settings)
run_all_repeats(settings, original_settings, pipeline_settings)
# Exact copy of dynamic_pipeline_one_sim_remote but with specific memory usage. Memory usage per task!!
@ray.remote(memory=1500 * 1024 * 1024)
def dynamic_pipeline_one_sim_remote_memory(sim_name, pipeline_settings):
original_settings = load_settings(sim_name)
settings = create_settings_for_repeat(original_settings, sim_name, pipeline_settings)
run_all_repeats(settings, original_settings, pipeline_settings)
# Exact copy of dynamic_pipeline_one_sim_remote but without the ray.remote decorator
def dynamic_pipeline_one_sim(sim_name, pipeline_settings):
original_settings = load_settings(sim_name)
settings = create_settings_for_repeat(original_settings, sim_name, pipeline_settings)
run_all_repeats(settings, original_settings, pipeline_settings)
def create_settings_for_repeat(settings, sim_name, pipeline_settings):
# settings['TimeSteps'] = 5
if pipeline_settings['varying_parameter'] == 'time_steps':
settings['random_time_steps'] = False
elif pipeline_settings['varying_parameter'] == 'food':
settings['random_food_seasons'] = False
settings = copy.deepcopy(settings)
complete_sim_folder = sim_name
settings['loadfile'] = complete_sim_folder
if pipeline_settings['load_last_generation']:
settings['iter'] = detect_all_isings(complete_sim_folder)[-1]
pipeline_settings['load_generation'] = detect_all_isings(complete_sim_folder)[-1]
else:
settings['iter'] = pipeline_settings['load_generation']
settings['LoadIsings'] = True
settings['switch_off_evolution'] = True
settings['save_data'] = False
settings['switch_seasons_repeat_pipeline'] = True
settings['dynamic_range_pipeline'] = True
# Animations:
settings['plot_generations'] = pipeline_settings['animation_for_repeats']
settings['repeat_pipeline_switched_boo'] = None
settings['random_time_steps_power_law'] = False
settings['commands_in_folder_name'] = False
settings['plot_pipeline'] = False
# switches off animation:
settings['plot'] = False
settings['save_energies_velocities_last_gen'] = False
settings['compress_save_isings'] = pipeline_settings['compress_save_isings']
return settings
def run_all_repeats(settings, original_settings, pipeline_settings):
# WATCH OUT !!! PARAMETERS WITH "FOOD" IN THEM CAN ALSO BECOME TIME STEPS !!!
if pipeline_settings['varying_parameter'] == 'time_steps':
if not original_settings['random_time_steps']:
original_mean_food_num = original_settings['TimeSteps']
else:
original_mean_food_num = (settings['random_time_step_limits'][0] + settings['random_time_step_limits'][1]) / 2
# if original_settings['random_time_steps_power_law']:
# print('!!! random_time_steps_power_law is not supported !!!')
elif pipeline_settings['varying_parameter'] == 'food':
if not original_settings['random_food_seasons']:
original_mean_food_num = original_settings['food_num']
else:
original_mean_food_num = (settings['rand_food_season_limits'][0] + settings['rand_food_season_limits'][1]) / 2
lowest_food_num = original_mean_food_num * (pipeline_settings['lowest_food_percent'] / 100.0)
if lowest_food_num < 1:
lowest_food_num = 1
highest_food_num = original_mean_food_num * (pipeline_settings['highest_food_percent'] / 100.0)
resolution = pipeline_settings['resolution']
food_num_arr = l
# Append food_num of original simulation if not already in list
if not original_mean_food_num in food_num_arr:
food_num_arr = np.append(food_num_arr, original_mean_food_num)
food_num_arr = np.sort(food_num_arr)
if pipeline_settings['parallelize_run_repeats']:
ray.init(num_cpus=pipeline_settings['cores']) #, ignore_reinit_error=True
ray_funcs = [run_repeat_remote.remote(food_num, settings, pipeline_settings, food_num_arr, original_mean_food_num) for food_num in food_num_arr]
ray.get(ray_funcs)
ray.shutdown()
else:
[run_repeat(food_num, settings, pipeline_settings, food_num_arr, original_mean_food_num) for food_num in food_num_arr]
# run_repeat(20, settings, pipeline_settings)
@ray.remote
def run_repeat_remote(num_foods, settings, pipeline_settings, food_num_arr, original_mean_food_num):
if pipeline_settings['varying_parameter'] == 'time_steps':
settings['TimeSteps'] = num_foods
# Activate saving of energies and velocities during life time for simulation with similar varying param as
# original simulation and for largest varying param
if num_foods == original_mean_food_num or num_foods == np.max(food_num_arr):
settings['save_energies_velocities_last_gen'] = True
print(num_foods)
elif pipeline_settings['varying_parameter'] == 'food':
settings['food_num'] = num_foods
if pipeline_settings['varying_parameter'] == 'food':
settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_foods_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
elif pipeline_settings['varying_parameter'] == 'time_steps':
settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_time_step_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
Iterations = pipeline_settings['num_repeats']
train.run(settings, Iterations)
# Exact copy of run_repeat_remote but without ray.remote decorator
def run_repeat(num_foods, settings, pipeline_settings, food_num_arr, original_mean_food_num):
if pipeline_settings['varying_parameter'] == 'time_steps':
settings['TimeSteps'] = num_foods
# Activate saving of energies and velocities during life time for simulation with similar varying param as
# original simulation and for largest varying param
if num_foods == original_mean_food_num or num_foods == np.max(food_num_arr):
settings['save_energies_velocities_last_gen'] = True
print(num_foods)
elif pipeline_settings['varying_parameter'] == 'food':
settings['food_num'] = num_foods
if pipeline_settings['varying_parameter'] == 'food':
settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_foods_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
elif pipeline_settings['varying_parameter'] == 'time_steps':
settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_time_step_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
Iterations = pipeline_settings['num_repeats']
train.run(settings, Iterations)
if __name__=='__main__':
'''
BETTER NAME: FOOD or TIME STEP DENSITY RESPONSE CURVE
This module explores the dynamic range of random food simulations:
    It expects a simulation run with the random food season parameter active.
    It then takes the last generation of that simulation and puts it into different environments, each with a fixed
    amount of food. There the organisms do not evolve; instead the experiment is repeated from scratch "num_repeats"
    times to get statistically meaningful results.
    "cores" should be roughly equal to the resolution, which should also be an int.
'''
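    # Illustrative sketch (assumed values) of the sweep that run_all_repeats
    # builds from the percent limits and resolution configured below:
    #
    #   original_mean = 2000                                     # e.g. mean time steps of the loaded run
    #   low = max(1, original_mean * lowest_food_percent / 100.0)
    #   high = original_mean * highest_food_percent / 100.0
    #   sweep = np.sort(np.append(np.linspace(low, high, resolution), original_mean))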
pipeline_settings = {}
pipeline_settings['varying_parameter'] = 'time_steps' # 'food'
pipeline_settings['cores'] = 58
pipeline_settings['num_repeats'] = 3
if pipeline_settings['varying_parameter'] == 'food':
pipeline_settings['lowest_food_percent'] = 1
pipeline_settings['highest_food_percent'] = 1000
elif pipeline_settings['varying_parameter'] == 'time_steps':
pipeline_settings['lowest_food_percent'] = 1
pipeline_settings['highest_food_percent'] = 2500
pipeline_settings['resolution'] = 40
# !!!!!!!! add_save_file_name has to be unique each run and must not be a substring of previous run !!!!!!!!!
    # !!!!!!!! otherwise runs are indistinguishable !!!!!!!!!
pipeline_settings['add_save_file_name'] = 'res_40_3_repeats_gen_4000' #'resulotion_80_hugeres_3_repeats_gen_100' # 'resulotion_80_hugeres_3_repeats_last_gen'
# list of repeats, that should be animated, keep in mind, that this Creates an animation for each REPEAT!
# If no animations, just emtpy list, if an animation should be created f.e. [0]
pipeline_settings['animation_for_repeats'] = []
# This loads last / highest generation from trained simulation
pipeline_settings['load_last_generation'] = False
# Otherwise specify generation, that shall be loaded, make sure thsi generation exists in all loaded simulations:
pipeline_settings['load_generation'] = 4000
# The following command allows to only plot a certain number of simulations in each parallel simulations folder
# If all simulations in those folders shall be plotted, set to None
pipeline_settings['only_plot_certain_num_of_simulations'] = None
# The following settings define the level of parallelization. Use 'parallelize_run_repeats' for low level
    # parallelization when plotting few simulations. Use high level parallelization with 'parallelize_each_sim' when
    # plotting many simulations. They cannot both be active at the same time. 'parallelize_each_sim' is particularly recommended
# when varying time steps
pipeline_settings['parallelize_each_sim'] = True
pipeline_settings['parallelize_run_repeats'] = False
# Specific memory usage per parallel task has to be specified in dynamic_pipeline_one_sim_remote_memory
# only works for pipeline_settings['parallelize_each_sim'] = True
pipeline_settings['specify_memory_usage'] = True
pipeline_settings['compress_save_isings'] = True
# folder_names = ['sim-20201022-184145_parallel_TEST_repeated']
# folder_names = ['sim-20201022-190553_parallel_b1_normal_seas_g4000_t2000', 'sim-20201022-190615_parallel_b10_normal_seas_g4000_t2000']#, 'sim-20201105-202455_parallel_b1_random_ts_2000_lim_100_3900', 'sim-20201105-202517_parallel_b10_random_ts_2000_lim_100_3900']
# folder_names = ['sim-20201026-224639_parallel_b1_fixed_4000ts_', 'sim-20201026-224709_parallel_b10_fixed_4000ts_', 'sim-20201022-190553_parallel_b1_normal_seas_g4000_t2000', 'sim-20201022-190615_parallel_b10_normal_seas_g4000_t2000', 'sim-20201026-224655_parallel_b1_random_100-7900ts_', 'sim-20201026-224722_parallel_b10_random_100-7900ts_', 'sim-20201105-202455_parallel_b1_random_ts_2000_lim_100_3900', 'sim-20201105-202517_parallel_b10_random_ts_2000_lim_100_3900']
folder_names = ['sim-20210206-122918_parallel_b1_normal_run_g4000_t2000_54_sims']#, 'sim-20201119-190204_parallel_b10_normal_run_g4000_t2000_54_sims']
dynamic_pipeline_all_sims(folder_names, pipeline_settings)
| [
"[email protected]"
] | |
dac3f89e9ff6dcef5bdf7d2f7588c8933dd9afa1 | c5291e50a3c72c885922378573a0ad423fcedf05 | /elastic/MainApp/__init__.py | a45576712bb6d54a6826b931b0fc69c4e1e0d94d | [] | no_license | raghurammanyam/django-projects | bcc3ed6285882af437a2995514cef33760fb063e | dd20ae354f7f111a0176a1cc047c099bd23e9f05 | refs/heads/master | 2022-12-12T19:22:31.698114 | 2018-12-09T09:41:45 | 2018-12-09T09:41:45 | 137,443,359 | 0 | 0 | null | 2022-11-22T03:01:07 | 2018-06-15T05:08:15 | Python | UTF-8 | Python | false | false | 51 | py | #default_app_config = 'MainApp.apps.MainappConfig'
| [
"[email protected]"
] | |
5eb44788937ca1fbf4a8a624dde9701a7a41231b | 8ac22dadac75a6968209997eae693db312deeef3 | /tenant_customer/__init__.py | 108c3893fe1910b15d16abd89dfadfd2b7dfad02 | [
"BSD-2-Clause"
] | permissive | smegurus/smegurus-django | 9b7c420d35806850da7e3ce66cffccfbc263bea2 | 053973b5ff0b997c52bfaca8daf8e07db64a877c | refs/heads/master | 2022-11-29T08:43:43.596459 | 2019-01-09T01:47:03 | 2019-01-09T01:47:03 | 159,753,141 | 1 | 0 | BSD-4-Clause | 2022-11-22T01:37:38 | 2018-11-30T01:52:03 | HTML | UTF-8 | Python | false | false | 65 | py | default_app_config = 'tenant_customer.apps.TenantCustomerConfig'
| [
"[email protected]"
] | |
ec6aeb30c25573caf0925da3c3ed23837c47509c | c5347ba3bbd2f4f2c7eefa50b2be2cdef94fa8d1 | /src/plugin/binkit/functions_match_viewer.py | e51dfe5ede3784977760930618561dbc7c858513 | [] | no_license | ohjeongwook/binkit | ebc1d58db6ff6950a632cbc8f98ce7078475670f | cfd183d5fa2860f78071d35424d55cae8ca80e60 | refs/heads/master | 2022-12-23T16:43:59.812706 | 2020-10-05T01:34:57 | 2020-10-05T01:34:57 | 266,231,657 | 68 | 9 | null | null | null | null | UTF-8 | Python | false | false | 9,088 | py | import thread
import traceback
import idaapi
import idc
import ida_bytes
from PyQt5 import QtGui, QtCore, QtWidgets
from client import *
from Queue import Queue
from threading import Thread
def sync_worker(queue):
syncers = {}
while True:
commands = queue.get()
queue.task_done()
if not commands['md5'] in syncers or syncers[commands['md5']] == None:
syncers[commands['md5']] = IDASessions.connect(commands['md5'])
connection = syncers[commands['md5']]
try:
if connection:
connection.root.run_commands(commands['list'])
except:
traceback.print_exc()
del syncers[commands['md5']]
class NumberSortModel(QtCore.QSortFilterProxyModel):
def lessThan(self, left, right):
if left.column() in (4, 5, 6):
lvalue = int(left.data())
rvalue = int(right.data())
return lvalue < rvalue
elif left.column() in (1, 3):
lvalue = int(left.data(), 16)
rvalue = int(right.data(), 16)
return lvalue < rvalue
        else:
            # Compare the displayed strings; comparing the QModelIndex objects
            # directly would not sort by cell contents.
            return left.data() < right.data()
class FunctionsMatchViewer(idaapi.PluginForm):
def color_lines(self, start, end, color):
address = idaapi.get_imagebase() + start
while address < idaapi.get_imagebase() + end:
idaapi.set_item_color(address, color)
address += ida_bytes.get_item_size(address)
def color_node(self, addresses, bg_color, frame_color = 0x000000):
if len(addresses) <= 0:
return
func = idaapi.get_func(idaapi.get_imagebase() + addresses[0])
flowchart_ = idaapi.FlowChart(func)
address_map = {}
for address in addresses:
address_map[idaapi.get_imagebase() + address] = 1
for code_block in flowchart_:
if not code_block.start_ea in address_map:
continue
node_info = idaapi.node_info_t()
node_info.bg_color = bg_color
node_info.frame_color = frame_color
idaapi.set_node_info(func.start_ea, code_block.id, node_info, idaapi.NIF_BG_COLOR | idaapi.NIF_FRAME_COLOR)
def set_basic_blocks_color(self):
for function_match in self.function_matches:
self.matched_block_color_function_match(function_match)
def tree_view_double_clicked_handler(self, ix):
item = ix.data(QtCore.Qt.UserRole)
idaapi.jumpto(idaapi.get_imagebase() + item.function_match[item.self_name])
commands = {'md5': item.peer_md5, 'list': []}
commands['list'].append(({'name': 'jumpto', 'address': item.function_match[item.peer_name]}))
self_basic_block_addresses = []
peer_basic_block_addresses = []
if 'matches' in item.function_match:
for match_data in item.function_match['matches']:
self_basic_block_addresses.append(match_data[self.self_name])
peer_basic_block_addresses.append(match_data[self.peer_name])
self.color_lines(match_data[self.self_name], match_data[self.self_name+'_end'], self.matched_block_color)
commands['list'].append({'name': 'color_lines', 'start': match_data[self.peer_name], 'end': match_data[self.peer_name+'_end'], 'color': self.matched_block_color})
self.color_node(self_basic_block_addresses, self.matched_block_color)
commands['list'].append({'name': 'color_node', 'addresses': peer_basic_block_addresses, 'bg_color': self.matched_block_color})
if 'unidentified_blocks' in item.function_match:
self_basic_block_addresses = []
for basic_block in item.function_match['unidentified_blocks'][self.self_name+'s']:
self_basic_block_addresses.append(basic_block['start'])
self.color_lines(basic_block['start'], basic_block['end'], self.unidentified_block_color)
self.color_node(self_basic_block_addresses, self.unidentified_block_color)
peer_basic_block_addresses = []
for basic_block in item.function_match['unidentified_blocks'][self.peer_name+'s']:
peer_basic_block_addresses.append(basic_block['start'])
commands['list'].append({'name': 'color_lines', 'start': basic_block['start'], 'end': basic_block['end'], 'color': self.unidentified_block_color})
commands['list'].append({'name': 'color_node', 'addresses': peer_basic_block_addresses, 'bg_color': self.unidentified_block_color})
item.queue.put(commands)
def count_blocks(self, function_match):
matched_block_counts = 0
self_unidentified_block_counts = 0
peer_unidentified_block_counts = 0
if 'matches' in function_match:
matched_block_counts = len(function_match['matches']) * 2
if 'unidentified_blocks' in function_match:
self_unidentified_block_counts += len(function_match['unidentified_blocks'][self.self_name+'s'])
peer_unidentified_block_counts += len(function_match['unidentified_blocks'][self.peer_name+'s'])
counts = {}
counts['matched_block_counts'] = matched_block_counts
counts['self_unidentified_block_counts'] = self_unidentified_block_counts
counts['peer_unidentified_block_counts'] = peer_unidentified_block_counts
return counts
def add_item(self, function_match):
imagebase = idaapi.get_imagebase()
self_address = imagebase + function_match[self.self_name]
counts = self.count_blocks(function_match)
root = self.model.invisibleRootItem()
columns = [
QtGui.QStandardItem(idaapi.get_short_name(self_address)),
QtGui.QStandardItem('%.8x' % self_address),
QtGui.QStandardItem(function_match[self.peer_name+'_name']),
QtGui.QStandardItem('%.8x' % function_match[self.peer_name]),
QtGui.QStandardItem('%d' % counts['matched_block_counts']),
QtGui.QStandardItem('%d' % counts['self_unidentified_block_counts']),
QtGui.QStandardItem('%d' % counts['peer_unidentified_block_counts'])
]
root.appendRow(columns)
class Item:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
item_data = Item(
function_match = function_match,
self_name = self.self_name,
peer_name = self.peer_name,
peer_md5 = self.peer_md5,
queue = self.queue
)
for column_item in columns:
column_item.setData(item_data, QtCore.Qt.UserRole)
def add_items(self, function_matches, self_name, peer_name, peer_md5, matched_block_color, unidentified_block_color):
self.matched_block_color = matched_block_color
self.unidentified_block_color = unidentified_block_color
self.function_matches = function_matches
self.self_name = self_name
self.peer_name = peer_name
self.peer_md5 = peer_md5
for function_match in self.function_matches:
self.add_item(function_match)
self.tree_view.setRootIsDecorated(False)
self.tree_view.setColumnWidth(0, 100)
self.tree_view.setColumnWidth(1, 50)
self.tree_view.setColumnWidth(2, 100)
self.tree_view.setColumnWidth(3, 50)
self.tree_view.setColumnWidth(4, 30)
self.tree_view.setColumnWidth(5, 30)
self.tree_view.setColumnWidth(6, 30)
def search_input_changed(self, text):
self.proxy_model.setFilterWildcard(text)
def OnCreate(self, form):
self.parent = idaapi.PluginForm.FormToPyQtWidget(form)
self.columns = ("Source", "Address", "Target", "Address", "Matched", "Removed", "Added")
self.tree_view = QtWidgets.QTreeView()
self.tree_view.setSortingEnabled(True)
self.tree_view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tree_view.doubleClicked.connect(self.tree_view_double_clicked_handler)
self.item_map = {}
self.model = QtGui.QStandardItemModel(self.tree_view)
self.model.setHorizontalHeaderLabels(self.columns)
self.proxy_model = NumberSortModel(self.tree_view)
self.proxy_model.setSourceModel(self.model)
self.tree_view.setModel(self.proxy_model)
self.search_input = QtWidgets.QLineEdit()
self.search_input.textChanged.connect(self.search_input_changed)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.tree_view)
layout.addWidget(self.search_input)
self.parent.setLayout(layout)
self.queue = Queue(maxsize=0)
worker = Thread(target=sync_worker, args=(self.queue,))
worker.setDaemon(True)
worker.start()
def Show(self, title):
return idaapi.PluginForm.Show(self, title, options = idaapi.PluginForm.FORM_PERSIST)
if __name__ == "__main__":
form = FunctionsMatchViewer()
form.Show("Function Matches")
form.AddTestItems()
| [
"[email protected]"
] | |
bc687d5bb4cf86f031a3ecd8470bf3c53f0497b8 | 4fd3f6c6ce06199d554101f796c0f6fc7eca074f | /0x04-python-more_data_structures/4-only_diff_elements.py | 383927b3db341ed3619e6a785f0868335cd45a56 | [] | no_license | Joldiazch/holbertonschool-higher_level_programming | 64f453aaf492b5473319a1b5e7e338bc7964fa7b | c9127882ffed3b72b2a517824770adafa63a9042 | refs/heads/master | 2020-09-29T03:12:47.497695 | 2020-05-15T04:05:13 | 2020-05-15T04:05:13 | 226,935,286 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | #!/usr/bin/python3
def only_diff_elements(set_1, set_2):
return set_1 - set_2 | set_2 - set_1
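
# Example (added illustration): only_diff_elements({1, 2, 3}, {3, 4})
# returns {1, 2, 4}, i.e. the symmetric difference of the two sets.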
| [
"[email protected]"
] | |
fc9d27bcb01c7fe4e3ef1115a053ef8ac3b732cd | 1925c535d439d2d47e27ace779f08be0b2a75750 | /microsoft/implement_rand10_with_rand7.py | 0f89680adba0923d2798aa8ebf8bb297ca0fc640 | [] | no_license | arthurDz/algorithm-studies | ee77d716041671c4b8bb757d8d96f3d10b6589f7 | 1e4d23dd0c40df34f58d71c7ca3e6491be732075 | refs/heads/master | 2023-04-27T12:17:06.209278 | 2021-04-30T20:16:18 | 2021-04-30T20:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | # Given a function rand7 which generates a uniform random integer in the range 1 to 7, write a function rand10 which generates a uniform random integer in the range 1 to 10.
# Do NOT use system's Math.random().
# Example 1:
# Input: 1
# Output: [7]
# Example 2:
# Input: 2
# Output: [8,4]
# Example 3:
# Input: 3
# Output: [8,1,10]
# Note:
# rand7 is predefined.
# Each testcase has one argument: n, the number of times that rand10 is called.
# Follow up:
# What is the expected value for the number of calls to rand7() function?
# Could you minimize the number of calls to rand7()?
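
# One possible answer to the follow-up (an added sketch, not part of the
# original solution): accept the whole 1..40 band instead of only 1..10,
# which drops the expected number of rand7() calls to about 2 * 49/40 ~= 2.45
# per rand10() call.
def rand10_rejection():
    while True:
        num = (rand7() - 1) * 7 + rand7()  # uniform over 1..49
        if num <= 40:
            return num % 10 + 1  # maps 1..40 uniformly onto 1..10
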
def rand10(self):
temp = rand7() + (rand7() - 1) * 7
while temp > 10:
temp = rand7() + (rand7() - 1) * 7
return temp | [
"[email protected]"
] | |
fc27042eaae21fea6ee015e954980fd672a2c584 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.11.40/8/1569575464.py | 03f7325643668c7c922036efc5b29701c3522051 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,719 | py | import functools
import typing
import string
import random
import pytest
class Leaf0:
def __init__ (self, value):
self.value = value
class Node0:
def __init__ (self, left, right, value=None):
self.value = value
self.left = left
self.right = right
## Solution, part 1.
class Leaf(Leaf0):
def __init__(self, *args):
super().__init__(*args)
def preorder(self) -> list:
"""
Returns a list of the leaf in preorder without any None values.
"""
        return [self.value]
def postorder(self) -> list:
"""
Returns a list of the leaf in postorder without any None values.
"""
        return [self.value]
class Node(Node0):
def __init__(self, *args):
super().__init__(*args)
def preorder(self) -> list:
"""
Returns a list of the node in preorder without any None values.
"""
ls = []
if self.value:
ls.append(self.value)
if self.left:
ls += self.left.preorder()
if self.right:
ls += self.right.preorder()
return ls
def postorder(self) -> list:
"""
Returns a list of the node in postorder without any None values.
"""
ls = []
        if self.left:
            ls += self.left.postorder()
        if self.right:
            ls += self.right.postorder()
if self.value:
ls.append(self.value)
return ls
######################################################################
## Solution, part 2.
def test_tree():
assert Node (Leaf(1), Leaf(2), 3).postorder() == [1, 2, 3]
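    # Added illustration: the same tree in preorder (root before children).
    assert Node(Leaf(1), Leaf(2), 3).preorder() == [3, 1, 2]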
######################################################################
| [
"[email protected]"
] | |
7e4e28d2c13d17fdd64f8dd33933b84f8a9c95db | cbcdf195338307b0c9756549a9bffebf3890a657 | /django-stubs/core/cache/backends/base.pyi | 52f2910b56950d0d0b50af70cb6a198f97a8879f | [
"MIT"
] | permissive | mattbasta/django-stubs | bc482edf5c6cdf33b85005c2638484049c52851b | 8978ad471f2cec0aa74256fe491e2e07887f1006 | refs/heads/master | 2020-04-27T08:38:22.694104 | 2019-03-06T09:05:08 | 2019-03-06T09:05:24 | 174,178,933 | 1 | 0 | MIT | 2019-03-06T16:18:01 | 2019-03-06T16:18:00 | null | UTF-8 | Python | false | false | 2,590 | pyi | from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Union
from django.core.exceptions import ImproperlyConfigured
class InvalidCacheBackendError(ImproperlyConfigured): ...
class CacheKeyWarning(RuntimeWarning): ...
DEFAULT_TIMEOUT: Any
MEMCACHE_MAX_KEY_LENGTH: int
def default_key_func(key: Union[int, str], key_prefix: str, version: Union[int, str]) -> str: ...
def get_key_func(key_func: Optional[Union[Callable, str]]) -> Callable: ...
class BaseCache:
default_timeout: int = ...
key_prefix: str = ...
version: int = ...
key_func: Callable = ...
def __init__(self, params: Dict[str, Optional[Union[Callable, Dict[str, int], int, str]]]) -> None: ...
def get_backend_timeout(self, timeout: Any = ...) -> Optional[float]: ...
def make_key(self, key: Union[int, str], version: Optional[Union[int, str]] = ...) -> str: ...
def add(self, key: Any, value: Any, timeout: Any = ..., version: Optional[Any] = ...) -> None: ...
def get(self, key: Any, default: Optional[Any] = ..., version: Optional[Any] = ...) -> Any: ...
def set(self, key: Any, value: Any, timeout: Any = ..., version: Optional[Any] = ...) -> None: ...
def touch(self, key: Any, timeout: Any = ..., version: Optional[Any] = ...) -> None: ...
def delete(self, key: Any, version: Optional[Any] = ...) -> None: ...
def get_many(self, keys: List[str], version: Optional[int] = ...) -> Dict[str, Union[int, str]]: ...
def get_or_set(
self, key: str, default: Optional[Union[Callable, int, str]], timeout: Any = ..., version: Optional[int] = ...
) -> Optional[Union[int, str]]: ...
def has_key(self, key: Any, version: Optional[Any] = ...): ...
def incr(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
def decr(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
def __contains__(self, key: str) -> bool: ...
def set_many(
self,
data: Union[Dict[str, bytes], Dict[str, int], Dict[str, str], OrderedDict],
timeout: Any = ...,
version: Optional[Union[int, str]] = ...,
) -> List[Any]: ...
def delete_many(self, keys: Union[Dict[str, str], List[str]], version: None = ...) -> None: ...
def clear(self) -> None: ...
def validate_key(self, key: str) -> None: ...
def incr_version(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
def decr_version(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
def close(self, **kwargs: Any) -> None: ...
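
# Illustrative use of the runtime API these stubs describe (a sketch, not part
# of the stub definitions):
#     from django.core.cache import cache
#     cache.set("greeting", "hello", timeout=30)
#     cache.get_or_set("visits", 0)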
| [
"[email protected]"
] | |
0165d25c1c0c68a71343c15d575f22e270017e69 | e29734c2b3543a05a28b6bc460c3248ea37aaf5c | /apps/course/migrations/0015_auto_20190424_1717.py | 36961cbabe8320fc898752c336f25bbec6d02e5d | [] | no_license | simida0755/PopularBlogs | fda6dbe06751dde013ba57f73c708fd7106a49ee | 3a86989232206d0727223306c0e2d2c62d35fa9b | refs/heads/master | 2020-05-21T15:54:09.853341 | 2019-05-13T02:15:28 | 2019-05-13T02:15:28 | 186,101,555 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # Generated by Django 2.0.2 on 2019-04-24 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0014_auto_20190424_1716'),
]
operations = [
migrations.AlterField(
model_name='course',
name='image',
field=models.ImageField(null=True, upload_to='courses/%Y/%m', verbose_name='封面图'),
),
]
| [
"[email protected]"
] | |
82bbbe9b40505d1dc9a8185d84f30a264647c3a4 | 6c90a52d5be46fe4bd920acef07b2e53d2e4b42c | /runner.py | 52b14e24661e02f9b9948a2cb41441de6ec05b45 | [] | no_license | amoretti86/supervind | fb3f335f0400011af937fc0e5d29e98688ed885c | 6444b88acf0c51e32b54206619cb6bcb438bdd26 | refs/heads/master | 2021-04-26T23:25:05.347404 | 2018-03-05T04:58:46 | 2018-03-05T04:58:46 | 123,989,516 | 0 | 0 | null | 2018-03-05T22:55:18 | 2018-03-05T22:55:18 | null | UTF-8 | Python | false | false | 12,926 | py | # Copyright 2018 Daniel Hernandez Diaz, Columbia University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import pickle
import numpy as np
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from code.LatEvModels import LocallyLinearEvolution
from code.ObservationModels import PoissonObs, GaussianObs
from code.Optimizer_VAEC import Optimizer_TS
from code.datetools import addDateTime
DTYPE = tf.float32
# CONFIGURATION
RUN_MODE = 'train' # ['train', 'generate']
# DIRECTORIES, SAVE FILES, ETC
LOCAL_ROOT = "./"
LOCAL_DATA_DIR = "./data/"
THIS_DATA_DIR = 'poisson_data_002/'
LOCAL_RLT_DIR = "./rslts/"
LOAD_CKPT_DIR = "" # TODO:
SAVE_DATA_FILE = "datadict"
SAVE_TO_VIND = False
IS_PY2 = True
# MODEL/OPTIMIZER ATTRIBUTES
LAT_MOD_CLASS = 'llinear'
GEN_MOD_CLASS = 'Poisson' # ['Gaussian', 'Poisson']
YDIM = 10
XDIM = 2
NNODES = 60
ALPHA = 0.3
INITRANGE_MUX = 0.3
INITRANGE_LAMBDAX = 2.0
INITRANGE_B = 3.0
INITRANGE_OUTY = 3.0
INIT_Q0 = 0.4
INIT_Q = 1.0
INITRANGE_GOUTMEAN = 0.03
INITRANGE_GOUTVAR = 1.0
INITBIAS_GOUTMEAN = 1.0
# TRAINING PARAMETERS
LEARNING_RATE = 2e-3
# GENERATION PARAMETERS
NTBINS = 30
NSAMPS = 100
DRAW_HEAT_MAPS = True
flags = tf.app.flags
flags.DEFINE_string('mode', RUN_MODE, "The mode in which to run. Can be ['train', 'generate']")
flags.DEFINE_string('local_root', LOCAL_ROOT, "The root directory of supervind.")
flags.DEFINE_string('local_data_dir', LOCAL_DATA_DIR, "The directory that stores all the datasets")
flags.DEFINE_string('local_rlt_dir', LOCAL_RLT_DIR, "The directory that stores all the results")
flags.DEFINE_string('this_data_dir', THIS_DATA_DIR, ("For the 'generate' mode, the directory that shall "
"store this dataset"))
flags.DEFINE_string('save_data_file', SAVE_DATA_FILE, ("For the 'generate' mode, the name of the file "
"to store the data"))
flags.DEFINE_string('load_data_file', LOAD_CKPT_DIR, ("For the 'train' mode, the directory storing "
"`tf` checkpoints."))
flags.DEFINE_boolean('save_to_vind', SAVE_TO_VIND, ("Should the data be saved in a format that can be "
"read by the old theano code"))
flags.DEFINE_boolean('is_py2', IS_PY2, "Was the data pickled in python 2?")
flags.DEFINE_integer('xDim', XDIM, "The dimensionality of the latent space")
flags.DEFINE_integer('yDim', YDIM, "The dimensionality of the data")
flags.DEFINE_string('lat_mod_class', LAT_MOD_CLASS, ("The evolution model class. Implemented "
"['llinear']"))
flags.DEFINE_string('gen_mod_class', GEN_MOD_CLASS, ("The generative model class. Implemented "
"['Poisson, Gaussian']"))
flags.DEFINE_float('alpha', ALPHA, ("The scale factor of the nonlinearity. This parameters "
"works in conjunction with initrange_B"))
flags.DEFINE_float('initrange_MuX', INITRANGE_MUX, ("Controls the initial ranges within "
"which the latent space paths are contained. Bigger "
"values here lead to bigger bounding box. It is im-"
"portant to adjust this parameter so that the initial "
"paths do not collapse nor blow up."))
flags.DEFINE_float('initrange_LambdaX', INITRANGE_LAMBDAX, ("Controls the initial ranges within "
"which the latent space paths are contained. Roughly "
"rangeX ~ 1/(Lambda + Q), so if Lambda very big, the "
"range is reduced. If Lambda very small, then it defers "
"to Q. Optimally Lambda ~ Q ~ 1."))
flags.DEFINE_float('initrange_B', INITRANGE_B, ("Controls the initial size of the nonlinearity. "
"Works in conjunction with alpha"))
flags.DEFINE_float('initrange_outY', INITRANGE_OUTY, ("Controls the initial range of the output of the "
"generative network"))
flags.DEFINE_float('init_Q0', INIT_Q0, ("Controls the initial spread of the starting points of the "
"paths in latent space."))
flags.DEFINE_float('init_Q', INIT_Q, ("Controls the initial noise added to the paths in latent space. "
"More importantly, it also controls the initial ranges within "
"which the latent space paths are contained. Roughly rangeX ~ "
"1/(Lambda + Q), so if Q is very big, the range is reduced. If "
"Q is very small, then it defers to Lambda. Optimally "
"Lambda ~ Q ~ 1."))
flags.DEFINE_float('initrange_Goutmean', INITRANGE_GOUTMEAN, "")
flags.DEFINE_float('initrange_Goutvar', INITRANGE_GOUTVAR, "")
flags.DEFINE_float('initbias_Goutmean', INITBIAS_GOUTMEAN, "")
flags.DEFINE_float('learning_rate', LEARNING_RATE, "It's the learning rate, silly")
flags.DEFINE_integer('genNsamps', NSAMPS, "The number of samples to generate")
flags.DEFINE_integer('genNTbins', NTBINS, "The number of time bins in the generated data")
flags.DEFINE_boolean('draw_heat_maps', DRAW_HEAT_MAPS, "Should I draw heat maps of your data?")
params = tf.flags.FLAGS
def write_option_file(path):
"""
    Writes a file with the parameters that were used for this fit, because - no
    doubt - you will forget them, Daniel Hernandez.
"""
params_list = sorted([param for param in dir(params) if param
not in ['h', 'help', 'helpfull', 'helpshort']])
with open(path + 'params.txt', 'w') as option_file:
for par in params_list:
option_file.write(par + ' ' + str(getattr(params, par)) + '\n')
def generate_fake_data(lat_mod_class, gen_mod_class, params,
data_path=None,
save_data_file=None,
Nsamps=100,
NTbins=30,
write_params_file=False,
draw_quiver=False,
draw_heat_maps=True,
savefigs=False):
"""
Generates synthetic data and possibly pickles it for later use. Maybe you
would like to train a model? ;)
Args:
lat_mod_class: A string that is a key to the evolution model class. Currently
'llinear' -> `LocallyLinearEvolution` is implemented.
gen_mod_class: A string that is a key to the observation model class. Currently
'Poisson' -> `PoissonObs` is implemented
data_path: The local directory where the generated data should be stored. If None,
don't store shit.
save_data_file: The name of the file to hold your data
Nsamps: Number of trials to generate
NTbins: Number of time steps to run.
        xDim: The dimensions of the latent space (taken from `params`).
        yDim: The dimensions of the data (taken from `params`).
write_params_file: Would you like the parameters with which this data has been
generated to be saved to a separate txt file?
"""
print('Generating some fake data...!\n')
lat_mod_classes = {'llinear' : LocallyLinearEvolution}
gen_mod_classes = {'Poisson' : PoissonObs, 'Gaussian' : GaussianObs}
evolution_class = lat_mod_classes[lat_mod_class]
generator_class = gen_mod_classes[gen_mod_class]
if data_path:
if not type(save_data_file) is str:
raise ValueError("`save_data_file` must be string (representing the name of your file) "
"if you intend to save the data (`data_path` is not None)")
if not os.path.exists(data_path): os.makedirs(data_path)
if write_params_file:
write_option_file(data_path)
# Generate some fake data for training, validation and test
graph = tf.Graph()
with graph.as_default():
with tf.Session() as sess:
xDim = params.xDim
yDim = params.yDim
if not Nsamps: Nsamps = params.genNsamps
if not NTbins: NTbins = params.genNTbins
X = tf.placeholder(DTYPE, shape=[None, None, xDim], name='X')
Y = tf.placeholder(DTYPE, shape=[None, None, yDim], name='Y')
latm = evolution_class(X, params)
genm = generator_class(Y, X, params, latm, is_out_positive=True)
Nsamps_train = int(4*Nsamps/5)
valid_test = int(Nsamps/10)
sess.run(tf.global_variables_initializer())
Ydata, Xdata = genm.sample_XY(sess, 'X:0', Nsamps=Nsamps, NTbins=NTbins,
with_inflow=True)
Ytrain, Xtrain = Ydata[:Nsamps_train], Xdata[:Nsamps_train]
Yvalid, Xvalid = Ydata[Nsamps_train:-valid_test], Xdata[Nsamps_train:-valid_test]
            Ytest, Xtest = Ydata[-valid_test:], Xdata[-valid_test:]
# If xDim == 2, draw a cool path plot
if draw_quiver and xDim == 2:
latm.plot_2Dquiver_paths(sess, Xdata, 'X:0', rlt_dir=data_path,
with_inflow=True, savefig=savefigs)
if draw_heat_maps:
maxY = np.max(Ydata)
for i in range(1):
plt.figure()
sns.heatmap(Ydata[i].T, yticklabels=False, vmax=maxY).get_figure()
if savefigs:
plt.savefig(data_path + "heat" + str(i) + ".png")
else:
plt.show()
plt.pause(0.001)
input('Press Enter to continue.')
plt.close()
if data_path:
datadict = {'Ytrain' : Ytrain, 'Yvalid' : Yvalid, 'Xtrain' : Xtrain, 'Xvalid' : Xvalid,
'Ytest' : Ytest, 'Xtest' : Xtest}
with open(data_path + save_data_file, 'wb+') as data_file:
pickle.dump(datadict, data_file)
if params.save_to_vind:
with open(data_path + save_data_file + '_vind', 'wb+') as data_file:
pickle.dump(datadict, data_file, protocol=2)
return Ydata, Xdata
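
# Programmatic use (a sketch; the values are illustrative and `params` comes
# from the tf.app flags defined above):
#     Ydata, Xdata = generate_fake_data('llinear', 'Poisson', params,
#                                       data_path='./data/demo/',
#                                       save_data_file='datadict')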
def main(_):
"""
Launches this whole zingamajinga.
"""
data_path = params.local_data_dir + params.this_data_dir
rlt_dir = params.local_rlt_dir + params.this_data_dir + addDateTime() + '/'
if params.mode == 'generate':
generate_fake_data(lat_mod_class=params.lat_mod_class,
gen_mod_class=params.gen_mod_class,
params=params,
data_path=data_path,
save_data_file=params.save_data_file,
Nsamps=params.genNsamps,
NTbins=params.genNTbins,
write_params_file=True,
draw_quiver=True,
draw_heat_maps=True,
savefigs=True)
if params.mode == 'train':
graph = tf.Graph()
with graph.as_default():
sess = tf.Session(graph=graph)
with sess.as_default():
with open(data_path+params.save_data_file, 'rb+') as f:
# Set encoding='latin1' for python 2 pickled data
datadict = pickle.load(f, encoding='latin1') if params.is_py2 else pickle.load(f)
Ytrain = datadict['Ytrain']
Yvalid = datadict['Yvalid']
params.yDim = Ytrain.shape[-1]
write_option_file(data_path)
opt = Optimizer_TS(params)
sess.run(tf.global_variables_initializer())
opt.train(sess, rlt_dir, Ytrain, Yvalid)
if __name__ == '__main__':
tf.app.run()
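
# Example invocations (a sketch; directory names and values are illustrative):
#   python runner.py --mode=generate --this_data_dir=poisson_data_002/ --genNsamps=100
#   python runner.py --mode=train --this_data_dir=poisson_data_002/ --learning_rate=2e-3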
| [
"[email protected]"
] | |
2b9bee86ebd1b08f2a0f0400abf395c09608c7e8 | 5de3f612df0dbda712b39403dbafb0617e597651 | /build/pal_behaviour_msgs/catkin_generated/pkg.installspace.context.pc.py | 8706a70930366093c2aaea8520ef1c40fd260a4a | [] | no_license | AdriiTrujillo/tiago_public_ws | 1bd62d51c2eb694d07db83738f7bebd582d8126c | 6eaeabd1ec177df837b81fd9f42887318128766b | refs/heads/main | 2023-04-03T13:09:09.749190 | 2021-04-01T10:05:43 | 2021-04-01T10:05:43 | 350,026,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pal_behaviour_msgs"
PROJECT_SPACE_DIR = "/home/adrii/tiago_public_ws/install"
PROJECT_VERSION = "0.13.2"
| [
"[email protected]"
] | |
7a7b7ac0f8c14cc1a4756aa69a85c707f0d0cb51 | 2826bdf11463b199f20be351f514bcb16f35d04e | /.history/ftp_20210407055256.py | b44e9d312cd6276dfc7c23b78b965740f32bf6a1 | [] | no_license | Roarcannotprogramming/Sec_Client_Server | 9efdb7e4c3e729cd6b5052b0ca0e23c14459ebc0 | 38f491e0e643e372c546eca73f9cf16c36513568 | refs/heads/master | 2023-04-11T12:40:12.780834 | 2021-04-17T15:53:47 | 2021-04-17T15:53:47 | 353,070,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,984 | py | import socket, ssl, os, sys
"""
00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
0x00 | version | hb | request | unused | path length |
0x04 | package length (1) | package length (2) |
0x08 | package length (3) | package length (4) |
0x0c | unused | unused |
"""
class ProtocalError(Exception):
pass
class FtpProtocol:
MAGIC = b'v1me'
# Requests
GET_FILE_LIST = 1
GET_FILE = 2
POST_FILE = 3
GET_CWD = 4
CHANGE_CWD = 5
MAKE_DIR = 6
DEL_FILE = 7
TRANS_ERROR = 8
# Max length of single content is 16M
CONTENT_MAX_LENGTH = 0xfffff0
HEADER_LEN = 0x10
BASE_PATH = '/FILES'
def __init__(self, ssock, version=1):
if version != 1:
raise ProtocalError("Version error")
if not isinstance(ssock, ssl.SSLSocket):
raise ProtocalError("Socket type error")
self.version = version
self.ssock = ssock
self.request = 0
self.hb = False
        self.root = ''
self.current_recv = b''
def get_file_list(self, path):
assert(isinstance(path, bytes))
self.request = self.GET_FILE_LIST
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def get_file(self, path):
assert(isinstance(path, bytes))
self.request = self.GET_FILE
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def post_file(self, path, file_path = None, file_content = None):
        if (file_path is None) == (file_content is None):
            raise ProtocalError("Provide exactly one of file_path or file_content")
assert(isinstance(path, bytes))
self.request = self.POST_FILE
self.path = path
self.path_len = len(path)
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
if file_path:
self.package_len = self.HEADER_LEN + self.path_len + os.path.getsize(file_path)
self.content = b''
with open(file_path, 'rb') as f:
self.__send(self.__pack())
while True:
s = f.read(self.CONTENT_MAX_LENGTH)
if not s:
break
self.__send(s)
if file_content:
self.package_len = self.HEADER_LEN + self.path_len + len(file_content)
self.content = file_content
self.__send(self.__pack())
def get_cwd(self):
self.request = self.GET_CWD
self.path = b''
self.path_len = 0
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
self.__send(self.__pack())
def change_cwd(self, path):
assert(isinstance(path, bytes))
self.request = self.CHANGE_CWD
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def make_dir(self, path):
assert(isinstance(path, bytes))
self.request = self.MAKE_DIR
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def del_file(self, path):
assert(isinstance(path, bytes))
self.request = self.DEL_FILE
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def server_deal(self):
while True:
header = self.__recv(self.HEADER_LEN)
self.version , self.hb, self.request, self.path_len, self.package_len = self.__check_format(header)
if self.hb:
self.path_len = 0
self.package_len = self.HEADER_LEN
self.path = b''
self.content = b''
# return self.__send(self.__pack())
return 0
if self.request == self.GET_FILE_LIST:
self.path = self.__recv(self.path_len)
self.content = self.__recv(self.package_len - self.HEADER_LEN - self.path_len)
try:
p = self.__os_check_path(self.path)
                    self.content = '\n'.join(os.listdir(p)).encode()
return self.__send(self.__pack())
except Exception:
                    self.content = b'Invalid path'
self.request = self.TRANS_ERROR
return self.__send(self.__pack())
if self.request == self.GET_FILE:
self.path = self.__recv(self.path_len)
self.content = self.__recv(self.package_len - self.HEADER_LEN - self.path_len)
try:
p = self.__os_check_path(self.path)
with open(p, 'rb') as f:
self.__send(self.__pack())
while True:
s = f.read(self.CONTENT_MAX_LENGTH)
if not s:
break
self.content = s
self.__send(s)
return 1
except Exception:
                    self.content = b'Invalid path'
self.request = self.TRANS_ERROR
return self.__send(self.__pack())
if self.request == self.POST_FILE:
self.path = self.__recv(self.path_len)
# TODO
                self.content = self.__recv(self.package_len - self.HEADER_LEN - self.path_len)
try:
p = self.__os_check_path(self.path)
with open(p, 'wb+') as f:
f.write(self.content)
self.content = b''
return self.__send(self.__pack())
except Exception:
                    self.content = b'Invalid path'
self.request = self.TRANS_ERROR
return self.__send(self.__pack())
    def __os_check_path(self, path):
        # Normalize, anchor under BASE_PATH, and reject traversal above it.
        p = os.path.normpath(path.decode()).lstrip('/')
        if p.startswith('..'):
            raise ProtocalError('Invalid path')
        return os.path.join(self.BASE_PATH, self.root, p)
def __check_format(self, pack):
version = pack[0] & 7
hb = (pack[0] >> 3) & 1
request = pack[0] >> 4
path_len = pack[2] + (pack[3] << 8)
package_len = pack[4] + (pack[5] << 8) + (pack[6] << 16) + (pack[7] << 24) + (pack[8] << 32) + (pack[9] << 40) + (pack[10] << 48) + (pack[11] << 56)
if version != 1:
raise ProtocalError("Version error")
if request not in range(1, 8):
raise ProtocalError("Request error")
if path_len < 0:
raise ProtocalError("Path error")
if package_len < 0:
raise ProtocalError("Package error")
return version, hb, request, path_len, package_len
def __pack(self):
self.path_len = len(self.path)
self.package_len = self.HEADER_LEN + self.path_len + len(self.content)
p = bytes([(self.version & 7) | (self.hb << 3) | (self.request << 4), 0,
self.path_len & 0xff, (self.path_len >> 8) & 0xff,
self.package_len & 0xff, (self.package_len >> 8) & 0xff,
(self.package_len >> 16) & 0xff, (self.package_len >> 24) & 0xff,
(self.package_len >> 32) & 0xff, (self.package_len >> 40) & 0xff,
(self.package_len >> 48) & 0xff, (self.package_len >> 56) & 0xff,
0, 0, 0, 0])
p += self.path
p += self.content
return p
def __send(self, pack):
self.ssock.send(pack)
"""
print(pack)
path_len = pack[2] + (pack[3] << 8)
package_len = pack[4] + (pack[5] << 8) + (pack[6] << 16) + (pack[7] << 24) + (pack[8] << 32) + (pack[9] << 40) + (pack[10] << 48) + (pack[11] << 56)
request = pack[0] >> 4
print("package_len: ", package_len)
print("path_len: ", path_len)
print("content_len: ", package_len - path_len - self.HEADER_LEN)
print("path: ", pack[self.HEADER_LEN: self.HEADER_LEN + path_len])
print("content: ", pack[self.HEADER_LEN + path_len:])
"""
return 1
def __recv(self, length):
current_len = len(self.current_recv)
while True:
s = self.ssock.recv(length - current_len)
current_len += len(s)
self.current_recv = self.current_recv + s
if current_len == length:
current_len = 0
ss = self.current_recv
self.current_recv = b''
return ss
if current_len > length:
raise ProtocalError("Length error")
# FtpProtocol(0).post_file(b'/root/admin/user/pwn', b'CA.key')
# client
def client():
CA_FILE = "CA.crt"
KEY_FILE = "Client.key"
CERT_FILE = "Client.crt"
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.check_hostname = False
context.load_cert_chain(certfile=CERT_FILE, keyfile=KEY_FILE)
context.load_verify_locations(CA_FILE)
context.verify_mode = ssl.CERT_REQUIRED
with socket.socket() as sock:
with context.wrap_socket(sock, server_side=False) as ssock:
ssock.connect(('127.0.0.1', 5678))
ftp = FtpProtocol(ssock)
ftp.get_cwd()
msg = ssock.recv(1024).decode("utf-8")
print(f"receive msg from server : {msg}")
ssock.close()
def server():
CA_FILE = "CA.crt"
KEY_FILE = "Server.key"
CERT_FILE = "Server.crt"
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain(certfile=CERT_FILE, keyfile=KEY_FILE)
context.load_verify_locations(CA_FILE)
context.verify_mode = ssl.CERT_REQUIRED
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
with context.wrap_socket(sock, server_side=True) as ssock:
ssock.bind(('127.0.0.1', 5678))
ssock.listen(5)
while True:
client_socket, addr = ssock.accept()
ftp = FtpProtocol(client_socket)
ftp.server_deal()
msg = client_socket.recv(1024).decode("utf-8")
print(f"receive msg from client {addr}:{msg}")
msg = f"yes , you have client_socketect with server.\r\n".encode("utf-8")
client_socket.close()
if __name__ == "__main__":
if sys.argv[1] == "server":
server()
if sys.argv[1] == "client":
client()
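
# Example invocation (a sketch; the CA/Server/Client certificate and key files
# referenced above must already exist next to this script):
#   python ftp.py server
#   python ftp.py client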
| [
"[email protected]"
] | |
d156167ce165ac16bab92f480187ddf3da7430eb | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/b2e3e8c0718142d4cb0387f46cd77c15b67cc1e9-<get_random_string>-bug.py | 9b873176526ac647a2e151598420e0deb76c070d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | def get_random_string(length=8, choices=(string.ascii_letters + string.digits)):
    """Generate random string."""
return ''.join([choice(choices) for i in range(length)]) | [
"[email protected]"
] | |
9d9bec682b8409ccc2d18ac3c64f1c22b5a01199 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2551/60603/312775.py | 3038d6419acf664fc4ed2b489ef7cb65c0727f17 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | def change(a,b):
for i in range(a-1,b):
li[i]=0 if li[i]==1 else 1
def que(a,b):
return sum(li[a-1:b])
n,m = [int(x) for x in input().split()]
li = [0]*n
for i in range(m):
s = [int(x) for x in input().split()]
if s[0]==0:
change(s[1],s[2])
elif s[0]==1:
print(que(s[1],s[2])) | [
"[email protected]"
] | |
b0b638794415687755cbdb2be2b4c90db79e1c55 | bc2cdb1e438efaf67131e975ac4db80b4dc43385 | /src/public/message/migrations/0003_pushmessage.py | a4cc7fb3829a923d5a18ec9f447e1971018bd4f1 | [] | no_license | Shadow-linux/ops-for-study | cf4d55409ebc6f27d454bea60886cd154c994484 | 115b567948d25a64e423a6cdc89bc8337896afe2 | refs/heads/master | 2023-01-14T13:35:56.880896 | 2019-09-23T05:01:31 | 2019-09-23T05:01:31 | 209,781,758 | 2 | 0 | null | 2023-01-04T10:55:45 | 2019-09-20T12:08:11 | Python | UTF-8 | Python | false | false | 1,062 | py | # Generated by Django 2.0.1 on 2019-04-17 21:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('message', '0002_auto_20190416_1144'),
]
operations = [
migrations.CreateModel(
name='PushMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='str; 标题', max_length=100)),
('content', models.TextField(help_text='str; 消息内容')),
('user_id_list', models.CharField(help_text='str; 用户ID', max_length=500)),
('send_type_list', models.CharField(help_text='str; 发送消息类型', max_length=500)),
('created', models.DateTimeField(auto_now_add=True, help_text='str; 创建时间')),
],
options={
'verbose_name': '消息推送',
'db_table': 'common_push_message',
},
),
]
| [
"[email protected]"
] | |
4ee5c7635d1d388cb4d468d7dc04515ac9df2ccd | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/v1_0/personalcontacts_v1_0/azext_personalcontacts_v1_0/vendored_sdks/personalcontacts/aio/_personal_contacts.py | 0c6cf4d05bbae3c59e563ab9a028bb2e8874efa7 | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 3,990 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import PersonalContactsConfiguration
from .operations import UsersOperations
from .operations import UsersContactFoldersOperations
from .operations import UsersContactFoldersContactsOperations
from .operations import UsersContactsOperations
from .. import models
class PersonalContacts(object):
"""PersonalContacts.
:ivar users: UsersOperations operations
:vartype users: personal_contacts.aio.operations.UsersOperations
:ivar users_contact_folders: UsersContactFoldersOperations operations
:vartype users_contact_folders: personal_contacts.aio.operations.UsersContactFoldersOperations
:ivar users_contact_folders_contacts: UsersContactFoldersContactsOperations operations
:vartype users_contact_folders_contacts: personal_contacts.aio.operations.UsersContactFoldersContactsOperations
:ivar users_contacts: UsersContactsOperations operations
:vartype users_contacts: personal_contacts.aio.operations.UsersContactsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param top: Show only the first n items.
:type top: int
:param skip: Skip the first n items.
:type skip: int
:param search: Search items by search phrases.
:type search: str
:param filter: Filter items by property values.
:type filter: str
:param count: Include count of items.
:type count: bool
:param str base_url: Service URL
"""
def __init__(
self,
credential: "AsyncTokenCredential",
top: Optional[int] = None,
skip: Optional[int] = None,
search: Optional[str] = None,
filter: Optional[str] = None,
count: Optional[bool] = None,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://graph.microsoft.com/v1.0'
self._config = PersonalContactsConfiguration(credential, top, skip, search, filter, count, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.users = UsersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.users_contact_folders = UsersContactFoldersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.users_contact_folders_contacts = UsersContactFoldersContactsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.users_contacts = UsersContactsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "PersonalContacts":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
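
# Minimal usage sketch (credential acquisition via azure-identity is an
# assumption, not part of this module):
#
#     from azure.identity.aio import DefaultAzureCredential
#
#     async def show_contacts():
#         async with PersonalContacts(credential=DefaultAzureCredential(), top=10) as client:
#             ...  # call operations on client.users_contacts, etc.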
| [
"[email protected]"
] | |
e742907f523101322df4966977e82fafc1446f34 | 2c4ad0e41e495b1be29ac54f3552f5a4bcfb8d8b | /apps/comments/views.py | 0c004c9e007472a3dba19ad976acbe6ce31052d7 | [] | no_license | buzzzzx/blogforzly | 7de8f01e767e01f30d7dab8ffb2243484de24f4a | 163a26c7518ed13c7f3a58cd12d455748b60ab6d | refs/heads/master | 2022-03-09T14:43:00.098795 | 2019-08-06T13:13:08 | 2019-08-06T13:13:08 | 114,436,672 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | from django.shortcuts import render, get_object_or_404, redirect
from .models import Comment
from .forms import CommentForm
from blog.models import Post
from utils.send_email import send
# Create your views here.
def post_comment(request, post_pk):
post = get_object_or_404(Post, pk=post_pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
# comment.text = form.text
comment.save()
# send email
send(flag=1, nickname=comment.name, semail=comment.email, text=comment.text, postname=comment.post.title)
return redirect(post)
else:
comment_list = post.comment_set.all()
context = {
'post': post,
'form': form,
'comment_list': comment_list
}
return render(request, 'blog/detail.html', context=context)
return redirect(post)
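
# A possible URL wiring for this view (a sketch; the route and names are
# assumptions):
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('comments/post/<int:post_pk>/', views.post_comment, name='post_comment'),
#     ]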
| [
"[email protected]"
] | |
1cdc2bcc69cfb9db96d5c781083c1bc817ff9f01 | 387cf5f72ed6679a4d9e04bddd16998a190c4caf | /problems/programmers/lv4/pgs-12983-wrong.py | 69760552fe4acfa3898004c7c8b095f9f458bbe3 | [] | no_license | CodyBuilder-dev/Algorithm-Coding-Test | db4ee1e7565fbcef3140192225167eff42ad5c02 | cca5c4ba8bc31679ab00aceccfd8d9d39c232f72 | refs/heads/master | 2021-07-24T00:34:41.888289 | 2021-07-21T14:29:00 | 2021-07-21T14:29:00 | 219,123,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | """
Title: Word Puzzle
Idea: DP, similar to the coin-change problem
 (1) Storing the DP values
 - the key is: using words up to the i-th entry of strs, build t through its j-th position
 - "through the j-th position" vs. "exactly the j-th position"?
 - the value is the minimum count
 (2) Initialization
 - dp[0][0] through dp[0][len(t)-1] can be initialized directly
 (3) Recurrence
 - is dp[i][j] = min(dp[i-1][j], dp[i][j-k]) (k = the lengths of the elements in strs)?
"""
from math import inf
def solution(strs, t):
dp = [[inf]*len(t) for _ in range(len(strs))]
for i in range(len(t)):
print(strs[0]*(i+1))
dp[0][i] = t[:i+1].count(strs[0]*(i+1))
return dp
# Test cases
print(solution(["ba","na","n","a"],"banana"),3)
print(solution(["app","ap","p","l","e","ple","pp"],"apple"),2)
print(solution(["ba","an","nan","ban","n"],"banana"),-1)
print(solution(["bax","dxv","zxc"],"baobab"))
 | [
"[email protected]"
] | |
5d86db5ca4849c4a3d056fe445f5af21bcb558e8 | 4c7fc810eb442b386969bf345b4dc6ef3152c783 | /src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py | c74865617cf76d513cfa10379dcd8d43d8b825e4 | [
"Apache-2.0"
] | permissive | newcodevelop/transformers | fbcef5d703b12febf6e76e84e3f0493769fb9d37 | e8d1bd7427021d2114ec159b2c90c6b1fcddeae7 | refs/heads/main | 2023-03-15T11:45:09.906184 | 2022-08-30T07:26:17 | 2022-08-30T07:26:17 | 254,360,734 | 0 | 1 | Apache-2.0 | 2020-04-09T12:07:09 | 2020-04-09T12:07:08 | null | UTF-8 | Python | false | false | 36,192 | py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support TF Encoder-Decoder architectures"""
import tempfile
import warnings
from typing import Optional
import tensorflow as tf
from ...configuration_utils import PretrainedConfig
from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput
from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, get_initializer, unpack_inputs
from ...tf_utils import shape_list
from ...utils import (
DUMMY_INPUTS,
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM
from .configuration_encoder_decoder import EncoderDecoderConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "EncoderDecoderConfig"
DEPRECATION_WARNING = (
"Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
" encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
" fine-tuning a model trained with versions anterior to 4.17.0. The decoder_input_ids are now created based on the"
" labels, no need to pass them yourself anymore."
)
ENCODER_DECODER_START_DOCSTRING = r"""
This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
[`~TFAutoModel.from_pretrained`] function and the decoder is loaded via [`~TFAutoModelForCausalLM.from_pretrained`]
function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
generative task, like summarization.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
Zhou, Wei Li, Peter J. Liu.
After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
(see the examples for more information).
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
Parameters:
config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
ENCODER_DECODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
Provide for sequence to sequence training to the decoder. Indices can be obtained using
[`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*):
This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output
of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `({0})`.
inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
- Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
- With a *decoder_* prefix which will be input as `**decoder_kwargs`` for the decoder forward function.
"""
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
if pad_token_id is None:
raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
if decoder_start_token_id is None:
raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids = tf.where(
shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
)
if tf.executing_eagerly():
# "Verify that `labels` has only positive values and -100"
assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
# Make sure the assertion op is called by wrapping the result in an identity no-op
with tf.control_dependencies([assert_gte0]):
shifted_input_ids = tf.identity(shifted_input_ids)
return shifted_input_ids
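
# Example (illustrative values): with pad_token_id=0 and decoder_start_token_id=101,
# shift_tokens_right(tf.constant([[5, 6, -100]]), 0, 101) returns [[101, 5, 6]]:
# the start token is prepended, the last position is dropped, and any remaining
# -100 values are replaced by the pad token.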
@add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
r"""
[`TFEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
of the base model classes of the library as encoder and another one as decoder when created with the
[`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class
method for the decoder.
"""
config_class = EncoderDecoderConfig
base_model_prefix = "encoder_decoder"
load_weight_prefix = "tf_encoder_decoder_model"
def __init__(
self,
config: Optional[PretrainedConfig] = None,
encoder: Optional[TFPreTrainedModel] = None,
decoder: Optional[TFPreTrainedModel] = None,
):
if config is None and (encoder is None or decoder is None):
raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
if config is None:
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
else:
if not isinstance(config, self.config_class):
raise ValueError(f"config: {config} has to be of type {self.config_class}")
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
raise ValueError(
"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
" `config.encoder.hidden_size`."
)
# initialize with config
super().__init__(config)
if encoder is None:
encoder = TFAutoModel.from_config(config.encoder, name="encoder")
if decoder is None:
decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder")
self.encoder = encoder
self.decoder = decoder
if self.encoder.config.to_dict() != self.config.encoder.to_dict():
logger.warning(
f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
f" {self.config.encoder}"
)
if self.decoder.config.to_dict() != self.config.decoder.to_dict():
logger.warning(
f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
f" {self.config.decoder}"
)
# make sure that the individual model's config refers to the shared config
# so that the updates to the config will be synced
self.encoder.config = self.config.encoder
self.decoder.config = self.config.decoder
# encoder outputs might need to be projected to different dimension for decoder
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
self.enc_to_dec_proj = tf.keras.layers.Dense(
units=self.decoder.config.hidden_size,
kernel_initializer=get_initializer(config.encoder.initializer_range),
name="enc_to_dec_proj",
)
if self.encoder.get_output_embeddings() is not None:
raise ValueError(
f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
`Dict[str, tf.Tensor]`: The dummy inputs.
"""
# Add `decoder_input_ids` because `self.decoder` requires it.
input_ids = tf.constant(DUMMY_INPUTS)
dummy = {"input_ids": input_ids, "decoder_input_ids": input_ids}
return dummy
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def get_input_embeddings(self):
return self.encoder.get_input_embeddings()
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Initializing *TFEncoderDecoderModel* from a pytorch checkpoint is not supported currently.
If there are only pytorch checkpoints for a particular encoder-decoder model, a workaround is:
```python
>>> # a workaround to load from pytorch checkpoint
>>> from transformers import EncoderDecoderModel, TFEncoderDecoderModel
>>> _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16")
>>> _model.encoder.save_pretrained("./encoder")
>>> _model.decoder.save_pretrained("./decoder")
>>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
... "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True
... )
>>> # This is only for copying some specific attributes of this particular model.
>>> model.config = _model.config
```
Example:
```python
>>> from transformers import TFEncoderDecoderModel
>>> model = TFEncoderDecoderModel.from_pretrained("ydshieh/bert2bert-cnn_dailymail-fp16")
```"""
from_pt = kwargs.pop("from_pt", False)
if from_pt:
raise ValueError(
"Initializing `TFEncoderDecoderModel` from a pytorch checkpoint is not supported currently. Use a"
" tensorflow checkpoint instead. If only the pytorch checkpoints are available, create the encoder and"
" decoder models separately, and use them to initialize `TFEncoderDecoderModel`. Check"
" `TFEncoderDecoderModel.from_encoder_decoder_pretrained()` for more details."
)
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
@classmethod
def from_encoder_decoder_pretrained(
cls,
encoder_pretrained_model_name_or_path: str = None,
decoder_pretrained_model_name_or_path: str = None,
*model_args,
**kwargs
) -> TFPreTrainedModel:
r"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
Params:
encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case,
`encoder_from_pt` should be set to `True`.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case,
`decoder_from_pt` should be set to `True`.
model_args (remaining positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import TFEncoderDecoderModel
>>> # initialize a bert2gpt2 from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
>>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "gpt2")
>>> # saving model after fine-tuning
>>> model.save_pretrained("./bert2gpt2")
>>> # load fine-tuned model
>>> model = TFEncoderDecoderModel.from_pretrained("./bert2gpt2")
```"""
kwargs_encoder = {
argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove encoder, decoder kwargs from kwargs
for key in kwargs_encoder.keys():
del kwargs["encoder_" + key]
for key in kwargs_decoder.keys():
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
if encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_encoder:
encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
"from a decoder model. Cross-attention and casual mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder["config"] = encoder_config
kwargs_encoder["name"] = "encoder"
kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix
encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
# This is necessary to make `from_pretrained` following `save_pretrained` work correctly
if kwargs_encoder.get("from_pt", None):
del kwargs_encoder["from_pt"]
with tempfile.TemporaryDirectory() as tmp_dirname:
encoder.save_pretrained(tmp_dirname)
del encoder
encoder = TFAutoModel.from_pretrained(tmp_dirname, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_decoder:
decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
"passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
"`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
)
kwargs_decoder["name"] = "decoder"
kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix
decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# This is necessary to make `from_pretrained` following `save_pretrained` work correctly
if kwargs_decoder.get("from_pt", None):
del kwargs_decoder["from_pt"]
with tempfile.TemporaryDirectory() as tmp_dirname:
decoder.save_pretrained(tmp_dirname)
del decoder
decoder = TFAutoModelForCausalLM.from_pretrained(tmp_dirname, **kwargs_decoder)
        # Make sure these two `tf.keras.Model` instances have fixed names so `from_pretrained` can load model weights correctly.
if encoder.name != "encoder":
raise ValueError("encoder model must be created with the name `encoder`.")
if decoder.name != "decoder":
raise ValueError("decoder model must be created with the name `decoder`.")
# instantiate config with corresponding kwargs
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
return cls(encoder=encoder, decoder=decoder, config=config)
@unpack_inputs
@add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Returns:
Examples:
```python
>>> from transformers import TFEncoderDecoderModel, BertTokenizer
>>> # initialize a bert2gpt2 from a pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
>>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "gpt2")
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> # forward
>>> input_ids = tokenizer.encode(
... "Hello, my dog is cute", add_special_tokens=True, return_tensors="tf"
... ) # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
>>> # training
>>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)
>>> loss, logits = outputs.loss, outputs.logits
>>> # save and load from pretrained
>>> model.save_pretrained("bert2gpt2")
>>> model = TFEncoderDecoderModel.from_pretrained("bert2gpt2")
>>> # generation
>>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# Let the user be responsible for the expected format.
if encoder_outputs is not None:
if return_dict and not isinstance(encoder_outputs, ModelOutput):
raise ValueError(
"If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of "
f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`."
)
if encoder_outputs is None:
encoder_inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"inputs_embeds": inputs_embeds,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
"training": training,
}
# Add arguments to encoder from `kwargs_encoder`
encoder_inputs.update(kwargs_encoder)
# Handle the case where the inputs are passed as a single dict which contains `labels`.
            # The `labels` shouldn't be passed to `self.encoder` below, because it is a base model without this
# parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`).
if "labels" in encoder_inputs:
labels = encoder_inputs.pop("labels")
# handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
if "decoder_input_ids" in encoder_inputs:
decoder_input_ids = encoder_inputs.pop("decoder_input_ids")
            # handle the init case where `dummy_inputs` returns a dict containing `decoder_attention_mask`.
if "decoder_attention_mask" in encoder_inputs:
decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask")
encoder_outputs = self.encoder(**encoder_inputs)
encoder_hidden_states = encoder_outputs[0]
# optionally project encoder_hidden_states
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
decoder_inputs = {
"input_ids": decoder_input_ids,
"attention_mask": decoder_attention_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": attention_mask,
"inputs_embeds": decoder_inputs_embeds,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"use_cache": use_cache,
"past_key_values": past_key_values,
"return_dict": return_dict,
"training": training,
}
# Add arguments to decoder from `kwargs_decoder`
decoder_inputs.update(kwargs_decoder)
decoder_outputs = self.decoder(**decoder_inputs)
logits = decoder_outputs[0]
# Compute loss independent from decoder (as some shift the logits inside them)
loss = None
if labels is not None:
warnings.warn(DEPRECATION_WARNING, FutureWarning)
loss = self.hf_compute_loss(labels, logits)
if not return_dict:
past_key_values = None
if use_cache:
past_key_values = decoder_outputs[1]
# The starting index of the remaining elements in `decoder_outputs`
start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)])
if not isinstance(encoder_outputs, tuple):
encoder_outputs = encoder_outputs.to_tuple()
output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs
output = tuple([x for x in output if x is not None])
return output
return TFSeq2SeqLMOutput(
loss=loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
cross_attns = (
tf.convert_to_tensor(output.cross_attentions)
if self.config.output_attentions and output.cross_attentions is not None
else None
)
return TFSeq2SeqLMOutput(
logits=output.logits,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
cross_attentions=cross_attns,
)
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)
decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
past_key_values = decoder_inputs.get("past_key_values")
if past_key_values is None:
past_key_values = decoder_inputs.get("past") # e.g. on TF GPT2
input_dict = {
"input_ids": None, # needs to be passed to make Keras.layer.__call__ happy
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"decoder_input_ids": decoder_inputs["input_ids"],
# TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete
"encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]),
"past_key_values": past_key_values,
"use_cache": use_cache,
}
return input_dict
def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
def resize_token_embeddings(self, *args, **kwargs):
raise NotImplementedError(
"Resizing the embedding layers via the TFEncoderDecoderModel directly is not supported.Please use the"
" respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
" model.decoder.resize_token_embeddings(...))"
)
def _reorder_cache(self, past, beam_idx):
# apply decoder cache reordering here
return self.decoder._reorder_cache(past, beam_idx)
| [
"[email protected]"
] | |
0e48b30a06104cba35625dfe97b6f03f276fffcb | c553f9d608c435cd7f19c9be0ef512307295a837 | /daemin/greedy/실전문제/1.모험가길드.py | cfc80dc0c3cd461720a12db2077c822dd132f7b8 | [] | no_license | Green0v0/Algorithm | 2d089e7c016997c1fb5e1094ddeeb80cd1ce0485 | ab9b387e63550ef1b5dfe0f851163b16fbd42c88 | refs/heads/main | 2023-05-24T05:37:17.125671 | 2021-06-16T05:35:52 | 2021-06-16T05:35:52 | 330,944,982 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | # n = int(input())
# k = list(map(int,input().split(" ")))
n = 5
data = [2,3,1,2,2]
data.sort() # sorts in place / reverse=True would sort in descending order.
result = 0 # number of groups
count = 0 # number of adventurers in the current group
"""
1. The basic idea is to form groups starting from the adventurers with the lowest fear level (the smallest numbers).
2. Put one adventurer into count; if the next adventurer's fear level (i) is less than or equal to count, a group is formed.
3. If it is greater, no group (result) is formed yet; go back up the loop, take the next adventurer, and add 1 to count.
4. Whenever the condition is met, increase the group count.
"""
for i in data:
    count += 1
    if count >= i: # a group forms as soon as count reaches the (sorted) fear level i, even though the members' fear levels may not all match i exactly.
        result += 1
        count = 0
print(result)
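# Worked trace (a sketch of the run above): sorted data is [1, 2, 2, 2, 3].
# count=1 >= 1 -> group 1; then count=1 < 2, count=2 >= 2 -> group 2;
# then count=1 < 2 and count=2 < 3, so the last two adventurers form no group.
# Printed result: 2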
# First attempt: failed //
# With input (4,3,2,2,2,1,1,1,1,1): once 4,3,2 are left they can no longer form a group, yet the code just keeps running.
#
#
# while True:
#     m = min(k)
#     for _ in range(m):
#         k.pop() # This part is the problem! Because of pop, at the final 4,3,2 the 2 triggers two pops and throws away the 3 as well..
#         count += 1
#     if len(k)==0:
#         break
# print(count)
| [
"[email protected]"
] | |
00b204dd1c59a7f8d99f85a898d26452b44fb647 | 0cbd245ba67ada0dd04e8a61471b2bc2bbacdc47 | /App09_RealEstate_DataMiner/app9.py | 8c7df6853b363cbc8fa964ed55f68f41a46db523 | [] | no_license | ptsouth97/pythonapps | 7ed0a121f35669d0bb177d88ef9aa09828bea813 | ee239a02c553fb9d2672f50a4b4c49b4ea4396f0 | refs/heads/master | 2021-01-12T04:31:31.687181 | 2017-02-05T21:07:42 | 2017-02-05T21:07:42 | 77,632,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | import os
import csv
from data_types import Purchase
import statistics
def main():
print_header()
filename = get_data_file()
# print(filename)
data = load_file(filename)
query_data(data)
def print_header():
print('------------------------')
print(' Real Estate App')
print('------------------------')
print()
def get_data_file():
base_folder = os.path.dirname(__file__)
return os.path.join(base_folder, 'data', 'SacramentoRealEstateTransactions2008.csv')
def load_file(filename):
with open(filename, 'r', encoding='utf-8') as fin:
reader = csv.DictReader(fin)
purchases = []
for row in reader:
# print(type(row), row)
# print("Bed count: {}".format(row['beds']))
p = Purchase.create_from_dict(row)
purchases.append(p)
return purchases
# print(purchases[0].__dict__)
# header = fin.readline().strip()
# reader = csv.reader(fin, delimiter=',')
# for row in reader:
# print(row)
# beds = row[4]
# def load_file_basic(filename):
# with open(filename, 'r', encoding='utf-8') as fin:
# header = fin.readline().strip()
# print('found header: ' + header)
#
# lines = []
# for line in fin:
# line_data = line.strip().split(',')
# lines.append(line_data)
#
# print(lines[:5])
# def get_price(p):
# return p.price
def query_data(data):
# if data was sorted by price:
# data.sort(key=get_price)
    data.sort(key=lambda p: p.price)
# most expensive house
high_purchase = data[-1]
print("The most expensive house is ${:,} with {} beds and {} baths".format(high_purchase.price, high_purchase.beds, high_purchase.baths))
# least expensive house
low_purchase = data[0]
print("The least expensive house is ${:,} with {} beds and {} baths".format(low_purchase.price, low_purchase.beds, low_purchase.baths))
# average price house
# average price of 2 bedroom homes
# prices = []
# for pur in data:
# prices.append(pur.price)
# LIST COMPREHENSIONS
prices = [
p.price # projection or items
for p in data # the set to process
]
ave_price = statistics.mean(prices)
print("The average home price is ${:,}".format(int(ave_price)))
two_bed_homes = [
p
for p in data # the set to process
if p.beds == 2 # test condition
]
ave_price = statistics.mean([p.price for p in two_bed_homes])
ave_baths = statistics.mean([p.baths for p in two_bed_homes])
ave_sqft = statistics.mean([p.sq__ft for p in two_bed_homes])
print("The average price of a 2-bedroom home is ${:,}, baths={}, sq ft={:,}".format(int(ave_price), round(ave_baths, 1), round( ave_sqft, 1)))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
caef56b59f3154376c50d4336649aec1100d0102 | 2f6d017dedc68588b2615d65c1e8ca8bcdd90446 | /api/dynamic_tests_v2/cumsum.py | 64fc792e50a19fb1e753faa601710dbef87b366e | [] | no_license | hysunflower/benchmark | 70fc952a4eb1545208543627539d72e991cef78a | c14f99c15b4be9e11f56ea378ca15d9c3da23bab | refs/heads/master | 2022-06-30T07:04:14.986050 | 2022-06-15T02:43:04 | 2022-06-15T02:43:04 | 224,449,279 | 1 | 0 | null | 2019-11-27T14:29:29 | 2019-11-27T14:29:29 | null | UTF-8 | Python | false | false | 1,479 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
class CumsumConfig(APIConfig):
def __init__(self):
super(CumsumConfig, self).__init__('cumsum')
self.feed_spec = {"range": [-1, 1]}
class PDCumsum(PaddleDynamicAPIBenchmarkBase):
def build_graph(self, config):
x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
result = paddle.cumsum(x=x, axis=config.axis)
self.feed_list = [x]
self.fetch_list = [result]
class TorchCumsum(PytorchAPIBenchmarkBase):
def build_graph(self, config):
x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        result = torch.cumsum(x, dim=config.axis)  # torch.cumsum's first parameter is `input`, so pass the tensor positionally; `x=` would raise a TypeError
self.feed_list = [x]
self.fetch_list = [result]
if __name__ == '__main__':
test_main(
pd_dy_obj=PDCumsum(), torch_obj=TorchCumsum(), config=CumsumConfig())
| [
"[email protected]"
] | |
c83eadf7b9b9967c1507e6da8273883512787e28 | 13ea58f72fa96e2455609fb452b5f3b98e94f846 | /sfepy/postprocess/plot_cmesh.py | 4319e5a25f131980a112ea817a562980f7b29e29 | [
"BSD-3-Clause"
] | permissive | vondrejc/sfepy | 4284ee47979b89d9e504b72b91689a9ce0c3a5ec | 8e427af699c4b2858eb096510057abb3ae7e28e8 | refs/heads/master | 2021-01-24T00:09:18.722674 | 2014-08-20T12:37:03 | 2014-08-20T14:25:56 | 12,810,199 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | """
Functions to visualize the CMesh geometry and topology.
"""
import matplotlib.pyplot as plt
from sfepy.postprocess.plot_dofs import _get_axes
def plot_wireframe(ax, cmesh, color='k', show=False):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
dim = cmesh.dim
edges = cmesh.get_conn(1, 0)
ax = _get_axes(ax, dim)
for edge_vertices in edges.indices.reshape((edges.num, 2)):
cc = coors[edge_vertices]
if dim == 3:
ax.plot(cc[:, 0], cc[:, 1], cc[:, 2], color)
else:
ax.plot(cc[:, 0], cc[:, 1], color)
if show:
plt.show()
return ax
def plot_entities(ax, cmesh, edim, color='b', size=10, show=False):
"""
Plot mesh topology entities using scatter plot.
"""
coors = cmesh.get_centroids(edim)
dim = cmesh.dim
ax = _get_axes(ax, dim)
if dim == 3:
ax.scatter(coors[:, 0], coors[:, 1], coors[:, 2], s=size, c=color)
else:
ax.scatter(coors[:, 0], coors[:, 1], s=size, c=color)
if show:
plt.show()
return ax
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10, show=False):
"""
Label mesh topology entities using global ids.
"""
coors = cmesh.get_centroids(edim)
dim = cmesh.dim
ax = _get_axes(ax, dim)
for ii, cc in enumerate(coors):
if dim == 3:
ax.text(cc[0], cc[1], cc[2], ii,
color=color, fontsize=fontsize)
else:
ax.text(cc[0], cc[1], ii,
color=color, fontsize=fontsize)
if show:
plt.show()
return ax
def label_local_entities(ax, cmesh, edim, color='b', fontsize=10, show=False):
"""
Label mesh topology entities using cell-local ids.
"""
coors = cmesh.get_centroids(edim)
dim = cmesh.dim
centres = cmesh.get_centroids(dim)
conn = cmesh.get_conn(dim, edim)
off = conn.offsets
ax = _get_axes(ax, dim)
eps = 0.1
oeps = 1.0 - eps
for ii in xrange(conn.num):
for ic, ie in enumerate(conn.indices[off[ii]:off[ii+1]]):
# Shift labels towards the cell centre.
cc = oeps * coors[ie] + eps * centres[ii]
if dim == 3:
ax.text(cc[0], cc[1], cc[2], ic,
color=color, fontsize=fontsize)
else:
ax.text(cc[0], cc[1], ic,
color=color, fontsize=fontsize)
if show:
plt.show()
return ax
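# A minimal usage sketch (assumes `cmesh` is an existing sfepy CMesh instance;
# the calls below are illustrative, not part of this module):
#
# ax = plot_wireframe(None, cmesh)
# ax = plot_entities(ax, cmesh, 0, color='k')
# ax = label_global_entities(ax, cmesh, 0, color='k')
# ax = label_local_entities(ax, cmesh, 1, color='g', show=True)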
| [
"[email protected]"
] | |
83b6c3223a9ea60b7456b4e43317b1614cfe87e0 | 7ce05272d21c903abc85ebc74544009aacd80c82 | /Advance_Python/Socket_Programming/socket_programs/client.py | cead5a5a2d925f83e46f72d6bbd4a1b3d48a2ce3 | [] | no_license | sachinyadav3496/PythonInternBatch2018 | 8899a866f60a39b4c7eff4f5bc79ec2586833403 | 8e2610ad80c39ea747e8a6547ebe540e7b019a79 | refs/heads/master | 2021-06-26T09:18:58.178457 | 2020-10-03T09:49:32 | 2020-10-03T09:49:32 | 136,880,809 | 18 | 34 | null | 2020-10-03T09:49:33 | 2018-06-11T05:56:26 | Jupyter Notebook | UTF-8 | Python | false | false | 670 | py | import socket
server_socket = socket.socket()
host = socket.gethostbyname(socket.gethostname()) # give the server's address here if the server is on a different machine
port = 12345 # port number on which the server is listening
server_socket.connect((host,port))
print("Got Connection from server at {}:{} ".format(host,port))
while True :
smsg = server_socket.recv(1024)
if smsg.decode().strip().lower() == 'bye' :
print("Connection is Terminated by server")
server_socket.close()
break
print("\t\t\tServer -> ",smsg.decode())
msg = input("client->")
server_socket.send(msg.encode())
if msg == 'bye' :
server_socket.close()
break
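# A minimal matching server sketch (an assumption, not part of this repo: the
# client above blocks on recv() first, so the server must send before receiving
# and must listen on port 12345 of this host):
#
# import socket
# listener = socket.socket()
# listener.bind((socket.gethostbyname(socket.gethostname()), 12345))
# listener.listen(1)
# conn, addr = listener.accept()
# conn.send("hello".encode()) # the client blocks on recv() before it sends
# while True:
#     msg = conn.recv(1024).decode()
#     if msg.strip().lower() == 'bye':
#         break
#     conn.send(input("server-> ").encode())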
| [
"[email protected]"
] | |
a4ec925ffdf9afa9aff09c57049d796f503f32ea | 524c168b1b7ab4644a612f692645ae56487dea8c | /fwork-backend/tina/projects/migrations/0013_auto_20141210_1040.py | ac9ab59da8ca199cb8221bccf33e483a8493f55f | [] | no_license | phamhongnhung2501/Taiga.Tina | b4fa730a9f9601e23ab19c6d937e7daf0386b1e2 | 8bc44de3a364ccd0e49e767b098589898dcabc10 | refs/heads/master | 2022-12-14T09:55:11.205228 | 2019-07-08T07:42:38 | 2019-07-08T07:42:38 | 195,760,755 | 0 | 0 | null | 2022-12-08T05:18:37 | 2019-07-08T07:39:32 | Python | UTF-8 | Python | false | false | 995 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db import connection
from tina.projects.userstories.models import *
from tina.projects.tasks.models import *
from tina.projects.issues.models import *
from tina.projects.models import *
def _fix_tags_model(tags_model):
table_name = tags_model._meta.db_table
query = "select id from (select id, unnest(tags) tag from %s) x where tag LIKE '%%,%%'"%(table_name)
cursor = connection.cursor()
cursor.execute(query)
for row in cursor.fetchall():
id = row[0]
instance = tags_model.objects.get(id=id)
instance.tags = [tag.replace(",", "") for tag in instance.tags]
instance.save()
def fix_tags(apps, schema_editor):
_fix_tags_model(Project)
class Migration(migrations.Migration):
dependencies = [
('projects', '0012_auto_20141210_1009'),
]
operations = [
migrations.RunPython(fix_tags),
]
| [
"[email protected]"
] | |
46eb0092ec00ba666cc6bbdaa21bff606a02a170 | 6f594cc963795c69d8da3c30ca580c0405ef2d6e | /binaryTree/652FindDuplicateSubtrees.py | fbf0d850405b6d265b0194874f1be18bc6d4cea4 | [] | no_license | lo-tp/leetcode | 25933c5b25f64f881d43748d8b2763f69614a97f | 4cc4d76c64e9d9aa3f53c5e9574e488c93e10a50 | refs/heads/master | 2022-09-07T20:32:58.487759 | 2022-09-05T03:39:50 | 2022-09-07T13:39:50 | 116,555,892 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | from collections import defaultdict
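# Assumed LeetCode scaffolding (not part of this file): the solutions below
# expect a standard binary-tree node along these lines.
#
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None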
class Solution(object):
def findDuplicateSubtrees(self, root):
res, current, stack, data = [], '', [], defaultdict(lambda: 0)
while stack or root:
if root:
stack.append((root, False, ''))
root = root.left
else:
t, visited, left_str = stack.pop()
if visited:
current = '{} {} {}'.format(left_str, t.val, current)
root = None
if data[current] == 1:
res.append(t)
data[current] += 1
else:
stack.append((t, True, current))
current = ''
root = t.right
return res
def findDuplicateSubtrees(self, root):
res, current, stack, data = [], '', [
(root, 0, '')], defaultdict(lambda: 0)
while stack:
root, flag, left_str = stack.pop()
if not root:
current += ' '
elif not flag:
stack.append((root, 1, ''))
stack.append((root.left, 0, ''))
elif flag == 1:
stack.append((root, 2, current))
stack.append((root.right, 0, ''))
current = ''
else:
current = 'l{}-{}-{}r'.format(left_str, root.val, current)
if data[current] == 1:
res.append(root)
data[current] += 1
return res
def findDuplicateSubtrees(self, root):
cur = None
res, data, stack = [], defaultdict(lambda: 0), [(root, None, 0)]
while stack:
node, string, flag = stack.pop()
if not node:
cur = '#'
elif not flag:
stack.append((node, None, 1))
stack.append((node.left, None, 0))
elif flag == 1:
stack.append((node, cur, 2))
stack.append((node.right, None, 0))
else:
cur = '{},{},{}'.format(node.val, string, cur)
data[cur] += 1
if data[cur] == 2:
res.append(node)
return res
| [
"[email protected]"
] | |
9f238e46d438784023ea24f418acbc362d03107b | 86813bf514f3e0257f92207f40a68443f08ee44b | /459 重复的子字符串/459 重复的子字符串.py | 09445982e188b913bf7d0f47bd859239932d3471 | [] | no_license | Aurora-yuan/Leetcode_Python3 | 4ce56679b48862c87addc8cd870cdd525c9d926c | 720bb530850febc2aa67a56a7a0b3a85ab37f415 | refs/heads/master | 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | #label: string difficulty: easy
class Solution:
def repeatedSubstringPattern(self, s: str) -> bool:
n = len(s)
for i in range(1,n//2+1):
if n % i == 0:
a = s[:i]
j = i
while j<n and s[j:j+i] == a:
j += i
if j == n:
return True
return False
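# Quick check (sketch): for s = "abab", i = 2 gives a = "ab" and the while loop
# walks j = 2, 4 matching "ab" each time, so j == n and the result is True;
# for s = "aba" no divisor i <= n//2 rebuilds s, so the result is False.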
| [
"[email protected]"
] | |
1e9a225fe5733b7b760390bc1f1511e3d4fc2649 | 99697559d046cdd04dd9068bd518e4da4177aaa2 | /Finish/H065_Valid_Number.py | 887cc224b572e863ae805b6987920e3864f81620 | [] | no_license | Azurisky/Leetcode | 3e3621ef15f2774cfdfac8c3018e2e4701760c3b | 8fa215fb0d5b2e8f6a863756c874d0bdb2cffa04 | refs/heads/master | 2020-03-18T22:46:35.780864 | 2018-10-07T05:45:30 | 2018-10-07T05:45:30 | 135,364,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | class Solution:
def isNumber(self, s):
"""
:type s: str
:rtype: bool
"""
if not s:
return False
# strip the heading and tailing spaces of the string
s = s.strip()
i = 0
res = signs = eE = dot = False
while i < len(s):
if s[i].isdigit():
i += 1
res = signs = True
elif s[i]=='.' and not dot:
i += 1
dot = signs = True
elif (s[i]=='e' or s[i]=='E') and (not eE) and res:
i += 1
res = signs = False
dot = eE = True
elif (s[i]=='+' or s[i]=='-') and not res and not signs:
i += 1
signs = True
else:
return False
if res:
return True
return False | [
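# Quick checks (sketch): "0", " 0.1 ", "2e10", "-90e3" all return True;
# "abc", "1 a", "e3", "--6", "6e" all return False under the flag logic above.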
"[email protected]"
] | |
74ab53846c7f95d413948f7ff2c3a206fcf660ca | d3b7a7a922eb9999f22c99c0cc3908d7289ca27e | /tests/end-to-end.py | e965fe6cbdead05f32e481356524c7034165020e | [
"Apache-2.0"
] | permissive | g3l0o/plaso | b668203c2c7cf8799a1c12824ee1bdc8befd3980 | ae29d853a6bcdd1530ce9320a3af7b3f122941ac | refs/heads/master | 2020-12-25T20:31:08.928709 | 2016-07-22T20:00:33 | 2016-07-22T20:00:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,110 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""End-to-end test launcher."""
from __future__ import print_function
import abc
import argparse
import difflib
import logging
import os
import shutil
import subprocess
import sys
import tempfile
try:
import ConfigParser as configparser
except ImportError:
import configparser # pylint: disable=import-error
if sys.version_info[0] < 3:
STRING_TYPES = (basestring, )
else:
STRING_TYPES = (str, )
# Since os.path.abspath() uses the current working directory (cwd)
# os.path.abspath(__file__) will point to a different location if
# cwd has been changed. Hence we preserve the absolute location of __file__.
__file__ = os.path.abspath(__file__)
class TempDirectory(object):
"""Class that implements a temporary directory."""
def __init__(self):
"""Initializes a temporary directory object."""
super(TempDirectory, self).__init__()
self.name = u''
def __enter__(self):
"""Make this work with the 'with' statement."""
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make this work with the 'with' statement."""
shutil.rmtree(self.name, True)
class TestCase(object):
"""Class that defines the test case object interface.
The test case defines what aspect of the plaso tools to test.
A test definition is used to provide parameters for the test
case so it can be easily run on different input files.
Attributes:
name (str): name of the test case.
"""
NAME = None
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case object.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(TestCase, self).__init__()
self._debug_output = debug_output
self._log2timeline_path = None
self._pinfo_path = None
self._psort_path = None
self._test_references_path = test_references_path
self._test_results_path = test_results_path
self._test_sources_path = test_sources_path
self._tools_path = tools_path
def _InitializeLog2TimelinePath(self):
"""Initializes the location of log2timeline."""
for filename in (
u'log2timeline.exe', u'log2timeline.sh', u'log2timeline.py'):
self._log2timeline_path = os.path.join(self._tools_path, filename)
if os.path.exists(self._log2timeline_path):
break
if self._log2timeline_path.endswith(u'.py'):
self._log2timeline_path = u' '.join([
sys.executable, self._log2timeline_path])
def _InitializePinfoPath(self):
"""Initializes the location of pinfo."""
for filename in (u'pinfo.exe', u'pinfo.sh', u'pinfo.py'):
self._pinfo_path = os.path.join(self._tools_path, filename)
if os.path.exists(self._pinfo_path):
break
if self._pinfo_path.endswith(u'.py'):
self._pinfo_path = u' '.join([sys.executable, self._pinfo_path])
def _InitializePsortPath(self):
"""Initializes the location of psort."""
for filename in (u'psort.exe', u'psort.sh', u'psort.py'):
self._psort_path = os.path.join(self._tools_path, filename)
if os.path.exists(self._psort_path):
break
if self._psort_path.endswith(u'.py'):
self._psort_path = u' '.join([sys.executable, self._psort_path])
def _RunCommand(self, command):
"""Runs a command.
Args:
command (str): command to run.
Returns:
bool: True if the command ran successfully.
"""
exit_code = subprocess.call(command, shell=True)
if exit_code != 0:
logging.error(u'Running: "{0:s}" failed.'.format(command))
return False
return True
@abc.abstractmethod
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
@abc.abstractmethod
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
class TestCasesManager(object):
"""Class that implements the test cases manager."""
_test_case_classes = {}
_test_case_objects = {}
@classmethod
def DeregisterTestCase(cls, test_case_class):
"""Deregisters a test case class.
The test case classes are identified based on their lower case name.
Args:
test_case_class (type): test case class.
Raises:
KeyError: if test case class is not set for the corresponding name.
"""
test_case_name = test_case_class.NAME.lower()
if test_case_name not in cls._test_case_classes:
raise KeyError(
          u'Test case class not set for name: {0:s}.'.format(
test_case_class.NAME))
del cls._test_case_classes[test_case_name]
@classmethod
def GetTestCaseObject(
cls, name, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Retrieves the test case object for a specific name.
Args:
name (str): name of the test case.
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
Returns:
TestCase: test case or None if not available.
"""
name = name.lower()
if name not in cls._test_case_objects:
test_case_object = None
if name in cls._test_case_classes:
test_case_class = cls._test_case_classes[name]
test_case_object = test_case_class(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
if not test_case_object:
return
cls._test_case_objects[name] = test_case_object
return cls._test_case_objects[name]
@classmethod
def RegisterTestCase(cls, test_case_class):
"""Registers a test case class.
The test case classes are identified based on their lower case name.
Args:
test_case_class (type): test case class.
Raises:
KeyError: if test case class is already set for the corresponding
name.
"""
test_case_name = test_case_class.NAME.lower()
if test_case_name in cls._test_case_classes:
raise KeyError((
          u'Test case class already set for name: {0:s}.').format(
test_case_class.NAME))
cls._test_case_classes[test_case_name] = test_case_class
@classmethod
def RegisterTestCases(cls, test_case_classes):
"""Registers test case classes.
The test case classes are identified based on their lower case name.
Args:
test_case_classes (list[type]): test case classes.
Raises:
KeyError: if test case class is already set for the corresponding
name.
"""
for test_case_class in test_case_classes:
cls.RegisterTestCase(test_case_class)
class TestDefinition(object):
"""Class that implements a test definition.
Attributes:
case (str): name of test case.
name (str): name of the test.
"""
def __init__(self, name):
"""Initializes a test definition object.
Args:
name (str): name of the test.
"""
super(TestDefinition, self).__init__()
self.case = u''
self.name = name
class TestDefinitionReader(object):
"""Class that implements a test definition reader.
The test definition reader reads tests definitions from a configuration
file.
"""
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test definition reader object.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(TestDefinitionReader, self).__init__()
self._config_parser = None
self._debug_output = debug_output
self._test_references_path = test_references_path
self._test_results_path = test_results_path
self._test_sources_path = test_sources_path
self._tools_path = tools_path
def GetConfigValue(self, section_name, value_name):
"""Retrieves a value from the config parser.
Args:
section_name (str): name of the section that contains the value.
value_name (str): the name of the value.
Returns:
object: value or None if the value does not exists.
Raises:
RuntimeError: if the configuration parser is not set.
"""
if not self._config_parser:
raise RuntimeError(u'Missing configuration parser.')
try:
return self._config_parser.get(section_name, value_name).decode('utf-8')
except configparser.NoOptionError:
return
def Read(self, file_object):
"""Reads test definitions.
Args:
file_object (file): a file-like object to read from.
Yields:
TestDefinition: end-to-end test definition.
"""
# TODO: replace by:
    # self._config_parser = configparser.ConfigParser(interpolation=None)
self._config_parser = configparser.RawConfigParser()
try:
self._config_parser.readfp(file_object)
for section_name in self._config_parser.sections():
test_definition = TestDefinition(section_name)
test_definition.case = self.GetConfigValue(section_name, u'case')
if not test_definition.case:
logging.warning(
u'Test case missing in test definition: {0:s}.'.format(
section_name))
continue
test_case = TestCasesManager.GetTestCaseObject(
test_definition.case, self._tools_path, self._test_sources_path,
self._test_references_path, self._test_results_path,
debug_output=self._debug_output)
if not test_case:
logging.warning(u'Undefined test case: {0:s}'.format(
test_definition.case))
continue
if not test_case.ReadAttributes(self, test_definition):
logging.warning(
u'Unable to read attributes of test case: {0:s}'.format(
test_definition.case))
continue
yield test_definition
finally:
self._config_parser = None
class TestLauncher(object):
"""Class that implements the test launcher.
The test launcher reads the test definitions from a file, looks up
the corresponding test cases in the test case manager and then runs
the test case with the parameters specified in the test definition.
"""
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test launcher object.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(TestLauncher, self).__init__()
self._debug_output = debug_output
self._test_definitions = []
self._test_references_path = test_references_path
self._test_results_path = test_results_path
self._test_sources_path = test_sources_path
self._tools_path = tools_path
def _RunTest(self, test_definition):
"""Runs the test.
Args:
test_definition (TestDefinition): test definition.
Returns:
      bool: True if the test ran successfully.
"""
test_case = TestCasesManager.GetTestCaseObject(
test_definition.case, self._tools_path, self._test_sources_path,
self._test_references_path, self._test_results_path)
if not test_case:
logging.error(u'Unsupported test case: {0:s}'.format(
test_definition.case))
return False
return test_case.Run(test_definition)
def ReadDefinitions(self, configuration_file):
"""Reads the test definitions from the configuration file.
Args:
configuration_file (str): path of the configuration file.
"""
self._test_definitions = []
with open(configuration_file) as file_object:
test_definition_reader = TestDefinitionReader(
self._tools_path, self._test_sources_path,
self._test_references_path, self._test_results_path)
for test_definition in test_definition_reader.Read(file_object):
self._test_definitions.append(test_definition)
def RunTests(self):
"""Runs the tests.
Returns:
list[str]: names of the failed tests.
"""
# TODO: set up test environment
failed_tests = []
for test_definition in self._test_definitions:
if not self._RunTest(test_definition):
failed_tests.append(test_definition.name)
return failed_tests
class ExtractAndOutputTestCase(TestCase):
"""Class that implements the extract and output test case.
The extract and output test case runs log2timeline to extract data
  from a source specified by the test definition. After the data has been
  extracted, pinfo and psort are run to validate that the resulting storage
  file is readable.
"""
NAME = u'extract_and_output'
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case object.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(ExtractAndOutputTestCase, self).__init__(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
self._InitializeLog2TimelinePath()
self._InitializePinfoPath()
self._InitializePsortPath()
def _RunLog2Timeline(
self, test_definition, temp_directory, storage_file, source_path):
"""Runs log2timeline with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
source_path (str): path of the source.
Returns:
bool: True if log2timeline ran successfully.
"""
extract_options = u'--status-view=none {0:s}'.format(
u' '.join(test_definition.extract_options))
stdout_file = os.path.join(
temp_directory, u'{0:s}-log2timeline.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, u'{0:s}-log2timeline.err'.format(test_definition.name))
command = u'{0:s} {1:s} {2:s} {3:s} > {4:s} 2> {5:s}'.format(
self._log2timeline_path, extract_options, storage_file, source_path,
stdout_file, stderr_file)
logging.info(u'Running: {0:s}'.format(command))
result = self._RunCommand(command)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if os.path.exists(storage_file):
shutil.copy(storage_file, self._test_results_path)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def _RunPinfo(self, test_definition, temp_directory, storage_file):
"""Runs pinfo on the storage file.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
Returns:
bool: True if pinfo ran successfully.
"""
stdout_file = os.path.join(
temp_directory, u'{0:s}-pinfo.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, u'{0:s}-pinfo.err'.format(test_definition.name))
command = u'{0:s} {1:s} > {2:s} 2> {3:s}'.format(
self._pinfo_path, storage_file, stdout_file, stderr_file)
logging.info(u'Running: {0:s}'.format(command))
result = self._RunCommand(command)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def _RunPinfoCompare(self, test_definition, temp_directory, storage_file):
"""Runs pinfo --compare on the storage file and a reference storage file.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
Returns:
bool: True if pinfo ran successfully.
"""
reference_storage_file = test_definition.reference_storage_file
if self._test_references_path:
reference_storage_file = os.path.join(
self._test_references_path, reference_storage_file)
if not os.path.exists(reference_storage_file):
logging.error(u'No such reference storage file: {0:s}'.format(
reference_storage_file))
return False
stdout_file = os.path.join(
temp_directory, u'{0:s}-compare-pinfo.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, u'{0:s}-compare-pinfo.err'.format(test_definition.name))
command = u'{0:s} --compare {1:s} {2:s} > {3:s} 2> {4:s}'.format(
self._pinfo_path, reference_storage_file, storage_file, stdout_file,
stderr_file)
logging.info(u'Running: {0:s}'.format(command))
result = self._RunCommand(command)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def _RunPsort(self, test_definition, temp_directory, storage_file):
"""Runs psort on a storage file.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
Returns:
bool: True if psort ran successfully.
"""
stdout_file = os.path.join(
temp_directory, u'{0:s}-psort.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, u'{0:s}-psort.err'.format(test_definition.name))
command = u'{0:s} {1:s} > {2:s} 2> {3:s}'.format(
self._psort_path, storage_file, stdout_file, stderr_file)
logging.info(u'Running: {0:s}'.format(command))
result = self._RunCommand(command)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.extract_options = test_definition_reader.GetConfigValue(
test_definition.name, u'extract_options')
if test_definition.extract_options is None:
test_definition.extract_options = []
elif isinstance(test_definition.extract_options, STRING_TYPES):
test_definition.extract_options = test_definition.extract_options.split(
u',')
test_definition.reference_storage_file = (
test_definition_reader.GetConfigValue(
test_definition.name, u'reference_storage_file'))
test_definition.source = test_definition_reader.GetConfigValue(
test_definition.name, u'source')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error(u'No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
storage_file = os.path.join(
temp_directory, u'{0:s}.plaso'.format(test_definition.name))
# Extract events with log2timeline.
if not self._RunLog2Timeline(
test_definition, temp_directory, storage_file, source_path):
return False
# Check if the resulting storage file can be read with pinfo.
if not self._RunPinfo(
test_definition, temp_directory, storage_file):
return False
# Compare storage file with a reference storage file.
if test_definition.reference_storage_file:
if not self._RunPinfoCompare(
test_definition, temp_directory, storage_file):
return False
# Check if the resulting storage file can be read with psort.
if not self._RunPsort(
test_definition, temp_directory, storage_file):
return False
return True
class ExtractAndTagTestCase(ExtractAndOutputTestCase):
"""Class that implements the extract and tag test case.
The extract and tag test case runs log2timeline to extract data
  from a source specified by the test definition. After the data has been
  extracted, psort is run to tag events in the resulting storage file.
"""
NAME = u'extract_and_tag'
def _RunPsortWithTaggingOptions(
self, test_definition, temp_directory, storage_file):
"""Runs psort with the tagging options specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
Returns:
bool: True if psort ran successfully.
"""
# TODO: determine why --analysis=tagging fails.
tagging_options = (
u'--analysis tagging --output-format=null '
u'--tagging-file {0:s}').format(test_definition.tagging_file)
stdout_file = os.path.join(
temp_directory, u'{0:s}-psort-tagging.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, u'{0:s}-psort-tagging.err'.format(test_definition.name))
command = u'{0:s} {1:s} {2:s} > {3:s} 2> {4:s}'.format(
self._psort_path, tagging_options, storage_file, stdout_file,
stderr_file)
logging.info(u'Running: {0:s}'.format(command))
result = self._RunCommand(command)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
if not super(ExtractAndTagTestCase, self).ReadAttributes(
test_definition_reader, test_definition):
return False
test_definition.tagging_file = test_definition_reader.GetConfigValue(
test_definition.name, u'tagging_file')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error(u'No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
storage_file = os.path.join(
temp_directory, u'{0:s}.plaso'.format(test_definition.name))
# Extract events with log2timeline.
if not self._RunLog2Timeline(
test_definition, temp_directory, storage_file, source_path):
return False
# Add tags to the resulting storage file with psort.
if not self._RunPsortWithTaggingOptions(
test_definition, temp_directory, storage_file):
return False
# Check if the resulting storage file can be read with psort.
if not self._RunPsort(
test_definition, temp_directory, storage_file):
return False
return True
class ImageExportTestCase(TestCase):
"""Class that implements the image export test case.
The image export test case runs image_export to extract files from a storage
media image, specified by the test definition.
"""
NAME = u'image_export'
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case object.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(ImageExportTestCase, self).__init__(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
self._image_export_path = None
self._InitializeImageExportPath()
def _InitializeImageExportPath(self):
"""Initializes the location of image_export."""
for filename in (
u'image_export.exe', u'image_export.sh', u'image_export.py'):
self._image_export_path = os.path.join(self._tools_path, filename)
if os.path.exists(self._image_export_path):
break
if self._image_export_path.endswith(u'.py'):
self._image_export_path = u' '.join([
sys.executable, self._image_export_path])
def _RunImageExport(self, test_definition, temp_directory, source_path):
"""Runs image_export on a storage media image.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
source_path (str): path of the source.
Returns:
bool: True if image_export ran successfully.
"""
output_file_path = os.path.join(temp_directory, u'export')
output_options = [u'-w {0:s}'.format(output_file_path)]
output_options = u' '.join(output_options)
stdout_file = os.path.join(
temp_directory, u'{0:s}-image_export.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, u'{0:s}-image_export.err'.format(test_definition.name))
command = u'{0:s} {1:s} {2:s} > {3:s} 2> {4:s}'.format(
self._image_export_path, output_options, source_path, stdout_file,
stderr_file)
logging.info(u'Running: {0:s}'.format(command))
result = self._RunCommand(command)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
# TODO: hash the files.
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.filter_file = test_definition_reader.GetConfigValue(
test_definition.name, u'filter_file')
test_definition.source = test_definition_reader.GetConfigValue(
test_definition.name, u'source')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error(u'No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
# Extract files with image_export.
if not self._RunImageExport(
test_definition, temp_directory, source_path):
return False
return True
class OutputTestCase(TestCase):
"""Class that implements the output test case.
The output test case runs psort on a storage file to its various
output formats.
"""
NAME = u'output'
def __init__(
self, tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=False):
"""Initializes a test case object.
Args:
tools_path (str): path to the plaso tools.
test_sources_path (str): path to the test sources.
test_references_path (str): path to the test references.
test_results_path (str): path to store test results.
debug_output (Optional[bool]): True if debug output should be generated.
"""
super(OutputTestCase, self).__init__(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=debug_output)
self._InitializePsortPath()
def _CompareOutputFile(self, test_definition, temp_directory):
"""Compares the output file with a reference output file.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
Returns:
      bool: True if the output files are identical.
"""
if test_definition.output_format not in (
u'dynamic', u'json', u'json_line', u'l2tcsv', u'l2ttln', u'rawpy',
u'tln'):
      logging.error(u'Unsupported output format: {0:s}'.format(
test_definition.output_format))
return False
output_file_path = os.path.join(temp_directory, test_definition.output_file)
# TODO: add support to compare output by SHA-256.
result = False
if test_definition.reference_output_file:
reference_output_file_path = test_definition.reference_output_file
if self._test_references_path:
reference_output_file_path = os.path.join(
self._test_references_path, reference_output_file_path)
if not os.path.exists(reference_output_file_path):
logging.error(u'No such reference output file: {0:s}'.format(
reference_output_file_path))
return False
with open(reference_output_file_path, 'r') as reference_output_file:
with open(output_file_path, 'r') as output_file:
differences = list(difflib.unified_diff(
reference_output_file.readlines(), output_file.readlines(),
fromfile=reference_output_file_path, tofile=output_file_path))
if not differences:
result = True
return result
def _RunPsortWithOutputOptions(
self, test_definition, temp_directory, storage_file):
"""Runs psort with the output options specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
temp_directory (str): name of a temporary directory.
storage_file (str): path of the storage file.
Returns:
bool: True if psort ran successfully.
"""
output_options = test_definition.output_options
if test_definition.output_format:
output_options.append(u'-o {0:s}'.format(test_definition.output_format))
output_file_path = None
if test_definition.output_file:
output_file_path = os.path.join(
temp_directory, test_definition.output_file)
output_options.append(u'-w {0:s}'.format(output_file_path))
output_options = u' '.join(output_options)
stdout_file = os.path.join(
temp_directory, u'{0:s}-psort.out'.format(test_definition.name))
stderr_file = os.path.join(
temp_directory, u'{0:s}-psort.err'.format(test_definition.name))
command = u'{0:s} {1:s} {2:s} > {3:s} 2> {4:s}'.format(
self._psort_path, output_options, storage_file, stdout_file,
stderr_file)
logging.info(u'Running: {0:s}'.format(command))
result = self._RunCommand(command)
if self._debug_output:
with open(stderr_file, 'rb') as file_object:
output_data = file_object.read()
print(output_data)
if output_file_path and os.path.exists(output_file_path):
shutil.copy(output_file_path, self._test_results_path)
if os.path.exists(stdout_file):
shutil.copy(stdout_file, self._test_results_path)
if os.path.exists(stderr_file):
shutil.copy(stderr_file, self._test_results_path)
return result
def ReadAttributes(self, test_definition_reader, test_definition):
"""Reads the test definition attributes into to the test definition.
Args:
test_definition_reader (TestDefinitionReader): test definition reader.
test_definition (TestDefinition): test definition.
Returns:
bool: True if the read was successful.
"""
test_definition.output_file = test_definition_reader.GetConfigValue(
test_definition.name, u'output_file')
test_definition.output_format = test_definition_reader.GetConfigValue(
test_definition.name, u'output_format')
test_definition.output_options = test_definition_reader.GetConfigValue(
test_definition.name, u'output_options')
if test_definition.output_options is None:
test_definition.output_options = []
elif isinstance(test_definition.output_options, STRING_TYPES):
test_definition.output_options = test_definition.output_options.split(
u',')
test_definition.reference_output_file = (
test_definition_reader.GetConfigValue(
test_definition.name, u'reference_output_file'))
test_definition.source = test_definition_reader.GetConfigValue(
test_definition.name, u'source')
return True
def Run(self, test_definition):
"""Runs the test case with the parameters specified by the test definition.
Args:
test_definition (TestDefinition): test definition.
Returns:
bool: True if the test ran successfully.
"""
source_path = test_definition.source
if self._test_sources_path:
source_path = os.path.join(self._test_sources_path, source_path)
if not os.path.exists(source_path):
logging.error(u'No such source: {0:s}'.format(source_path))
return False
with TempDirectory() as temp_directory:
if not self._RunPsortWithOutputOptions(
test_definition, temp_directory, source_path):
return False
# Compare output file with a reference output file.
if test_definition.output_file and test_definition.reference_output_file:
if not self._CompareOutputFile(test_definition, temp_directory):
return False
return True
TestCasesManager.RegisterTestCases([
ExtractAndOutputTestCase, ExtractAndTagTestCase, ImageExportTestCase,
OutputTestCase])
def Main():
"""The main function."""
argument_parser = argparse.ArgumentParser(
description=u'End-to-end test launcher.', add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
argument_parser.add_argument(
u'-c', u'--config', dest=u'config_file', action=u'store',
metavar=u'CONFIG_FILE', default=None,
help=u'path of the test configuration file.')
argument_parser.add_argument(
u'--debug', dest=u'debug_output', action=u'store_true', default=False,
help=u'enable debug output.')
argument_parser.add_argument(
u'-h', u'--help', action=u'help',
help=u'show this help message and exit.')
argument_parser.add_argument(
u'--references-directory', u'--references_directory', action=u'store',
metavar=u'DIRECTORY', dest=u'references_directory', type=str,
default=None, help=(
u'The location of the directory where the test references are '
u'stored.'))
argument_parser.add_argument(
u'--results-directory', u'--results_directory', action=u'store',
metavar=u'DIRECTORY', dest=u'results_directory', type=str,
default=None, help=(
u'The location of the directory where to store the test results.'))
argument_parser.add_argument(
u'--sources-directory', u'--sources_directory', action=u'store',
metavar=u'DIRECTORY', dest=u'sources_directory', type=str,
default=None, help=(
u'The location of the directory where the test sources are stored.'))
argument_parser.add_argument(
u'--tools-directory', u'--tools_directory', action=u'store',
metavar=u'DIRECTORY', dest=u'tools_directory', type=str,
default=None, help=u'The location of the plaso tools directory.')
options = argument_parser.parse_args()
if not options.config_file:
options.config_file = os.path.dirname(__file__)
options.config_file = os.path.dirname(options.config_file)
options.config_file = os.path.join(
options.config_file, u'config', u'end-to-end.ini')
if not os.path.exists(options.config_file):
print(u'No such config file: {0:s}.'.format(options.config_file))
print(u'')
return False
logging.basicConfig(
format=u'[%(levelname)s] %(message)s', level=logging.INFO)
tools_path = options.tools_directory
if not tools_path:
tools_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)), u'tools')
test_sources_path = options.sources_directory
if test_sources_path and not os.path.isdir(test_sources_path):
print(u'No such sources directory: {0:s}.'.format(test_sources_path))
print(u'')
return False
test_references_path = options.references_directory
if test_references_path and not os.path.isdir(test_references_path):
print(u'No such references directory: {0:s}.'.format(test_references_path))
print(u'')
return False
test_results_path = options.results_directory
if not test_results_path:
test_results_path = os.getcwd()
if not os.path.isdir(test_results_path):
print(u'No such results directory: {0:s}.'.format(test_results_path))
print(u'')
return False
tests = []
with open(options.config_file) as file_object:
test_definition_reader = TestDefinitionReader(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=options.debug_output)
for test_definition in test_definition_reader.Read(file_object):
tests.append(test_definition)
test_launcher = TestLauncher(
tools_path, test_sources_path, test_references_path,
test_results_path, debug_output=options.debug_output)
test_launcher.ReadDefinitions(options.config_file)
failed_tests = test_launcher.RunTests()
if failed_tests:
print(u'Failed tests:')
for failed_test in failed_tests:
print(u' {0:s}'.format(failed_test))
print(u'')
return False
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
| [
"[email protected]"
] | |
a4687640887e3eaa056be17102156fb6c73301a5 | 71ef2ddc4a10c9f6be7b938dadbd25bb5accbe89 | /bots/MyBot_alt2/MyBot_alt2.py | c5bd7445daa98e9422d2b37244815251b1dfaf0c | [] | no_license | NicoKNL/halite3 | e06b72e68c102d5cf863b6efd7c2ef5b0c161ea2 | 60ccd9a36e13b447a481e242762379d38e71c1b1 | refs/heads/master | 2020-04-07T22:01:18.804779 | 2018-12-20T00:47:17 | 2018-12-20T00:47:17 | 158,751,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,186 | py | #!/usr/bin/env python3
# Python 3.6
# Import the Halite SDK, which will let you interact with the game.
import hlt
# This library contains constant values.
from hlt import constants
# This library contains direction metadata to better interface with the game.
from hlt.positionals import Direction, Position
# This library allows you to generate random numbers.
# Logging allows you to save messages for yourself. This is required because the regular STDOUT
# (# print statements) are reserved for the engine-bot communication.
import logging
""" <<<Game Begin>>> """
# This game object contains the initial game state.
game = hlt.Game()
# At this point "game" variable is populated with initial map data.
# This is a good place to do computationally expensive start-up pre-processing.
# As soon as you call "ready" function below, the 2 second per turn timer will start.
game.ready("ALT2")
# Now that your bot is initialized, save a message to yourself in the log file with some important information.
# Here, you log here your id, which you can always fetch from the game object by using my_id.
logging.info("Successfully created bot! My Player ID is {}.".format(game.my_id))
FILL_RATIO = 0.9  # For now we accept a 90% fill rate
INF = 99999999
directions = {"n": (0, -1),
"e": (1, 0),
"s": (0, 1),
"w": (-1, 0)}
ship_actions = {}
ship_directions = {}
def shipyard_cleanup(game_map, ship, shipyard):
if ship in ship_actions.keys():
action = not ship_actions[ship]
else:
action = True
ship_actions[ship] = action
if ship in ship_directions.keys():
turn_in = ship_directions[ship]
elif ship.position == shipyard.position:
turn_in = False
else:
turn_in = False
ship_directions[ship] = turn_in
if action:
if turn_in:
target = shipyard.position
else:
target = ship.position
max_value = game_map[target.x][target.y].w
staying_value = max_value // 4
moving_cost = max_value // 10
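# Halite III economics: a ship collects 25% of a cell's halite per turn and
# pays 10% of the cell's halite to move off it, hence the //4 and //10.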
if moving_cost < ship.halite_amount or moving_cost == 0:
for d in directions.values():
pos = game_map.normalize(ship.position.directional_offset(d))
# logging.debug(f"pos: {pos} | {game_map.calculate_distance(ship.position, shipyard.position) <= 5} | {game_map[pos.x][pos.y].w}")
if game_map.calculate_distance(pos, shipyard.position) <= 5:
w = game_map[pos.x][pos.y].w
if (w // 4) - moving_cost > staying_value and w > max_value:
max_value = w
target = pos
if game_map.calculate_distance(ship.position, shipyard.position) == 5:
ship_directions[ship] = True # Start turning in
else:
target = ship.position
logging.debug(f"decision: {target}")
return target
def closest_cell_with_ratio_fill(game_map, ship):
minimum = min(0.25 * game_map.max_halite, 4 * (constants.MAX_HALITE - ship.halite_amount))
logging.debug(f"res max: {game_map.max_halite} - minimum: {minimum}")
current_offset = 1
found = False
pos = ship.position
target = None
# Search with an expanding ring
while not found and current_offset <= game_map.height: # possible max search range
offsets = list(range(-current_offset, current_offset + 1))
offsets = [(x, y) for x in offsets for y in offsets]
for offset in offsets:
# # print(f"offset: {offset}")
cell_pos = game_map.normalize(Position(pos.x - offset[0], pos.y - offset[1]))
# print(f"cell_pos: {cell_pos}")
cell = game_map[cell_pos]
if not target and cell.halite_amount >= minimum:
target = cell_pos
found = True
elif cell.halite_amount >= minimum and game_map.calculate_distance(ship.position, cell_pos) < game_map.calculate_distance(ship.position, target):
target = cell_pos
current_offset += 1
if not target:
target = ship.position
logging.info("target not found!")
else:
logging.info(f"target found!: {target}")
return target
def weighted_cleanup(game_map, ship, shipyard):
minimum = 30
current_offset = 1
running_sum = 0
found = False
targets = []
# Search with an expanding ring
while not found and current_offset <= game_map.height: # possible max search range
offsets = list(range(-current_offset, current_offset + 1))
offsets = [(x, y) for x in offsets for y in offsets]
for offset in offsets:
cell_pos = game_map.normalize(shipyard.position + Position(*offset))
# print(f"cell_pos: {cell_pos}")
cell = game_map[cell_pos]
if cell.halite_amount >= minimum and not cell.is_occupied:
targets.append(cell_pos)
if len(targets) > 3:
found = True
current_offset += 1
best_target = (None, INF) # For now best => closest
for target in targets:
distance = game_map.calculate_distance(ship.position, target)
if distance < best_target[1]:
best_target = (target, distance)
return best_target[0]
def dijkstra_a_to_b(game_map, source, target, offset=1):
if source == target:
return Direction.Still
dx = abs(target.x - source.x)
dy = abs(target.y - source.y)
xdir = 1 if target.x > source.x else -1
ydir = 1 if target.y > source.y else -1
# Valid x and y positions in range
if xdir == 1:
rx = range(source.x - offset, target.x + offset + 1)
else:
rx = range(target.x - offset, source.x + offset + 1)
if ydir == 1:
ry = range(source.y - offset, target.y + offset + 1)
else:
ry = range(target.y - offset, source.y + offset + 1)
# initialize distances
distance_map = {
source: {
"distance": 0,
"previous": None}
}
queue = [source]
for offset_x in range(-offset, dx + offset + 1):
for offset_y in range(-offset, dy + offset + 1):
if offset_x == 0 and offset_y == 0:
continue
x = source.x + offset_x * xdir
y = source.y + offset_y * ydir
position = Position(x, y)
distance_map[position] = {
"distance": INF * 32,
"previouis": None
}
queue.append(position)
# Dijkstra
# Calculating the cheapest path to each respective node in the grid
while len(queue):
# Take the item in the queue with the lowest distance and remove it from the queue
node = sorted(queue, key=lambda position: distance_map[position]["distance"])[0]
queue.pop(queue.index(node))
# For each neighbouring position
for pos in node.get_surrounding_cardinals():
pos = game_map.normalize(pos) # Ensure position is in normalized coordinates
# validate cell is within search bounds
if pos.x in rx and pos.y in ry:
neighbour = game_map[pos]
# Calculate the cost of traveling to that neighbour
if game_map[pos].is_occupied:
neighbour_weight = INF
else:
neighbour_weight = neighbour.halite_amount
# neighbour_weight = neighbour.halite_amount if not game_map[pos].is_occupied else INF
# logging.debug(f"Neighbour: {pos} | {neighbour_weight} | occupied: {game_map[pos].is_occupied} | ship id {game_map[pos].ship}")
# Calculate the distance of the path to the neighbour
dist_to_neighbour = distance_map[node]["distance"] + neighbour_weight
# If path is shorter than any other current path to that neighbour, then we update the path to that node
if dist_to_neighbour < distance_map[pos]["distance"]:
distance_map[pos]["distance"] = dist_to_neighbour
distance_map[pos]["previous"] = node
# Traverse from the target to the source by following all "previous" nodes that we calculated
path_node = target
while path_node != source:
prev_path_node = distance_map[path_node]["previous"]
if prev_path_node == source:
for d in Direction.get_all_cardinals():
if game_map.normalize(source.directional_offset(d)) == path_node:
return d
path_node = prev_path_node
def safe_greedy_move(game_map, source, target):
safe_moves = []
# Evaluate if standing still is safe
if game_map.position_is_safe(source):
safe_moves.append(Direction.Still)
# Evaluate if any of the cardinal directions are safe
for direction in Direction.get_all_cardinals():
new_position = game_map.normalize(source.directional_offset(direction))
if game_map.position_is_safe(new_position):
safe_moves.append(direction)
# The scenario where we are fucked
if not safe_moves:
return Direction.Still
# Else we greedily check which move brings us closest to our target
closest_to_target = (None, INF)
for direction in safe_moves:
position = game_map.normalize(source.directional_offset(direction))
distance = game_map.calculate_distance(position, target)
if distance < closest_to_target[1]:
closest_to_target = (direction, distance)
# Returns direction
return closest_to_target[0]
""" <<<Game Loop>>> """
while True:
# This loop handles each turn of the game. The game object changes every turn, and you refresh that state by
# running update_frame().
game.update_frame()
me = game.me
game_map = game.game_map
# A command queue holds all the commands you will run this turn. You build this list up and submit it at the
# end of the turn.
ship_queue = me.get_ships()
command_queue = []
new_ship_positions = []
ship_position_map = [] # (ship, target)
# First we check if we are at the end of the game and the ship needs to start coming home
ship_queue_tmp = []
for ship in ship_queue:
if ship.should_turn_in(game_map, game.turn_number) and ship.can_move(game_map[ship]):
target = me.shipyard.position
new_dir = dijkstra_a_to_b(game_map, ship.position, target)
# Final check if the move is actually safe as Dijkstra can result in an unsafe move when 1 unit away from target
new_position = game_map.normalize(ship.position.directional_offset(new_dir))
if not game_map.position_is_safe(new_position):
new_dir = safe_greedy_move(game_map, ship.position, target)
new_position = game_map.normalize(ship.position.directional_offset(new_dir))
# Already move the ship in the game map to help prevent collisions
logging.debug(f"SHIP {ship.id} WANTS TO MOVE: {ship.position} - {new_dir}")
game_map[ship].mark_safe()
game_map[new_position].mark_unsafe(ship)
# And finally add the command to the queue
command_queue.append(ship.move(new_dir))
else:
ship_queue_tmp.append(ship)
ship_queue = ship_queue_tmp
# Evaluated all the ships that can't move
ship_queue_tmp = []
for ship in ship_queue:
current_cell = game_map[ship]
if not ship.can_move(current_cell):
new_dir = Direction.Still
command_queue.append(ship.move(new_dir))
else:
ship_queue_tmp.append(ship)
ship_queue = ship_queue_tmp
# Then evaluate all ships that don't want to move and are in a safe spot
ship_queue_tmp = []
for ship in ship_queue:
current_cell = game_map[ship]
logging.debug(f"SHOULD MOVE: {not ship.should_move(current_cell)} | {game_map.position_is_safe(current_cell)}")
if not ship.should_move(current_cell) and not game_map.enemy_is_close(current_cell):
new_dir = Direction.Still
logging.debug(f"SHIP {ship.id} WANTS TO STAY: {ship.position} - {new_dir}")
command_queue.append(ship.move(new_dir))
else:
ship_queue_tmp.append(ship)
ship_queue = ship_queue_tmp
# Finally start resolving all ships that CAN move, and want or should move
for ship in ship_queue:
current_cell = game_map[ship]
if ship.halite_amount >= FILL_RATIO * constants.MAX_HALITE:
# Case: We need to turn in our halite
target = me.shipyard.position
else:
# Case: Gather more resources
target = weighted_cleanup(game_map, ship, me.shipyard)
new_dir = dijkstra_a_to_b(game_map, ship.position, target)
# Final check if the move is actually safe as Dijkstra can result in an unsafe move when 1 unit away from target
new_position = game_map.normalize(ship.position.directional_offset(new_dir))
if not game_map.position_is_safe(new_position):
new_dir = safe_greedy_move(game_map, ship.position, target)
new_position = game_map.normalize(ship.position.directional_offset(new_dir))
# Already move the ship in the game map to help prevent collisions
logging.debug(f"SHIP {ship.id} WANTS TO MOVE: {ship.position} - {new_dir}")
game_map[ship].mark_safe()
game_map[new_position].mark_unsafe(ship)
# And finally add the command to the queue
command_queue.append(ship.move(new_dir))
# Spawning a ship
if game.turn_number <= constants.MAX_TURNS - 150 and me.halite_amount >= constants.SHIP_COST and not game_map[me.shipyard].is_occupied and game_map.total_halite / max(game_map.ship_count, 1) > 4000 and game_map.ship_count < 50:
command_queue.append(me.shipyard.spawn())
# if game.turn_number > 10:
# time.sleep(2)
# Sending moves to end the turn
game.end_turn(command_queue)
| [
"[email protected]"
] | |
d5417d605f2204782ab1b6dd38bcb7262adc6354 | 99ae6372a5a5518543f9863a33ab21218a3a0768 | /tests/test-all.py | 31554599186717cf11032773e371545ac5143bde | [] | no_license | DANS-KNAW/parthenos-widget | 7b3578a37402069e99da8eaf0d8cf52f32c12231 | b549b76b7f16f1338cd80c6af7952963b3a8dd63 | refs/heads/master | 2022-07-17T23:07:25.238193 | 2021-11-03T08:36:55 | 2021-11-03T08:36:55 | 84,067,894 | 0 | 3 | null | 2021-11-03T08:36:55 | 2017-03-06T11:56:28 | JavaScript | UTF-8 | Python | false | false | 684 | py | #!/usr/bin/python
from __future__ import print_function, absolute_import
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
#import pytest
from tests.config import MATRIX
from parthenos.core.datatojson import *
import uuid
import httpretty
import requests
import pandas as pd
import simplejson
import json
if __name__ == '__main__':
print ('%s' % contents(0))
print ('%s' % gettopics("SOCIAL SCIENCE"))
print ('%s' % gettopics("LANGUAGE STUDIES"))
# print ('%s' % policies(4))
# (df, fairtest) = fair(4)
# print ('%s' % fairtest)
# x = fairfilter(df, fairtest, 'fair')
# print ('%s' % x.to_html())
| [
"[email protected]"
] | |
c00bff8a97f2f0cd605b081aab99214bd019e9fd | fe42f1c1eefb2069eda1dd98821ba6049fb4f01a | /ML/P3DataAnalysisPandas/P4Combining.py | 30cbbcdbd467feed161647f9dcf1775382909e7d | [] | no_license | hvn2001/LearnPython | c1b13f6685e6e62b3c9b612e88e624925f43eb6e | 323595df8d69e84873f74819a36b5eb36b017773 | refs/heads/master | 2021-03-30T06:26:55.110963 | 2020-04-10T16:13:36 | 2020-04-10T16:13:36 | 248,025,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | import pandas as pd
print('------A. Concatenation------')
df1 = pd.DataFrame({'c1': [1, 2], 'c2': [3, 4]},
index=['r1', 'r2'])
df2 = pd.DataFrame({'c1': [5, 6], 'c2': [7, 8]},
index=['r1', 'r2'])
df3 = pd.DataFrame({'c1': [5, 6], 'c2': [7, 8]})
concat = pd.concat([df1, df2], axis=1)
print('{}\n'.format(concat))
'''
c1 c2 c1 c2
r1 1 3 5 7
r2 2 4 6 8
'''
concat = pd.concat([df2, df1, df3])
print('{}\n'.format(concat))
'''
c1 c2
r1 5 7
r2 6 8
r1 1 3
r2 2 4
0 5 7
1 6 8
'''
concat = pd.concat([df1, df3], axis=1)
print('{}\n'.format(concat))
'''
c1 c2 c1 c2
r1 1.0 3.0 NaN NaN
r2 2.0 4.0 NaN NaN
0 NaN NaN 5.0 7.0
1 NaN NaN 6.0 8.0
'''
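# Note: with axis=1, concat aligns on the row index; df1 is indexed r1/r2
# while df3 kept the default 0/1 index, so the non-overlapping cells are NaN.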
print('------B. Merging------')
mlb_df1 = pd.DataFrame({'name': ['john doe', 'al smith', 'sam black', 'john doe'],
'pos': ['1B', 'C', 'P', '2B'],
'year': [2000, 2004, 2008, 2003]})
mlb_df2 = pd.DataFrame({'name': ['john doe', 'al smith', 'jack lee'],
'year': [2000, 2004, 2012],
'rbi': [80, 100, 12]})
print('{}\n'.format(mlb_df1))
'''
name pos year
0 john doe 1B 2000
1 al smith C 2004
2 sam black P 2008
3 john doe 2B 2003
'''
print('{}\n'.format(mlb_df2))
'''
name rbi year
0 john doe 80 2000
1 al smith 100 2004
2 jack lee 12 2012
'''
mlb_merged = pd.merge(mlb_df1, mlb_df2)
print('{}\n'.format(mlb_merged))
'''
name pos year rbi
0 john doe 1B 2000 80
1 al smith C 2004 100
'''
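# Note: pd.merge defaults to an inner join on the shared columns ('name' and
# 'year' here), so rows without a match in both frames are dropped.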
print('------Ex: ------')
def concat_rows(df1, df2):
row_concat = pd.concat([df1, df2])
return row_concat
def concat_cols(df1, df2):
col_concat = pd.concat([df1, df2], axis=1)
return col_concat
def merge_dfs(df1, df2):
merged_df = pd.merge(df1, df2)
return merged_df
| [
"[email protected]"
] | |
40c1b8c3ff03082a68e3d906964553c4da5afa44 | 0985dfc7b53f6bb80a6ee9c7b9cad4d7c31d2013 | /dform/admin.py | bcef11df218893cdb425e1167f9e615755c14996 | [
"MIT"
] | permissive | yonghuming/django-dform | 6f237020573f5e5a5e1d8ed1a58ed5b944f31aef | 3a8cb2ee61b5ea4719e6fc3bfb9ede66f468831e | refs/heads/master | 2021-01-17T06:30:52.467715 | 2015-10-21T19:22:03 | 2015-10-21T19:22:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,128 | py | from django.contrib import admin
from django.core.urlresolvers import reverse, NoReverseMatch
from awl.admintools import make_admin_obj_mixin
from awl.rankedmodel.admintools import admin_link_move_up, admin_link_move_down
from .fields import FIELD_CHOICES_DICT
from .models import (Survey, SurveyVersion, Question, QuestionOrder, Answer,
AnswerGroup)
# ============================================================================
def _questions_link(version, show_reorder=True):
num_q = Question.objects.filter(survey_versions=version).count()
if num_q == 0:
return ''
plural = ''
if num_q > 1:
plural = 's'
show = reverse('admin:dform_question_changelist')
reorder = reverse('admin:dform_questionorder_changelist')
urls = [
'<a href="%s?survey_versions__id=%s">%s Question%s</a>' % (show,
version.id, num_q, plural)
]
if show_reorder:
urls.append(
'<a href="%s?survey_version__id=%s">Reorder</a>' % (reorder,
version.id)
)
return ' | '.join(urls)
def _answers_link(version):
num_a = Answer.objects.filter(answer_group__survey_version=version).count()
if num_a == 0:
return ''
plural = ''
if num_a > 1:
plural = 's'
link = reverse('admin:dform_answer_changelist')
url = '<a href="%s?survey_version__id=%s">%s Answer%s</a>' % (link,
version.id, num_a, plural)
return url
# ============================================================================
# Surveys
# ============================================================================
@admin.register(Survey)
class SurveyAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'version_num', 'show_actions',
'show_versions', 'show_questions', 'show_answers')
def version_num(self, obj):
return '%s' % obj.latest_version.version_num
version_num.short_description = 'Latest Version'
def show_actions(self, obj):
actions = []
if obj.latest_version.is_editable():
url = reverse('dform-edit-survey', args=(obj.latest_version.id,))
actions.append('<a href="%s">Edit Survey</a>' % url)
else:
url = reverse('dform-new-version', args=(obj.id,))
actions.append('<a href="%s">New Version</a>' % url)
try:
url = reverse('dform-sample-survey', args=(obj.latest_version.id,))
actions.append('<a href="%s">View Sample</a>' % url)
except NoReverseMatch:
# sample-survey view isn't guaranteed to be there
pass
try:
url = reverse('dform-survey', args=(obj.latest_version.id,))
actions.append('<a href="%s">Answer Survey</a>' % url)
except NoReverseMatch:
# survey view isn't guaranteed to be there
pass
return ', '.join(actions)
show_actions.short_description = 'Actions'
show_actions.allow_tags = True
def show_versions(self, obj):
num_v = SurveyVersion.objects.filter(survey=obj).count()
link = reverse('admin:dform_surveyversion_changelist')
url = '<a href="%s?survey__id=%s">%s Versions</a>' % (link, obj.id,
num_v)
return url
show_versions.short_description = 'Versions'
show_versions.allow_tags = True
def show_questions(self, obj):
return _questions_link(obj.latest_version)
show_questions.short_description = 'Questions'
show_questions.allow_tags = True
def show_answers(self, obj):
return _answers_link(obj.latest_version)
show_answers.short_description = 'Answers'
show_answers.allow_tags = True
mixin = make_admin_obj_mixin('SurveyVersionMixin')
mixin.add_obj_link('show_survey', 'survey')
@admin.register(SurveyVersion)
class SurveyVersionAdmin(admin.ModelAdmin, mixin):
list_display = ('id', 'show_survey', 'version_num', 'show_actions',
'show_questions', 'show_answers')
def show_actions(self, obj):
actions = []
if obj.is_editable():
url = reverse('dform-edit-survey', args=(obj.id,))
actions.append('<a href="%s">Edit Survey</a>' % url)
try:
url = reverse('dform-sample-survey', args=(obj.id,))
actions.append('<a href="%s">View Sample</a>' % url)
except NoReverseMatch:
# view sample isn't guaranteed to be there
pass
try:
url = reverse('dform-survey', args=(obj.id,))
actions.append('<a href="%s">Answer Survey</a>' % url)
except NoReverseMatch:
# survey view isn't guaranteed to be there
pass
return ', '.join(actions)
show_actions.short_description = 'Actions'
show_actions.allow_tags = True
def show_questions(self, obj):
return _questions_link(obj)
show_questions.short_description = 'Questions'
show_questions.allow_tags = True
def show_answers(self, obj):
return _answers_link(obj)
show_answers.short_description = 'Answers'
show_answers.allow_tags = True
# ============================================================================
# Questions
# ============================================================================
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
list_display = ('id', 'text', 'field_key', 'required', 'show_reorder',
'show_answers')
def show_reorder(self, obj):
link = reverse('admin:dform_questionorder_changelist')
url = '<a href="%s?survey_version__id=%s">Reorder</a>' % (link,
obj.survey.latest_version.id)
return url
show_reorder.short_description = 'Reorder'
show_reorder.allow_tags = True
def show_answers(self, obj):
num_a = Answer.objects.filter(question=obj).count()
if num_a == 0:
return ''
plural = ''
if num_a > 1:
plural = 's'
link = reverse('admin:dform_answer_changelist')
url = '<a href="%s?question__id=%s">%s Answer%s</a>' % (link, obj.id,
num_a, plural)
return url
show_answers.short_description = 'Answers'
show_answers.allow_tags = True
@admin.register(QuestionOrder)
class QuestionOrderAdmin(admin.ModelAdmin):
list_display = ('id', 'survey_version', 'show_text', 'move_up',
'move_down')
def show_text(self, obj):
return obj.question.text
show_text.short_description = 'Question Text'
def move_up(self, obj):
return admin_link_move_up(obj, 'Up')
move_up.allow_tags = True
move_up.short_description = 'Move Up'
def move_down(self, obj):
return admin_link_move_down(obj, 'Down')
move_down.allow_tags = True
move_down.short_description = 'Move Down'
# ============================================================================
# Answers
# ============================================================================
mixin = make_admin_obj_mixin('AnswerMixin')
mixin.add_obj_link('show_group', 'answer_group',
display='AnswerGroup.id={{obj.id}}')
mixin.add_obj_link('show_question', 'question',
display='Question.id={{obj.id}}')
@admin.register(Answer)
class AnswerAdmin(admin.ModelAdmin, mixin):
list_display = ('id', 'show_group', 'show_question', 'show_text',
'show_field_key', 'value')
def show_text(self, obj):
return obj.question.text
show_text.short_description = 'Question Text'
def show_field_key(self, obj):
return FIELD_CHOICES_DICT[obj.question.field_key]
show_field_key.short_description = 'Field Key'
mixin = make_admin_obj_mixin('AnswerGroupMixin')
mixin.add_obj_link('show_data', 'group_data')
mixin.add_obj_link('show_version', 'survey_version',
display='SurveyVersion.id={{obj.id}}')
@admin.register(AnswerGroup)
class AnswerGroupAdmin(admin.ModelAdmin, mixin):
list_display = ('id', 'updated', 'show_version', 'show_data',
'show_questions', 'show_answers', 'show_actions')
def show_questions(self, obj):
return _questions_link(obj.survey_version, False)
show_questions.short_description = 'Questions'
show_questions.allow_tags = True
def show_answers(self, obj):
num_a = Answer.objects.filter(answer_group=obj).count()
if num_a == 0:
return ''
plural = ''
if num_a > 1:
plural = 's'
link = reverse('admin:dform_answer_changelist')
url = '<a href="%s?answer_group__id=%s">%s Answer%s</a>' % (link,
obj.id, num_a, plural)
return url
show_answers.short_description = 'Answers'
show_answers.allow_tags = True
def show_actions(self, obj):
try:
url = reverse('dform-survey-with-answers', args=(
obj.survey_version.id, obj.id))
return '<a href="%s">Change Answers</a>' % url
except NoReverseMatch:
# view survey-with-answers isn't guaranteed to be there
return ''
show_actions.short_description = 'Actions'
show_actions.allow_tags = True
| [
"[email protected]"
] | |
b3e5a17f360ef8e8b663ce5a0ab75242da5653b7 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-2633.py | 5aadfd5c6ad8f97ff1c98cd44fa7abe439d9a7dd | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,350 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"[email protected]"
] | |
25a67c4819e5f76e8597007afbef568d28dcd9f0 | 63c8b9227a6b3178d918769042ecb060acc557be | /symphony/cli/pyinventory/graphql/add_service_endpoint_mutation.py | 4f7d20fa43ca5e0c5407290c9053e8a3f6f0fe27 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | snwfdhmp/magma | 7c4898db68d2668fd39ed25f73bb9a2bc5959066 | 8b3ff20a2717337a83c8ef531fa773a851d2e54d | refs/heads/master | 2020-12-06T09:06:25.806497 | 2020-01-07T18:27:09 | 2020-01-07T18:28:51 | 232,418,366 | 1 | 0 | NOASSERTION | 2020-01-07T21:12:28 | 2020-01-07T21:12:27 | null | UTF-8 | Python | false | false | 3,003 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from functools import partial
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import dataclass_json
from marshmallow import fields as marshmallow_fields
from .datetime_utils import fromisoformat
DATETIME_FIELD = field(
metadata={
"dataclasses_json": {
"encoder": datetime.isoformat,
"decoder": fromisoformat,
"mm_field": marshmallow_fields.DateTime(format="iso"),
}
}
)
def enum_field(enum_type):
def encode_enum(value):
return value.value
def decode_enum(t, value):
return t(value)
return field(
metadata={
"dataclasses_json": {
"encoder": encode_enum,
"decoder": partial(decode_enum, enum_type),
}
}
)
class ServiceEndpointRole(Enum):
CONSUMER = "CONSUMER"
PROVIDER = "PROVIDER"
@dataclass_json
@dataclass
class AddServiceEndpointInput:
id: str
portId: str
role: ServiceEndpointRole = enum_field(ServiceEndpointRole)
@dataclass_json
@dataclass
class AddServiceEndpointMutation:
__QUERY__ = """
mutation AddServiceEndpointMutation($input: AddServiceEndpointInput!) {
addServiceEndpoint(input: $input) {
id
name
externalId
customer {
id
name
externalId
}
endpoints {
id
port {
id
}
role
}
links {
id
}
}
}
"""
@dataclass_json
@dataclass
class AddServiceEndpointMutationData:
@dataclass_json
@dataclass
class Service:
@dataclass_json
@dataclass
class Customer:
id: str
name: str
externalId: Optional[str] = None
@dataclass_json
@dataclass
class ServiceEndpoint:
@dataclass_json
@dataclass
class EquipmentPort:
id: str
id: str
port: EquipmentPort
role: ServiceEndpointRole = enum_field(ServiceEndpointRole)
@dataclass_json
@dataclass
class Link:
id: str
id: str
name: str
endpoints: List[ServiceEndpoint]
links: List[Link]
externalId: Optional[str] = None
customer: Optional[Customer] = None
addServiceEndpoint: Optional[Service] = None
data: Optional[AddServiceEndpointMutationData] = None
errors: Any = None
@classmethod
# fmt: off
def execute(cls, client, input: AddServiceEndpointInput):
# fmt: off
variables = {"input": input}
response_text = client.call(cls.__QUERY__, variables=variables)
return cls.from_json(response_text).data
| [
"[email protected]"
] | |
28e11970a757421df8a3c2d034a2856bde5b414f | 93582aa46c835b66a2117bf24178fd80236af89d | /setup.py | e2eaee39d2b4d1cd674afe84307252167e1f9eba | [] | no_license | collective/collective.leadmedia | 0fbe4e03421fcec6f026a80de80c4af28d2f218e | 5fb3749861fd21859ae84686dc29f877859de45b | refs/heads/master | 2023-08-24T01:19:19.470625 | 2019-07-23T13:30:53 | 2019-07-23T13:30:53 | 26,549,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | from setuptools import setup, find_packages
import os
version = '0.1'
setup(name='collective.leadmedia',
version=version,
description="Adds a slideshow to any dexterity folderish type.",
long_description=open("README.rst").read(),
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='Plone',
author='Andre Goncalves',
author_email='[email protected]',
url='https://github.com/collective/collective.leadmedia',
download_url='https://github.com/collective/collective.leadmedia/tarball/0.1',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['collective'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
setup_requires=["PasteScript"],
paster_plugins=["ZopeSkel"],
)
| [
"[email protected]"
] | |
326168d8de06212813ef98b555650a25305f7aab | fff561e0e4f351d85d038cf87569c23280622157 | /cmsplugin_cascade/generic/cms_plugins.py | 3eaaf072f99d2b3a564045fc617a550d4bb910eb | [
"MIT"
] | permissive | schacki/djangocms-cascade | 9d3e9176e54c7cca619fdc6917c38b1588bc7c88 | 2809f701a1cfa17a53539fac4d9dadaa5ebe40b7 | refs/heads/master | 2021-01-20T22:02:42.959467 | 2015-12-23T19:31:07 | 2015-12-23T19:31:07 | 42,931,185 | 0 | 0 | null | 2015-09-22T12:02:53 | 2015-09-22T12:02:52 | null | UTF-8 | Python | false | false | 1,881 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import widgets
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.fields import PartialFormField
from cmsplugin_cascade.plugin_base import CascadePluginBase
from cmsplugin_cascade.mixins import TransparentMixin
class SimpleWrapperPlugin(TransparentMixin, CascadePluginBase):
name = _("Simple Wrapper")
parent_classes = None
require_parent = False
allow_children = True
alien_child_classes = True
TAG_CHOICES = tuple((cls, _("<{}> – Element").format(cls))
for cls in ('div', 'span', 'section', 'article',)) + (('naked', _("Naked Wrapper")),)
glossary_fields = (
PartialFormField('tag_type',
widgets.Select(choices=TAG_CHOICES),
label=_("HTML element tag"),
help_text=_('Choose a tag type for this HTML element.')
),
)
@classmethod
def get_identifier(cls, instance):
identifier = super(SimpleWrapperPlugin, cls).get_identifier(instance)
tag_name = dict(cls.TAG_CHOICES).get(instance.glossary.get('tag_type'))
if tag_name:
return format_html('{0}{1}', identifier, tag_name)
return identifier
def get_render_template(self, context, instance, placeholder):
if instance.glossary.get('tag_type') == 'naked':
return 'cascade/generic/naked.html'
return 'cascade/generic/wrapper.html'
plugin_pool.register_plugin(SimpleWrapperPlugin)
class HorizontalRulePlugin(CascadePluginBase):
name = _("Horizontal Rule")
parent_classes = None
allow_children = False
tag_type = 'hr'
render_template = 'cascade/generic/single.html'
glossary_fields = ()
plugin_pool.register_plugin(HorizontalRulePlugin)
| [
"[email protected]"
] | |
0f60ebe14d9d2799e58cc2c5c412340c48ead03d | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/seqex/bundle_to_seqex_test.runfiles/pypi__nose_1_3_7/nose/case.py | cd5661e50fd58f7d63994967189c34fdb7209d2c | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 115 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__nose_1_3_7/nose/case.py | [
"[email protected]"
] | |
311b3d5d01adbf281ec2f810b8579072154079d4 | a32c2ee4e6b2b1c6f8db02320c4bd50b17940af5 | /modules/YiXinNotSlot/YiXinRegister3.8/YiXinRegister.py | d216ecbed0ac33026d944068a185a506a142af16 | [] | no_license | wszg5/studyGit | 93d670884d4cba7445c4df3a5def8085e5bf9ac0 | bebfc90bc38689990c2ddf52e5a2f7a02649ea00 | refs/heads/master | 2020-04-05T02:55:17.367722 | 2018-11-07T06:01:03 | 2018-11-07T06:01:03 | 156,494,390 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,553 | py | # coding:utf-8
import colorsys
import os
import random
import string
from PIL import Image
from imageCode import imageCode
from slot import Slot
from smsCode import smsCode
from uiautomator import Device
from Repo import *
from zservice import ZDevice
class YiXinRegister:
def __init__(self):
self.repo = Repo()
self.type = 'yixin'
def GenPassword(self, numOfNum=4, numOfLetter=4):
# pick numOfNum digits
# pick numOfLetter lowercase letters
# 选中numOfLetter个字母
slcLetter = [random.choice( string.lowercase ) for i in range( numOfLetter )]
slcChar = slcLetter + slcNum
genPwd = ''.join( [i for i in slcChar] )
return genPwd
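# Usage note (illustrative, output is random): GenPassword() returns the 4
# chosen lowercase letters followed by the 4 chosen digits, e.g. "kqxm7292";
# GenPassword(2, 2) would return a 4-character string of the same shape.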
def register(self, d, z, args, password):
str = d.info # 获取屏幕大小等信息
height = str["displayHeight"]
width = str["displayWidth"]
z.toast("开始注册")
d.server.adb.cmd( "shell", "pm clear im.yixin" ).communicate( ) # 清除缓存
d.server.adb.cmd( "shell", "am start -n im.yixin/.activity.WelcomeActivity" ).communicate( ) # 拉起易信
z.sleep( 18 )
z.heartbeat( )
if d( text='很抱歉,“易信”已停止运行。' ).exists:
d( text='确定' ).click( )
return 'fail'
if d( text='注册' ).exists:
d( text='注册' ).click()
z.sleep( 2 )
self.scode = smsCode(d.server.adb.device_serial())
while True:
material_cate_id = args['repo_material_id']
# material_time_limit = args['material_time_limit']
nicknameLsit = self.repo.GetMaterial(material_cate_id, 0, 1)
if len( nicknameLsit ) == 0:
d.server.adb.cmd( "shell",
"am broadcast -a com.zunyun.zime.toast --es msg \"素材库%s号仓库为空\"" % material_cate_id ).communicate( )
else:
break
nickname = nicknameLsit[0]['content']
while True:
z.heartbeat()
z.toast(u'开始获取手机号')
while True:
if d(resourceId='im.yixin:id/register_phone_number_edittext').exists:
d(resourceId='im.yixin:id/register_phone_number_edittext').click.bottomright()
number_cate_id = args['repo_number_id']
# number_time_limit = int(args['number_time_limit']) # interval between number pulls
exist_numbers = self.repo.GetNumber(number_cate_id, 0, 1, 'exist')
remain = 1 - len(exist_numbers)
normal_numbers = self.repo.GetNumber(number_cate_id, 0, remain, 'normal')
numbers = exist_numbers + normal_numbers
if len(numbers) == 0:
d.server.adb.cmd("shell",
"am broadcast -a com.zunyun.zime.toast --es msg \"电话号码%s号仓库为空\"" % number_cate_id).communicate()
else:
break
number = numbers[0]["number"]
if d(resourceId='im.yixin:id/register_phone_number_edittext').exists:
d(resourceId='im.yixin:id/register_phone_number_edittext').click()
try:
PhoneNumber = self.scode.GetPhoneNumber(self.scode.WECHAT_REGISTER, number) # get a number from the SMS-code platform
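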
except:
PhoneNumber = None
# PhoneNumber = self.scode.GetPhoneNumber(self.scode.WECHAT_REGISTER) # get a number from the SMS-code platform
if PhoneNumber is None:
z.toast(u'讯码查不无此号,重新获取')
continue
else:
z.toast(u'成功获取到手机号')
z.input(PhoneNumber)
if not d( text='中国', resourceId='im.yixin:id/tv_register_country' ).exists:
d( resourceId='im.yixin:id/tv_register_country' ).click( )
z.sleep( 1 )
while True:
if d( text='中国' ).exists:
d( text='中国' ).click( )
break
else:
d.swipe( width / 2, height * 6 / 7, width / 2, height / 7 )
if d(text='下一步').exists:
d(text='下一步').click()
z.sleep(8)
z.heartbeat()
if d(text='为了验证身份,我们将会发送短信验证码到你的手机').exists:
d(resourceId='im.yixin:id/register_phone_number_edittext').click.bottomright() # clear the input box
self.scode.defriendPhoneNumber(PhoneNumber, self.scode.WECHAT_REGISTER)
continue
if d(textContains='验证码短信已发送至').exists:
break
try:
code = self.scode.GetVertifyCode(PhoneNumber, self.scode.WECHAT_REGISTER, 4) # fetch the SMS verification code
self.scode.defriendPhoneNumber(PhoneNumber, self.scode.WECHAT_REGISTER)
except:
self.scode.defriendPhoneNumber(PhoneNumber, self.scode.WECHAT_REGISTER)
code = ''
if code == '':
z.toast( PhoneNumber + '手机号,获取不到验证码' )
return "fail"
z.input(code[0])
z.input(code[1])
z.input(code[2])
z.input(code[3])
if d(resourceId='im.yixin:id/register_username_edittext').exists:
d(resourceId='im.yixin:id/register_username_edittext').click()
z.input(nickname)
if d(resourceId='im.yixin:id/register_password_edittext').exists:
d( resourceId='im.yixin:id/register_password_edittext' ).click()
z.input(password)
if d(text='下一步').exists:
d(text='下一步').click()
z.sleep(3)
if d(text='进入易信',resourceId='im.yixin:id/btn_register_start').exists:
d(text='进入易信',resourceId='im.yixin:id/btn_register_start').click()
z.sleep(20)
if d(text='完善信息').exists:
d( index=1 ).click()
z.sleep(1)
ageArray = ['00后', '95后', '90后', '85后']
age = ageArray[random.randint(0, 3)]
if d(text=age).exists:
d(text=age).click()
if d(text='开启易信').exists:
d(text='开启易信').click()
z.sleep( 20 )
# d.server.adb.cmd( "shell", "am force-stop im.yixin" ).communicate( ) # 强制停止
# d.server.adb.cmd( "shell", "am start -n im.yixin/.activity.WelcomeActivity" ).communicate( ) # 拉起易信
z.heartbeat()
if d( text='立即更新' ).exists:
d(text='下次再说').click()
if d(text='好友').exists and d(text='我').exists and d(text='发现').exists:
z.toast( u'注册成功' )
d(text='我').click()
return PhoneNumber
else:
z.toast( u'注册失败,重新注册' )
return "fail"
def action(self, d, z, args):
while True:
z.toast( "正在ping网络是否通畅" )
while True:
ping = d.server.adb.cmd( "shell", "ping -c 3 baidu.com" ).communicate( )
print(ping)
if 'icmp_seq' and 'bytes from' and 'time' in ping[0]:
<<<<<<< HEAD
z.toast( "开始执行:易信注册模块 有卡槽" )
=======
z.toast( "开始执行:易信注册模块 无卡槽" )
>>>>>>> afe1e0af91f2a33f2ae5bdf0300d90ce5cc22551
break
z.sleep( 2 )
z.generate_serial( "im.yixin" ) # randomize the device fingerprint
z.toast( "随机生成手机特征码" )
saveCate = args['repo_account_id']
password = self.GenPassword( )
register_result = self.register( d, z, args, password )
if register_result == "fail":
continue
else:
# save the registered account to the repo
featureCodeInfo = z.get_serial( "im.yixin" )
self.repo.RegisterAccount( register_result, password, "", saveCate, "using", featureCodeInfo )
break
if (args['time_delay']):
z.sleep( int( args['time_delay'] ) )
def getPluginClass():
return YiXinRegister
if __name__ == "__main__":
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
clazz = getPluginClass()
o = clazz()
d = Device("HT54VSK01061")
z = ZDevice("HT54VSK01061")
d.server.adb.cmd("shell", "ime set com.zunyun.qk/.ZImeService").communicate()
args = {"repo_account_id": "279", "repo_number_id": "123", "repo_material_id": "139", "slot_time_limit": "2", "time_delay": "3"};
o.action(d, z, args)
# slot = Slot(d.server.adb.device_serial(),'yixin')
# slot.clear(1)
# slot.clear(2)
# d.server.adb.cmd( "shell", "pm clear im.yixin" ).communicate( ) # 清除缓存
# slot.restore( 1 )
# d.server.adb.cmd( "shell", "am start -n im.yixin/.activity.WelcomeActivity" ).communicate() # 拉起易信
| [
"[email protected]"
] | |
d1961e74a2e79af96908d797e62f8c02b98f3feb | 6e68ef0a53ce48da79b4906d85fc9785deee4ca5 | /Reverse/urls.py | 393afb2306b734c2dd1c0ad59846b0a9bf76a76c | [] | no_license | shubhamkharose/CODEDAEMON | e3ed8050b5c43ec146c6d253d06121fc37cdb2d4 | 6df7af35c51f5f54b2e2167e3d64d163c9a688f9 | refs/heads/master | 2021-04-06T00:58:01.515828 | 2018-03-15T11:04:31 | 2018-03-15T11:04:31 | 125,353,062 | 1 | 4 | null | 2019-10-28T04:03:58 | 2018-03-15T10:48:53 | JavaScript | UTF-8 | Python | false | false | 930 | py | """website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
app_name = 'Reverse'
'''
app_name is added because URL names are namespaced, so they are
reversed as 'Reverse:index' / 'Reverse:check'.
'''
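# With the namespace above, these routes reverse as, e.g. (the problem name
# 'sample_problem' is only an illustration):
# reverse('Reverse:index', kwargs={'problem_name': 'sample_problem'})
# {% url 'Reverse:check' problem_name='sample_problem' %}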
urlpatterns = [
url (r'^check/(?P<problem_name>[0-9A-Za-z_]+)/$',views.check,name='check'),
url (r'^(?P<problem_name>[0-9A-Za-z_]+)/$',views.index,name='index'),
]
| [
"[email protected]"
] | |
67175736189e77eb4d95c43ea91bc66748416e04 | 8a55b9000920b75f937073c043249090c13b04b1 | /mlcomp/utils/config.py | b036f3030ec955ff17b4b4b841ebe710cec54587 | [
"MIT"
] | permissive | jingmouren/mlcomp | 209f43296325387447549d1d206ffaeab5739d8e | 3fd251429be3892903ab6b3361bcd69c6ea9eeff | refs/heads/master | 2020-07-10T04:31:26.928425 | 2019-08-22T10:07:07 | 2019-08-22T10:07:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | from collections import defaultdict
from typing import List
import os
import json
import albumentations as A
from mlcomp import DATA_FOLDER
from mlcomp.utils.io import yaml_load
from mlcomp.utils.misc import dict_flatten, dict_unflatten
class Config(dict):
@property
def data_folder(self):
return os.path.join(DATA_FOLDER, self['info']['project'])
@staticmethod
def from_json(config: str):
return Config(json.loads(config))
@staticmethod
def from_yaml(config: str):
return Config(yaml_load(config))
def merge_dicts_smart(target: dict, source: dict, sep='/'):
target_flatten = dict_flatten(target)
mapping = defaultdict(list)
for k, v in target_flatten.items():
parts = k.split(sep)
for i in range(len(parts) - 1, -1, -1):
key = sep.join(parts[i:])
mapping[key].append(k)
for k, v in source.items():
assert len(mapping[k]) == 1, f'ambiguous mapping for {k}'
key = mapping[k][0]
target_flatten[key] = v
return dict_unflatten(target_flatten)
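# Minimal sketch of the "smart" merge (assumes dict_flatten/dict_unflatten
# round-trip '/'-joined keys): values in `source` may be keyed by any
# unambiguous suffix of a flattened target path, e.g.
# merge_dicts_smart({'a': {'b': 1}, 'c': 2}, {'b': 10})
# -> {'a': {'b': 10}, 'c': 2}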
def parse_albu(configs: List[dict]):
res = []
for config in configs:
assert 'name' in config, f'name is required in {config}'
config = config.copy()
name = config.pop('name')
if name == 'Compose':
items = config.pop('items')
aug = A.Compose(parse_albu(items), **config)
else:
aug = getattr(A, name)(**config)
res.append(aug)
return res
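# Usage sketch (parameter values are illustrative; Compose/HorizontalFlip/Blur
# are standard albumentations names, but check your installed version):
# parse_albu([
#     {'name': 'HorizontalFlip', 'p': 0.5},
#     {'name': 'Compose', 'items': [{'name': 'Blur'}], 'p': 1.0},
# ])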
__all__ = ['Config', 'merge_dicts_smart', 'parse_albu']
| [
"[email protected]"
] | |
b75bd97af0d87c71caf404ca4aed646d76e18dca | 2ef27655cd1deb9de4074249e559269abd334fa1 | /6 kyu/Decipher Student Messages.py | cbe021ef697c6a0afe2e18953a1c584352271249 | [] | no_license | sieczkah/Codewars_KATA | c7606b9a88693e2550af0ef55808f34c00e77b73 | 68d5d4a133a015e49bcdbff29ee45e3baefcd652 | refs/heads/main | 2023-05-06T03:59:01.403765 | 2021-05-24T19:36:34 | 2021-05-24T19:36:34 | 334,698,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | """https://www.codewars.com/kata/5a1a144f8ba914bbe800003f/train/python"""
def decipher_message(message):
    lng = int(len(message) ** 0.5) # the cipher grid is always a perfect square, so this is its side length
    words = [message[i::lng] for i in range(lng)] # read column-wise: in a 5x5 grid take every 5th letter, in a 6x6 every 6th...
return ''.join(words)
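# Minimal sanity check; the sample message below is my own, not from the kata.
# The 3x3 grid 'CEROWSDA!' transposes back column-by-column to 'CODEWARS!'.
if __name__ == '__main__':
    print(decipher_message('CEROWSDA!'))  # -> CODEWARS!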
| [
"[email protected]"
] | |
959ee0746b95977a7b889b6b12e476719844568f | 7516dfcd3d2e012d98fa3aec45aafe0e2c64ffe1 | /py/utest/utest_fsoci.py | d3c0f7ac8809b0c7282c29600f364a91671f08a5 | [] | no_license | ReiMatsuzaki/naewdy2 | 64e1c06a7eca228811c83e49eed57c9502ba1c2e | 10f0110417b6d2699a688c64cdf39df0ef6d06c2 | refs/heads/master | 2021-03-16T10:12:02.856923 | 2018-03-15T03:30:00 | 2018-03-15T03:30:00 | 115,374,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | import unittest
from numpy import sqrt
from naewdy2.fsoci import *
class TestFsoci(unittest.TestCase):
def test_sign_ai(self):
self.assertAlmostEqual(0, sign_ai([1,2,3], 4))
self.assertAlmostEqual(1, sign_ai([1,2,3], 3))
self.assertAlmostEqual(-1, sign_ai([1,2,3], 2))
self.assertAlmostEqual(1, sign_ai([1,2,3], 1))
def test_aiaj(self):
self.assertAlmostEqual(1, aiaj([1,2,3], 1, 1, [1,2,3]))
self.assertAlmostEqual(0, aiaj([1,2,3], 4, 1, [1,2,3]))
self.assertAlmostEqual(1, aiaj([1,2,4], 4, 3, [1,2,3]))
self.assertAlmostEqual(-1, aiaj([1,3,4], 4, 2, [1,2,3]))
def test_eij(self):
self.assertAlmostEqual(sqrt(2.0),
eij([1,2,3], [1,2,3],
1, 1,
[1,2,3], [1,2,3]))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
c6887a8ffe4aaa0df0666ed9ab5b8c601c225868 | 941ace80571b53f53ab4e1f44d7b3ee9300e6a84 | /chapter02/lxml_example.py | 3bf1a23c01ccdd4989a11da724357915b61829e3 | [
"MIT"
] | permissive | qigezai/python-scrap | 81d3855caba095cab36f204a6b74c55f43cb7f15 | 3a9ad37a94008a8071b84e64d90c46f59580cca0 | refs/heads/master | 2021-10-10T06:26:18.023662 | 2019-01-07T14:46:19 | 2019-01-07T14:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | #!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/1/5 4:50 PM
import urllib2
import lxml.html
def scrape(html):
tree = lxml.html.fromstring(html)
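    # the selector picks the data cell (td.w2p_fw) of the table row with id
    # 'places_neighbours__row'; its text content is what gets returned below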
td = tree.cssselect('tr#places_neighbours__row > td.w2p_fw')[0]
area = td.text_content()
return area
if __name__ == '__main__':
html = urllib2.urlopen('http://example.webscraping.com/view/United-Kingdom-239').read()
print scrape(html) | [
"[email protected]"
] | |
10c3a4d5e3d2f35da492858f8236fd8081029116 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/R/robertionita/bookyfy.py | 9069f400e7f3e52096364d8732181bcdb8bb1ad9 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | # bestsellers from the kindle book store
import scraperwiki
import lxml.html
import time
import re
for x in range(1,6):
html = scraperwiki.scrape("http://www.amazon.com/Best-Sellers-Kindle-Store-eBooks/zgbs/digital-text/154606011/ref=zg_bs_154606011_pg_" + str(x) +
"?_encoding=UTF8&pg=" + str(x))
root = lxml.html.fromstring(html)
pos = 0
for el in root.cssselect("div.zg_itemImmersion"):
title = el.cssselect("div.zg_title a")[0].text_content()
link = el.cssselect("div.zg_title a")[0].attrib['href'].rstrip('\n') # Strip newline characters, funky shit happens if you don't
#rank = el.cssselect("span.zg_rankNumber")[0].text_content()
price = el.cssselect("strong.price")[0].text_content()
#release = el.cssselect("div.zg_releaseDate")[0].text_content()
author = el.cssselect("div.zg_byline")[0].text_content()
days_in_list = el.cssselect("td.zg_daysInList")[0].text_content()
pos += 1
booklink = scraperwiki.scrape(link)
bookpage = lxml.html.fromstring(booklink)
def get_rank(bookpage):
            ## For each book detail page, select the body element for scraping wizardry
for el in bookpage.cssselect("body"):
## Scraping rank
rank = el.cssselect("li#SalesRank b")[0].tail
## Extract rank number from book page using regex
re1='.*?' # Non-greedy match on filler
re2='(\\d+)' # Integer Number 1
rg = re.compile(re1+re2,re.IGNORECASE|re.DOTALL)
m = rg.search(rank)
if m:
rank=m.group(1)
#print "("+int1+")"+"\n"
print "Rank of book:"
print rank
#print lxml.html.tostring(rank)
return rank
rank = get_rank(bookpage)
print rank
record = {"Title" : title,
"Author" : author,
"Link" : link,
"Ranking" : get_rank(bookpage),
"Price" : price,
"sdate" : time.strftime( "%Y-%m-%d" )
}
scraperwiki.sqlite.save(unique_keys=["sdate"], data=record) | [
"[email protected]"
] | |
dbddf5f34bf33ff7cb4facd928b2c338fa2e36bc | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j20204+0704/sdB_GALEX_J20204+0704_lc.py | 9e2b035b6d7b472d8473d414942cee17af805004 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from gPhoton.gAperture import gAperture
def main():
    gAperture(band="NUV", skypos=[305.1135,7.070683], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_GALEX_J20204+0704/sdB_GALEX_J20204+0704_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
808ee195b759a16cb41071c38fd23df333d355a7 | c25b4125b76654452fc2d5cc2f0f7a47643df177 | /setup.py | bfee8ef032e3226e132a395cb98d6a4c1d1398ae | [
"MIT"
] | permissive | dfjsdkfj/grparks | 416b7fdd68a533573c5f4bb53dd7bf748a80c221 | 365717804fafb27c6e3d65322b6fd6b2a9315aa7 | refs/heads/master | 2020-12-24T09:02:01.982187 | 2016-02-02T20:40:06 | 2016-02-02T20:40:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | #!/usr/bin/env python
"""
Setup script for ParkFinder.
"""
import setuptools
from parks import __project__, __version__
import os
if os.path.exists('README.rst'):
README = open('README.rst').read()
else:
README = "" # a placeholder, readme is generated on release
CHANGES = open('CHANGES.md').read()
setuptools.setup(
name=__project__,
version=__version__,
description="Find and validate park data on OpenStreetMap.",
url='https://github.com/friendlycode/gr-parks',
author='Jace Browning',
author_email='[email protected]',
packages=setuptools.find_packages(),
entry_points={'console_scripts': []},
long_description=(README + '\n' + CHANGES),
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
],
install_requires=open('requirements.txt').readlines(),
)
| [
"[email protected]"
] | |
0f9135810627817d11f44817e880b6272d23f56a | 3b50605ffe45c412ee33de1ad0cadce2c5a25ca2 | /python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py | bd5b2c77983b93933e91520ae3ae0520e160ed9f | [
"Apache-2.0"
] | permissive | Superjomn/Paddle | f5f4072cf75ac9ecb0ff528876ee264b14bbf8d1 | 7a0b0dab8e58b6a3b28b3b82c43d55c9bd3d4188 | refs/heads/develop | 2023-02-04T20:27:54.244843 | 2023-01-26T15:31:14 | 2023-01-26T15:31:14 | 66,896,049 | 4 | 1 | Apache-2.0 | 2023-04-14T02:29:52 | 2016-08-30T01:45:54 | C++ | UTF-8 | Python | false | false | 8,965 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.nn.functional as F
class TestInferencePartialFeed(unittest.TestCase):
def setUp(self):
self.iterations = 10
self.size = 10
def run_network(self, places, use_split, has_persistable):
startup_prog = fluid.Program()
main_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
x = fluid.data(name='x', shape=[None, self.size], dtype='float32')
y = fluid.data(name='y', shape=[None, self.size], dtype='float32')
if has_persistable:
lr = fluid.data(name='lr', shape=[1], dtype='float32')
lr.persistable = True
else:
lr = fluid.data(name='lr', shape=[None], dtype='float32')
relu_x = F.relu(x)
relu_y = F.relu(y)
relu_lr = F.relu(lr)
exe = fluid.Executor(places[0])
exe.run(startup_prog)
prog = fluid.CompiledProgram(main_prog).with_data_parallel(
places=places
)
gen_random = lambda shape: np.random.uniform(
low=-1.0, high=1.0, size=shape
).astype('float32')
assert_result = lambda feed, result: np.testing.assert_array_equal(
np.maximum(0, feed), result
)
def assert_merged_unmerged(merged, unmerged):
unmerged = np.concatenate(unmerged, axis=0)
np.testing.assert_array_equal(merged, unmerged)
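        # note: with return_merged=True the executor concatenates per-place
        # outputs into one array, while return_merged=False yields one array
        # per place, so concatenating the unmerged pieces must reproduce the
        # merged result -- which is exactly what this helper asserts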
def feed_split_test():
for place_num in range(1, len(places) * 3):
x_np = gen_random([place_num, self.size])
y_np = gen_random([place_num, self.size])
if not lr.persistable or place_num <= len(places):
lr_np = gen_random([place_num])
else:
lr_np = gen_random([1])
feed = {x.name: x_np, y.name: y_np, lr.name: lr_np}
fetch_list = [relu_x, relu_y, relu_lr]
relu_x_np, relu_y_np, relu_lr_np = exe.run(
prog, feed=feed, fetch_list=fetch_list, return_merged=True
)
(
relu_x_np_unmerged,
relu_y_np_unmerged,
relu_lr_np_unmerged,
) = exe.run(
prog, feed=feed, fetch_list=fetch_list, return_merged=False
)
assert_merged_unmerged(relu_x_np, relu_x_np_unmerged)
assert_merged_unmerged(relu_y_np, relu_y_np_unmerged)
assert_merged_unmerged(relu_lr_np, relu_lr_np_unmerged)
assert_result(x_np, relu_x_np)
assert_result(y_np, relu_y_np)
if not lr.persistable or place_num <= len(places):
assert_result(lr_np, relu_lr_np)
else:
expected_relu_lr_np = max(lr_np[0], 0)
self.assertTrue(np.all(expected_relu_lr_np == relu_lr_np))
def feed_list_test():
for place_num in range(1, len(places) + 1):
x_np_list = []
y_np_list = []
lr_np_list = []
feed_list = []
for _ in range(place_num):
x_np = gen_random([1, self.size])
y_np = gen_random([1, self.size])
lr_np = gen_random([1])
x_np_list.append(x_np)
y_np_list.append(y_np)
lr_np_list.append(lr_np)
feed_list.append(
{x.name: x_np, y.name: y_np, lr.name: lr_np}
)
fetch_list = [relu_x, relu_y, relu_lr]
relu_x_np, relu_y_np, relu_lr_np = exe.run(
prog,
feed=feed_list,
fetch_list=fetch_list,
return_merged=True,
)
(
relu_x_np_unmerged,
relu_y_np_unmerged,
relu_lr_np_unmerged,
) = exe.run(
prog,
feed=feed_list,
fetch_list=fetch_list,
return_merged=False,
)
assert_merged_unmerged(relu_x_np, relu_x_np_unmerged)
assert_merged_unmerged(relu_y_np, relu_y_np_unmerged)
assert_merged_unmerged(relu_lr_np, relu_lr_np_unmerged)
x_np = np.concatenate(x_np_list)
y_np = np.concatenate(y_np_list)
lr_np = np.concatenate(lr_np_list)
assert_result(x_np, relu_x_np)
assert_result(y_np, relu_y_np)
assert_result(lr_np, relu_lr_np)
for _ in range(self.iterations):
if use_split:
feed_split_test()
else:
feed_list_test()
def test_main(self):
places = [fluid.cpu_places(4)]
if fluid.is_compiled_with_cuda():
places.append(fluid.cuda_places())
for p in places:
for has_persistable in [False, True]:
for use_split in [False, True]:
self.run_network(
p, use_split=use_split, has_persistable=has_persistable
)
class TestInferencePartialFeedUsingDataLoader(unittest.TestCase):
def setUp(self):
self.epoch_num = 3
self.batch_num = 101 # a prime number
self.batch_size = 32
def create_reader(self):
def __impl__():
for _ in range(self.batch_num):
yield np.random.random([self.batch_size, 1]).astype('float32'),
return __impl__
def run_network(self, iterable, use_cuda, drop_last):
x = fluid.data(shape=[None, 1], name='x', dtype='float32')
places = fluid.cuda_places() if use_cuda else fluid.cpu_places(4)
loader = fluid.io.DataLoader.from_generator(
feed_list=[x], capacity=16, iterable=iterable, drop_last=drop_last
)
y = paddle.static.nn.fc(x, size=10)
loss = paddle.mean(y)
exe = fluid.Executor(places[0])
exe.run(fluid.default_startup_program())
prog = fluid.CompiledProgram(
fluid.default_main_program()
).with_data_parallel(places=places, loss_name=loss.name)
loader.set_batch_generator(
self.create_reader(), places=places if iterable else None
)
for _ in range(self.epoch_num):
actual_batch_num = 0
if loader.iterable:
for feed_data in loader():
(x_data,) = exe.run(prog, feed=feed_data, fetch_list=[x])
self.assertEqual(x_data.shape[0] % self.batch_size, 0)
self.assertTrue(x_data.shape[0] != 0)
actual_batch_num += int(x_data.shape[0] / self.batch_size)
else:
loader.start()
try:
while True:
(x_data,) = exe.run(prog, fetch_list=[x])
self.assertEqual(x_data.shape[0] % self.batch_size, 0)
self.assertTrue(x_data.shape[0] != 0)
actual_batch_num += int(
x_data.shape[0] / self.batch_size
)
except fluid.core.EOFException:
loader.reset()
if not drop_last or len(places) == 1:
self.assertEqual(self.batch_num, actual_batch_num)
else:
self.assertGreater(self.batch_num, actual_batch_num)
def test_main(self):
use_cuda_list = (
[False, True] if fluid.is_compiled_with_cuda() else [False]
)
iterable_list = [False, True]
drop_last_list = [False, True]
for iterable in iterable_list:
for use_cuda in use_cuda_list:
for drop_last in drop_last_list:
with fluid.program_guard(fluid.Program(), fluid.Program()):
with fluid.scope_guard(fluid.Scope()):
self.run_network(iterable, use_cuda, drop_last)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ba2090eb36670814f7650ef6ffa2e6fb27f37fb5 | 692654b45228d813c8dc4c9ade0a6836cd2e7f17 | /other_tools/check_token.py | 2904c24fc086dd871cd4ade0faf53c78feebc2da | [] | no_license | sunary/nlp | dd67dce0a2001670efe0e1dc5f5ef7b014845982 | a9fa796118d51dd80cc9525d50247632caa00b7f | refs/heads/master | 2021-01-22T10:02:44.975681 | 2017-05-20T08:43:23 | 2017-05-20T08:43:23 | 43,935,102 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,720 | py | __author__ = 'sunary'
class CheckToken():
def __init__(self):
self.checker_token = []
self.black_token = []
def set_checker(self, checker):
self.checker_token = checker
def add_token(self, token):
'''
add token to the sorted list of token
Args:
token: the token need to be added
'''
if self.black_token:
position = self._find(token)
if token != self.black_token[position]:
self.black_token[position + 1:position + 1] = [token]
else:
            self.black_token.append(token)
def _find(self, token):
if not token:
return 0
left_position = 0
right_position = len(self.black_token) - 1
mid_position= (left_position + right_position)/2
mid_value = self.black_token[mid_position]
while left_position <= right_position:
if token < mid_value:
right_position = mid_position - 1
else:
left_position = mid_position + 1
mid_position = (left_position + right_position)/2
mid_value = self.black_token[mid_position]
return left_position - 1
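    # note: _find returns the index of the last stored token that is
    # lexicographically <= the given token (-1 when every stored token is greater)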
def check_token(self):
        '''
        check whether any contiguous run of checker_token matches a stored
        token; every matched token is removed from black_token
        Returns:
            bool: True if at least one stored token was found
        Examples:
            >>> set_checker([1, 2, 3, 4, 5, 6])
            >>> add_token([2, 3])
            >>> check_token()
            True
            >>> add_token([3, 4, 6])
            >>> check_token()
            False
        '''
        found = False
        for i in range(len(self.checker_token)):
len_token = 1
while True:
list_token = self.checker_token[i: i + len_token]
position = self._find(list_token) + 1
                if self.black_token and self.black_token[position - 1] == list_token:
                    del self.black_token[position - 1]
                    found = True
if position >= len(self.black_token) or len_token > len(self.black_token[position]) or len_token > len(list_token) or\
self.black_token[position][len_token - 1] != list_token[len_token - 1]:
break
len_token += 1
        return found
if __name__ == '__main__':
check_token = CheckToken()
check_token.set_checker([1, 2, 3, 2, 2, 4, 45, 46, 4, 45, 52, 1, 21, 4, 5, 3, 4, 5, 1, 2])
check_token.add_token([1, 2])
check_token.add_token([5, 2])
check_token.add_token([3, 4, 1])
check_token.add_token([3, 4])
check_token.add_token([2, 2])
print check_token.black_token
check_token.check_token()
print check_token.black_token | [
"[email protected]"
] | |
cc33910210b5a0f0c332798673e332c4b8cb7eb7 | f8aa7306eeea9d2eafc400392acbdff931306e57 | /tests/test_cli.py | abf9c2f77d6c6a7a256664301c1113bc18566435 | [
"Apache-2.0"
] | permissive | b-jazz/warehouse | 929d1a0e7b4de3fd0596ff8334bda31ab5856bdc | 8c5414d709c6fd04c1b013ded680057a7def0833 | refs/heads/master | 2020-12-26T00:34:54.053900 | 2014-03-08T20:30:25 | 2014-03-08T20:30:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import pretend
import werkzeug.serving
from warehouse.cli import ServeCommand
from warehouse.serving import WSGIRequestHandler
def test_serve(monkeypatch):
run_simple = pretend.call_recorder(
lambda *a, **kw: None,
)
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple)
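    # run_simple is stubbed out, so ServeCommand never starts a real server;
    # the test only asserts that it was invoked with the expected arguments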
host, port, app, use_reloader, use_debugger = (
pretend.stub() for x in range(5)
)
ServeCommand()(
app, host, port,
reloader=use_reloader,
debugger=use_debugger,
)
assert run_simple.calls == [
pretend.call(
host, port, app,
use_reloader=use_reloader,
use_debugger=use_debugger,
request_handler=WSGIRequestHandler,
),
]
| [
"[email protected]"
] | |
93250100f4dea25b292e8471b70ae83b71cce42f | e582d60b7996faf7b87c6d857613e63581d415b9 | /elliot/recommender/visual_recommenders/VNPR/visual_neural_personalized_ranking_model.py | 858a318f1ec3594cc6a9eef6e489659da71b7b15 | [] | no_license | Abdel57Grota/Reenvisioning-the-comparison-between-Neural-Collaborative-Filtering-and-Matrix-Factorization | d6e51c32094550789673846acdf9891557b790c1 | 2a2b0148e881cf8ba45c48ad9d42f52421585284 | refs/heads/main | 2023-09-03T09:47:41.894117 | 2021-11-09T09:17:35 | 2021-11-09T09:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,024 | py | """
Module description:
"""
__version__ = '0.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo, Daniele Malitesta, Felice Antonio Merra'
__email__ = '[email protected], [email protected], [email protected], [email protected]'
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.random.set_seed(0)
class VNPRModel(keras.Model):
def __init__(self,
num_users,
num_items,
embed_mf_size, l_w, mlp_hidden_size, dropout, learning_rate=0.01,
emb_image=None,
name="VNPR",
**kwargs):
super().__init__(name=name, **kwargs)
tf.random.set_seed(42)
self.num_users = num_users
self.num_items = num_items
self.embed_mf_size = embed_mf_size
self.l_w = l_w
self.mlp_hidden_size = mlp_hidden_size
self.dropout = dropout
self.initializer = tf.initializers.GlorotUniform()
self.user_mf_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mf_size,
embeddings_initializer=self.initializer, name='U_MF',
dtype=tf.float32)
self.item_mf_embedding_1 = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size,
embeddings_initializer=self.initializer, name='I_MF_1',
dtype=tf.float32)
self.item_mf_embedding_2 = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size,
embeddings_initializer=self.initializer, name='I_MF_2',
dtype=tf.float32)
self.emb_image = emb_image
self.F = tf.Variable(
self.emb_image, dtype=tf.float32, trainable=False)
self.mlp_layers_1 = keras.Sequential()
for units in mlp_hidden_size:
            # the MLP can be made deeper; in the paper it maps directly to a single output
self.mlp_layers_1.add(keras.layers.Dropout(dropout))
self.mlp_layers_1.add(keras.layers.Dense(units, activation='relu'))
self.mlp_layers_2 = keras.Sequential()
for units in mlp_hidden_size:
            # the MLP can be made deeper; in the paper it maps directly to a single output
self.mlp_layers_2.add(keras.layers.Dropout(dropout))
self.mlp_layers_2.add(keras.layers.Dense(units, activation='relu'))
self.optimizer = tf.optimizers.Adam(learning_rate)
@tf.function
def call(self, inputs, training=None, mask=None):
user, item1, item2 = inputs
user_mf_e = self.user_mf_embedding(user)
item_mf_e_1 = self.item_mf_embedding_1(item1)
item_mf_e_2 = self.item_mf_embedding_2(item2)
feature_e_1 = tf.nn.embedding_lookup(self.F, item1)
feature_e_2 = tf.nn.embedding_lookup(self.F, item2)
embedding_input_1 = tf.concat([user_mf_e * item_mf_e_1, feature_e_1], axis=2) # [batch_size, embedding_size]
mlp_output_1 = self.mlp_layers_1(embedding_input_1) # [batch_size, 1]
embedding_input_2 = tf.concat([user_mf_e * item_mf_e_2, feature_e_2], axis=2)
mlp_output_2 = self.mlp_layers_2(embedding_input_2) # [batch_size, 1]
return tf.squeeze(mlp_output_1), tf.squeeze(mlp_output_2), user_mf_e, item_mf_e_1, item_mf_e_2
@tf.function
def train_step(self, batch):
with tf.GradientTape() as tape:
user, pos, neg = batch
# Clean Inference
mlp_output_1, mlp_output_2, user_mf_e, item_mf_e_1, item_mf_e_2 = self.call(inputs=(user, pos, neg),
training=True)
difference = tf.clip_by_value(mlp_output_1 - mlp_output_2, -80.0, 1e8)
loss = tf.reduce_sum(tf.nn.softplus(-difference))
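            # BPR-style pairwise ranking loss: softplus(-(s_pos - s_neg)) equals
            # -log(sigmoid(s_pos - s_neg)), pushing positive items above negatives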
# Regularization Component
reg_loss = self.l_w * tf.reduce_sum([tf.nn.l2_loss(user_mf_e),
tf.nn.l2_loss(item_mf_e_1),
tf.nn.l2_loss(item_mf_e_2)])
# Loss to be optimized
loss += reg_loss
grads = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
return loss
@tf.function
def predict(self, inputs, training=False, **kwargs):
"""
Get full predictions on the whole users/items matrix.
Returns:
The matrix of predicted values.
"""
u, i = inputs
output_1, output_2, _, _, _ = self.call(inputs=(u, i, i), training=training)
return (output_1 + output_2) * 0.5
@tf.function
def get_recs(self, inputs, training=False, **kwargs):
"""
Get full predictions on the whole users/items matrix.
Returns:
The matrix of predicted values.
"""
user, item = inputs
user_mf_e = self.user_mf_embedding(user)
item_mf_e_1 = self.item_mf_embedding_1(item)
item_mf_e_2 = self.item_mf_embedding_2(item)
feature_e = tf.nn.embedding_lookup(self.F, item)
mf_output_1 = tf.concat([user_mf_e * item_mf_e_1, feature_e], axis=2) # [batch_size, embedding_size]
mf_output_2 = tf.concat([user_mf_e * item_mf_e_2, feature_e], axis=2) # [batch_size, embedding_size]
mlp_output_1 = self.mlp_layers_1(mf_output_1) # [batch_size, 1]
mlp_output_2 = self.mlp_layers_2(mf_output_2) # [batch_size, 1]
return tf.squeeze((mlp_output_1+mlp_output_2)/2)
@tf.function
def get_top_k(self, preds, train_mask, k=100):
return tf.nn.top_k(tf.where(train_mask, preds, -np.inf), k=k, sorted=True)
| [
"[email protected]"
] | |
a0e4d0fc0edadaf6b668bd6570f6c2ba34a2fc9e | a09740e643d6277ada23c82d8e87853a1cd1a9e5 | /Z_ALL_FILE/Py/omdf5.py | b033ff3d43a5953248d534cd87fd3b5182354496 | [
"Apache-2.0"
] | permissive | FuckBrains/omEngin | c5fb011887c8b272f9951df3880a879456f202e8 | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | refs/heads/main | 2023-03-20T18:27:53.409976 | 2021-03-14T15:50:11 | 2021-03-14T15:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | import pandas as pd
dates=['April-10', 'April-11', 'April-12', 'April-13','April-14','April-16']
income1=[10,20,10,15,10,12]
income2=[20,30,10,5,40,13]
df=pd.DataFrame({"Date":dates,
"Income_1":income1,
"Income_2":income2})
print(df.apply(lambda row: "Total income in "+ row["Date"]+ " is:"+str(row["Income_1"]+row["Income_2"]),axis=1))
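# Expected output is a pandas Series with one line per row, e.g. the first row:
# 0    Total income in April-10 is:30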
| [
"[email protected]"
] | |
2068d1710140295cd665f7971b3655a7f2367e15 | f0592d39eaf5f8bcbe46c4b16f6fa631be48887f | /tests/contrib/autoguide/test_hessian.py | f26a124db03826f7d6b1f111d1e4ca602e1d5ab1 | [
"MIT"
] | permissive | wsgharvey/pyro | 0bfc762a20c4bcbbe30e61adbcc2c33e32effdb5 | 5c3ef54050d9ad566e5965174d4ad51bd37e55dd | refs/heads/dev | 2021-05-06T18:57:58.458691 | 2018-10-10T01:48:52 | 2018-10-10T01:48:52 | 111,995,379 | 0 | 0 | null | 2017-11-25T10:33:12 | 2017-11-25T10:33:12 | null | UTF-8 | Python | false | false | 827 | py | from __future__ import absolute_import, division, print_function
import torch
import pyro.distributions as dist
from pyro.contrib.autoguide import _hessian
from tests.common import assert_equal
def test_mvn():
tmp = torch.randn(3, 10)
cov = torch.matmul(tmp, tmp.t())
mvn = dist.MultivariateNormal(cov.new_zeros(3), cov)
x = torch.randn(3, requires_grad=True)
y = mvn.log_prob(x)
assert_equal(_hessian(y, x), -mvn.precision_matrix)
def test_multi_variables():
x = torch.randn(3, requires_grad=True)
z = torch.randn(3, requires_grad=True)
y = (x ** 2 * z + z ** 3).sum()
H = _hessian(y, (x, z))
Hxx = (2 * z).diag()
Hxz = (2 * x).diag()
Hzz = (6 * z).diag()
target_H = torch.cat([torch.cat([Hxx, Hxz]), torch.cat([Hxz, Hzz])], dim=1)
assert_equal(H, target_H)
| [
"[email protected]"
] | |
745e078af6fac62e8b7d0448a12d31bb21a01a17 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/testenv/dayNight.py | 195043c0675c5a8b4e4d78951c90f5ecf6c3de4e | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 8,266 | py | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
dayMusic = loader.loadMusic("phase_4/audio/bgm/TC_nbrhood.mid")
# dayMusic = loader.loadMusic("phase_8/audio/bgm/TB_nbrhood.mid")
# base.cr.playGame.hood.loader.snow.cleanup()
nightMusic = loader.loadMusic("phase_8/audio/bgm/DL_nbrhood.mid")
# Load up some sfx
birdSfx1 = loader.loadSfx("phase_8/audio/sfx/SZ_DG_bird_01.mp3")
birdSfx2 = loader.loadSfx("phase_8/audio/sfx/SZ_DG_bird_02.mp3")
birdSfx3 = loader.loadSfx("phase_8/audio/sfx/SZ_DG_bird_03.mp3")
cricket1 = loader.loadSfx("/c/soundelux/Estate_Cricket_1.mp3")
cricket2 = loader.loadSfx("/c/soundelux/Estate_Cricket_2.mp3")
rooster = loader.loadSfx("/c/soundelux/Estate_rooster.mp3")
# No more tt birds chirping
taskMgr.remove("TT-birds")
# Get rid of the sky that comes with TT central
taskMgr.remove("skyTrack")
base.cr.playGame.hood.sky.hide()
base.cr.playGame.hood.loader.music.stop()
# Load up our own sky models
nightSky = loader.loadModel("phase_8/models/props/DL_sky")
nightSky.setScale(0.8)
nightSky.setTransparency(1)
nightSky.setBin("background", 102)
daySky = loader.loadModel("phase_3.5/models/props/TT_sky")
daySky.setBin("background", 100)
dayCloud1 = daySky.find("**/cloud1")
dayCloud2 = daySky.find("**/cloud2")
dayCloud1.setBin("background", 101)
dayCloud2.setBin("background", 101)
dawnSky = loader.loadModel("phase_6/models/props/MM_sky")
dawnSky.setScale(0.8)
dawnSky.setTransparency(1)
dawnSky.setBin("background", 102)
pe = PolylightEffect.make()
brightness = 1.25
darkness = 0.8
pe.setWeight(brightness)
base.localAvatar.node().setEffect(pe)
for sky in (nightSky, daySky, dawnSky):
sky.reparentTo(camera)
sky.setZ(0.0)
sky.setHpr(0.0, 0.0, 0.0)
ce = CompassEffect.make(NodePath(), CompassEffect.PRot | CompassEffect.PZ)
sky.node().setEffect(ce)
sky.setDepthTest(0)
sky.setDepthWrite(0)
# Color scale defines
dawnColor = Vec4(1,0.8,0.4,1)
dayColor = Vec4(1,1,1,1)
duskColor = Vec4(0.8,0.4,0.7,1)
nightColor = Vec4(0.3,0.3,0.5,1)
onAlpha = Vec4(1,1,1,1)
offAlpha = Vec4(1,1,1,0)
# Geom of the hood
geom = base.cr.playGame.hood.loader.geom
# List of butterflies
butterflies = base.cr.doFindAll("DistributedButterfly")
# List of lamps and glow discs
oneLights = geom.findAllMatches("**/prop_post_one_light_DNARoot")
threeLights = geom.findAllMatches("**/prop_post_three_light_DNARoot")
lamps = oneLights + threeLights
discs = []
# List of NodePaths of PolylightNodes
polylights = []
lightIndex = 0
for lamp in oneLights:
lamp.setColorScale(1,1,1,1,1)
disc = loader.loadModel("phase_3.5/models/props/glow")
# Add PolylightNodes
lightIndex += 1
plNode = PolylightNode("pl" + str(lightIndex))
plNode.setRadius(20)
#plNode.setPos(0,0,2)
plNode.setColor(1.0,0.8,0.4)
plNode.setFlickerType(PolylightNode.FSIN)
plNode.setFreq(6.0)
plNode.setOffset(-0.5)
plNodePath = NodePath(plNode)
polylights.append(plNodePath)
base.localAvatar.node().setEffect(base.localAvatar.node().getEffect(PolylightEffect.getClassType()).addLight(plNodePath))
# A glow around the lamp light bulb
disc.setBillboardPointEye()
disc.setPos(0.2,-1,10)
disc.setScale(8)
disc.setColorScale(1,1,0.8,0.25,1)
disc.setTransparency(1)
disc.reparentTo(lamp.find("**/p13"))
#disc.node().setEffect(pe)
discs.append(disc)
# A glow on the floor
disc = loader.loadModel("phase_3.5/models/props/glow")
disc.setPos(0,0,0.025)
disc.setHpr(0,90,0)
disc.setScale(14)
disc.setColorScale(1,1,0.8,0.25,1)
disc.setTransparency(1)
disc.reparentTo(lamp.find("**/p13"))
plNodePath.reparentTo(disc)
disc.node().setEffect(pe)
discs.append(disc)
for lamp in threeLights:
lamp.setColorScale(1,1,1,1,1)
disc = loader.loadModel("phase_3.5/models/props/glow")
# Add PolylightNodes
lightIndex += 1
plNode = PolylightNode("pl" + str(lightIndex))
plNode.setRadius(20)
plNode.setColor(1.0,1.0,1.0)
plNode.setFlickerType(PolylightNode.FRANDOM)
#plNode.setFreq(6.0)
plNode.setOffset(-0.5)
plNode.setScale(0.2)
plNode.setAttenuation(PolylightNode.AQUADRATIC)
plNodePath = NodePath(plNode)
polylights.append(plNodePath)
base.localAvatar.node().setEffect(base.localAvatar.node().getEffect(PolylightEffect.getClassType()).addLight(plNodePath))
disc.setBillboardPointEye()
disc.setPos(0,-1,10)
disc.setScale(10)
disc.setColorScale(1,1,0.8,0.25,1)
disc.setTransparency(1)
disc.reparentTo(lamp.find("**/p23"))
plNodePath.reparentTo(disc)
#disc.node().setEffect(pe)
discs.append(disc)
# A glow on the floor
disc = loader.loadModel("phase_3.5/models/props/glow")
disc.setPos(0,0,0.025)
disc.setHpr(0,90,0)
disc.setScale(14)
disc.setColorScale(1,1,0.8,0.2,1)
disc.setTransparency(1)
disc.reparentTo(lamp.find("**/p23"))
#disc.node().setEffect(pe)
discs.append(disc)
def makeNight():
for lamp in lamps:
lamp.setColorScale(1,1,1,1,1)
for disc in discs:
disc.show()
base.playSfx(cricket1, volume=0.3)
dayMusic.stop()
base.playMusic(nightMusic, volume=0.5)
for b in butterflies:
b.butterflyNode.hide()
def makeDay():
for lamp in lamps:
lamp.clearColorScale()
for disc in discs:
disc.hide()
base.playSfx(rooster, volume=0.2)
nightMusic.stop()
base.playMusic(dayMusic, volume=0.7)
for b in butterflies:
b.butterflyNode.show()
def lerpDaySkyFunc(color):
daySky.setColorScale(color, 1)
def lerpDawnSkyFunc(color):
dawnSky.setColorScale(color, 1)
def lerpNightSkyFunc(color):
nightSky.setColorScale(color, 1)
def lerpLightWeightFunc(weight):
base.localAvatar.node().setEffect(base.localAvatar.node().getEffect(PolylightEffect.getClassType()).setWeight(weight))
# Change this to change the day/night cycle length
t = 120.0
tSeg = t / 10.0
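# tSeg is the length of one fade or hold below; the looping Sequence chains
# eight such tSeg-long pieces (dawn, day, dusk and night fades plus four holds)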
dayMusic.stop()
nightMusic.stop()
nightSky.setColorScale(onAlpha)
daySky.setColorScale(offAlpha)
dawnSky.setColorScale(offAlpha)
render.setColorScale(nightColor)
i = Parallel(Sequence(Parallel(LerpColorScaleInterval(render, tSeg, dawnColor),
LerpFunctionInterval(lerpLightWeightFunc, duration=tSeg, toData=darkness, fromData=brightness),
LerpFunctionInterval(lerpNightSkyFunc, duration=tSeg, toData=offAlpha, fromData=onAlpha),
LerpFunctionInterval(lerpDawnSkyFunc, duration=tSeg, toData=onAlpha, fromData=offAlpha),
),
Func(makeDay),
Wait(tSeg),
Parallel(LerpFunctionInterval(lerpDawnSkyFunc, duration=tSeg, toData=offAlpha, fromData=onAlpha),
LerpFunctionInterval(lerpDaySkyFunc, duration=tSeg, toData=dayColor, fromData=offAlpha),
LerpColorScaleInterval(render, tSeg, dayColor),
),
Func(base.playSfx, birdSfx1, 0, 1, 0.3),
Wait(tSeg),
Func(base.playSfx, birdSfx2, 0, 1, 0.3),
Parallel(LerpFunctionInterval(lerpDaySkyFunc, duration=tSeg, toData=duskColor, fromData=dayColor),
LerpColorScaleInterval(render, tSeg, duskColor),
LerpFunctionInterval(lerpLightWeightFunc, duration=tSeg, toData=brightness, fromData=darkness),
),
Func(makeNight),
Parallel(LerpFunctionInterval(lerpDaySkyFunc, duration=tSeg, toData=offAlpha, fromData=duskColor),
LerpFunctionInterval(lerpNightSkyFunc, duration=tSeg, toData=onAlpha, fromData=offAlpha),
LerpColorScaleInterval(render, tSeg, nightColor),
),
Func(base.playSfx, cricket2, 0, 1, 0.2),
Wait(tSeg),
Func(base.playSfx, cricket1, 0, 1, 0.2),
Wait(tSeg),
),
)
i.loop()
"""
# To undo
i.finish()
render.clearColorScale()
dayMusic.stop()
nightMusic.stop()
"""
| [
"[email protected]"
] | |
81fb67bfbbafced31af6e9a8ec85def9ce72c428 | 4b8b0be0588f9e5249729f165b72a6b38324837d | /glycresoft_ms2_classification/prediction_tools/__init__.py | 52e2cd4a029490a37c2b53ed85f7619bf145d4ca | [] | no_license | GlycReSoft2/embed_tandem_ms_classifier | 5e2f569f2b74f2f14f1c1c0cede32de99c150890 | 0495f2234562a9c5dd02d545800c077df2305387 | refs/heads/master | 2020-06-02T09:32:55.457664 | 2015-06-20T21:30:19 | 2015-06-20T21:30:19 | 22,615,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | try:
from data_processing import prepare_model_file
from data_processing import save_model_file
from data_processing import call_by_coverage
from data_processing import determine_ambiguity
from data_processing import PredictionResults
from data_processing import convert_csv_to_nested
from classifier_definitions import *
except ImportError:
print("Unable to import parts of prediction_tools")
from .constants import constants
| [
"[email protected]"
] | |
b85a73a1586c716e42c86755109e4360e6d2a396 | b6068ad0383967f40cf338b6005d728edb1b647f | /DeepQNet/RL_brain.py | 1135e8eef781148a2f60be8b5d72d07114b31255 | [] | no_license | WOW5678/ReinforcementLearning | 8d0962232e7f4d9ea88e990f9bca98dad86f0ef0 | 5a8e1624fbecc5d39ca17ab2613a6555fe3d937f | refs/heads/master | 2020-03-30T07:32:02.576419 | 2019-11-22T03:27:03 | 2019-11-22T03:27:03 | 150,946,581 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,352 | py | # -*- coding:utf-8 -*-
'''
Create time: 2018/11/11 16:50
@Author: 大丫头
'''
"""
This part of code is the DQN brain, which is a brain of the agent.
All decisions are made in here.
Using Tensorflow to build the neural network.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
gym: 0.7.3
"""
import numpy as np
import pandas as pd
import tensorflow as tf
np.random.seed(1)
tf.set_random_seed(1)
# Deep Q Network off-policy
class DeepQNetwork:
def __init__(
self,
n_actions,
n_features,
learning_rate=0.01,
            reward_decay=0.9, # reward discount rate (gamma)
            e_greedy=0.9, # epsilon-greedy: take the greedy action with prob 0.9, a random one with prob 0.1
            replace_target_iter=300, # number of learning steps between target_net parameter updates
memory_size=500,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon_increment = e_greedy_increment
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
# total learning step
self.learn_step_counter = 0
# initialize zero memory [s, a, r, s_]
self.memory = np.zeros((self.memory_size, n_features * 2 + 2))
# consist of [target_net, evaluate_net]
self._build_net()
t_params = tf.get_collection('target_net_params')
e_params = tf.get_collection('eval_net_params')
        # ops that copy the eval_net parameters into target_net
self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
self.sess = tf.Session()
if output_graph:
# $ tensorboard --logdir=logs
# tf.train.SummaryWriter soon be deprecated, use following
tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.cost_his = []
def _build_net(self):
# ------------------ build evaluate_net ------------------
self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # input
self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # for calculating loss
with tf.variable_scope('eval_net'):
# c_names(collections_names) are the collections to store variables
c_names, n_l1, w_initializer, b_initializer = \
['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers
# first layer. collections is used later when assign to target net
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
# second layer. collections is used later when assign to target net
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_eval = tf.matmul(l1, w2) + b2
with tf.variable_scope('loss'):
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
with tf.variable_scope('train'):
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
# ------------------ build target_net ------------------
self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # input
with tf.variable_scope('target_net'):
# c_names(collections_names) are the collections to store variables
c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
# first layer. collections is used later when assign to target net
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
# second layer. collections is used later when assign to target net
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_next = tf.matmul(l1, w2) + b2
    # store one transition (state, action, reward, next state) per step
def store_transition(self, s, a, r, s_):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
        # hstack keeps a single row and concatenates along the column axis
transition = np.hstack((s, [a, r], s_))
# replace the old memory with new memory
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
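        # illustrative row layout: with n_features == 2 a stored transition is
        # [s0, s1, a, r, s0_, s1_]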
    # choose an action given the current observation
def choose_action(self, observation):
# to have batch dimension when feed into tf placeholder
observation = observation[np.newaxis, :]
if np.random.uniform() < self.epsilon:
            # exploit: pick the action with the largest predicted Q-value
# forward feed the observation and get q value for every actions
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
action = np.argmax(actions_value)
else:
            # explore: pick a random action
action = np.random.randint(0, self.n_actions)
return action
    # sample a batch from memory and update the eval network parameters
def learn(self):
# check to replace target parameters
        # every replace_target_iter learning steps, sync target_net with eval_net
if self.learn_step_counter % self.replace_target_iter == 0:
self.sess.run(self.replace_target_op)
print('\ntarget_params_replaced\n')
# sample batch memory from all memory
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
q_next, q_eval = self.sess.run(
[self.q_next, self.q_eval],
feed_dict={
self.s_: batch_memory[:, -self.n_features:], # fixed params
self.s: batch_memory[:, :self.n_features], # newest params
})
# change q_target w.r.t q_eval's action
        # copying q_eval makes (q_target - q_eval) zero for every action that was
        # not taken, so only the chosen action's Q-value contributes to the loss
q_target = q_eval.copy()
batch_index = np.arange(self.batch_size, dtype=np.int32)
eval_act_index = batch_memory[:, self.n_features].astype(int)
reward = batch_memory[:, self.n_features + 1]
        # Bellman targets for the taken actions: r + gamma * max_a' Q_target(s', a')
q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
"""
For example in this batch I have 2 samples and 3 actions:
q_eval =
[[1, 2, 3],
[4, 5, 6]]
q_target = q_eval =
[[1, 2, 3],
[4, 5, 6]]
Then change q_target with the real q_target value w.r.t the q_eval's action.
For example in:
sample 0, I took action 0, and the max q_target value is -1;
sample 1, I took action 2, and the max q_target value is -2:
q_target =
[[-1, 2, 3],
[4, 5, -2]]
So the (q_target - q_eval) becomes:
[[(-1)-(1), 0, 0],
[0, 0, (-2)-(6)]]
We then backpropagate this error w.r.t the corresponding action to network,
leave other action as error=0 cause we didn't choose it.
"""
# train eval network
_, self.cost = self.sess.run([self._train_op, self.loss],
feed_dict={self.s: batch_memory[:, :self.n_features],
self.q_target: q_target})
self.cost_his.append(self.cost)
# increasing epsilon
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
self.learn_step_counter += 1
def plot_cost(self):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(self.cost_his)), self.cost_his)
plt.ylabel('Cost')
plt.xlabel('training steps')
plt.show()
| [
"[email protected]"
] | |
49a442b058c1eb081db28a321b0d5020c9dec449 | a622e8b295d799b7b9125e2b15243b8bdae1dc09 | /1908/190826/미로문제/미로문제.py | 58c26cc2d9d42d035507cc03d22855d8235c45a4 | [] | no_license | otterji/algorithms | 3a36c04bacc22c46f1ee220b3b129cda876db797 | ebd0ade0cd1de8e489d015aa0b2833afeab3898e | refs/heads/master | 2020-11-26T02:48:13.299848 | 2020-04-11T12:09:41 | 2020-04-11T12:09:41 | 228,942,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | # 시작점과 끝점의 위치가 항상 1행은 아님. 중간일수도 있음. 제일 먼저 시작점 위치 찾기
# can be solved with DFS
# can be solved with backtracking as well
# determine whether the exit is reachable
import sys
sys.stdin = open("input.txt", "r")
T = int(input())
for tc in range(1, T+1):
N = int(input())
miro = [list(map(int,input())) for _ in range(N)]
for i in range(N):
for j in range(N):
if miro[i][j] == 2:
start = (i, j)
break
stack = []
visited = []
    dx = [0, 0, -1, 1] # row offsets
    dy = [-1, 1, 0, 0] # column offsets -- together the (dx, dy) pairs mean left, right, up, down
def DFS(miro, s, g):
x, y = s, g
stack.append((x, y))
while stack:
x = stack[-1][0]
y = stack[-1][1]
for i in range(4):
                if 0 <= dx[i] + x <= N-1 and 0 <= dy[i] + y <= N-1: # if the move stays inside the grid
if miro[dx[i] + x][dy[i] + y] == 3:
return 1
if miro[dx[i] + x][dy[i] + y] == 0 and (dx[i] + x, dy[i] + y) not in visited:
x = dx[i] + x
y = dy[i] + y
stack.append((x, y))
visited.append((x, y))
                    break # without this break the answer never comes out
else:
stack.pop()
return 0
result = DFS(miro, start[0], start[1])
print('#{} {}'.format(tc, result)) | [
"[email protected]"
] | |
7ca887133d33000f514b699d46925d3b00acac17 | 762742b3c5cb5706e93e12dbdc3f8c46fc65f0db | /Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py | 7ceb301f00593b3b7e14527d6a35493b28e8f30c | [
"MIT"
] | permissive | EmersonElectricCo/content | 018f95f7fe7de13819e093a3661587a18407e348 | 82c82bbee7d428f0b14991a88c67672e2c02f5af | refs/heads/master | 2021-06-17T04:54:22.938033 | 2021-05-06T16:39:59 | 2021-05-06T16:39:59 | 161,693,191 | 2 | 0 | MIT | 2018-12-18T15:16:49 | 2018-12-13T20:47:26 | Python | UTF-8 | Python | false | false | 10,139 | py | from datetime import datetime
import pytest
import OpenPhish_v2
import demistomock as demisto
from OpenPhish_v2 import (
Client,
_is_reload_needed,
remove_backslash,
reload_command,
status_command,
url_command,
)
from freezegun import freeze_time
from test_data.api_raw import RAW_DATA
MOCK_URL = "http://openphish.com"
MOCK_DELIVERED_MESSAGE = {}
DBOT_KEY = "DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor && val.Type == obj.Type)"
RELOADED_DATA = [
(Client(MOCK_URL, True, False, 2), {}, True), # case no data in memory
(
Client(MOCK_URL, True, False, 2),
{"list": []},
True,
), # case no timestamp and list is emtpy
(
Client(MOCK_URL, True, False, 2),
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": None,
},
True,
), # case no timestamp
(
Client(MOCK_URL, True, False, 1),
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": 1601542800000,
}, # datetime(2020, 10, 1, 10, 00, 00, 0) - timedelta(hours=1)
True,
),
(
Client(MOCK_URL, True, False, 2),
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": 1601542800000,
}, # datetime(2020, 10, 1, 10, 00, 00, 0) - timedelta(hours=1)
False,
),
(
Client(MOCK_URL, True, False, 0.5),
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": 1601542800000,
}, # datetime(2020, 10, 1, 10, 00, 00, 0) - timedelta(hours=1)
True,
),
]
@pytest.mark.parametrize("client,data,output", RELOADED_DATA)
def test_is_reload_needed(mocker, client, data, output):
"""
Given:
- data as IntegrationContext
When:
- reload command was required
Then:
- Returns False if last reload occurred in the past fetch_interval_hours. True otherwise
"""
with freeze_time(datetime(2020, 10, 1, 10, 00, 00, 0)):
assert _is_reload_needed(client, data) == output
LINKS = [("goo.co/", "goo.co"), ("goo.co", "goo.co")]
@pytest.mark.parametrize("url, expected_result", LINKS)
def test_remove_backslash(url: str, expected_result: str):
"""
Given:
- string representing url
When:
- saving data from to the integration context or checking a specific url
Then:
- checks the url format is without a backslash as last character
"""
assert remove_backslash(url) == expected_result
def test_reload_command(mocker):
"""
When:
- reloading data from to the api to integration context
Then:
- checks if the reloading finished successfully
"""
mock_data_from_api = RAW_DATA
mocker.patch.object(Client, "http_request", return_value=mock_data_from_api)
mocker.patch.object(demisto, "setIntegrationContext")
client = Client(
url=MOCK_URL, use_ssl=False, use_proxy=False, fetch_interval_hours=1
)
status = reload_command(client)
assert (
status.readable_output
== "Database was updated successfully to the integration context."
)
STANDARD_NOT_LOADED_MSG = "OpenPhish Database Status\nDatabase not loaded.\n"
STANDARD_4_LOADED_MSG = (
"OpenPhish Database Status\n"
"Total **4** URLs loaded.\n"
"Last load time **Thu Oct 01 2020 06:00:00 (UTC)**\n"
)
CONTEXT_MOCK_WITH_STATUS = [
({}, STANDARD_NOT_LOADED_MSG), # case no data in memory
(
{"list": [], "timestamp": "1601532000000"},
STANDARD_NOT_LOADED_MSG,
), # case no timestamp and list is emtpy
(
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": "1601532000000",
}, # datetime(2020, 10, 1, 10, 00, 00, 0) - timedelta(hours=1)}
STANDARD_4_LOADED_MSG,
),
]
@pytest.mark.parametrize("data,expected_result", CONTEXT_MOCK_WITH_STATUS)
@freeze_time("1993-06-17 11:00:00 GMT")
def test_status_command(mocker, data, expected_result):
"""
Given:
- Integration context
When:
- After status command
Then:
- Returns number of loaded urls if data was loaded.
- Otherwise, returns Database not loaded.
"""
client = Client(MOCK_URL, True, False, 1)
mocker.patch.object(demisto, "getIntegrationContext", return_value=data)
status = status_command(client)
assert status.readable_output == expected_result
CONTEXT_MOCK_WITH_URL = [
(
{"url": "hxxp://lloyds.settlemypayee.uk"},
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": "1601532000000",
},
[
{
"URL": [
{
"Data": "hxxp://lloyds.settlemypayee.uk",
"Malicious": {
"Vendor": "OpenPhish",
"Description": "Match found in OpenPhish database",
},
}
],
"DBOTSCORE": [
{
"Indicator": "hxxp://lloyds.settlemypayee.uk",
"Type": "url",
"Vendor": "OpenPhish",
"Score": 3,
}
],
}
],
),
(
{"url": "hxxp://goo.co"},
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": "1601532000000",
},
[
{
"URL": [{"Data": "hxxp://goo.co"}],
"DBOTSCORE": [
{
"Indicator": "hxxp://goo.co",
"Type": "url",
"Vendor": "OpenPhish",
"Score": 0,
}
],
}
],
),
(
{"url": "hxxp://whatsapp-chat02.zzux.com,hxxp://lloyds.settlemypayee.uk"},
{
"list": [
"hxxp://www.niccakorea.com/board/index.html",
"hxxp://lloyds.settlemypayee.uk",
"hxxp://whatsapp-chat02.zzux.com",
"hxxp://dd0ddddddcuser.ey.r.appspot.com",
],
"timestamp": "1601532000000",
},
[
{
"URL": [
{
"Data": "hxxp://whatsapp-chat02.zzux.com",
"Malicious": {
"Vendor": "OpenPhish",
"Description": "Match found in OpenPhish database",
},
}
],
"DBOTSCORE": [
{
"Indicator": "hxxp://whatsapp-chat02.zzux.com",
"Score": 3,
"Type": "url",
"Vendor": "OpenPhish",
}
],
},
{
"URL": [
{
"Data": "hxxp://lloyds.settlemypayee.uk",
"Malicious": {
"Vendor": "OpenPhish",
"Description": "Match found in OpenPhish database",
},
}
],
"DBOTSCORE": [
{
"Indicator": "hxxp://lloyds.settlemypayee.uk",
"Score": 3,
"Type": "url",
"Vendor": "OpenPhish",
}
],
},
],
),
]
@pytest.mark.parametrize("url,context,expected_results", CONTEXT_MOCK_WITH_URL)
def test_url_command(mocker, url, context, expected_results):
"""
Given:
- a url
When:
- mocking the integration context data, runnig url_command
Then:
- validating whether the url is malicious (in integration context)
"""
mocker.patch.object(
demisto, "getIntegrationContext", return_value=context,
)
mocker.patch.object(OpenPhish_v2, "_is_reload_needed", return_value=False)
client = Client(MOCK_URL, True, False, 1)
results = url_command(client, **url)
assert len(results) >= 1
for i in range(len(results)):
output = results[i].to_context().get("EntryContext", {})
assert output.get(
"URL(val.Data && val.Data == obj.Data)", []
) == expected_results[i].get("URL")
assert output.get(DBOT_KEY, []) == expected_results[i].get("DBOTSCORE")
| [
"[email protected]"
] | |
e6a351cca118db0b1b7aa38308e588865881e958 | bc441bb06b8948288f110af63feda4e798f30225 | /cmdb_sdk/api/instance/import_instance_with_csv_pb2.py | 61e3fc2fbb658d75940052b5048131ef784e637a | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 9,094 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: import_instance_with_csv.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='import_instance_with_csv.proto',
package='instance',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1eimport_instance_with_csv.proto\x12\x08instance\x1a\x1cgoogle/protobuf/struct.proto\"\xe9\x01\n\x1dImportInstanceWithCsvResponse\x12\x14\n\x0cinsert_count\x18\x01 \x01(\x05\x12\x14\n\x0cupdate_count\x18\x02 \x01(\x05\x12\x14\n\x0c\x66\x61iled_count\x18\x03 \x01(\x05\x12:\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32,.instance.ImportInstanceWithCsvResponse.Data\x1aJ\n\x04\x44\x61ta\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12%\n\x04\x64\x61ta\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Struct\"\x8f\x01\n$ImportInstanceWithCsvResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.instance.ImportInstanceWithCsvResponseb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_IMPORTINSTANCEWITHCSVRESPONSE_DATA = _descriptor.Descriptor(
name='Data',
full_name='instance.ImportInstanceWithCsvResponse.Data',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='instance.ImportInstanceWithCsvResponse.Data.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='instance.ImportInstanceWithCsvResponse.Data.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance.ImportInstanceWithCsvResponse.Data.data', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=234,
serialized_end=308,
)
_IMPORTINSTANCEWITHCSVRESPONSE = _descriptor.Descriptor(
name='ImportInstanceWithCsvResponse',
full_name='instance.ImportInstanceWithCsvResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='insert_count', full_name='instance.ImportInstanceWithCsvResponse.insert_count', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update_count', full_name='instance.ImportInstanceWithCsvResponse.update_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failed_count', full_name='instance.ImportInstanceWithCsvResponse.failed_count', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance.ImportInstanceWithCsvResponse.data', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_IMPORTINSTANCEWITHCSVRESPONSE_DATA, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=75,
serialized_end=308,
)
_IMPORTINSTANCEWITHCSVRESPONSEWRAPPER = _descriptor.Descriptor(
name='ImportInstanceWithCsvResponseWrapper',
full_name='instance.ImportInstanceWithCsvResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='instance.ImportInstanceWithCsvResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='instance.ImportInstanceWithCsvResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='instance.ImportInstanceWithCsvResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance.ImportInstanceWithCsvResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=311,
serialized_end=454,
)
_IMPORTINSTANCEWITHCSVRESPONSE_DATA.fields_by_name['data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_IMPORTINSTANCEWITHCSVRESPONSE_DATA.containing_type = _IMPORTINSTANCEWITHCSVRESPONSE
_IMPORTINSTANCEWITHCSVRESPONSE.fields_by_name['data'].message_type = _IMPORTINSTANCEWITHCSVRESPONSE_DATA
_IMPORTINSTANCEWITHCSVRESPONSEWRAPPER.fields_by_name['data'].message_type = _IMPORTINSTANCEWITHCSVRESPONSE
DESCRIPTOR.message_types_by_name['ImportInstanceWithCsvResponse'] = _IMPORTINSTANCEWITHCSVRESPONSE
DESCRIPTOR.message_types_by_name['ImportInstanceWithCsvResponseWrapper'] = _IMPORTINSTANCEWITHCSVRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImportInstanceWithCsvResponse = _reflection.GeneratedProtocolMessageType('ImportInstanceWithCsvResponse', (_message.Message,), {
'Data' : _reflection.GeneratedProtocolMessageType('Data', (_message.Message,), {
'DESCRIPTOR' : _IMPORTINSTANCEWITHCSVRESPONSE_DATA,
'__module__' : 'import_instance_with_csv_pb2'
# @@protoc_insertion_point(class_scope:instance.ImportInstanceWithCsvResponse.Data)
})
,
'DESCRIPTOR' : _IMPORTINSTANCEWITHCSVRESPONSE,
'__module__' : 'import_instance_with_csv_pb2'
# @@protoc_insertion_point(class_scope:instance.ImportInstanceWithCsvResponse)
})
_sym_db.RegisterMessage(ImportInstanceWithCsvResponse)
_sym_db.RegisterMessage(ImportInstanceWithCsvResponse.Data)
ImportInstanceWithCsvResponseWrapper = _reflection.GeneratedProtocolMessageType('ImportInstanceWithCsvResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _IMPORTINSTANCEWITHCSVRESPONSEWRAPPER,
'__module__' : 'import_instance_with_csv_pb2'
# @@protoc_insertion_point(class_scope:instance.ImportInstanceWithCsvResponseWrapper)
})
_sym_db.RegisterMessage(ImportInstanceWithCsvResponseWrapper)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
9f811d0e5fca8f23ad4e3fe6e2188485c4722a37 | 7ee8a3bc4fbe8e094a4acf0bc7dd58899a5f4d3e | /src/djnext_example/artist/migrations/0001_initial.py | 47c807a90b452b75770bc42c2fff12d3e484a197 | [] | no_license | yourlabs/djnext | 95798acf66fb3b507ea701cce31e40f1bcdf2b1d | 76516e2d76495300385223265878b5d30641c965 | refs/heads/master | 2023-02-09T06:40:07.051724 | 2018-05-13T21:37:24 | 2018-05-13T21:37:24 | 133,148,115 | 36 | 5 | null | 2023-01-26T03:23:41 | 2018-05-12T13:20:29 | Python | UTF-8 | Python | false | false | 561 | py | # Generated by Django 2.0.4 on 2018-05-12 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ['name'],
},
),
]
| [
"[email protected]"
] | |
34b64673ff08d394dce6f7563327c1fdc93549b7 | 256746f29f9995accd4fee35b9b8981264ca2e37 | /Ch06/2017-9-25.py | 7d2a5f71b389e7ec916d60249be31ee662dff0f2 | [] | no_license | Vagacoder/Python_for_everyone | adadd55561b2200d461afbc1752157ad7326698e | b2a1d1dcbc3cce5499ecc68447e1a04a8e59dc66 | refs/heads/master | 2021-06-22T00:26:02.169461 | 2019-05-25T16:06:04 | 2019-05-25T16:06:04 | 114,508,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | ##Ch06 R6.5
from random import *
count = 0
value = []
while count<10:
randomNumber = randint(1,10)
while randomNumber in value:
randomNumber = randint(1, 10)
value.append(randomNumber)
count += 1
print (value)
##Ch06 R6.6
from random import *
count = 0
value = []
while count<10:
randomNumber = randint(1,100)
while randomNumber in value:
randomNumber = randint(1, 100)
value.append(randomNumber)
count += 1
print (value)
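# Sketch (added; not part of the original exercise): random.sample draws
# distinct values directly, avoiding the rejection loops used above.
from random import sample
print("Sampled without rejection:", sample(range(1, 101), 10))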
max = value[0]  # note: this assignment shadows the built-in max()
min = value[0]  # note: this assignment shadows the built-in min()
for i in value:
if i > max:
max = i
if i < min:
min = i
print("Max is: %d" %max)
print("Min is: %d" %min) | [
"[email protected]"
] | |
57e6299d4c59ae36b3a95d328a5793886a62834a | d6f7ac9541ec803db6f3b528030f6dd94bf2c1fe | /bootcamp_module09/core/tests/test_student_59.py | 9836fc05f38771dec8e001f19bb7483049077493 | [
"BSD-3-Clause"
] | permissive | poloxu/bisb-bootcamp-2021-module09 | c6182abf2b04621e79cec21102da23aabd4fb307 | 46c146e2ffdeebf3b95abcd8fe382f982ce67cb6 | refs/heads/master | 2023-07-29T23:22:22.874853 | 2021-09-17T16:59:55 | 2021-09-17T16:59:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | from bootcamp_module09.core.student_59 import count_substring # noqa
def test_count_substring_single():
test_string = "CGCTAGCGT"
test_substring = "TAG"
expected_count = 1
observed_count = count_substring(test_string, test_substring)
assert expected_count == observed_count
def test_count_substring_repeated():
test_string = "AGCTAGCAGT"
test_substring = "AGC"
expected_count = 2
observed_count = count_substring(test_string, test_substring)
assert expected_count == observed_count
def test_count_substring_none():
test_string = "AGTCCCCTAGA"
test_substring = "AAA"
expected_count = 0
observed_count = count_substring(test_string, test_substring)
assert expected_count == observed_count
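# Minimal sketch of the function under test (an assumption -- the real
# implementation lives in bootcamp_module09.core.student_59). Counting
# non-overlapping occurrences with str.count satisfies all three cases above.
def count_substring_sketch(string, substring):
    return string.count(substring)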
| [
"[email protected]"
] | |
1c87a0e2825e26309b4244af6a2ee779071d0f2c | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba3962.pngMap.py | 48a92f7f1c037ff2bf416b51ebe1915236974ca6 | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba3962.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111101100100011111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111010000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000011111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000010011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111010000000000000010011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111001000000000000000000000000011111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111010000000000000000000000000000000111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111000000000000000000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111100000000000000000000000000000000000000000000011111111111111111111111111111111111111111',
'11111111111111111111111111111111111111100000000000000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111',
'11111111111111111111111111111111111100000000000000000011000000000000000111000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111110000000000000001111000000000000000111100000000000000001111111111111111111111111111111111111',
]
| [
"[email protected]"
] | |
4a1d6bf2ad0501abe44630ea764ba4fb0f30dd56 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_pragma227.py | 2ebfa1038e0dd1e6ab87822f17605f3c0fadb833 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,536 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=14
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.X.on(input_qubit[1])) # number=10
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=11
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=12
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
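# note (observation, not a change): the two consecutive SWAP gates in
# make_circuit cancel each other, leaving the sampled distribution unchanged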
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma227.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
e7dd7163634a0bbdb9a9cad543458590b2bb5119 | 955f9d3fb34af54de2f046d17bbac11c1474819e | /abc174/c.py | 8d677e68c2c1a02bfe30bd9fe642311a51a3f835 | [] | no_license | shimewtr/AtCoderPracticePython | 5bb4c28119fced2d111bd1810e0e290f25b6a191 | f3c22ec1f7a36a27848070c5c6ca4e1717b04ac6 | refs/heads/master | 2023-01-12T17:28:44.770138 | 2020-11-19T22:50:22 | 2020-11-19T22:50:22 | 204,830,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
k = int(input())
    # 7, 77, 777, ... can be divisible by k only when gcd(k, 10) == 1, so
    # multiples of 2 *or* 5 have no answer (checking only k % 2 looped
    # forever for k = 5, 25, ...).
    if k % 2 == 0 or k % 5 == 0:
        print(-1)
    else:
        ans = 1
        tmp = 7 % k
        # build 7, 77, 777, ... incrementally modulo k; re-summing the whole
        # number each pass made the k = 999983 test impractically slow
        while tmp != 0:
            tmp = (tmp * 10 + 7) % k
            ans += 1
        print(ans)
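# worked check: for k = 101 the sequence is 7, 77, 777, 7777 and
# 7777 = 77 * 101, so the answer is 4 -- matching test_input_1 below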
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_input_1(self):
print("test_input_1")
input = """101"""
output = """4"""
self.assertIO(input, output)
def test_input_2(self):
print("test_input_2")
input = """2"""
output = """-1"""
self.assertIO(input, output)
def test_input_3(self):
print("test_input_3")
input = """999983"""
output = """999982"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
1ea03400ca87f6315d33824b3426b6fb0d74d1c5 | 4589a9ea76e458793ad78059839b81d365f433de | /athena_automation/athenataf/tests/configuration/system/admin/delete_test_scenarios/DeleteTestScenarios.py | 8914ef5a124a3da9001bacaf87ea36bba1885e95 | [] | no_license | cash2one/reautomation_handoff | 5e2c4c432d8f658d1b57211782744bd0b56c52f6 | 7ef83572d659db35036189eb394f99de1369db5a | refs/heads/master | 2020-05-22T17:56:33.214080 | 2015-07-13T07:51:18 | 2015-07-13T07:51:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | import logging
logger = logging.getLogger('athenataf')
from athenataf.lib.functionality.test.ConfigurationTest import ConfigurationTest
class DeleteTestScenarios(ConfigurationTest):
'''
Test class for System Admin DeleteTestScenarios.
'''
def test_ath_11329_delete_view_only_guest_registration_only_non_default_values(self):
conf = self.config.config_vars
self.take_s1_snapshot()
system_page = self.LeftPanel.go_to_system_page()
system_page.go_to_admin_tab()
system_page.view_only_non_default_values(conf.viewonly,conf.viewonly,conf.viewonly)
system_page._save_settings()
system_page.go_to_admin_tab()
system_page.guest_registration_only_non_default_values(conf.guest_username,conf.guest_password,conf.guest_password)
system_page._save_settings()
self.take_s2_snapshot()
system_page.go_to_admin_tab()
system_page.restore_view_only_default_values()
system_page.go_to_admin_tab()
system_page.restore_guest_registration_only_default_values()
self.take_s3_snapshot()
self.assert_s1_s2_diff(0)
self.assert_s1_s3_diff()
self.clear()
| [
"[email protected]"
] | |
fd2b4017602792ace836e55c845558ba791a3588 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_PYTHON/DATA_STRUC_PYTHON_NOTES/python-prac/mini-scripts/python_Generate_Random_Number.txt.py | 98b69a5707be5d3b566f616421eb1f62442fefe9 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 60 | py | from numpy import random
x = random.randint(100)
print(x)
| [
"[email protected]"
] | |
8ab3069b9a328363bbbfd0ad67638a4ac549183c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/770.py | c1b36eac52d21c7e378886958c50e72ea92b665e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # Solver for Tic-Tac-Toe-Tomek game
import numpy as np
fin = open('A-large.in')
fout = open('testout_large.txt', 'w')
def CheckWinner(A, player_char, not_player_char):
    # map the board to 0/1 for the given player (works for 'X' or 'O');
    # np.where returns new arrays, so the caller's board A is never modified
    Acopy = A
Acopy = np.where(Acopy=='.', 0, Acopy)
Acopy = np.where(Acopy==not_player_char,0,Acopy)
Acopy = np.where(Acopy=='T',1,Acopy)
Acopy = np.where(Acopy==player_char,1,Acopy)
Acopy = np.array(Acopy, dtype=int)
# print(Acopy)
if max(np.sum(Acopy,0))==4 or max(np.sum(Acopy,1))==4 or np.trace(Acopy)==4 or sum(Acopy[[0,1,2,3], [3,2,1,0]])==4:
return(True)
else:
return(False)
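# sanity check: a board whose first column is all player_char maps to a
# column of ones, so max(np.sum(Acopy, 0)) == 4 and CheckWinner returns True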
T = int(fin.readline().rstrip('\n'))
for j in range(1,T+1,1):
board = []
line = fin.readline()
while line != '\n' and line != '':
board.append(list(line.strip('\n')))
line = fin.readline()
# CheckWinner(array)
# print(board)
matboard = np.array(board)
if CheckWinner(matboard, 'X', 'O'):
fout.write('Case #%d: X won\n' %j)
elif CheckWinner(matboard, 'O', 'X'):
fout.write('Case #%d: O won\n' %j)
elif np.in1d(['.'], matboard).all():
fout.write('Case #%d: Game has not completed\n' %j)
else:
fout.write('Case #%d: Draw\n' %j)
fin.close()
fout.close() | [
"[email protected]"
] | |
6264a0b4aebc98ab2fd8d75d31f9861aece0fde2 | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res/scripts/client/gui/scaleform/daapi/view/meta/fortchoicedivisionwindowmeta.py | 12eb657baafa06583d6ac8fb7bce9fbd90dcdb1c | [] | no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,202 | py | # 2016.05.01 15:22:42 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/FortChoiceDivisionWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class FortChoiceDivisionWindowMeta(AbstractWindowView):
"""
DO NOT MODIFY!
Generated with yaml.
__author__ = 'yaml_processor'
@extends AbstractWindowView
null
"""
def selectedDivision(self, divisionID):
"""
:param divisionID:
:return :
"""
self._printOverrideError('selectedDivision')
def changedDivision(self, divisionID):
"""
:param divisionID:
:return :
"""
self._printOverrideError('changedDivision')
def as_setDataS(self, data):
"""
:param data:
:return :
"""
if self._isDAAPIInited():
return self.flashObject.as_setData(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\fortchoicedivisionwindowmeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:22:42 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
285a2caa90b61ae628ae8b0c2b62c3ae736ac74f | aace5cbeeb567b017984898297192ea6b5c5993f | /文件操作/csv/03pd按照列写入csv文件.py | 67d0d5a2673a07fd7481e0836c2853236a6457af | [
"MIT"
] | permissive | Litao439420999/Spider | 4eb27fc332b9a97c9917c236c3653809c2229ac3 | 47d70ec92936b8bea87c641df47ea30e5dde86a1 | refs/heads/master | 2023-03-24T19:02:22.857250 | 2021-03-14T02:07:59 | 2021-03-14T02:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | '''
Description: Reference: https://blog.csdn.net/weixin_43245453/article/details/90054820?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control
Author: HCQ
Company(School): UCAS
Email: [email protected]
Date: 2021-01-16 21:40:14
LastEditTime: 2021-01-16 21:48:25
FilePath: /Spider/文件操作/csv/03pd按照列写入csv文件.py
'''
import pandas as pd
# a and b must have the same length, otherwise pandas raises an error
a = [x for x in range(5)]
b = [x for x in range(5,10)]
# the dict keys become the CSV column names
dataframe = pd.DataFrame({'a_name':a,'b_name':b})
# save the DataFrame as CSV; index controls whether row labels are written (default=True)
dataframe.to_csv(r"03保存test.csv",index=False, sep=',')
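# Round-trip sketch (added for illustration): reading the file back should
# reproduce the two columns written above.
df_back = pd.read_csv(r"03保存test.csv", sep=',')
print(df_back.head())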
| [
"[email protected]"
] | |
0846ce23d72a96dd3abeb6c06cb588f10a9f6824 | 24dabf63ba445fa4df205b5c9bbe89f9d7230527 | /transfer_learning/tools/double_iterator.py | 244733768081f4b153ad922e06ce30643145c6df | [] | no_license | marco-willi/hco-experiments | e51ea5581eefb4fc3b46fb4337b9f04eb52640fb | 7f3076b476e3311ed22d2db37c6d075e43d0d61f | refs/heads/master | 2021-01-22T04:09:37.706108 | 2018-01-03T20:44:46 | 2018-01-03T20:44:46 | 92,433,439 | 1 | 0 | null | 2017-08-21T03:49:27 | 2017-05-25T18:40:03 | Python | UTF-8 | Python | false | false | 2,987 | py | """
Double Iterator
- Outer (slower) ImageGenerator that serves large batches of data that just
fit into memory
- Inner (numpy) ImageGenerator that serves smaller batches of data
"""
# import modules
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import Iterator
class DoubleIterator(Iterator):
""" Outer / Inner data generators to optimize image serving
- batch_size: int
the number of images returned by the Iterator
- outer_generator: Iterator that returns images
typically ImageDataGenerator.flow_from_directory()
"""
def __init__(self, outer_generator, batch_size, seed=None,
inner_shuffle=True):
self.outer_generator = outer_generator
self.batch_size = batch_size
self.n_on_stack = 0
self.inner = None
self.n = outer_generator.n
self.seed = seed
self.inner_shuffle = inner_shuffle
def next(self):
""" Get next batch """
if (self.n_on_stack == 0) or (self.inner is None):
# get next batch of outer generator
X_outer, y_outer = self.outer_generator.next()
# calculate stack size for inner generator
self.n_on_stack = (self.outer_generator.batch_size //
self.batch_size)
            # Create inner data generator (no data augmentation - this is
            # done by the outer generator)
self.inner = ImageDataGenerator().flow(
X_outer, y_outer,
batch_size=self.batch_size,
seed=self.seed, shuffle=self.inner_shuffle)
# get next batch
X_inner, y_inner = self.inner.next()
self.n_on_stack -= 1
# print("N on stack: %s, batches_seen: %s" %
# (self.n_on_stack, self.outer_generator.total_batches_seen))
return X_inner, y_inner
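# Batching arithmetic sketch (illustration only): with the outer batch of
# 500 used below and an inner batch of 32, each outer read from disk serves
# 500 // 32 = 15 inner batches before the next (slow) outer read.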
if __name__ == '__main__':
from config.config import cfg_path
path = cfg_path['images'] + 'train/'
datagen_train = ImageDataGenerator(
rescale=1./255,
featurewise_center=False,
featurewise_std_normalization=False,
horizontal_flip=True,
zoom_range=[0.9, 1])
train_generator = datagen_train.flow_from_directory(
path,
target_size=(150, 150),
color_mode='rgb',
batch_size=500,
class_mode='sparse',
seed=123)
train_generator.batch_index
train_generator.total_batches_seen
train_generator.batch_size // 32
31 * 32
tt = DoubleIterator(train_generator, 32)
batch_x, batch_y = tt.next()
batch_x2, batch_y2 = tt.next()
import numpy as np
np.array_equal(batch_x, batch_x2)
batch_x.shape
3200 // 32
import time
for i in range(0, 100):
time_s = time.time()
X, y = tt.next()
time_elapsed = time.time() - time_s
print("Iteration %s took %s s" % (i, time_elapsed))
| [
"[email protected]"
] | |
f46483143cee2b1cfa802c56d800dd7312457b50 | 14e19bcaaf917924e7bb78e4f7e6b42662ff5164 | /fancy_month01/day17_fancy/day17_teacher/demo05.py | 5e119a78d204ea68d697a808609411ce80758693 | [] | no_license | Lzffancy/Aid_study | 5b3538443ca0ad1107a83ef237459b035fef70d0 | 4ba5e5045371490d68459edd1f0a94963e0295b1 | refs/heads/master | 2023-02-22T19:11:00.867446 | 2021-01-25T13:01:35 | 2021-01-25T13:01:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | """
    Closure
    Three requirements:
        there is an outer function and an inner function
        the inner function uses the outer function's variables
        the outer function returns the inner function
    Literal meaning:
        a sealed-off region of memory
    Purpose:
        the outer function's stack frame is not released after it finishes;
        it is kept alive so the inner function can reuse it
"""
def func01():
a = 100
def func02():
print(a)
    return func02  # return the inner function without calling it
# call the outer function to obtain the inner function
res = func01()
res()
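# Counter sketch (added): the retained outer frame is easiest to see when
# the inner function mutates outer state across calls.
def make_counter():
    count = 0
    def step():
        nonlocal count  # rebind the variable kept alive in the closure
        count += 1
        return count
    return step
counter = make_counter()
print(counter(), counter(), counter())  # 1 2 3 -- state survives the calls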
res() | [
"[email protected]"
] | |
511a610b4208faf06340813b7dc036f4cefe122c | 67971c2c66bce8e9746810592f71a33fcbbeb260 | /tests/test_database/test_playlist.py | cd1653bbcdf1c25933f2071b41dce51c388a761b | [
"MIT"
] | permissive | holing/LinDouFm | 78ade890c974b967ba3102cf93c31dee1bfcde09 | 463618599e2f3111c7fc2dd251940e9c4981b40b | refs/heads/master | 2021-01-17T03:39:53.758021 | 2015-01-18T14:13:36 | 2015-01-18T14:13:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | # coding:utf-8
from database.playlist import playlist
from database.music import music_model
from database.channel import channel_model
from tests.test_database.test_music import get_test_music
def test_playlist():
    # add a test channel
channel_name = u"test_channel_name"
channel_uuid = u"mk_test_douban-cid"
channel = channel_model.add_channel(channel_name, channel_uuid)
assert len(playlist.get_music_by_channel(channel, 20)) == 0
    # add test music entries
music_information = get_test_music()
new_music_list = []
for i in range(20):
music_information[u"cover"].seek(0)
music_information[u"audio"].seek(0)
music_information[u"uuid"] += unicode(i)
music = music_model.add_music(music_information[u"title"], music_information[u"artist"], music_information[u"album"]
, music_information[u"company"], music_information[u"public_time"], music_information[u"kbps"], music_information[u"cover"], music_information[u"audio"], music_information[u"uuid"])
new_music_list.append(music.key)
    # attach the test music keys to the test channel
channel_model.update_channel(channel, music_list=new_music_list)
channel = channel_model.get_channel(key=channel.key)[0]
assert len(playlist.get_music_by_channel(channel, 30)) == 20
assert len(playlist.get_music_by_channel(channel, 20)) == 20
assert len(playlist.get_music_by_channel(channel, 10)) == 10
    # clean up: delete the test channel and music
channel_model.delete_channel(channel)
music_list = music_model.get_music(title=music_information[u"title"])
for music in music_list:
music_model.delete_music(music)
| [
"root@ubuntu.(none)"
] | root@ubuntu.(none) |
63463a703612e5da4d3698590f690f700b1e48e0 | 7f57c12349eb4046c40c48acb35b0f0a51a344f6 | /2015/RotateList_v0.py | cc1c94e88855fbce957c86bc6277c56718a5008b | [] | no_license | everbird/leetcode-py | 0a1135952a93b93c02dcb9766a45e481337f1131 | b093920748012cddb77258b1900c6c177579bff8 | refs/heads/master | 2022-12-13T07:53:31.895212 | 2022-12-10T00:48:39 | 2022-12-10T00:48:39 | 11,116,752 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | #!/usr/bin/env python
# encoding: utf-8
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def print_l(head):
if head:
print head.val
if head.next:
print_l(head.next)
class Solution:
# @param {ListNode} head
# @param {integer} k
# @return {ListNode}
def rotateRight(self, head, k):
if not head:
return
if not head.next:
return head
l = 1
tail = head
while tail.next:
tail = tail.next
l += 1
k %= l
if k == 0:
return head
        # walk to the node just before the new head, cut the list there,
        # then splice the old tail onto the old head
        t = head
for i in range(l - k - 1):
t = t.next
new_head = t.next
t.next = None
tail.next = head
return new_head
if __name__ == '__main__':
s = Solution()
head = n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n5 = ListNode(5)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
h = s.rotateRight(head, 5)
print_l(h)
| [
"[email protected]"
] | |
8a7a2e55befff55fa7322db16f944dccb8bddcb3 | f33b30743110532ddae286ba1b34993e61669ab7 | /Optimal Division.py | 171cb2effb649a0cb56f16ae0f104dba31b07f47 | [] | no_license | c940606/leetcode | fe9dcee7a5daa4d52999d5f53253dd6dd33c348b | 631df2ce6892a6fbb3e435f57e90d85f8200d125 | refs/heads/master | 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | class Solution(object):
def optimalDivision(self, nums):
"""
:type nums: List[int]
:rtype: str
"""
n = len(nums)
        if n == 0:
            return ""
        if n == 1:
            # return a string for consistency with the other branches
            return str(nums[0])
if n == 2:
return str(nums[0]) + "/" + str(nums[1])
        # dividing nums[0] by the whole chain nums[1]/nums[2]/.../nums[n-1]
        # minimises the divisor, so this grouping is always optimal
        res = str(nums[0])
res += "/"
res += "(" + str(nums[1])
for i in range(2,n):
res += "/"+str(nums[i])
res += ")"
return res
a = Solution()
print(a.optimalDivision([1000,100,10,2])) | [
"[email protected]"
] | |
7b2653c28ca84b62142d0978452bfbd4823f4d88 | e28fad299c396ff153e5df666443e335a033b657 | /mms/stories/views.py | 183a6686c66e73e2b676c20eb9843e75bcd8bf7c | [] | no_license | easherma/mms_django | 387b179ab74bf4447fa7acefa6ac84f0423edb1f | 1ae30ae8bc30550dce19e288ae43759a8155f8ad | refs/heads/master | 2021-01-10T18:08:01.586356 | 2017-01-12T20:44:09 | 2017-01-12T20:44:09 | 71,917,502 | 0 | 0 | null | 2017-02-20T19:08:29 | 2016-10-25T16:36:14 | HTML | UTF-8 | Python | false | false | 2,896 | py | from django.contrib.auth.models import User
from stories.models import Story, Submission, Waypoint
from stories.serializers import StorySerializer, UserSerializer, SubmissionSerializer, WaypointSerializer
from rest_framework import viewsets
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.decorators import detail_route, list_route
from rest_framework.renderers import JSONRenderer
from django.utils.six import BytesIO
from rest_framework.parsers import JSONParser
import geojson
import json
def waypoint_to_geojson(waypoint, properties):
geometry= waypoint['geom']
#[f.name for f in models.Waypoint._meta.get_fields()]
feature = geojson.Feature(geometry=geometry, properties=properties)
return feature
class StoryViewSet(viewsets.ModelViewSet):
queryset = Story.objects.all()
serializer_class = StorySerializer
@detail_route()
def waypoints(self, request, pk=None):
#serializer = WaypointSerializer
story = self.get_object()
submissions = story.submissions.all()
        # accumulate features across *all* submissions; re-initialising the
        # list inside the loop kept only the last submission's waypoints
        features = []
        for submission in submissions:
            for waypoint in submission.waypoints.values():
                geom = geojson.loads(waypoint['geom'])
                # should return just the props we need
                properties = waypoint
                feature = geojson.Feature(geometry=geom, properties=properties)
                features.append(feature)
        waypoints = geojson.FeatureCollection(features)
return Response(waypoints)
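        # response shape (illustration): {"type": "FeatureCollection",
        #  "features": [{"type": "Feature", "geometry": ..., "properties": ...}]}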
@detail_route()
def users(self, request, pk=None):
story = self.get_object()
pk = self.kwargs['pk']
queryset = User.objects.filter(submission=story.pk)
#get to
return Response(queryset.values())
class WaypointsByStory(viewsets.ModelViewSet):
serializer_class = WaypointSerializer
storyname = 'My First Story'
queryset = Waypoint.objects.filter(submission__story__name='My First Story').select_related('submission')
#these are pretty much useless
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
class SubmissionViewSet(viewsets.ModelViewSet):
queryset = Submission.objects.all()
serializer_class = SubmissionSerializer
class WaypointViewSet(viewsets.ModelViewSet):
queryset = Waypoint.objects.all()
serializer_class = WaypointSerializer
class StoryList(APIView):
renderer_classes = (TemplateHTMLRenderer,)
template_name = 'stories_list.html'
def get(self, request):
queryset = Story.objects.all()
return Response({'stories': queryset})
| [
"[email protected]"
] | |
d39c7fb78ac2d32f16918615fb0f8dadb4a8b9d1 | 7af9841dfdeb7192cee9f5bc5ae24ebabeeebdcc | /project/admin.py | 06b0a54f6791eeb4a8343c0af355c73e99ad51a5 | [] | no_license | dimansion/bepy | 513d1d6b8c6f679ce97f46741b50b73dabf20484 | dd92999b9fb0d65e9479372718409785a8d26d26 | refs/heads/master | 2020-06-28T11:27:02.204255 | 2016-11-14T11:26:32 | 2016-11-14T11:26:32 | 67,694,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from django.contrib import admin
from project.models import Page, Content
class ContentInline(admin.TabularInline):
model = Content
prepopulated_fields = {'slug':('name',)}
class PageAdmin(admin.ModelAdmin):
list_display = ('title', 'published_date',)
prepopulated_fields = {'slug':('title',)}
inlines = [ContentInline]
# class ContentAdmin(admin.ModelAdmin):
# list_display = ('name', 'lesson',)
# prepopulated_fields = {'slug':('name',)}
admin.site.register(Page, PageAdmin)
# admin.site.register(Content, ContentAdmin) | [
"[email protected]"
] | |
8810e20c0d4928a9c3b0dbf23ef6590ec448b754 | 128d593efd591dc83a3aef2d4bfad39e73ee637e | /python_code/complete/no128 | a8958da736adcb09069e0cf51a44cd9584ed2446 | [] | no_license | jwan/ProjectEuler | 93be87d89cc58516d503dd5ed53bdbd706748cda | 65aec4f87b8899db6bad94a36412a28a4b4527e9 | refs/heads/master | 2021-01-17T08:21:46.654529 | 2011-05-02T23:11:35 | 2011-05-02T23:11:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,574 | #!/usr/bin/env python
# 1 --> (2,3,4,5,6,7)
# [1] 1
# [2,...,7] 6
# [8,...,19] 12
# [20,...,37] 18
# [38,...,61] 24
# f(k) = 3k^2 - 3k + 1
# f(k) = elements before layer k if k > 0
#Layer 0
# 1 -- (1,1) -- (2,1),(2,2),(2,3),(2,4),(2,5),(2,6)
# Layer 1
# 2 -- (2,1) -- (1,1), (2,2),(2,6), (3,1),(3,2),(3,12) C
# 3 -- (2,2) -- (1,1), (2,1),(2,3), (3,2),(3,3),(3,4) C
# 4 -- (2,3) -- (1,1), (2,2),(2,4), (3,4),(3,5),(3,6) C
# 5 -- (2,4) -- (1,1), (2,3),(2,5), (3,6),(3,7),(3,8) C
# 6 -- (2,5) -- (1,1), (2,4),(2,6), (3,8),(3,9),(3,10) C
# 7 -- (2,6) -- (1,1), (2,5),(2,1), (3,10),(3,11),(3,12) C
# Layer 2
# 8 -- (3,1) -- (2,1), (3,2),(3,12),(4,1),(4,2),(4,18) C
# 9 -- (3,2) -- (2,1),(2,2),(3,1),(3,3), (4,2),(4,3)
# 10 -- (3,3) -- (2,2), (3,2),(3,4), (4,3),(4,4),(4,5) C
# 11 -- (3,4) -- (2,2),(2,3),(3,3),(3,5), (4,5),(4,6)
# 12 -- (3,5) -- (2,3), (3,4),(3,6), (4,6),(4,7),(4,8) C
# 13 -- (3,6) -- (2,3),(2,4)
# 14 -- (3,7) -- (2,4)
# 15 -- (3,8) -- (2,4),(2,5)
# 16 -- (3,9) -- (2,5)
# 17 -- (3,10) -- (2,5),(2,6)
# 18 -- (3,11) -- (2,6)
# 19 -- (3,12) -- (2,6),(2,1)
# 20 -- (4,1) -- (3,)(4,)(5,)
# 21 -- (4,2) --(3,1)(3,2)
# 22 -- (4,3) -- (3,2)(3,3)
# 22 -- (4,4) --
# (n, k) is corner if k % (n - 1) == 1
# A corner is adjacent to 1 block of lower class, 2 of same, and 3 of higher
# the 2 of same will always be (n, k - 1 *wrap*), (n, k + 1 *wrap*)
# (n,1) will always be (n-1,1),(n,0),(n,2),(n+1,0),(n+1,1),(n+1,2)
# Both the n-1 and n+1 grouping will start where the previous one left off
# Only the corners and the final non-corner have a chance at 3 primes
# This is because if we are not either, then they are next to 2 consec. #'s,
# which give a diff. of 1, the other two pairs will give differences that differ
# by one, so at most 1 of each can be prime
##############################
# Case1, k neq 1, corner
##############################
# The corner (n, k) is adjacent to
# (n-1, (k-1)/(n-1)*(n-2) + 1), (n,k-1), (n,k+1)--> don't matter if not end piece,
# (n+1, (k-1)/(n-1)*n), (n+1, (k-1)/(n-1)*n + 1), (n+1, (k-1)/(n-1)*n + 2),
# 3*(n - 1)*(n - 2) + 1 + k vs.
# 3*(n - 2)*(n - 3) + 1 + (k - 1)/(n - 1)*(n - 2) + 1,
# 3*(n - 1)*(n - 2) + k,3*(n - 1)*(n - 2) + 2 + k,
# 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n, 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n + 1,
# 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n + 2
# Diffs
# 6*(n - 2) + (k - 1)/(n - 1),
# 1,1,
# 6*(n - 1) + (k - 1)/(n - 1) - 1,
# 6*(n - 1) + (k - 1)/(n - 1),
# 6*(n - 1) + (k - 1)/(n - 1) + 1,
# Only way it can be 3 is if
# c1=6*(n - 2) + (k - 1)/(n - 1),
# c2=6*(n - 1) + (k - 1)/(n - 1) - 1,
# c3=6*(n - 1) + (k - 1)/(n - 1) + 1,
# But if n > 2, c1 prime implies (k-1)/(n-1) == 1,5 mod 6
# implies c2 == 0,4 mod 6, c3 == 0,2 mod 6, so it is never possible
# for n > 2
# For n = 1, 1 works
# For n = 2, of 3,4,5,6,7 none work
##############################
# Case2, k = 1
##############################
# The corner (n, 1) is adjacent to
# (n-1, 1), (n,6*(n-1)), (n,2)--> don't matter if not end piece,
# (n+1, 6*n), (n+1, 1), (n+1, 2),
# 3*(n - 1)*(n - 2) + 2 vs.
# 3*(n - 2)*(n - 3) + 2,
# 3*(n - 1)*(n - 2) + 1 + 6*(n - 1),3*(n - 1)*(n - 2) + 3,
# 3*n*(n - 1) + 1 + 6*n, 3*n*(n - 1) + 2,
# 3*n*(n - 1) + 3
# Diffs
# 6*(n - 2),
# 6*(n - 1) - 1,1
# 6*(2*n - 1) - 1, 6*(n - 1),
# 6*(n - 1) + 1
# c1=6*(n - 1) - 1
# c2=6*(2*n - 1) - 1
# c3=6*(n - 1) + 1
# Start at n = 3 (cases 1 and 2 already done, special cases)
##############################
# Case3
##############################
# The one outlier is the final piece (n, 6*(n - 1))
# When n > 2, this is not 1 mod n - 1, hence not a corner
# This is adjacent to (n,1),(n,6*n-7),(n-1,1),(n-1,6*(n-2)),
# (n+1,6*n),(n+1,6*n-1)
# 3*(n - 1)*(n - 2) + 1 + 6*(n-1) vs.
# 3*(n - 1)*(n - 2) + 1 + 1, 3*(n - 1)*(n - 2) + 6*(n - 1),
# 3*(n - 2)*(n - 3) + 1 + 1, 3*(n - 2)*(n - 3) + 1 + 6*(n-2),
# 3*n*(n - 1) + 1 + 6*n, 3*n*(n - 1) + 6*n
# Diffs
# 6*(n - 1) - 1, 1,
# 6*(2*n - 3) - 1, 6*(n - 1),
# 6*n, 6*n - 1
# c1=6*(n - 1) - 1
# c2=6*(2*n - 3) - 1
# c3=6*n - 1
# Start at n = 3 (cases 1 and 2 already done, special cases)
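# Sanity check against the adjacency listed above: tile 8 = (3,1) touches
# 2, 9, 19, 20, 21, 37, giving differences 6, 1, 11, 12, 13, 29 -- three
# primes (11, 13, 29), so PD(8) = 3 as expected.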
from python_code.decorators import euler_timer
from python_code.functions import sieve
# 3*(n - 1)*(n - 2) + 2:
# c1=6*(n - 1) - 1 = 6*n - 7
# c2=6*(2*n - 1) - 1=12*n - 7
# c3=6*(n - 1) + 1=6*n - 5
# 3*(n - 1)*(n - 2) + 1 + 6*(n-1):
# c1=6*(n - 1) - 1=6*n - 7
# c2=6*(2*n - 3) - 1=12*n - 19
# c3=6*n - 1=6*n - 1
# in the first two layers only 1 and 2 do as we wish
# from there, first = 8, last = 19 and we can increment
# first by 6*(layer - 1) and last by 6*layer
# The first corner will be FC(layer) = 3*(layer - 1)*(layer - 2) + 2
# it only has PD = 3 if
# (6*layer - 7), (6*layer - 5) and (12*layer - 7) are prime
# The last corner will be
# LC(layer) = 3*(layer - 1)*(layer - 2) + 1 + 6*(layer - 1)
# it only has PD = 3 if
# (6*layer - 7), (6*layer - 1) and (12*layer - 19) are prime
# Instead of carrying out costly multiplications, we can increment
# these by 6 and 12 respectively, similarly
# FC(L + 1) - FC(L) = 6*(L - 1)
# LC(L + 1) - LC(L) = 6*L
# So we can increment these as well
@euler_timer(128)
def main():
TOTAL = 2000
MAX_n = 10**6
PRIMES = sieve(MAX_n)
# Constant, rather than linear lookup
prime_bools = [False]*(MAX_n + 1)
for prime in PRIMES:
prime_bools[prime] = True
count = 2
current = 2
layer = 3
first_corner = 8 # Value of first corner in layer
last_corner = 19 # Value of last corner in layer
six_shared = 11 # prime candidate shared by both corners,
# with a difference of 6
six_first = 13 # prime candidate for first corner, diff 6
six_last = 17 # prime candidate for last corner, diff 6
twelve_first = 29 # prime candidate for first corner, diff 12
twelve_last = 17 # prime candidate for last corner, diff 12
while count < TOTAL:
if twelve_first > MAX_n:
raise Exception("Primes not large enough")
if prime_bools[six_shared]:
if prime_bools[six_first] and prime_bools[twelve_first]:
current = first_corner
count += 1
if count < TOTAL:
if prime_bools[six_last] and prime_bools[twelve_last]:
current = last_corner
count += 1
six_shared, six_last = six_last, six_last + 6
six_first += 6
twelve_last, twelve_first = twelve_first, twelve_first + 12
first_corner += 6*(layer - 1)
last_corner += 6*layer
layer += 1
print current
if __name__ == "__main__":
main()
| [
"[email protected]"
] | ||
1c0718148e9e9ebb9bdd52de8a5d00b60b6504b5 | 29c58b3bec6ac0fcdb3070efc118600ee92004da | /test/test_email_html_dto.py | 92e5827bb8e4596c35ee57d8c9ef29da4ca517f5 | [
"MIT"
] | permissive | mailslurp/mailslurp-client-python | a2b5a0545206714bd4462ae517f242852b52aaf9 | 5c9a7cfdd5ea8bf671928023e7263847353d92c4 | refs/heads/master | 2023-06-23T00:41:36.257212 | 2023-06-14T10:10:14 | 2023-06-14T10:10:14 | 204,662,133 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py | # coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import mailslurp_client
from mailslurp_client.models.email_html_dto import EmailHtmlDto # noqa: E501
from mailslurp_client.rest import ApiException
class TestEmailHtmlDto(unittest.TestCase):
"""EmailHtmlDto unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test EmailHtmlDto
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = mailslurp_client.models.email_html_dto.EmailHtmlDto() # noqa: E501
if include_optional :
return EmailHtmlDto(
subject = '0',
body = '0'
)
else :
return EmailHtmlDto(
)
def testEmailHtmlDto(self):
"""Test EmailHtmlDto"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6d04d8977bbb04374efd4d17378fdc14d5da1a84 | a721e4ca65b79ce725c7b5b43539c963a3b55290 | /Halloween_Sale.py | ce32b629161728b86e99fa33e4cc4101e5a4e754 | [] | no_license | joydas65/Hackerrank-Problems | 0832d7cfd1de7e5df4dba76326ede735edc9afea | a16b3b0ebb65e7597f8f6417047da4d415a818c7 | refs/heads/master | 2022-06-21T12:47:55.241409 | 2022-06-18T18:21:08 | 2022-06-18T18:21:08 | 159,071,834 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | p,d,m,s = map(int, input().split())
ans = 0
while s >= p:
    ans += 1
    s -= p  # both original branches paid the current price, so hoist it
    if p > m:
        # the price drops by d after each game, but never below the floor m
        p = max(p - d, m)
print(ans)
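# e.g. p=20 d=3 m=6 s=80: prices 20,17,14,11,8,6 cost 76 <= 80 -> ans = 6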
| [
"[email protected]"
] | |
d06f68298b85070352f8aed0d2e30edf7ed61d84 | 4a5caabe31670ab44fe5097df3971d434fc9ca3f | /kgpy/optics/coordinate/decenter.py | d5438c129063ab4f46b7d9b63e6badcb0be0e0d5 | [] | no_license | ngoldsworth/kgpy | c61d64d39a4da011ad7a42566dbeb6ef88266dea | d751fca7f6cc6e762fdc954113f55d407055349d | refs/heads/master | 2022-11-27T14:25:01.972415 | 2020-07-30T23:24:10 | 2020-07-30T23:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | import dataclasses
import numpy as np
from astropy import units as u
import kgpy.mixin
__all__ = ['Decenter']
@dataclasses.dataclass
class Decenter(kgpy.mixin.Broadcastable):
x: u.Quantity = 0 * u.mm
y: u.Quantity = 0 * u.mm
@classmethod
def promote(cls, value: 'Decenter'):
return cls(value.x, value.y)
@property
def config_broadcast(self):
return np.broadcast(
super().config_broadcast,
self.x,
self.y,
)
def __invert__(self):
return type(self)(
-self.x,
-self.y,
)
def __call__(self, value: u.Quantity, inverse: bool = False, num_extra_dims: int = 0) -> u.Quantity:
value = value.copy()
        # splice singleton axes into the shape so x/y broadcast against the
        # extra leading dimensions of value (~1 == -2: insertion lands just
        # before the last two axes)
        sh = list(self.x.shape)
        sh[~1:~1] = [1] * num_extra_dims
x = self.x.reshape(sh)
y = self.y.reshape(sh)
if not inverse:
value[..., 0] += x
value[..., 1] += y
else:
value[..., 0] -= x
value[..., 1] -= y
return value
def copy(self):
return Decenter(
x=self.x,
y=self.y,
)
| [
"[email protected]"
] | |
efacad244c5ae011bae81166d0c9355ca56c784c | 430a146307fd1f64781a91ab60e79b45a231da28 | /l10n/admin.py | 347fd6f73abc0b496fa0697dde92dcc90646fdff | [
"BSD-2-Clause",
"MIT"
] | permissive | rsalmaso/django-fluo-l10n | 61455df2154538db665a9414285a85b7538c81c6 | e7b298748a4461407cffe4987a4453db6722c53a | refs/heads/master | 2021-01-18T23:56:46.507679 | 2016-01-03T14:34:37 | 2016-01-03T14:34:37 | 48,949,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2007-2016, Raffaele Salmaso <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
from fluo import admin
from .models import Country, AdministrativeArea
class AdministrativeAreaInline(admin.TabularInline):
model = AdministrativeArea
extra = 1
class CountryAdmin(admin.ModelAdmin):
list_display = ('printable_name', 'iso2_code', 'iso3_code',)
list_filter = ('continent', 'status')
search_fields = ('name', 'iso2_code', 'iso3_code')
inlines = [AdministrativeAreaInline]
admin.site.register(Country, CountryAdmin)
| [
"[email protected]"
] | |
e2ff82125ca55f866ce113b6933b903002731bc8 | 70280955a5382d73e58395eba78c119a400f4ce7 | /asakatsu/0609/4.py | 9f554c1b35208567493334073d67e3034afea623 | [] | no_license | cohock13/atcoder | a7d0e26a10a4e58690347a2e36839c2f503a79ba | d268aa68fc96203eab94d021bd158cf84bdb00bc | refs/heads/master | 2021-01-03T00:41:31.055553 | 2020-10-27T12:28:06 | 2020-10-27T12:28:06 | 239,839,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | H,W = map(int,input().split())
m = [list(map(int,input().split())) for _ in range(H)]
ans = []
for i in range(H):
    if i%2 == 0:  # even rows: sweep left -> right
for j in range(W):
if m[i][j]%2:
if j == W-1:
if i != H-1:
ans.append((i+1,j+1,i+2,j+1))
m[i+1][j] += 1
else:
ans.append((i+1,j+1,i+1,j+2))
m[i][j+1] += 1
    else:  # odd rows: sweep right -> left
for j in reversed(range(W)):
if m[i][j]%2:
if j == 0:
if i != H-1:
ans.append((i+1,j+1,i+2,j+1))
m[i+1][j] += 1
else:
ans.append((i+1,j+1,i+1,j))
m[i][j-1] += 1
print(len(ans))
for i in ans:
print(*i) | [
"[email protected]"
] | |
76dea297ed9137e442997eb9ab7a890747ca3906 | bf076ab3f9dd5c1860474665be646f89937f1a7f | /settings.py | 9acef3e24318d42f1f56f72b921037982218e7f2 | [
"MIT"
] | permissive | telminov/sonm-cdn-dns | f66f16fed0c67ed6f862410777f0c0fc3c87b27f | 960395f2e7f8d79b5dd2623919ccf89e964fe4ac | refs/heads/master | 2020-03-26T21:12:38.279423 | 2018-09-04T07:58:01 | 2018-09-04T07:58:01 | 145,374,340 | 0 | 0 | MIT | 2018-09-04T07:58:02 | 2018-08-20T06:16:27 | Python | UTF-8 | Python | false | false | 156 | py | NODE_MANAGER_URL = 'http://node-manager.cdn.sonm.soft-way.biz'
NODE_MANAGER_TOKEN = '123'
CDN_DOMAIN = 'cdn-sonm.soft-way.biz.'
IP_STACK_ACCESS_KEY = '123'
| [
"[email protected]"
] | |
ed8971e2218caea9e25d1d713f2f26676d439af4 | 672b6ac4700056d6f648ae52b6e58590ea1944b7 | /ch8code/equal.py | 36ff9125ab4400b04a58d3afdbf37ee5580673f9 | [] | no_license | CodedQuen/NumPy-Beginner-s-Guide | 1715de85dae1aea856a613462b132eb2e463170e | 8946c33ac02d61d310bd4b9095cd814add75d7d1 | refs/heads/master | 2022-11-06T10:37:23.821207 | 2020-06-27T03:19:19 | 2020-06-27T03:19:19 | 275,289,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | import numpy as np
print "Equal?", np.testing.assert_equal((1, 2), (1, 3))
| [
"[email protected]"
] | |
20d0368ac8cbfbff2bd5fb04603008994795b7ad | 721406d87f5086cfa0ab8335a936ece839ab2451 | /.venv/lib/python3.8/site-packages/opencensus/metrics/export/metric.py | 658a27e45125376833965c07c6c3db599f5498f8 | [
"MIT"
] | permissive | MarkusMeyer13/graph-teams-presence | 661296b763fe9e204fe1e057e8bd6ff215ab3936 | c302b79248f31623a1b209e098afc4f85d96228d | refs/heads/main | 2023-07-09T03:34:57.344692 | 2021-07-29T07:16:45 | 2021-07-29T07:16:45 | 389,268,821 | 0 | 0 | MIT | 2021-07-29T07:16:46 | 2021-07-25T05:23:08 | Python | UTF-8 | Python | false | false | 3,224 | py | # Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opencensus.metrics.export import metric_descriptor
class Metric(object):
"""A collection of time series data and label metadata.
This class implements the spec for v1 Metrics as of opencensus-proto
release v0.1.0. See opencensus-proto for details:
https://github.com/census-instrumentation/opencensus-proto/blob/v0.1.0/src/opencensus/proto/metrics/v1/metrics.proto#L35
Defines a Metric which has one or more timeseries.
:type descriptor: class: '~opencensus.metrics.export.metric_descriptor.MetricDescriptor'
:param descriptor: The metric's descriptor.
:type timeseries: list(:class: '~opencensus.metrics.export.time_series.TimeSeries')
:param timeseries: One or more timeseries for a single metric, where each
timeseries has one or more points.
""" # noqa
def __init__(self, descriptor, time_series):
if not time_series:
raise ValueError("time_series must not be empty or null")
if descriptor is None:
raise ValueError("descriptor must not be null")
self._time_series = time_series
self._descriptor = descriptor
self._check_type()
def __repr__(self):
return ('{}(time_series={}, descriptor.name="{}")'
.format(
type(self).__name__,
"<{} TimeSeries>".format(len(self.time_series)),
self.descriptor.name,
))
@property
def time_series(self):
return self._time_series
@property
def descriptor(self):
return self._descriptor
def _check_type(self):
"""Check that point value types match the descriptor type."""
check_type = metric_descriptor.MetricDescriptorType.to_type_class(
self.descriptor.type)
for ts in self.time_series:
if not ts.check_points_type(check_type):
raise ValueError("Invalid point value type")
def _check_start_timestamp(self):
"""Check that starting timestamp exists for cumulative metrics."""
if self.descriptor.type in (
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64,
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE,
metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION,
):
for ts in self.time_series:
if ts.start_timestamp is None:
raise ValueError("time_series.start_timestamp must exist "
"for cumulative metrics")
| [
"[email protected]"
] | |
dea8529b2857b268b43b97008302392c88a6f157 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/GtkSource/StyleSchemeChooserButtonClass.py | ee48a3e369d5c26602de032c6f2bf718a121d7bc | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,893 | py | # encoding: utf-8
# module gi.repository.GtkSource
# from /usr/lib64/girepository-1.0/GtkSource-3.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.overrides.Gtk as __gi_overrides_Gtk
import gi.repository.GObject as __gi_repository_GObject
import gi.repository.Gtk as __gi_repository_Gtk
import gobject as __gobject


class StyleSchemeChooserButtonClass(__gi.Struct):
    """
    :Constructors:

    ::

        StyleSchemeChooserButtonClass()
    """

    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass

    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass

    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass

    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass

    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass

    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass

    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass

    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.

        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass

    def __init__(self): # real signature unknown; restored from __doc__
        pass

    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass

    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass

    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass

    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass

    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass

    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass

    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass

    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass

    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().

        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass

    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass

    padding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    parent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default

    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(StyleSchemeChooserButtonClass), '__module__': 'gi.repository.GtkSource', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'StyleSchemeChooserButtonClass' objects>, '__weakref__': <attribute '__weakref__' of 'StyleSchemeChooserButtonClass' objects>, '__doc__': None, 'parent': <property object at 0x7f77ca6ecb80>, 'padding': <property object at 0x7f77ca6ecc70>})"
    __gtype__ = None # (!) real value is '<GType void (4)>'
    __info__ = StructInfo(StyleSchemeChooserButtonClass)
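

# Editor's note (hedged sketch, not part of the generated stub): at runtime
# this class is normally reached through gi's dynamic loader rather than
# this static stub, e.g.:
#
#     import gi
#     gi.require_version("GtkSource", "3.0")
#     from gi.repository import GtkSource
#     cls = GtkSource.StyleSchemeChooserButtonClass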
| [
"[email protected]"
] | |
979c07a99a4de6deead71a30be7e764a1d398bd8 | f900a9f48fe24c6a581bcb28ad1885cfe5743f80 | /Chapter_11/test_name_function.py | 1f6c6b10bf1eed5b8cf64f797faded06b16b0b93 | [] | no_license | Anjali-225/PythonCrashCourse | 76e63415e789f38cee019cd3ea155261ae2e8398 | f9b9649fe0b758c04861dad4d88058d48837a365 | refs/heads/master | 2022-12-03T21:35:07.428613 | 2020-08-18T11:42:58 | 2020-08-18T11:42:58 | 288,430,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | import unittest
from name_function import get_formatted_name


class NamesTestCase(unittest.TestCase):
    """Tests for 'name_function.py'."""

    def test_first_last_name(self):
        """Do names like 'Janis Joplin' work?"""
        formatted_name = get_formatted_name('janis', 'joplin')
        self.assertEqual(formatted_name, 'Janis Joplin')

    def test_first_last_middle_name(self):
        """Do names like 'Wolfgang Amadeus Mozart' work?"""
        formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')
        self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')
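

# Editor's sketch of the function under test (name_function.py is not part
# of this record; reconstructed from the two assertions above, so treat it
# as illustrative rather than the author's exact code):
#
#     def get_formatted_name(first, last, middle=''):
#         """Generate a neatly formatted full name."""
#         if middle:
#             full_name = f"{first} {middle} {last}"
#         else:
#             full_name = f"{first} {last}"
#         return full_name.title()
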
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
df3287e337b27feb9ec0bb40be295e9b74ceef18 | 56243d3bf67d8bc7770ab5d12e2ef812e69196de | /setup.py | 2b0c2bbc8e7dd85974ea6e4e24c97eba9dac99fd | [
"MIT"
] | permissive | William-Lake/comparing_lists | a48542bb9c2d8a0de701d2d01b049664ff02e7c0 | d9d53c89d4a36b1843bc536655cf8831afd4a2d4 | refs/heads/master | 2020-04-02T15:40:44.574432 | 2019-01-30T18:34:56 | 2019-01-30T18:34:56 | 154,578,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script."""

from setuptools import setup, find_packages

with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

requirements = [ ]

setup_requirements = [ ]

test_requirements = [ ]

setup(
author="William Lake",
author_email='N/A',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="A small python utility program I wrote for the rare instances where I just need to compare two lists of data.",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='comparing_lists',
name='comparing_lists',
packages=find_packages(include=['comparing_lists']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/William-Lake/comparing_lists',
version='0.1.0',
zip_safe=False,
)
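
# Editor's note (assumed, standard setuptools workflow; the commands are
# illustrative and not taken from this repository's docs):
#
#     pip install .             # install comparing_lists from this checkout
#     python setup.py sdist     # build a source distribution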
| [
"noreply"
] | noreply |
4d352b30c38ee5240aa74ad3e2bd79c7693bfa0a | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/notification-hub/azext_notification_hub/vendored_sdks/notificationhubs/models/notification_hubs_management_client_enums.py | f97b28ab81f5bed1408323aa284ef1b8c81d6704 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 789 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum


class SkuName(str, Enum):

    free = "Free"
    basic = "Basic"
    standard = "Standard"


class NamespaceType(str, Enum):

    messaging = "Messaging"
    notification_hub = "NotificationHub"


class AccessRights(str, Enum):

    manage = "Manage"
    send = "Send"
    listen = "Listen"
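

# Editor's sketch (not part of the generated SDK file): because each enum
# subclasses str, members compare equal to their wire values, and Enum
# lookup by value works too:
#
#     assert SkuName.free == "Free"
#     assert AccessRights("Send") is AccessRights.send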
| [
"[email protected]"
] | |
49db302c96c35f528c5f252b1a2f9596dea8b8ad | 63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09 | /python/ray/serve/examples/echo_full.py | 9639f1a258d95e7009d0f3ab0520bc1ed59235a0 | [
"Apache-2.0",
"MIT"
] | permissive | ray-project/maze-raylit | 79f0a5af9fe4bdc13a2d5b3919da867ed5439aab | a03cd14a50d87d58effea1d749391af530d7609c | refs/heads/master | 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 | Apache-2.0 | 2020-12-04T22:34:15 | 2020-12-03T17:47:58 | Python | UTF-8 | Python | false | false | 1,465 | py | import time

import requests

import ray
import ray.serve as serve

# initialize ray serve system.
ray.init(num_cpus=10)
client = serve.start()


# a backend can be a function or class.
# it can be made to be invoked from web as well as python.
def echo_v1(flask_request):
    response = flask_request.args.get("response", "web")
    return response


client.create_backend("echo:v1", echo_v1)

# An endpoint is associated with an HTTP path and traffic to the endpoint
# will be serviced by the echo:v1 backend.
client.create_endpoint("my_endpoint", backend="echo:v1", route="/echo")

print(requests.get("http://127.0.0.1:8000/echo", timeout=0.5).text)
# The service will be reachable from http

print(ray.get(client.get_handle("my_endpoint").remote(response="hello")))
# as well as within the ray system.


# We can also add a new backend and split the traffic.
def echo_v2(flask_request):
    # magic, only from web.
    return "something new"


client.create_backend("echo:v2", echo_v2)

# The two backends will now split the traffic 50%-50%.
client.set_traffic("my_endpoint", {"echo:v1": 0.5, "echo:v2": 0.5})

# Observe requests are now split between two backends.
for _ in range(10):
    print(requests.get("http://127.0.0.1:8000/echo").text)
    time.sleep(0.2)

# You can also change number of replicas for each backend independently.
client.update_backend_config("echo:v1", {"num_replicas": 2})
client.update_backend_config("echo:v2", {"num_replicas": 2})
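

# Editor's addition (hedged): one way to eyeball the 50/50 split configured
# above. Counts vary run to run; this only tallies the two response bodies.
from collections import Counter

tally = Counter(
    requests.get("http://127.0.0.1:8000/echo").text for _ in range(20))
print(tally)  # e.g. Counter({'web': 11, 'something new': 9})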
| [
"[email protected]"
] | |
98413f87180c601da3d941fbf79ed8b5fb9d4e36 | d2a2546165b3db6295a3f21972dda8ab9aab7846 | /src/vehicles/road_thief_supplies.py | 41875538a6ae4bb32f8ce3800c922d8d5fd5e778 | [] | no_license | andythenorth/road-hog | bab12b133dd674f0e6d7ae87498675f8da96b982 | 1800d57d4ce904e7041f24646c393b37903d9466 | refs/heads/main | 2022-09-26T19:57:31.006800 | 2022-09-17T10:09:37 | 2022-09-17T10:09:37 | 214,848,659 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | from road_vehicle import SuppliesHauler, DieselRoadVehicle

consist = SuppliesHauler(id='road_thief_supplies',
                         base_numeric_id=560,
                         name='Road Thief',
                         power=720,
                         vehicle_life=40,
                         intro_date=1989)

consist.add_unit(type=DieselRoadVehicle,
                 capacity=0,
                 vehicle_length=7,
                 always_use_same_spriterow=True)

consist.add_unit(capacity=45,
                 vehicle_length=7)
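
# Editor's note (hedged reading of the project-internal road_vehicle API,
# which is not included here): the first add_unit appears to define the
# powered tractor (capacity 0), the second a 45-capacity trailer.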
| [
"[email protected]"
] | |
81a57cd8a99f18eced67a63639f21d53f756df5d | 9d30115d59ed821a5c7aecf2318b5e0ed22c9676 | /src/codewars/python/8kyu/binary_addition.py | c09a98f894ac53f951b41e2efa477772038c98b0 | [] | no_license | garigari-kun/til | 02c7bf05274d1077b454e1f7d4a7355849441524 | b71f36a66045ab7da7f4a97f7e18de2aaa05f493 | refs/heads/master | 2020-04-16T02:13:45.727909 | 2018-12-16T01:26:40 | 2018-12-16T01:26:40 | 56,369,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | """
7kyu
Binary Addition

Implement a function that adds two numbers together and returns their sum in binary. The conversion can be done before, or after the addition.

The binary number returned should be a string.
"""


def add_binary(a, b):
    answer = a + b
    binary = bin(answer)[2:]
    return binary
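

# Editor's addition: a quick self-check. bin(3) == '0b11', so the [2:]
# slice just strips the '0b' prefix.
if __name__ == '__main__':
    assert add_binary(1, 2) == '11'
    assert add_binary(0, 0) == '0'
    assert add_binary(51, 12) == '111111'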
| [
"[email protected]"
] |